/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 * Copyright (c) 2014 Paul Sokolovsky
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "py/emitglue.h"
#include "py/objtype.h"
#include "py/runtime.h"
#include "py/bc0.h"
#include "py/bc.h"

#if 0
#define TRACE(ip) printf("sp=%d ", (int)(sp - &code_state->state[0] + 1)); mp_bytecode_print2(ip, 1, code_state->fun_bc->const_table);
#else
#define TRACE(ip)
#endif
// Value stack grows up (this makes it incompatible with native C stack, but
// makes sure that arguments to functions are in natural order arg1..argN
// (Python semantics mandates left-to-right evaluation order, including for
// function arguments). Stack pointer is pre-incremented and points at the
// top element.
// Exception stack also grows up, top element is also pointed at.

#define DECODE_UINT \
    mp_uint_t unum = 0; \
    do { \
        unum = (unum << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)
#define DECODE_ULABEL size_t ulab = (ip[0] | (ip[1] << 8)); ip += 2
#define DECODE_SLABEL size_t slab = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2

#if MICROPY_PERSISTENT_CODE

#define DECODE_QSTR \
    qstr qst = ip[0] | ip[1] << 8; \
    ip += 2;
#define DECODE_PTR \
    DECODE_UINT; \
    void *ptr = (void*)(uintptr_t)code_state->fun_bc->const_table[unum]
#define DECODE_OBJ \
    DECODE_UINT; \
    mp_obj_t obj = (mp_obj_t)code_state->fun_bc->const_table[unum]

#else

#define DECODE_QSTR qstr qst = 0; \
    do { \
        qst = (qst << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)
#define DECODE_PTR \
    ip = (byte*)MP_ALIGN(ip, sizeof(void*)); \
    void *ptr = *(void**)ip; \
    ip += sizeof(void*)
#define DECODE_OBJ \
    ip = (byte*)MP_ALIGN(ip, sizeof(mp_obj_t)); \
    mp_obj_t obj = *(mp_obj_t*)ip; \
    ip += sizeof(mp_obj_t)

#endif
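
// The DECODE_* macros above consume operand bytes from ip.  DECODE_UINT reads
// a 7-bits-per-byte, MSB-first varint where a set top bit means "more bytes
// follow": e.g. the byte sequence 0x82 0x05 decodes to (2 << 7) + 5 = 261,
// while any single byte <= 0x7f is a complete value on its own.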
#define PUSH(val) *++sp = (val)
#define POP() (*sp--)
#define TOP() (*sp)
#define SET_TOP(val) *sp = (val)

#if MICROPY_PY_SYS_EXC_INFO
#define CLEAR_SYS_EXC_INFO() MP_STATE_VM(cur_exception) = NULL;
#else
#define CLEAR_SYS_EXC_INFO()
#endif

#define PUSH_EXC_BLOCK(with_or_finally) do { \
    DECODE_ULABEL; /* except labels are always forward */ \
    ++exc_sp; \
    exc_sp->handler = ip + ulab; \
    exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1) | currently_in_except_block); \
    exc_sp->prev_exc = NULL; \
    currently_in_except_block = 0; /* in a try block now */ \
} while (0)

#define POP_EXC_BLOCK() \
    currently_in_except_block = MP_TAGPTR_TAG0(exc_sp->val_sp); /* restore previous state */ \
    exc_sp--; /* pop back to previous exception handler */ \
    CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(), not compliant, but it shouldn't be used in the first place */
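
// MP_TAGPTR_MAKE stores flag bits in the low (alignment) bits of the saved
// value-stack pointer: bit 0 records whether we were already inside an except
// block, and bit 1 records whether this entry was pushed by a with/finally
// (rather than a plain except).  MP_TAGPTR_PTR and MP_TAGPTR_TAG0/TAG1 (see
// py/bc.h) recover the pointer and the individual bits.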
// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
// sp points to bottom of stack which grows up
// returns:
//  MP_VM_RETURN_NORMAL, sp valid, return value in *sp
//  MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
//  MP_VM_RETURN_EXCEPTION, exception in fastn[0]
mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state, volatile mp_obj_t inject_exc) {
#define SELECTIVE_EXC_IP (0)
#if SELECTIVE_EXC_IP
#define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; } /* stores ip 1 byte past last opcode */
#define MARK_EXC_IP_GLOBAL()
#else
#define MARK_EXC_IP_SELECTIVE()
#define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; } /* stores ip pointing to last opcode */
#endif
#if MICROPY_OPT_COMPUTED_GOTO
    #include "py/vmentrytable.h"
    #define DISPATCH() do { \
        TRACE(ip); \
        MARK_EXC_IP_GLOBAL(); \
        goto *entry_table[*ip++]; \
    } while (0)
    #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
    #define ENTRY(op) entry_##op
    #define ENTRY_DEFAULT entry_default
#else
    #define DISPATCH() goto dispatch_loop
    #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
    #define ENTRY(op) case op
    #define ENTRY_DEFAULT default
#endif
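
// With computed goto each opcode handler ends by jumping, via entry_table,
// directly to the next opcode's handler; without it, DISPATCH() returns to a
// central switch.  The computed-goto form typically dispatches faster because
// the indirect branch is duplicated at every opcode.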

// nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
// sees that it's possible for us to jump from the dispatch loop to the exception
// handler.  Without this, the code may have a different stack layout in the dispatch
// loop and the exception handler, leading to very obscure bugs.
#define RAISE(o) do { nlr_pop(); nlr.ret_val = MP_OBJ_TO_PTR(o); goto exception_handler; } while (0)

#if MICROPY_STACKLESS
run_code_state: ;
#endif
    // Pointers which are constant for a particular invocation of mp_execute_bytecode()
    mp_obj_t * /*const*/ fastn;
    mp_exc_stack_t * /*const*/ exc_stack;
    {
        size_t n_state = mp_decode_uint_value(code_state->fun_bc->bytecode);
        fastn = &code_state->state[n_state - 1];
        exc_stack = (mp_exc_stack_t*)(code_state->state + n_state);
    }

    // variables that are visible to the exception handler (declared volatile)
    volatile bool currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
    mp_exc_stack_t *volatile exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack

#if MICROPY_PY_THREAD_GIL && MICROPY_PY_THREAD_GIL_VM_DIVISOR
    // This needs to be volatile and outside the VM loop so it persists across handling
    // of any exceptions.  Otherwise it's possible that the VM never gives up the GIL.
    volatile int gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
#endif

    // outer exception handling loop
    for (;;) {
        nlr_buf_t nlr;
outer_dispatch_loop:
        if (nlr_push(&nlr) == 0) {
            // local variables that are not visible to the exception handler
            const byte *ip = code_state->ip;
            mp_obj_t *sp = code_state->sp;
            mp_obj_t obj_shared;
            MICROPY_VM_HOOK_INIT

            // If we have an exception to inject, raise it now that the execution
            // context is fully set up.  This works as if the RAISE_VARARGS
            // bytecode had been executed.
            // Injecting an exc into a yield-from generator is a special case,
            // handled by MP_BC_YIELD_FROM itself.
            if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
                mp_obj_t exc = inject_exc;
                inject_exc = MP_OBJ_NULL;
                exc = mp_make_raise_obj(exc);
                RAISE(exc);
            }

            // loop to execute byte code
            for (;;) {
dispatch_loop:
#if MICROPY_OPT_COMPUTED_GOTO
                DISPATCH();
#else
                TRACE(ip);
                MARK_EXC_IP_GLOBAL();
                switch (*ip++) {
#endif

                ENTRY(MP_BC_LOAD_CONST_FALSE):
                    PUSH(mp_const_false);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_NONE):
                    PUSH(mp_const_none);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_TRUE):
                    PUSH(mp_const_true);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
                    mp_int_t num = 0;
                    if ((ip[0] & 0x40) != 0) {
                        // Number is negative
                        num--;
                    }
                    do {
                        num = (num << 7) | (*ip & 0x7f);
                    } while ((*ip++ & 0x80) != 0);
                    PUSH(MP_OBJ_NEW_SMALL_INT(num));
                    DISPATCH();
                }
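
                // The loop above is the signed variant of DECODE_UINT: seeding num
                // with -1 when bit 6 of the first byte is set gives two's-complement
                // sign extension, so a single byte 0x3f decodes to 63 while 0x7f
                // decodes to -1.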

                ENTRY(MP_BC_LOAD_CONST_STRING): {
                    DECODE_QSTR;
                    PUSH(MP_OBJ_NEW_QSTR(qst));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_CONST_OBJ): {
                    DECODE_OBJ;
                    PUSH(obj);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_NULL):
                    PUSH(MP_OBJ_NULL);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_FAST_N): {
                    DECODE_UINT;
                    obj_shared = fastn[-unum];
                    load_check:
                    if (obj_shared == MP_OBJ_NULL) {
                        local_name_error: {
                            MARK_EXC_IP_SELECTIVE();
                            mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, "local variable referenced before assignment");
                            RAISE(obj);
                        }
                    }
                    PUSH(obj_shared);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_DEREF): {
                    DECODE_UINT;
                    obj_shared = mp_obj_cell_get(fastn[-unum]);
                    goto load_check;
                }

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_LOAD_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    PUSH(mp_load_name(qst));
                    DISPATCH();
                }
#else
                ENTRY(MP_BC_LOAD_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                    mp_uint_t x = *ip;
                    if (x < mp_locals_get()->map.alloc && mp_locals_get()->map.table[x].key == key) {
                        PUSH(mp_locals_get()->map.table[x].value);
                    } else {
                        mp_map_elem_t *elem = mp_map_lookup(&mp_locals_get()->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
                        if (elem != NULL) {
                            *(byte*)ip = (elem - &mp_locals_get()->map.table[0]) & 0xff;
                            PUSH(elem->value);
                        } else {
                            PUSH(mp_load_name(MP_OBJ_QSTR_VALUE(key)));
                        }
                    }
                    ip++;
                    DISPATCH();
                }
#endif
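
                // The cached variant above uses the byte following the qstr operand
                // as a one-byte inline cache: it holds the map-table index of the
                // last successful lookup, so a hit costs one key compare instead of
                // a full map lookup.  LOAD_GLOBAL, LOAD_ATTR and STORE_ATTR below
                // use the same scheme.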

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_LOAD_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    PUSH(mp_load_global(qst));
                    DISPATCH();
                }
#else
                ENTRY(MP_BC_LOAD_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                    mp_uint_t x = *ip;
                    if (x < mp_globals_get()->map.alloc && mp_globals_get()->map.table[x].key == key) {
                        PUSH(mp_globals_get()->map.table[x].value);
                    } else {
                        mp_map_elem_t *elem = mp_map_lookup(&mp_globals_get()->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
                        if (elem != NULL) {
                            *(byte*)ip = (elem - &mp_globals_get()->map.table[0]) & 0xff;
                            PUSH(elem->value);
                        } else {
                            PUSH(mp_load_global(MP_OBJ_QSTR_VALUE(key)));
                        }
                    }
                    ip++;
                    DISPATCH();
                }
#endif

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_LOAD_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    SET_TOP(mp_load_attr(TOP(), qst));
                    DISPATCH();
                }
#else
                ENTRY(MP_BC_LOAD_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t top = TOP();
                    if (mp_obj_is_instance_type(mp_obj_get_type(top))) {
                        mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
                        mp_uint_t x = *ip;
                        mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                        mp_map_elem_t *elem;
                        if (x < self->members.alloc && self->members.table[x].key == key) {
                            elem = &self->members.table[x];
                        } else {
                            elem = mp_map_lookup(&self->members, key, MP_MAP_LOOKUP);
                            if (elem != NULL) {
                                *(byte*)ip = elem - &self->members.table[0];
                            } else {
                                goto load_attr_cache_fail;
                            }
                        }
                        SET_TOP(elem->value);
                        ip++;
                        DISPATCH();
                    }
                    load_attr_cache_fail:
                    SET_TOP(mp_load_attr(top, qst));
                    ip++;
                    DISPATCH();
                }
#endif

                ENTRY(MP_BC_LOAD_METHOD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_load_method(*sp, qst, sp);
                    sp += 1;
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_SUPER_METHOD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    sp -= 1;
                    mp_load_super_method(qst, sp - 1);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_BUILD_CLASS):
                    MARK_EXC_IP_SELECTIVE();
                    PUSH(mp_load_build_class());
                    DISPATCH();

                ENTRY(MP_BC_LOAD_SUBSCR): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t index = POP();
                    SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_FAST_N): {
                    DECODE_UINT;
                    fastn[-unum] = POP();
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_DEREF): {
                    DECODE_UINT;
                    mp_obj_cell_set(fastn[-unum], POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_name(qst, POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_global(qst, POP());
                    DISPATCH();
                }

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_STORE_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_attr(sp[0], qst, sp[-1]);
                    sp -= 2;
                    DISPATCH();
                }
#else
                // This caching code works with MICROPY_PY_BUILTINS_PROPERTY and/or
                // MICROPY_PY_DESCRIPTORS enabled because if the attr exists in
                // self->members then it can't be a property or have descriptors.  A
                // consequence of this is that we can't use MP_MAP_LOOKUP_ADD_IF_NOT_FOUND
                // in the fast-path below, because that store could override a property.
                ENTRY(MP_BC_STORE_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t top = TOP();
                    if (mp_obj_is_instance_type(mp_obj_get_type(top)) && sp[-1] != MP_OBJ_NULL) {
                        mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
                        mp_uint_t x = *ip;
                        mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                        mp_map_elem_t *elem;
                        if (x < self->members.alloc && self->members.table[x].key == key) {
                            elem = &self->members.table[x];
                        } else {
                            elem = mp_map_lookup(&self->members, key, MP_MAP_LOOKUP);
                            if (elem != NULL) {
                                *(byte*)ip = elem - &self->members.table[0];
                            } else {
                                goto store_attr_cache_fail;
                            }
                        }
                        elem->value = sp[-1];
                        sp -= 2;
                        ip++;
                        DISPATCH();
                    }
                    store_attr_cache_fail:
                    mp_store_attr(sp[0], qst, sp[-1]);
                    sp -= 2;
                    ip++;
                    DISPATCH();
                }
#endif

                ENTRY(MP_BC_STORE_SUBSCR):
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_subscr(sp[-1], sp[0], sp[-2]);
                    sp -= 3;
                    DISPATCH();

                ENTRY(MP_BC_DELETE_FAST): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (fastn[-unum] == MP_OBJ_NULL) {
                        goto local_name_error;
                    }
                    fastn[-unum] = MP_OBJ_NULL;
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_DEREF): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
                        goto local_name_error;
                    }
                    mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_delete_name(qst);
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_delete_global(qst);
                    DISPATCH();
                }

                ENTRY(MP_BC_DUP_TOP): {
                    mp_obj_t top = TOP();
                    PUSH(top);
                    DISPATCH();
                }

                ENTRY(MP_BC_DUP_TOP_TWO):
                    sp += 2;
                    sp[0] = sp[-2];
                    sp[-1] = sp[-3];
                    DISPATCH();

                ENTRY(MP_BC_POP_TOP):
                    sp -= 1;
                    DISPATCH();

                ENTRY(MP_BC_ROT_TWO): {
                    mp_obj_t top = sp[0];
                    sp[0] = sp[-1];
                    sp[-1] = top;
                    DISPATCH();
                }

                ENTRY(MP_BC_ROT_THREE): {
                    mp_obj_t top = sp[0];
                    sp[0] = sp[-1];
                    sp[-1] = sp[-2];
                    sp[-2] = top;
                    DISPATCH();
                }

                ENTRY(MP_BC_JUMP): {
                    DECODE_SLABEL;
                    ip += slab;
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(POP())) {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
                    DECODE_SLABEL;
                    if (!mp_obj_is_true(POP())) {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(TOP())) {
                        ip += slab;
                    } else {
                        sp--;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(TOP())) {
                        sp--;
                    } else {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_SETUP_WITH): {
                    MARK_EXC_IP_SELECTIVE();
                    // stack: (..., ctx_mgr)
                    mp_obj_t obj = TOP();
                    mp_load_method(obj, MP_QSTR___exit__, sp);
                    mp_load_method(obj, MP_QSTR___enter__, sp + 2);
                    mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 2);
                    sp += 1;
                    PUSH_EXC_BLOCK(1);
                    PUSH(ret);
                    // stack: (..., __exit__, ctx_mgr, as_value)
                    DISPATCH();
                }

                ENTRY(MP_BC_WITH_CLEANUP): {
                    MARK_EXC_IP_SELECTIVE();
                    // Arriving here, there's an "exception control block" on top of the
                    // stack, and the __exit__ method (with self) underneath it.  The
                    // bytecode calls __exit__ and "deletes" it off the stack, shifting
                    // the "exception control block" down to its place.
                    // The bytecode emitter ensures that there is enough space on the
                    // Python value stack to hold the __exit__ method plus an additional
                    // 4 entries.
                    if (TOP() == mp_const_none) {
                        // stack: (..., __exit__, ctx_mgr, None)
                        sp[1] = mp_const_none;
                        sp[2] = mp_const_none;
                        sp -= 2;
                        mp_call_method_n_kw(3, 0, sp);
                        SET_TOP(mp_const_none);
                    } else if (MP_OBJ_IS_SMALL_INT(TOP())) {
                        // Getting here there are two distinct cases:
                        //  - unwind return, stack: (..., __exit__, ctx_mgr, ret_val, SMALL_INT(-1))
                        //  - unwind jump, stack:   (..., __exit__, ctx_mgr, dest_ip, SMALL_INT(num_exc))
                        // For both cases we do exactly the same thing.
                        mp_obj_t data = sp[-1];
                        mp_obj_t cause = sp[0];
                        sp[-1] = mp_const_none;
                        sp[0] = mp_const_none;
                        sp[1] = mp_const_none;
                        mp_call_method_n_kw(3, 0, sp - 3);
                        sp[-3] = data;
                        sp[-2] = cause;
                        sp -= 2; // we removed (__exit__, ctx_mgr)
                    } else {
                        assert(mp_obj_is_exception_instance(TOP()));
                        // stack: (..., __exit__, ctx_mgr, exc_instance)
                        // Need to pass (exc_type, exc_instance, None) as arguments to __exit__.
                        sp[1] = sp[0];
                        sp[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(sp[0]));
                        sp[2] = mp_const_none;
                        sp -= 2;
                        mp_obj_t ret_value = mp_call_method_n_kw(3, 0, sp);
                        if (mp_obj_is_true(ret_value)) {
                            // We need to silence/swallow the exception.  This is done
                            // by popping the exception and the __exit__ handler and
                            // replacing it with None, which signals END_FINALLY to just
                            // execute the finally handler normally.
                            SET_TOP(mp_const_none);
                            assert(exc_sp >= exc_stack);
                            POP_EXC_BLOCK();
                        } else {
                            // We need to re-raise the exception.  We pop the __exit__
                            // handler by copying the exception instance down to the new
                            // top-of-stack.
                            sp[0] = sp[3];
                        }
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_UNWIND_JUMP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_SLABEL;
                    PUSH((mp_obj_t)(mp_uint_t)(uintptr_t)(ip + slab)); // push destination ip for jump
                    PUSH((mp_obj_t)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
unwind_jump:;
                    mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
                    while ((unum & 0x7f) > 0) {
                        unum -= 1;
                        assert(exc_sp >= exc_stack);
                        if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
                            // Getting here the stack looks like:
                            //     (..., X, dest_ip)
                            // where X is pointed to by exc_sp->val_sp and in the case
                            // of a "with" block contains the context manager info.
                            // We're going to run "finally" code as a coroutine
                            // (not calling it recursively).  Set up a sentinel
                            // on the stack so it can return back to us when it is
                            // done (when WITH_CLEANUP or END_FINALLY is reached).
                            // The sentinel is the number of exception handlers left to
                            // unwind, which is a non-negative integer.
                            PUSH(MP_OBJ_NEW_SMALL_INT(unum));
                            ip = exc_sp->handler; // get exception handler byte code address
                            exc_sp--; // pop exception handler
                            goto dispatch_loop; // run the exception handler
                        }
                        POP_EXC_BLOCK();
                    }
                    ip = (const byte*)MP_OBJ_TO_PTR(POP()); // pop destination ip for jump
                    if (unum != 0) {
                        // pop the exhausted iterator
                        sp -= MP_OBJ_ITER_BUF_NSLOTS;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }
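
                // The operand byte pushed by UNWIND_JUMP encodes, in its low 7 bits,
                // how many exception handlers must be unwound before jumping, and in
                // the 0x80 bit whether a for-loop iterator must also be popped once
                // the destination is reached (e.g. a `break` out of a for loop).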

                // matched against: POP_BLOCK or POP_EXCEPT (anything else?)
                ENTRY(MP_BC_SETUP_EXCEPT):
                ENTRY(MP_BC_SETUP_FINALLY): {
                    MARK_EXC_IP_SELECTIVE();
#if SELECTIVE_EXC_IP
                    PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
#else
                    PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
#endif
                    DISPATCH();
                }

                ENTRY(MP_BC_END_FINALLY):
                    MARK_EXC_IP_SELECTIVE();
                    // if TOS is None, just pops it and continues
                    // if TOS is an integer, finishes coroutine and returns control to caller
                    // if TOS is an exception, reraises the exception
                    if (TOP() == mp_const_none) {
                        sp--;
                    } else if (MP_OBJ_IS_SMALL_INT(TOP())) {
                        // We finished the "finally" coroutine and now dispatch back
                        // to our caller, based on the TOS value
                        mp_int_t cause = MP_OBJ_SMALL_INT_VALUE(POP());
                        if (cause < 0) {
                            // A negative cause indicates unwind return
                            goto unwind_return;
                        } else {
                            // Otherwise it's an unwind jump and we must push as a raw
                            // number the number of exception handlers to unwind
                            PUSH((mp_obj_t)cause);
                            goto unwind_jump;
                        }
                    } else {
                        assert(mp_obj_is_exception_instance(TOP()));
                        RAISE(TOP());
                    }
                    DISPATCH();

                ENTRY(MP_BC_GET_ITER):
                    MARK_EXC_IP_SELECTIVE();
                    SET_TOP(mp_getiter(TOP(), NULL));
                    DISPATCH();

                // An iterator for a for-loop takes MP_OBJ_ITER_BUF_NSLOTS slots on
                // the Python value stack.  These slots are either used to store the
                // iterator object itself, or the first slot is MP_OBJ_NULL and
                // the second slot holds a reference to the iterator object.
                ENTRY(MP_BC_GET_ITER_STACK): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t obj = TOP();
                    mp_obj_iter_buf_t *iter_buf = (mp_obj_iter_buf_t*)sp;
                    sp += MP_OBJ_ITER_BUF_NSLOTS - 1;
                    obj = mp_getiter(obj, iter_buf);
                    if (obj != MP_OBJ_FROM_PTR(iter_buf)) {
                        // Iterator didn't use the stack so indicate that with MP_OBJ_NULL.
                        sp[-MP_OBJ_ITER_BUF_NSLOTS + 1] = MP_OBJ_NULL;
                        sp[-MP_OBJ_ITER_BUF_NSLOTS + 2] = obj;
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_FOR_ITER): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
                    code_state->sp = sp;
                    mp_obj_t obj;
                    if (sp[-MP_OBJ_ITER_BUF_NSLOTS + 1] == MP_OBJ_NULL) {
                        obj = sp[-MP_OBJ_ITER_BUF_NSLOTS + 2];
                    } else {
                        obj = MP_OBJ_FROM_PTR(&sp[-MP_OBJ_ITER_BUF_NSLOTS + 1]);
                    }
                    mp_obj_t value = mp_iternext_allow_raise(obj);
                    if (value == MP_OBJ_STOP_ITERATION) {
                        sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
                        ip += ulab; // jump to after for-block
                    } else {
                        PUSH(value); // push the next iteration value
                    }
                    DISPATCH();
                }
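
                // Note: mp_iternext_allow_raise may also signal exhaustion by raising
                // StopIteration rather than returning MP_OBJ_STOP_ITERATION; that case
                // is caught by the nlr handler below, which checks for MP_BC_FOR_ITER
                // at code_state->ip and performs the same pop-and-jump as the fast
                // path above.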

                // matched against: SETUP_EXCEPT, SETUP_FINALLY, SETUP_WITH
                ENTRY(MP_BC_POP_BLOCK):
                    // we are exiting an exception handler, so pop the last one off the exception stack
                    assert(exc_sp >= exc_stack);
                    POP_EXC_BLOCK();
                    DISPATCH();

                // matched against: SETUP_EXCEPT
                ENTRY(MP_BC_POP_EXCEPT):
                    assert(exc_sp >= exc_stack);
                    assert(currently_in_except_block);
                    POP_EXC_BLOCK();
                    DISPATCH();

                ENTRY(MP_BC_BUILD_TUPLE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_tuple(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_BUILD_LIST): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_list(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_BUILD_MAP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    PUSH(mp_obj_new_dict(unum));
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_MAP):
                    MARK_EXC_IP_SELECTIVE();
                    sp -= 2;
                    mp_obj_dict_store(sp[0], sp[2], sp[1]);
                    DISPATCH();

#if MICROPY_PY_BUILTINS_SET
                ENTRY(MP_BC_BUILD_SET): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_set(unum, sp));
                    DISPATCH();
                }
#endif

#if MICROPY_PY_BUILTINS_SLICE
                ENTRY(MP_BC_BUILD_SLICE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (unum == 2) {
                        mp_obj_t stop = POP();
                        mp_obj_t start = TOP();
                        SET_TOP(mp_obj_new_slice(start, stop, mp_const_none));
                    } else {
                        mp_obj_t step = POP();
                        mp_obj_t stop = POP();
                        mp_obj_t start = TOP();
                        SET_TOP(mp_obj_new_slice(start, stop, step));
                    }
                    DISPATCH();
                }
#endif

                ENTRY(MP_BC_STORE_COMP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_obj_t obj = sp[-(unum >> 2)];
                    if ((unum & 3) == 0) {
                        mp_obj_list_append(obj, sp[0]);
                        sp--;
                    } else if (!MICROPY_PY_BUILTINS_SET || (unum & 3) == 1) {
                        mp_obj_dict_store(obj, sp[0], sp[-1]);
                        sp -= 2;
#if MICROPY_PY_BUILTINS_SET
                    } else {
                        mp_obj_set_store(obj, sp[0]);
                        sp--;
#endif
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_UNPACK_SEQUENCE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_unpack_sequence(sp[0], unum, sp);
                    sp += unum - 1;
                    DISPATCH();
                }

                ENTRY(MP_BC_UNPACK_EX): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_unpack_ex(sp[0], unum, sp);
                    sp += (unum & 0xff) + ((unum >> 8) & 0xff);
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_FUNCTION): {
                    DECODE_PTR;
                    PUSH(mp_make_function_from_raw_code(ptr, MP_OBJ_NULL, MP_OBJ_NULL));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
                    DECODE_PTR;
                    // Stack layout: def_tuple def_dict <- TOS
                    mp_obj_t def_dict = POP();
                    SET_TOP(mp_make_function_from_raw_code(ptr, TOP(), def_dict));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_CLOSURE): {
                    DECODE_PTR;
                    size_t n_closed_over = *ip++;
                    // Stack layout: closed_overs <- TOS
                    sp -= n_closed_over - 1;
                    SET_TOP(mp_make_closure_from_raw_code(ptr, n_closed_over, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
                    DECODE_PTR;
                    size_t n_closed_over = *ip++;
                    // Stack layout: def_tuple def_dict closed_overs <- TOS
                    sp -= 2 + n_closed_over - 1;
                    SET_TOP(mp_make_closure_from_raw_code(ptr, 0x100 | n_closed_over, sp));
                    DISPATCH();
                }
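
                // For the CALL_* opcodes below the operand packs two counts:
                // unum & 0xff is the number of positional args and
                // (unum >> 8) & 0xff the number of keyword args.  Keyword args
                // occupy two stack slots each (name and value), which is why the
                // stack is dropped by (unum >> 7) & 0x1fe, i.e. 2 * n_keyword.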

                ENTRY(MP_BC_CALL_FUNCTION): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
#if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1);
#if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
#if MICROPY_STACKLESS_STRICT
deep_recursion_error:
                            mp_raise_recursion_depth();
#endif
                        } else
#endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
#endif
                    SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    // We have the following stack layout here:
                    //     fun arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
#if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
                        mp_call_args_t out_args;
                        mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args);

                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
                            out_args.n_args, out_args.n_kw, out_args.args);
#if !MICROPY_ENABLE_PYSTACK
                        // Freeing args at this point does not follow a LIFO order so only do it if
                        // pystack is not enabled.  For pystack, they are freed when code_state is.
                        mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
#endif
#if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
#if MICROPY_STACKLESS_STRICT
                            goto deep_recursion_error;
#endif
                        } else
#endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
#endif
                    SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_METHOD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
#if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);

                        size_t n_args = unum & 0xff;
                        size_t n_kw = (unum >> 8) & 0xff;
                        int adjust = (sp[1] == MP_OBJ_NULL) ? 0 : 1;

                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, n_args + adjust, n_kw, sp + 2 - adjust);
#if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
#if MICROPY_STACKLESS_STRICT
                            goto deep_recursion_error;
#endif
                        } else
#endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
#endif
                    SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
                    DISPATCH();
                }
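
                // For CALL_METHOD, LOAD_METHOD earlier pushed two slots: the
                // callable, then either self (bound-method case) or MP_OBJ_NULL
                // (plain-function case).  The `adjust` in the stackless path above
                // folds self, when present, into the positional arguments.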

                ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    // We have the following stack layout here:
                    //     fun self arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 3;
#if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
                        mp_call_args_t out_args;
                        mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args);

                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
                            out_args.n_args, out_args.n_kw, out_args.args);
#if !MICROPY_ENABLE_PYSTACK
                        // Freeing args at this point does not follow a LIFO order so only do it if
                        // pystack is not enabled.  For pystack, they are freed when code_state is.
                        mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
#endif
#if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
#if MICROPY_STACKLESS_STRICT
                            goto deep_recursion_error;
#endif
                        } else
#endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
#endif
                    SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_RETURN_VALUE):
                    MARK_EXC_IP_SELECTIVE();
                    // These next 3 lines pop a try-finally exception handler, if one
                    // is there on the exception stack.  Without this the finally block
                    // is executed a second time when the return is executed, because
                    // the try-finally exception handler is still on the stack.
                    // TODO Possibly find a better way to handle this case.
                    if (currently_in_except_block) {
                        POP_EXC_BLOCK();
                    }
unwind_return:
                    while (exc_sp >= exc_stack) {
                        if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
                            // Getting here the stack looks like:
                            //     (..., X, [iter0, iter1, ...,] ret_val)
                            // where X is pointed to by exc_sp->val_sp and in the case
                            // of a "with" block contains the context manager info.
                            // There may be 0 or more for-iterators between X and the
                            // return value, and these must be removed before control can
                            // pass to the finally code.  We simply copy the ret_value down
                            // over these iterators, if they exist.  If they don't then the
                            // following is a null operation.
                            mp_obj_t *finally_sp = MP_TAGPTR_PTR(exc_sp->val_sp);
                            finally_sp[1] = sp[0];
                            sp = &finally_sp[1];
                            // We're going to run "finally" code as a coroutine
                            // (not calling it recursively).  Set up a sentinel
                            // on the stack so it can return back to us when it is
                            // done (when WITH_CLEANUP or END_FINALLY is reached).
                            PUSH(MP_OBJ_NEW_SMALL_INT(-1));
                            ip = exc_sp->handler;
                            exc_sp--;
                            goto dispatch_loop;
                        }
                        exc_sp--;
                    }
                    nlr_pop();
                    code_state->sp = sp;
                    assert(exc_sp == exc_stack - 1);
                    MICROPY_VM_HOOK_RETURN
#if MICROPY_STACKLESS
                    if (code_state->prev != NULL) {
                        mp_obj_t res = *sp;
                        mp_globals_set(code_state->old_globals);
                        mp_code_state_t *new_code_state = code_state->prev;
#if MICROPY_ENABLE_PYSTACK
                        // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
                        // (The latter is implicitly freed when using pystack due to its LIFO nature.)
                        // The sizeof in the following statement does not include the size of the variable
                        // part of the struct.  This arg is anyway not used if pystack is enabled.
                        mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
#endif
                        code_state = new_code_state;
                        *code_state->sp = res;
                        goto run_code_state;
                    }
#endif
                    return MP_VM_RETURN_NORMAL;

                ENTRY(MP_BC_RAISE_VARARGS): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_uint_t unum = *ip;
                    mp_obj_t obj;
                    if (unum == 2) {
                        mp_warning("exception chaining not supported");
                        // ignore (pop) "from" argument
                        sp--;
                    }
                    if (unum == 0) {
                        // search for the inner-most previous exception, to reraise it
                        obj = MP_OBJ_NULL;
                        for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; e--) {
                            if (e->prev_exc != NULL) {
                                obj = MP_OBJ_FROM_PTR(e->prev_exc);
                                break;
                            }
                        }
                        if (obj == MP_OBJ_NULL) {
                            obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, "no active exception to reraise");
                            RAISE(obj);
                        }
                    } else {
                        obj = TOP();
                    }
                    obj = mp_make_raise_obj(obj);
                    RAISE(obj);
                }

                ENTRY(MP_BC_YIELD_VALUE):
yield:
                    nlr_pop();
                    code_state->ip = ip;
                    code_state->sp = sp;
                    code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
                    return MP_VM_RETURN_YIELD;
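
                // (Returning MP_VM_RETURN_YIELD leaves this code_state intact; the
                // generator object that owns it resumes by calling mp_execute_bytecode
                // again, which reloads ip/sp from code_state and continues at the
                // instruction after the yield.)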

                ENTRY(MP_BC_YIELD_FROM): {
                    MARK_EXC_IP_SELECTIVE();
//#define EXC_MATCH(exc, type) MP_OBJ_IS_TYPE(exc, type)
#define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type)
#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { RAISE(t); }
                    mp_vm_return_kind_t ret_kind;
                    mp_obj_t send_value = POP();
                    mp_obj_t t_exc = MP_OBJ_NULL;
                    mp_obj_t ret_value;
                    code_state->sp = sp; // Save sp because it's needed if mp_resume raises StopIteration
                    if (inject_exc != MP_OBJ_NULL) {
                        t_exc = inject_exc;
                        inject_exc = MP_OBJ_NULL;
                        ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
                    } else {
                        ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
                    }

                    if (ret_kind == MP_VM_RETURN_YIELD) {
                        ip--;
                        PUSH(ret_value);
                        goto yield;
                    } else if (ret_kind == MP_VM_RETURN_NORMAL) {
                        // Pop exhausted gen
                        sp--;
                        if (ret_value == MP_OBJ_STOP_ITERATION) {
                            // Optimize StopIteration
                            // TODO: get StopIteration's value
                            PUSH(mp_const_none);
                        } else {
                            PUSH(ret_value);
                        }
                        // If we injected GeneratorExit downstream, then even
                        // if it was swallowed, we re-raise GeneratorExit
                        GENERATOR_EXIT_IF_NEEDED(t_exc);
                        DISPATCH();
                    } else {
                        assert(ret_kind == MP_VM_RETURN_EXCEPTION);
                        // Pop exhausted gen
                        sp--;
                        if (EXC_MATCH(ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
                            PUSH(mp_obj_exception_get_value(ret_value));
                            // If we injected GeneratorExit downstream, then even
                            // if it was swallowed, we re-raise GeneratorExit
                            GENERATOR_EXIT_IF_NEEDED(t_exc);
                            DISPATCH();
                        } else {
                            RAISE(ret_value);
                        }
                    }
                }

                ENTRY(MP_BC_IMPORT_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t obj = POP();
                    SET_TOP(mp_import_name(qst, obj, TOP()));
                    DISPATCH();
                }

                ENTRY(MP_BC_IMPORT_FROM): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t obj = mp_import_from(TOP(), qst);
                    PUSH(obj);
                    DISPATCH();
                }

                ENTRY(MP_BC_IMPORT_STAR):
                    MARK_EXC_IP_SELECTIVE();
                    mp_import_all(POP());
                    DISPATCH();
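
                // The *_MULTI opcodes encode their operand in the opcode byte itself:
                // MP_BC_LOAD_CONST_SMALL_INT_MULTI spans 64 consecutive opcode values
                // covering the small ints -16..47, and the LOAD_FAST/STORE_FAST
                // variants span 16 values each, so ip[-1] recovers the operand with
                // no extra bytes fetched.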

#if MICROPY_OPT_COMPUTED_GOTO
                ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
                    PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
                    DISPATCH();

                ENTRY(MP_BC_LOAD_FAST_MULTI):
                    obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
                    goto load_check;

                ENTRY(MP_BC_STORE_FAST_MULTI):
                    fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
                    DISPATCH();

                ENTRY(MP_BC_UNARY_OP_MULTI):
                    MARK_EXC_IP_SELECTIVE();
                    SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
                    DISPATCH();

                ENTRY(MP_BC_BINARY_OP_MULTI): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t rhs = POP();
                    mp_obj_t lhs = TOP();
                    SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
                    DISPATCH();
                }

                ENTRY_DEFAULT:
                    MARK_EXC_IP_SELECTIVE();
#else
                ENTRY_DEFAULT:
                    if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
                        PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
                        DISPATCH();
                    } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
                        obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
                        goto load_check;
                    } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
                        fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
                        DISPATCH();
                    } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NUM_BYTECODE) {
                        SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
                        DISPATCH();
                    } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BINARY_OP_NUM_BYTECODE) {
                        mp_obj_t rhs = POP();
                        mp_obj_t lhs = TOP();
                        SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
                        DISPATCH();
                    } else
#endif
                    {
                        mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, "byte code not implemented");
                        nlr_pop();
                        fastn[0] = obj;
                        return MP_VM_RETURN_EXCEPTION;
                    }

#if !MICROPY_OPT_COMPUTED_GOTO
                } // switch
#endif

pending_exception_check:
                MICROPY_VM_HOOK_LOOP

#if MICROPY_ENABLE_SCHEDULER
                // This is an inlined variant of mp_handle_pending
                if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
                    MARK_EXC_IP_SELECTIVE();
                    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
                    mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
                    if (obj != MP_OBJ_NULL) {
                        MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
                        if (!mp_sched_num_pending()) {
                            MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
                        }
                        MICROPY_END_ATOMIC_SECTION(atomic_state);
                        RAISE(obj);
                    }
                    mp_handle_pending_tail(atomic_state);
                }
#else
                // This is an inlined variant of mp_handle_pending
                if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
                    MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
                    RAISE(obj);
                }
#endif

#if MICROPY_PY_THREAD_GIL
#if MICROPY_PY_THREAD_GIL_VM_DIVISOR
                if (--gil_divisor == 0)
#endif
                {
#if MICROPY_PY_THREAD_GIL_VM_DIVISOR
                    gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
#endif
#if MICROPY_ENABLE_SCHEDULER
                    // can only switch threads if the scheduler is unlocked
                    if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE)
#endif
                    {
                        MP_THREAD_GIL_EXIT();
                        MP_THREAD_GIL_ENTER();
                    }
                }
#endif

            } // for loop

        } else {
exception_handler:
            // exception occurred

#if MICROPY_PY_SYS_EXC_INFO
            MP_STATE_VM(cur_exception) = nlr.ret_val;
#endif

#if SELECTIVE_EXC_IP
            // with selective ip, we store the ip 1 byte past the opcode, so move ptr back
            code_state->ip -= 1;
#endif

            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
                if (code_state->ip) {
                    // check if it's a StopIteration within a for block
                    if (*code_state->ip == MP_BC_FOR_ITER) {
                        const byte *ip = code_state->ip + 1;
                        DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
                        code_state->ip = ip + ulab; // jump to after for-block
                        code_state->sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
                        goto outer_dispatch_loop; // continue with dispatch loop
                    } else if (*code_state->ip == MP_BC_YIELD_FROM) {
                        // StopIteration inside a yield from call means the yield from
                        // is returning a value, so inject the exception's value as the
                        // yield from's result (instead of a stack pop-then-push we just
                        // replace the exhausted gen with the value).
                        *code_state->sp = mp_obj_exception_get_value(MP_OBJ_FROM_PTR(nlr.ret_val));
                        code_state->ip++; // yield from is over, move to next instruction
                        goto outer_dispatch_loop; // continue with dispatch loop
                    }
                }
            }

#if MICROPY_STACKLESS
unwind_loop:
#endif
            // set file and line number that the exception occurred at
            // TODO: don't set traceback for exceptions re-raised by END_FINALLY.
            // But consider how to handle nested exceptions.
            if (nlr.ret_val != &mp_const_GeneratorExit_obj) {
                const byte *ip = code_state->fun_bc->bytecode;
                ip = mp_decode_uint_skip(ip); // skip n_state
                ip = mp_decode_uint_skip(ip); // skip n_exc_stack
                ip++; // skip scope_params
                ip++; // skip n_pos_args
                ip++; // skip n_kwonly_args
                ip++; // skip n_def_pos_args
                size_t bc = code_state->ip - ip;
                size_t code_info_size = mp_decode_uint_value(ip);
                ip = mp_decode_uint_skip(ip); // skip code_info_size
                bc -= code_info_size;
#if MICROPY_PERSISTENT_CODE
                qstr block_name = ip[0] | (ip[1] << 8);
                qstr source_file = ip[2] | (ip[3] << 8);
                ip += 4;
#else
                qstr block_name = mp_decode_uint_value(ip);
                ip = mp_decode_uint_skip(ip);
                qstr source_file = mp_decode_uint_value(ip);
                ip = mp_decode_uint_skip(ip);
#endif
                size_t source_line = 1;
                size_t c;
                while ((c = *ip)) {
                    size_t b, l;
                    if ((c & 0x80) == 0) {
                        // 0b0LLBBBBB encoding
                        b = c & 0x1f;
                        l = c >> 5;
                        ip += 1;
                    } else {
                        // 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
                        b = c & 0xf;
                        l = ((c << 4) & 0x700) | ip[1];
                        ip += 2;
                    }
                    if (bc >= b) {
                        bc -= b;
                        source_line += l;
                    } else {
                        // found source line corresponding to bytecode offset
                        break;
                    }
                }
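                // (Worked example of the encoding above: a single byte 0x4a =
                // 0b0_10_01010 advances the bytecode offset b by 10 and the source
                // line l by 2.)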
                mp_obj_exception_add_traceback(MP_OBJ_FROM_PTR(nlr.ret_val), source_file, source_line, block_name);
            }

            while (currently_in_except_block) {
                // nested exception

                assert(exc_sp >= exc_stack);

                // TODO make a proper message for nested exception
                // at the moment we are just raising the very last exception (the one that caused the nested exception)

                // move up to previous exception handler
                POP_EXC_BLOCK();
            }

            if (exc_sp >= exc_stack) {
                // set flag to indicate that we are now handling an exception
                currently_in_except_block = 1;

                // catch exception and pass to byte code
                code_state->ip = exc_sp->handler;
                mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
                // save this exception in the stack so it can be used in a reraise, if needed
                exc_sp->prev_exc = nlr.ret_val;
                // push exception object so it can be handled by bytecode
                PUSH(MP_OBJ_FROM_PTR(nlr.ret_val));
                code_state->sp = sp;

#if MICROPY_STACKLESS
            } else if (code_state->prev != NULL) {
                mp_globals_set(code_state->old_globals);
                mp_code_state_t *new_code_state = code_state->prev;
#if MICROPY_ENABLE_PYSTACK
                // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
                // (The latter is implicitly freed when using pystack due to its LIFO nature.)
                // The sizeof in the following statement does not include the size of the variable
                // part of the struct.  This arg is anyway not used if pystack is enabled.
                mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
#endif
                code_state = new_code_state;
                size_t n_state = mp_decode_uint_value(code_state->fun_bc->bytecode);
                fastn = &code_state->state[n_state - 1];
                exc_stack = (mp_exc_stack_t*)(code_state->state + n_state);
                // variables that are visible to the exception handler (declared volatile)
                currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
                exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack
                goto unwind_loop;
#endif
            } else {
                // propagate exception to higher level
                // TODO what to do about ip and sp? they don't really make sense at this point
                fastn[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // must put exception here because sp is invalid
                return MP_VM_RETURN_EXCEPTION;
            }
        }
    }
}