/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <assert.h>
#include <string.h>

#include "py/mpconfig.h"

// wrapper around everything in this file
#if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB

#include "py/mphal.h"
#include "py/asmthumb.h"

#define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
#define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
#define SIGNED_FIT9(x) ((((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00))
#define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800))
#define SIGNED_FIT23(x) ((((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000))
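
// The FIT macros above test whether a value is representable in the given number of
// bits, e.g. UNSIGNED_FIT8(x) is true iff 0 <= x <= 255 and SIGNED_FIT9(x) is true iff
// -256 <= x <= 255; they are used below to check branch offsets and immediates.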

static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) {
    return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
}

void asm_thumb_end_pass(asm_thumb_t *as) {
    (void)as;
    // could check labels are resolved...

#if defined(MCU_SERIES_F7)
    if (as->base.pass == MP_ASM_PASS_EMIT) {
        // flush D-cache, so the code emitted is stored in memory
        MP_HAL_CLEAN_DCACHE(as->base.code_base, as->base.code_size);
        // invalidate I-cache
        SCB_InvalidateICache();
    }
#endif
}

/*
STATIC void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) {
    byte *c = asm_thumb_get_cur_to_write_bytes(as, 1);
    c[0] = b1;
}
*/

/*
#define IMM32_L0(x) ((x) & 0xff)
#define IMM32_L1(x) (((x) >> 8) & 0xff)
#define IMM32_L2(x) (((x) >> 16) & 0xff)
#define IMM32_L3(x) (((x) >> 24) & 0xff)

STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) {
    byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
    c[0] = IMM32_L0(w32);
    c[1] = IMM32_L1(w32);
    c[2] = IMM32_L2(w32);
    c[3] = IMM32_L3(w32);
}
*/

// rlolist is a bit map indicating desired lo-registers
#define OP_PUSH_RLIST(rlolist)      (0xb400 | (rlolist))
#define OP_PUSH_RLIST_LR(rlolist)   (0xb400 | 0x0100 | (rlolist))
#define OP_POP_RLIST(rlolist)       (0xbc00 | (rlolist))
#define OP_POP_RLIST_PC(rlolist)    (0xbc00 | 0x0100 | (rlolist))

#define OP_ADD_SP(num_words) (0xb000 | (num_words))
#define OP_SUB_SP(num_words) (0xb080 | (num_words))

// locals:
//  - stored on the stack in ascending order
//  - numbered 0 through num_locals-1
//  - SP points to first local
//
//  | SP
//  v
//  l0  l1  l2  ...  l(n-1)
//  ^                ^
//  | low address    | high address in RAM
void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
    assert(num_locals >= 0);

    // work out what to push and how many extra spaces to reserve on stack
    // so that we have enough for all locals and it's aligned on an 8-byte boundary
    // we push extra regs (r1, r2, r3) to help do the stack adjustment
    // we probably should just always subtract from sp, since this would be more efficient
    // for push rlist, lowest numbered register at the lowest address
    uint reglist;
    uint stack_adjust;
    // don't pop r0 because it's used for return value
    switch (num_locals) {
        case 0:
            reglist = 0xf2;
            stack_adjust = 0;
            break;

        case 1:
            reglist = 0xf2;
            stack_adjust = 0;
            break;

        case 2:
            reglist = 0xfe;
            stack_adjust = 0;
            break;

        case 3:
            reglist = 0xfe;
            stack_adjust = 0;
            break;

        default:
            reglist = 0xfe;
            stack_adjust = ((num_locals - 3) + 1) & (~1);
            break;
    }
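    // reglist is a bitmap of lo registers: 0xf2 = {r1, r4-r7}, 0xfe = {r1-r7};
    // with lr pushed as well, the pushed frame is a multiple of 8 bytes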
    asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
    if (stack_adjust > 0) {
        asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
    }
    as->push_reglist = reglist;
    as->stack_adjust = stack_adjust;
}
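
// e.g. num_locals = 5 gives reglist = 0xfe and stack_adjust = 2, so the prologue is
// push {r1-r7, lr} followed by sub sp, #8: a 40-byte frame, keeping sp 8-byte aligned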

void asm_thumb_exit(asm_thumb_t *as) {
    if (as->stack_adjust > 0) {
        asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
    }
    asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist));
}

STATIC mp_uint_t get_label_dest(asm_thumb_t *as, uint label) {
    assert(label < as->base.max_num_labels);
    return as->base.label_offsets[label];
}

void asm_thumb_op16(asm_thumb_t *as, uint op) {
    byte *c = asm_thumb_get_cur_to_write_bytes(as, 2);
    if (c != NULL) {
        // little endian
        c[0] = op;
        c[1] = op >> 8;
    }
}

void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) {
    byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
    if (c != NULL) {
        // little endian, op1 then op2
        c[0] = op1;
        c[1] = op1 >> 8;
        c[2] = op2;
        c[3] = op2 >> 8;
    }
}

#define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest))

void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    assert(rlo_src < ASM_THUMB_REG_R8);
    asm_thumb_op16(as, OP_FORMAT_4(op, rlo_dest, rlo_src));
}
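
// hi registers are handled with the MOV (register) form: 0x4600 | D << 7 | Rm << 3 | Rd,
// where the destination register number is D:Rd and Rm is the full 4-bit source number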
void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
    uint op_lo;
    if (reg_src < 8) {
        op_lo = reg_src << 3;
    } else {
        op_lo = 0x40 | ((reg_src - 8) << 3);
    }
    if (reg_dest < 8) {
        op_lo |= reg_dest;
    } else {
        op_lo |= 0x80 | (reg_dest - 8);
    }
    // mov reg_dest, reg_src
    asm_thumb_op16(as, 0x4600 | op_lo);
}
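
// movw/movt scatter their 16-bit immediate across the 32-bit instruction as the
// fields imm4:i:imm3:imm8; the shifts below move the bits of i16_src into place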
// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
    assert(reg_dest < ASM_THUMB_REG_R15);
    // mov[wt] reg_dest, #i16_src
    asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
}
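
// narrow (16-bit) unconditional branch: 11-bit signed halfword offset, so the destination
// must be within roughly +/-2KiB (checked via SIGNED_FIT12 on the byte offset)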
#define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))

bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    asm_thumb_op16(as, OP_B_N(rel));
    return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT12(rel);
}
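
// narrow (16-bit) conditional branch: 8-bit signed halfword offset, roughly +/-256 bytes
// (checked via SIGNED_FIT9); the wide (32-bit) form has a much larger range, so no fit
// check is done for it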
#define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))

// all these bit arithmetics need coverage testing!
#define OP_BCC_W_HI(cond, byte_offset) (0xf000 | ((cond) << 6) | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
#define OP_BCC_W_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))

bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (!wide) {
        asm_thumb_op16(as, OP_BCC_N(cond, rel));
        return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel);
    } else {
        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
        return true;
    }
}
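
// bl: the two halfwords each hold 11 bits of the halfword offset, giving a 23-bit signed
// byte offset of roughly +/-4MiB (checked via SIGNED_FIT23)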
#define OP_BL_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
#define OP_BL_LO(byte_offset) (0xf800 | (((byte_offset) >> 1) & 0x07ff))

bool asm_thumb_bl_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    asm_thumb_op32(as, OP_BL_HI(rel), OP_BL_LO(rel));
    return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel);
}

void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
    // movw, movt does it in 8 bytes
    // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw

    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
}

void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
    if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
        asm_thumb_mov_rlo_i8(as, reg_dest, i32);
    } else if (UNSIGNED_FIT16(i32)) {
        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
    } else {
        asm_thumb_mov_reg_i32(as, reg_dest, i32);
    }
}
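
// Illustration of the sizes chosen by the optimised load for a lo-register dest:
//   i32 = 0x12       -> mov rlo, #imm8   (2 bytes)
//   i32 = 0x1234     -> movw             (4 bytes)
//   i32 = 0x12345678 -> movw + movt      (8 bytes)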

// i32 is stored as a full word in the code, and aligned to machine-word boundary
// TODO this is very inefficient, improve it!
void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) {
    // align the branch below on machine-word + 2, so the i32 word that follows it is machine-word aligned
    if ((as->base.code_offset & 3) == 0) {
        asm_thumb_op16(as, ASM_THUMB_OP_NOP);
    }
    // jump over the i32 value (instruction prefetch adds 2 to PC)
    asm_thumb_op16(as, OP_B_N(2));
    // store i32 on machine-word aligned boundary
    mp_asm_base_data(&as->base, 4, i32);
    // do the actual load of the i32 value
    asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32);
}
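
// locals are accessed relative to sp; the encodings below scale the 8-bit word offset
// by 4, so local number n lives at [sp, #4*n], matching the layout set up in asm_thumb_entry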
#define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
#define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))

void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
    assert(rlo_src < ASM_THUMB_REG_R8);
    int word_offset = local_num;
    assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
    asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
}

void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    int word_offset = local_num;
    assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
    asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
}

#define OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset) (0xa800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))

void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    int word_offset = local_num;
    assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
    asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
}

// this could be wrong, because it should have a range of +/- 16MiB...
#define OP_BW_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
#define OP_BW_LO(byte_offset) (0xb800 | (((byte_offset) >> 1) & 0x07ff))

void asm_thumb_b_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (dest != (mp_uint_t)-1 && rel <= -4) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 12 bit relative jump
        if (SIGNED_FIT12(rel)) {
            asm_thumb_op16(as, OP_B_N(rel));
        } else {
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
    large_jump:
        asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
    }
}

void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (dest != (mp_uint_t)-1 && rel <= -4) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 9 bit relative jump
        if (SIGNED_FIT9(rel)) {
            asm_thumb_op16(as, OP_BCC_N(cond, rel));
        } else {
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
    large_jump:
        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
    }
}

#define OP_BLX(reg) (0x4780 | ((reg) << 3))
#define OP_SVC(arg) (0xdf00 | (arg))

void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
    /* TODO make this use less bytes
    uint rlo_base = ASM_THUMB_REG_R3;
    uint rlo_dest = ASM_THUMB_REG_R7;
    uint word_offset = 4;
    asm_thumb_op16(as, 0x0000);
    asm_thumb_op16(as, 0x6800 | (word_offset << 6) | (rlo_base << 3) | rlo_dest); // ldr rlo_dest, [rlo_base, #offset]
    asm_thumb_op16(as, 0x4780 | (ASM_THUMB_REG_R9 << 3)); // blx reg
    */
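    // the fast path below assumes a table of function pointers is available and that
    // r7 points to it (set up elsewhere by the caller); fun_id indexes this table in words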
    if (fun_id < 32) {
        // load ptr to function from table, indexed by fun_id (must be in range 0-31); 4 bytes
        asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, ASM_THUMB_REG_R7, fun_id));
        asm_thumb_op16(as, OP_BLX(reg_temp));
    } else {
        // load ptr to function into register using immediate (movw + movt); 10 bytes
        asm_thumb_mov_reg_i32(as, reg_temp, (mp_uint_t)fun_ptr);
        asm_thumb_op16(as, OP_BLX(reg_temp));
    }
}

#endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB