  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2014 Fabian Vogt
  7. * Copyright (c) 2013, 2014 Damien P. George
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy
  10. * of this software and associated documentation files (the "Software"), to deal
  11. * in the Software without restriction, including without limitation the rights
  12. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13. * copies of the Software, and to permit persons to whom the Software is
  14. * furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in
  17. * all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  22. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25. * THE SOFTWARE.
  26. */
  27. #include <stdio.h>
  28. #include <assert.h>
  29. #include <string.h>
  30. #include "py/mpconfig.h"
  31. // wrapper around everything in this file
  32. #if MICROPY_EMIT_ARM
  33. #include "py/asmarm.h"
  34. #define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
// Called at the end of each assembler pass. After the final (emit) pass the
// generated machine code must be made visible to instruction fetch, so when
// running natively on ARM the caches are flushed via CP15 c7 operations.
// NOTE(review): the mrc c7,c10,3 loop looks like the ARM9-era "test and clean
// DCache" idiom and the mcr c7,c7,0 an invalidate of both caches -- confirm
// against the target core's TRM.
void asm_arm_end_pass(asm_arm_t *as) {
    if (as->base.pass == MP_ASM_PASS_EMIT) {
        #ifdef __arm__
        // flush I- and D-cache
        asm volatile(
            "0:"
            "mrc p15, 0, r15, c7, c10, 3\n"
            "bne 0b\n"
            "mov r0, #0\n"
            "mcr p15, 0, r0, c7, c7, 0\n"
            : : : "r0", "cc");
        #endif
    }
}
  49. // Insert word into instruction flow
  50. STATIC void emit(asm_arm_t *as, uint op) {
  51. uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
  52. if (c != NULL) {
  53. *(uint32_t*)c = op;
  54. }
  55. }
  56. // Insert word into instruction flow, add "ALWAYS" condition code
  57. STATIC void emit_al(asm_arm_t *as, uint op) {
  58. emit(as, op | ASM_ARM_CC_AL);
  59. }
  60. // Basic instructions without condition code
  61. STATIC uint asm_arm_op_push(uint reglist) {
  62. // stmfd sp!, {reglist}
  63. return 0x92d0000 | (reglist & 0xFFFF);
  64. }
  65. STATIC uint asm_arm_op_pop(uint reglist) {
  66. // ldmfd sp!, {reglist}
  67. return 0x8bd0000 | (reglist & 0xFFFF);
  68. }
  69. STATIC uint asm_arm_op_mov_reg(uint rd, uint rn) {
  70. // mov rd, rn
  71. return 0x1a00000 | (rd << 12) | rn;
  72. }
  73. STATIC uint asm_arm_op_mov_imm(uint rd, uint imm) {
  74. // mov rd, #imm
  75. return 0x3a00000 | (rd << 12) | imm;
  76. }
  77. STATIC uint asm_arm_op_mvn_imm(uint rd, uint imm) {
  78. // mvn rd, #imm
  79. return 0x3e00000 | (rd << 12) | imm;
  80. }
  81. STATIC uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
  82. // add rd, rn, #imm
  83. return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
  84. }
  85. STATIC uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
  86. // add rd, rn, rm
  87. return 0x0800000 | (rn << 16) | (rd << 12) | rm;
  88. }
  89. STATIC uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
  90. // sub rd, rn, #imm
  91. return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
  92. }
  93. STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
  94. // sub rd, rn, rm
  95. return 0x0400000 | (rn << 16) | (rd << 12) | rm;
  96. }
  97. STATIC uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
  98. // mul rd, rm, rs
  99. assert(rd != rm);
  100. return 0x0000090 | (rd << 16) | (rs << 8) | rm;
  101. }
  102. STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
  103. // and rd, rn, rm
  104. return 0x0000000 | (rn << 16) | (rd << 12) | rm;
  105. }
  106. STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
  107. // eor rd, rn, rm
  108. return 0x0200000 | (rn << 16) | (rd << 12) | rm;
  109. }
  110. STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
  111. // orr rd, rn, rm
  112. return 0x1800000 | (rn << 16) | (rd << 12) | rm;
  113. }
  114. void asm_arm_bkpt(asm_arm_t *as) {
  115. // bkpt #0
  116. emit_al(as, 0x1200070);
  117. }
  118. // locals:
  119. // - stored on the stack in ascending order
  120. // - numbered 0 through num_locals-1
  121. // - SP points to first local
  122. //
  123. // | SP
  124. // v
  125. // l0 l1 l2 ... l(n-1)
  126. // ^ ^
  127. // | low address | high address in RAM
  128. void asm_arm_entry(asm_arm_t *as, int num_locals) {
  129. assert(num_locals >= 0);
  130. as->stack_adjust = 0;
  131. as->push_reglist = 1 << ASM_ARM_REG_R1
  132. | 1 << ASM_ARM_REG_R2
  133. | 1 << ASM_ARM_REG_R3
  134. | 1 << ASM_ARM_REG_R4
  135. | 1 << ASM_ARM_REG_R5
  136. | 1 << ASM_ARM_REG_R6
  137. | 1 << ASM_ARM_REG_R7
  138. | 1 << ASM_ARM_REG_R8;
  139. // Only adjust the stack if there are more locals than usable registers
  140. if (num_locals > 3) {
  141. as->stack_adjust = num_locals * 4;
  142. // Align stack to 8 bytes
  143. if (num_locals & 1) {
  144. as->stack_adjust += 4;
  145. }
  146. }
  147. emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));
  148. if (as->stack_adjust > 0) {
  149. emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
  150. }
  151. }
  152. void asm_arm_exit(asm_arm_t *as) {
  153. if (as->stack_adjust > 0) {
  154. emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
  155. }
  156. emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
  157. }
  158. void asm_arm_push(asm_arm_t *as, uint reglist) {
  159. emit_al(as, asm_arm_op_push(reglist));
  160. }
  161. void asm_arm_pop(asm_arm_t *as, uint reglist) {
  162. emit_al(as, asm_arm_op_pop(reglist));
  163. }
  164. void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
  165. emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
  166. }
  167. void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
  168. // TODO: There are more variants of immediate values
  169. if ((imm & 0xFF) == imm) {
  170. emit_al(as, asm_arm_op_mov_imm(rd, imm));
  171. } else if (imm < 0 && imm >= -256) {
  172. // mvn is "move not", not "move negative"
  173. emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
  174. } else {
  175. //Insert immediate into code and jump over it
  176. emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
  177. emit_al(as, 0xa000000); // b pc
  178. emit(as, imm);
  179. }
  180. }
  181. void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
  182. // str rd, [sp, #local_num*4]
  183. emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
  184. }
  185. void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
  186. // ldr rd, [sp, #local_num*4]
  187. emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
  188. }
  189. void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
  190. // cmp rd, #imm
  191. emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
  192. }
  193. void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
  194. // cmp rd, rn
  195. emit_al(as, 0x1500000 | (rd << 16) | rn);
  196. }
  197. void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
  198. emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
  199. emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
  200. }
  201. void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  202. // add rd, rn, rm
  203. emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
  204. }
  205. void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  206. // sub rd, rn, rm
  207. emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
  208. }
  209. void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
  210. // rs and rm are swapped because of restriction rd!=rm
  211. // mul rd, rm, rs
  212. emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
  213. }
  214. void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  215. // and rd, rn, rm
  216. emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
  217. }
  218. void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  219. // eor rd, rn, rm
  220. emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
  221. }
  222. void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  223. // orr rd, rn, rm
  224. emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
  225. }
  226. void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
  227. // add rd, sp, #local_num*4
  228. emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
  229. }
  230. void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
  231. // mov rd, rd, lsl rs
  232. emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
  233. }
  234. void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
  235. // mov rd, rd, asr rs
  236. emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
  237. }
  238. void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
  239. // ldr rd, [rn, #off]
  240. emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
  241. }
  242. void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
  243. // ldrh rd, [rn]
  244. emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
  245. }
  246. void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
  247. // ldrb rd, [rn]
  248. emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
  249. }
  250. void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
  251. // str rd, [rm, #off]
  252. emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
  253. }
  254. void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
  255. // strh rd, [rm]
  256. emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
  257. }
  258. void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
  259. // strb rd, [rm]
  260. emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
  261. }
  262. void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
  263. // str rd, [rm, rn, lsl #2]
  264. emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
  265. }
  266. void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
  267. // strh doesn't support scaled register index
  268. emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
  269. emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
  270. }
  271. void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
  272. // strb rd, [rm, rn]
  273. emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
  274. }
  275. void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
  276. assert(label < as->base.max_num_labels);
  277. mp_uint_t dest = as->base.label_offsets[label];
  278. mp_int_t rel = dest - as->base.code_offset;
  279. rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
  280. rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted
  281. if (SIGNED_FIT24(rel)) {
  282. emit(as, cond | 0xa000000 | (rel & 0xffffff));
  283. } else {
  284. printf("asm_arm_bcc: branch does not fit in 24 bits\n");
  285. }
  286. }
  287. void asm_arm_b_label(asm_arm_t *as, uint label) {
  288. asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
  289. }
// Emit an indirect call to fun_ptr, setting lr to the return address.
// Fast path: load the target pc-relative to r7 when the table offset fits in
// the 12-bit ldr immediate. NOTE(review): this presumes r7 holds the native
// function table pointer at runtime -- confirm against the emitter that sets
// up r7. Slow path: embed fun_ptr in the instruction stream, load it into
// reg_temp, set lr past the literal, and jump.
void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
    // If the table offset fits into the ldr instruction
    if (fun_id < (0x1000 / 4)) {
        // mov lr, pc reads pc 8 bytes ahead, i.e. the instruction after the
        // following ldr -- exactly the return address.
        emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
        emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
        return;
    }
    // pc-relative load of the literal emitted 2 instructions below.
    emit_al(as, 0x59f0004 | (reg_temp << 12)); // ldr rd, [pc, #4]
    // Set lr after fun_ptr
    emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_LR, ASM_ARM_REG_PC, 4)); // add lr, pc, #4
    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_PC, reg_temp)); // mov pc, reg_temp
    emit(as, (uint) fun_ptr);
}
  303. #endif // MICROPY_EMIT_ARM