path: root/py/asmthumb.c
author    Damien George <damien.p.george@gmail.com>  2019-02-21 15:18:33 +1100
committer Damien George <damien.p.george@gmail.com>  2019-03-08 15:53:05 +1100
commit    1396a026be78bc90ea18961ba6760f851b691b4c (patch)
tree      4135ac73bcd2de56362d24895f8aec309666fce7 /py/asmthumb.c
parent    636ed0ff8d4d3c43f7d9fb2d8852073f99c1e6cf (diff)
py: Add support to save native, viper and asm code to .mpy files.
This commit adds support for saving and loading .mpy files that contain
native code (native, viper and inline-asm). A lot of the ground work was
already done for this in the form of removing pointers from generated
native code. The changes here are mainly to link in qstr values to the
native code, and to change the format of .mpy files to contain native code
blocks (possibly mixed with bytecode).

A top-level summary:

- @micropython.native, @micropython.viper and @micropython.asm_thumb/
  asm_xtensa are now allowed in .py files when compiling to .mpy, and they
  work transparently to the user.

- Entire .py files can be compiled to native via mpy-cross -X emit=native
  and for the most part the generated .mpy files should work the same as
  their bytecode version.

- The .mpy file format is changed to: 1) specify in the header whether the
  file contains native code and, if so, the architecture (eg x86, ARMV7M,
  Xtensa); 2) specify for each function block the kind of code it contains
  (bytecode, native, viper, asm).

- When native code is loaded from a .mpy file it must be modified in place
  to link qstr values in, just like bytecode (see the
  py/persistentcode.c:arch_link_qstr() function).

In addition, this now defines a public, native ABI for dynamically loadable
native code generated by other languages, such as C.
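To make the in-place linking step concrete, here is a minimal sketch of how
a loader could patch the 16-bit immediate of a Thumb-2 movw/movt
instruction once the real qstr value is known. The bit shuffling mirrors
what asm_thumb_mov_reg_i16() emits in the diff below; the function name
rewrite_mov_imm16 is an illustrative assumption, not the exact
py/persistentcode.c code.

    #include <stdint.h>

    // Sketch (assumed helper, not the actual MicroPython implementation):
    // patch the 16-bit immediate of a Thumb-2 movw/movt instruction in
    // place.  The two little-endian halfwords live at pc and pc+2, and the
    // immediate is split across the i, imm4, imm3 and imm8 fields, exactly
    // as asm_thumb_mov_reg_i16() encodes it in the diff below.
    static void rewrite_mov_imm16(uint8_t *pc, uint16_t val) {
        uint16_t hi = (uint16_t)(pc[0] | (pc[1] << 8));
        uint16_t lo = (uint16_t)(pc[2] | (pc[3] << 8));
        hi = (hi & 0xfbf0) | ((val >> 1) & 0x0400) | (val >> 12);   // i, imm4
        lo = (lo & 0x8f00) | ((val << 4) & 0x7000) | (val & 0xff);  // imm3, imm8
        pc[0] = (uint8_t)hi; pc[1] = (uint8_t)(hi >> 8);
        pc[2] = (uint8_t)lo; pc[3] = (uint8_t)(lo >> 8);
    }

A loader would then walk a table of (code offset, qstr) pairs recorded at
compile time and call this for each entry, which is roughly the role the
commit message ascribes to arch_link_qstr().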
Diffstat (limited to 'py/asmthumb.c')
-rw-r--r--  py/asmthumb.c  10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/py/asmthumb.c b/py/asmthumb.c
index 46102395d..e6bba7ea6 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -225,10 +225,12 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
 }
 
 // if loading lo half with movw, the i16 value will be zero extended into the r32 register!
-void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
+size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
     assert(reg_dest < ASM_THUMB_REG_R15);
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
     // mov[wt] reg_dest, #i16_src
     asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
+    return loc;
 }
 
 #define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
@@ -271,12 +273,16 @@ bool asm_thumb_bl_label(asm_thumb_t *as, uint label) {
     return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel);
 }
 
-void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
+size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
     // movw, movt does it in 8 bytes
     // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+
     asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
     asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
+
+    return loc;
 }
 
 void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
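The size_t return value added above is the byte offset of the emitted
instructions within the code block, which lets a caller record where a
constant load happened so it can be patched later, for example by the
qstr-linking pass described in the commit message. A hedged caller-side
sketch, where record_qstr_link() is an invented helper used only for
illustration:

    // Emit a 32-bit constant load with a placeholder value and remember
    // its offset so a later pass can rewrite the movw/movt immediates in
    // place once the real qstr value is known.
    size_t loc = asm_thumb_mov_reg_i32(as, ASM_THUMB_REG_R0, 0 /* placeholder */);
    record_qstr_link(as, loc, qst);  // assumed helper, not a MicroPython API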