py/asmthumb: Make ARMv7-M instruction use dynamically selectable.
This commit adjusts the asm_thumb_xxx functions so they can be dynamically configured to use ARMv7-M instructions or not. This is available when MICROPY_DYNAMIC_COMPILER is enabled, and then controlled by the value of mp_dynamic_compiler.native_arch.

If MICROPY_DYNAMIC_COMPILER is disabled the previous behaviour is retained: the functions emit ARMv7-M instructions only if MICROPY_EMIT_THUMB_ARMV7M is enabled.

Signed-off-by: Damien George <damien@micropython.org>
parent 7d3204783a
commit a5324a1074

py/asmthumb.c | 195
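The mechanical pattern of the change, condensed from the hunks below: each compile-time #if MICROPY_EMIT_THUMB_ARMV7M / #else / #endif split becomes a run-time branch on the new inline helper asm_thumb_allow_armv7m(). A minimal before/after sketch (abridged from asm_thumb_b_label, not a complete listing):

    // Before: the instruction set is fixed when the VM is built.
    #if MICROPY_EMIT_THUMB_ARMV7M
    asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel)); // wide Thumb-2 branch
    #else
    asm_thumb_op16(as, OP_B_N(rel)); // narrow 16-bit branch only
    #endif

    // After: the same choice is made at run time, so a single build (for
    // example mpy-cross with MICROPY_DYNAMIC_COMPILER) can emit code for
    // both ARMv6-M and ARMv7-M targets.
    if (asm_thumb_allow_armv7m(as)) {
        asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
    } else {
        asm_thumb_op16(as, OP_B_N(rel));
    }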
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -34,7 +34,6 @@
 #if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
 
 #include "py/mpstate.h"
-#include "py/persistentcode.h"
 #include "py/asmthumb.h"
 
 #define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32)
@@ -46,7 +45,6 @@
 #define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)
 #define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000)
 
-#if MICROPY_EMIT_THUMB_ARMV7M
 // Note: these actually take an imm12 but the high-bit is not encoded here
 #define OP_ADD_W_RRI_HI(reg_src) (0xf200 | (reg_src))
 #define OP_ADD_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff))
@@ -58,7 +56,6 @@
 
 #define OP_LDRH_W_HI(reg_base) (0xf8b0 | (reg_base))
 #define OP_LDRH_W_LO(reg_dest, imm12) ((reg_dest) << 12 | (imm12))
-#endif
 
 static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) {
     return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
@@ -161,21 +158,21 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
     }
     asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
     if (stack_adjust > 0) {
-        #if MICROPY_EMIT_THUMB_ARMV7M
-        if (UNSIGNED_FIT7(stack_adjust)) {
-            asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
+        if (asm_thumb_allow_armv7m(as)) {
+            if (UNSIGNED_FIT7(stack_adjust)) {
+                asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
+            } else {
+                asm_thumb_op32(as, OP_SUB_W_RRI_HI(ASM_THUMB_REG_SP), OP_SUB_W_RRI_LO(ASM_THUMB_REG_SP, stack_adjust * 4));
+            }
         } else {
-            asm_thumb_op32(as, OP_SUB_W_RRI_HI(ASM_THUMB_REG_SP), OP_SUB_W_RRI_LO(ASM_THUMB_REG_SP, stack_adjust * 4));
+            int adj = stack_adjust;
+            // we don't expect the stack_adjust to be massive
+            while (!UNSIGNED_FIT7(adj)) {
+                asm_thumb_op16(as, OP_SUB_SP(127));
+                adj -= 127;
+            }
+            asm_thumb_op16(as, OP_SUB_SP(adj));
         }
-        #else
-        int adj = stack_adjust;
-        // we don't expect the stack_adjust to be massive
-        while (!UNSIGNED_FIT7(adj)) {
-            asm_thumb_op16(as, OP_SUB_SP(127));
-            adj -= 127;
-        }
-        asm_thumb_op16(as, OP_SUB_SP(adj));
-        #endif
     }
     as->push_reglist = reglist;
     as->stack_adjust = stack_adjust;
@@ -183,21 +180,21 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
 
 void asm_thumb_exit(asm_thumb_t *as) {
     if (as->stack_adjust > 0) {
-        #if MICROPY_EMIT_THUMB_ARMV7M
-        if (UNSIGNED_FIT7(as->stack_adjust)) {
-            asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
+        if (asm_thumb_allow_armv7m(as)) {
+            if (UNSIGNED_FIT7(as->stack_adjust)) {
+                asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
+            } else {
+                asm_thumb_op32(as, OP_ADD_W_RRI_HI(ASM_THUMB_REG_SP), OP_ADD_W_RRI_LO(ASM_THUMB_REG_SP, as->stack_adjust * 4));
+            }
         } else {
-            asm_thumb_op32(as, OP_ADD_W_RRI_HI(ASM_THUMB_REG_SP), OP_ADD_W_RRI_LO(ASM_THUMB_REG_SP, as->stack_adjust * 4));
+            int adj = as->stack_adjust;
+            // we don't expect the stack_adjust to be massive
+            while (!UNSIGNED_FIT7(adj)) {
+                asm_thumb_op16(as, OP_ADD_SP(127));
+                adj -= 127;
+            }
+            asm_thumb_op16(as, OP_ADD_SP(adj));
         }
-        #else
-        int adj = as->stack_adjust;
-        // we don't expect the stack_adjust to be massive
-        while (!UNSIGNED_FIT7(adj)) {
-            asm_thumb_op16(as, OP_ADD_SP(127));
-            adj -= 127;
-        }
-        asm_thumb_op16(as, OP_ADD_SP(adj));
-        #endif
     }
     asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist));
 }
@@ -251,27 +248,19 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
     asm_thumb_op16(as, 0x4600 | op_lo);
 }
 
-#if MICROPY_EMIT_THUMB_ARMV7M
-
 // if loading lo half with movw, the i16 value will be zero extended into the r32 register!
-size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
+void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
     assert(reg_dest < ASM_THUMB_REG_R15);
-    size_t loc = mp_asm_base_get_code_pos(&as->base);
     // mov[wt] reg_dest, #i16_src
     asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
-    return loc;
 }
 
-#else
-
-void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src) {
+static void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src) {
    asm_thumb_mov_rlo_i8(as, rlo_dest, (i16_src >> 8) & 0xff);
     asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, 8);
     asm_thumb_add_rlo_i8(as, rlo_dest, i16_src & 0xff);
 }
 
-#endif
-
 #define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
 
 bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) {
@@ -295,14 +284,12 @@ bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) {
     if (!wide) {
         asm_thumb_op16(as, OP_BCC_N(cond, rel));
         return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel);
-    } else {
-        #if MICROPY_EMIT_THUMB_ARMV7M
+    } else if (asm_thumb_allow_armv7m(as)) {
         asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
         return true;
-        #else
+    } else {
         // this method should not be called for ARMV6M
         return false;
-        #endif
     }
 }
 
@@ -323,30 +310,30 @@ size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
 
     size_t loc = mp_asm_base_get_code_pos(&as->base);
 
-    #if MICROPY_EMIT_THUMB_ARMV7M
-    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
-    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
-    #else
-    // should only be called with lo reg for ARMV6M
-    assert(reg_dest < ASM_THUMB_REG_R8);
-
-    // sanity check that generated code is aligned
-    assert(!as->base.code_base || !(3u & (uintptr_t)as->base.code_base));
-
-    // basically:
-    // (nop)
-    // ldr reg_dest, _data
-    // b 1f
-    // _data: .word i32
-    //  1:
-    if (as->base.code_offset & 2u) {
-        asm_thumb_op16(as, ASM_THUMB_OP_NOP);
-    }
-    asm_thumb_ldr_rlo_pcrel_i8(as, reg_dest, 0);
-    asm_thumb_op16(as, OP_B_N(2));
-    asm_thumb_op16(as, i32 & 0xffff);
-    asm_thumb_op16(as, i32 >> 16);
-    #endif
+    if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
+        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
+    } else {
+        // should only be called with lo reg for ARMV6M
+        assert(reg_dest < ASM_THUMB_REG_R8);
+
+        // sanity check that generated code is aligned
+        assert(!as->base.code_base || !(3u & (uintptr_t)as->base.code_base));
+
+        // basically:
+        // (nop)
+        // ldr reg_dest, _data
+        // b 1f
+        // _data: .word i32
+        //  1:
+        if (as->base.code_offset & 2u) {
+            asm_thumb_op16(as, ASM_THUMB_OP_NOP);
+        }
+        asm_thumb_ldr_rlo_pcrel_i8(as, reg_dest, 0);
+        asm_thumb_op16(as, OP_B_N(2));
+        asm_thumb_op16(as, i32 & 0xffff);
+        asm_thumb_op16(as, i32 >> 16);
+    }
 
     return loc;
 }
@@ -354,14 +341,13 @@ size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
 void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
     if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
         asm_thumb_mov_rlo_i8(as, reg_dest, i32);
-    } else {
-        #if MICROPY_EMIT_THUMB_ARMV7M
+    } else if (asm_thumb_allow_armv7m(as)) {
         if (UNSIGNED_FIT16(i32)) {
             asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
         } else {
             asm_thumb_mov_reg_i32(as, reg_dest, i32);
         }
-        #else
+    } else {
         uint rlo_dest = reg_dest;
         assert(rlo_dest < ASM_THUMB_REG_R8); // should never be called for ARMV6M
 
@@ -389,7 +375,6 @@ void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
         if (negate) {
             asm_thumb_neg_rlo_rlo(as, rlo_dest, rlo_dest);
         }
-        #endif
     }
 }
 
@@ -432,27 +417,25 @@ void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->base.code_offset;
     rel |= 1; // to stay in Thumb state when jumping to this address
-    #if MICROPY_EMIT_THUMB_ARMV7M
-    rel -= 6 + 4; // adjust for mov_reg_i16, sxth_rlo_rlo and then PC+4 prefetch of add_reg_reg
-    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, rlo_dest, rel); // 4 bytes
-    asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest); // 2 bytes
-    #else
-    rel -= 8 + 4; // adjust for four instructions and then PC+4 prefetch of add_reg_reg
-    // 6 bytes
-    asm_thumb_mov_rlo_i16(as, rlo_dest, rel);
-    // 2 bytes - not always needed, but we want to keep the size the same
-    asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest);
-    #endif
+    if (asm_thumb_allow_armv7m(as)) {
+        rel -= 6 + 4; // adjust for mov_reg_i16, sxth_rlo_rlo and then PC+4 prefetch of add_reg_reg
+        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, rlo_dest, rel); // 4 bytes
+        asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest); // 2 bytes
+    } else {
+        rel -= 8 + 4; // adjust for four instructions and then PC+4 prefetch of add_reg_reg
+        // 6 bytes
+        asm_thumb_mov_rlo_i16(as, rlo_dest, rel);
+        // 2 bytes - not always needed, but we want to keep the size the same
+        asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest);
+    }
     asm_thumb_add_reg_reg(as, rlo_dest, ASM_THUMB_REG_R15); // 2 bytes
 }
 
-#if MICROPY_EMIT_THUMB_ARMV7M
+// ARMv7-M only
 static inline void asm_thumb_ldr_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) {
     asm_thumb_op32(as, OP_LDR_W_HI(reg_base), OP_LDR_W_LO(reg_dest, word_offset * 4));
 }
-#endif
 
-#if !MICROPY_EMIT_THUMB_ARMV7M
 // emits code for: reg_dest = reg_base + offset << offset_shift
 static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint offset_shift) {
     if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8) {
@@ -479,37 +462,31 @@ static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint offset_shift) {
         assert(0); // should never be called for ARMV6M
     }
 }
-#endif
 
 void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) {
     if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(word_offset)) {
         asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_base, word_offset);
-    } else {
-        #if MICROPY_EMIT_THUMB_ARMV7M
+    } else if (asm_thumb_allow_armv7m(as)) {
         asm_thumb_ldr_reg_reg_i12(as, reg_dest, reg_base, word_offset);
-        #else
+    } else {
         asm_thumb_add_reg_reg_offset(as, reg_dest, reg_base, word_offset - 31, 2);
         asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_dest, 31);
-        #endif
     }
 }
 
-#if MICROPY_EMIT_THUMB_ARMV7M
+// ARMv7-M only
 static inline void asm_thumb_ldrh_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint uint16_offset) {
     asm_thumb_op32(as, OP_LDRH_W_HI(reg_base), OP_LDRH_W_LO(reg_dest, uint16_offset * 2));
 }
-#endif
 
 void asm_thumb_ldrh_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint uint16_offset) {
     if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(uint16_offset)) {
         asm_thumb_ldrh_rlo_rlo_i5(as, reg_dest, reg_base, uint16_offset);
-    } else {
-        #if MICROPY_EMIT_THUMB_ARMV7M
+    } else if (asm_thumb_allow_armv7m(as)) {
        asm_thumb_ldrh_reg_reg_i12(as, reg_dest, reg_base, uint16_offset);
-        #else
+    } else {
         asm_thumb_add_reg_reg_offset(as, reg_dest, reg_base, uint16_offset - 31, 1);
         asm_thumb_ldrh_rlo_rlo_i5(as, reg_dest, reg_dest, 31);
-        #endif
     }
 }
 
@@ -521,20 +498,21 @@ void asm_thumb_b_label(asm_thumb_t *as, uint label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->base.code_offset;
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
 
     if (dest != (mp_uint_t)-1 && rel <= -4) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 12 bit relative jump
         if (SIGNED_FIT12(rel)) {
             asm_thumb_op16(as, OP_B_N(rel));
-        } else {
-            goto large_jump;
+            return;
         }
-    } else {
-        // is a forwards jump, so need to assume it's large
-    large_jump:
-        #if MICROPY_EMIT_THUMB_ARMV7M
+    }
+
+    // is a large backwards jump, or a forwards jump (that must be assumed large)
+    if (asm_thumb_allow_armv7m(as)) {
         asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
-        #else
+    } else {
         if (SIGNED_FIT12(rel)) {
             // this code path has to be the same number of instructions irrespective of rel
             asm_thumb_op16(as, OP_B_N(rel));
@@ -545,7 +523,6 @@ void asm_thumb_b_label(asm_thumb_t *as, uint label) {
                 mp_raise_NotImplementedError(MP_ERROR_TEXT("native method too big"));
             }
         }
-        #endif
     }
 }
 
@@ -553,24 +530,24 @@ void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->base.code_offset;
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
 
     if (dest != (mp_uint_t)-1 && rel <= -4) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 9 bit relative jump
         if (SIGNED_FIT9(rel)) {
             asm_thumb_op16(as, OP_BCC_N(cond, rel));
-        } else {
-            goto large_jump;
+            return;
         }
-    } else {
-        // is a forwards jump, so need to assume it's large
-    large_jump:
-        #if MICROPY_EMIT_THUMB_ARMV7M
+    }
+
+    // is a large backwards jump, or a forwards jump (that must be assumed large)
+    if (asm_thumb_allow_armv7m(as)) {
         asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
-        #else
+    } else {
         // reverse the sense of the branch to jump over a longer branch
         asm_thumb_op16(as, OP_BCC_N(cond ^ 1, 0));
         asm_thumb_b_label(as, label);
-        #endif
     }
 }
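A worked example for the ARMv6-M fallback in asm_thumb_entry() above (values chosen for illustration): the 16-bit SUB SP encoding holds a 7-bit count of words, so a frame of 150 words (600 bytes) fails UNSIGNED_FIT7 and is split into chunks:

    // stack_adjust = 150 words; UNSIGNED_FIT7(150) is false, so the loop emits:
    //     sub sp, #508      ; OP_SUB_SP(127), 127 words
    // leaving adj = 150 - 127 = 23, which fits in 7 bits, so it finishes with:
    //     sub sp, #92       ; OP_SUB_SP(23), 23 words
    // The ARMv7-M path instead emits a single wide instruction:
    //     sub.w sp, sp, #600    ; OP_SUB_W_RRI_HI/LO with stack_adjust * 4

asm_thumb_exit() mirrors this with OP_ADD_SP when unwinding the frame.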
--- a/py/asmthumb.h
+++ b/py/asmthumb.h
@@ -29,6 +29,7 @@
 #include <assert.h>
 #include "py/misc.h"
 #include "py/asmbase.h"
+#include "py/persistentcode.h"
 
 #define ASM_THUMB_REG_R0 (0)
 #define ASM_THUMB_REG_R1 (1)
@@ -70,6 +71,21 @@ typedef struct _asm_thumb_t {
     uint32_t stack_adjust;
 } asm_thumb_t;
 
+#if MICROPY_DYNAMIC_COMPILER
+
+static inline bool asm_thumb_allow_armv7m(asm_thumb_t *as) {
+    return MP_NATIVE_ARCH_ARMV7M <= mp_dynamic_compiler.native_arch
+           && mp_dynamic_compiler.native_arch <= MP_NATIVE_ARCH_ARMV7EMDP;
+}
+
+#else
+
+static inline bool asm_thumb_allow_armv7m(asm_thumb_t *as) {
+    return MICROPY_EMIT_THUMB_ARMV7M;
+}
+
+#endif
+
 static inline void asm_thumb_end_pass(asm_thumb_t *as) {
     (void)as;
 }
@@ -308,12 +324,7 @@ static inline void asm_thumb_sxth_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
 #define ASM_THUMB_OP_MOVT (0xf2c0)
 
 void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
-
-#if MICROPY_EMIT_THUMB_ARMV7M
-size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src);
-#else
-void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src);
-#endif
+void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src);
 
 // these return true if the destination is in range, false otherwise
 bool asm_thumb_b_n_label(asm_thumb_t *as, uint label);
@@ -390,11 +401,6 @@ void asm_thumb_b_rel12(asm_thumb_t *as, int rel);
 
 #define ASM_MOV_LOCAL_REG(as, local_num, reg) asm_thumb_mov_local_reg((as), (local_num), (reg))
 #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_optimised((as), (reg_dest), (imm))
-#if MICROPY_EMIT_THUMB_ARMV7M
-#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_reg_i16((as), ASM_THUMB_OP_MOVW, (reg_dest), (imm))
-#else
-#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_rlo_i16((as), (reg_dest), (imm))
-#endif
 #define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_thumb_mov_reg_i32((as), (reg_dest), (imm))
 #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_thumb_mov_reg_local((as), (reg_dest), (local_num))
 #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src))
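The range check in asm_thumb_allow_armv7m() relies on the MP_NATIVE_ARCH_* constants in py/persistentcode.h being ordered so that the Thumb-2 capable targets (ARMV7M, ARMV7EMSP, ARMV7EMDP) form a contiguous run. A sketch of how a host tool might drive it; the names are real, but this setup code is illustrative and lives in the tool (for example mpy-cross), not in this diff:

    #include <stdbool.h>
    #include "py/persistentcode.h"

    // Illustrative only: pick the target before compiling, as a
    // cross-compiler might do from a -march= command-line option.
    void example_select_target(bool target_has_armv7m) {
        if (target_has_armv7m) {
            mp_dynamic_compiler.native_arch = MP_NATIVE_ARCH_ARMV7M;
        } else {
            // With ARMV6M selected, asm_thumb_allow_armv7m() returns false
            // and the functions above fall back to 16-bit encodings.
            mp_dynamic_compiler.native_arch = MP_NATIVE_ARCH_ARMV6M;
        }
    }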
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -2421,48 +2421,48 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
         asm_x86_setcc_r8(emit->as, ops[op_idx], REG_RET);
         #elif N_THUMB
         asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, reg_rhs);
-        #if MICROPY_EMIT_THUMB_ARMV7M
-        static uint16_t ops[6 + 6] = {
-            // unsigned
-            ASM_THUMB_OP_ITE_CC,
-            ASM_THUMB_OP_ITE_HI,
-            ASM_THUMB_OP_ITE_EQ,
-            ASM_THUMB_OP_ITE_LS,
-            ASM_THUMB_OP_ITE_CS,
-            ASM_THUMB_OP_ITE_NE,
-            // signed
-            ASM_THUMB_OP_ITE_LT,
-            ASM_THUMB_OP_ITE_GT,
-            ASM_THUMB_OP_ITE_EQ,
-            ASM_THUMB_OP_ITE_LE,
-            ASM_THUMB_OP_ITE_GE,
-            ASM_THUMB_OP_ITE_NE,
-        };
-        asm_thumb_op16(emit->as, ops[op_idx]);
-        asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
-        asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
-        #else
-        static uint16_t ops[6 + 6] = {
-            // unsigned
-            ASM_THUMB_CC_CC,
-            ASM_THUMB_CC_HI,
-            ASM_THUMB_CC_EQ,
-            ASM_THUMB_CC_LS,
-            ASM_THUMB_CC_CS,
-            ASM_THUMB_CC_NE,
-            // signed
-            ASM_THUMB_CC_LT,
-            ASM_THUMB_CC_GT,
-            ASM_THUMB_CC_EQ,
-            ASM_THUMB_CC_LE,
-            ASM_THUMB_CC_GE,
-            ASM_THUMB_CC_NE,
-        };
-        asm_thumb_bcc_rel9(emit->as, ops[op_idx], 6);
-        asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
-        asm_thumb_b_rel12(emit->as, 4);
-        asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
-        #endif
+        if (asm_thumb_allow_armv7m(emit->as)) {
+            static uint16_t ops[6 + 6] = {
+                // unsigned
+                ASM_THUMB_OP_ITE_CC,
+                ASM_THUMB_OP_ITE_HI,
+                ASM_THUMB_OP_ITE_EQ,
+                ASM_THUMB_OP_ITE_LS,
+                ASM_THUMB_OP_ITE_CS,
+                ASM_THUMB_OP_ITE_NE,
+                // signed
+                ASM_THUMB_OP_ITE_LT,
+                ASM_THUMB_OP_ITE_GT,
+                ASM_THUMB_OP_ITE_EQ,
+                ASM_THUMB_OP_ITE_LE,
+                ASM_THUMB_OP_ITE_GE,
+                ASM_THUMB_OP_ITE_NE,
+            };
+            asm_thumb_op16(emit->as, ops[op_idx]);
+            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
+            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
+        } else {
+            static uint16_t ops[6 + 6] = {
+                // unsigned
+                ASM_THUMB_CC_CC,
+                ASM_THUMB_CC_HI,
+                ASM_THUMB_CC_EQ,
+                ASM_THUMB_CC_LS,
+                ASM_THUMB_CC_CS,
+                ASM_THUMB_CC_NE,
+                // signed
+                ASM_THUMB_CC_LT,
+                ASM_THUMB_CC_GT,
+                ASM_THUMB_CC_EQ,
+                ASM_THUMB_CC_LE,
+                ASM_THUMB_CC_GE,
+                ASM_THUMB_CC_NE,
+            };
+            asm_thumb_bcc_rel9(emit->as, ops[op_idx], 6);
+            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
+            asm_thumb_b_rel12(emit->as, 4);
+            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
+        }
         #elif N_ARM
         asm_arm_cmp_reg_reg(emit->as, REG_ARG_2, reg_rhs);
         static uint ccs[6 + 6] = {
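For reference, the two N_THUMB paths above compute the same boolean result by different means; a commented sketch of the emitted sequences for one of the unsigned comparisons (register numbers illustrative):

    // ARMv7-M: an IT block predicates the two moves, no branch needed.
    //     cmp   r1, r2
    //     ite   cc        ; then/else on "carry clear" (unsigned lower)
    //     movcc r0, #1    ; executed if the comparison is true
    //     movcs r0, #0    ; executed if it is false
    //
    // ARMv6-M: no IT blocks exist, so branch around the moves instead.
    //     cmp   r1, r2
    //     bcc   1f        ; asm_thumb_bcc_rel9(..., 6): jump if true
    //     movs  r0, #0
    //     b     2f        ; asm_thumb_b_rel12(..., 4): skip the true case
    // 1:  movs  r0, #1
    // 2: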