py: Implement native load for viper.
Viper can now do: ptr8(buf)[0], which loads a byte from a buffer using machine instructions.
parent 1ef2348df0
commit 91cfd414c0
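For context, this is the style of function the change enables — a minimal sketch in the style of the new tests below (runs under MicroPython with the viper emitter enabled; it will not run under CPython, and the function name is illustrative):

    @micropython.viper
    def first_byte(buf) -> int:
        p = ptr8(buf)   # view buf as raw 8-bit memory
        return p[0]     # compiles to a single machine load (ldrb / movzx)

    print(first_byte(b'A'))  # prints 65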
py/asmarm.c (15 changes)
@@ -357,6 +357,21 @@ void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
     emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
 }
 
+void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldr rd, [rn]
+    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12));
+}
+
+void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldrh rd, [rn]
+    emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
+}
+
+void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldrb rd, [rn]
+    emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
+}
+
 void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm) {
     // str rd, [rm]
     emit_al(as, 0x5800000 | (rm << 16) | (rd << 12));
py/asmarm.h

@@ -104,6 +104,9 @@ void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
 void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
 
 // memory
+void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn);
+void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn);
+void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn);
 void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm);
 void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm);
 void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm);
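A reading aid, not part of the commit: emit_al() ORs the AL (always-execute) condition, 0xE, into bits 28-31 of the word, so the constants above only describe the instruction fields themselves. A quick plain-Python sanity check of the ldr/ldrb words against the standard ARM encodings:

    AL = 0xE << 28

    def arm_ldr(rd, rn):   # ldr rd, [rn]
        return AL | 0x5900000 | (rn << 16) | (rd << 12)

    def arm_ldrb(rd, rn):  # ldrb rd, [rn]
        return AL | 0x5d00000 | (rn << 16) | (rd << 12)

    assert arm_ldr(0, 1) == 0xE5910000   # ldr r0, [r1]
    assert arm_ldrb(0, 1) == 0xE5D10000  # ldrb r0, [r1]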
py/asmx64.c (36 changes)
@@ -51,7 +51,9 @@
 #define OPCODE_MOV_I32_TO_RM32 (0xc7)
 #define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
 #define OPCODE_MOV_R64_TO_RM64 (0x89) /* /r */
-#define OPCODE_MOV_RM64_TO_R64 (0x8b)
+#define OPCODE_MOV_RM64_TO_R64 (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R64 (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R64 (0xb7) /* 0x0f 0xb7/r */
 #define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */
 #define OPCODE_AND_R64_TO_RM64 (0x21) /* /r */
 #define OPCODE_OR_R64_TO_RM64 (0x09) /* /r */
@@ -302,7 +304,7 @@ void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
     asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_MOV_R64_TO_RM64);
 }
 
-void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
     assert(dest_r64 < 8);
     if (src_r64 < 8) {
         asm_x64_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
@@ -312,7 +314,7 @@ void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_d
     asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
 }
 
-void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
     assert(dest_r64 < 8);
     if (src_r64 < 8) {
         asm_x64_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R64_TO_RM64);
@@ -322,14 +324,34 @@ void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_
     asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
 }
 
-void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
     // use REX prefix for 64 bit operation
     assert(dest_r64 < 8);
     asm_x64_write_byte_2(as, REX_PREFIX | REX_W | (src_r64 < 8 ? 0 : REX_R), OPCODE_MOV_R64_TO_RM64);
     asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
 }
 
-void asm_x64_mov_disp_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    assert(src_r64 < 8);
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+    } else {
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_R, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    assert(src_r64 < 8);
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+    } else {
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_R, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
     // use REX prefix for 64 bit operation
     assert(src_r64 < 8);
     asm_x64_write_byte_2(as, REX_PREFIX | REX_W | (dest_r64 < 8 ? 0 : REX_R), OPCODE_MOV_RM64_TO_R64);
@@ -587,11 +609,11 @@ STATIC int asm_x64_local_offset_from_ebp(asm_x64_t *as, int local_num) {
 }
 
 void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64) {
-    asm_x64_mov_disp_to_r64(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, src_local_num), dest_r64);
+    asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, src_local_num), dest_r64);
 }
 
 void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num) {
-    asm_x64_mov_r64_to_disp(as, src_r64, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, dest_local_num));
+    asm_x64_mov_r64_to_mem64(as, src_r64, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, dest_local_num));
 }
 
 void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64) {
py/asmx64.h

@@ -83,9 +83,12 @@ void asm_x64_mov_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
 void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64);
-void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
-void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
-void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
 void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
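A note on the new zero-extending loads (a model, not the commit's code): MOVZX is the two-byte opcode 0x0F 0xB6/0xB7, and a REX.R prefix is needed only when the destination is r8-r15 — exactly the dest_r64 < 8 branch above. A sketch of the emitted bytes for a zero-displacement 8-bit load, assuming the base register needs no SIB or displacement byte (i.e. not rsp/rbp):

    def movzx8(dest, base):  # movzx dest, byte [base]
        rex = [0x44] if dest >= 8 else []       # REX.R extends the reg field
        modrm = ((dest & 7) << 3) | (base & 7)  # mod=00: register-indirect
        return bytes(rex + [0x0F, 0xB6, modrm])

    assert movzx8(0, 1) == bytes([0x0F, 0xB6, 0x01])  # movzx eax, byte [rcx]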
py/asmx86.c (32 changes)
@@ -50,8 +50,10 @@
 #define OPCODE_MOV_I32_TO_R32 (0xb8)
 //#define OPCODE_MOV_I32_TO_RM32 (0xc7)
 #define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
-#define OPCODE_MOV_R32_TO_RM32 (0x89)
-#define OPCODE_MOV_RM32_TO_R32 (0x8b)
+#define OPCODE_MOV_R32_TO_RM32 (0x89) /* /r */
+#define OPCODE_MOV_RM32_TO_R32 (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R32 (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R32 (0xb7) /* 0x0f 0xb7/r */
 #define OPCODE_LEA_MEM_TO_R32 (0x8d) /* /r */
 #define OPCODE_AND_R32_TO_RM32 (0x21) /* /r */
 #define OPCODE_OR_R32_TO_RM32 (0x09) /* /r */
@@ -244,22 +246,32 @@ void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
     asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_MOV_R32_TO_RM32);
 }
 
-void asm_x86_mov_r8_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
     asm_x86_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
     asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
 }
 
-void asm_x86_mov_r16_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
     asm_x86_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R32_TO_RM32);
     asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
 }
 
-void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
     asm_x86_write_byte_1(as, OPCODE_MOV_R32_TO_RM32);
     asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
 }
 
-STATIC void asm_x86_mov_disp_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
     asm_x86_write_byte_1(as, OPCODE_MOV_RM32_TO_R32);
     asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
 }
@@ -474,12 +486,12 @@ void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) {
 #endif
 
 void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) {
-    asm_x86_mov_disp_to_r32(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE, dest_r32);
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE, dest_r32);
 }
 
 #if 0
 void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) {
-    asm_x86_mov_r32_to_disp(as, src_r32, ASM_X86_REG_EBP, 2 * WORD_SIZE + dest_arg_num * WORD_SIZE);
+    asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_EBP, 2 * WORD_SIZE + dest_arg_num * WORD_SIZE);
 }
 #endif
@@ -499,11 +511,11 @@ STATIC int asm_x86_local_offset_from_ebp(asm_x86_t *as, int local_num) {
 }
 
 void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) {
-    asm_x86_mov_disp_to_r32(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32);
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32);
 }
 
 void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) {
-    asm_x86_mov_r32_to_disp(as, src_r32, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num));
+    asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num));
 }
 
 void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) {
py/asmx86.h

@@ -80,9 +80,12 @@ void* asm_x86_get_code(asm_x86_t* as);
 void asm_x86_mov_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
 void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
 void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32);
-void asm_x86_mov_r8_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
-void asm_x86_mov_r16_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
-void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
 void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
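The 32-bit port needs no REX logic; the only subtlety is that the 16-bit forms reuse the 32-bit opcodes behind the 0x66 operand-size prefix (what OP_SIZE_PREFIX expands to). A minimal byte-level sketch under the same mod=00, no-displacement assumption as above:

    def mov_r16_to_mem16(src, base):  # mov word [base], src
        modrm = ((src & 7) << 3) | (base & 7)
        return bytes([0x66, 0x89, modrm])  # 0x66 narrows the 32-bit MOV to 16 bits

    assert mov_r16_to_mem16(0, 1) == bytes([0x66, 0x89, 0x01])  # mov [ecx], ax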
py/emitnative.c (123 changes)
@@ -151,9 +151,13 @@
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
 
-#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_disp((as), (reg_src), (reg_base), 0)
-#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_disp((as), (reg_src), (reg_base), 0)
-#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_disp((as), (reg_src), (reg_base), 0)
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0)
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
 
 #elif N_X86
@@ -279,9 +283,13 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
 
-#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_disp((as), (reg_src), (reg_base), 0)
-#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_disp((as), (reg_src), (reg_base), 0)
-#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_disp((as), (reg_src), (reg_base), 0)
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
 
 #elif N_THUMB
@@ -358,6 +366,10 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
 
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+
 #define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
 #define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
 #define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
@@ -437,6 +449,10 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base))
+
 #define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base))
 #define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base))
 #define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base))
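Why the Thumb fast path in the subscript compiler below is limited to constant indices 0..31: the rlo_rlo_i5 forms carry the offset in a 5-bit immediate field (scaled by the access size for ldrh/str). A quick model of the ldrb encoding, checked against the standard Thumb format:

    def thumb_ldrb_i5(rd, rn, imm5):  # ldrb rd, [rn, #imm5]
        assert 0 <= imm5 < 32         # only 5 bits of offset available
        return 0x7800 | (imm5 << 6) | (rn << 3) | rd

    assert thumb_ldrb_i5(0, 1, 0) == 0x7808  # ldrb r0, [r1, #0]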
@@ -1277,13 +1293,100 @@ STATIC void emit_native_load_build_class(emit_t *emit) {
 }
 
 STATIC void emit_native_load_subscr(emit_t *emit) {
-    vtype_kind_t vtype_lhs, vtype_rhs;
-    emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_2, &vtype_lhs, REG_ARG_1);
-    if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
+    DEBUG_printf("load_subscr\n");
+    // need to compile: base[index]
+
+    // pop: index, base
+    // optimise case where index is an immediate
+    vtype_kind_t vtype_base = peek_vtype(emit, 1);
+
+    if (vtype_base == VTYPE_PYOBJ) {
+        // standard Python call
+        vtype_kind_t vtype_index;
+        emit_pre_pop_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1);
+        assert(vtype_index == VTYPE_PYOBJ);
         emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_SENTINEL, REG_ARG_3);
         emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
     } else {
-        printf("ViperTypeError: can't do subscr of types %d and %d\n", vtype_lhs, vtype_rhs);
+        // viper load
+        // TODO The different machine architectures have very different
+        // capabilities and requirements for loads, so probably best to
+        // write a completely separate load-optimiser for each one.
+        stack_info_t *top = peek_stack(emit, 0);
+        if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
+            // index is an immediate
+            mp_int_t index_value = top->u_imm;
+            emit_pre_pop_discard(emit); // discard index
+            int reg_base = REG_ARG_1;
+            int reg_index = REG_ARG_2;
+            emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_index);
+            switch (vtype_base) {
+                case VTYPE_PTR8: {
+                    // pointer to 8-bit memory
+                    // TODO optimise to use thumb ldrb r1, [r2, r3]
+                    if (index_value != 0) {
+                        // index is non-zero
+                        #if N_THUMB
+                        if (index_value > 0 && index_value < 32) {
+                            asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+                            break;
+                        }
+                        #endif
+                        ASM_MOV_IMM_TO_REG(emit->as, index_value, reg_index);
+                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
+                        reg_base = reg_index;
+                    }
+                    ASM_LOAD8_REG_REG(emit->as, REG_RET, reg_base); // load from (base+index)
+                    break;
+                }
+                case VTYPE_PTR16: {
+                    // pointer to 16-bit memory
+                    if (index_value != 0) {
+                        // index is a non-zero immediate
+                        #if N_THUMB
+                        if (index_value > 0 && index_value < 32) {
+                            asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+                            break;
+                        }
+                        #endif
+                        ASM_MOV_IMM_TO_REG(emit->as, index_value << 1, reg_index);
+                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
+                        reg_base = reg_index;
+                    }
+                    ASM_LOAD16_REG_REG(emit->as, REG_RET, reg_base); // load from (base+2*index)
+                    break;
+                }
+                default:
+                    printf("ViperTypeError: can't load from type %d\n", vtype_base);
+            }
+        } else {
+            // index is not an immediate
+            vtype_kind_t vtype_index;
+            int reg_index = REG_ARG_2;
+            emit_pre_pop_reg_flexible(emit, &vtype_index, &reg_index, REG_ARG_1, REG_ARG_1);
+            emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
+            switch (vtype_base) {
+                case VTYPE_PTR8: {
+                    // pointer to 8-bit memory
+                    // TODO optimise to use thumb ldrb r1, [r2, r3]
+                    assert(vtype_index == VTYPE_INT);
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_LOAD8_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+index)
+                    break;
+                }
+                case VTYPE_PTR16: {
+                    // pointer to 16-bit memory
+                    assert(vtype_index == VTYPE_INT);
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base again (total offset 2*index)
+                    ASM_LOAD16_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+2*index)
+                    break;
+                }
+                default:
+                    printf("ViperTypeError: can't load from type %d\n", vtype_base);
+            }
+        }
+        emit_post_push_reg(emit, VTYPE_INT, REG_RET);
     }
 }
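In other words, the viper load computes base + index * sizeof(element): for ptr8 the index is used as-is, while for ptr16 the immediate is shifted left by one (or the index register is added twice). A plain-Python model of the semantics, little-endian as the tests below assume (function name hypothetical):

    def viper_load(mem, elem_size, index):
        addr = index * elem_size
        # loads are zero-extended, little-endian
        return int.from_bytes(mem[addr:addr + elem_size], 'little')

    buf = bytearray(b'1234')
    assert viper_load(buf, 1, 0) == 0x31    # ptr8: '1'
    assert viper_load(buf, 2, 1) == 0x3433  # ptr16: '3','4' -> 0x3433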
tests/micropython/viper_ptr16_load.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# test loading from ptr16 type
+# only works on little endian machines
+
+@micropython.viper
+def get(src:ptr16) -> int:
+    return src[0]
+
+@micropython.viper
+def memadd(src:ptr16, n:int) -> int:
+    sum = 0
+    for i in range(n):
+        sum += src[i]
+    return sum
+
+b = bytearray(b'1234')
+print(b)
+print(get(b))
+print(memadd(b, 2))
tests/micropython/viper_ptr16_load.py.exp (new file, 3 lines)
@@ -0,0 +1,3 @@
+bytearray(b'1234')
+12849
+26212
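The expected numbers are just little-endian 16-bit reads of b'1234', checkable in plain Python:

    assert ord('1') | (ord('2') << 8) == 12849            # get(b): first halfword
    assert 12849 + (ord('3') | (ord('4') << 8)) == 26212  # memadd(b, 2)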
tests/micropython/viper_ptr8_load.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+# test loading from ptr8 type
+
+@micropython.viper
+def get(src:ptr8) -> int:
+    return src[0]
+
+@micropython.viper
+def memadd(src:ptr8, n:int) -> int:
+    sum = 0
+    for i in range(n):
+        sum += src[i]
+    return sum
+
+@micropython.viper
+def memadd2(src_in) -> int:
+    src = ptr8(src_in)
+    n = int(len(src_in))
+    sum = 0
+    for i in range(n):
+        sum += src[i]
+    return sum
+
+b = bytearray(b'1234')
+print(b)
+print(get(b))
+print(memadd(b, 4))
+print(memadd2(b))
tests/micropython/viper_ptr8_load.py.exp (new file, 4 lines)
@@ -0,0 +1,4 @@
+bytearray(b'1234')
+49
+202
+202
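And for the byte-wide test: 49 is ord('1'), and both memadd variants sum all four bytes:

    assert ord('1') == 49       # get(b)
    assert sum(b'1234') == 202  # memadd(b, 4) and memadd2(b)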