py: Implement native multiply operation in viper emitter.
parent 4d9cad180d
commit 567b349c2b

py/asmarm.c
@@ -179,6 +179,12 @@ STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
     return 0x0400000 | (rn << 16) | (rd << 12) | rm;
 }
 
+STATIC uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
+    // mul rd, rm, rs
+    assert(rd != rm);
+    return 0x0000090 | (rd << 16) | (rs << 8) | rm;
+}
+
 STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
     // and rd, rn, rm
     return 0x0000000 | (rn << 16) | (rd << 12) | rm;
@@ -340,6 +346,12 @@ void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
     emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
 }
 
+void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
+    // rs and rm are swapped because of restriction rd!=rm
+    // mul rd, rm, rs
+    emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
+}
+
 void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
     // and rd, rn, rm
     emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
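Note: a quick way to sanity-check the MUL encoding added above (a sketch, not part of the commit). ARM's MUL disallows rd == rm on older cores, which is why asm_arm_mul_reg_reg_reg swaps rs and rm before calling the encoder; emit_al() is assumed to OR in the "always" condition (0xE << 28).

    # Hypothetical Python mirror of asm_arm_op_mul_reg, for checking encodings only.
    def arm_op_mul_reg(rd, rm, rs):
        assert rd != rm  # same restriction the C encoder asserts
        return 0x0000090 | (rd << 16) | (rs << 8) | rm

    COND_AL = 0xE << 28  # condition field assumed to be added by emit_al()

    # mul r0, r1, r2  ->  0xe0000291
    print(hex(COND_AL | arm_op_mul_reg(0, 1, 2)))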
py/asmarm.h
@@ -101,6 +101,7 @@ void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn);
 // arithmetic
 void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
py/asmx64.c
@@ -453,6 +453,12 @@ void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
     asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_SUB_R64_FROM_RM64);
 }
 
+void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    // imul reg64, reg/mem64 -- 0x0f 0xaf /r
+    asm_x64_write_byte_1(as, REX_PREFIX | REX_W | (dest_r64 < 8 ? 0 : REX_R) | (src_r64 < 8 ? 0 : REX_B));
+    asm_x64_write_byte_3(as, 0x0f, 0xaf, MODRM_R64(dest_r64) | MODRM_RM_REG | MODRM_RM_R64(src_r64));
+}
+
 /*
 void asm_x64_sub_i32_from_r32(asm_x64_t *as, int src_i32, int dest_r32) {
     if (SIGNED_FIT8(src_i32)) {
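Note: the "0x0f 0xaf /r" comment above is the two-operand form of IMUL (dest = dest * src), which is why a single ASM_MUL_REG_REG(dest, src) macro is enough for the emitter. A rough sketch of the bytes the 64-bit version produces, assuming the usual REX.W/ModRM conventions (helper name is illustrative):

    # Illustrative only: bytes for "imul dest_r64, src_r64".
    def imul_r64_r64(dest, src):
        rex = 0x48 | ((dest >> 3) << 2) | (src >> 3)   # REX.W, plus R/B bits for r8-r15
        modrm = 0xC0 | ((dest & 7) << 3) | (src & 7)   # register-direct ModRM
        return bytes([rex, 0x0F, 0xAF, modrm])

    # imul rax, rcx  ->  48 0f af c1
    print(imul_r64_r64(0, 1).hex())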
py/asmx64.h
@@ -105,6 +105,7 @@ void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64);
 void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64);
 void asm_x64_add_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
 void asm_x64_sub_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
+void asm_x64_mul_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
 void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b);
 void asm_x64_test_r8_with_r8(asm_x64_t* as, int src_r64_a, int src_r64_b);
 void asm_x64_setcc_r8(asm_x64_t* as, int jcc_type, int dest_r8);
py/asmx86.c
@@ -369,6 +369,11 @@ STATIC void asm_x86_sub_r32_i32(asm_x86_t *as, int dest_r32, int src_i32) {
     }
 }
 
+void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    // imul reg32, reg/mem32 -- 0x0f 0xaf /r
+    asm_x86_write_byte_3(as, 0x0f, 0xaf, MODRM_R32(dest_r32) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+}
+
 #if 0
 /* shifts not tested */
 void asm_x86_shl_r32_by_imm(asm_x86_t *as, int r32, int imm) {
py/asmx86.h
@@ -104,6 +104,7 @@ void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32);
 void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32);
 void asm_x86_add_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
 void asm_x86_sub_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
+void asm_x86_mul_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
 void asm_x86_cmp_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b);
 void asm_x86_test_r8_with_r8(asm_x86_t* as, int src_r32_a, int src_r32_b);
 void asm_x86_setcc_r8(asm_x86_t* as, mp_uint_t jcc_type, int dest_r8);
py/emitnative.c
@@ -146,6 +146,7 @@
 #define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src))
 
 #define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
 #define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest))
@@ -290,6 +291,7 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src))
 
 #define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
 #define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
@@ -382,6 +384,7 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src))
 
 #define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
 #define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), (word_offset))
@@ -473,6 +476,7 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 
 #define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
 #define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
@@ -2029,6 +2033,9 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
         } else if (op == MP_BINARY_OP_SUBTRACT || op == MP_BINARY_OP_INPLACE_SUBTRACT) {
             ASM_SUB_REG_REG(emit->as, REG_ARG_2, reg_rhs);
             emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_MULTIPLY || op == MP_BINARY_OP_INPLACE_MULTIPLY) {
+            ASM_MUL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
         } else if (MP_BINARY_OP_LESS <= op && op <= MP_BINARY_OP_NOT_EQUAL) {
             // comparison ops are (in enum order):
             // MP_BINARY_OP_LESS
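Note: with this branch in place, a viper function whose operands are both typed int gets its multiply compiled to a single native instruction; the test added below exercises exactly this. A minimal usage sketch, assuming a MicroPython build with the native emitter enabled (function name is illustrative):

    import micropython

    @micropython.viper
    def scale(x: int, y: int) -> int:
        # both operands are native ints, so x * y compiles via ASM_MUL_REG_REG
        return x * y

    print(scale(8, 3))   # 24
    print(scale(-3, 4))  # -12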
tests: viper binary-op arithmetic test
@@ -18,6 +18,17 @@ sub(42, 3)
 sub(-1, 2)
 sub(-42, -3)
 
+@micropython.viper
+def mul(x:int, y:int):
+    print(x * y)
+    print(y * x)
+mul(0, 1)
+mul(1, -1)
+mul(1, 2)
+mul(8, 3)
+mul(-3, 4)
+mul(-9, -6)
+
 @micropython.viper
 def shl(x:int, y:int):
     print(x << y)
tests: viper binary-op arithmetic test, expected output
@@ -14,6 +14,18 @@
 3
 -39
 39
+0
+0
+-1
+-1
+2
+2
+24
+24
+-12
+-12
+54
+54
 1
 8
 1073741824
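Note: the expected-output lines added above come in pairs because each mul() call in the test prints x * y and then y * x. Equivalent plain Python for cross-checking the values:

    for x, y in [(0, 1), (1, -1), (1, 2), (8, 3), (-3, 4), (-9, -6)]:
        print(x * y)
        print(y * x)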