Commit 826005c6 authored by Damien

Add support for inline thumb assembly.

parent 5bfb7599
@@ -19,6 +19,7 @@ SRC = \
 	emitx64.c \
 	emitthumb.c \
 	asmthumb.c \
+	emitinlinethumb.c \
 	runtime.c \
 	vm.c \
 	main.c \
...
@@ -20,7 +20,6 @@ struct _asm_thumb_t {
 
     byte *code_base;
     byte dummy_data[8];
 
-    int next_label;
     int max_num_labels;
     int *label_offsets;
     int num_locals;
@@ -65,7 +64,6 @@ void asm_thumb_free(asm_thumb_t *as, bool free_code) {
 
 void asm_thumb_start_pass(asm_thumb_t *as, int pass) {
     as->pass = pass;
     as->code_offset = 0;
-    as->next_label = 1;
     if (pass == ASM_THUMB_PASS_2) {
         memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
     }
@@ -212,10 +210,6 @@ void asm_thumb_exit(asm_thumb_t *as) {
     asm_thumb_write_op16(as, OP_POP_RLIST_PC(as->push_reglist));
 }
 
-int asm_thumb_label_new(asm_thumb_t *as) {
-    return as->next_label++;
-}
-
 void asm_thumb_label_assign(asm_thumb_t *as, int label) {
     assert(label < as->max_num_labels);
     if (as->pass == ASM_THUMB_PASS_2) {
@@ -234,43 +228,33 @@ static int get_label_dest(asm_thumb_t *as, int label) {
     return as->label_offsets[label];
 }
 
-// the i8 value will be zero extended into the r32 register!
-void asm_thumb_mov_reg_i8(asm_thumb_t *as, uint rlo_dest, int i8) {
+#define OP_MOVS_RLO_I8(rlo_dest, i8_src) (0x2000 | ((rlo_dest) << 8) | (i8_src))
+
+// the i8_src value will be zero extended into the r32 register!
+void asm_thumb_movs_rlo_i8(asm_thumb_t *as, uint rlo_dest, int i8_src) {
     assert(rlo_dest < REG_R8);
-    // movs rlo_dest, #i8
-    asm_thumb_write_op16(as, 0x2000 | (rlo_dest << 8) | i8);
+    // movs rlo_dest, #i8_src
+    asm_thumb_write_op16(as, OP_MOVS_RLO_I8(rlo_dest, i8_src));
 }
 
-// if loading lo half, the i16 value will be zero extended into the r32 register!
-void asm_thumb_mov_i16_to_reg(asm_thumb_t *as, int i16, uint reg_dest, bool load_hi_half) {
+#define OP_MOVW (0xf240)
+#define OP_MOVT (0xf2c0)
+
+// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
+static void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
     assert(reg_dest < REG_R15);
-    uint op;
-    if (load_hi_half) {
-        // movt reg_dest, #i16
-        op = 0xf2c0;
-    } else {
-        // movw reg_dest, #i16
-        op = 0xf240;
-    }
-    asm_thumb_write_op32(as, op | ((i16 >> 1) & 0x0400) | ((i16 >> 12) & 0xf), ((i16 << 4) & 0x7000) | (reg_dest << 8) | (i16 & 0xff));
+    // mov[wt] reg_dest, #i16_src
+    asm_thumb_write_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
 }
 
-void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, machine_uint_t i32) {
-    // movw, movt does it in 8 bytes
-    // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
-    asm_thumb_mov_i16_to_reg(as, i32, reg_dest, false);
-    asm_thumb_mov_i16_to_reg(as, i32 >> 16, reg_dest, true);
+// the i16_src value will be zero extended into the r32 register!
+void asm_thumb_movw_reg_i16(asm_thumb_t *as, uint reg_dest, int i16_src) {
+    asm_thumb_mov_reg_i16(as, OP_MOVW, reg_dest, i16_src);
 }
 
-void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
-    if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
-        asm_thumb_mov_reg_i8(as, reg_dest, i32);
-    } else if (UNSIGNED_FIT16(i32)) {
-        asm_thumb_mov_i16_to_reg(as, i32, reg_dest, false);
-    } else {
-        asm_thumb_mov_reg_i32(as, reg_dest, i32);
-    }
+// the i16_src value will be zero extended into the r32 register!
+void asm_thumb_movt_reg_i16(asm_thumb_t *as, uint reg_dest, int i16_src) {
+    asm_thumb_mov_reg_i16(as, OP_MOVT, reg_dest, i16_src);
 }
 
 void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
@@ -285,9 +269,69 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
     } else {
         op_lo |= 0x80 | (reg_dest - 8);
     }
+    // mov reg_dest, reg_src
     asm_thumb_write_op16(as, 0x4600 | op_lo);
 }
 
+#define OP_SUBS_RLO_RLO_I3(rlo_dest, rlo_src, i3_src) (0x1e00 | ((i3_src) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
+void asm_thumb_subs_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src, int i3_src) {
+    assert(rlo_dest < REG_R8);
+    assert(rlo_src < REG_R8);
+    asm_thumb_write_op16(as, OP_SUBS_RLO_RLO_I3(rlo_dest, rlo_src, i3_src));
+}
+
+#define OP_CMP_RLO_I8(rlo, i8) (0x2800 | ((rlo) << 8) | (i8))
+
+void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    assert(rlo < REG_R8);
+    asm_thumb_write_op16(as, OP_CMP_RLO_I8(rlo, i8));
+}
+
+#define OP_BEQ_N(byte_offset) (0xd000 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BNE_N(byte_offset) (0xd100 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BCS_N(byte_offset) (0xd200 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BCC_N(byte_offset) (0xd300 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BMI_N(byte_offset) (0xd400 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BPL_N(byte_offset) (0xd500 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BVS_N(byte_offset) (0xd600 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BVC_N(byte_offset) (0xd700 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BHI_N(byte_offset) (0xd800 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BLS_N(byte_offset) (0xd900 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BGE_N(byte_offset) (0xda00 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BLT_N(byte_offset) (0xdb00 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BGT_N(byte_offset) (0xdc00 | (((byte_offset) >> 1) & 0x00ff))
+#define OP_BLE_N(byte_offset) (0xdd00 | (((byte_offset) >> 1) & 0x00ff))
+
+void asm_thumb_bgt_n(asm_thumb_t *as, int label) {
+    int dest = get_label_dest(as, label);
+    int rel = dest - as->code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    if (SIGNED_FIT9(rel)) {
+        asm_thumb_write_op16(as, OP_BGT_N(rel));
+    } else {
+        printf("asm_thumb_bgt: branch does not fit in 9 bits\n");
+    }
+}
+
+void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, machine_uint_t i32) {
+    // movw, movt does it in 8 bytes
+    // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
+    asm_thumb_mov_reg_i16(as, OP_MOVW, reg_dest, i32);
+    asm_thumb_mov_reg_i16(as, OP_MOVT, reg_dest, i32 >> 16);
+}
+
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
+    if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
+        asm_thumb_movs_rlo_i8(as, reg_dest, i32);
+    } else if (UNSIGNED_FIT16(i32)) {
+        asm_thumb_mov_reg_i16(as, OP_MOVW, reg_dest, i32);
+    } else {
+        asm_thumb_mov_reg_i32(as, reg_dest, i32);
+    }
+}
+
 #define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 #define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
@@ -351,7 +395,6 @@ void asm_thumb_b_label(asm_thumb_t *as, int label) {
     }
 }
 
-#define OP_CMP_REG_IMM(rlo, i8) (0x2800 | ((rlo) << 8) | (i8))
 // all these bit arithmetics need coverage testing!
 #define OP_BEQ(byte_offset) (0xd000 | (((byte_offset) >> 1) & 0x00ff))
 #define OP_BEQW_HI(byte_offset) (0xf000 | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
@@ -361,7 +404,7 @@ void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label) {
     assert(rlo < REG_R8);
 
     // compare reg with 0
-    asm_thumb_write_op16(as, OP_CMP_REG_IMM(rlo, 0));
+    asm_thumb_write_op16(as, OP_CMP_RLO_I8(rlo, 0));
 
     // branch if equal
     int dest = get_label_dest(as, label);
@@ -369,7 +412,7 @@ void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label) {
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
     if (dest >= 0 && rel <= -4) {
         // is a backwards jump, so we know the size of the jump on the first pass
-        // calculate rel assuming 12 bit relative jump
+        // calculate rel assuming 9 bit relative jump
         if (SIGNED_FIT9(rel)) {
            asm_thumb_write_op16(as, OP_BEQ(rel));
        } else {
...
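Note: the immediate splitting done by asm_thumb_mov_reg_i16 above is easy to get wrong, so here is a small, self-contained check (not part of the commit) that reproduces the same bit arithmetic for one sample value; the immediate 0x1234 and destination register r0 are arbitrary choices for illustration.

/* Standalone sketch reproducing the encoding used by asm_thumb_mov_reg_i16.
 * Thumb-2 MOVW/MOVT scatter a 16-bit immediate over two halfwords as imm4 and
 * i (first halfword) plus imm3, Rd and imm8 (second halfword). */
#include <stdio.h>

int main(void) {
    unsigned int i16_src = 0x1234; // arbitrary test immediate
    unsigned int reg_dest = 0;     // r0

    // same expressions as asm_thumb_mov_reg_i16, with mov_op = OP_MOVW (0xf240)
    unsigned int hw1 = 0xf240 | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf);
    unsigned int hw2 = ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff);

    printf("%04x %04x\n", hw1, hw2); // prints "f241 2034", i.e. movw r0, #0x1234

    // The short conditional branches work similarly: OP_BGT_N keeps a signed,
    // halfword-scaled 9-bit offset in the low byte, so a branch to a label
    // 10 bytes behind the instruction has rel = -10 - 4 = -14 and
    // OP_BGT_N(-14) evaluates to 0xdc00 | ((-14 >> 1) & 0xff) = 0xdcf9.
    return 0;
}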
@@ -38,23 +38,30 @@ void *asm_thumb_get_code(asm_thumb_t *as);
 
 void asm_thumb_entry(asm_thumb_t *as, int num_locals);
 void asm_thumb_exit(asm_thumb_t *as);
 
-int asm_thumb_label_new(asm_thumb_t *as);
 void asm_thumb_label_assign(asm_thumb_t *as, int label);
 
 // argument order follows ARM, in general dest is first
+// note there is a difference between movw and mov.w, and many others!
 
-void asm_thumb_mov_reg_i8(asm_thumb_t *as, uint rlo_dest, int i8_src);
-void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, machine_uint_t i32_src);
-void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src);
+void asm_thumb_movs_rlo_i8(asm_thumb_t *as, uint rlo_dest, int i8_src);
+void asm_thumb_movw_reg_i16(asm_thumb_t *as, uint reg_dest, int i16_src);
+void asm_thumb_movt_reg_i16(asm_thumb_t *as, uint reg_dest, int i16_src);
 void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
-void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src);
-void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num);
-void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint reg_dest, int local_num);
-void asm_thumb_add_reg_reg_reg(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b);
-void asm_thumb_cmp_reg_reg(asm_thumb_t *as, uint rlo_a, uint rlo_b);
-void asm_thumb_ite_ge(asm_thumb_t *as);
-
-void asm_thumb_b_label(asm_thumb_t *as, int label);
-void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label);
-void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp);
+void asm_thumb_subs_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src, int i3_src);
+void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8);
+void asm_thumb_bgt_n(asm_thumb_t *as, int label);
+
+void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, machine_uint_t i32_src); // convenience
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
+void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience
+void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint reg_dest, int local_num); // convenience
+void asm_thumb_add_reg_reg_reg(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b); // convenience ?
+void asm_thumb_cmp_reg_reg(asm_thumb_t *as, uint rlo_a, uint rlo_b); // convenience ?
+void asm_thumb_ite_ge(asm_thumb_t *as); // convenience ?
+void asm_thumb_b_label(asm_thumb_t *as, int label); // convenience ?
+void asm_thumb_cmp_reg_bz_label(asm_thumb_t *as, uint rlo, int label); // convenience ?
+void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp); // convenience ?
@@ -26,6 +26,7 @@ typedef enum {
 } pn_kind_t;
 
 #define EMIT(fun, arg...) (comp->emit_method_table->fun(comp->emit, ##arg))
+#define EMIT_INLINE_ASM(fun, arg...) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, ##arg))
 
 #define EMIT_OPT_NONE (0)
 #define EMIT_OPT_BYTE_CODE (1)
@@ -47,7 +48,6 @@ typedef struct _compiler_t {
     pass_kind_t pass;
 
     int next_label;
-    int max_num_labels;
 
     int break_label;
     int continue_label;
@@ -66,6 +66,9 @@ typedef struct _compiler_t {
 
     emit_t *emit; // current emitter
     const emit_method_table_t *emit_method_table; // current emit method table
+
+    emit_inline_asm_t *emit_inline_asm; // current emitter for inline asm
+    const emit_inline_asm_method_table_t *emit_inline_asm_method_table; // current emit method table for inline asm
 } compiler_t;
 
 py_parse_node_t fold_constants(py_parse_node_t pn) {
@@ -2389,7 +2392,7 @@ void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
             apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param);
         }
 
-        assert(pns->nodes[2] == 0); // 2 is something...
+        assert(PY_PARSE_NODE_IS_NULL(pns->nodes[2])); // 2 is something...
         compile_node(comp, pns->nodes[3]); // 3 is function body
 
         // emit return if it wasn't the last opcode
@@ -2492,9 +2495,77 @@ void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
 
     EMIT(end_pass);
 
-    // update maximim number of labels needed
-    if (comp->next_label > comp->max_num_labels) {
-        comp->max_num_labels = comp->next_label;
-    }
+}
+
+void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+    comp->pass = pass;
+    comp->scope_cur = scope;
+    comp->next_label = 1;
+
+    if (scope->kind != SCOPE_FUNCTION) {
+        printf("Error: inline assembler must be a function\n");
+        return;
+    }
+
+    // get the function definition parse node
+    assert(PY_PARSE_NODE_IS_STRUCT(scope->pn));
+    py_parse_node_struct_t *pns = (py_parse_node_struct_t*)scope->pn;
+    assert(PY_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+    //qstr f_id = PY_PARSE_NODE_LEAF_ARG(pns->nodes[0]); // name
+
+    scope->num_params = 0;
+    assert(PY_PARSE_NODE_IS_NULL(pns->nodes[1])); // arguments
+    assert(PY_PARSE_NODE_IS_NULL(pns->nodes[2])); // type
+
+    py_parse_node_t pn_body = pns->nodes[3]; // body
+    py_parse_node_t *nodes;
+    int num = list_get(&pn_body, PN_suite_block_stmts, &nodes);
+
+    if (comp->pass > PASS_1) {
+        EMIT_INLINE_ASM(start_pass, comp->pass, comp->scope_cur);
+    }
+
+    if (comp->pass == PASS_3) {
+        //printf("----\n");
+        scope_print_info(scope);
+    }
+
+    for (int i = 0; i < num; i++) {
+        assert(PY_PARSE_NODE_IS_STRUCT(nodes[i]));
+        py_parse_node_struct_t *pns2 = (py_parse_node_struct_t*)nodes[i];
+        assert(PY_PARSE_NODE_STRUCT_KIND(pns2) == PN_expr_stmt);
+        assert(PY_PARSE_NODE_IS_STRUCT(pns2->nodes[0]));
+        assert(PY_PARSE_NODE_IS_NULL(pns2->nodes[1]));
+        pns2 = (py_parse_node_struct_t*)pns2->nodes[0];
+        assert(PY_PARSE_NODE_STRUCT_KIND(pns2) == PN_power);
+        assert(PY_PARSE_NODE_IS_ID(pns2->nodes[0]));
+        assert(PY_PARSE_NODE_IS_STRUCT_KIND(pns2->nodes[1], PN_trailer_paren));
+        assert(PY_PARSE_NODE_IS_NULL(pns2->nodes[2]));
+        qstr op = PY_PARSE_NODE_LEAF_ARG(pns2->nodes[0]);
+        pns2 = (py_parse_node_struct_t*)pns2->nodes[1]; // PN_trailer_paren
+        py_parse_node_t *pn_arg;
+        int n_args = list_get(&pns2->nodes[0], PN_arglist, &pn_arg);
+
+        // emit instructions
+        if (strcmp(qstr_str(op), "label") == 0) {
+            if (!(n_args == 1 && PY_PARSE_NODE_IS_ID(pn_arg[0]))) {
+                printf("SyntaxError: inline assembler 'label' requires 1 argument\n");
+                return;
+            }
+            int lab = comp_next_label(comp);
+            if (pass > PASS_1) {
+                EMIT_INLINE_ASM(label, lab, PY_PARSE_NODE_LEAF_ARG(pn_arg[0]));
+            }
+        } else {
+            if (pass > PASS_1) {
+                EMIT_INLINE_ASM(op, op, n_args, pn_arg);
+            }
+        }
+    }
+
+    if (comp->pass > PASS_1) {
+        EMIT_INLINE_ASM(end_pass);
+    }
 }
@@ -2557,55 +2628,81 @@ void py_compile(py_parse_node_t pn) {
     comp->qstr_native = qstr_from_str_static("native");
     comp->qstr_asm_thumb = qstr_from_str_static("asm_thumb");
 
-    comp->max_num_labels = 0;
     comp->break_label = 0;
     comp->continue_label = 0;
     comp->except_nest_level = 0;
     comp->scope_head = NULL;
     comp->scope_cur = NULL;
 
-    comp->emit = emit_pass1_new(comp->qstr___class__);
-    comp->emit_method_table = &emit_pass1_method_table;
-
+    // optimise constants
     pn = fold_constants(pn);
+
+    // set the outer scope
     scope_new_and_link(comp, SCOPE_MODULE, pn, EMIT_OPT_NONE);
 
+    // compile pass 1
+    comp->emit = emit_pass1_new(comp->qstr___class__);
+    comp->emit_method_table = &emit_pass1_method_table;
+    comp->emit_inline_asm = NULL;
+    comp->emit_inline_asm_method_table = NULL;
+    uint max_num_labels = 0;
     for (scope_t *s = comp->scope_head; s != NULL; s = s->next) {
-        compile_scope(comp, s, PASS_1);
+        if (s->emit_options == EMIT_OPT_ASM_THUMB) {
+            compile_scope_inline_asm(comp, s, PASS_1);
+        } else {
+            compile_scope(comp, s, PASS_1);
+        }
+
+        // update maximim number of labels needed
+        if (comp->next_label > max_num_labels) {
+            max_num_labels = comp->next_label;
+        }
     }
 
+    // compute some things related to scope and identifiers
     for (scope_t *s = comp->scope_head; s != NULL; s = s->next) {
         compile_scope_compute_things(comp, s);
     }
 
+    // finish with pass 1
    emit_pass1_free(comp->emit);
 
+    // compile pass 2 and 3
     emit_t *emit_bc = NULL;
     emit_t *emit_x64 = NULL;
+    emit_inline_asm_t *emit_inline_thumb = NULL;
     for (scope_t *s = comp->scope_head; s != NULL; s = s->next) {
-        switch (s->emit_options) {
-            case EMIT_OPT_NATIVE_PYTHON:
-                if (emit_x64 == NULL) {
-                    emit_x64 = emit_x64_new(comp->max_num_labels);
-                }
-                comp->emit = emit_x64;
-                comp->emit_method_table = &emit_x64_method_table;
-                break;
-
-            //case EMIT_OPT_ASM_THUMB:
-                //if (em
+        if (s->emit_options == EMIT_OPT_ASM_THUMB) {
+            if (emit_inline_thumb == NULL) {
+                emit_inline_thumb = emit_inline_thumb_new(max_num_labels);
+            }
+            comp->emit = NULL;
+            comp->emit_method_table = NULL;
+            comp->emit_inline_asm = emit_inline_thumb;
+            comp->emit_inline_asm_method_table = &emit_inline_thumb_method_table;
+            compile_scope_inline_asm(comp, s, PASS_2);
+            compile_scope_inline_asm(comp, s, PASS_3);
+        } else {
+            switch (s->emit_options) {
+                case EMIT_OPT_NATIVE_PYTHON:
+                    if (emit_x64 == NULL) {
+                        emit_x64 = emit_x64_new(max_num_labels);
+                    }
+                    comp->emit = emit_x64;
+                    comp->emit_method_table = &emit_x64_method_table;
+                    break;
 
             default:
                 if (emit_bc == NULL) {
-                    emit_bc = emit_bc_new(comp->max_num_labels);
+                    emit_bc = emit_bc_new(max_num_labels);
                 }
                 comp->emit = emit_bc;
                 comp->emit_method_table = &emit_bc_method_table;
                 break;
-        }
-        compile_scope(comp, s, PASS_2);
-        compile_scope(comp, s, PASS_3);
+            }
+            compile_scope(comp, s, PASS_2);
+            compile_scope(comp, s, PASS_3);
+        }
     }
 
     m_free(comp);
...
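Note: for readers unfamiliar with the parser's node kinds, the sketch below (not part of the commit) spells out the statement shape that compile_scope_inline_asm's asserts enforce; the example op name movs and its arguments are hypothetical, since the set of accepted ops lives in emitinlinethumb.c, which this view does not show.

/* Sketch of what each line of an inline-assembler function body must parse as.
 * A hypothetical statement
 *
 *     movs(r0, 5)
 *
 * becomes:
 *
 *     PN_expr_stmt
 *       nodes[0] = PN_power
 *                    nodes[0] = ID "movs"              -> qstr op
 *                    nodes[1] = PN_trailer_paren
 *                                 nodes[0] = arg list  -> pn_arg[0..n_args-1]
 *                    nodes[2] = NULL
 *       nodes[1] = NULL
 *
 * "label(name)" is intercepted by the compiler, which allocates the label
 * number itself and forwards it via EMIT_INLINE_ASM(label, ...); any other op
 * name is passed through unchanged via EMIT_INLINE_ASM(op, ...). The helper
 * below restates the outer checks as a predicate. */
#include <stdbool.h>
#include "parse.h" // assumed: py_parse_node_* macros; the PN_* kinds come from the compiler's grammar table as at the top of compile.c

static bool is_inline_asm_stmt(py_parse_node_t pn) {
    if (!PY_PARSE_NODE_IS_STRUCT(pn)) {
        return false;
    }
    py_parse_node_struct_t *pns = (py_parse_node_struct_t*)pn;
    if (PY_PARSE_NODE_STRUCT_KIND(pns) != PN_expr_stmt
        || !PY_PARSE_NODE_IS_STRUCT(pns->nodes[0])
        || !PY_PARSE_NODE_IS_NULL(pns->nodes[1])) {
        return false;
    }
    pns = (py_parse_node_struct_t*)pns->nodes[0];
    return PY_PARSE_NODE_STRUCT_KIND(pns) == PN_power
        && PY_PARSE_NODE_IS_ID(pns->nodes[0])
        && PY_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_trailer_paren)
        && PY_PARSE_NODE_IS_NULL(pns->nodes[2]);
}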
@@ -128,3 +128,16 @@ emit_t *emit_cpython_new(uint max_num_labels);
 emit_t *emit_bc_new(uint max_num_labels);
 emit_t *emit_x64_new(uint max_num_labels);
 emit_t *emit_thumb_new(uint max_num_labels);
+
+typedef struct _emit_inline_asm_t emit_inline_asm_t;
+
+typedef struct _emit_inline_asm_method_table_t {
+    void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope);
+    void (*end_pass)(emit_inline_asm_t *emit);
+    void (*label)(emit_inline_asm_t *emit, int label_num, qstr label_id);
+    void (*op)(emit_inline_asm_t *emit, qstr op, int n_args, py_parse_node_t *args);
+} emit_inline_asm_method_table_t;
+
+extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table;
+
+emit_inline_asm_t *emit_inline_thumb_new(uint max_num_labels);
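Note: emitinlinethumb.c itself is not part of this view, so the skeleton below is only an illustration of how an emitter could sit behind the four-entry method table declared above; the struct layout, the pass mapping, and the op dispatch are assumptions, not the actual implementation.

/* Illustrative skeleton only; field names, the PASS_2/PASS_3 mapping onto the
 * assembler's own passes, and the op dispatch are assumed, not taken from the
 * real emitinlinethumb.c. */
#include <string.h>
#include "asmthumb.h" // assumed include for the asm_thumb_* API shown above

struct _emit_inline_asm_t {
    asm_thumb_t *as; // underlying Thumb assembler (assumed field)
};

static void skel_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope) {
    // PASS_2 would size the code, PASS_3 would emit it (assumed mapping onto
    // the assembler's ASM_THUMB_PASS_* constants)
    asm_thumb_start_pass(emit->as, pass == PASS_3 ? ASM_THUMB_PASS_2 : ASM_THUMB_PASS_1);
    asm_thumb_entry(emit->as, 0);
}

static void skel_end_pass(emit_inline_asm_t *emit) {
    asm_thumb_exit(emit->as);
}

static void skel_label(emit_inline_asm_t *emit, int label_num, qstr label_id) {
    // the compiler already allocated label_num via comp_next_label; just bind it
    asm_thumb_label_assign(emit->as, label_num);
}

static void skel_op(emit_inline_asm_t *emit, qstr op, int n_args, py_parse_node_t *args) {
    // dispatch on the op name; decoding of register/immediate arguments omitted
    if (strcmp(qstr_str(op), "movs") == 0 && n_args == 2) {
        asm_thumb_movs_rlo_i8(emit->as, 0 /* decoded rlo */, 0 /* decoded i8 */);
    }
    // cmp, subs, bgt, ... would follow the same pattern
}

const emit_inline_asm_method_table_t skel_method_table = {
    skel_start_pass,
    skel_end_pass,
    skel_label,
    skel_op,
};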
@@ -17,7 +17,6 @@
 
 struct _emit_t {
     pass_kind_t pass;
-    int next_label;
     int stack_size;
     bool last_emit_was_return_value;
@@ -55,7 +54,6 @@ static void emit_bc_set_native_types(emit_t *emit, bool do_native_types) {
 
 static void emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
     emit->pass = pass;
-    emit->next_label = 1;
     emit->stack_size = 0;
     emit->last_emit_was_return_value = false;
     emit->scope = scope;
...
@@ -18,7 +18,6 @@
 
 struct _emit_t {
     int pass;
-    int next_label;
     int byte_code_offset;
     int stack_size;