TASTE / uPython-mirror · Commits · 36db6bcf

Commit 36db6bcf, authored May 07, 2014 by Damien George

py, compiler: Improve passes; add an extra pass for native emitter.

Parent: ca25c15d
Changes: 12 files
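In outline: the compiler-wide pass numbers PASS_1/PASS_2/PASS_3 become the named passes MP_PASS_SCOPE, MP_PASS_STACK_SIZE, MP_PASS_CODE_SIZE and MP_PASS_EMIT (see py/emit.h below), the native emitters gain the extra MP_PASS_STACK_SIZE pass, and the Thumb/x64 assemblers move from numbered passes to ASM_*_PASS_COMPUTE / ASM_*_PASS_EMIT. The stand-alone program below is an illustrative sketch only, not code from this commit (every *_toy name is made up); it shows the compute-then-emit pattern asm_thumb.c and asm_x64.c follow: the first pass only measures, the buffer is allocated once the size is known, and the second pass re-runs the same emit calls and writes for real.

#include <stdio.h>
#include <stdlib.h>

enum { PASS_COMPUTE_TOY = 1, PASS_EMIT_TOY = 2 };

typedef struct {
    unsigned pass;
    unsigned code_offset;
    unsigned code_size;
    unsigned char *code_base;
} toy_asm_t;

// every emitted byte goes through here (cf. asm_thumb_get_cur_to_write_bytes)
static unsigned char *toy_get_cur_to_write_bytes(toy_asm_t *as, unsigned n) {
    static unsigned char dummy[8];
    if (as->pass < PASS_EMIT_TOY) {
        as->code_offset += n;               // compute pass: only count bytes
        return dummy;
    }
    unsigned char *c = as->code_base + as->code_offset;
    as->code_offset += n;                   // emit pass: really write
    return c;
}

static void toy_op16(toy_asm_t *as, unsigned op) {
    unsigned char *c = toy_get_cur_to_write_bytes(as, 2);
    c[0] = op & 0xff;
    c[1] = (op >> 8) & 0xff;
}

int main(void) {
    toy_asm_t as = {0};
    for (unsigned pass = PASS_COMPUTE_TOY; pass <= PASS_EMIT_TOY; pass++) {
        as.pass = pass;
        as.code_offset = 0;
        toy_op16(&as, 0x1234);              // opcode values are arbitrary here
        toy_op16(&as, 0x5678);
        if (pass == PASS_COMPUTE_TOY) {
            as.code_size = as.code_offset;          // size known after compute pass
            as.code_base = malloc(as.code_size);    // allocate once, emit pass fills it
        }
    }
    printf("emitted %u bytes\n", as.code_size);
    free(as.code_base);
    return 0;
}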
py/asmthumb.c
@@ -42,7 +42,7 @@
 #define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)
 
 struct _asm_thumb_t {
-    int pass;
+    uint pass;
     uint code_offset;
     uint code_size;
     byte *code_base;

@@ -58,14 +58,9 @@ struct _asm_thumb_t {
 asm_thumb_t *asm_thumb_new(uint max_num_labels) {
     asm_thumb_t *as;
 
-    as = m_new(asm_thumb_t, 1);
-    as->pass = 0;
-    as->code_offset = 0;
-    as->code_size = 0;
-    as->code_base = NULL;
+    as = m_new0(asm_thumb_t, 1);
     as->max_num_labels = max_num_labels;
     as->label_offsets = m_new(int, max_num_labels);
-    as->num_locals = 0;
 
     return as;
 }
@@ -89,16 +84,16 @@ void asm_thumb_free(asm_thumb_t *as, bool free_code) {
     m_del_obj(asm_thumb_t, as);
 }
 
-void asm_thumb_start_pass(asm_thumb_t *as, int pass) {
+void asm_thumb_start_pass(asm_thumb_t *as, uint pass) {
     as->pass = pass;
     as->code_offset = 0;
-    if (pass == ASM_THUMB_PASS_2) {
+    if (pass == ASM_THUMB_PASS_COMPUTE) {
         memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
     }
 }
 
 void asm_thumb_end_pass(asm_thumb_t *as) {
-    if (as->pass == ASM_THUMB_PASS_2) {
+    if (as->pass == ASM_THUMB_PASS_COMPUTE) {
         // calculate size of code in bytes
         as->code_size = as->code_offset;
         as->code_base = m_new(byte, as->code_size);

@@ -120,7 +115,7 @@ void asm_thumb_end_pass(asm_thumb_t *as) {
 // all functions must go through this one to emit bytes
 STATIC byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int num_bytes_to_write) {
     //printf("emit %d\n", num_bytes_to_write);
-    if (as->pass < ASM_THUMB_PASS_3) {
+    if (as->pass < ASM_THUMB_PASS_EMIT) {
         as->code_offset += num_bytes_to_write;
         return as->dummy_data;
     } else {
@@ -224,12 +219,12 @@ void asm_thumb_exit(asm_thumb_t *as) {
 void asm_thumb_label_assign(asm_thumb_t *as, uint label) {
     assert(label < as->max_num_labels);
-    if (as->pass == ASM_THUMB_PASS_2) {
+    if (as->pass < ASM_THUMB_PASS_EMIT) {
         // assign label offset
         assert(as->label_offsets[label] == -1);
         as->label_offsets[label] = as->code_offset;
-    } else if (as->pass == ASM_THUMB_PASS_3) {
-        // ensure label offset has not changed from PASS_2 to PASS_3
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
         //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
         assert(as->label_offsets[label] == as->code_offset);
     }
@@ -383,20 +378,35 @@ void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
     }
 }
 
+// i32 is stored as a full word in the code, and aligned to machine-word boundary
+// TODO this is very inefficient, improve it!
+void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) {
+    // align on machine-word + 2
+    if ((as->code_offset & 3) == 0) {
+        asm_thumb_op16(as, ASM_THUMB_OP_NOP);
+    }
+    // jump over the i32 value (instruction prefect adds 4 to PC)
+    asm_thumb_op16(as, OP_B_N(0));
+    // store i32 on machine-word aligned boundary
+    asm_thumb_data(as, 4, i32);
+    // do the actual load of the i32 value
+    asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32);
+}
+
 #define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 #define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 
 void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
     assert(rlo_src < REG_R8);
     int word_offset = as->num_locals - local_num - 1;
-    assert(as->pass < ASM_THUMB_PASS_3 || word_offset >= 0);
+    assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
     asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
 }
 
 void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
     assert(rlo_dest < REG_R8);
     int word_offset = as->num_locals - local_num - 1;
-    assert(as->pass < ASM_THUMB_PASS_3 || word_offset >= 0);
+    assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
     asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
 }

@@ -405,7 +415,7 @@ void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
 void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) {
     assert(rlo_dest < REG_R8);
     int word_offset = as->num_locals - local_num - 1;
-    assert(as->pass < ASM_THUMB_PASS_3 || word_offset >= 0);
+    assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
     asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
 }
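The new asm_thumb_mov_reg_i32_aligned above pads with a NOP only when the current offset is already a multiple of 4: the optional 2-byte NOP plus the 2-byte b.n that skips the literal then leave the 4-byte i32 on a machine-word boundary. A small stand-alone check of that arithmetic (sketch only; literal_offset is a made-up helper, not part of the commit):

#include <stdio.h>

static unsigned literal_offset(unsigned code_offset) {
    if ((code_offset & 3) == 0) {
        code_offset += 2;   // the NOP emitted when already word aligned
    }
    code_offset += 2;       // the b.n instruction that skips the literal
    return code_offset;     // where the 4-byte i32 ends up
}

int main(void) {
    for (unsigned off = 0; off <= 2; off += 2) {    // Thumb offsets are 2-byte aligned
        unsigned lit = literal_offset(off);
        printf("code_offset %u -> literal at %u (word aligned: %s)\n",
               off, lit, (lit & 3) == 0 ? "yes" : "no");
    }
    return 0;
}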
py/asmthumb.h
@@ -24,9 +24,8 @@
  * THE SOFTWARE.
  */
 
-#define ASM_THUMB_PASS_1 (1)
-#define ASM_THUMB_PASS_2 (2)
-#define ASM_THUMB_PASS_3 (3)
+#define ASM_THUMB_PASS_COMPUTE (1)
+#define ASM_THUMB_PASS_EMIT (2)
 
 #define REG_R0 (0)
 #define REG_R1 (1)

@@ -71,7 +70,7 @@ typedef struct _asm_thumb_t asm_thumb_t;
 asm_thumb_t *asm_thumb_new(uint max_num_labels);
 void asm_thumb_free(asm_thumb_t *as, bool free_code);
-void asm_thumb_start_pass(asm_thumb_t *as, int pass);
+void asm_thumb_start_pass(asm_thumb_t *as, uint pass);
 void asm_thumb_end_pass(asm_thumb_t *as);
 uint asm_thumb_get_code_size(asm_thumb_t *as);
 void *asm_thumb_get_code(asm_thumb_t *as);

@@ -188,6 +187,7 @@ void asm_thumb_bcc_n(asm_thumb_t *as, int cond, uint label);
 void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, machine_uint_t i32_src); // convenience
 void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
+void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32); // convenience
 void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience
 void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
 void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
py/asmx64.c
@@ -112,7 +112,7 @@
 #define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
 
 struct _asm_x64_t {
-    int pass;
+    uint pass;
     uint code_offset;
     uint code_size;
     byte *code_base;

@@ -138,14 +138,9 @@ void *alloc_mem(uint req_size, uint *alloc_size, bool is_exec) {
 asm_x64_t *asm_x64_new(uint max_num_labels) {
     asm_x64_t *as;
 
-    as = m_new(asm_x64_t, 1);
-    as->pass = 0;
-    as->code_offset = 0;
-    as->code_size = 0;
-    as->code_base = NULL;
+    as = m_new0(asm_x64_t, 1);
     as->max_num_labels = max_num_labels;
     as->label_offsets = m_new(int, max_num_labels);
-    as->num_locals = 0;
 
     return as;
 }
@@ -170,17 +165,17 @@ void asm_x64_free(asm_x64_t *as, bool free_code) {
     m_del_obj(asm_x64_t, as);
 }
 
-void asm_x64_start_pass(asm_x64_t *as, int pass) {
+void asm_x64_start_pass(asm_x64_t *as, uint pass) {
     as->pass = pass;
     as->code_offset = 0;
-    if (pass == ASM_X64_PASS_2) {
+    if (pass == ASM_X64_PASS_COMPUTE) {
         // reset all labels
         memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
     }
 }
 
 void asm_x64_end_pass(asm_x64_t *as) {
-    if (as->pass == ASM_X64_PASS_2) {
+    if (as->pass == ASM_X64_PASS_COMPUTE) {
         // calculate size of code in bytes
         as->code_size = as->code_offset;
         //as->code_base = m_new(byte, as->code_size); need to allocale executable memory

@@ -204,7 +199,7 @@ void asm_x64_end_pass(asm_x64_t *as) {
 // all functions must go through this one to emit bytes
 STATIC byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int num_bytes_to_write) {
     //printf("emit %d\n", num_bytes_to_write);
-    if (as->pass < ASM_X64_PASS_3) {
+    if (as->pass < ASM_X64_PASS_EMIT) {
         as->code_offset += num_bytes_to_write;
         return as->dummy_data;
     } else {
@@ -367,6 +362,15 @@ void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r
     }
 }
 
+// src_i64 is stored as a full word in the code, and aligned to machine-word boundary
+void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // mov instruction uses 2 bytes for the instruction, before the i64
+    while (((as->code_offset + 2) & (WORD_SIZE - 1)) != 0) {
+        asm_x64_nop(as);
+    }
+    asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
+}
+
 void asm_x64_mov_i32_to_disp(asm_x64_t *as, int src_i32, int dest_r32, int dest_disp) {
     assert(0);

@@ -487,12 +491,12 @@ void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8) {
 void asm_x64_label_assign(asm_x64_t *as, int label) {
     assert(label < as->max_num_labels);
-    if (as->pass == ASM_X64_PASS_2) {
+    if (as->pass < ASM_X64_PASS_EMIT) {
         // assign label offset
         assert(as->label_offsets[label] == -1);
         as->label_offsets[label] = as->code_offset;
-    } else if (as->pass == ASM_X64_PASS_3) {
-        // ensure label offset has not changed from PASS_2 to PASS_3
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
         //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
         assert(as->label_offsets[label] == as->code_offset);
     }
py/asmx64.h
@@ -24,9 +24,8 @@
  * THE SOFTWARE.
  */
 
-#define ASM_X64_PASS_1 (1)
-#define ASM_X64_PASS_2 (2)
-#define ASM_X64_PASS_3 (3)
+#define ASM_X64_PASS_COMPUTE (1)
+#define ASM_X64_PASS_EMIT (2)
 
 #define REG_RAX (0)
 #define REG_RCX (1)

@@ -54,7 +53,7 @@ typedef struct _asm_x64_t asm_x64_t;
 asm_x64_t *asm_x64_new(uint max_num_labels);
 void asm_x64_free(asm_x64_t *as, bool free_code);
-void asm_x64_start_pass(asm_x64_t *as, int pass);
+void asm_x64_start_pass(asm_x64_t *as, uint pass);
 void asm_x64_end_pass(asm_x64_t *as);
 uint asm_x64_get_code_size(asm_x64_t *as);
 void *asm_x64_get_code(asm_x64_t *as);

@@ -71,6 +70,7 @@ void asm_x64_mov_i32_to_r64(asm_x64_t* as, int src_i32, int dest_r64);
 void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_i32_to_disp(asm_x64_t *as, int src_i32, int dest_r32, int dest_disp);
 void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
+void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64);
 void asm_x64_xor_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64);
 void asm_x64_add_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64);
 void asm_x64_add_i32_to_r32(asm_x64_t *as, int src_i32, int dest_r32);
py/compile.c
@@ -994,7 +994,7 @@ void compile_funcdef_param(compiler_t *comp, mp_parse_node_t pn) {
 // leaves function object on stack
 // returns function name
 qstr compile_funcdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         // create a new scope for this function
         scope_t *s = scope_new_and_link(comp, SCOPE_FUNCTION, (mp_parse_node_t)pns, emit_options);
         // store the function scope so the compiling function can use it at each pass

@@ -1043,7 +1043,7 @@ qstr compile_funcdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint
 // leaves class object on stack
 // returns class name
 qstr compile_classdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         // create a new scope for this class
         scope_t *s = scope_new_and_link(comp, SCOPE_CLASS, (mp_parse_node_t)pns, emit_options);
         // store the class scope so the compiling function can use it at each pass

@@ -1510,7 +1510,7 @@ void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) {
 }
 
 void compile_global_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         if (MP_PARSE_NODE_IS_LEAF(pns->nodes[0])) {
             scope_declare_global(comp->scope_cur, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]));
         } else {

@@ -1524,7 +1524,7 @@ void compile_global_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
 }
 
 void compile_nonlocal_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         if (MP_PARSE_NODE_IS_LEAF(pns->nodes[0])) {
             scope_declare_nonlocal(comp->scope_cur, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]));
         } else {

@@ -2056,7 +2056,7 @@ void compile_expr_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
             && MP_PARSE_NODE_IS_STRUCT_KIND(((mp_parse_node_struct_t*)pns1->nodes[0])->nodes[1], PN_trailer_paren)
             && MP_PARSE_NODE_IS_NULL(((mp_parse_node_struct_t*)pns1->nodes[0])->nodes[2])
             ) {
-            if (comp->pass == PASS_1) {
+            if (comp->pass == MP_PASS_SCOPE) {
                 qstr const_id = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
                 if (!MP_PARSE_NODE_IS_SMALL_INT(((mp_parse_node_struct_t*)((mp_parse_node_struct_t*)pns1->nodes[0])->nodes[1])->nodes[0])) {
@@ -2153,7 +2153,7 @@ void compile_lambdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
     //mp_parse_node_t pn_params = pns->nodes[0];
     //mp_parse_node_t pn_body = pns->nodes[1];
 
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         // create a new scope for this lambda
         scope_t *s = scope_new_and_link(comp, SCOPE_LAMBDA, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
         // store the lambda scope so the compiling function (this one) can use it at each pass

@@ -2499,7 +2499,7 @@ void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_
     assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
     mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t*)pns->nodes[1];
 
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         // create a new scope for this comprehension
         scope_t *s = scope_new_and_link(comp, kind, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
         // store the comprehension scope so the compiling function (this one) can use it at each pass

@@ -3020,7 +3020,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
     comp->next_label = 1;
     EMIT_ARG(start_pass, pass, scope);
 
-    if (comp->pass == PASS_1) {
+    if (comp->pass == MP_PASS_SCOPE) {
         // reset maximum stack sizes in scope
         // they will be computed in this first pass
         scope->stack_size = 0;

@@ -3028,7 +3028,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
     }
 
 #if MICROPY_EMIT_CPYTHON
-    if (comp->pass == PASS_3) {
+    if (comp->pass == MP_PASS_EMIT) {
         scope_print_info(scope);
     }
 #endif

@@ -3053,7 +3053,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
 
         // work out number of parameters, keywords and default parameters, and add them to the id_info array
        // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
-        if (comp->pass == PASS_1) {
+        if (comp->pass == MP_PASS_SCOPE) {
            comp->have_star = false;
            apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param);
        }
@@ -3073,7 +3073,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
 
         // work out number of parameters, keywords and default parameters, and add them to the id_info array
         // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
-        if (comp->pass == PASS_1) {
+        if (comp->pass == MP_PASS_SCOPE) {
             comp->have_star = false;
             apply_to_single_or_list(comp, pns->nodes[0], PN_varargslist, compile_scope_lambda_param);
         }

@@ -3104,7 +3104,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
 #else
         qstr qstr_arg = MP_QSTR_;
 #endif
-        if (comp->pass == PASS_1) {
+        if (comp->pass == MP_PASS_SCOPE) {
             bool added;
             id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qstr_arg, &added);
             assert(added);

@@ -3141,7 +3141,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
         mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
         assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_classdef);
 
-        if (comp->pass == PASS_1) {
+        if (comp->pass == MP_PASS_SCOPE) {
             bool added;
             id_info_t *id_info = scope_find_or_add_id(scope, MP_QSTR___class__, &added);
             assert(added);

@@ -3177,6 +3177,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
 }
 
 #if MICROPY_EMIT_INLINE_THUMB
+// requires 3 passes: SCOPE, CODE_SIZE, EMIT
 STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
     comp->pass = pass;
     comp->scope_cur = scope;
@@ -3187,7 +3188,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
         return;
     }
 
-    if (comp->pass > PASS_1) {
+    if (comp->pass > MP_PASS_SCOPE) {
         EMIT_INLINE_ASM_ARG(start_pass, comp->pass, comp->scope_cur);
     }

@@ -3199,7 +3200,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
     //qstr f_id = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); // function name
 
     // parameters are in pns->nodes[1]
-    if (comp->pass == PASS_2) {
+    if (comp->pass == MP_PASS_CODE_SIZE) {
         mp_parse_node_t *pn_params;
         int n_params = list_get(&pns->nodes[1], PN_typedargslist, &pn_params);
         scope->num_pos_args = EMIT_INLINE_ASM_ARG(count_params, n_params, pn_params);

@@ -3212,7 +3213,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
     int num = list_get(&pn_body, PN_suite_block_stmts, &nodes);
 
     /*
-    if (comp->pass == PASS_3) {
+    if (comp->pass == MP_PASS_EMIT) {
         //printf("----\n");
         scope_print_info(scope);
     }

@@ -3250,7 +3251,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
                 return;
             }
             uint lab = comp_next_label(comp);
-            if (pass > PASS_1) {
+            if (pass > MP_PASS_SCOPE) {
                 EMIT_INLINE_ASM_ARG(label, lab, MP_PARSE_NODE_LEAF_ARG(pn_arg[0]));
             }
         } else if (op == MP_QSTR_align) {

@@ -3258,7 +3259,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
                 compile_syntax_error(comp, nodes[i], "inline assembler 'align' requires 1 argument");
                 return;
             }
-            if (pass > PASS_1) {
+            if (pass > MP_PASS_SCOPE) {
                 EMIT_INLINE_ASM_ARG(align, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]));
             }
         } else if (op == MP_QSTR_data) {

@@ -3266,7 +3267,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
                 compile_syntax_error(comp, nodes[i], "inline assembler 'data' requires at least 2 arguments");
                 return;
             }
-            if (pass > PASS_1) {
+            if (pass > MP_PASS_SCOPE) {
                 machine_int_t bytesize = MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]);
                 for (uint i = 1; i < n_args; i++) {
                     if (!MP_PARSE_NODE_IS_SMALL_INT(pn_arg[i])) {
@@ -3277,13 +3278,13 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind
                 }
             }
         } else {
-            if (pass > PASS_1) {
+            if (pass > MP_PASS_SCOPE) {
                 EMIT_INLINE_ASM_ARG(op, op, n_args, pn_arg);
             }
         }
     }
 
-    if (comp->pass > PASS_1) {
+    if (comp->pass > MP_PASS_SCOPE) {
         bool success = EMIT_INLINE_ASM(end_pass);
         if (!success) {
             comp->had_error = true;

@@ -3438,10 +3439,10 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
         if (false) {
 #if MICROPY_EMIT_INLINE_THUMB
         } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) {
-            compile_scope_inline_asm(comp, s, PASS_1);
+            compile_scope_inline_asm(comp, s, MP_PASS_SCOPE);
 #endif
         } else {
-            compile_scope(comp, s, PASS_1);
+            compile_scope(comp, s, MP_PASS_SCOPE);
         }
 
         // update maximim number of labels needed

@@ -3482,9 +3483,9 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
             comp->emit_method_table = NULL;
             comp->emit_inline_asm = emit_inline_thumb;
             comp->emit_inline_asm_method_table = &emit_inline_thumb_method_table;
-            compile_scope_inline_asm(comp, s, PASS_2);
+            compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE);
             if (!comp->had_error) {
-                compile_scope_inline_asm(comp, s, PASS_3);
+                compile_scope_inline_asm(comp, s, MP_PASS_EMIT);
             }
 #endif

@@ -3514,6 +3515,10 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
 #endif
                     comp->emit = emit_native;
                     comp->emit_method_table->set_native_types(comp->emit, s->emit_options == MP_EMIT_OPT_VIPER);
+
+                    // native emitters need an extra pass to compute stack size
+                    compile_scope(comp, s, MP_PASS_STACK_SIZE);
+
                     break;
 #endif // MICROPY_EMIT_NATIVE

@@ -3527,10 +3532,14 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
             }
 #endif // !MICROPY_EMIT_CPYTHON
 
-            // compile pass 2 and pass 3
-            compile_scope(comp, s, PASS_2);
+            // second last pass: compute code size
+            if (!comp->had_error) {
+                compile_scope(comp, s, MP_PASS_CODE_SIZE);
+            }
+
+            // final pass: emit code
             if (!comp->had_error) {
-                compile_scope(comp, s, PASS_3);
+                compile_scope(comp, s, MP_PASS_EMIT);
             }
         }
     }
py/emit.h
@@ -35,9 +35,10 @@
  */
 
 typedef enum {
-    PASS_1 = 1, // work out id's and their kind, and number of labels
-    PASS_2 = 2, // work out stack size and code size and label offsets
-    PASS_3 = 3, // emit code
+    MP_PASS_SCOPE = 1,      // work out id's and their kind, and number of labels
+    MP_PASS_STACK_SIZE = 2, // work out maximum stack size
+    MP_PASS_CODE_SIZE = 3,  // work out code size and label offsets
+    MP_PASS_EMIT = 4,       // emit code
 } pass_kind_t;
 
 #define MP_EMIT_STAR_FLAG_SINGLE (0x01)
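Putting the emit.h and compile.c changes together, the per-scope sequence after this commit is: MP_PASS_SCOPE for every scope, an extra MP_PASS_STACK_SIZE only when a native emitter is selected, then MP_PASS_CODE_SIZE and MP_PASS_EMIT, each run only if no error has occurred. A condensed, hypothetical driver (toy code; compile_scope_toy and is_native_emitter are stand-ins, not the real MicroPython API; the pass values and comments are the ones defined above):

#include <stdbool.h>
#include <stdio.h>

typedef enum {
    MP_PASS_SCOPE = 1,      // work out id's and their kind, and number of labels
    MP_PASS_STACK_SIZE = 2, // work out maximum stack size
    MP_PASS_CODE_SIZE = 3,  // work out code size and label offsets
    MP_PASS_EMIT = 4,       // emit code
} pass_kind_t;

static void compile_scope_toy(const char *scope_name, pass_kind_t pass) {
    printf("compile %s: pass %d\n", scope_name, pass);
}

int main(void) {
    bool is_native_emitter = true;  // hypothetical flag for this sketch
    bool had_error = false;

    compile_scope_toy("f", MP_PASS_SCOPE);
    if (is_native_emitter) {
        // native emitters need an extra pass to compute stack size
        compile_scope_toy("f", MP_PASS_STACK_SIZE);
    }
    if (!had_error) {
        compile_scope_toy("f", MP_PASS_CODE_SIZE);  // second last pass: compute code size
    }
    if (!had_error) {
        compile_scope_toy("f", MP_PASS_EMIT);       // final pass: emit code
    }
    return 0;
}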
py/emitbc.c
@@ -83,7 +83,7 @@ void emit_bc_free(emit_t *emit) {
 
 // all functions must go through this one to emit code info
 STATIC byte *emit_get_cur_to_write_code_info(emit_t *emit, int num_bytes_to_write) {
     //printf("emit %d\n", num_bytes_to_write);
-    if (emit->pass < PASS_3) {
+    if (emit->pass < MP_PASS_EMIT) {
         emit->code_info_offset += num_bytes_to_write;
         return emit->dummy_data;
     } else {

@@ -123,7 +123,7 @@ STATIC void emit_write_code_info_bytes_lines(emit_t* emit, uint bytes_to_skip, u
 
 // all functions must go through this one to emit byte code
 STATIC byte *emit_get_cur_to_write_byte_code(emit_t *emit, int num_bytes_to_write) {
     //printf("emit %d\n", num_bytes_to_write);
-    if (emit->pass < PASS_3) {
+    if (emit->pass < MP_PASS_EMIT) {
         emit->byte_code_offset += num_bytes_to_write;
         return emit->dummy_data;
     } else {

@@ -221,7 +221,7 @@ STATIC void emit_write_byte_code_byte_qstr(emit_t* emit, byte b, qstr qstr) {
 // unsigned labels are relative to ip following this instruction, stored as 16 bits
 STATIC void emit_write_byte_code_byte_unsigned_label(emit_t* emit, byte b1, uint label) {
     uint byte_code_offset;
-    if (emit->pass < PASS_3) {
+    if (emit->pass < MP_PASS_EMIT) {
         byte_code_offset = 0;
     } else {
         byte_code_offset = emit->label_offsets[label] - emit->byte_code_offset - 3;
@@ -235,7 +235,7 @@ STATIC void emit_write_byte_code_byte_unsigned_label(emit_t* emit, byte b1, uint
 // signed labels are relative to ip following this instruction, stored as 16 bits, in excess
 STATIC void emit_write_byte_code_byte_signed_label(emit_t* emit, byte b1, uint label) {
     int byte_code_offset;
-    if (emit->pass < PASS_3) {
+    if (emit->pass < MP_PASS_EMIT) {
         byte_code_offset = 0;
     } else {
         byte_code_offset = emit->label_offsets[label] - emit->byte_code_offset - 3 + 0x8000;

@@ -256,13 +256,13 @@ STATIC void emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
     emit->scope = scope;
     emit->last_source_line_offset = 0;
     emit->last_source_line = 1;
-    if (pass == PASS_2) {
+    if (pass < MP_PASS_EMIT) {
         memset(emit->label_offsets, -1, emit->max_num_labels * sizeof(uint));
     }
     emit->byte_code_offset = 0;
     emit->code_info_offset = 0;
-    // write code info size (don't know size at this stage in PASS_2 so need to use maximum space (4 bytes) to write it)
+    // write code info size; use maximum space (4 bytes) to write it; TODO possible optimise this
     {
         byte *c = emit_get_cur_to_write_code_info(emit, 4);
         machine_uint_t s = emit->code_info_size;
@@ -319,13 +319,13 @@ STATIC void emit_bc_end_pass(emit_t *emit) {
     *emit_get_cur_to_write_code_info(emit, 1) = 0; // end of line number info
     emit_align_code_info_to_machine_word(emit); // align so that following byte_code is aligned
 
-    if (emit->pass == PASS_2) {
+    if (emit->pass == MP_PASS_CODE_SIZE) {
         // calculate size of code in bytes
         emit->code_info_size = emit->code_info_offset;
         emit->byte_code_size = emit->byte_code_offset;
         emit->code_base = m_new0(byte, emit->code_info_size + emit->byte_code_size);
-    } else if (emit->pass == PASS_3) {
+    } else if (emit->pass == MP_PASS_EMIT) {
         qstr *arg_names = m_new(qstr, emit->scope->num_pos_args + emit->scope->num_kwonly_args);
         for (int i = 0; i < emit->scope->num_pos_args + emit->scope->num_kwonly_args; i++) {
             arg_names[i] = emit->scope->id_info[i].qstr;

@@ -383,12 +383,12 @@ STATIC void emit_bc_pre(emit_t *emit, int stack_size_delta) {
 
 STATIC void emit_bc_label_assign(emit_t *emit, uint l) {
     emit_bc_pre(emit, 0);
     assert(l < emit->max_num_labels);
-    if (emit->pass == PASS_2) {
+    if (emit->pass < MP_PASS_EMIT) {
         // assign label offset
         assert(emit->label_offsets[l] == -1);
         emit->label_offsets[l] = emit->byte_code_offset;
-    } else if (emit->pass == PASS_3) {
-        // ensure label offset has not changed from PASS_2 to PASS_3
+    } else {
+        // ensure label offset has not changed from MP_PASS_CODE_SIZE to MP_PASS_EMIT
         //printf("l%d: (at %d vs %d)\n", l, emit->byte_code_offset, emit->label_offsets[l]);
         assert(emit->label_offsets[l] == emit->byte_code_offset);
     }

@@ -827,17 +827,13 @@ STATIC void emit_bc_raise_varargs(emit_t *emit, int n_args) {
 
 STATIC void emit_bc_yield_value(emit_t *emit) {
     emit_bc_pre(emit, 0);
-    if (emit->pass == PASS_2) {
-        emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
-    }
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
     emit_write_byte_code_byte(emit, MP_BC_YIELD_VALUE);
 }
 
 STATIC void emit_bc_yield_from(emit_t *emit) {
     emit_bc_pre(emit, -1);
-    if (emit->pass == PASS_2) {
-        emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
-    }
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
     emit_write_byte_code_byte(emit, MP_BC_YIELD_FROM);
 }
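Both the assemblers and the bytecode emitter rely on the same invariant for this scheme to work: a label offset recorded during a sizing pass must come out identical during MP_PASS_EMIT, otherwise the buffer allocated from the computed size would not match the code being written. A minimal sketch of that check (toy code, not from the commit; all *_toy names are made up):

#include <assert.h>
#include <stdio.h>

#define NUM_LABELS_TOY 4

static int label_offsets_toy[NUM_LABELS_TOY];

static void label_assign_toy(int pass_is_emit, unsigned label, int code_offset) {
    assert(label < NUM_LABELS_TOY);
    if (!pass_is_emit) {
        label_offsets_toy[label] = code_offset;             // sizing pass: record offset
    } else {
        assert(label_offsets_toy[label] == code_offset);    // emit pass: must not move
    }
}

int main(void) {
    for (int pass_is_emit = 0; pass_is_emit <= 1; pass_is_emit++) {
        label_assign_toy(pass_is_emit, 0, 8);   // same offset in both passes: OK
    }
    printf("label offsets stable across passes\n");
    return 0;
}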
py/emitcpy.c
@@ -72,7 +72,7 @@ STATIC void emit_cpy_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope)
     emit->stack_size = 0;
     emit->last_emit_was_return_value = false;
     emit->scope = scope;
-    if (pass == PASS_2) {
+    if (pass < MP_PASS_EMIT) {
         memset(emit->label_offsets, -1, emit->max_num_labels * sizeof(int));
     }
 }

@@ -114,7 +114,7 @@ static void emit_pre(emit_t *emit, int stack_size_delta, int byte_code_size) {
         emit->scope->stack_size = emit->stack_size;
     }