TASTE / uPython-mirror · Commit 963a5a3e
Authored Jan 16, 2015 by Damien George

py, unix: Allow to compile with -Wsign-compare.

See issue #699.

Parent: f12ea7c7
Changes: 23 files
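The changes below all serve the same goal: with -Wsign-compare enabled, GCC and Clang warn whenever a signed and an unsigned integer are compared, because the signed operand is implicitly converted to unsigned and negative values silently wrap. The recurring fixes in this diff are explicit casts on the signed side, unsigned loop indices for sizeof-based bounds, and (mp_uint_t)-1 or (uint)-1 sentinels in place of plain -1. As a rough illustration, here is a minimal standalone C sketch (not taken from the commit) of the kind of comparison the warning flags and the cast-based fix:

/* Minimal standalone sketch (not from this commit) of the pattern that
 * -Wsign-compare flags and how the casts in this diff silence it. */
#include <stddef.h>
#include <stdio.h>

static const char *names[] = {"a", "b", "c"};

int main(void) {
    int wanted = -1; /* signed "not found" sentinel */

    /* Comparing a signed index against the unsigned array size would
     * trigger -Wsign-compare, so use size_t for the loop variable. */
    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        if (names[i][0] == 'b') {
            wanted = (int)i;
        }
    }

    /* Signed vs unsigned: cast one side explicitly so the conversion is
     * intentional rather than implicit. */
    size_t limit = 2;
    if (wanted >= 0 && (size_t)wanted < limit) {
        printf("found at %d\n", wanted);
    }
    return 0;
}

Compiling the unfixed variants with -Wall -Wsign-compare reproduces warnings along the lines of "comparison between signed and unsigned integer expressions".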
py/asmx86.c

@@ -398,7 +398,7 @@ void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label) {
     assert(label < as->max_num_labels);
     if (as->pass < ASM_X86_PASS_EMIT) {
         // assign label offset
-        assert(as->label_offsets[label] == -1);
+        assert(as->label_offsets[label] == (mp_uint_t)-1);
         as->label_offsets[label] = as->code_offset;
     } else {
         // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT

@@ -407,7 +407,7 @@ void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label) {
     }
 }

-STATIC mp_uint_t get_label_dest(asm_x86_t *as, int label) {
+STATIC mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) {
     assert(label < as->max_num_labels);
     return as->label_offsets[label];
 }

@@ -415,7 +415,7 @@ STATIC mp_uint_t get_label_dest(asm_x86_t *as, int label) {
 void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->code_offset;
-    if (dest != -1 && rel < 0) {
+    if (dest != (mp_uint_t)-1 && rel < 0) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 8 bit relative jump
         rel -= 2;

@@ -437,7 +437,7 @@ void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
 void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->code_offset;
-    if (dest != -1 && rel < 0) {
+    if (dest != (mp_uint_t)-1 && rel < 0) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 8 bit relative jump
         rel -= 2;
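Both this file and py/emitbc.c below keep label offsets in an array of mp_uint_t and use -1 as the "not yet assigned" marker; once the array is unsigned, the marker has to be written as (mp_uint_t)-1 (the all-ones value) so the comparison is unsigned on both sides. A hedged, self-contained sketch of that sentinel pattern, with toy names in place of the real asm_x86_t structure:

/* Hedged sketch of the sentinel pattern used above; the struct and field
 * names here are simplified stand-ins, not the real asm_x86_t layout. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LABELS 4
#define UNASSIGNED ((uintptr_t)-1)  /* all-ones sentinel in an unsigned type */

typedef struct {
    uintptr_t label_offsets[NUM_LABELS];
} toy_asm_t;

static void label_assign(toy_asm_t *as, uintptr_t label, uintptr_t offset) {
    assert(label < (uintptr_t)NUM_LABELS);
    /* Comparing against (uintptr_t)-1 instead of plain -1 keeps both sides
     * unsigned, so -Wsign-compare stays quiet. */
    assert(as->label_offsets[label] == UNASSIGNED);
    as->label_offsets[label] = offset;
}

int main(void) {
    toy_asm_t as;
    for (int i = 0; i < NUM_LABELS; i++) {
        as.label_offsets[i] = UNASSIGNED;
    }
    label_assign(&as, 1, 42);
    printf("label 1 -> %lu\n", (unsigned long)as.label_offsets[1]);
    return 0;
}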
py/bc.c

@@ -116,7 +116,7 @@ void mp_setup_code_state(mp_code_state *code_state, mp_obj_t self_in, mp_uint_t
     // Apply processing and check below only if we don't have kwargs,
     // otherwise, kw handling code below has own extensive checks.
     if (n_kw == 0 && !self->has_def_kw_args) {
-        if (n_args >= self->n_pos_args - self->n_def_args) {
+        if (n_args >= (mp_uint_t)(self->n_pos_args - self->n_def_args)) {
             // given enough arguments, but may need to use some default arguments
             for (mp_uint_t i = n_args; i < self->n_pos_args; i++) {
                 code_state->state[n_state - 1 - i] = self->extra_args[i - (self->n_pos_args - self->n_def_args)];
py/binary.c

@@ -256,7 +256,7 @@ void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte **
         }
     }
-    mp_binary_set_int(MIN(size, sizeof(val)), struct_type == '>', p, val);
+    mp_binary_set_int(MIN((size_t)size, sizeof(val)), struct_type == '>', p, val);
 }

 void mp_binary_set_val_array(char typecode, void *p, mp_uint_t index, mp_obj_t val_in) {
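Here `size` is a signed value while sizeof(val) is a size_t, and the MIN macro compares its two arguments directly, so the signed operand gets the cast. A standalone sketch of the same pattern (set_int below is a toy helper, not MicroPython's mp_binary_set_int):

/* Standalone sketch (not MicroPython's code) of why MIN(signed, sizeof(x))
 * trips -Wsign-compare and how a cast on the signed side fixes it. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Write the low `count` bytes of `val` into `dest` (little-endian here,
 * purely for illustration). */
static void set_int(size_t count, unsigned char *dest, unsigned long long val) {
    for (size_t i = 0; i < count; i++) {
        dest[i] = (unsigned char)(val >> (8 * i));
    }
}

int main(void) {
    unsigned char buf[8];
    memset(buf, 0, sizeof(buf));

    int size = 3;                   /* signed, e.g. parsed from a format string */
    unsigned long long val = 0x11223344;

    /* MIN(size, sizeof(val)) would compare int against size_t inside the
     * macro; casting the signed operand keeps both sides unsigned. */
    set_int(MIN((size_t)size, sizeof(val)), buf, val);

    printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
    return 0;
}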
py/builtinimport.c

@@ -203,7 +203,7 @@ mp_obj_t mp_builtin___import__(mp_uint_t n_args, const mp_obj_t *args) {
             nlr_raise(mp_obj_new_exception_msg(&mp_type_ImportError, "Invalid relative import"));
         }
-        uint new_mod_l = (mod_len == 0 ? p - this_name : p - this_name + 1 + mod_len);
+        uint new_mod_l = (mod_len == 0 ? (size_t)(p - this_name) : (size_t)(p - this_name) + 1 + mod_len);
         char *new_mod = alloca(new_mod_l);
         memcpy(new_mod, this_name, p - this_name);
         if (mod_len != 0) {
py/compile.c

@@ -216,12 +216,12 @@ STATIC mp_parse_node_t fold_constants(compiler_t *comp, mp_parse_node_t pn, mp_m
             mp_int_t arg1 = MP_PARSE_NODE_LEAF_SMALL_INT(pns->nodes[2]);
             if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[1], MP_TOKEN_OP_DBL_LESS)) {
                 // int << int
-                if (!(arg1 >= BITS_PER_WORD || arg0 > (MP_SMALL_INT_MAX >> arg1) || arg0 < (MP_SMALL_INT_MIN >> arg1))) {
+                if (!(arg1 >= (mp_int_t)BITS_PER_WORD || arg0 > (MP_SMALL_INT_MAX >> arg1) || arg0 < (MP_SMALL_INT_MIN >> arg1))) {
                     pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, arg0 << arg1);
                 }
             } else if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[1], MP_TOKEN_OP_DBL_MORE)) {
                 // int >> int
-                if (arg1 >= BITS_PER_WORD) {
+                if (arg1 >= (mp_int_t)BITS_PER_WORD) {
                     // Shifting to big amounts is underfined behavior
                     // in C and is CPU-dependent; propagate sign bit.
                     arg1 = BITS_PER_WORD - 1;

@@ -386,8 +386,8 @@ STATIC scope_t *scope_new_and_link(compiler_t *comp, scope_kind_t kind, mp_parse
     return scope;
 }

-STATIC void apply_to_single_or_list(compiler_t *comp, mp_parse_node_t pn, int pn_list_kind, void (*f)(compiler_t *, mp_parse_node_t)) {
-    if (MP_PARSE_NODE_IS_STRUCT(pn) && MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn) == pn_list_kind) {
+STATIC void apply_to_single_or_list(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_list_kind, void (*f)(compiler_t *, mp_parse_node_t)) {
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, pn_list_kind)) {
         mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
         int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
         for (int i = 0; i < num_nodes; i++) {

@@ -398,7 +398,7 @@ STATIC void apply_to_single_or_list(compiler_t *comp, mp_parse_node_t pn, int pn
     }
 }

-STATIC int list_get(mp_parse_node_t *pn, int pn_kind, mp_parse_node_t **nodes) {
+STATIC int list_get(mp_parse_node_t *pn, pn_kind_t pn_kind, mp_parse_node_t **nodes) {
     if (MP_PARSE_NODE_IS_NULL(*pn)) {
         *nodes = NULL;
         return 0;

@@ -811,14 +811,14 @@ STATIC void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num
     uint num_head = (node_head == MP_PARSE_NODE_NULL) ? 0 : 1;

     // look for star expression
-    int have_star_index = -1;
+    uint have_star_index = -1;
     if (num_head != 0 && MP_PARSE_NODE_IS_STRUCT_KIND(node_head, PN_star_expr)) {
         EMIT_ARG(unpack_ex, 0, num_tail);
         have_star_index = 0;
     }
-    for (int i = 0; i < num_tail; i++) {
+    for (uint i = 0; i < num_tail; i++) {
         if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes_tail[i], PN_star_expr)) {
-            if (have_star_index < 0) {
+            if (have_star_index == (uint)-1) {
                 EMIT_ARG(unpack_ex, num_head + i, num_tail - i - 1);
                 have_star_index = num_head + i;
             } else {

@@ -827,7 +827,7 @@ STATIC void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num
             }
         }
     }
-    if (have_star_index < 0) {
+    if (have_star_index == (uint)-1) {
         EMIT_ARG(unpack_sequence, num_head + num_tail);
     }
     if (num_head != 0) {

@@ -837,7 +837,7 @@ STATIC void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num
             c_assign(comp, node_head, ASSIGN_STORE);
         }
     }
-    for (int i = 0; i < num_tail; i++) {
+    for (uint i = 0; i < num_tail; i++) {
         if (num_head + i == have_star_index) {
             c_assign(comp, ((mp_parse_node_struct_t *)nodes_tail[i])->nodes[0], ASSIGN_STORE);
         } else {
py/emitbc.c

@@ -424,7 +424,7 @@ STATIC void emit_bc_label_assign(emit_t *emit, mp_uint_t l) {
     assert(l < emit->max_num_labels);
     if (emit->pass < MP_PASS_EMIT) {
         // assign label offset
-        assert(emit->label_offsets[l] == -1);
+        assert(emit->label_offsets[l] == (mp_uint_t)-1);
         emit->label_offsets[l] = emit->bytecode_offset;
     } else {
         // ensure label offset has not changed from MP_PASS_CODE_SIZE to MP_PASS_EMIT
py/lexer.c

@@ -657,7 +657,7 @@ STATIC void mp_lexer_next_token_into(mp_lexer_t *lex, bool first_token) {
         // need to check for this special token in many places in the compiler.
         // TODO improve speed of these string comparisons
         //for (mp_int_t i = 0; tok_kw[i] != NULL; i++) {
-        for (mp_int_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
+        for (size_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
             if (str_strn_equal(tok_kw[i], lex->vstr.buf, lex->vstr.len)) {
                 if (i == MP_ARRAY_SIZE(tok_kw) - 1) {
                     // tok_kw[MP_ARRAY_SIZE(tok_kw) - 1] == "__debug__"
py/map.c

@@ -47,7 +47,7 @@ const mp_map_t mp_const_empty_map = {
 STATIC uint32_t doubling_primes[] = {0, 7, 19, 43, 89, 179, 347, 647, 1229, 2297, 4243, 7829, 14347, 26017, 47149, 84947, 152443, 273253, 488399, 869927, 1547173, 2745121, 4861607};

 STATIC mp_uint_t get_doubling_prime_greater_or_equal_to(mp_uint_t x) {
-    for (int i = 0; i < MP_ARRAY_SIZE(doubling_primes); i++) {
+    for (size_t i = 0; i < MP_ARRAY_SIZE(doubling_primes); i++) {
         if (doubling_primes[i] >= x) {
             return doubling_primes[i];
         }
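Both py/lexer.c and py/map.c iterate up to MP_ARRAY_SIZE(...), which expands to a sizeof-based expression of type size_t, so indexing the loops with int or mp_int_t produced a signed/unsigned comparison in the loop condition; switching the index to size_t matches the bound's type. A minimal sketch under that assumption, with the ARRAY_SIZE macro written out explicitly:

/* Hedged sketch: an ARRAY_SIZE macro modelled on MP_ARRAY_SIZE. Because
 * sizeof yields size_t, the loop index below is size_t as well, which is
 * exactly the change made in py/lexer.c and py/map.c above. */
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int primes[] = {7, 19, 43, 89, 179};

/* Return the first table entry >= x, or 0 if none is large enough. */
static unsigned int first_prime_at_least(unsigned int x) {
    for (size_t i = 0; i < ARRAY_SIZE(primes); i++) { /* size_t vs size_t: no warning */
        if (primes[i] >= x) {
            return primes[i];
        }
    }
    return 0;
}

int main(void) {
    printf("%u\n", first_prime_at_least(50)); /* prints 89 */
    return 0;
}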
py/mpz.c

@@ -1370,7 +1370,7 @@ mp_int_t mpz_hash(const mpz_t *z) {
 }

 bool mpz_as_int_checked(const mpz_t *i, mp_int_t *value) {
-    mp_int_t val = 0;
+    mp_uint_t val = 0;
     mpz_dig_t *d = i->dig + i->len;

     while (d-- > i->dig) {
py/obj.c

@@ -393,11 +393,11 @@ mp_uint_t mp_get_index(const mp_obj_type_t *type, mp_uint_t len, mp_obj_t index,
         if (is_slice) {
             if (i < 0) {
                 i = 0;
-            } else if (i > len) {
+            } else if ((mp_uint_t)i > len) {
                 i = len;
             }
         } else {
-            if (i < 0 || i >= len) {
+            if (i < 0 || (mp_uint_t)i >= len) {
                 if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
                     nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "index out of range"));
                 } else {
py/obj.h

@@ -86,7 +86,7 @@ typedef struct _mp_obj_base_t mp_obj_base_t;
 #define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
 #define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_int_t)(small_int)) << 1) | 1))

-#define MP_OBJ_QSTR_VALUE(o) (((mp_int_t)(o)) >> 2)
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2)
 #define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 2))

 // These macros are used to declare and define constant function objects

@@ -247,7 +247,7 @@ bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
 void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);

 // Stream protocol
-#define MP_STREAM_ERROR (-1)
+#define MP_STREAM_ERROR ((mp_uint_t)-1)
 typedef struct _mp_stream_p_t {
     // On error, functions should return MP_STREAM_ERROR and fill in *errcode (values
     // are implementation-dependent, but will be exposed to user, e.g. via exception).
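Stream functions that return a length in an unsigned type need an error sentinel of the same signedness, hence MP_STREAM_ERROR becoming ((mp_uint_t)-1) above. A hedged sketch with toy names (toy_read, STREAM_ERROR) rather than the real mp_stream protocol:

/* Hedged sketch (toy names, not the real mp_stream API) of why an unsigned
 * error sentinel such as ((mp_uint_t)-1) composes better with unsigned
 * return types under -Wsign-compare. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STREAM_ERROR ((uintptr_t)-1)  /* mirrors the new MP_STREAM_ERROR shape */

/* A toy "read" that returns bytes copied, or STREAM_ERROR on failure. */
static uintptr_t toy_read(char *dst, uintptr_t n, int fail) {
    if (fail) {
        return STREAM_ERROR;
    }
    memset(dst, 'x', n);
    return n;
}

int main(void) {
    char buf[16];
    uintptr_t got = toy_read(buf, sizeof(buf), 0);
    /* Both operands are unsigned, so this comparison is warning-free. */
    if (got == STREAM_ERROR) {
        puts("read failed");
    } else {
        printf("read %lu bytes\n", (unsigned long)got);
    }
    return 0;
}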
py/objint.c

@@ -334,7 +334,7 @@ STATIC mp_obj_t int_to_bytes(mp_uint_t n_args, const mp_obj_t *args) {
     // TODO: Support signed param (assumes signed=False)
     mp_int_t val = mp_obj_int_get_checked(args[0]);

-    mp_int_t len = MP_OBJ_SMALL_INT_VALUE(args[1]);
+    mp_uint_t len = MP_OBJ_SMALL_INT_VALUE(args[1]);
     byte *data;
     mp_obj_t o = mp_obj_str_builder_start(&mp_type_bytes, len, &data);
py/objlist.c

@@ -364,7 +364,7 @@ STATIC mp_obj_t list_insert(mp_obj_t self_in, mp_obj_t idx, mp_obj_t obj) {
     if (index < 0) {
         index = 0;
     }
-    if (index > self->len) {
+    if ((mp_uint_t)index > self->len) {
         index = self->len;
     }
py/objstr.c

@@ -941,7 +941,7 @@ mp_obj_t mp_obj_str_format(mp_uint_t n_args, const mp_obj_t *args, mp_map_t *kwa
                     }
                 }
                 lookup = str_to_int(field, &index) + field;
-                if (index >= n_args - 1) {
+                if ((uint)index >= n_args - 1) {
                     nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "tuple index out of range"));
                 }
                 arg = args[index + 1];

@@ -969,7 +969,7 @@ mp_obj_t mp_obj_str_format(mp_uint_t n_args, const mp_obj_t *args, mp_map_t *kwa
                         "can't switch from manual field specification to automatic field numbering"));
                 }
             }
-            if (arg_i >= n_args - 1) {
+            if ((uint)arg_i >= n_args - 1) {
                 nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "tuple index out of range"));
             }
             arg = args[arg_i + 1];

@@ -1248,7 +1248,7 @@ mp_obj_t mp_obj_str_format(mp_uint_t n_args, const mp_obj_t *args, mp_map_t *kwa
             if (precision < 0) {
                 precision = len;
             }
-            if (len > precision) {
+            if (len > (mp_uint_t)precision) {
                 len = precision;
             }
             pfenv_print_strn(&pfenv_vstr, s, len, flags, fill, width);

@@ -1334,7 +1334,7 @@ STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, mp_uint_t n_args, const mp_o
         int width = 0;
         if (str < top) {
             if (*str == '*') {
-                if (arg_i >= n_args) {
+                if ((uint)arg_i >= n_args) {
                     goto not_enough_args;
                 }
                 width = mp_obj_get_int(args[arg_i++]);

@@ -1347,7 +1347,7 @@ STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, mp_uint_t n_args, const mp_o
         if (str < top && *str == '.') {
             if (++str < top) {
                 if (*str == '*') {
-                    if (arg_i >= n_args) {
+                    if ((uint)arg_i >= n_args) {
                         goto not_enough_args;
                     }
                     prec = mp_obj_get_int(args[arg_i++]);

@@ -1370,7 +1370,7 @@ STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, mp_uint_t n_args, const mp_o
             // Tuple value lookup
             if (arg == MP_OBJ_NULL) {
-                if (arg_i >= n_args) {
+                if ((uint)arg_i >= n_args) {
 not_enough_args:
                     nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "not enough arguments for format string"));
                 }

@@ -1429,7 +1429,7 @@ not_enough_args:
                 if (prec < 0) {
                     prec = len;
                 }
-                if (len > prec) {
+                if (len > (uint)prec) {
                     len = prec;
                 }
                 pfenv_print_strn(&pfenv_vstr, vstr_str(arg_vstr), len, flags, ' ', width);

@@ -1453,7 +1453,7 @@ not_enough_args:
         }
     }

-    if (arg_i != n_args) {
+    if ((uint)arg_i != n_args) {
         nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "not all arguments converted during string formatting"));
     }

@@ -1522,7 +1522,7 @@ STATIC mp_obj_t str_replace(mp_uint_t n_args, const mp_obj_t *args) {
             replaced_str_index += new_len;
             num_replacements_done++;
         }
-        while (num_replacements_done != max_rep && str_len_remain > 0 && (old_occurrence = find_subbytes(offset_ptr, str_len_remain, old, old_len, 1)) != NULL) {
+        while (num_replacements_done != (mp_uint_t)max_rep && str_len_remain > 0 && (old_occurrence = find_subbytes(offset_ptr, str_len_remain, old, old_len, 1)) != NULL) {
             if (old_len == 0) {
                 old_occurrence += 1;
             }
py/parse.c

@@ -316,7 +316,7 @@ STATIC void push_result_token(parser_t *parser) {
             for (; i < len; i++) {
                 mp_uint_t dig;
                 int clower = str[i] | 0x20;
-                if (unichar_isdigit(str[i]) && str[i] - '0' < base) {
+                if (unichar_isdigit(str[i]) && (mp_uint_t)(str[i] - '0') < base) {
                     dig = str[i] - '0';
                 } else if (base == 16 && 'a' <= clower && clower <= 'f') {
                     dig = clower - 'a' + 10;
py/pfenv.c

@@ -341,12 +341,15 @@ int pfenv_print_float(const pfenv_t *pfenv, mp_float_t f, char fmt, int flags, c
         *fmt_s = '\0';

         len = snprintf(buf, sizeof(buf), fmt_buf, prec, f);
+        if (len < 0) {
+            len = 0;
+        }
 #else
 #error Unknown MICROPY FLOAT IMPL
 #endif

     char *s = buf;

-    if ((flags & PF_FLAG_ADD_PERCENT) && (len + 1) < sizeof(buf)) {
+    if ((flags & PF_FLAG_ADD_PERCENT) && (size_t)(len + 1) < sizeof(buf)) {
         buf[len++] = '%';
         buf[len] = '\0';
     }
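snprintf returns an int that is negative on an output error, so the new check clamps len to zero before it is used as an index and compared, via a size_t cast, against sizeof(buf). A small standalone sketch of that clamp-then-compare pattern:

/* Hedged sketch of the pattern added above: clamp a possibly-negative
 * snprintf() result before comparing it against sizeof(buf). */
#include <stddef.h>
#include <stdio.h>

int main(void) {
    char buf[32];

    int len = snprintf(buf, sizeof(buf), "%.3f", 2.71828);
    if (len < 0) {
        len = 0;  /* snprintf reports errors with a negative return */
    }

    /* len is now guaranteed non-negative, so the cast to size_t is safe and
     * the comparison involves two unsigned operands. */
    if ((size_t)(len + 1) < sizeof(buf)) {
        buf[len++] = '%';
        buf[len] = '\0';
    }

    puts(buf);  /* prints "2.718%" */
    return 0;
}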
py/qstr.c

@@ -49,7 +49,7 @@
 //  - data follows
 //  - \0 terminated (for now, so they can be printed using printf)

-#define Q_GET_HASH(q)   ((q)[0] | ((q)[1] << 8))
+#define Q_GET_HASH(q)   ((mp_uint_t)(q)[0] | ((mp_uint_t)(q)[1] << 8))
 #define Q_GET_ALLOC(q)  (2 + MICROPY_QSTR_BYTES_IN_LEN + Q_GET_LENGTH(q) + 1)
 #define Q_GET_DATA(q)   ((q) + 2 + MICROPY_QSTR_BYTES_IN_LEN)

 #if MICROPY_QSTR_BYTES_IN_LEN == 1
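Without the casts, each byte of q is promoted to int before the shift and OR, so the computed hash has a signed type and comparing it against an mp_uint_t hash elsewhere can trigger the warning; casting each byte to mp_uint_t keeps the whole expression unsigned. A toy sketch of that idea (GET_HASH below is an illustrative macro, not the real qstr layout):

/* Hedged sketch (toy macro, not the real qstr layout) showing why the bytes
 * are cast to an unsigned type before being combined into a hash. */
#include <stdint.h>
#include <stdio.h>

/* Combine two little-endian bytes into an unsigned hash value. Without the
 * casts, integer promotion would make the result an int. */
#define GET_HASH(q) ((uintptr_t)(q)[0] | ((uintptr_t)(q)[1] << 8))

int main(void) {
    const unsigned char qdata[] = {0x34, 0x12, /* length, chars... */ 0};
    uintptr_t computed = GET_HASH(qdata);
    uintptr_t expected = 0x1234;

    /* Both sides unsigned: no -Wsign-compare diagnostic. */
    if (computed == expected) {
        printf("hash = 0x%lx\n", (unsigned long)computed);
    }
    return 0;
}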
py/runtime.c

@@ -334,7 +334,7 @@ mp_obj_t mp_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs) {
                 if (rhs_val < 0) {
                     // negative shift not allowed
                     nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "negative shift count"));
-                } else if (rhs_val >= BITS_PER_WORD || lhs_val > (MP_SMALL_INT_MAX >> rhs_val) || lhs_val < (MP_SMALL_INT_MIN >> rhs_val)) {
+                } else if (rhs_val >= (mp_int_t)BITS_PER_WORD || lhs_val > (MP_SMALL_INT_MAX >> rhs_val) || lhs_val < (MP_SMALL_INT_MIN >> rhs_val)) {
                     // left-shift will overflow, so use higher precision integer
                     lhs = mp_obj_new_int_from_ll(lhs_val);
                     goto generic_binary_op;

@@ -351,7 +351,7 @@ mp_obj_t mp_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs) {
                     nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "negative shift count"));
                 } else {
                     // standard precision is enough for right-shift
-                    if (rhs_val >= BITS_PER_WORD) {
+                    if (rhs_val >= (mp_int_t)BITS_PER_WORD) {
                         // Shifting to big amounts is underfined behavior
                         // in C and is CPU-dependent; propagate sign bit.
                         rhs_val = BITS_PER_WORD - 1;
py/sequence.c

@@ -70,12 +70,12 @@ bool mp_seq_get_fast_slice_indexes(mp_uint_t len, mp_obj_t slice, mp_bound_slice
         if (start < 0) {
             start = 0;
         }
-    } else if (start > len) {
+    } else if ((mp_uint_t)start > len) {
         start = len;
     }

     if (stop < 0) {
         stop = len + stop;
-    } else if (stop > len) {
+    } else if ((mp_uint_t)stop > len) {
         stop = len;
     }
py/showbc.c

@@ -81,7 +81,7 @@ void mp_bytecode_print(const void *descr, mp_uint_t n_total_args, const byte *ip
     // bytecode prelude: arg names (as qstr objects)
     printf("arg names:");
-    for (int i = 0; i < n_total_args; i++) {
+    for (mp_uint_t i = 0; i < n_total_args; i++) {
         printf(" %s", qstr_str(MP_OBJ_QSTR_VALUE(*(mp_obj_t*)ip)));
         ip += sizeof(mp_obj_t);
     }

@@ -539,7 +539,7 @@ const byte *mp_bytecode_print_str(const byte *ip) {
 void mp_bytecode_print2(const byte *ip, mp_uint_t len) {
     mp_showbc_code_start = ip;
-    while (ip - mp_showbc_code_start < len) {
+    while (ip < len + mp_showbc_code_start) {
         printf("%02u ", (uint)(ip - mp_showbc_code_start));
         ip = mp_bytecode_print_str(ip);
         printf("\n");
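In mp_bytecode_print2 the old condition compared ip - mp_showbc_code_start (a signed ptrdiff_t) against the unsigned len; rewriting it as ip < len + mp_showbc_code_start turns it into a pointer-to-pointer comparison with no mixed signedness. A minimal sketch of the same rewrite:

/* Hedged sketch of the loop-condition rewrite above: compare pointers
 * instead of comparing a signed pointer difference with an unsigned length. */
#include <stddef.h>
#include <stdio.h>

static void dump(const unsigned char *ip, size_t len) {
    const unsigned char *start = ip;
    /* `ip - start < len` would pit ptrdiff_t against size_t; `ip < start + len`
     * keeps it a plain pointer comparison. */
    while (ip < start + len) {
        printf("%02u: %02x\n", (unsigned)(ip - start), (unsigned)*ip);
        ip++;
    }
}

int main(void) {
    const unsigned char code[] = {0x10, 0x27, 0x59};
    dump(code, sizeof(code));
    return 0;
}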