about summary refs log tree commit diff
path: root/Python
diff options
context:
space:
mode:
authorMark Shannon <mark@hotpy.org>2021-10-06 13:05:45 +0100
committerGitHub <noreply@github.com>2021-10-06 13:05:45 +0100
commitf6eafe18c004c55082de40d20cad084ef9dd3db7 (patch)
treeb5e0d2f185b2da5cf66f7cbd00c66b87937d46ba /Python
parentc379bc5ec9012cf66424ef3d80612cf13ec51006 (diff)
downloadcpython3-f6eafe18c004c55082de40d20cad084ef9dd3db7.tar.gz
Normalize jumps in compiler. All forward jumps to use JUMP_FORWARD. (GH-28755)
Diffstat (limited to 'Python')
-rw-r--r--Python/ceval.c24
-rw-r--r--Python/compile.c23
2 files changed, 35 insertions, 12 deletions
diff --git a/Python/ceval.c b/Python/ceval.c
index 2dbc291897..a3a173dfb7 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -4051,19 +4051,18 @@ check_eval_breaker:
TARGET(JUMP_ABSOLUTE) {
PREDICTED(JUMP_ABSOLUTE);
- if (oparg < INSTR_OFFSET()) {
- /* Increment the warmup counter and quicken if warm enough
- * _Py_Quicken is idempotent so we don't worry about overflow */
- if (!PyCodeObject_IsWarmedUp(co)) {
- PyCodeObject_IncrementWarmup(co);
- if (PyCodeObject_IsWarmedUp(co)) {
- if (_Py_Quicken(co)) {
- goto error;
- }
- int nexti = INSTR_OFFSET();
- first_instr = co->co_firstinstr;
- next_instr = first_instr + nexti;
+ assert(oparg < INSTR_OFFSET());
+ /* Increment the warmup counter and quicken if warm enough
+ * _Py_Quicken is idempotent so we don't worry about overflow */
+ if (!PyCodeObject_IsWarmedUp(co)) {
+ PyCodeObject_IncrementWarmup(co);
+ if (PyCodeObject_IsWarmedUp(co)) {
+ if (_Py_Quicken(co)) {
+ goto error;
}
+ int nexti = INSTR_OFFSET();
+ first_instr = co->co_firstinstr;
+ next_instr = first_instr + nexti;
}
}
JUMPTO(oparg);
@@ -4072,6 +4071,7 @@ check_eval_breaker:
}
TARGET(JUMP_ABSOLUTE_QUICK) {
+ assert(oparg < INSTR_OFFSET());
JUMPTO(oparg);
CHECK_EVAL_BREAKER();
DISPATCH();
diff --git a/Python/compile.c b/Python/compile.c
index 694da29b77..2d82d6a1e5 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -7221,6 +7221,26 @@ assemble_emit(struct assembler *a, struct instr *i)
}
static void
+normalize_jumps(struct assembler *a)
+{
+ for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
+ b->b_visited = 0;
+ }
+ for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
+ b->b_visited = 1;
+ if (b->b_iused == 0) {
+ continue;
+ }
+ struct instr *last = &b->b_instr[b->b_iused-1];
+ if (last->i_opcode == JUMP_ABSOLUTE &&
+ last->i_target->b_visited == 0
+ ) {
+ last->i_opcode = JUMP_FORWARD;
+ }
+ }
+}
+
+static void
assemble_jump_offsets(struct assembler *a, struct compiler *c)
{
basicblock *b;
@@ -7897,6 +7917,9 @@ assemble(struct compiler *c, int addNone)
clean_basic_block(b);
}
+ /* Order of basic blocks must have been determined by now */
+ normalize_jumps(&a);
+
/* Can't modify the bytecode after computing jump offsets. */
assemble_jump_offsets(&a, c);