     (opcode) == POP_JUMP_IF_FALSE || \
     (opcode) == POP_JUMP_IF_TRUE)

+#define IS_JUMP_OPCODE(opcode) \
+        (IS_VIRTUAL_JUMP_OPCODE(opcode) || \
+         is_bit_set_in_table(_PyOpcode_Jump, opcode))
+
 /* opcodes which are not emitted in codegen stage, only by the assembler */
 #define IS_ASSEMBLER_OPCODE(opcode) \
         ((opcode) == JUMP_FORWARD || \
[...]
         (opcode) == POP_JUMP_BACKWARD_IF_TRUE || \
         (opcode) == POP_JUMP_BACKWARD_IF_FALSE)

+#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
+        ((opcode) == JUMP || \
+         (opcode) == JUMP_NO_INTERRUPT || \
+         (opcode) == JUMP_FORWARD || \
+         (opcode) == JUMP_BACKWARD || \
+         (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
+
+#define IS_SCOPE_EXIT_OPCODE(opcode) \
+        ((opcode) == RETURN_VALUE || \
+         (opcode) == RAISE_VARARGS || \
+         (opcode) == RERAISE)

 #define IS_TOP_LEVEL_AWAIT(c) ( \
         (c->c_flags->cf_flags & PyCF_ALLOW_TOP_LEVEL_AWAIT) \
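
IS_JUMP_OPCODE combines the virtual-jump check with a bitset lookup over the real opcodes. For readers unfamiliar with is_bit_set_in_table: it tests one bit per opcode in a packed table of 32-bit words (_PyOpcode_Jump is such a table, generated from the opcode metadata). A minimal self-contained sketch of that kind of lookup — the real helper in compile.c may differ in detail, and FAKE_JUMP is a made-up opcode number:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: 256 opcodes, one bit each, packed into eight 32-bit words.
 * Bit (opcode % 32) of word (opcode / 32) is set iff the opcode is a jump. */
static inline int
is_bit_set_in_table(const uint32_t *table, int bitindex)
{
    if (bitindex >= 0 && bitindex < 256) {
        uint32_t word = table[bitindex >> 5];     /* which word */
        return (word >> (bitindex & 31)) & 1;     /* which bit */
    }
    return 0;
}

int main(void)
{
    uint32_t jump_table[8] = {0};
    int FAKE_JUMP = 93;                           /* hypothetical opcode number */
    jump_table[FAKE_JUMP >> 5] |= (uint32_t)1 << (FAKE_JUMP & 31);
    printf("%d %d\n", is_bit_set_in_table(jump_table, FAKE_JUMP),
                      is_bit_set_in_table(jump_table, FAKE_JUMP + 1));  /* 1 0 */
    return 0;
}
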
@@ -182,8 +197,7 @@ is_block_push(struct instr *instr)
 static inline int
 is_jump(struct instr *i)
 {
-    return IS_VIRTUAL_JUMP_OPCODE(i->i_opcode) ||
-           is_bit_set_in_table(_PyOpcode_Jump, i->i_opcode);
+    return IS_JUMP_OPCODE(i->i_opcode);
 }

 static int
@@ -249,16 +263,10 @@ typedef struct basicblock_ {
     int b_startdepth;
     /* instruction offset for block, computed by assemble_jump_offsets() */
     int b_offset;
-    /* Basic block has no fall through (it ends with a return, raise or jump) */
-    unsigned b_nofallthrough : 1;
     /* Basic block is an exception handler that preserves lasti */
     unsigned b_preserve_lasti : 1;
     /* Used by compiler passes to mark whether they have visited a basic block. */
     unsigned b_visited : 1;
-    /* Basic block exits scope (it ends with a return or raise) */
-    unsigned b_exit : 1;
-    /* b_return is true if a RETURN_VALUE opcode is inserted. */
-    unsigned b_return : 1;
     /* b_cold is true if this block is not perf critical (like an exception handler) */
     unsigned b_cold : 1;
     /* b_warm is used by the cold-detection algorithm to mark blocks which are definitely not cold */
@@ -274,6 +282,29 @@ basicblock_last_instr(basicblock *b) {
     return NULL;
 }

+static inline int
+basicblock_returns(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return last && last->i_opcode == RETURN_VALUE;
+}
+
+static inline int
+basicblock_exits_scope(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return last && IS_SCOPE_EXIT_OPCODE(last->i_opcode);
+}
+
+static inline int
+basicblock_nofallthrough(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return (last &&
+            (IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
+             IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)));
+}
+
+#define BB_NO_FALLTHROUGH(B) (basicblock_nofallthrough(B))
+#define BB_HAS_FALLTHROUGH(B) (!basicblock_nofallthrough(B))
+
 /* fblockinfo tracks the current frame block.

 A frame block is used to handle loops, try/except, and try/finally.
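
The point of these helpers is that a block's control-flow properties are now derived on demand from its last instruction, instead of being cached in bitfields that every optimization pass had to keep in sync (remove_redundant_jumps below, for instance, previously had to reset b_nofallthrough after turning a jump into a NOP). A self-contained toy model of the idea — simplified types and opcode values, not CPython's real ones:

#include <assert.h>
#include <stddef.h>

/* Toy model, not CPython's real types. */
enum { NOP = 1, JUMP = 2, RETURN_VALUE = 3 };

struct instr { int i_opcode; };
struct block { struct instr ins[4]; int used; };

static struct instr *
last_instr(struct block *b) {
    return b->used ? &b->ins[b->used - 1] : NULL;
}

static int
block_nofallthrough(struct block *b) {
    struct instr *last = last_instr(b);
    return last && (last->i_opcode == JUMP || last->i_opcode == RETURN_VALUE);
}

int main(void)
{
    struct block b = { .ins = {{NOP}, {JUMP}}, .used = 2 };
    assert(block_nofallthrough(&b));    /* ends in a jump */
    b.ins[1].i_opcode = NOP;            /* an optimizer removes the jump... */
    assert(!block_nofallthrough(&b));   /* ...and no cached flag needs resetting */
    return 0;
}
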
@@ -847,7 +878,7 @@ compiler_copy_block(struct compiler *c, basicblock *block)
     /* Cannot copy a block if it has a fallthrough, since
      * a block can only have one fallthrough predecessor.
      */
-    assert(block->b_nofallthrough);
+    assert(BB_NO_FALLTHROUGH(block));
     basicblock *result = compiler_new_block(c);
     if (result == NULL) {
         return NULL;
@@ -859,8 +890,6 @@ compiler_copy_block(struct compiler *c, basicblock *block)
         }
         result->b_instr[n] = block->b_instr[i];
     }
-    result->b_exit = block->b_exit;
-    result->b_nofallthrough = 1;
     return result;
 }
@@ -1218,11 +1247,7 @@ static int
 is_end_of_basic_block(struct instr *instr)
 {
     int opcode = instr->i_opcode;
-
-    return is_jump(instr) ||
-        opcode == RETURN_VALUE ||
-        opcode == RAISE_VARARGS ||
-        opcode == RERAISE;
+    return is_jump(instr) || IS_SCOPE_EXIT_OPCODE(opcode);
 }

 static int
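
is_end_of_basic_block is the codegen-side rule for where a block must end: at any jump or scope exit. A rough standalone illustration of that partitioning rule — the opcode values and classifier are stand-ins, not CPython's:

#include <stdio.h>

/* Stand-in opcodes; CPython's come from opcode.h. */
enum { LOAD = 0, STORE = 1, JUMP = 2, RETURN_VALUE = 3 };

static int
ends_block(int opcode)
{
    /* mirrors: is_jump(instr) || IS_SCOPE_EXIT_OPCODE(opcode) */
    return opcode == JUMP || opcode == RETURN_VALUE;
}

int main(void)
{
    int code[] = { LOAD, STORE, JUMP, LOAD, RETURN_VALUE };
    int n = sizeof code / sizeof code[0];
    int block = 0;
    for (int i = 0; i < n; i++) {
        printf("block %d: opcode %d\n", block, code[i]);
        if (ends_block(code[i])) {
            block++;    /* the next instruction starts a new basic block */
        }
    }
    /* LOAD, STORE, JUMP land in block 0; LOAD, RETURN_VALUE in block 1. */
    return 0;
}
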
@@ -1258,9 +1283,6 @@ basicblock_addop_line(basicblock *b, int opcode, int line,
     struct instr *i = &b->b_instr[off];
     i->i_opcode = opcode;
     i->i_oparg = 0;
-    if (opcode == RETURN_VALUE) {
-        b->b_return = 1;
-    }
     i->i_lineno = line;
     i->i_end_lineno = end_line;
     i->i_col_offset = col_offset;
@@ -7139,11 +7161,8 @@ stackdepth(struct compiler *c, basicblock *entry)
             }
             depth = new_depth;
             assert(!IS_ASSEMBLER_OPCODE(instr->i_opcode));
-            if (instr->i_opcode == JUMP_NO_INTERRUPT ||
-                instr->i_opcode == JUMP ||
-                instr->i_opcode == RETURN_VALUE ||
-                instr->i_opcode == RAISE_VARARGS ||
-                instr->i_opcode == RERAISE)
+            if (IS_UNCONDITIONAL_JUMP_OPCODE(instr->i_opcode) ||
+                IS_SCOPE_EXIT_OPCODE(instr->i_opcode))
             {
                 /* remaining code is dead */
                 next = NULL;
@@ -7154,7 +7173,7 @@ stackdepth(struct compiler *c, basicblock *entry)
             }
         }
         if (next != NULL) {
-            assert(b->b_nofallthrough == 0);
+            assert(BB_HAS_FALLTHROUGH(b));
             stackdepth_push(&sp, next, depth);
         }
     }
@@ -7309,7 +7328,7 @@ label_exception_targets(basicblock *entry) {
                 instr->i_except = handler;
                 assert(i == b->b_iused - 1);
                 if (!instr->i_target->b_visited) {
-                    if (b->b_nofallthrough == 0) {
+                    if (BB_HAS_FALLTHROUGH(b)) {
                         ExceptStack *copy = copy_except_stack(except_stack);
                         if (copy == NULL) {
                             goto error;
@@ -7329,7 +7348,7 @@ label_exception_targets(basicblock *entry) {
                 instr->i_except = handler;
             }
         }
-        if (b->b_nofallthrough == 0 && !b->b_next->b_visited) {
+        if (BB_HAS_FALLTHROUGH(b) && !b->b_next->b_visited) {
             assert(except_stack != NULL);
             b->b_next->b_exceptstack = except_stack;
             todo[0] = b->b_next;
@@ -7368,7 +7387,7 @@ mark_warm(basicblock *entry) {
         assert(!b->b_except_predecessors);
         b->b_warm = 1;
         basicblock *next = b->b_next;
-        if (next && !b->b_nofallthrough && !next->b_visited) {
+        if (next && BB_HAS_FALLTHROUGH(b) && !next->b_visited) {
             *sp++ = next;
             next->b_visited = 1;
         }
@@ -7412,7 +7431,7 @@ mark_cold(basicblock *entry) {
         basicblock *b = *(--sp);
         b->b_cold = 1;
         basicblock *next = b->b_next;
-        if (next && !b->b_nofallthrough) {
+        if (next && BB_HAS_FALLTHROUGH(b)) {
             if (!next->b_warm && !next->b_visited) {
                 *sp++ = next;
                 next->b_visited = 1;
@@ -7447,15 +7466,14 @@ push_cold_blocks_to_end(struct compiler *c, basicblock *entry, int code_flags) {
     /* If we have a cold block with fallthrough to a warm block, add */
     /* an explicit jump instead of fallthrough */
     for (basicblock *b = entry; b != NULL; b = b->b_next) {
-        if (b->b_cold && !b->b_nofallthrough && b->b_next && b->b_next->b_warm) {
+        if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) {
             basicblock *explicit_jump = compiler_new_block(c);
             if (explicit_jump == NULL) {
                 return -1;
             }
             basicblock_add_jump(explicit_jump, JUMP, -1, 0, 0, 0, b->b_next);

             explicit_jump->b_cold = 1;
-            explicit_jump->b_nofallthrough = 1;
             explicit_jump->b_next = b->b_next;
             b->b_next = explicit_jump;
         }
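
Why the explicit jump: once cold blocks are pushed to the end, a cold block that used to fall through into a warm successor is no longer physically adjacent to it, so the fallthrough has to be materialized as a JUMP. A toy model of that fix-up, with simplified types standing in for basicblock:

#include <assert.h>
#include <stddef.h>

/* Toy model of the fix-up (not CPython's types): 'next' is the physical
 * successor after reordering; fallthrough only reaches 'next', so if the
 * intended successor moved away, append an explicit jump. */
struct tblock { struct tblock *next; struct tblock *jump_target; int has_jump; };

static void
fix_fallthrough(struct tblock *b, struct tblock *intended)
{
    if (!b->has_jump && b->next != intended) {
        b->has_jump = 1;        /* stands in for basicblock_add_jump(..., JUMP, ...) */
        b->jump_target = intended;
    }
}

int main(void)
{
    struct tblock warm = { NULL, NULL, 0 };
    struct tblock cold = { &warm, NULL, 0 };    /* cold fell through to warm */
    cold.next = NULL;                           /* reordering moved cold to the end */
    fix_fallthrough(&cold, &warm);
    assert(cold.has_jump && cold.jump_target == &warm);
    return 0;
}
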
@@ -7948,7 +7966,7 @@ scan_block_for_local(int target, basicblock *b, bool unsafe_to_start,
     if (unsafe) {
         // unsafe at end of this block,
         // so unsafe at start of next blocks
-        if (b->b_next && !b->b_nofallthrough) {
+        if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
             MAYBE_PUSH(b->b_next);
         }
         if (b->b_iused > 0) {
@@ -8276,9 +8294,10 @@ dump_instr(struct instr *i)
 static void
 dump_basicblock(const basicblock *b)
 {
-    const char *b_return = b->b_return ? "return " : "";
+    const char *b_return = basicblock_returns(b) ? "return " : "";
     fprintf(stderr, "[%d %d %d %p] used: %d, depth: %d, offset: %d %s\n",
-            b->b_cold, b->b_warm, b->b_nofallthrough, b, b->b_iused, b->b_startdepth, b->b_offset, b_return);
+            b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
+            b->b_startdepth, b->b_offset, b_return);
     if (b->b_instr) {
         int i;
         for (i = 0; i < b->b_iused; i++) {
@@ -8540,7 +8559,6 @@ remove_redundant_jumps(basicblock *entry) {
             b_last_instr->i_opcode == JUMP_NO_INTERRUPT) {
             if (b_last_instr->i_target == b->b_next) {
                 assert(b->b_next->b_iused);
-                b->b_nofallthrough = 0;
                 b_last_instr->i_opcode = NOP;
                 removed++;
             }
@@ -8567,7 +8585,7 @@ assemble(struct compiler *c, int addNone)
     }

     /* Make sure every block that falls off the end returns None. */
-    if (!c->u->u_curblock->b_return) {
+    if (!basicblock_returns(c->u->u_curblock)) {
         UNSET_LOC(c);
         if (addNone)
             ADDOP_LOAD_CONST(c, Py_None);
@@ -9059,7 +9077,6 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
                 jump_if_true = nextop == POP_JUMP_IF_TRUE;
                 if (is_true == jump_if_true) {
                     bb->b_instr[i+1].i_opcode = JUMP;
-                    bb->b_nofallthrough = 1;
                 }
                 else {
                     bb->b_instr[i+1].i_opcode = NOP;
@@ -9079,7 +9096,6 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
                 jump_if_true = nextop == JUMP_IF_TRUE_OR_POP;
                 if (is_true == jump_if_true) {
                     bb->b_instr[i+1].i_opcode = JUMP;
-                    bb->b_nofallthrough = 1;
                 }
                 else {
                     inst->i_opcode = NOP;
@@ -9268,7 +9284,7 @@ extend_block(basicblock *bb) {
         last->i_opcode != JUMP_BACKWARD) {
         return 0;
     }
-    if (last->i_target->b_exit && last->i_target->b_iused <= MAX_COPY_SIZE) {
+    if (basicblock_exits_scope(last->i_target) && last->i_target->b_iused <= MAX_COPY_SIZE) {
         basicblock *to_copy = last->i_target;
         last->i_opcode = NOP;
         for (int i = 0; i < to_copy->b_iused; i++) {
@@ -9278,7 +9294,6 @@ extend_block(basicblock *bb) {
             }
             bb->b_instr[index] = to_copy->b_instr[i];
         }
-        bb->b_exit = 1;
     }
     return 0;
 }
@@ -9336,34 +9351,21 @@ normalize_basic_block(basicblock *bb) {
     /* Mark blocks as exit and/or nofallthrough.
     Raise SystemError if CFG is malformed. */
     for (int i = 0; i < bb->b_iused; i++) {
-        assert(!IS_ASSEMBLER_OPCODE(bb->b_instr[i].i_opcode));
-        switch (bb->b_instr[i].i_opcode) {
-            case RETURN_VALUE:
-            case RAISE_VARARGS:
-            case RERAISE:
-                bb->b_exit = 1;
-                bb->b_nofallthrough = 1;
-                break;
-            case JUMP:
-            case JUMP_NO_INTERRUPT:
-                bb->b_nofallthrough = 1;
-                /* fall through */
-            case POP_JUMP_IF_NOT_NONE:
-            case POP_JUMP_IF_NONE:
-            case POP_JUMP_IF_FALSE:
-            case POP_JUMP_IF_TRUE:
-            case JUMP_IF_FALSE_OR_POP:
-            case JUMP_IF_TRUE_OR_POP:
-            case FOR_ITER:
-                if (i != bb->b_iused - 1) {
-                    PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
-                    return -1;
-                }
-                /* Skip over empty basic blocks. */
-                while (bb->b_instr[i].i_target->b_iused == 0) {
-                    bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
-                }
-
+        int opcode = bb->b_instr[i].i_opcode;
+        assert(!IS_ASSEMBLER_OPCODE(opcode));
+        int is_jump = IS_JUMP_OPCODE(opcode);
+        int is_exit = IS_SCOPE_EXIT_OPCODE(opcode);
+        if (is_exit || is_jump) {
+            if (i != bb->b_iused - 1) {
+                PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
+                return -1;
+            }
+        }
+        if (is_jump) {
+            /* Skip over empty basic blocks. */
+            while (bb->b_instr[i].i_target->b_iused == 0) {
+                bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
+            }
         }
     }
     return 0;
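
The "skip over empty basic blocks" loop retargets a jump past empty blocks, so later passes never see a jump into a block with no instructions. A minimal model of the retargeting, assuming (as the compiler guarantees here) that some non-empty block eventually follows:

#include <assert.h>
#include <stddef.h>

/* Simplified model: blocks form a b_next chain; a jump target is a block. */
struct block { int b_iused; struct block *b_next; };

static struct block *
skip_empty(struct block *target)
{
    while (target->b_iused == 0) {      /* a non-empty block must follow */
        target = target->b_next;
    }
    return target;
}

int main(void)
{
    struct block c = { 3, NULL };       /* has instructions */
    struct block b = { 0, &c };         /* empty */
    struct block a = { 0, &b };         /* empty */
    assert(skip_empty(&a) == &c);       /* jump retargeted past a and b */
    return 0;
}
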
@@ -9381,7 +9383,7 @@ mark_reachable(struct assembler *a) {
     while (sp > stack) {
         basicblock *b = *(--sp);
         b->b_visited = 1;
-        if (b->b_next && !b->b_nofallthrough) {
+        if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
             if (!b->b_next->b_visited) {
                 assert(b->b_next->b_predecessors == 0);
                 *sp++ = b->b_next;
@@ -9470,7 +9472,7 @@ propagate_line_numbers(struct assembler *a) {
                 COPY_INSTR_LOC(b->b_instr[i], prev_instr);
             }
         }
-        if (!b->b_nofallthrough && b->b_next->b_predecessors == 1) {
+        if (BB_HAS_FALLTHROUGH(b) && b->b_next->b_predecessors == 1) {
             assert(b->b_next->b_iused);
             if (b->b_next->b_instr[0].i_lineno < 0) {
                 COPY_INSTR_LOC(prev_instr, b->b_next->b_instr[0]);
@@ -9518,7 +9520,6 @@ optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts)
     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
         if (b->b_predecessors == 0) {
             b->b_iused = 0;
-            b->b_nofallthrough = 0;
         }
     }
     eliminate_empty_basic_blocks(a->a_entry);
@@ -9558,7 +9559,7 @@ trim_unused_consts(struct assembler *a, PyObject *consts)

 static inline int
 is_exit_without_lineno(basicblock *b) {
-    if (!b->b_exit) {
+    if (!basicblock_exits_scope(b)) {
         return 0;
     }
     for (int i = 0; i < b->b_iused; i++) {
@@ -9609,7 +9610,7 @@ duplicate_exits_without_lineno(struct compiler *c)
     /* Any remaining reachable exit blocks without line number can only be reached by
      * fall through, and thus can only have a single predecessor */
     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
-        if (!b->b_nofallthrough && b->b_next && b->b_iused > 0) {
+        if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) {
            if (is_exit_without_lineno(b->b_next)) {
                 assert(b->b_next->b_iused > 0);
                 COPY_INSTR_LOC(b->b_instr[b->b_iused-1], b->b_next->b_instr[0]);