@@ -340,10 +340,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         exir.print_program.pretty_print(program)
 
         deboxed_int_list = []
-        for item in program.execution_plan[0].values[5].val.items:  # pyre-ignore[16]
-            deboxed_int_list.append(
-                program.execution_plan[0].values[item].val.int_val  # pyre-ignore[16]
-            )
+        for item in program.execution_plan[0].values[5].val.items:
+            deboxed_int_list.append(program.execution_plan[0].values[item].val.int_val)
 
         self.assertEqual(IntList(deboxed_int_list), IntList([2, 0, 1]))
 
@@ -459,11 +457,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # Check the mul operator's stack trace contains f -> g -> h
         self.assertTrue(
             "return torch.mul(x, torch.randn(3, 2))"
-            in program.execution_plan[0]  # pyre-ignore[16]
-            .chains[0]
-            .stacktrace[1]
-            .items[-1]
-            .context
+            in program.execution_plan[0].chains[0].stacktrace[1].items[-1].context
         )
         self.assertEqual(
             program.execution_plan[0].chains[0].stacktrace[1].items[-1].name, "f"
@@ -616,11 +610,7 @@ def false_fn(y: torch.Tensor) -> torch.Tensor:
             if not isinstance(inst.instr_args, KernelCall):
                 continue
 
-            op = (
-                program.execution_plan[0]
-                .operators[inst.instr_args.op_index]  # pyre-ignore[16]
-                .name
-            )
+            op = program.execution_plan[0].operators[inst.instr_args.op_index].name
 
             if "mm" in op:
                 num_mm += 1
@@ -657,19 +647,13 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         # generate the tensor on which this iteration will operate on.
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[0]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[0].instr_args.op_index
             ].name,
             "aten::sym_size",
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[1]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[1].instr_args.op_index
             ].name,
             "aten::select_copy",
         )
@@ -681,28 +665,19 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         # We check here that both of these have been generated.
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-5]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-5].instr_args.op_index
             ].name,
             "executorch_prim::et_copy_index",
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-4]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-4].instr_args.op_index
             ].name,
             "executorch_prim::add",
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-3]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-3].instr_args.op_index
             ].name,
             "executorch_prim::eq",
         )
@@ -716,10 +691,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-1]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-1].instr_args.op_index
             ].name,
             "executorch_prim::sub",
         )
@@ -1300,9 +1272,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # this triggers the actual emission of the graph
         program = program_mul._emitter_output.program
         node = None
-        program.execution_plan[0].chains[0].instructions[  # pyre-ignore[16]
-            0
-        ].instr_args.op_index
+        program.execution_plan[0].chains[0].instructions[0].instr_args.op_index
 
         # Find the multiplication node in the graph that was emitted.
         for node in program_mul.exported_program().graph.nodes:
@@ -1314,7 +1284,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # Find the multiplication instruction in the program that was emitted.
         for idx in range(len(program.execution_plan[0].chains[0].instructions)):
             instruction = program.execution_plan[0].chains[0].instructions[idx]
-            op_index = instruction.instr_args.op_index  # pyre-ignore[16]
+            op_index = instruction.instr_args.op_index
             if "mul" in program.execution_plan[0].operators[op_index].name:
                 break
 
@@ -1453,9 +1423,7 @@ def forward(self, x, y):
         exec_prog._emitter_output.program
         self.assertIsNotNone(exec_prog.delegate_map)
         self.assertIsNotNone(exec_prog.delegate_map.get("forward"))
-        self.assertIsNotNone(
-            exec_prog.delegate_map.get("forward").get(0)  # pyre-ignore[16]
-        )
+        self.assertIsNotNone(exec_prog.delegate_map.get("forward").get(0))
         self.assertEqual(
             exec_prog.delegate_map.get("forward").get(0).get("name"),
             "BackendWithCompilerExample",
@@ -1568,9 +1536,7 @@ def forward(self, x):
         model = model.to_executorch()
         model.dump_executorch_program(True)
         self.assertTrue(
-            model.executorch_program.execution_plan[0]  # pyre-ignore[16]
-            .values[0]
-            .val.allocation_info
+            model.executorch_program.execution_plan[0].values[0].val.allocation_info
             is not None
         )
         executorch_module = _load_for_executorch_from_buffer(model.buffer)
@@ -1611,9 +1577,7 @@ def forward(self, x):
         )
         model.dump_executorch_program(True)
         self.assertTrue(
-            model.executorch_program.execution_plan[0]  # pyre-ignore[16]
-            .values[0]
-            .val.allocation_info
+            model.executorch_program.execution_plan[0].values[0].val.allocation_info
             is not None
         )
         executorch_module = _load_for_executorch_from_buffer(model.buffer)
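For reference, most of the assertions in these hunks resolve an operator name through the same traversal of the emitted program: a KernelCall instruction's op_index points into the execution plan's operator table. Below is a minimal standalone sketch of that lookup, assuming a schema.Program already produced by to_executorch(); the helper name resolve_op_names is hypothetical, not part of the ExecuTorch API.

from typing import List

from executorch.exir.schema import KernelCall, Program


def resolve_op_names(program: Program) -> List[str]:
    """Return the operator name behind every KernelCall in the first plan."""
    plan = program.execution_plan[0]
    names = []
    for inst in plan.chains[0].instructions:
        # Skip instructions whose args carry no op_index
        # (e.g. delegate calls or jump instructions).
        if not isinstance(inst.instr_args, KernelCall):
            continue
        # op_index is an offset into the plan-level operator table.
        names.append(plan.operators[inst.instr_args.op_index].name)
    return names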