
Commit 2b6db3b

turn off normalize_ir, turn on use_functionalization by default (#1050)
* turn off normalize_ir, turn on use_functionalization by default
  ghstack-source-id: 86d8054
  Pull Request resolved: #1026
* add to CI skip list
* lint
* force CI to re-run
* force CI rerun
1 parent: 890e025

File tree: 3 files changed, +12 −1 lines

benchmarks/common.py (6 additions, 0 deletions)

@@ -93,6 +93,12 @@ def set_model_name(name):
     "mobilenet_",
     "pytorch_struct",
     "vgg16",
+    "Background_Matting",  # from functionalization
+    "mobilenet_v2_quantized_qat",  # from functionalization
+    "resnet50_quantized_qat",  # from functionalization
+    "speech_transformer",  # from functionalization
+    "vision_maskrcnn",  # from functionalization
+    "timm_efficientnet",  # from functionalization (only fails for inductor)
     # Huggingface
     "AlbertForMaskedLM",
     "BartForConditionalGeneration",

torchdynamo/__init__.py (5 additions, 0 deletions)

@@ -1,3 +1,6 @@
+# TODO: remove this config entirely
+import functorch.compile
+
 from . import allowed_functions
 from . import convert_frame
 from . import eval_frame
@@ -13,6 +16,8 @@
 from .utils import guard_failures
 from .utils import orig_code_map
 
+functorch.compile.config.use_functionalize = True
+
 __all__ = [
     "optimize",
     "optimize_assert",

torchdynamo/config.py (1 addition, 1 deletion)

@@ -66,7 +66,7 @@ class AccessLimitingConfig(ModuleType):
 fake_tensor_propagation = True
 
 # run FX normalization passes in optimizer
-normalize_ir = True
+normalize_ir = False
 
 # If a tensor subclass type is in this set, torchdynamo will inline the
 # __torch_function__ logic of the subclass.
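Because torchdynamo's config is a plain module (wrapped in AccessLimitingConfig, which, as its name suggests, limits which attributes may be set), the new default can still be overridden after import. A sketch, assuming normalize_ir remains a settable attribute:

import torchdynamo.config

# After this commit the default is False.
print(torchdynamo.config.normalize_ir)  # False

# Re-enable the FX normalization passes in the optimizer if needed:
torchdynamo.config.normalize_ir = True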
