Skip to content

Commit 7fa598c

Browse files
committed
infra: Add Torch 1.13.1 testing to nightly CI
- Add testing for Torch 1.13.1 path in CI across both TS and FX compilation paths - Disable `aten` tests for 1.13.1, to resolve Torch Dynamo import/functionality issues - Refactor quantization FX tests to resolve key error in pattern dictionary - Add parameter fields to CI to accommodate Torch 1.13.1 version - Update `dispatch_tracer` function docstrings and imports to avoid naming issue with `torch._dynamo` vs `torchdynamo` - Rename CI versioning to use "legacy"
1 parent 97209fe commit 7fa598c

File tree

4 files changed

+175
-15
lines changed

4 files changed

+175
-15
lines changed

.circleci/config.yml

Lines changed: 144 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -524,15 +524,47 @@ commands:
524524
- store_artifacts:
525525
path: /tmp/testlogs
526526

527-
test-fx_converters:
528-
description: "Test the fx converters"
527+
test-fx_converters_acc:
528+
description: "Test the fx acc converters"
529529
steps:
530530
- run:
531531
name: Run FX converter tests
532532
command: |
533533
cd py/torch_tensorrt/fx/test
534-
pushd converters/
535-
pytest --junitxml=/tmp/artifacts/test_results/fx/converters/test_results.xml
534+
pushd converters/acc_op/
535+
pytest --junitxml=/tmp/artifacts/test_results/fx/converters/acc_op/test_results.xml
536+
popd
537+
538+
- store_test_results:
539+
path: /tmp/artifacts
540+
- store_artifacts:
541+
path: /tmp/testlogs
542+
543+
test-fx_converters_aten:
544+
description: "Test the fx aten converters"
545+
steps:
546+
- run:
547+
name: Run FX converter tests
548+
command: |
549+
cd py/torch_tensorrt/fx/test
550+
pushd converters/aten_op/
551+
pytest --junitxml=/tmp/artifacts/test_results/fx/converters/aten_op/test_results.xml
552+
popd
553+
554+
- store_test_results:
555+
path: /tmp/artifacts
556+
- store_artifacts:
557+
path: /tmp/testlogs
558+
559+
test-fx_converters_vanilla:
560+
description: "Test the fx vanilla converters"
561+
steps:
562+
- run:
563+
name: Run FX converter tests
564+
command: |
565+
cd py/torch_tensorrt/fx/test
566+
pushd converters/vanilla/
567+
pytest --junitxml=/tmp/artifacts/test_results/fx/converters/vanilla/test_results.xml
536568
popd
537569
538570
- store_test_results:
@@ -587,7 +619,7 @@ commands:
587619
path: /tmp/testlogs
588620

589621
test-fx_tracer:
590-
description: "Test the fx tracer"
622+
description: "Test all fx tracers"
591623
steps:
592624
- run:
593625
name: Run FX tracer
@@ -602,6 +634,22 @@ commands:
602634
- store_artifacts:
603635
path: /tmp/testlogs
604636

637+
test-fx_tracer_acc:
638+
description: "Test the fx acc tracer only"
639+
steps:
640+
- run:
641+
name: Run FX tracer
642+
command: |
643+
cd py/torch_tensorrt/fx/test
644+
pushd tracer
645+
list_tracer=$(ls | grep test_acc)
646+
pytest $list_tracer --junitxml=/tmp/artifacts/test_results/fx/tracer/test_results.xml
647+
popd
648+
- store_test_results:
649+
path: /tmp/artifacts
650+
- store_artifacts:
651+
path: /tmp/testlogs
652+
605653
test-fx_quant:
606654
description: "Test the fx quant"
607655
steps:
@@ -625,7 +673,9 @@ commands:
625673
name: Run fx tests
626674
command: |
627675
mkdir -p /tmp/artifacts/test_results
628-
- test-fx_converters
676+
- test-fx_converters_acc
677+
- test-fx_converters_aten
678+
- test-fx_converters_vanilla
629679
- test-fx_passes
630680
- test-fx_tools
631681
- test-fx_trt_lower
@@ -637,6 +687,26 @@ commands:
637687
- store_artifacts:
638688
path: /tmp/testlogs
639689

690+
test-fx-no-aten:
691+
description: "Test the fx backend without aten operators"
692+
steps:
693+
- run:
694+
name: Run fx tests without aten ops
695+
command: |
696+
mkdir -p /tmp/artifacts/test_results
697+
- test-fx_converters_acc
698+
- test-fx_converters_vanilla
699+
- test-fx_passes
700+
- test-fx_tools
701+
- test-fx_trt_lower
702+
- test-fx_tracer_acc
703+
- test-fx_core
704+
- test-fx_quant
705+
- store_test_results:
706+
path: /tmp/artifacts
707+
- store_artifacts:
708+
path: /tmp/testlogs
709+
640710
# Define a job to be invoked later in a workflow.
641711
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
642712
jobs:
@@ -782,6 +852,37 @@ jobs:
782852
- dump-test-env
783853
- test-fx
784854

855+
test-py-fx-x86_64-linux-no-aten:
856+
parameters:
857+
torch-build:
858+
type: string
859+
torch-build-index:
860+
type: string
861+
trt-version-long:
862+
type: string
863+
machine:
864+
image: ubuntu-2004-cuda-11.4:202110-01
865+
resource_class: gpu.nvidia.large
866+
steps:
867+
- checkout
868+
- attach_workspace:
869+
at: /tmp/dist/
870+
- install-torch-from-index:
871+
torch-build: << parameters.torch-build >>
872+
torch-build-index: << parameters.torch-build-index >>
873+
- create-py-env:
874+
trt-version-long: << parameters.trt-version-long >>
875+
- install-cudnn
876+
# - run:
877+
# name: "Set LD_LIBRARY_PATH path to include the installed CUDNN"
878+
# command: export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH
879+
- run:
880+
name: "Install torch-tensorrt"
881+
command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl
882+
# We install torch after torch-trt because pip automatically enforces the version constraint otherwise
883+
- dump-test-env
884+
- test-fx-no-aten
885+
785886
package-x86_64-linux:
786887
parameters:
787888
enabled:
@@ -1074,6 +1175,12 @@ parameters:
10741175
torch-build-index:
10751176
type: string
10761177
default: "https://download.pytorch.org/whl/nightly/cu117"
1178+
torch-build-legacy:
1179+
type: string
1180+
default: "1.13.1+cu117"
1181+
torch-build-index-legacy:
1182+
type: string
1183+
default: "https://download.pytorch.org/whl/cu117"
10771184
cudnn-version:
10781185
type: string
10791186
default: "8.5.0.96"
@@ -1127,6 +1234,7 @@ workflows:
11271234
- release/**/*
11281235
jobs:
11291236
- build-x86_64-linux:
1237+
name: build-x86_64-linux
11301238
torch-build: << pipeline.parameters.torch-build >>
11311239
torch-build-index: << pipeline.parameters.torch-build-index >>
11321240

@@ -1153,6 +1261,36 @@ workflows:
11531261
requires:
11541262
- build-x86_64-linux
11551263

1264+
- build-x86_64-linux:
1265+
name: build-x86_64-linux-legacy
1266+
torch-build: << pipeline.parameters.torch-build-legacy >>
1267+
torch-build-index: << pipeline.parameters.torch-build-index-legacy >>
1268+
1269+
- test-core-cpp-x86_64-linux:
1270+
name: test-core-cpp-x86_64-linux-legacy
1271+
torch-build: << pipeline.parameters.torch-build-legacy >>
1272+
torch-build-index: << pipeline.parameters.torch-build-index-legacy >>
1273+
trt-version-short: << pipeline.parameters.trt-version-short >>
1274+
trt-version-long: << pipeline.parameters.trt-version-long >>
1275+
cudnn-version: << pipeline.parameters.cudnn-version >>
1276+
requires:
1277+
- build-x86_64-linux-legacy
1278+
1279+
- test-py-ts-x86_64-linux:
1280+
name: test-py-ts-x86_64-linux-legacy
1281+
torch-build: << pipeline.parameters.torch-build-legacy >>
1282+
torch-build-index: << pipeline.parameters.torch-build-index-legacy >>
1283+
trt-version-long: << pipeline.parameters.trt-version-long >>
1284+
requires:
1285+
- build-x86_64-linux-legacy
1286+
1287+
- test-py-fx-x86_64-linux-no-aten:
1288+
torch-build: << pipeline.parameters.torch-build-legacy >>
1289+
torch-build-index: << pipeline.parameters.torch-build-index-legacy >>
1290+
trt-version-long: << pipeline.parameters.trt-version-long >>
1291+
requires:
1292+
- build-x86_64-linux-legacy
1293+
11561294
release:
11571295
when: << pipeline.parameters.enable-packaging >>
11581296
jobs:

py/setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -380,7 +380,7 @@ def run(self):
380380
long_description=long_description,
381381
ext_modules=ext_modules,
382382
install_requires=[
383-
"torch>=1.14.0.dev0",
383+
"torch>=1.13.1",
384384
],
385385
setup_requires=[],
386386
cmdclass={

py/torch_tensorrt/fx/test/quant/test_quant_trt.py

Lines changed: 20 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -696,7 +696,6 @@ def conv_add_extra_inputs_getter(pattern):
696696
return [extra_input]
697697

698698
conv_add_config = {
699-
"pattern_complex_format": (operator.add, torch.nn.Conv2d, MatchAllNode),
700699
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
701700
"dtype_configs": [
702701
weighted_op_qint8_dtype_config,
@@ -707,6 +706,15 @@ def conv_add_extra_inputs_getter(pattern):
707706
"reference_quantized_module_for_root": torch.nn.quantized._reference.Conv2d,
708707
}
709708

709+
if torch.__version__.startswith("1"):
710+
conv_add_config["pattern"] = (operator.add, torch.nn.Conv2d, MatchAllNode)
711+
else:
712+
conv_add_config["pattern_complex_format"] = (
713+
operator.add,
714+
torch.nn.Conv2d,
715+
MatchAllNode,
716+
)
717+
710718
m = M().eval()
711719
modified_backend_config_dict = copy.deepcopy(self.trt_backend_config_dict)
712720
modified_backend_config_dict["configs"].insert(0, conv_add_config)
@@ -764,10 +772,6 @@ def forward(self, x):
764772
}
765773

766774
conv_add_config = {
767-
"pattern_complex_format": (
768-
torch.nn.ReLU,
769-
(operator.add, torch.nn.Conv2d, MatchAllNode),
770-
),
771775
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
772776
"dtype_configs": [
773777
weighted_op_quint8_dtype_config,
@@ -776,6 +780,17 @@ def forward(self, x):
776780
# "reference_quantized_module_for_root": torch.nn.quantized._reference.Conv2d,
777781
}
778782

783+
if torch.__version__.startswith("1"):
784+
conv_add_config["pattern"] = (
785+
torch.nn.ReLU,
786+
(operator.add, torch.nn.Conv2d, MatchAllNode),
787+
)
788+
else:
789+
conv_add_config["pattern_complex_format"] = (
790+
torch.nn.ReLU,
791+
(operator.add, torch.nn.Conv2d, MatchAllNode),
792+
)
793+
779794
conv_config = {
780795
"pattern": torch.nn.Conv2d,
781796
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,

py/torch_tensorrt/fx/tracer/dispatch_tracer/aten_tracer.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,10 @@
44
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union
55

66
import torch
7-
import torch._dynamo as torchdynamo
8-
from torch._dynamo.guards import Guard
7+
8+
if not torch.__version__.startswith("1"):
9+
import torch._dynamo as torchdynamo
10+
911
from torch.fx.passes.infra.pass_base import PassResult
1012

1113
from torch_tensorrt.fx.passes.lower_basic_pass_aten import (
@@ -96,12 +98,17 @@ def dynamo_trace(
9698
aten_graph: bool,
9799
tracing_mode: str = "real",
98100
dynamo_config: Optional[DynamoConfig] = None,
99-
) -> Tuple[torch.fx.GraphModule, Set[Guard]]:
101+
) -> Tuple[torch.fx.GraphModule, Set]:
100102
"""
101103
TODO: Once we fully migrate to torchdynamo frontend, we will remove
102104
this config option alltogether. For now, it helps with quick
103105
experiments with playing around with TorchDynamo
104106
"""
107+
if torch.__version__.startswith("1"):
108+
raise ValueError(
109+
f"The aten tracer requires Torch version >= 2.0. Detected version {torch.__version__}"
110+
)
111+
105112
if dynamo_config is None:
106113
dynamo_config = DynamoConfig()
107114
with using_config(dynamo_config), setting_python_recursive_limit(2000):

0 commit comments

Comments
 (0)