
Commit

rm dynamo (#558)
hjchen2 authored Jan 23, 2024
1 parent cffd3d4 commit 0dd6c37
Showing 5 changed files with 1 addition and 11 deletions.
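
Every change below follows the same pattern: the import of torch._dynamo.allow_in_graph (aliased as maybe_allow_in_graph) is deleted, along with the places where that alias was handed to the quantization helpers, either positionally or as convert_fn=. As a minimal sketch of what the removed alias does, assuming only the public torch._dynamo API (the function names and call style here are illustrative and do not come from this repository):

# allow_in_graph registers a callable so TorchDynamo keeps it as a single
# node in the captured graph instead of tracing into its body.
import torch
from torch._dynamo import allow_in_graph as maybe_allow_in_graph

def fused_gelu(x):
    return torch.nn.functional.gelu(x, approximate="tanh")

maybe_allow_in_graph(fused_gelu)  # returns the same callable, now opaque to Dynamo

@torch.compile
def forward(x):
    return fused_gelu(x) * 2

print(forward(torch.randn(4)))

In the files below the alias is never used as a decorator; it is passed as an argument (convert_fn= in two of the files) to the quantization helpers, and this commit drops both the import and that argument.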
2 changes: 0 additions & 2 deletions examples/text_to_image_deep_cache_sdxl_enterprise.py
@@ -4,7 +4,6 @@

import torch
import torch.nn as nn
- from torch._dynamo import allow_in_graph as maybe_allow_in_graph

# oneflow_compile should be imported before importing any diffusers
from onediff.infer_compiler import oneflow_compile
@@ -112,7 +111,6 @@ def parse_args():
False,
False,
args.bits,
- maybe_allow_in_graph,
)

if args.complie_text_encoder:
2 changes: 0 additions & 2 deletions examples/text_to_image_sdxl_enterprise.py
@@ -4,7 +4,6 @@

import torch
import torch.nn as nn
- from torch._dynamo import allow_in_graph as maybe_allow_in_graph

# oneflow_compile should be imported before importing any diffusers
from onediff.infer_compiler import oneflow_compile
@@ -110,7 +109,6 @@ def parse_args():
False,
False,
args.bits,
- maybe_allow_in_graph,
)

if args.complie_text_encoder:
2 changes: 0 additions & 2 deletions onediff_comfy_nodes/utils/onediff_quant_utils.py
@@ -3,7 +3,6 @@
import torch
import torch.nn as nn

- from torch._dynamo import allow_in_graph as maybe_allow_in_graph

if hasattr(comfy.ops, "disable_weight_init"):
comfy_ops_Linear = comfy.ops.disable_weight_init.Linear
@@ -241,7 +240,6 @@ def replace_module_with_quantizable_module(
fake_quant=False,
static=False,
nbits=8,
- convert_fn=maybe_allow_in_graph,
)
modify_sub_module(diffusion_model, sub_module_name, sub_mod)
if use_rewrite_attn:
2 changes: 0 additions & 2 deletions (file path not shown)
@@ -7,7 +7,6 @@
rewrite_sdxl_pipeline_attention,
replace_sub_module_with_quantizable_module,
)
- from torch._dynamo import allow_in_graph as maybe_allow_in_graph


def _use_graph():
@@ -108,7 +107,6 @@ def _quantize_model(self):
self._fake_quant,
self._static,
self._bits,
- maybe_allow_in_graph,
)
rewrite_sdxl_pipeline_attention(self._pipe)

4 changes: 1 addition & 3 deletions src/onediff/optimization/quant_optimizer.py
@@ -38,7 +38,6 @@ def quantize_model(
if varify_can_use_quantization() is False:
return model

- from torch._dynamo import allow_in_graph as maybe_allow_in_graph
from onediff_quant.utils import symm_quantize_sub_module, find_quantizable_modules
from onediff_quant.utils import get_quantize_module
from onediff_quant import Quantizer
@@ -90,7 +89,6 @@ def apply_quantization_to_modules(quantizable_modules):
fake_quant=False,
static=False,
nbits=bits,
- convert_fn=maybe_allow_in_graph,
)

modify_sub_module(model, sub_module_name, sub_mod)
@@ -112,4 +110,4 @@ def apply_quantization_to_modules(quantizable_modules):
+ f"Time: {time.time() - start_time:.4f}s \n"
)

- return model
+ return model
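
Two of the deletions above drop a convert_fn=maybe_allow_in_graph keyword argument rather than an import. A hypothetical sketch of that callback pattern, with invented names (build_quantized_linear is not part of the onediff_quant API; it only illustrates how such a hook is typically threaded through):

import torch.nn as nn
from torch._dynamo import allow_in_graph as maybe_allow_in_graph

def build_quantized_linear(mod: nn.Linear, nbits: int = 8, convert_fn=None):
    # Stand-in for a real quantized replacement; a real helper would pack the
    # weights into nbits-wide integers here.
    quantized = nn.Linear(mod.in_features, mod.out_features, bias=mod.bias is not None)
    if convert_fn is not None:
        quantized = convert_fn(quantized)  # the hook this commit stops passing
    return quantized

# Call style before this commit (callback supplied) and after (callback omitted):
old_style = build_quantized_linear(nn.Linear(8, 8), convert_fn=maybe_allow_in_graph)
new_style = build_quantized_linear(nn.Linear(8, 8))
print(type(old_style).__name__, type(new_style).__name__)

With the argument removed, callers simply omit the hook and the helper returns its module unmodified.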
