Fix IPAdapter Style & Composition SDXL Support #927

Merged · 10 commits · Jun 5, 2024
@@ -37,7 +37,9 @@ def split_patch_kwargs(patch_kwargs):
     split1dict = {}
     split2dict = {}
     for k, v in patch_kwargs.items():
-        if k in ["cond", "uncond", "mask", "weight"]:
+        if k in ["cond", "cond_alt", "uncond", "mask", "weight"] or isinstance(
+            v, torch.Tensor
+        ):
             split1dict[k] = v
         else:
             split2dict[k] = v
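For readers outside the diff: the revised predicate routes any kwarg that is tensor-valued, plus the new "cond_alt" key used by style/composition weighting, into the first dict, while plain Python values land in the second. A minimal self-contained sketch of that behavior follows; the sample keys `sigma_start` and `unfold_batch` are hypothetical stand-ins, not taken from this PR.

```python
# Minimal sketch mirroring the patched predicate; sample keys are hypothetical.
import torch

def split_patch_kwargs(patch_kwargs):
    """Split patch kwargs into tensor-valued and plain-Python dicts."""
    split1dict, split2dict = {}, {}
    for k, v in patch_kwargs.items():
        if k in ["cond", "cond_alt", "uncond", "mask", "weight"] or isinstance(
            v, torch.Tensor
        ):
            split1dict[k] = v
        else:
            split2dict[k] = v
    return split1dict, split2dict

tensors, plain = split_patch_kwargs(
    {
        "cond_alt": torch.zeros(1, 4),  # new key handled by this PR
        "mask": torch.ones(1, 4),
        "sigma_start": 0.0,             # hypothetical non-tensor option
        "unfold_batch": False,          # hypothetical non-tensor option
    }
)
assert set(tensors) == {"cond_alt", "mask"}
assert set(plain) == {"sigma_start", "unfold_batch"}
```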
src/onediff/infer_compiler/backends/oneflow/args_tree_util.py (40 changes: 35 additions & 5 deletions)
@@ -42,12 +42,42 @@ def wrapper(self: "OneflowDeployableModule", *args, **kwargs):
             and self._deployable_module_dpl_graph is not None
             and self._deployable_module_input_structure_key != input_structure_key
         ):
-            logger.warning(
-                "Input structure key has changed. Resetting the deployable module graph."
-            )
-            self._deployable_module_dpl_graph = None
-            self._load_graph_first_run = True
-            self._deployable_module_input_structure_key = None
+            # Retrieve a previously compiled graph for this input structure key
+            dpl_graph = self._deployable_module_graph_cache.get(
+                input_structure_key, None
+            )
+
+            # Cache the current graph if its structure key is not stored yet
+            if (
+                self._deployable_module_input_structure_key
+                not in self._deployable_module_graph_cache
+            ):
+                current_cache_size = len(self._deployable_module_graph_cache)
+                max_cached_graph_size = (
+                    self._deployable_module_options.max_cached_graph_size
+                )
+
+                # Ensure the cache stays within the configured limit
+                assert current_cache_size < max_cached_graph_size, (
+                    f"Cache size exceeded! Current size: {current_cache_size}, "
+                    f"Maximum allowed size: {max_cached_graph_size}"
+                )
+
+                self._deployable_module_graph_cache[
+                    self._deployable_module_input_structure_key
+                ] = self._deployable_module_dpl_graph
+
+            # Reuse the cached graph if one was found; otherwise reset and recompile
+            if dpl_graph is not None:
+                self._deployable_module_dpl_graph = dpl_graph
+                self._deployable_module_input_structure_key = input_structure_key
+            else:
+                logger.warning(
+                    f"Input structure key changed from {self._deployable_module_input_structure_key} to {input_structure_key}. Resetting the deployable module graph; this may slow down the process."
+                )
+                self._deployable_module_dpl_graph = None
+                self._deployable_module_input_structure_key = None
+                self._load_graph_first_run = True
 
         output = func(self, *mapped_args, **mapped_kwargs)
         return process_output(output)
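The net effect of this hunk is a small graph cache keyed by input structure: before switching away from the current structure key, the wrapper stashes the compiled graph (subject to `max_cached_graph_size`), and it only falls back to the slow reset-and-recompile path when no graph was cached for the incoming key. Below is a standalone sketch of that policy; `GraphCache` and `compile_fn` are hypothetical illustrations, not onediff APIs.

```python
# Standalone sketch of the caching policy above; names are hypothetical.
class GraphCache:
    def __init__(self, max_size: int = 2):
        self.max_size = max_size
        self._cache = {}          # input structure key -> compiled graph
        self.current_key = None
        self.current_graph = None

    def switch(self, new_key, compile_fn):
        if self.current_graph is not None and self.current_key != new_key:
            # Stash the outgoing graph so switching back avoids a recompile.
            if self.current_key not in self._cache:
                assert len(self._cache) < self.max_size, "Cache size exceeded!"
                self._cache[self.current_key] = self.current_graph
            self.current_graph = self._cache.get(new_key)  # None -> reset path
        if self.current_graph is None:
            self.current_graph = compile_fn(new_key)       # slow path: recompile
        self.current_key = new_key
        return self.current_graph

cache = GraphCache()
g1 = cache.switch("shape_a", lambda k: f"graph[{k}]")
g2 = cache.switch("shape_b", lambda k: f"graph[{k}]")
assert cache.switch("shape_a", lambda k: f"graph[{k}]") is g1  # reused, no recompile
```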
@@ -70,6 +70,7 @@ def __init__(
             options if options is not None else OneflowCompileOptions()
         )
         self._deployable_module_dpl_graph = None
+        self._deployable_module_graph_cache = {}
         self._is_raw_deployable_module = True
         self._load_graph_first_run = True
         self._deployable_module_input_structure_key = None

@@ -84,6 +85,9 @@ def from_existing(cls, existing_module, dynamic=True, options=None):
         instance._deployable_module_dpl_graph = (
             existing_module._deployable_module_dpl_graph
         )
+        instance._deployable_module_graph_cache = (
+            existing_module._deployable_module_graph_cache
+        )
         instance._load_graph_first_run = existing_module._load_graph_first_run
         instance._deployable_module_input_structure_key = (
             existing_module._deployable_module_input_structure_key
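One design note on `from_existing`: the cache is copied by reference, not duplicated, so a module re-wrapped from an existing one keeps benefiting from, and contributing to, the graphs the original already compiled. A tiny hypothetical illustration of that aliasing, where `Module` stands in for `OneflowDeployableModule`:

```python
# Hypothetical illustration of the reference sharing in from_existing.
class Module:
    def __init__(self, graph_cache=None):
        self._graph_cache = graph_cache if graph_cache is not None else {}

    @classmethod
    def from_existing(cls, existing):
        # Same dict object, not a copy, mirroring the diff above.
        return cls(graph_cache=existing._graph_cache)

m1 = Module()
m1._graph_cache["shape_a"] = "compiled graph"
m2 = Module.from_existing(m1)
m2._graph_cache["shape_b"] = "another graph"
assert m1._graph_cache is m2._graph_cache  # shared by reference
assert "shape_b" in m1._graph_cache        # updates visible both ways
```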