diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
index 5ed00e14c14f..72287c9f6537 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
@@ -758,7 +758,7 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
 
             idx += 1
 
-    # copied from train_dreambooth_lora_sdxl_advanced.py
+    # Copied from train_dreambooth_lora_sdxl_advanced.py
     def save_embeddings(self, file_path: str):
         assert self.train_ids is not None, "Initialize new tokens before saving embeddings."
         tensors = {}
diff --git a/examples/community/llm_grounded_diffusion.py b/examples/community/llm_grounded_diffusion.py
index 24b0a9d0e209..b25da201ddbe 100644
--- a/examples/community/llm_grounded_diffusion.py
+++ b/examples/community/llm_grounded_diffusion.py
@@ -1524,35 +1524,35 @@ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32
         assert emb.shape == (w.shape[0], embedding_dim)
         return emb
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
     @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
     def guidance_scale(self):
         return self._guidance_scale
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
     @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
     def guidance_rescale(self):
         return self._guidance_rescale
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
     @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
     def clip_skip(self):
         return self._clip_skip
 
     # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
     # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
     # corresponds to doing no classifier free guidance.
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
     @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
     def do_classifier_free_guidance(self):
         return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
     @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
     def cross_attention_kwargs(self):
         return self._cross_attention_kwargs
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
     @property
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
     def num_timesteps(self):
         return self._num_timesteps
diff --git a/src/diffusers/models/controlnet_xs.py b/src/diffusers/models/controlnet_xs.py
index 7c3ade26f1ac..c2a52a351532 100644
--- a/src/diffusers/models/controlnet_xs.py
+++ b/src/diffusers/models/controlnet_xs.py
@@ -851,8 +851,8 @@ def _set_gradient_checkpointing(self, module, value=False):
         if hasattr(module, "gradient_checkpointing"):
             module.gradient_checkpointing = value
 
-    # copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel
     @property
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
     def attn_processors(self) -> Dict[str, AttentionProcessor]:
         r"""
         Returns:
@@ -911,7 +911,7 @@ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
         for name, module in self.named_children():
             fn_recursive_attn_processor(name, module, processor)
 
-    # copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
     def set_default_attn_processor(self):
         """
         Disables custom attention processors and sets the default attention implementation.
@@ -927,7 +927,7 @@ def set_default_attn_processor(self):
 
         self.set_attn_processor(processor)
 
-    # copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu
     def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
         r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
 
@@ -952,7 +952,7 @@ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
             setattr(upsample_block, "b1", b1)
             setattr(upsample_block, "b2", b2)
 
-    # copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu
     def disable_freeu(self):
         """Disables the FreeU mechanism."""
         freeu_keys = {"s1", "s2", "b1", "b2"}
@@ -961,7 +961,7 @@ def disable_freeu(self):
                 if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
                     setattr(upsample_block, k, None)
 
-    # copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
     def fuse_qkv_projections(self):
         """
         Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
@@ -985,7 +985,7 @@ def fuse_qkv_projections(self):
             if isinstance(module, Attention):
                 module.fuse_projections(fuse=True)
 
-    # copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
     def unfuse_qkv_projections(self):
         """Disables the fused QKV projection if enabled.
 
diff --git a/src/diffusers/pipelines/deepfloyd_if/watermark.py b/src/diffusers/pipelines/deepfloyd_if/watermark.py
index ca10413de137..e03e3fab026a 100644
--- a/src/diffusers/pipelines/deepfloyd_if/watermark.py
+++ b/src/diffusers/pipelines/deepfloyd_if/watermark.py
@@ -17,7 +17,7 @@ def __init__(self):
         self.watermark_image_as_pil = None
 
     def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None):
-        # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287
+        # Copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287
         h = images[0].height
         w = images[0].width
 
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_sde.py b/src/diffusers/schedulers/scheduling_dpmsolver_sde.py
index 37c9ab624d05..bea6e5e07543 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_sde.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_sde.py
@@ -370,7 +370,7 @@ def t_fn(_sigma):
         timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sig_proposed])
         return timesteps
 
-    # copied from diffusers.schedulers.scheduling_euler_discrete._sigma_to_t
+    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -394,7 +394,7 @@ def _sigma_to_t(self, sigma, log_sigmas):
         t = t.reshape(sigma.shape)
         return t
 
-    # copied from diffusers.schedulers.scheduling_euler_discrete._convert_to_karras
+    # copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor:
         """Constructs the noise schedule of Karras et al. (2022)."""
 
diff --git a/src/diffusers/schedulers/scheduling_lms_discrete.py b/src/diffusers/schedulers/scheduling_lms_discrete.py
index f96a819db1c6..9595bb4c71ba 100644
--- a/src/diffusers/schedulers/scheduling_lms_discrete.py
+++ b/src/diffusers/schedulers/scheduling_lms_discrete.py
@@ -324,7 +324,7 @@ def _init_step_index(self, timestep):
         else:
             self._step_index = self._begin_index
 
-    # copied from diffusers.schedulers.scheduling_euler_discrete._sigma_to_t
+    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -348,7 +348,7 @@ def _sigma_to_t(self, sigma, log_sigmas):
         t = t.reshape(sigma.shape)
         return t
 
-    # copied from diffusers.schedulers.scheduling_euler_discrete._convert_to_karras
+    # copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor:
         """Constructs the noise schedule of Karras et al. (2022)."""
 
diff --git a/tests/lora/utils.py b/tests/lora/utils.py
index 9aed5defada2..d08a26645602 100644
--- a/tests/lora/utils.py
+++ b/tests/lora/utils.py
@@ -156,7 +156,7 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    # copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb
+    # Copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb
     def get_dummy_tokens(self):
         max_seq_length = 77
 
diff --git a/tests/pipelines/controlnet/test_controlnet_sdxl.py b/tests/pipelines/controlnet/test_controlnet_sdxl.py
index 931e5cc025ca..4194b6510d17 100644
--- a/tests/pipelines/controlnet/test_controlnet_sdxl.py
+++ b/tests/pipelines/controlnet/test_controlnet_sdxl.py
@@ -294,7 +294,7 @@ def test_stable_diffusion_xl_multi_prompts(self):
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
 
-    # copied from test_stable_diffusion_xl.py
+    # Copied from test_stable_diffusion_xl.py
     def test_stable_diffusion_xl_prompt_embeds(self):
         components = self.get_dummy_components()
         sd_pipe = self.pipeline_class(**components)
@@ -375,7 +375,7 @@ def test_controlnet_sdxl_lcm(self):
 
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
-    # copied from test_stable_diffusion_xl.py:test_stable_diffusion_two_xl_mixture_of_denoiser_fast
+    # Copied from test_stable_diffusion_xl.py:test_stable_diffusion_two_xl_mixture_of_denoiser_fast
     # with `StableDiffusionXLControlNetPipeline` instead of `StableDiffusionXLPipeline`
     def test_controlnet_sdxl_two_mixture_of_denoiser_fast(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py b/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py
index 766eea4c0c32..8092e28c33e9 100644
--- a/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py
+++ b/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py
@@ -322,7 +322,7 @@ def test_stable_diffusion_xl_multi_prompts(self):
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
 
-    # copied from test_stable_diffusion_xl.py
+    # Copied from test_stable_diffusion_xl.py
     def test_stable_diffusion_xl_prompt_embeds(self):
         components = self.get_dummy_components()
         sd_pipe = self.pipeline_class(**components)
diff --git a/tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py b/tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py
index ee0d15ec3472..c940504d6c3e 100644
--- a/tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py
+++ b/tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py
@@ -151,7 +151,7 @@ def get_dummy_components(self):
         }
         return components
 
-    # copied from test_controlnet_sdxl.py
+    # Copied from test_controlnet_sdxl.py
     def get_dummy_inputs(self, device, seed=0):
         if str(device).startswith("mps"):
             generator = torch.manual_seed(seed)
@@ -176,24 +176,24 @@ def get_dummy_inputs(self, device, seed=0):
 
         return inputs
 
-    # copied from test_controlnet_sdxl.py
+    # Copied from test_controlnet_sdxl.py
     def test_attention_slicing_forward_pass(self):
         return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
 
-    # copied from test_controlnet_sdxl.py
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",
     )
+    # Copied from test_controlnet_sdxl.py
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
 
-    # copied from test_controlnet_sdxl.py
+    # Copied from test_controlnet_sdxl.py
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
-    # copied from test_controlnet_sdxl.py
     @require_torch_gpu
+    # Copied from test_controlnet_sdxl.py
     def test_stable_diffusion_xl_offloads(self):
         pipes = []
         components = self.get_dummy_components()
@@ -222,7 +222,7 @@ def test_stable_diffusion_xl_offloads(self):
         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
         assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
 
-    # copied from test_controlnet_sdxl.py
+    # Copied from test_controlnet_sdxl.py
     def test_stable_diffusion_xl_multi_prompts(self):
         components = self.get_dummy_components()
         sd_pipe = self.pipeline_class(**components).to(torch_device)
@@ -276,7 +276,7 @@ def test_stable_diffusion_xl_multi_prompts(self):
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
 
-    # copied from test_stable_diffusion_xl.py
+    # Copied from test_stable_diffusion_xl.py
     def test_stable_diffusion_xl_prompt_embeds(self):
         components = self.get_dummy_components()
         sd_pipe = self.pipeline_class(**components)
@@ -315,11 +315,11 @@ def test_stable_diffusion_xl_prompt_embeds(self):
         # make sure that it's equal
         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1.1e-4
 
-    # copied from test_stable_diffusion_xl.py
+    # Copied from test_stable_diffusion_xl.py
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    # copied from test_controlnetxs.py
+    # Copied from test_controlnetxs.py
     def test_to_dtype(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)