Skip to content

Commit

Permalink
chore: fix flake8 errors
Browse files Browse the repository at this point in the history
This commit fixes flake8 errors that were introduced into the codebase
over the past few months.
  • Loading branch information
rickstaa committed Aug 16, 2024
1 parent 76ba0fe commit bdcbbc1
Show file tree
Hide file tree
Showing 13 changed files with 97 additions and 44 deletions.
2 changes: 2 additions & 0 deletions .flake8
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
[flake8]
max-line-length = 88
extend-ignore = E203,E701
per-file-ignores =
__init__.py: F401
2 changes: 1 addition & 1 deletion runner/app/pipelines/optim/deepcache.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""This module provides a function to enable DeepCache optimization for the pipeline.
For more information, see the DeepCache project on GitHub: https://github.com/horseee/DeepCache
"""
""" # noqa: E501

import logging

Expand Down
6 changes: 3 additions & 3 deletions runner/app/pipelines/optim/sfast.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""This module provides a function to enable StableFast optimization for the pipeline.
For more information, see the stable-fast project on GitHub: https://github.com/chengzeyi/stable-fast
"""
""" # noqa: E501

import logging

Expand All @@ -25,13 +25,13 @@ def compile_model(pipe):
# NOTE: Disable Triton if kernel generation, compilation, and fine-tuning are slow,
# especially due to insufficient GPU VRAM or outdated architecture.
try:
import xformers
import xformers # noqa: F401

config.enable_xformers = True
except ImportError:
logger.info("xformers not installed, skip")
try:
import triton
import triton # noqa: F401

config.enable_triton = True
except ImportError:
Expand Down
4 changes: 3 additions & 1 deletion runner/app/pipelines/utils/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
"""This module contains several utility functions that are used across the pipelines module."""
"""This module contains several utility functions that are used across the pipelines
module.
"""

from app.pipelines.utils.utils import (
SafetyChecker,
Expand Down
4 changes: 3 additions & 1 deletion runner/app/pipelines/utils/audio.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
"""This module provides functionality for converting audio files between different formats."""
"""This module provides functionality for converting audio files between different
formats.
"""

from io import BytesIO

Expand Down
3 changes: 2 additions & 1 deletion runner/app/pipelines/utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,8 @@ def split_prompt(
separator (str): The character used to split the input prompt. Defaults to '|'.
key_prefix (str): Prefix for keys in the returned dictionary for all prompts,
including the main prompt. Defaults to 'prompt'.
max_splits (int): Maximum number of splits to perform. Defaults to -1 (no limit).
max_splits (int): Maximum number of splits to perform. Defaults to -1 (no
limit).
Returns:
Dict[str, str]: A dictionary of all prompts, including the main prompt.
Expand Down
29 changes: 23 additions & 6 deletions runner/app/routes/image_to_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,38 +50,55 @@ async def image_to_image(
strength: Annotated[
float,
Form(
description="Degree of transformation applied to the reference image (0 to 1)."
description=(
"Degree of transformation applied to the reference image (0 to 1)."
)
),
] = 0.8,
guidance_scale: Annotated[
float,
Form(
description="Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality)."
description=(
"Encourages model to generate images closely linked to the text prompt "
"(higher values may reduce image quality)."
)
),
] = 7.5,
image_guidance_scale: Annotated[
float,
Form(
description="Degree to which the generated image is pushed towards the initial image."
description=(
"Degree to which the generated image is pushed towards the initial "
"image."
)
),
] = 1.5,
negative_prompt: Annotated[
str,
Form(
description="Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1."
description=(
"Text prompt(s) to guide what to exclude from image generation. "
"Ignored if guidance_scale < 1."
)
),
] = "",
safety_check: Annotated[
bool,
Form(
description="Perform a safety check to estimate if generated images could be offensive or harmful."
description=(
"Perform a safety check to estimate if generated images could be "
"offensive or harmful."
)
),
] = True,
seed: Annotated[int, Form(description="Seed for random number generation.")] = None,
num_inference_steps: Annotated[
int,
Form(
description="Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength."
description=(
"Number of denoising steps. More steps usually lead to higher quality "
"images but slower inference. Modulated by strength."
)
),
] = 100, # NOTE: Hardcoded due to varying pipeline values.
num_images_per_prompt: Annotated[
Expand Down
20 changes: 16 additions & 4 deletions runner/app/routes/image_to_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,26 +53,38 @@ async def image_to_video(
motion_bucket_id: Annotated[
int,
Form(
description="Used for conditioning the amount of motion for the generation. The higher the number the more motion will be in the video."
description=(
"Used for conditioning the amount of motion for the generation. The "
"higher the number the more motion will be in the video."
)
),
] = 127,
noise_aug_strength: Annotated[
float,
Form(
description="Amount of noise added to the conditioning image. Higher values reduce resemblance to the conditioning image and increase motion."
description=(
"Amount of noise added to the conditioning image. Higher values reduce "
"resemblance to the conditioning image and increase motion."
)
),
] = 0.02,
safety_check: Annotated[
bool,
Form(
description="Perform a safety check to estimate if generated images could be offensive or harmful."
description=(
"Perform a safety check to estimate if generated images could be "
"offensive or harmful."
)
),
] = True,
seed: Annotated[int, Form(description="Seed for random number generation.")] = None,
num_inference_steps: Annotated[
int,
Form(
description="Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength."
description=(
"Number of denoising steps. More steps usually lead to higher quality "
"images but slower inference. Modulated by strength."
)
),
] = 25, # NOTE: Hardcoded due to varying pipeline values.
pipeline: Pipeline = Depends(get_pipeline),
Expand Down
25 changes: 20 additions & 5 deletions runner/app/routes/text_to_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,10 @@ class TextToImageParams(BaseModel):
prompt: Annotated[
str,
Field(
description="Text prompt(s) to guide image generation. Separate multiple prompts with '|' if supported by the model."
description=(
"Text prompt(s) to guide image generation. Separate multiple prompts "
"with '|' if supported by the model."
)
),
]
height: Annotated[
Expand All @@ -43,21 +46,30 @@ class TextToImageParams(BaseModel):
float,
Field(
default=7.5,
description="Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality).",
description=(
"Encourages model to generate images closely linked to the text prompt "
"(higher values may reduce image quality)."
),
),
]
negative_prompt: Annotated[
str,
Field(
default="",
description="Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1.",
description=(
"Text prompt(s) to guide what to exclude from image generation. "
"Ignored if guidance_scale < 1."
),
),
]
safety_check: Annotated[
bool,
Field(
default=True,
description="Perform a safety check to estimate if generated images could be offensive or harmful.",
description=(
"Perform a safety check to estimate if generated images could be "
"offensive or harmful."
),
),
]
seed: Annotated[
Expand All @@ -67,7 +79,10 @@ class TextToImageParams(BaseModel):
int,
Field(
default=50,
description="Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.",
description=(
"Number of denoising steps. More steps usually lead to higher quality "
"images but slower inference. Modulated by strength."
),
),
]
num_images_per_prompt: Annotated[
Expand Down
10 changes: 8 additions & 2 deletions runner/app/routes/upscale.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,14 +50,20 @@ async def upscale(
safety_check: Annotated[
bool,
Form(
description="Perform a safety check to estimate if generated images could be offensive or harmful."
description=(
"Perform a safety check to estimate if generated images could be "
"offensive or harmful."
)
),
] = True,
seed: Annotated[int, Form(description="Seed for random number generation.")] = None,
num_inference_steps: Annotated[
int,
Form(
description="Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength."
description=(
"Number of denoising steps. More steps usually lead to higher quality "
"images but slower inference. Modulated by strength."
)
),
] = 75, # NOTE: Hardcoded due to varying pipeline values.
pipeline: Pipeline = Depends(get_pipeline),
Expand Down
21 changes: 14 additions & 7 deletions runner/bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,13 +67,16 @@ def bench_pipeline(

print(f"inference {i} {batch_size=} time: {inference_time[i]:.3f}s")
print(
f"inference {i} {batch_size=} time per output: {inference_time_per_output[i]:.3f}s"
f"inference {i} {batch_size=} time per output: "
f"{inference_time_per_output[i]:.3f}s"
)
print(
f"inference {i} {batch_size=} max GPU memory allocated: {max_mem_allocated[i]:.3f}GiB"
f"inference {i} {batch_size=} max GPU memory allocated: "
f"{max_mem_allocated[i]:.3f}GiB"
)
print(
f"inference {i} {batch_size=} max GPU memory reserved: {max_mem_reserved[i]:.3f}GiB"
f"inference {i} {batch_size=} max GPU memory reserved: "
f"{max_mem_reserved[i]:.3f}GiB"
)

return BenchMetrics(
Expand Down Expand Up @@ -116,7 +119,8 @@ def bench_pipeline(
args = parser.parse_args()

print(
f"{args.pipeline=} {args.model_id=} {args.runs=} {args.batch_size=} {args.num_inference_steps=}"
f"{args.pipeline=} {args.model_id=} {args.runs=} {args.batch_size=} "
f"{args.num_inference_steps=}"
)

start = time()
Expand Down Expand Up @@ -150,13 +154,16 @@ def bench_pipeline(
if os.getenv("SFAST", "").strip().lower() == "true":
print(f"avg warmup inference time: {warmup_metrics.inference_time:.3f}s")
print(
f"avg warmup inference time per output: {warmup_metrics.inference_time_per_output:.3f}s"
f"avg warmup inference time per output: "
f"{warmup_metrics.inference_time_per_output:.3f}s"
)
print(
f"avg warmup inference max GPU memory allocated: {warmup_metrics.max_mem_allocated:.3f}GiB"
f"avg warmup inference max GPU memory allocated: "
f"{warmup_metrics.max_mem_allocated:.3f}GiB"
)
print(
f"avg warmup inference max GPU memory reserved: {warmup_metrics.max_mem_reserved:.3f}GiB"
f"avg warmup inference max GPU memory reserved: "
f"{warmup_metrics.max_mem_reserved:.3f}GiB"
)

print(f"avg inference time: {metrics.inference_time:.3f}s")
Expand Down
11 changes: 0 additions & 11 deletions runner/gateway.openapi.json
Original file line number Diff line number Diff line change
Expand Up @@ -639,17 +639,6 @@
"type": "object",
"title": "HTTPValidationError"
},
"HealthCheck": {
"properties": {
"status": {
"type": "string",
"title": "Status",
"default": "OK"
}
},
"type": "object",
"title": "HealthCheck"
},
"ImageResponse": {
"properties": {
"images": {
Expand Down
4 changes: 2 additions & 2 deletions runner/gen_openapi.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import argparse
import copy
import json
import os

import yaml
from app.main import app, use_route_names_as_operation_ids
Expand Down Expand Up @@ -41,8 +40,9 @@ def translate_to_gateway(openapi):
Returns:
dict: The translated OpenAPI schema.
"""
# Remove 'health' endpoint
# Remove 'health' related endpoints and schemas.
openapi["paths"].pop("/health")
openapi["components"]["schemas"].pop("HealthCheck")

# Enforce 'model_id' in all endpoints
for _, methods in openapi["paths"].items():
Expand Down

0 comments on commit bdcbbc1

Please sign in to comment.