Commit

Refine manual seed (#877)
This PR does the following:

- [x] Refine the test scripts: fix the seed before warmup and again before the final image generation, so that the warmup results are consistent and the final generated images are reproducible (see the sketch below).
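
For reference, here is a minimal sketch of the seeding pattern the changed scripts now follow. It assumes a diffusers-style `pipe` and an `args` namespace with `seed`, `warmup`, `prompt`, `height`, `width`, and `steps`, as in the examples below:

```python
import torch

# Re-seed before every warmup iteration so each warmup run produces the same result.
for _ in range(args.warmup):
    torch.manual_seed(args.seed)
    images = pipe(
        args.prompt, height=args.height, width=args.width, num_inference_steps=args.steps
    ).images

# Re-seed once more right before the final generation so the saved image is
# reproducible and does not depend on how many warmup iterations ran.
torch.manual_seed(args.seed)
images = pipe(
    args.prompt, height=args.height, width=args.width, num_inference_steps=args.steps
).images
```

Calling `torch.manual_seed` inside the loop, rather than once before it, is what keeps every warmup iteration deterministic.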

---------

Co-authored-by: Xiaoyu Xu <xiaoyulink@gmail.com>
lixiang007666 and strint authored May 15, 2024
1 parent 5619461 commit f5168ae
Showing 12 changed files with 19 additions and 10 deletions.
1 change: 1 addition & 0 deletions .github/workflows/examples.yml
@@ -239,6 +239,7 @@ jobs:
# run: docker exec -w /src/onediff/onediff_diffusers_extensions -e ONEFLOW_MLIR_ENABLE_INFERENCE_OPTIMIZATION=0 ${{ env.CONTAINER_NAME }} python3 examples/text_to_image_sdxl_reuse_pipe.py --base /share_nfs/hf_models/stable-diffusion-xl-base-1.0 --new_base /share_nfs/hf_models/dataautogpt3-OpenDalleV1.1
- if: matrix.test-suite == 'diffusers_examples' && startsWith(matrix.image, 'onediff-pro')
  run: |
    docker exec -w /src/onediff ${{ env.CONTAINER_NAME }} python3 onediff_diffusers_extensions/examples/text_to_image_sd_enterprise.py --model /share_nfs/hf_models/stable-diffusion-v1-5-int8 --width 512 --height 512 --saved_image /src/onediff/output_enterprise_sd.png
    docker exec -w /src/onediff ${{ env.CONTAINER_NAME }} python3 tests/test_quantitative_quality.py
- name: Shutdown docker for ComfyUI Test
2 changes: 1 addition & 1 deletion onediff_diffusers_extensions/examples/text_to_image.py
@@ -46,12 +46,12 @@ def parse_args():
prompt = args.prompt
with flow.autocast("cuda"):
    for _ in range(args.warmup):
        torch.manual_seed(args.seed)
        images = pipe(
            prompt, height=args.height, width=args.width, num_inference_steps=args.steps
        ).images

    torch.manual_seed(args.seed)

    images = pipe(
        prompt, height=args.height, width=args.width, num_inference_steps=args.steps
    ).images
@@ -68,6 +68,7 @@
# Warmup with chosen resolutions
for resolution in resolutions:
    for i in range(args.warmup):
        torch.manual_seed(args.seed)
        image = base(
            prompt=args.prompt,
            height=resolution[0],
@@ -134,8 +134,6 @@ def parse_args():
pipe.upcast_vae()
pipe.vae.decoder = oneflow_compile(pipe.vae.decoder, options=compile_options)

torch.manual_seed(args.seed)

if args.load_graph:
    print("Loading graphs to avoid compilation...")
    start_t = time.time()
@@ -146,18 +144,21 @@ def parse_args():
print(f"warmup with loading graph elapsed: {end_t - start_t} s")
start_t = time.time()
for _ in range(args.warmup):
torch.manual_seed(args.seed)
image = pipe(**infer_args).images[0]
end_t = time.time()
print(f"warmup with run elapsed: {end_t - start_t} s")
else:
start_t = time.time()
for _ in range(args.warmup):
torch.manual_seed(args.seed)
image = pipe(**infer_args).images[0]
end_t = time.time()
print(f"warmup with run elapsed: {end_t - start_t} s")

start_t = time.time()

torch.manual_seed(args.seed)
torch.cuda.cudart().cudaProfilerStart()
image = pipe(**infer_args).images[0]
torch.cuda.cudart().cudaProfilerStop()
2 changes: 1 addition & 1 deletion onediff_diffusers_extensions/examples/text_to_image_lcm.py
@@ -65,6 +65,7 @@ def parse_args():
pipe.unet = oneflow_compile(pipe.unet)

for _ in range(args.warmup):
    torch.manual_seed(args.seed)
    images = pipe(
        args.prompt,
        height=args.height,
@@ -73,7 +74,6 @@ def parse_args():
    ).images

torch.manual_seed(args.seed)

images = pipe(
    args.prompt, height=args.height, width=args.width, num_inference_steps=args.steps
).images
@@ -82,6 +82,7 @@ def parse_args():
    exit(1)

for _ in range(args.warmup):
    torch.manual_seed(args.seed)
    images = pipe(
        args.prompt,
        height=args.height,
@@ -90,7 +91,6 @@ def parse_args():
    ).images

torch.manual_seed(args.seed)

images = pipe(
    args.prompt, height=args.height, width=args.width, num_inference_steps=args.steps
).images
@@ -108,7 +108,7 @@ def main():
"linear_mae_threshold": args.linear_mae_threshold,
"conv_compute_density_threshold": args.conv_compute_density_threshold,
"linear_compute_density_threshold": args.linear_compute_density_threshold})

torch.manual_seed(args.seed)
# Warm-up
pipe(prompt=args.prompt, num_inference_steps=1)

@@ -102,7 +102,6 @@ def parse_args():
pipe.unet = oneflow_compile(pipe.unet, options=compile_options)
pipe.vae.decoder = oneflow_compile(pipe.vae.decoder, options=compile_options)

torch.manual_seed(args.seed)

if args.load_graph:
    print("Loading graphs to avoid compilation...")
@@ -113,18 +112,21 @@ def parse_args():
print(f"warmup with loading graph elapsed: {end_t - start_t} s")
start_t = time.time()
for _ in range(args.warmup):
torch.manual_seed(args.seed)
image = pipe(**infer_args).images[0]
end_t = time.time()
print(f"warmup with run elapsed: {end_t - start_t} s")
else:
start_t = time.time()
for _ in range(args.warmup):
torch.manual_seed(args.seed)
image = pipe(**infer_args).images[0]
end_t = time.time()
print(f"warmup with run elapsed: {end_t - start_t} s")

start_t = time.time()

torch.manual_seed(args.seed)
torch.cuda.cudart().cudaProfilerStart()
image = pipe(**infer_args).images[0]
torch.cuda.cudart().cudaProfilerStop()
@@ -104,7 +104,6 @@ def parse_args():
pipe.unet = oneflow_compile(pipe.unet, options=compile_options)
pipe.vae.decoder = oneflow_compile(pipe.vae.decoder, options=compile_options)

torch.manual_seed(args.seed)

if args.load_graph:
    print("Loading graphs to avoid compilation...")
@@ -115,18 +114,21 @@ def parse_args():
print(f"warmup with loading graph elapsed: {end_t - start_t} s")
start_t = time.time()
for _ in range(args.warmup):
torch.manual_seed(args.seed)
image = pipe(**infer_args).images[0]
end_t = time.time()
print(f"warmup with run elapsed: {end_t - start_t} s")
else:
start_t = time.time()
for _ in range(args.warmup):
torch.manual_seed(args.seed)
image = pipe(**infer_args).images[0]
end_t = time.time()
print(f"warmup with run elapsed: {end_t - start_t} s")

start_t = time.time()

torch.manual_seed(args.seed)
torch.cuda.cudart().cudaProfilerStart()
image = pipe(**infer_args).images[0]
torch.cuda.cudart().cudaProfilerStop()
@@ -101,6 +101,7 @@
new_base.to("cuda")

print("New base running by torch backend")
torch.manual_seed(args.seed)
image = new_base(
    prompt=args.prompt,
    height=args.height,
@@ -128,6 +129,7 @@

# Normal SDXL run
print("Re-use the compiled graph")
torch.manual_seed(args.seed)
image = new_base(
    prompt=args.prompt,
    height=args.height,
@@ -49,6 +49,7 @@

# Warmup
for i in range(args.warmup):
    torch.manual_seed(args.seed)
    image = base(
        prompt=args.prompt,
        height=args.height,
@@ -60,7 +61,6 @@

# Normal SDXL turbo run
torch.manual_seed(args.seed)

start_t = time.time()

image = base(
2 changes: 1 addition & 1 deletion tests/test_quantitative_quality.py
@@ -3,7 +3,7 @@
import numpy as np
import unittest

class QuantizeQuality(unittest.TestCase):

    def test_validate(self):
        image1 = np.array(Image.open('/share_nfs/civitai/20240407-163408.jpg').convert('RGB'))
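
The fragment above only shows a reference image being loaded as an RGB array. As a purely hypothetical sketch (not the actual assertions in tests/test_quantitative_quality.py, which this diff truncates), a pixel-level check of the CI-generated image could look like the following; the generated-image path is taken from the --saved_image flag in the workflow change above, while the threshold value and the assumption that both images share the same resolution are invented for illustration:

```python
import numpy as np
from PIL import Image

# Hypothetical sketch only; the real test's second image and threshold are not shown in this diff.
reference = np.array(
    Image.open('/share_nfs/civitai/20240407-163408.jpg').convert('RGB'), dtype=np.float32
)
generated = np.array(
    Image.open('/src/onediff/output_enterprise_sd.png').convert('RGB'), dtype=np.float32
)

# Mean absolute error over all pixels (assumes both images have the same size).
mae = np.abs(reference - generated).mean()
assert mae < 20.0, f"quantitative quality regression: MAE={mae:.2f}"
```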
