

code style
eaidova committed Nov 8, 2024
1 parent 3eb84c0 commit fa46aa9
Showing 3 changed files with 84 additions and 45 deletions.
8 changes: 3 additions & 5 deletions notebooks/multilora-image-generation/gradio_helper.py
@@ -15,7 +15,6 @@ def make_demo(pipeline, generator_cls, adapters, adapters_meta):
adapter_name = adapters_meta[idx]["name"]
adapters_selection[adapter_name] = adapters_meta[idx]
adapters_selection[adapter_name]["adapter"] = adapter


def infer(prompt, seed, randomize_seed, width, height, lora_id, progress=gr.Progress(track_tqdm=True)):
if randomize_seed:
@@ -27,7 +26,7 @@ def infer(prompt, seed, randomize_seed, width, height, lora_id, progress=gr.Prog
adapter_info = adapters_selection[lora_id]
adapter = adapter_info["adapter"]
prompt_template = adapter_info.get("prompt", "<subject>")
alpha = adapter_info.get("weight", 1.0)
alpha = adapter_info.get("weight", 1.0)
adapter_config.add(adapter, alpha)
prompt = prompt_template.replace("<subject>", prompt)

@@ -46,7 +45,7 @@ def infer(prompt, seed, randomize_seed, width, height, lora_id, progress=gr.Prog
with gr.Blocks() as demo:
with gr.Column():
gr.Markdown(
"""
"""
# Image Generation with LoRA and OpenVINO GenAI
1. Provide input generation prompt into prompt window
2. Select one of the predefined adapters (use none for a generation without LoRA)
@@ -68,7 +67,6 @@ def infer(prompt, seed, randomize_seed, width, height, lora_id, progress=gr.Prog

result = gr.Image(label="Result", show_label=False)


with gr.Accordion("Advanced Settings", open=False):
seed = gr.Slider(
label="Seed",
@@ -104,4 +102,4 @@ def infer(prompt, seed, randomize_seed, width, height, lora_id, progress=gr.Prog
outputs=[result, seed],
)

return demo
return demo
46 changes: 20 additions & 26 deletions notebooks/multilora-image-generation/lora_config.py
@@ -1,38 +1,32 @@
LORA = [
{
"model_id": "DoctorDiffusion/doctor-diffusion-s-xray-xl-lora",
"prompt": "xray <subject>",
"file_name": "DD-xray-v1.safetensors",
"weight": 0.8,
"name": "X-ray"
},
{
"model_id": "alvdansen/the-point",
"prompt": "<subject>",
"name": "Point style",
"file_name": "araminta_k_the_point.safetensors",
"weight": 0.6
{
"model_id": "DoctorDiffusion/doctor-diffusion-s-xray-xl-lora",
"prompt": "xray <subject>",
"file_name": "DD-xray-v1.safetensors",
"weight": 0.8,
"name": "X-ray",
},
{"model_id": "alvdansen/the-point", "prompt": "<subject>", "name": "Point style", "file_name": "araminta_k_the_point.safetensors", "weight": 0.6},
{
"model_id": "KappaNeuro/ukiyo-e-art",
"prompt": "an illustration of <subject> in Ukiyo-e Art style",
"negative": "realistic, portrait, 3d",
"file_name": "Ukiyo-e Art.safetensors",
"weight": 0.8,
"name": "Ukiyo-e Style"
"model_id": "KappaNeuro/ukiyo-e-art",
"prompt": "an illustration of <subject> in Ukiyo-e Art style",
"negative": "realistic, portrait, 3d",
"file_name": "Ukiyo-e Art.safetensors",
"weight": 0.8,
"name": "Ukiyo-e Style",
},
{
"model_id": "DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora",
"prompt": "vector <subject>",
"file_name": "DD-vector-v2.safetensors",
"weight": 0.8,
"name": "Vector Art"
},
"model_id": "DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora",
"prompt": "vector <subject>",
"file_name": "DD-vector-v2.safetensors",
"weight": 0.8,
"name": "Vector Art",
},
{
"model_id": "Norod78/sdxl-chalkboarddrawing-lora",
"prompt": "A colorful chalkboard drawing of <subject>",
"name": "Chalkboard drawing",
"file_name": "SDXL_ChalkBoardDrawing_LoRA_r8.safetensors",
"weight": 0.45,
}
},
]
75 changes: 61 additions & 14 deletions notebooks/multilora-image-generation/multilora-image-generation.ipynb
@@ -18,15 +18,40 @@
"* **Content Creation**: Businesses can customize image generation models to produce branded visuals, enhancing marketing and media production.\n",
"* **Entertainment**: Game developers and filmmakers can use fine-tuned models to create realistic and imaginative worlds, streamlining the creative process.\n",
" \n",
"In this tutorial we explore possibilities to use LoRA with OpenVINO Genirative API."
"In this tutorial we explore possibilities to use LoRA with OpenVINO Generative API.\n",
"\n",
"#### Table of contents:\n",
"\n",
"- [Prerequisites](#Prerequisites)\n",
"- [Convert Diffusion Model using Optimum Intel](#Convert-Diffusion-Model-using-Optimum-Intel)\n",
" - [Applying LoRA to Original Diffusers pipeline before conversion](#Applying-LoRA-to-Original-Diffusers-pipeline-before-conversion)\n",
"- [Image Generation using OpenVINO GenAI](#Image-Generation-using-OpenVINO-GenAI)\n",
" - [Integration LoRA into pipeline](#Integration-LoRA-into-pipeline)\n",
" - [Prepare LoRA Adapters](#Prepare-LoRA-Adapters)\n",
" - [Create Inference Pipeline](#Create-Inference-Pipeline)\n",
" - [Selection specific adapter during generation](#Selection-specific-adapter-during-generation)\n",
" - [Use multiple adapters simultaneously](#Use-multiple-adapters-simultaneously)\n",
" - [Disable adapters](#Disable-adapters)\n",
"- [Interactive demo](#Interactive-demo)\n",
"\n",
"\n",
"### Installation Instructions\n",
"\n",
"This is a self-contained example that relies solely on its own code.\n",
"\n",
"We recommend running the notebook in a virtual environment. You only need a Jupyter server to start.\n",
"For details, please refer to [Installation Guide](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/README.md#-installation-guide).\n",
"\n",
"<img referrerpolicy=\"no-referrer-when-downgrade\" src=\"https://static.scarf.sh/a.png?x-pxid=5b5a4db0-7875-4bfb-bdbd-01698b5b1a77&file=notebooks/multilora-image-generation/multilora-image-generation.ipynb\" />\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites"
"## Prerequisites\n",
"[back to top ⬆️](#Table-of-contents:)"
]
},
{
@@ -76,15 +101,19 @@
"metadata": {},
"source": [
"## Convert Diffusion Model using Optimum Intel\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"\n",
"🤗 [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) is the interface between the 🤗 [Transformers](https://huggingface.co/docs/transformers/index) and [Diffusers](https://huggingface.co/docs/diffusers/index) libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. It provides ease-to-use [interface](https://huggingface.co/docs/optimum/intel/openvino/export) for exporting models to [OpenVINO Intermediate Representation (IR)](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html) format.\n",
"\n",
"### Appling LoRA to Original Diffusers pipeline before conversion\n",
"### Applying LoRA to Original Diffusers pipeline before conversion\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"LoRA can be easily added to [Diffusers pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters#lora) before export. At the export stage, LoRA weights will be fused to original model weights and converted model will preserve LoRA provided behaviour. This approach is suitable when you need model with adapter capabilities by default and it does not required configuration at inference time (e.g. changing weight coefficient for adapter).\n",
"\n",
"LoRA can be easily added to [Diffusers pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters#lora) before export. At the export stage, LoRA weights will be fused to original model weights and converted model will preserve LoRA provided behavior. This approach is suitable when you need model with adapter capabilities by default and it does not required configuration at inference time (e.g. changing weight coefficient for adapter).\n",
"For example, we can use this method for speedup generation process with integration [LCM LoRA](https://huggingface.co/blog/lcm_lora). Previously, we already considered with approach in this [tutorial](../latent-consistency-models-image-generation/lcm-lora-controlnet.ipynb).\n",
"\n",
"Using `optimum-cli` for exporting models requires to provide model id on HuggingFace Hub or local directory with saved model. In case, if model sotred in multiple separated repositories or directorues (e.g. you want to replace VAE component or add LoRA), it should be merged and saved on disk before export. For avoiding this, we will use `export_from_model` function that accepts initialized model. Additionally, for using model with OpenVINO GenAI, we need to export tokenizers to OpenVINO format using [OpenVINO Tokenizers](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/ov-tokenizers.html) library.\n",
"Using `optimum-cli` for exporting models requires to provide model id on HuggingFace Hub or local directory with saved model. In case, if model stored in multiple separated repositories or directories (e.g. you want to replace VAE component or add LoRA), it should be merged and saved on disk before export. For avoiding this, we will use `export_from_model` function that accepts initialized model. Additionally, for using model with OpenVINO GenAI, we need to export tokenizers to OpenVINO format using [OpenVINO Tokenizers](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/ov-tokenizers.html) library.\n",
"\n",
"In this tutorial we will use [Stable Diffusion XL](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model, but the same steps are also applicable to other models of Stable Diffusion family."
]
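For reference, the fuse-and-export flow described above can be sketched roughly as follows. This is not the notebook's exact code: the LCM LoRA repo id, the output directory name, and the tokenizer handling here are illustrative assumptions.

```python
from pathlib import Path

import openvino as ov
from diffusers import DiffusionPipeline
from openvino_tokenizers import convert_tokenizer
from optimum.exporters.openvino import export_from_model

model_dir = Path("sdxl-lcm-lora-ov")  # assumed output directory name

if not model_dir.exists():
    # Load the base SDXL pipeline and fuse an LCM LoRA, so the adapter weights
    # become part of the exported model and need no configuration at inference.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
    pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
    pipe.fuse_lora()

    # Export the in-memory pipeline to OpenVINO IR without saving the merged
    # Diffusers pipeline to disk first; the task is inferred from the model.
    export_from_model(pipe, output=model_dir)

    # OpenVINO GenAI also expects tokenizers in OpenVINO format
    # (SDXL has a second tokenizer, which would be handled the same way).
    (model_dir / "tokenizer").mkdir(parents=True, exist_ok=True)
    ov.save_model(convert_tokenizer(pipe.tokenizer), model_dir / "tokenizer" / "openvino_tokenizer.xml")
```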
@@ -143,29 +172,36 @@
"metadata": {},
"source": [
"## Image Generation using OpenVINO GenAI\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"\n",
"[OpenVINO™ GenAI](https://github.com/openvinotoolkit/openvino.genai) is a library of the most popular Generative AI model pipelines, optimized execution methods, and samples that run on top of highly performant [OpenVINO Runtime](https://github.com/openvinotoolkit/openvino).\n",
"\n",
"This library is friendly to PC and laptop execution, and optimized for resource consumption. It requires no external dependencies to run generative models as it already includes all the core functionality.\n",
"\n",
"`openvino_genai.TextToImagePipeline` class supports inference of [Diffuser models](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/docs/SUPPORTED_MODELS.md#text-2-image-models). For pipeline initialization, we should provide directory with converted by Optimum Intel pipeline and specify inference device. Optionally, we can provide configuration for LoRA Adapters using `adapter_config`. For starting generation process `generate` method should be used. Basicly, it required to provide input text prompt for image generation. You can provide additional arguments like negative prompt, number of steps, guidance scale, image width and height to control generation process.\n",
"`openvino_genai.Text2ImagePipeline` class supports inference of [Diffusers models](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/docs/SUPPORTED_MODELS.md#text-2-image-models). For pipeline initialization, we should provide directory with converted by Optimum Intel pipeline and specify inference device. Optionally, we can provide configuration for LoRA Adapters using `adapter_config`. For starting generation process `generate` method should be used. Basically, it required to provide input text prompt for image generation. You can provide additional arguments like negative prompt, number of steps, guidance scale, image width and height to control generation process.\n",
"\n",
"### Integration LoRA into pipeline\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"\n",
"Similarly to Diffusers pipeline, you can store separately and load LoRA into base pipeline before inference using OpenVINO GenAI.\n",
"`openvino_genai.AdapterConfig` serves for adapters management in `openvino_genai.TextToImagePipeline`. It can be used for adding and removing adapters or changing their weight coefficient for blending into pipeline. You can add one or multiple adapters into config and also specify alpha blending conefficients for their addition. OpenVINO GenAI supports LoRA adapters saved in Safetensors format. You can use one of publicly available pretrained adapters from [CivitAI](https://civitai.com/) or [HuggingFace Hub](https://huggingface.co/models) or train your own.\n",
"`openvino_genai.AdapterConfig` serves for adapters management in `openvino_genai.Text2ImagePipeline`. It can be used for adding and removing adapters or changing their weight coefficient for blending into pipeline. You can add one or multiple adapters into config and also specify alpha blending coefficients for their addition. OpenVINO GenAI supports LoRA adapters saved in Safetensors format. You can use one of publicly available pretrained adapters from [CivitAI](https://civitai.com/) or [HuggingFace Hub](https://huggingface.co/models) or train your own.\n",
 **Important Note**">
"> **Important Note**: Before loading pretrained adapters, please make sure that they are compatible with your base model architecture. E.g. if you use an SDXL model, you need to provide adapters trained for this model type; loading an adapter trained, for example, for FLUX is not allowed.\n",
"\n",
"Generally, process of adapters configuration consists of 2 steps:\n",
"1. Register adapters in pipeline constructor. At this moment, it is recommended to provide all adapters that you plan to use on this stage.\n",
"2. Choose which adapter (or a combination of adapters) to apply in each `generate` call. It is not obligated to use all of provided in constructor adapters simulteniously, you can select one or combination of several among them for each generation cycle.\n"
"2. Choose which adapter (or a combination of adapters) to apply in each `generate` call. It is not obligated to use all of provided in constructor adapters simultaneously, you can select one or combination of several among them for each generation cycle.\n"
]
},
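For orientation, here is a minimal sketch of the two-step flow described above. The adapter file paths, alpha values, and model directory are assumptions (the notebook builds its configuration with a `prepare_adapter_config` helper), so treat it as an illustration rather than the notebook's code.

```python
import openvino_genai

# Step 1: wrap the downloaded .safetensors files as adapters and register all
# of them in the pipeline constructor (paths and alphas are assumptions).
xray = openvino_genai.Adapter("adapters/DD-xray-v1.safetensors")
point = openvino_genai.Adapter("adapters/araminta_k_the_point.safetensors")
ukiyoe = openvino_genai.Adapter("adapters/Ukiyo-e Art.safetensors")

adapters_config = openvino_genai.AdapterConfig()
adapters_config.add(xray, 0.8)    # alpha blending coefficient for this adapter
adapters_config.add(point, 0.6)
adapters_config.add(ukiyoe, 0.8)

model_dir = "sdxl-ov"  # assumed directory with the pipeline converted by Optimum Intel
pipe = openvino_genai.Text2ImagePipeline(model_dir, "CPU", adapters=adapters_config)

# Step 2 happens per generate() call: pass an AdapterConfig containing only the
# registered adapters you want applied for that particular generation.
```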
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare LoRA Adapters\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"### Prepare LoRA Adapters"
]
},
@@ -220,6 +256,7 @@
"\n",
" return adapter_config\n",
"\n",
"\n",
"adapters_config = prepare_adapter_config(0.0)\n",
"adapters = adapters_config.get_adapters()"
]
@@ -230,6 +267,8 @@
"metadata": {},
"source": [
"### Create Inference Pipeline\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"\n",
"diffusion process involves random for preparing initial state for denoising. For reproducibility of generation results, we will use `Generator` class. "
]
@@ -268,14 +307,14 @@
"metadata": {},
"outputs": [],
"source": [
"import openvino_genai\n",
"import openvino as ov\n",
"import torch\n",
"\n",
"\n",
"class Generator(openvino_genai.Generator):\n",
" def __init__(self, seed):\n",
" openvino_genai.Generator.__init__(self)\n",
" self.generator = torch.Generator(device='cpu').manual_seed(seed)\n",
" self.generator = torch.Generator(device=\"cpu\").manual_seed(seed)\n",
"\n",
" def next(self):\n",
" return torch.randn(1, generator=self.generator, dtype=torch.float32).item()\n",
@@ -284,6 +323,7 @@
" torch_tensor = torch.randn(list(shape), generator=self.generator, dtype=torch.float32)\n",
" return ov.Tensor(torch_tensor.numpy())\n",
"\n",
"\n",
"pipe = openvino_genai.Text2ImagePipeline(model_dir, \"CPU\", adapters=adapters_config)"
]
},
@@ -293,6 +333,8 @@
"metadata": {},
"source": [
"### Selection specific adapter during generation\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"\n",
"As it was already mention before, it is not necessary to use all adapters specified at initialization stage for generation in the same time. Providing adapters argument with `openvino_genai.AdapterConfig` into `generate` allow to select one or several from them. For example, let's select LoRA for generation images in X-Ray style."
]
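A hedged sketch of what such a call could look like, reusing `pipe` and the `xray` adapter from the sketch above; the prompt, alpha, and step count are illustrative, and the tensor-to-image conversion follows the OpenVINO GenAI text-to-image samples.

```python
from PIL import Image

import openvino_genai

# Apply only the X-ray adapter for this call; other registered adapters stay inactive.
xray_config = openvino_genai.AdapterConfig()
xray_config.add(xray, 0.8)

image_tensor = pipe.generate(
    "xray portrait of a cat",  # the adapter's trigger word is part of the prompt
    adapters=xray_config,
    num_inference_steps=25,
)

# generate() returns an ov.Tensor holding an image batch; convert it for display.
image = Image.fromarray(image_tensor.data[0])
image.save("xray_cat.png")
```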
@@ -343,8 +385,10 @@
"metadata": {},
"source": [
"### Use multiple adapters simultaneously\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"You also can use combination of adapters that will be applied in the same time. Let's see what happens if traditional Japaneese art will meet modern illustration pointilistic style."
"\n",
"You also can use combination of adapters that will be applied in the same time. Let's see what happens if traditional Japanese art will meet modern illustration pointillistic style."
]
},
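A rough sketch of such a combined call, reusing `pipe`, `ukiyoe`, and `point` from the earlier sketch; the alphas and prompt are assumptions.

```python
import openvino_genai

# Blend the Ukiyo-e and point-style adapters in a single generation.
combined = openvino_genai.AdapterConfig()
combined.add(ukiyoe, 0.8)
combined.add(point, 0.6)

image_tensor = pipe.generate(
    "an illustration of a cute cat in Ukiyo-e Art style",
    adapters=combined,
    num_inference_steps=25,
)
```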
{
@@ -394,8 +438,10 @@
"metadata": {},
"source": [
"### Disable adapters\n",
"[back to top ⬆️](#Table-of-contents:)\n",
"\n",
"\n",
"You can disable adapters providing empty AdaptersConfig into generate"
"You can disable adapters providing empty `AdapterConfig` into generate"
]
},
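For instance (a one-call illustration reusing `pipe` from the sketches above):

```python
import openvino_genai

# An empty AdapterConfig switches every registered adapter off for this call,
# so the base model generates without any LoRA influence.
image_tensor = pipe.generate(
    "a cute cat",
    adapters=openvino_genai.AdapterConfig(),
    num_inference_steps=25,
)
```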
{
@@ -435,7 +481,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Interactive demo"
"## Interactive demo\n",
"[back to top ⬆️](#Table-of-contents:)"
]
},
{
@@ -444,7 +491,7 @@
"metadata": {},
"outputs": [],
"source": [
"gradio_helper_path = Path(\"gradio_helper.py\") \n",
"gradio_helper_path = Path(\"gradio_helper.py\")\n",
"\n",
"if not gradio_helper_path.exists():\n",
" r = requests.get(\n",
