@@ -10,23 +10,41 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
specific language governing permissions and limitations under the License.
-->
+ # Text-Guided Image-Inpainting
+ The [`StableDiffusionInpaintPipeline`] lets you edit specific parts of an image by providing a mask and text prompt.
- # Quicktour
+ ```python
+ from io import BytesIO
- Start using Diffusers🧨 quickly!
- To start, use the [`DiffusionPipeline`] for quick inference and sample generations!
+ import torch
+ from torch import autocast
+ import requests
+ import PIL
+
+ from diffusers import StableDiffusionInpaintPipeline
- ```
- pip install diffusers
- ```
- ## Main classes
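+ # Helper to download an image from a URL and return it as an RGB PIL image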
+ def download_image(url):
+     response = requests.get(url)
+     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
- ### Models
- ### Schedulers
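+ # Example image and mask from the CompVis/latent-diffusion repository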
+ img_url = " https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
33
+ mask_url = " https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
- ### Pipeliens
+ init_image = download_image(img_url).resize((512, 512))
+ mask_image = download_image(mask_url).resize((512, 512))
+ device = " cuda"
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(
+ " CompVis/stable-diffusion-v1-4" , revision = " fp16" , torch_dtype =torch.float16, use_auth_token =True
+ ).to(device)
+
+ prompt = " a cat sitting on a bench"
+ with autocast("cuda"):
+     images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images
+
+ images[0].save("cat_on_bench.png")
+ ```
+ You can also run this example in [this Colab notebook](https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/in_painting_with_stable_diffusion_using_diffusers.ipynb).