diff --git a/Stable-Diffusion-local-Windows.ipynb b/Stable-Diffusion-local-Windows.ipynb
index f4cea1503d4..1c5e90dcadf 100644
--- a/Stable-Diffusion-local-Windows.ipynb
+++ b/Stable-Diffusion-local-Windows.ipynb
@@ -65,25 +65,31 @@
     "imageio-ffmpeg==0.4.2\n",
     "imageio==2.9.0\n",
     "kornia==0.6.0\n",
+    "# pip will resolve the version which matches torch\n",
+    "numpy\n",
     "omegaconf==2.1.1\n",
     "opencv-python==4.6.0.66\n",
     "pillow==9.2.0\n",
+    "pip>=22\n",
     "pudb==2019.2\n",
     "pytorch-lightning==1.4.2\n",
     "streamlit==1.12.0\n",
-    "# Regular \"taming-transformers\" doesn't seem to work\n",
+    "# \"CompVis/taming-transformers\" doesn't work\n",
+    "# ldm\\models\\autoencoder.py\", line 6, in \n",
+    "# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer\n",
+    "# ModuleNotFoundError\n",
     "taming-transformers-rom1504==0.0.6\n",
     "test-tube>=0.7.5\n",
     "torch-fidelity==0.3.0\n",
     "torchmetrics==0.6.0\n",
-    "torchvision==0.12.0\n",
     "transformers==4.19.2\n",
     "git+https://github.com/openai/CLIP.git@main#egg=clip\n",
     "git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion\n",
     "# No CUDA in PyPi builds\n",
-    "torch@https://download.pytorch.org/whl/cu113/torch-1.11.0%2Bcu113-cp310-cp310-win_amd64.whl\n",
-    "# No MKL in PyPi builds (faster, more robust than OpenBLAS)\n",
-    "numpy@https://download.lfd.uci.edu/pythonlibs/archived/numpy-1.22.4+mkl-cp310-cp310-win_amd64.whl\n",
+    "--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org\n",
+    "torch==1.11.0\n",
+    "# Same as numpy - let pip do its thing\n",
+    "torchvision\n",
     "-e .\n"
    ]
   },
diff --git a/requirements-lin.txt b/requirements-lin.txt
new file mode 100644
index 00000000000..9c3d1e7451c
--- /dev/null
+++ b/requirements-lin.txt
@@ -0,0 +1,32 @@
+albumentations==0.4.3
+einops==0.3.0
+huggingface-hub==0.8.1
+imageio-ffmpeg==0.4.2
+imageio==2.9.0
+kornia==0.6.0
+# pip will resolve the version which matches torch
+numpy
+omegaconf==2.1.1
+opencv-python==4.6.0.66
+pillow==9.2.0
+pip>=22
+pudb==2019.2
+pytorch-lightning==1.4.2
+streamlit==1.12.0
+# "CompVis/taming-transformers" doesn't work
+# ldm\models\autoencoder.py", line 6, in
+# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+# ModuleNotFoundError
+taming-transformers-rom1504==0.0.6
+test-tube>=0.7.5
+torch-fidelity==0.3.0
+torchmetrics==0.6.0
+transformers==4.19.2
+git+https://github.com/openai/CLIP.git@main#egg=clip
+git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
+# No CUDA in PyPi builds
+--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
+torch==1.11.0
+# Same as numpy - let pip do its thing
+torchvision
+-e .
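The comment block added above records why taming-transformers-rom1504 is pinned: with the previously referenced package, ldm/models/autoencoder.py fails at line 6 with a ModuleNotFoundError on the import shown. A quick post-install smoke test for exactly that import (an illustrative command, not taken from the patch; it assumes the environment built from one of these requirements files is active):

    python -c "from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer"

If this exits without output, the replacement package provides the module the traceback excerpt complains about.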
diff --git a/requirements.txt b/requirements-mac.txt
similarity index 100%
rename from requirements.txt
rename to requirements-mac.txt
diff --git a/requirements-win.txt b/requirements-win.txt
new file mode 100644
index 00000000000..9c3d1e7451c
--- /dev/null
+++ b/requirements-win.txt
@@ -0,0 +1,32 @@
+albumentations==0.4.3
+einops==0.3.0
+huggingface-hub==0.8.1
+imageio-ffmpeg==0.4.2
+imageio==2.9.0
+kornia==0.6.0
+# pip will resolve the version which matches torch
+numpy
+omegaconf==2.1.1
+opencv-python==4.6.0.66
+pillow==9.2.0
+pip>=22
+pudb==2019.2
+pytorch-lightning==1.4.2
+streamlit==1.12.0
+# "CompVis/taming-transformers" doesn't work
+# ldm\models\autoencoder.py", line 6, in
+# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+# ModuleNotFoundError
+taming-transformers-rom1504==0.0.6
+test-tube>=0.7.5
+torch-fidelity==0.3.0
+torchmetrics==0.6.0
+transformers==4.19.2
+git+https://github.com/openai/CLIP.git@main#egg=clip
+git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
+# No CUDA in PyPi builds
+--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
+torch==1.11.0
+# Same as numpy - let pip do its thing
+torchvision
+-e .
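All three platform files are plain pip requirements, and pip honors the --extra-index-url line from inside the file, so torch 1.11.0 resolves from the cu113 wheel index instead of the CPU-only PyPI build. A minimal sketch of how they might be consumed, assuming a fresh virtual environment created in the repository root (the trailing -e . installs the checkout itself in editable mode, so run pip from the repo root; the repository's own documentation may prescribe a different workflow):

    python -m venv .venv
    .venv\Scripts\activate                          # Windows; on Linux: source .venv/bin/activate
    python -m pip install --upgrade pip
    python -m pip install -r requirements-win.txt   # or requirements-lin.txt / requirements-mac.txt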