From 0bd9208655b3c46b127393e9c1eff7bc61739f3b Mon Sep 17 00:00:00 2001
From: Rob Ballantyne
Date: Mon, 18 Dec 2023 11:50:58 +0000
Subject: [PATCH] Various improvements...

Support PyTorch 2.1.1
Add storagemonitor
Improve default provisioning
Add layer1 build init
---
 .github/workflows/docker-build.yml           | 28 ++++---
 .gitignore                                   |  2 +
 README.md                                    | 77 ++++++++++++++-----
 .../opt/ai-dock/bin/build/layer0/amd.sh      | 13 ++--
 .../opt/ai-dock/bin/build/layer0/clean.sh    | 18 +----
 .../opt/ai-dock/bin/build/layer0/common.sh   | 30 +++-----
 .../opt/ai-dock/bin/build/layer0/cpu.sh      | 13 ++--
 .../opt/ai-dock/bin/build/layer0/init.sh     | 14 ++--
 .../opt/ai-dock/bin/build/layer0/nvidia.sh   | 16 ++--
 .../opt/ai-dock/bin/supervisor-webui.sh      | 11 ++-
 .../ai-dock/storage_monitor/etc/mappings.sh  | 21 +++++
 build/Dockerfile                             | 17 ++--
 config/provisioning/default.sh               | 50 ++++++++++--
 docker-compose.yaml                          | 47 ++++++-----
 14 files changed, 220 insertions(+), 137 deletions(-)
 create mode 100644 build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh

diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
index d3973bc..2156800 100644
--- a/.github/workflows/docker-build.yml
+++ b/.github/workflows/docker-build.yml
@@ -9,12 +9,12 @@ env:
   UBUNTU_VERSION: 22.04
   BUILDX_NO_DEFAULT_ATTESTATIONS: 1
   # Until py3.11 is available on all platforms
-  LATEST_CUDA: "pytorch-2.0.1-py3.10-cuda-11.8.0-base-22.04"
-  LATEST_CUDA_JUPYTER: "jupyter-pytorch-2.0.1-py3.10-cuda-11.8.0-base-22.04"
-  LATEST_ROCM: "pytorch-2.0.1-py3.10-rocm-5.4.2-runtime-22.04"
-  LATEST_ROCM_JUPYTER: "jupyter-pytorch-2.0.1-py3.10-rocm-5.4.2-runtime-22.04"
-  LATEST_CPU: "pytorch-2.0.1-py3.10-cpu-22.04"
-  LATEST_CPU_JUPYTER: "jupyter-pytorch-2.0.1-py3.10-cpu-22.04"
+  LATEST_CUDA: "pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04"
+  LATEST_CUDA_JUPYTER: "jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04"
+  LATEST_ROCM: "pytorch-2.1.1-py3.10-rocm-5.4.2-runtime-22.04"
+  LATEST_ROCM_JUPYTER: "jupyter-pytorch-2.1.1-py3.10-rocm-5.4.2-runtime-22.04"
+  LATEST_CPU: "pytorch-2.1.1-py3.10-cpu-22.04"
+  LATEST_CPU_JUPYTER: "jupyter-pytorch-2.1.1-py3.10-cpu-22.04"
 
 jobs:
   cpu-base:
@@ -29,6 +29,7 @@ jobs:
           - "3.10"
         pytorch:
           - "2.0.1"
+          - "2.1.1"
 
     steps:
       - name: Free Space
@@ -91,12 +92,15 @@ jobs:
           - "3.10"
         pytorch:
           - "2.0.1"
+          - "2.1.1"
         cuda:
-          - "11.7.1"
           - "11.8.0"
+          - "12.1.0"
         level:
           - "base"
-          - "cudnn8-devel"
+        exclude:
+          - cuda: "12.1.0"
+            pytorch: "2.0.1"
 
     steps:
       - name: Free Space
@@ -159,11 +163,17 @@ jobs:
           - "3.10"
         pytorch:
           - "2.0.1"
+          - "2.1.1"
         rocm:
           - "5.4.2"
+          - "5.6"
         level:
           - "runtime"
-          # Templating for future releases
+        exclude:
+          - rocm: "5.4.2"
+            pytorch: "2.1.1"
+          - rocm: "5.6"
+            pytorch: "2.0.1"
 
     steps:
       - name: Free Space
diff --git a/.gitignore b/.gitignore
index 7ae3244..0e5f870 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
 workspace
+*__pycache__
+build/COPY_ROOT_EXTRA/
 config/authorized_keys
 config/rclone
 tpdocs
diff --git a/README.md b/README.md
index 062d54c..d2dd899 100644
--- a/README.md
+++ b/README.md
@@ -48,34 +48,59 @@ Tags follow these patterns:
 
 ##### _CUDA_
 
 - `:pytorch-[pytorch-version]-py[python-version]-cuda-[x.x.x]-base-[ubuntu-version]`
 
-- `:latest-cuda` → `:pytorch-2.0.1-py3.10-cuda-11.8.0-base-22.04`
+- `:latest-cuda` → `:pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04`
 
-- `:latest-cuda-jupyter` → `:jupyter-pytorch-2.0.1-py3.10-cuda-11.8.0-base-22.04`
+- `:latest-cuda-jupyter` → `:jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04`
 
 ##### _ROCm_
 
 - `:pytorch-[pytorch-version]-py[python-version]-rocm-[x.x.x]-runtime-[ubuntu-version]`
 
-- `:latest-rocm` → `:pytorch-2.0.1-py3.10-rocm-5.4.2-runtime-22.04`
+- `:latest-rocm` → `:pytorch-2.1.1-py3.10-rocm-5.4.2-runtime-22.04`
 
-- `:latest-rocm-jupyter` → `:jupyter-pytorch-2.0.1-py3.10-rocm-5.4.2-runtime-22.04`
+- `:latest-rocm-jupyter` → `:jupyter-pytorch-2.1.1-py3.10-rocm-5.4.2-runtime-22.04`
 
 ##### _CPU_
 
 - `:pytorch-[pytorch-version]-py[python-version]-ubuntu-[ubuntu-version]`
 
-- `:latest-cpu` → `:pytorch-2.0.1-py3.10-cpu-22.04`
+- `:latest-cpu` → `:pytorch-2.1.1-py3.10-cpu-22.04`
 
-- `:latest-cpu-jupyter` → `:jupyter-pytorch-2.0.1-py3.10-cpu-22.04`
+- `:latest-cpu-jupyter` → `:jupyter-pytorch-2.1.1-py3.10-cpu-22.04`
 
 Browse [here](https://github.com/ai-dock/stable-diffusion-webui/pkgs/container/stable-diffusion-webui) for an image suitable for your target environment.
 
-You can also self-build from source by editing `.env` and running `docker compose build`.
+You can also [build from source](#building-images) by editing `.env` and running `docker compose build`.
 
 Supported Python versions: `3.10`
 
-Supported Pytorch versions: `2.0.1`
+Supported Pytorch versions: `2.1.1` `2.0.1`
 
 Supported Platforms: `NVIDIA CUDA`, `AMD ROCm`, `CPU`
 
+## Building Images
+
+You can self-build from source by editing `docker-compose.yaml` or `.env` and running `docker compose build`.
+
+It is a good idea to leave the main source tree alone and copy any extra files you would like in the container into `build/COPY_ROOT_EXTRA/...`. The structure within this directory will be overlaid on `/` near the end of the build process.
+
+After copying has been completed, the script `build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh` will be executed. A template for this file, capable of downloading models and extensions, is provided for convenience.
+
+Any directories and files that you add into `opt/storage` will be made available in the running container at `$WORKSPACE/storage` through symbolic links.
+
+This directory is monitored by `inotifywait`. Any items appearing here will be automatically symlinked to the application directories as defined in `/opt/ai-dock/storage_monitor/etc/mappings.sh`.
+
+### Recommended workflow
+
+- Fork this repository and clone it
+- Create and switch to a new branch
+- Create `.env` to override the `IMAGE_TAG` and other variables
+- Copy non-public models to `build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/ckpt/`
+- Edit `build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh` to download public models and extensions
+- Run `docker compose build`
+- Run `docker compose push`
+
+>[!NOTE]
+>The GitHub actions in this repository will also build the image if you push changes to the `main` branch of your fork. You should edit the actions script to avoid building for platforms you don't need.
+
 ## Run Locally
 
 A 'feature-complete' `docker-compose.yaml` file is included for your convenience. All features of the image are included - Simply edit the environment variables in `.env`, save and then type `docker compose up`.
@@ -143,18 +168,17 @@ You can use the included `cloudflared` service to make secure connections withou
 
 | `DIRECT_ADDRESS_GET_WAN` | Use the internet facing interface for direct links (default `false`) |
 | `WEBUI_BRANCH` | WebUI branch/commit hash. Defaults to `master` |
 | `WEBUI_FLAGS` | Startup flags. eg. `--no-half` |
-| `WEBUI_PORT` | WebUI port (default `7860`) |
-| `GPU_COUNT` | Limit the number of available GPUs |
+| `WEBUI_PORT_HOST` | WebUI port (default `7860`) |
 | `PROVISIONING_SCRIPT` | URL of a remote script to execute on init. See [note](#provisioning-script). |
 | `RCLONE_*` | Rclone configuration - See [rclone documentation](https://rclone.org/docs/#config-file) |
 | `SKIP_ACL` | Set `true` to skip modifying workspace ACL |
-| `SSH_PORT` | Set a non-standard port for SSH (default `22`) |
+| `SSH_PORT_HOST` | Set a non-standard port for SSH (default `22`) |
 | `SSH_PUBKEY` | Your public key for SSH |
 | `WEB_ENABLE_AUTH` | Enable password protection for web services (default `true`) |
 | `WEB_USER` | Username for web services (default `user`) |
 | `WEB_PASSWORD` | Password for web services (default `password`) |
 | `WORKSPACE` | A volume path. Defaults to `/workspace/` |
-| `WORKSPACE_SYNC` | Move mamba environments and services to workspace if mounted (default `true`) |
+| `WORKSPACE_SYNC` | Move mamba environments and services to workspace if mounted (default `false`) |
 
 Environment variables can be specified by using any of the standard methods (`docker-compose.yaml`, `docker run -e...`). Additionally, environment variables can also be passed as parameters of `init.sh`.
@@ -198,16 +222,19 @@ This script will download & install the following:
 
 __Models__
 
 - Stable Diffusion 1.5
-- Stable Diffusion 2.1
 - Stable Diffusion XL
 - Stable Diffusion XL Refiner
 
 __Extensions__
 
 - Controlnet
+- Deforum
+- Dreambooth
 - Dynamic Prompts
 - Face Editor
 - Image Browser
+- Openpose Editor
+- ReActor
 - Regional Prompter
 - Ultimate Upscale
@@ -233,7 +260,7 @@ __VAE__
 
 Remember, you do not have to use this script - Just set `PROVISIONING_SCRIPT=https://example.com/your-script.sh`.
 
 >[!NOTE]
->If configured, `sshd`, `caddy`, `cloudflared`, `rclone`, `port redirector` & `logtail` will be launched before provisioning; Any other processes will launch after.
+>If configured, `sshd`, `caddy`, `cloudflared`, `rclone`, `serviceportal`, `storagemonitor` & `logtail` will be launched before provisioning; Any other processes will launch after.
 
 >[!WARNING]
 >Only use scripts that you trust and which cannot be changed without your consent.
@@ -289,6 +316,15 @@ To ensure that the files remain accessible to the local user that owns the direc
 
 If you do not want this, you can set the environment variable `SKIP_ACL=true`.
 
+## Workspace Sync
+
+When run with `WORKSPACE_SYNC=true` the container will move the micromamba environments and application directories into your workspace directory.
+
+This allows you to maintain state even after destroying containers. This is useful in cloud environments but is likely unnecessary when running locally.
+
+>[!WARNING]
+>A synced environment will not be upgraded when deploying an updated docker image. You must manually upgrade your packages or remove the synced environment.
+
 ## Running Services
 
 This image will spawn multiple processes upon starting a container because some of our remote environments do not support more than one container per instance.
@@ -308,9 +344,6 @@ You can set startup flags by using variable `WEBUI_FLAGS`.
 
 To manage this service you can use `supervisorctl [start|stop|restart] webui`.
 
->[!NOTE]
->_If you have enabled `CF_QUICK_TUNNELS` a secure `https://[random-auto-generated-sub-domain].trycloudflare.com` link will be created. You can find it at `/var/log/supervisor/quicktunnel-webui.log`_
-
 ### Jupyter (with tag `jupyter` only)
 
 The jupyter server will launch a `lab` instance unless you specify `JUPYTER_MODE=notebook`.
@@ -321,9 +354,6 @@ A python kernel will be installed coresponding with the python version of the im
 
 Jupyter's official documentation is available at https://jupyter.org/
 
->[!NOTE]
->_If you have enabled `CF_QUICK_TUNNELS` a secure `https://[random-auto-generated-sub-domain].trycloudflare.com` link will be created. You can find it at `/var/log/supervisor/quicktunnel-jupyter.log`_
-
 ### Caddy
 
 This is a simple webserver acting as a reverse proxy.
@@ -350,6 +380,8 @@ You can also create a private network to enable remote connecions to the contain
 
 If you do not wish to provide a tunnel token, you could enable `CF_QUICK_TUNNELS` which will create a throwaway tunnel for your web services.
 
+Secure links can be found in the [service portal](#service-portal) and in the log files at `/var/log/supervisor/quicktunnel-*.log`.
+
 Full documentation for Cloudflare tunnels is [here](https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/).
 
 >[!NOTE]
@@ -414,6 +446,10 @@ This script follows and prints the log files for each of the above services to s
 
 If you are logged into the container you can follow the logs by running `logtail.sh` in your shell.
 
+### Storage Monitor
+
+This service detects changes to files in `$WORKSPACE/storage` and creates symbolic links to the application directories defined in `/opt/ai-dock/storage_monitor/etc/mappings.sh`.
+
 ## Open Ports
 
 Some ports need to be exposed for the services to run or for certain features of the provided software to function
@@ -449,7 +485,6 @@
 - Create a [new notebook](https://link.ai-dock.org/console.paperspace.com) with the `Start from Scratch` template.
 - Select `Advanced options`
 - In Container Name enter `ghcr.io/ai-dock/stable-diffusion-webui:latest-jupyter`
-- In Registry Username enter `x` (Paperspace bug)
 - In Command enter `init.sh WORKSPACE=/notebooks PROVISIONING_SCRIPT="https://raw.githubusercontent.com/ai-dock/stable-diffusion-webui/main/config/provisioning/default.sh" WEBUI_FLAGS="--xformers" CF_QUICK_TUNNELS=true`
 
 You can use the web UI to do further configuration, or you can supply further environment variables as detailed above.
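The README above repeatedly defers configuration to `.env`. As a quick illustration of how the documented variables fit together (the values below are examples, not defaults introduced by this patch), a minimal `.env` for a local run might look like this:

```bash
# .env - read by docker compose for variable substitution
IMAGE_TAG=jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04

# Web service auth (defaults: user / password)
WEB_USER=user
WEB_PASSWORD=changeme

# Host-side ports
WEBUI_PORT_HOST=7860
SSH_PORT_HOST=2222

# Optional remote script executed on init
#PROVISIONING_SCRIPT=https://raw.githubusercontent.com/ai-dock/stable-diffusion-webui/main/config/provisioning/default.sh
```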
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
index 0f49f72..53491a6 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
@@ -1,13 +1,10 @@
-#!/bin/bash
+#!/bin/false
 
-# Must exit and fail to build if any command fails
-set -eo pipefail
-
-main() {
-    install_webui
+build_amd_main() {
+    build_amd_install_webui
 }
 
-install_webui() {
+build_amd_install_webui() {
     # Mamba export does not include pip packages.
     # We need to get torch again - todo find a better way?
     micromamba -n webui run pip install \
@@ -17,4 +14,4 @@
     /opt/ai-dock/bin/update-webui.sh
 }
 
-main "$@"; exit
\ No newline at end of file
+build_amd_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
index 0e167bd..1d04077 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
@@ -1,19 +1,5 @@
-#!/bin/bash
-
-# Must exit and fail to build if any command fails
-set -eo pipefail
+#!/bin/false
 
 # Tidy up and keep image small
 apt-get clean -y
-micromamba clean -ay
-
-# Remove build scripts
-scripts_dir="/opt/ai-dock/bin/build/"
-
-# Remove this layer's scripts
-rm -rf ${scripts_dir}layer0
-
-# Remove parent directory if this is the last build layer
-if [[ $(ls -l ${scripts_dir} | grep -c ^d) -eq 0 ]]; then
-    rm -rf ${scripts_dir}
-fi
\ No newline at end of file
+micromamba clean -ay
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
index 9a4cbc9..cdbe107 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
@@ -1,21 +1,15 @@
-#!/bin/bash
-
-# Must exit and fail to build if any command fails
-set -eo pipefail
+#!/bin/false
 
+source /opt/ai-dock/etc/environment.sh
 webui_git="https://github.com/AUTOMATIC1111/stable-diffusion-webui"
 
-main() {
-    create_env
-    install_jupyter_kernels
-    clone_webui
+build_common_main() {
+    build_common_create_env
+    build_common_install_jupyter_kernels
+    build_common_clone_webui
 }
 
-create_env() {
-    if [[ $PYTHON_VERSION == "3.10" ]]; then
-        $MAMBA_INSTALL -n ${MAMBA_DEFAULT_ENV} -c conda-forge -y \
-            python==3.10.6
-    fi
+build_common_create_env() {
     apt-get update
     $APT_INSTALL libgl1 libgoogle-perftools4
     ln -sf $(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1) \
@@ -24,13 +18,13 @@
     exported_env=/tmp/${MAMBA_DEFAULT_ENV}.yaml
     micromamba env export -n ${MAMBA_DEFAULT_ENV} > "${exported_env}"
     $MAMBA_CREATE -n webui --file "${exported_env}"
-    $MAMBA_INSTALL -n webui -c conda-forge -y \
+    $MAMBA_INSTALL -n webui \
         httpx=0.24.1
 }
 
-install_jupyter_kernels() {
+build_common_install_jupyter_kernels() {
     if [[ $IMAGE_BASE =~ "jupyter-pytorch" ]]; then
-        $MAMBA_INSTALL -n webui -c conda-forge -y \
+        $MAMBA_INSTALL -n webui \
             ipykernel \
             ipywidgets
 
@@ -52,9 +46,9 @@
     fi
 }
 
-clone_webui() {
+build_common_clone_webui() {
     cd /opt
     git clone ${webui_git}
 }
 
-main "$@"; exit
\ No newline at end of file
+build_common_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
index 93643ac..f8ff8ae 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
@@ -1,14 +1,11 @@
-#!/bin/bash
+#!/bin/false
 
-# Must exit and fail to build if any command fails
-set -e
-
-main() {
-    install_webui
+build_cpu_main() {
+    build_cpu_install_webui
 }
 
-install_webui() {
+build_cpu_install_webui() {
     /opt/ai-dock/bin/update-webui.sh
 }
 
-main "$@"; exit
\ No newline at end of file
+build_cpu_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
index 6e2382a..bedaae4 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
@@ -3,21 +3,19 @@
 # Must exit and fail to build if any command fails
 set -eo pipefail
 
-/opt/ai-dock/bin/build/layer0/common.sh
+source /opt/ai-dock/bin/build/layer0/common.sh
 
 if [[ "$XPU_TARGET" == "NVIDIA_GPU" ]]; then
-    /opt/ai-dock/bin/build/layer0/nvidia.sh
+    source /opt/ai-dock/bin/build/layer0/nvidia.sh
 elif [[ "$XPU_TARGET" == "AMD_GPU" ]]; then
-    /opt/ai-dock/bin/build/layer0/amd.sh
+    source /opt/ai-dock/bin/build/layer0/amd.sh
 elif [[ "$XPU_TARGET" == "CPU" ]]; then
-    /opt/ai-dock/bin/build/layer0/cpu.sh
+    source /opt/ai-dock/bin/build/layer0/cpu.sh
 else
     printf "No valid XPU_TARGET specified\n" >&2
     exit 1
 fi
 
-# webui 'prepare-environment'
-cd /opt/stable-diffusion-webui
-micromamba run -n webui python launch.py --skip-torch-cuda-test --skip-python-version-check --no-download-sd-model --do-not-download-clip --exit
+$MAMBA_DEFAULT_RUN python /opt/ai-dock/tests/assert-torch-version.py
 
-/opt/ai-dock/bin/build/layer0/clean.sh
\ No newline at end of file
+source /opt/ai-dock/bin/build/layer0/clean.sh
\ No newline at end of file
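Worth noting about the refactor above: the per-target scripts are now source-only (`#!/bin/false` makes accidental direct execution fail) and their functions carry `build_<target>_` prefixes so nothing collides once several files are loaded into one shell. The single `set -eo pipefail` in `init.sh` then aborts the build if any sourced step fails. A minimal sketch of the same pattern, with hypothetical file names:

```bash
#!/bin/bash
# parent.sh - the only directly-executable entrypoint for the layer
set -eo pipefail            # one failure in any sourced step kills the build

source ./step_a.sh          # each file defines and calls step_a_main etc.
source ./step_b.sh

# step_a.sh would begin with '#!/bin/false' and contain:
#   step_a_main() { ... }   # the prefix avoids clobbering step_b's functions
#   step_a_main "$@"
```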
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
index 74de3b1..82e9a64 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
@@ -1,19 +1,17 @@
-#!/bin/bash
+#!/bin/false
 
-# Must exit and fail to build if any command fails
-set -eo pipefail
-
-main() {
-    install_webui
+build_nvidia_main() {
+    build_nvidia_install_webui
 }
 
-install_webui() {
+build_nvidia_install_webui() {
     micromamba run -n webui ${PIP_INSTALL} \
         torch=="${PYTORCH_VERSION}" \
-        xformers \
         nvidia-ml-py3
+
+    micromamba install -n webui -c xformers xformers
 
     /opt/ai-dock/bin/update-webui.sh
 }
 
-main "$@"; exit
\ No newline at end of file
+build_nvidia_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-webui.sh b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-webui.sh
index b4c3afa..05cfcaf 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-webui.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-webui.sh
@@ -2,8 +2,8 @@
 
 trap cleanup EXIT
 
-LISTEN_PORT=17860
-METRICS_PORT=27860
+LISTEN_PORT=${WEBUI_PORT_LOCAL:-17860}
+METRICS_PORT=${WEBUI_METRICS_PORT:-27860}
 PROXY_SECURE=true
 
 function cleanup() {
@@ -12,10 +12,9 @@ function cleanup() {
 }
 
 function start() {
-    if [[ -z $WEBUI_PORT ]]; then
-        WEBUI_PORT=7860
+    if [[ ! -v WEBUI_PORT || -z $WEBUI_PORT ]]; then
+        WEBUI_PORT=${WEBUI_PORT_HOST:-7860}
     fi
-
     PROXY_PORT=$WEBUI_PORT
     SERVICE_NAME="A1111 SD Web UI"
 
@@ -38,7 +37,7 @@
         PLATFORM_FLAGS="--use-cpu all --skip-torch-cuda-test --no-half"
     fi
     # No longer skipping prepare-environment
-    BASE_FLAGS=
+    BASE_FLAGS=""
 
     # Delay launch until micromamba is ready
     if [[ -f /run/workspace_sync || -f /run/container_config ]]; then
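With the web UI now running behind the `WEBUI_PORT_LOCAL`/`WEBUI_PORT_HOST` pair, day-to-day control still goes through supervisor as the README describes. For example, from a shell inside the container:

```bash
supervisorctl status           # list all managed services
supervisorctl restart webui    # restart just the A1111 web UI
logtail.sh                     # follow the aggregated service logs
```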
diff --git a/build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh b/build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh
new file mode 100644
index 0000000..8b4da86
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh
@@ -0,0 +1,21 @@
+# Key is relative to $WORKSPACE/storage/
+
+declare -A storage_map
+storage_map["stable_diffusion/models/ckpt"]="/opt/stable-diffusion-webui/models/Stable-diffusion"
+storage_map["stable_diffusion/models/codeformer"]="/opt/stable-diffusion-webui/models/Codeformer"
+storage_map["stable_diffusion/models/controlnet"]="/opt/stable-diffusion-webui/models/ControlNet"
+storage_map["stable_diffusion/models/deepbooru"]="/opt/stable-diffusion-webui/models/deepbooru"
+storage_map["stable_diffusion/models/dreambooth"]="/opt/stable-diffusion-webui/models/dreambooth"
+storage_map["stable_diffusion/models/esrgan"]="/opt/stable-diffusion-webui/models/ESRGAN"
+storage_map["stable_diffusion/models/gfpgan"]="/opt/stable-diffusion-webui/models/GFPGAN"
+storage_map["stable_diffusion/models/hypernetworks"]="/opt/stable-diffusion-webui/models/hypernetworks"
+storage_map["stable_diffusion/models/insightface"]="/opt/stable-diffusion-webui/models/insightface"
+storage_map["stable_diffusion/models/karlo"]="/opt/stable-diffusion-webui/models/karlo"
+storage_map["stable_diffusion/models/ldsr"]="/opt/stable-diffusion-webui/models/LDSR"
+storage_map["stable_diffusion/models/lora"]="/opt/stable-diffusion-webui/models/Lora"
+storage_map["stable_diffusion/models/reactor"]="/opt/stable-diffusion-webui/models/reactor"
+storage_map["stable_diffusion/models/swinIR"]="/opt/stable-diffusion-webui/models/swinIR"
+storage_map["stable_diffusion/models/vae"]="/opt/stable-diffusion-webui/models/VAE"
+storage_map["stable_diffusion/models/vae_approx"]="/opt/stable-diffusion-webui/models/VAE-approx"
+
+# Add more mappings for other repository directories as needed
\ No newline at end of file
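For orientation, here is roughly how these mappings get used. The actual storage monitor service ships with the base image and is driven by `inotifywait`; this is only a simplified sketch of the symlinking it performs:

```bash
#!/bin/bash
# Illustrative only - not the real storagemonitor implementation
source /opt/ai-dock/storage_monitor/etc/mappings.sh

for key in "${!storage_map[@]}"; do
    source_dir="${WORKSPACE}/storage/${key}"
    target_dir="${storage_map[$key]}"
    mkdir -p "$source_dir" "$target_dir"
    for item in "$source_dir"/*; do
        # Link each stored model/file into the application directory
        [[ -e "$item" ]] && ln -sf "$item" "$target_dir/$(basename "$item")"
    done
done
```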
diff --git a/build/Dockerfile b/build/Dockerfile
index 09b0254..7bc7677 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -1,15 +1,15 @@
 # For build automation - Allows building from any ai-dock base image
 # Use a *cuda*base* image as default because pytorch brings the libs
-ARG IMAGE_BASE="ghcr.io/ai-dock/pytorch:2.0.1-py3.10-cuda-11.8.0-base-22.04"
+ARG IMAGE_BASE="ghcr.io/ai-dock/pytorch:2.1.1-py3.10-cuda-11.8.0-base-22.04"
 FROM ${IMAGE_BASE}
 
 LABEL org.opencontainers.image.source https://github.com/ai-dock/stable-diffusion-webui
-
 LABEL org.opencontainers.image.description "Automatic1111 Stable Diffusion Web UI"
-
 LABEL maintainer="Rob Ballantyne "
 
 ENV IMAGE_SLUG="stable-diffusion-webui"
+ENV OPT_SYNC=stable-diffusion-webui
+
 # Copy early so we can use scripts in the build - Changes to these files will invalidate the cache and cause a rebuild.
 COPY ./COPY_ROOT/ /
@@ -17,14 +17,15 @@ COPY ./COPY_ROOT/ /
 # Don't put anything heavy in here - We can use multi-stage building above if necessary.
 ARG IMAGE_BASE
-RUN /opt/ai-dock/bin/build/layer0/init.sh | tee /var/log/build.log
+RUN set -eo pipefail && /opt/ai-dock/bin/build/layer0/init.sh | tee /var/log/build.log
 
-ENV OPT_SYNC=stable-diffusion-webui:$OPT_SYNC
+# Must be set after layer0
 ENV MAMBA_DEFAULT_ENV=webui
 ENV MAMBA_DEFAULT_RUN="micromamba run -n ${MAMBA_DEFAULT_ENV}"
 
-# Set false when building specific versions
-ARG AUTO_UPDATE=true
-ENV AUTO_UPDATE=$AUTO_UPDATE
+# Copy overrides and models into later layers for fast rebuilds
+COPY ./COPY_ROOT_EXTRA/ /
+RUN set -eo pipefail && /opt/ai-dock/bin/build/layer1/init.sh | tee -a /var/log/build.log
 
 # Keep init.sh as-is and place additional logic in /opt/ai-dock/bin/preflight.sh
 CMD ["init.sh"]
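Because `IMAGE_BASE` remains a build argument, one-off builds against a different base don't need compose at all. A sketch (the base tag follows the patterns in the README; the output tag is arbitrary):

```bash
# Run from the repository root; the build context is ./build
docker build build/ \
    --build-arg IMAGE_BASE=ghcr.io/ai-dock/pytorch:2.1.1-py3.10-rocm-5.6-runtime-22.04 \
    -t stable-diffusion-webui:custom-rocm
```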
diff --git a/config/provisioning/default.sh b/config/provisioning/default.sh
index 0c88512..b14c155 100644
--- a/config/provisioning/default.sh
+++ b/config/provisioning/default.sh
@@ -4,10 +4,19 @@
 
 # https://raw.githubusercontent.com/ai-dock/stable-diffusion-webui/main/config/provisioning/default.sh
 
-### Edit the following arrays to suit your workflow
+### Edit the following arrays to suit your workflow - values must be quoted and separated by newlines or spaces.
 
 DISK_GB_REQUIRED=30
 
+MAMBA_PACKAGES=(
+    #"package1"
+    #"package2=version"
+    )
+
+PIP_PACKAGES=(
+    "bitsandbytes==0.41.2.post2"
+    )
+
 EXTENSIONS=(
     "https://github.com/Mikubill/sd-webui-controlnet"
     "https://github.com/d8ahazard/sd_dreambooth_extension"
@@ -67,29 +76,56 @@ CONTROLNET_MODELS=(
 
 ### DO NOT EDIT BELOW HERE UNLESS YOU KNOW WHAT YOU ARE DOING ###
 
 function provisioning_start() {
+    source /opt/ai-dock/etc/environment.sh
     DISK_GB_AVAILABLE=$(($(df --output=avail -m "${WORKSPACE}" | tail -n1) / 1000))
     DISK_GB_USED=$(($(df --output=used -m "${WORKSPACE}" | tail -n1) / 1000))
     DISK_GB_ALLOCATED=$(($DISK_GB_AVAILABLE + $DISK_GB_USED))
     provisioning_print_header
+    provisioning_get_mamba_packages
+    provisioning_get_pip_packages
     provisioning_get_extensions
     provisioning_get_models \
-        "/opt/stable-diffusion-webui/models/Stable-diffusion" \
+        "${WORKSPACE}/storage/stable_diffusion/models/ckpt" \
         "${CHECKPOINT_MODELS[@]}"
     provisioning_get_models \
-        "/opt/stable-diffusion-webui/models/Lora" \
+        "${WORKSPACE}/storage/stable_diffusion/models/lora" \
         "${LORA_MODELS[@]}"
     provisioning_get_models \
-        "/opt/stable-diffusion-webui/extensions/sd-webui-controlnet/models" \
+        "${WORKSPACE}/storage/stable_diffusion/models/controlnet" \
         "${CONTROLNET_MODELS[@]}"
     provisioning_get_models \
-        "/opt/stable-diffusion-webui/models/VAE" \
+        "${WORKSPACE}/storage/stable_diffusion/models/vae" \
         "${VAE_MODELS[@]}"
     provisioning_get_models \
-        "/opt/stable-diffusion-webui/models/ESRGAN" \
+        "${WORKSPACE}/storage/stable_diffusion/models/esrgan" \
         "${ESRGAN_MODELS[@]}"
+
+    PLATFORM_FLAGS=""
+    if [[ $XPU_TARGET = "CPU" ]]; then
+        PLATFORM_FLAGS="--use-cpu all --skip-torch-cuda-test --no-half"
+    fi
+    PROVISIONING_FLAGS="--skip-python-version-check --no-download-sd-model --do-not-download-clip --port 11404 --exit"
+    FLAGS_COMBINED="${PLATFORM_FLAGS} $(cat /etc/a1111_webui_flags.conf) ${PROVISIONING_FLAGS}"
+
+    # Start and exit because webui will probably require a restart
+    cd /opt/stable-diffusion-webui && \
+    micromamba run -n webui -e LD_PRELOAD=libtcmalloc.so python launch.py \
+        ${FLAGS_COMBINED}
     provisioning_print_end
 }
 
+function provisioning_get_mamba_packages() {
+    if [[ -n $MAMBA_PACKAGES ]]; then
+        $MAMBA_INSTALL -n webui ${MAMBA_PACKAGES[@]}
+    fi
+}
+
+function provisioning_get_pip_packages() {
+    if [[ -n $PIP_PACKAGES ]]; then
+        micromamba run -n webui $PIP_INSTALL ${PIP_PACKAGES[@]}
+    fi
+}
+
 function provisioning_get_extensions() {
     for repo in "${EXTENSIONS[@]}"; do
         dir="${repo##*/}"
@@ -105,7 +141,7 @@ function provisioning_get_extensions() {
             fi
         else
             printf "Downloading extension: %s...\n" "${repo}"
-            git clone "${repo}" "${path}"
+            git clone "${repo}" "${path}" --recursive
             if [[ -e $requirements ]]; then
                 micromamba -n webui run ${PIP_INSTALL} -r "${requirements}"
             fi
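Since `PROVISIONING_SCRIPT` can point at any trusted URL, the usual customisation route is to copy this default script and edit only the arrays above the `### DO NOT EDIT BELOW HERE` marker. For example (URLs illustrative):

```bash
# my-provisioning.sh - host somewhere trusted, then set
# PROVISIONING_SCRIPT=https://example.com/my-provisioning.sh
EXTENSIONS=(
    "https://github.com/Mikubill/sd-webui-controlnet"
)
CHECKPOINT_MODELS=(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors"
)
PIP_PACKAGES=()
```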
diff --git a/docker-compose.yaml b/docker-compose.yaml
index f27505b..25541c9 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -5,11 +5,11 @@ services:
     build:
       context: ./build
       args:
-        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:latest-cuda}
+        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.1.1-py3.10-cuda-11.8.0-base-22.04}
       tags:
-        - "ghcr.io/ai-dock/webui:${IMAGE_TAG:-latest-jupyter}"
+        - "ghcr.io/ai-dock/webui:${IMAGE_TAG:-jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04}"
 
-    image: ghcr.io/ai-dock/stable-diffusion-webui:${IMAGE_TAG:-latest-jupyter}
+    image: ghcr.io/ai-dock/stable-diffusion-webui:${IMAGE_TAG:-jupyter-pytorch-2.1.1-py3.10-cuda-11.8.0-base-22.04}
 
     security_opt:
       # For Rclone mount
@@ -40,6 +40,8 @@ services:
       - ./config/rclone:/etc/rclone
       # Workspace
      - ./workspace:${WORKSPACE:-/workspace/}:rshared
+      # You can share /workspace/storage with other non-WEBUI containers. See README
+      #- /path/to/common_storage:${WORKSPACE:-/workspace/}storage/:rshared
       # Will echo to root-owned authorized_keys file;
       # Avoids changing local file owner
       - ./config/authorized_keys:/root/.ssh/authorized_keys_mount
@@ -47,31 +49,38 @@ services:
 
     ports:
         # SSH available on host machine port 2222 to avoid conflict. Change to suit
-        - ${SSH_PORT_HOST:-2222}:${SSH_PORT:-22}
-        # redirect to Cloudflare quick tunnel
-        - ${REDIRECTOR_PORT_HOST}:1111
-        # A1111 web interface
-        - ${WEBUI_PORT_HOST:-7860}:${WEBUI_PORT:-7860}
+        - ${SSH_PORT_HOST:-2222}:${SSH_PORT_LOCAL:-22}
+        # Caddy port for service portal
+        - ${SERVICEPORTAL_PORT_HOST:-1111}:${SERVICEPORTAL_PORT_HOST:-1111}
+        # WEBUI web interface
+        - ${WEBUI_PORT_HOST:-7860}:${WEBUI_PORT_HOST:-7860}
         # Jupyter server
-        - ${JUPYTER_PORT_HOST:-8888}:${JUPYTER_PORT:-8888}
+        - ${JUPYTER_PORT_HOST:-8888}:${JUPYTER_PORT_HOST:-8888}
         # Rclone webserver for interactive configuration
-        - ${RCLONE_PORT_HOST:-53682}:53682
+        - ${RCLONE_PORT_HOST:-53682}:${RCLONE_PORT_HOST:-53682}
 
     environment:
         # Don't enclose values in quotes
-        - AUTO_UPDATE=${AUTO_UPDATE:-true}
-        - WEBUI_BRANCH=${WEBUI_BRANCH:-master}
-        - WEBUI_FLAGS=${WEBUI_FLAGS:-}
-        - WEBUI_PORT=${WEBUI_PORT:-7860}
         - DIRECT_ADDRESS=${DIRECT_ADDRESS:-127.0.0.1}
         - DIRECT_ADDRESS_GET_WAN=${DIRECT_ADDRESS_GET_WAN:-false}
-        - SSH_PORT=${SSH_PORT:-22}
         - WORKSPACE=${WORKSPACE:-/workspace}
-        - WORKSPACE_SYNC=${WORKSPACE_SYNC:-true}
+        - WORKSPACE_SYNC=${WORKSPACE_SYNC:-false}
         - CF_TUNNEL_TOKEN=${CF_TUNNEL_TOKEN:-}
         - CF_QUICK_TUNNELS=${CF_QUICK_TUNNELS:-true}
-        - TUNNEL_TRANSPORT_PROTOCOL=${TUNNEL_TRANSPORT_PROTOCOL:-http2}
         - WEB_ENABLE_AUTH=${WEB_ENABLE_AUTH:-true}
         - WEB_USER=${WEB_USER:-user}
-        - WEB_PASSWORD=${WEB_PASSWORD:-true}
-        # - PROVISIONING_SCRIPT=${PROVISIONING_SCRIPT:-}
\ No newline at end of file
+        - WEB_PASSWORD=${WEB_PASSWORD:-password}
+        - SSH_PORT_HOST=${SSH_PORT_HOST:-2222}
+        - SSH_PORT_LOCAL=${SSH_PORT_LOCAL:-22}
+        - SERVICEPORTAL_PORT_HOST=${SERVICEPORTAL_PORT_HOST:-1111}
+        - SERVICEPORTAL_PORT_LOCAL=${SERVICEPORTAL_PORT_LOCAL:-11111}
+        - SERVICEPORTAL_METRICS_PORT=${SERVICEPORTAL_METRICS_PORT:-21111}
+        - WEBUI_FLAGS=${WEBUI_FLAGS:-}
+        - WEBUI_PORT_HOST=${WEBUI_PORT_HOST:-7860}
+        - WEBUI_PORT_LOCAL=${WEBUI_PORT_LOCAL:-17860}
+        - WEBUI_METRICS_PORT=${WEBUI_METRICS_PORT:-27860}
+        - JUPYTER_PORT_HOST=${JUPYTER_PORT_HOST:-8888}
+        - JUPYTER_PORT_LOCAL=${JUPYTER_PORT_LOCAL:-18888}
+        - JUPYTER_METRICS_PORT=${JUPYTER_METRICS_PORT:-28888}
+        - SERVERLESS=${SERVERLESS:-false}
+        # - PROVISIONING_SCRIPT=${PROVISIONING_SCRIPT:-}
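Taken together, a typical local session with these changes applied (using the `.env` sketch from earlier, or no `.env` at all for pure defaults):

```bash
# Build and start, overriding a couple of values from the shell;
# docker compose lets shell variables take precedence over .env entries
WEBUI_PORT_HOST=7861 WEB_PASSWORD=changeme docker compose up --build

# Or run detached and watch the logs
docker compose up -d
docker compose logs -f
```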