45 changes: 45 additions & 0 deletions .github/workflows/stable-diffusion-web-ui-production.yml
@@ -0,0 +1,45 @@
name: Build and Push Stable Diffusion WebUI Docker Image

on:
  workflow_dispatch:
  push:
    paths:
      - "containers/official-templates/stable-diffusion-webui/**"
    branches-ignore:
      - main

jobs:
  build:
    name: Build and Push Docker Image
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Clear space to remove unused folders
        run: |
          rm -rf /usr/share/dotnet
          rm -rf /opt/ghc
          rm -rf "/usr/local/share/boost"
          rm -rf "$AGENT_TOOLSDIRECTORY"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      # Set up Docker Buildx
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Log in to Docker Hub
      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and Push Docker Image
        working-directory: containers/official-templates/stable-diffusion-webui
        uses: docker/bake-action@v2
        with:
          push: true
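Because the workflow above also declares a `workflow_dispatch` trigger, the build can be started by hand. A minimal sketch with the GitHub CLI, assuming the workflow file name from this PR and a branch of your choice:

```bash
# Manually trigger the build from a checkout (requires the GitHub CLI and repo access).
# The file name matches .github/workflows/stable-diffusion-web-ui-production.yml above.
gh workflow run stable-diffusion-web-ui-production.yml --ref my-feature-branch
```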
67 changes: 35 additions & 32 deletions official-templates/stable-diffusion-webui/Dockerfile
@@ -1,9 +1,5 @@
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04

ARG WEBUI_VERSION
ARG TORCH_VERSION
ARG XFORMERS_VERSION

ENV DEBIAN_FRONTEND noninteractive
ENV SHELL=/bin/bash
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/x86_64-linux-gnu
@@ -15,13 +11,18 @@ WORKDIR /workspace
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install system dependencies
RUN apt update --yes && \
apt upgrade --yes && \
apt install --yes --no-install-recommends \
RUN rm -f /etc/apt/sources.list.d/*.list && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
apt-get update --yes && \
apt-get upgrade --yes && \
apt-get install --yes --no-install-recommends \
git openssh-server libglib2.0-0 libsm6 libgl1 libxrender1 libxext6 ffmpeg wget curl psmisc rsync vim bc nginx \
pkg-config libffi-dev libcairo2 libcairo2-dev libgoogle-perftools4 libtcmalloc-minimal4 apt-transport-https \
software-properties-common ca-certificates && \
update-ca-certificates
update-ca-certificates && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

# Install Python 3.10
RUN add-apt-repository ppa:deadsnakes/ppa && \
@@ -32,51 +33,47 @@ RUN add-apt-repository ppa:deadsnakes/ppa && \
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
python get-pip.py && \
pip install -U --no-cache-dir pip

# Cache Models
RUN mkdir /sd-models && mkdir /cn-models && \
wget https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned.ckpt -O /sd-models/v1-5-pruned.ckpt && \
wget https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -O /sd-models/sd_xl_base_1.0.safetensors && \
wget https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -O /sd-models/sd_xl_refiner_1.0.safetensors && \
wget https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny.pth -O /cn-models/control_v11p_sd15_canny.pth

# Create a virtual environment
RUN python -m venv /workspace/venv && \
pip install -U --no-cache-dir jupyterlab jupyterlab_widgets ipykernel ipywidgets

WORKDIR /

ARG WEBUI_VERSION

# Install Automatic1111's WebUI
RUN git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git && \
cd stable-diffusion-webui && \
cd /stable-diffusion-webui && \
git checkout tags/${WEBUI_VERSION} && \
pip install -r requirements_versions.txt && \
pip install httpx>=0.25.0 xformers && \
pip install --no-cache-dir -r requirements_versions.txt && \
python -c "from launch import prepare_environment; prepare_environment()" --skip-torch-cuda-test

# Install torch and xformers
RUN cd /workspace/stable-diffusion-webui && \
pip install torch===${TORCH_VERSION} torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 && \
pip install xformers==${XFORMERS_VERSION} --index-url https://download.pytorch.org/whl/cu118
WORKDIR /stable-diffusion-webui

# Apply hotfix to replace `pytorch_lightning.utilities.distributed` with `pytorch_lightning.utilities.rank_zero`
# see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/11458#issuecomment-1609900319
# RUN sed -i 's/pytorch_lightning.utilities.distributed/pytorch_lightning.utilities.rank_zero/g' \
# repositories/stable-diffusion-stability-ai/ldm/models/diffusion/ddpm.py && \
# sed -i 's/pytorch_lightning.utilities.distributed/pytorch_lightning.utilities.rank_zero/g' \
# extensions-builtin/LDSR/sd_hijack_ddpm_v1.py

# Install DeForum
RUN cd /workspace/stable-diffusion-webui && \
git clone https://github.com/deforum-art/sd-webui-deforum extensions/deforum && \
RUN git clone https://github.com/deforum-art/sd-webui-deforum extensions/deforum && \
cd extensions/deforum && \
pip install basicsr && \
pip install -r requirements.txt

# Install ControlNet
RUN cd /workspace/stable-diffusion-webui && \
git clone https://github.com/Mikubill/sd-webui-controlnet.git extensions/sd-webui-controlnet && \
RUN git clone https://github.com/Mikubill/sd-webui-controlnet.git extensions/sd-webui-controlnet && \
cd extensions/sd-webui-controlnet && \
pip install "opencv-python-headless>=4.9.0" && \
pip install -r requirements.txt

COPY cache-sd-model.py /workspace/stable-diffusion-webui/
RUN cd /workspace/stable-diffusion-webui/ && \
python cache-sd-model.py --use-cpu=all --ckpt /sd-models/SDv1-5.ckpt
WORKDIR /

RUN mv /workspace/venv /venv && \
mv /workspace/stable-diffusion-webui /stable-diffusion-webui && \
mkdir /workspace/downloader && git clone https://github.com/jjangga0214/sd-models-downloader.git /workspace/downloader

COPY relauncher.py webui-user.sh /stable-diffusion-webui/
mkdir -p /workspace/downloader && git clone https://github.com/jjangga0214/sd-models-downloader.git /workspace/downloader

# NGINX Proxy
COPY --from=proxy nginx.conf /etc/nginx/nginx.conf
@@ -85,6 +82,12 @@ COPY --from=proxy readme.html /usr/share/nginx/html/readme.html
# Copy the README.md
COPY README.md /usr/share/nginx/html/README.md

WORKDIR /stable-diffusion-webui

COPY relauncher.py webui-user.sh ./

WORKDIR /workspace

# Start Scripts
COPY pre_start.sh /pre_start.sh
COPY --from=scripts start.sh /
78 changes: 62 additions & 16 deletions official-templates/stable-diffusion-webui/README.md
@@ -1,29 +1,75 @@
## 🚀 RunPod Automatic1111 Stable Diffusion Template
# Automatic1111 Stable Diffusion WebUI Template

### 📝 General
A ready-to-use template for running Stable Diffusion WebUI (AUTOMATIC1111) on RunPod.

⚠️ **Please note, this doesn't work out-of-the-box with encrypted volumes!**
## Quick Start

This is a RunPod-packaged template for Stable Diffusion using the Automatic1111 repository. RunPod does not maintain the code for this repo; we merely package it for easier use.
1. Wait for the pod to fully initialize (GPU Utilization should be 0%)
2. Access the WebUI through port 3000
3. Start creating!

If you need help with settings, etc., feel free to ask us, but remember we're not stable diffusion experts! 😅 We'll do our best to assist, but the RunPod community or automatic/stable diffusion communities might be more effective in helping you.
⚠️ **Note**: You may encounter a 502 error if you try to connect before initialization is complete.

🔵 **Please wait until the GPU Utilization % is 0 before attempting to connect. You'll likely encounter a 502 error if the pod is still getting ready for use.**
## Pre-installed Models

### ⚙️ Changing Launch Parameters
The following models are automatically mounted and ready to use:

You might be used to altering a different file for your launch parameters. In our case, we use `relauncher.py` located in the `webui` directory to manage the launch flags like `--xformers`. Feel free to edit this file, and then restart your pod via the hamburger menu for the changes to take effect. `--xformers` and `--api` are commonly inquired about.
### Stable Diffusion Models

### 📥 Using Your Own Models
- [Stable Diffusion XL Base 1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors)
- [Stable Diffusion XL Refiner 1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors)
- [Stable Diffusion 1.5](https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/blob/main/v1-5-pruned-emaonly.safetensors)

The best way to introduce your models to your pod is by using [runpodctl](https://github.com/runpod/runpodctl/blob/main/README.md) or by uploading them to Google Drive or another cloud storage provider and downloading them to your pod from there.
### ControlNet Models

### 🚚 Uploading to Google Drive
- Canny (control_v11p_sd15_canny.pth)
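At pod start these checkpoints are symlinked out of the RunPod model cache into the WebUI's model folders (see the `pre_start.sh` changes later in this diff). A quick sanity check from a terminal:

```bash
# Confirm the bundled checkpoints were linked in by pre_start.sh.
ls -lh /workspace/stable-diffusion-webui/models/Stable-diffusion/
ls -lh /workspace/stable-diffusion-webui/extensions/sd-webui-controlnet/models/
```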

If you're finished with the pod and want to transfer things to Google Drive, [this colab](https://colab.research.google.com/drive/1ot8pODgystx1D6_zvsALDSvjACBF1cj6) can assist you using `runpodctl`. You can run `runpodctl` in a web terminal (found in the pod connect menu), or in a terminal on the desktop.
## Network Ports

## 🔌 Template Ports
| Application | Port | Protocol | Description |
| ---------------------- | ---- | -------- | ----------------------------------- |
| Stable Diffusion WebUI | 3000 | HTTP | Main interface for Stable Diffusion |
| Jupyter Lab | 8888 | HTTP | Python notebook interface |
| SSH | 22 | TCP | Secure shell access |

- **3001** | HTTP - This is the WebUI port that gets proxied to the internal 3000 port.
- **8888** | HTTP - This is the JupyterLab port that gets proxied to the internal 8888 port.
- **22** | TCP - This is the SSH port that gets proxied to the internal 22 port.
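For context on how these ports are reached: RunPod typically exposes HTTP ports through its proxy rather than directly. A hedged sketch follows; the hostname pattern is an assumption here, so confirm the exact URLs in your pod's Connect menu.

```bash
# Hypothetical URLs via the RunPod HTTP proxy; replace <pod-id> with your pod's ID
# and verify the exact addresses in the pod's Connect menu.
curl -I "https://<pod-id>-3000.proxy.runpod.net"   # Stable Diffusion WebUI
curl -I "https://<pod-id>-8888.proxy.runpod.net"   # JupyterLab
```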
## Customization

### Modifying Launch Parameters

Launch parameters are configured in `webui-user.sh`. To modify them:

1. Edit the file in the workspace
2. Restart the pod to apply changes

Current default parameters include:

- `--xformers` for optimized memory usage
- `--listen` for network access
- `--enable-insecure-extension-access` for extension support
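As a concrete sketch, the default `COMMANDLINE_ARGS` line from `webui-user.sh` (shown further down in this diff) can be extended with extra flags; `--api` below is an illustrative addition, not a shipped default:

```bash
# Edit /workspace/stable-diffusion-webui/webui-user.sh and extend the launch flags,
# e.g. adding --api (example flag), then restart the pod so the change takes effect.
export COMMANDLINE_ARGS="--port 3000 --xformers --skip-install --listen --enable-insecure-extension-access --skip-torch-cuda-test --api"
```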

## Adding Custom Models

You have two options for adding your own models:

1. Using [runpodctl](https://github.com/runpod/runpodctl/blob/main/README.md)
2. Downloading from cloud storage (Google Drive, etc.)
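For example, a checkpoint hosted on Hugging Face (placeholder URL below) can be downloaded straight into the WebUI's model folder under `/workspace`:

```bash
# Sketch: pull a custom checkpoint into the WebUI models directory.
# The URL is a placeholder; the destination matches where this template keeps its models.
wget -O /workspace/stable-diffusion-webui/models/Stable-diffusion/my-model.safetensors \
    "https://huggingface.co/<user>/<repo>/resolve/main/<file>.safetensors"
```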

## Backing Up Your Work

To save your work to Google Drive:

1. Use [this Google Colab notebook](https://colab.research.google.com/drive/1ot8pODgystx1D6_zvsALDSvjACBF1cj6)
2. Follow the instructions to transfer files using `runpodctl`

You can run `runpodctl` either through:

- The web terminal (in pod connect menu)
- The desktop terminal
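As a rough sketch of the transfer flow (command names follow the runpodctl README; check `runpodctl --help` on your version for the exact syntax):

```bash
# On the pod: bundle the generated images and send them (prints a one-time code).
cd /workspace/stable-diffusion-webui
tar czf outputs.tar.gz outputs/
runpodctl send outputs.tar.gz

# On the receiving machine (or inside the Colab notebook): paste the code printed above.
runpodctl receive <one-time-code>
```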

## Important Notes

- This template doesn't support encrypted volumes
- For technical support, consider:
- [RunPod Community on Discord](https://discord.gg/cUpRmau42V)
- [Automatic1111 GitHub Issues](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues)
- [Stable Diffusion on Reddit](https://www.reddit.com/r/StableDiffusion/)
7 changes: 0 additions & 7 deletions official-templates/stable-diffusion-webui/cache-sd-model.py

This file was deleted.

7 changes: 3 additions & 4 deletions official-templates/stable-diffusion-webui/docker-bake.hcl
@@ -1,5 +1,5 @@
variable "RELEASE" {
default = "12.0.0"
default = "13.0.0"
}

target "default" {
@@ -10,8 +10,7 @@ target "default" {
    proxy = "../../container-template/proxy"
  }
  args = {
    WEBUI_VERSION = "v1.8.0"
    TORCH_VERSION = "2.1.2+cu118"
    XFORMERS_VERSION = "0.0.23.post1+cu118"
    WEBUI_VERSION = "v1.10.0"
  }
  platforms = ["linux/amd64"]
}
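For a local build, the bake file above can be driven directly with Buildx; the overrides shown are examples, and the image tags depend on how the targets are defined elsewhere in the file:

```bash
# Sketch: build the image locally from the template directory using docker-bake.hcl.
# RELEASE and WEBUI_VERSION overrides are examples; add --push only if you want to publish.
cd official-templates/stable-diffusion-webui
RELEASE=13.0.0 docker buildx bake --set "default.args.WEBUI_VERSION=v1.10.0"
```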
25 changes: 19 additions & 6 deletions official-templates/stable-diffusion-webui/pre_start.sh
@@ -3,13 +3,26 @@ rsync --remove-source-files -rlptDu --ignore-existing /venv/ /workspace/venv/

echo "**** syncing stable diffusion to workspace, please wait ****"
rsync --remove-source-files -rlptDu --ignore-existing /stable-diffusion-webui/ /workspace/stable-diffusion-webui/
ln -s /sd-models/* /workspace/stable-diffusion-webui/models/Stable-diffusion/
ln -s /cn-models/* /workspace/stable-diffusion-webui/extensions/sd-webui-controlnet/models/

if [[ $RUNPOD_STOP_AUTO ]]
then
    echo "Skipping auto-start of webui"
else
# Create symbolic links for the models
mkdir -p /workspace/stable-diffusion-webui/models/Stable-diffusion/
ln -sf /runpod/cache/model/Comfy-Org/stable-diffusion-v1-5-archive/main/v1-5-pruned-emaonly.safetensors /workspace/stable-diffusion-webui/models/Stable-diffusion/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-xl-base-1.0/main/sd_xl_base_1.0.safetensors /workspace/stable-diffusion-webui/models/Stable-diffusion/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-xl-refiner-1.0/main/sd_xl_refiner_1.0.safetensors /workspace/stable-diffusion-webui/models/Stable-diffusion/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-3-medium/main/sd3_medium.safetensors /workspace/stable-diffusion-webui/models/Stable-diffusion/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-3-medium/main/sd3_medium_incl_clips.safetensors /workspace/stable-diffusion-webui/models/Stable-diffusion/

# Create symbolic links for clips
mkdir -p /workspace/stable-diffusion-webui/models/Clip/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-3-medium/main/text_encoders/clip_g.safetensors /workspace/stable-diffusion-webui/models/Clip/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-3-medium/main/text_encoders/clip_l.safetensors /workspace/stable-diffusion-webui/models/Clip/
ln -sf /runpod/cache/model/stabilityai/stable-diffusion-3-medium/main/text_encoders/t5xxl_fp8_e4m3fn.safetensors /workspace/stable-diffusion-webui/models/Clip/

# Create symbolic link for ControlNet model
mkdir -p /workspace/stable-diffusion-webui/extensions/sd-webui-controlnet/models/
ln -sf /runpod/cache/model/lllyasviel/ControlNet-v1-1/main/control_v11p_sd15_canny.pth /workspace/stable-diffusion-webui/extensions/sd-webui-controlnet/models/

if [[ ! $RUNPOD_STOP_AUTO ]]; then
    echo "Started webui through relauncher script"
    cd /workspace/stable-diffusion-webui
    python relauncher.py &
23 changes: 8 additions & 15 deletions official-templates/stable-diffusion-webui/relauncher.py
@@ -1,25 +1,18 @@
import os
import time
import subprocess


def relaunch_process(launch_counter=0):
    '''

    '''
def relaunch_process():
    """Relaunches the web UI process in a loop"""
    while True:
        print('Relauncher: Launching...')
        if launch_counter > 0:
            print(f'\tRelaunch count: {launch_counter}')

        print("Relauncher: Launching...")
        try:
            launch_string = "/workspace/stable-diffusion-webui/webui.sh -f"
            os.system(launch_string)
            webui_path = "/workspace/stable-diffusion-webui/webui.sh"
            subprocess.run(f"bash {webui_path} -f", shell=True, check=True)
        except Exception as err:
            print(f"An error occurred: {err}")
        finally:
            print('Relauncher: Process is ending. Relaunching in 2s...')
            launch_counter += 1
            time.sleep(2)
            print("Relauncher: Process is ending. Relaunching in 2s...")
            time.sleep(2)


if __name__ == "__main__":
4 changes: 2 additions & 2 deletions official-templates/stable-diffusion-webui/webui-user.sh
@@ -1,4 +1,4 @@
# #!/bin/bash
#!/bin/bash
#########################################################
# Uncomment and change the variables below to your need:#
#########################################################
@@ -10,7 +10,7 @@ install_dir="/workspace"
#clone_dir="stable-diffusion-webui"

# Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
export COMMANDLINE_ARGS="--port 3000 --xformers --skip-install --listen --enable-insecure-extension-access"
export COMMANDLINE_ARGS="--port 3000 --xformers --skip-install --listen --enable-insecure-extension-access --skip-torch-cuda-test"
#export XFORMERS_PACKAGE="xformers==0.0.17.dev447"

# python3 executable