From cd0fa3e18c655d2e3ad06a5b3642c9967152a858 Mon Sep 17 00:00:00 2001
From: Rob Ballantyne
Date: Thu, 18 Apr 2024 16:35:37 +0100
Subject: [PATCH] update build process

---
 .github/workflows/docker-build.yml            | 127 +++++++++---------
 .../opt/ai-dock/bin/build/layer0/amd.sh       |  12 +-
 .../opt/ai-dock/bin/build/layer0/common.sh    |  74 ++++++----
 .../opt/ai-dock/bin/build/layer0/cpu.sh       |   3 +-
 .../opt/ai-dock/bin/build/layer0/init.sh      |   2 -
 .../opt/ai-dock/bin/build/layer0/nvidia.sh    |  20 ++-
 .../opt/ai-dock/bin/get-cfqt-invokeai.sh      |  28 ----
 .../opt/ai-dock/bin/preflight.d/10-default.sh |   4 +-
 .../opt/ai-dock/bin/update-invokeai.sh        |   1 -
 .../opt/ai-dock/bin/build/layer1/init.sh      |   5 +-
 build/Dockerfile                              |  13 +-
 docker-compose.yaml                           |  11 +-
 12 files changed, 156 insertions(+), 144 deletions(-)
 delete mode 100755 build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh

diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
index d030e5c..1f35757 100644
--- a/.github/workflows/docker-build.yml
+++ b/.github/workflows/docker-build.yml
@@ -2,28 +2,22 @@ name: Docker Build
 
 on:
   workflow_dispatch:
-  push:
-    branches: [ "main" ]
+  #push:
+  #  branches: [ "main" ]
 
 env:
   UBUNTU_VERSION: 22.04
   BUILDX_NO_DEFAULT_ATTESTATIONS: 1
-  LATEST_CUDA: "pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04"
-  LATEST_ROCM: "pytorch-2.2.1-py3.10-rocm-5.7-runtime-22.04"
-  LATEST_CPU: "pytorch-2.2.1-py3.10-cpu-22.04"
-
+
 jobs:
   cpu-base:
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
-        base:
-          - "jupyter-pytorch"
-        python:
-          - "3.10"
-        pytorch:
-          - "2.2.1"
+        build:
+          #- {latest: "true", invokeai: "4.1.0", python: "3.10", pytorch: "2.2.2"}
+          - {latest: "false", invokeai: "4.1.0", python: "3.10", pytorch: "2.2.2"}
     steps:
       -
         name: Free Space
 
@@ -59,22 +53,28 @@
       -
         name: Set tags
         run: |
-          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
-          ver_tag="pytorch-${{ matrix.pytorch }}-py${{ matrix.python }}-cpu-${{ env.UBUNTU_VERSION }}"
-
-          if [[ $ver_tag == ${{ env.LATEST_CPU }} ]]; then
-              TAGS="${img_path}:latest-cpu, ${img_path}:$ver_tag"
-          else
-              TAGS="${img_path}:$ver_tag"
-          fi
-          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
+          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
+
+          INVOKEAI_VERSION=${{ matrix.build.invokeai }}
+          [ -z "$INVOKEAI_VERSION" ] && { echo "Error: INVOKEAI_VERSION is empty. Exiting script." >&2; exit 1; }
+          echo "INVOKEAI_VERSION=${INVOKEAI_VERSION}" >> ${GITHUB_ENV}
+
+          base_tag="cpu-${{ env.UBUNTU_VERSION }}"
+
+          if [[ ${{ matrix.build.latest }} == "true" ]]; then
+              echo "Marking latest"
+              TAGS="${img_path}:${base_tag}, ${img_path}:latest-cpu, ${img_path}:latest-cpu-jupyter"
+          else
+              TAGS="${img_path}:${base_tag}-v${INVOKEAI_VERSION}"
+          fi
+          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
       -
         name: Build and push
         uses: docker/build-push-action@v4
         with:
           context: build
           build-args: |
-            IMAGE_BASE=ghcr.io/ai-dock/${{ matrix.base }}:${{ matrix.pytorch }}-py${{ matrix.python }}-cpu-${{ env.UBUNTU_VERSION }}
+            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.build.python }}-cpu-${{ env.UBUNTU_VERSION }}
+            PYTHON_VERSION=${{ matrix.build.python }}
+            PYTORCH_VERSION=${{ matrix.build.pytorch }}
+            INVOKEAI_VERSION=${{ env.INVOKEAI_VERSION }}
           push: true
           # Avoids unknown/unknown architecture and extra metadata
           provenance: false
 
@@ -85,17 +85,9 @@
     strategy:
       fail-fast: false
       matrix:
-        base:
-          - "jupyter-pytorch"
-        python:
-          - "3.10"
-        pytorch:
-          - "2.2.1"
-        cuda:
-          - "11.8.0"
-          - "12.1.0"
-        level:
-          - "runtime"
+        build:
+          #- {latest: "true", invokeai: "4.1.0", python: "3.10", pytorch: "2.2.2", cuda: "11.8.0-runtime"}
+          - {latest: "false", invokeai: "4.1.0", python: "3.10", pytorch: "2.2.2", cuda: "12.1.1-runtime"}
     steps:
       -
 
@@ -132,23 +124,31 @@
       -
         name: Set tags
         run: |
-          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
-          ver_tag="pytorch-${{ matrix.pytorch }}-py${{ matrix.python }}-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
-
-          if [[ $ver_tag == ${{ env.LATEST_CUDA }} ]]; then
-              TAGS="${img_path}:latest, ${img_path}:latest-cuda, ${img_path}:$ver_tag"
-          else
-              TAGS="${img_path}:$ver_tag"
-          fi
-          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
+          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
+
+          INVOKEAI_VERSION=${{ matrix.build.invokeai }}
+          [ -z "$INVOKEAI_VERSION" ] && { echo "Error: INVOKEAI_VERSION is empty. Exiting script." >&2; exit 1; }
+          echo "INVOKEAI_VERSION=${INVOKEAI_VERSION}" >> ${GITHUB_ENV}
+
+          base_tag="cuda-${{ matrix.build.cuda }}-${{ env.UBUNTU_VERSION }}"
+
+          if [[ ${{ matrix.build.latest }} == "true" ]]; then
+              echo "Marking latest"
+              TAGS="${img_path}:${base_tag}, ${img_path}:latest, ${img_path}:latest-cuda"
+          else
+              TAGS="${img_path}:${base_tag}-v${INVOKEAI_VERSION}"
+          fi
+          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
       -
         name: Build and push
         uses: docker/build-push-action@v4
         with:
           context: build
           build-args: |
-            IMAGE_BASE=ghcr.io/ai-dock/${{ matrix.base }}:${{ matrix.pytorch }}-py${{ matrix.python }}-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
-            PYTORCH_VERSION=${{ matrix.pytorch }}
+            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.build.python }}-cuda-${{ matrix.build.cuda }}-${{ env.UBUNTU_VERSION }}
+            PYTHON_VERSION=${{ matrix.build.python }}
+            PYTORCH_VERSION=${{ matrix.build.pytorch }}
+            INVOKEAI_VERSION=${{ env.INVOKEAI_VERSION }}
           push: true
           provenance: false
           tags: ${{ env.TAGS }}
 
@@ -158,16 +158,9 @@
     strategy:
       fail-fast: false
      matrix:
-        base:
-          - "jupyter-pytorch"
-        python:
-          - "3.10"
-        pytorch:
-          - "2.2.1"
-        rocm:
-          - "5.7"
-        level:
-          - "runtime"
+        build:
+          #- {latest: "true", invokeai: "4.1.0", python: "3.10", pytorch: "2.2.2", rocm: "5.7-runtime"}
+          - {latest: "false", invokeai: "4.1.0", python: "3.10", pytorch: "2.2.2", rocm: "5.7-runtime"}
     steps:
       -
         name: Free Space
 
@@ -203,22 +196,28 @@
       -
         name: Set tags
         run: |
-          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
-          ver_tag="pytorch-${{ matrix.pytorch }}-py${{ matrix.python }}-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
-
-          if [[ $ver_tag == ${{ env.LATEST_ROCM }} ]]; then
-              TAGS="${img_path}:latest-rocm, ${img_path}:$ver_tag"
-          else
-              TAGS="${img_path}:$ver_tag"
-          fi
-          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
+          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
+
+          INVOKEAI_VERSION=${{ matrix.build.invokeai }}
+          [ -z "$INVOKEAI_VERSION" ] && { echo "Error: INVOKEAI_VERSION is empty. Exiting script." >&2; exit 1; }
+          echo "INVOKEAI_VERSION=${INVOKEAI_VERSION}" >> ${GITHUB_ENV}
+
+          base_tag="rocm-${{ matrix.build.rocm }}-${{ env.UBUNTU_VERSION }}"
+
+          if [[ ${{ matrix.build.latest }} == "true" ]]; then
+              echo "Marking latest"
+              TAGS="${img_path}:${base_tag}, ${img_path}:latest-rocm"
+          else
+              TAGS="${img_path}:${base_tag}-v${INVOKEAI_VERSION}"
+          fi
+          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
       -
         name: Build and push
         uses: docker/build-push-action@v4
         with:
           context: build
           build-args: |
-            IMAGE_BASE=ghcr.io/ai-dock/${{ matrix.base }}:${{ matrix.pytorch }}-py${{ matrix.python }}-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
+            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.build.python }}-rocm-${{ matrix.build.rocm }}-${{ env.UBUNTU_VERSION }}
+            PYTHON_VERSION=${{ matrix.build.python }}
+            PYTORCH_VERSION=${{ matrix.build.pytorch }}
+            INVOKEAI_VERSION=${{ env.INVOKEAI_VERSION }}
           push: true
           provenance: false
           tags: ${{ env.TAGS }}
\ No newline at end of file
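A note on the "Set tags" steps above: all three jobs share the same shape (fail fast on an empty InvokeAI version, then emit either rolling or pinned tags). The logic can be sanity-checked outside of Actions; the sketch below is illustrative only, with literal placeholder values standing in for the ${{ ... }} expressions that GitHub substitutes at run time.

    #!/bin/bash
    # Stand-alone version of a "Set tags" step (CUDA variant, for local testing).
    # img_path, INVOKEAI_VERSION, latest and base_tag are placeholders here;
    # in the workflow they come from the env and matrix contexts.
    img_path="ghcr.io/ai-dock/invokeai"
    INVOKEAI_VERSION="4.1.0"
    latest="false"
    base_tag="cuda-12.1.1-runtime-22.04"

    # Same guard as the workflow: refuse to build an unversioned image.
    [ -z "$INVOKEAI_VERSION" ] && { echo "Error: INVOKEAI_VERSION is empty. Exiting script." >&2; exit 1; }

    if [[ "$latest" == "true" ]]; then
        TAGS="${img_path}:${base_tag}, ${img_path}:latest, ${img_path}:latest-cuda"
    else
        TAGS="${img_path}:${base_tag}-v${INVOKEAI_VERSION}"
    fi
    echo "TAGS=${TAGS}"   # the workflow appends this to ${GITHUB_ENV} instead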
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
index 5196d96..4c48231 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
@@ -2,16 +2,14 @@
 
 build_amd_main() {
     build_amd_install_invokeai
+    build_common_run_tests
 }
 
 build_amd_install_invokeai() {
-    # Mamba export does not include pip packages.
-    # We need to get torch again - todo find a better way?
-    micromamba -n invokeai run pip install \
-        --no-cache-dir \
-        --index-url https://download.pytorch.org/whl/rocm${ROCM_VERSION} \
-        torch==${PYTORCH_VERSION} torchvision torchaudio
-    /opt/ai-dock/bin/update-invokeai.sh
+    micromamba run -n invokeai ${PIP_INSTALL} \
+        torch=="${PYTORCH_VERSION}" \
+        onnxruntime-gpu
+    build_common_install_invokeai
 }
 
 build_amd_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
index c974f81..1b08766 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
@@ -9,37 +9,59 @@ build_common_main() {
 
 build_common_create_env() {
     apt-get update
-    $APT_INSTALL libgl1 libgoogle-perftools4 python3-opencv libopencv-dev
-    ln -sf $(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1) \
+    $APT_INSTALL \
+        libgl1-mesa-glx \
+        libtcmalloc-minimal4
+
+    ln -sf $(ldconfig -p | grep -Po "libtcmalloc_minimal.so.\d" | head -n 1) \
         /lib/x86_64-linux-gnu/libtcmalloc.so
-    # A new pytorch env costs ~ 300Mb
-    exported_env=/tmp/${MAMBA_DEFAULT_ENV}.yaml
-    micromamba env export -n ${MAMBA_DEFAULT_ENV} > "${exported_env}"
-    $MAMBA_CREATE -n invokeai --file "${exported_env}"
+
+    micromamba create -n invokeai
+    micromamba run -n invokeai mamba-skel
+    mkdir -p $INVOKEAI_ROOT
+
+    micromamba install -n invokeai -y \
+        python="${PYTHON_VERSION}" \
+        ipykernel \
+        ipywidgets \
+        nano
+    micromamba run -n invokeai install-pytorch -v "$PYTORCH_VERSION"
 }
 
 build_common_install_jupyter_kernels() {
-    if [[ $IMAGE_BASE =~ "jupyter-pytorch" ]]; then
-        $MAMBA_INSTALL -n invokeai \
-            ipykernel \
-            ipywidgets
-
-        kernel_path=/usr/local/share/jupyter/kernels
-
-        # Add the often-present "Python3 (ipykernel) as a comfyui alias"
-        rm -rf ${kernel_path}/python3
-        dir="${kernel_path}/python3"
-        file="${dir}/kernel.json"
-        cp -rf ${kernel_path}/../_template ${dir}
-        sed -i 's/DISPLAY_NAME/'"Python3 (ipykernel)"'/g' ${file}
-        sed -i 's/PYTHON_MAMBA_NAME/'"invokeai"'/g' ${file}
-
-        dir="${kernel_path}/invokeai"
-        file="${dir}/kernel.json"
-        cp -rf ${kernel_path}/../_template ${dir}
-        sed -i 's/DISPLAY_NAME/'"Invoke AI"'/g' ${file}
-        sed -i 's/PYTHON_MAMBA_NAME/'"invokeai"'/g' ${file}
+    micromamba install -n invokeai -y \
+        ipykernel \
+        ipywidgets
+
+    kernel_path=/usr/local/share/jupyter/kernels
+
+    # Add the often-present "Python3 (ipykernel)" as an InvokeAI alias
+    rm -rf ${kernel_path}/python3
+    dir="${kernel_path}/python3"
+    file="${dir}/kernel.json"
+    cp -rf ${kernel_path}/../_template ${dir}
+    sed -i 's/DISPLAY_NAME/'"Python3 (ipykernel)"'/g' ${file}
+    sed -i 's/PYTHON_MAMBA_NAME/'"invokeai"'/g' ${file}
+
+    dir="${kernel_path}/invokeai"
+    file="${dir}/kernel.json"
+    cp -rf ${kernel_path}/../_template ${dir}
+    sed -i 's/DISPLAY_NAME/'"Invoke AI"'/g' ${file}
+    sed -i 's/PYTHON_MAMBA_NAME/'"invokeai"'/g' ${file}
+}
+
+build_common_install_invokeai() {
+    micromamba run -n invokeai ${PIP_INSTALL} --use-pep517 \
+        torch==${PYTORCH_VERSION} \
+        InvokeAI==${INVOKEAI_VERSION}
+}
+
+build_common_run_tests() {
+    installed_pytorch_version=$(micromamba run -n invokeai python -c "import torch; print(torch.__version__)")
+    if [[ "$installed_pytorch_version" != "$PYTORCH_VERSION"* ]]; then
+        echo "Expected PyTorch ${PYTORCH_VERSION} but found ${installed_pytorch_version}"
+        exit 1
     fi
 }
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
index 43aeb99..e1c37a5 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
@@ -2,10 +2,11 @@
 
 build_cpu_main() {
     build_cpu_install_invokeai
+    build_common_run_tests
 }
 
 build_cpu_install_invokeai() {
-    /opt/ai-dock/bin/update-invokeai.sh
+    build_common_install_invokeai
 }
 
 build_cpu_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
index 81f25ae..974e256 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
@@ -17,6 +17,4 @@ else
     exit 1
 fi
 
-$MAMBA_DEFAULT_RUN python /opt/ai-dock/tests/assert-torch-version.py
-
 source /opt/ai-dock/bin/build/layer0/clean.sh
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
index 6d1f08b..2b71ca2 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
@@ -2,16 +2,30 @@
 
 build_nvidia_main() {
     build_nvidia_install_invokeai
+    build_common_run_tests
+    build_nvidia_run_tests
 }
 
 build_nvidia_install_invokeai() {
     micromamba run -n invokeai ${PIP_INSTALL} \
         torch=="${PYTORCH_VERSION}" \
-        nvidia-ml-py3
+        nvidia-ml-py3 \
+        onnxruntime-gpu
 
-    micromamba install -n invokeai -c xformers xformers
+    micromamba install -n invokeai -c xformers -y \
+        xformers \
+        pytorch=${PYTORCH_VERSION} \
+        pytorch-cuda="$(cut -d '.' -f 1,2 <<< "${CUDA_VERSION}")"
 
-    /opt/ai-dock/bin/update-invokeai.sh
+    build_common_install_invokeai
+}
+
+build_nvidia_run_tests() {
+    installed_pytorch_cuda_version=$(micromamba run -n invokeai python -c "import torch; print(torch.version.cuda)")
+    if [[ "$CUDA_VERSION" != "$installed_pytorch_cuda_version"* ]]; then
+        echo "Expected PyTorch CUDA ${CUDA_VERSION} but found ${installed_pytorch_cuda_version}"
+        exit 1
+    fi
 }
 
 build_nvidia_main "$@"
\ No newline at end of file
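The run-tests functions above rely on bash prefix (glob) matching because the version strings are not exact matches: torch.__version__ typically carries a local suffix (e.g. 2.2.2+cu121), while torch.version.cuda (e.g. 12.1) is shorter than the full CUDA_VERSION (e.g. 12.1.1), so the glob sits on opposite sides in the two checks. A standalone sketch with example values only:

    #!/bin/bash
    # Example values; in the real build these come from the environment and torch.
    PYTORCH_VERSION="2.2.2"
    installed_pytorch_version="2.2.2+cu121"   # torch.__version__

    # The installed version must start with the requested version.
    if [[ "$installed_pytorch_version" != "$PYTORCH_VERSION"* ]]; then
        echo "Expected PyTorch ${PYTORCH_VERSION} but found ${installed_pytorch_version}" >&2
        exit 1
    fi

    CUDA_VERSION="12.1.1"
    installed_pytorch_cuda_version="12.1"     # torch.version.cuda

    # Inverted here: the requested version must start with the shorter reported one.
    if [[ "$CUDA_VERSION" != "$installed_pytorch_cuda_version"* ]]; then
        echo "Expected PyTorch CUDA ${CUDA_VERSION} but found ${installed_pytorch_cuda_version}" >&2
        exit 1
    fi
    echo "Version checks passed"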
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh b/build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh
deleted file mode 100755
index 8ac4994..0000000
--- a/build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-service="webui"
-port=${WEBUI_PORT:-7860}
-
-if [[ -z $CF_QUICK_TUNNELS ]]; then
-    printf "\n** You have not enabled Cloudflare quick tunnels **\n\n"
-    printf "To enable, you can do the following:\n\n"
-    printf "1. export CF_QUICK_TUNNELS=true\n"
-    printf "2. supervisorctl restart %s\n\n" $service
-    exit 1
-fi
-
-if [[ -f /var/log/supervisor/quicktunnel-${service}.log ]]; then
-    grep -b0 -a0 'trycloudflare.com' /var/log/supervisor/quicktunnel-${service}.log
-    if [[ $? -gt 0 ]]; then
-        printf "\n** Something may have gone wrong setting up the %s tunnel **\n\n" $service
-        printf "To set up manually you can run the following command:\n\n"
-        printf "cloudflared tunnel --url localhost:%s > /var/log/supervisor/quicktunnel-%s.log 2>&1 &\n\n" $port $service
-    fi
-else
-    printf "** The %s tunnel has not yet started **\n\n"
-    if [[ -f /run/provisioning_script ]]; then
-        printf "The container is still being provisioned. Check the logs for progress (logtail.sh)\n\n"
-    else
-        printf "Please wait a moment and try again.\n\n"
-    fi
-fi
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh b/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh
index 0b6911a..5007cfa 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh
@@ -17,10 +17,10 @@ function preflight_copy_notebook() {
 
 # Default to false until we can stabilize the update process
 function preflight_update_invokeai() {
-    if [[ ${AUTO_UPDATE,,} != "false" ]]; then
+    if [[ ${AUTO_UPDATE,,} == "true" ]]; then
         /opt/ai-dock/bin/update-invokeai.sh
     else
-        printf "Skipping auto update (AUTO_UPDATE=false)"
+        printf "Skipping auto update (AUTO_UPDATE != true)\n"
     fi
 }
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh b/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh
index afdee8a..da8eff0 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh
+++ b/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh
@@ -1,6 +1,5 @@
 #!/bin/bash
 umask 002
-branch=master
 
 if [[ -n "${INVOKEAI_VERSION}" ]]; then
     version="${INVOKEAI_VERSION}"
diff --git a/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh b/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh
index b955675..e7c3c25 100755
--- a/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh
+++ b/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh
@@ -98,14 +98,11 @@ function build_extra_start() {
     done
 
     pkill invokeai-web
-
-    # Ensure pytorch hasn't been clobbered
-    $MAMBA_DEFAULT_RUN python /opt/ai-dock/tests/assert-torch-version.py || exit 1
 }
 
 function build_extra_get_mamba_packages() {
     if [[ -n $MAMBA_PACKAGES ]]; then
-        $MAMBA_INSTALL -n invokeai ${MAMBA_PACKAGES[@]}
+        micromamba install -n invokeai -y ${MAMBA_PACKAGES[@]}
     fi
 }
diff --git a/build/Dockerfile b/build/Dockerfile
index aaa1655..75354ea 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -1,12 +1,21 @@
 # For build automation - Allows building from any ai-dock base image
 # Use a *cuda*base* image as default because pytorch brings the libs
-ARG IMAGE_BASE="ghcr.io/ai-dock/pytorch:2.1.1-py3.10-cuda-11.8.0-base-22.04"
+ARG IMAGE_BASE="ghcr.io/ai-dock/python:3.10-cuda-11.8.0-base-22.04"
 FROM ${IMAGE_BASE}
 
 LABEL org.opencontainers.image.source https://github.com/ai-dock/invokeai
 
-LABEL org.opencontainers.image.description "AI-Dock + Invoke AI docker image"
+LABEL org.opencontainers.image.description "InvokeAI docker images for use in GPU cloud and local environments. Includes AI-Dock base for authentication and improved user experience."
 
 LABEL maintainer="Rob Ballantyne"
 
+ARG PYTHON_VERSION="3.10"
+ENV PYTHON_VERSION="${PYTHON_VERSION}"
+
+ARG PYTORCH_VERSION="2.2.2"
+ENV PYTORCH_VERSION="${PYTORCH_VERSION}"
+
+ARG INVOKEAI_VERSION
+ENV INVOKEAI_VERSION=${INVOKEAI_VERSION}
+
 ENV IMAGE_SLUG="invokeai"
 ENV OPT_SYNC=invokeai
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 59534e0..a331392 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -6,11 +6,14 @@ services:
     build:
       context: ./build
       args:
-        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.2.1-py3.10-cuda-11.8.0-runtime-22.04}
+        PYTHON_VERSION: ${PYTHON_VERSION:-3.10}
+        PYTORCH_VERSION: ${PYTORCH_VERSION:-2.2.2}
+        INVOKEAI_VERSION: ${INVOKEAI_VERSION:-4.1.0}
+        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/python:3.10-cuda-11.8.0-runtime-22.04}
       tags:
-        - "ghcr.io/ai-dock/invokeai:${IMAGE_TAG:-pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04}"
+        - "ghcr.io/ai-dock/invokeai:${IMAGE_TAG:-cuda-11.8.0-runtime-22.04}"
 
-    image: ghcr.io/ai-dock/invokeai:${IMAGE_TAG:-pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04}
+    image: ghcr.io/ai-dock/invokeai:${IMAGE_TAG:-cuda-11.8.0-runtime-22.04}
 
     devices:
       - "/dev/dri:/dev/dri"
 
@@ -20,7 +23,7 @@
     volumes:
       # Workspace
       - ./workspace:${WORKSPACE:-/workspace/}:rshared
-      # You can share /workspace/storage with other non-WEBUI containers. See README
+      # You can share /workspace/storage with other non-InvokeAI containers. See README
       #- /path/to/common_storage:${WORKSPACE:-/workspace/}storage/:rshared
       # Will echo to root-owned authorized_keys file;
      # Avoids changing local file owner
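Since docker-compose.yaml now forwards the version build arguments from the environment via ${VAR:-default} substitutions, a local build can pin versions without editing any files. A minimal sketch, assuming Docker with Compose v2 and using the defaults above as example values:

    # Compose picks the variables up from the shell environment.
    PYTHON_VERSION=3.10 PYTORCH_VERSION=2.2.2 INVOKEAI_VERSION=4.1.0 docker compose build

    # Or drive the Dockerfile directly, mirroring what the CI workflow passes:
    docker build build \
        --build-arg IMAGE_BASE=ghcr.io/ai-dock/python:3.10-cuda-11.8.0-runtime-22.04 \
        --build-arg PYTHON_VERSION=3.10 \
        --build-arg PYTORCH_VERSION=2.2.2 \
        --build-arg INVOKEAI_VERSION=4.1.0 \
        -t ghcr.io/ai-dock/invokeai:cuda-11.8.0-runtime-22.04-v4.1.0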