diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..3cb040a
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+github: [ai-dock, robballantyne]
diff --git a/.github/workflows/clear-cache.yml b/.github/workflows/clear-cache.yml
new file mode 100644
index 0000000..2d1cd89
--- /dev/null
+++ b/.github/workflows/clear-cache.yml
@@ -0,0 +1,55 @@
+# https://stackoverflow.com/a/73556714
+name: Clear Cache
+
+on:
+  workflow_dispatch:
+
+permissions:
+  actions: write
+
+jobs:
+  clear-cache:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clear cache
+        uses: actions/github-script@v6
+        with:
+          script: |
+            console.log("About to clear")
+            const response = await github.rest.actions.getActionsCacheList({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              page: 1,
+              per_page: 100
+            });
+
+            const pages = (function() {
+              if (typeof response.headers.link !== 'undefined') {
+                return response.headers.link.split(">").slice(-2)[0].split('=').slice(-1)[0]
+              }
+              return 1;
+            })();
+
+            console.log("Total pages: " + pages);
+
+            for (let page = pages; page >= 1; page--) {
+              console.log("Processing page " + page)
+
+              const response = await github.rest.actions.getActionsCacheList({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                page: page,
+                per_page: 100
+              });
+
+              for (const cache of response.data.actions_caches) {
+                console.log(cache)
+                github.rest.actions.deleteActionsCacheById({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  cache_id: cache.id,
+                })
+              }
+            }
+
+            console.log("Clear completed")
\ No newline at end of file
diff --git a/.github/workflows/delete-old-images.yml b/.github/workflows/delete-old-images.yml
new file mode 100644
index 0000000..773d65f
--- /dev/null
+++ b/.github/workflows/delete-old-images.yml
@@ -0,0 +1,110 @@
+name: Delete Old Packages
+
+env:
+  PER_PAGE: 100
+
+on:
+  workflow_dispatch:
+    inputs:
+      age:
+        type: choice
+        required: true
+        description: Delete older than
+        options:
+          - 1 Hour
+          - 12 Hours
+          - 1 Day
+          - 1 Week
+          - 2 Weeks
+          - 1 Month
+          - 6 Months
+          - 1 Year
+          - 2 Years
+          - 3 Years
+          - 4 Years
+          - 5 Years
+          - All Packages
+
+jobs:
+  delete-old-packages:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        run: |
+          echo "PACKAGE_NAME=${GITHUB_REPOSITORY,,}" >> ${GITHUB_ENV}
+          echo "OWNER=orgs/${GITHUB_REPOSITORY_OWNER,,}" >> ${GITHUB_ENV}
+      -
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.DELETE_PACKAGES_TOKEN }}
+          script: |
+            const delete_age = (function() {
+              switch ("${{ github.event.inputs.age }}") {
+                case "All Packages":
+                  return 0;
+                case "1 Hour":
+                  return 60;
+                case "12 Hours":
+                  return 720;
+                case "1 Day":
+                  return 1440;
+                case "1 Week":
+                  return 10080;
+                case "2 Weeks":
+                  return 20160;
+                case "1 Month":
+                  return 43800;
+                case "6 Months":
+                  return 262800;
+                case "1 Year":
+                  return 525600;
+                case "2 Years":
+                  return 525600 * 2;
+                case "3 Years":
+                  return 525600 * 3;
+                case "4 Years":
+                  return 525600 * 4;
+                case "5 Years":
+                  return 525600 * 5;
+                default:
+                  return 157680000;
+              }
+            })();
+
+            const now = new Date();
+            const epoch_minutes = Math.round(now.getTime() / 1000 / 60);
+
+            const response = await github.request("GET /${{ env.OWNER }}/packages/container/${{ github.event.repository.name }}/versions",
+              { per_page: ${{ env.PER_PAGE }}
+            });
+
+            const pages = (function() {
+              if (typeof response.headers.link !== 'undefined') {
+                return response.headers.link.split(">").slice(-2)[0].split('=').slice(-1)[0]
+              }
+              return 1;
+            })();
+
+            console.log("Total pages: " + pages);
+
+            for (let page = pages; page >= 1; page--) {
+              console.log("Processing page " + page)
+
+              const response = await github.request("GET /${{ env.OWNER }}/packages/container/${{ github.event.repository.name }}/versions",
+                {
+                  per_page: ${{ env.PER_PAGE }},
+                  page: page
+                });
+
+              console.log("Deleting packages updated more than " + delete_age + " minutes ago...")
+              for (version of response.data) {
+                let updated_at = new Date(version.updated_at)
+                let minutes_old = epoch_minutes - Math.round(updated_at.getTime() / 1000 / 60);
+                console.log("Package is " + minutes_old + " minutes old")
+                if (minutes_old > delete_age) {
+                  console.log("delete " + version.id)
+                  const deleteResponse = await github.request("DELETE /${{ env.OWNER }}/packages/container/${{ github.event.repository.name }}/versions/" + version.id, { });
+                  console.log("status " + deleteResponse.status)
+                }
+              }
+            }
diff --git a/.github/workflows/delete-untagged-images.yml b/.github/workflows/delete-untagged-images.yml
new file mode 100644
index 0000000..017ab09
--- /dev/null
+++ b/.github/workflows/delete-untagged-images.yml
@@ -0,0 +1,56 @@
+name: Delete Untagged Packages
+
+env:
+  PER_PAGE: 100
+
+on:
+  workflow_dispatch:
+  workflow_run:
+    workflows: ["Docker Build"]
+    types:
+      - completed
+
+jobs:
+  delete-untagged:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        run: |
+          echo "PACKAGE_NAME=${GITHUB_REPOSITORY,,}" >> ${GITHUB_ENV}
+          echo "OWNER=orgs/${GITHUB_REPOSITORY_OWNER,,}" >> ${GITHUB_ENV}
+      -
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.DELETE_PACKAGES_TOKEN }}
+          script: |
+            const response = await github.request("GET /${{ env.OWNER }}/packages/container/${{ github.event.repository.name }}/versions",
+              { per_page: ${{ env.PER_PAGE }}
+            });
+
+            const pages = (function() {
+              if (typeof response.headers.link !== 'undefined') {
+                return response.headers.link.split(">").slice(-2)[0].split('=').slice(-1)[0]
+              }
+              return 1;
+            })();
+
+            console.log("Total pages: " + pages);
+
+            for (let page = pages; page >= 1; page--) {
+              console.log("Processing page " + page)
+
+              const response = await github.request("GET /${{ env.OWNER }}/packages/container/${{ github.event.repository.name }}/versions",
+                {
+                  per_page: ${{ env.PER_PAGE }},
+                  page: page
+                });
+
+              for (version of response.data) {
+                if (version.metadata.container.tags.length == 0) {
+                  console.log("delete " + version.id)
+                  const deleteResponse = await github.request("DELETE /${{ env.OWNER }}/packages/container/${{ github.event.repository.name }}/versions/" + version.id, { });
+                  console.log("status " + deleteResponse.status)
+                }
+              }
+            }
+
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
new file mode 100644
index 0000000..8f5200f
--- /dev/null
+++ b/.github/workflows/docker-build.yml
@@ -0,0 +1,237 @@
+name: Docker Build
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [ "main" ]
+
+env:
+  UBUNTU_VERSION: 22.04
+  BUILDX_NO_DEFAULT_ATTESTATIONS: 1
+  # Until py3.11 is available on all platforms
+  LATEST_CUDA: "pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04"
+  LATEST_CUDA_JUPYTER: "jupyter-pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04"
+  LATEST_ROCM: "pytorch-2.2.1-py3.10-rocm-5.7-runtime-22.04"
+  LATEST_ROCM_JUPYTER: "jupyter-pytorch-2.2.1-py3.10-rocm-5.7-runtime-22.04"
+  LATEST_CPU: "pytorch-2.2.1-py3.10-cpu-22.04"
+  LATEST_CPU_JUPYTER: "jupyter-pytorch-2.2.1-py3.10-cpu-22.04"
+
+jobs:
+  cpu-base:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        base:
+          - "pytorch"
+          - "jupyter-pytorch"
+        python:
+          - "3.10"
+        pytorch:
+          - "2.2.1"
+    steps:
+      -
+        name: Free Space
+        run: |
+          df -h
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf /usr/local/.ghcup
+          sudo rm -rf /usr/local/share/boost
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          df -h
+      -
+        name: Env Setter
+        run: |
+          echo "PACKAGE_NAME=${GITHUB_REPOSITORY,,}" >> ${GITHUB_ENV}
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Permissions fixes
+        run: |
+          reponame="$(basename ${GITHUB_REPOSITORY})"
+          target="${HOME}/work/${reponame}/${reponame}/build/COPY*"
+          chmod -R ug+rwX ${target}
+      -
+        name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      -
+        name: Set tags
+        run: |
+          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
+          ver_tag="${{ matrix.base }}-${{ matrix.pytorch }}-py${{ matrix.python }}-cpu-${{ env.UBUNTU_VERSION }}"
+
+          if [[ $ver_tag == ${{ env.LATEST_CPU }} ]]; then
+              TAGS="${img_path}:latest-cpu, ${img_path}:$ver_tag"
+          elif [[ $ver_tag == ${{ env.LATEST_CPU_JUPYTER }} ]]; then
+              TAGS="${img_path}:latest-cpu-jupyter, ${img_path}:$ver_tag"
+          else
+              TAGS="${img_path}:$ver_tag"
+          fi
+          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
+      -
+        name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          context: build
+          build-args: |
+            IMAGE_BASE=ghcr.io/ai-dock/${{ matrix.base }}:${{ matrix.pytorch }}-py${{ matrix.python }}-cpu-${{ env.UBUNTU_VERSION }}
+          push: true
+          # Avoids unknown/unknown architecture and extra metadata
+          provenance: false
+          tags: ${{ env.TAGS }}
+
+  nvidia-base:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        base:
+          - "pytorch"
+          - "jupyter-pytorch"
+        python:
+          - "3.10"
+        pytorch:
+          - "2.2.1"
+        cuda:
+          - "11.8.0"
+          - "12.1.0"
+        level:
+          - "runtime"
+
+    steps:
+      -
+        name: Free Space
+        run: |
+          df -h
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf /usr/local/.ghcup
+          sudo rm -rf /usr/local/share/boost
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          df -h
+      -
+        name: Env Setter
+        run: |
+          echo "PACKAGE_NAME=${GITHUB_REPOSITORY,,}" >> ${GITHUB_ENV}
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Permissions fixes
+        run: |
+          reponame="$(basename ${GITHUB_REPOSITORY})"
+          target="${HOME}/work/${reponame}/${reponame}/build/COPY*"
+          chmod -R ug+rwX ${target}
+      -
+        name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      -
+        name: Set tags
+        run: |
+          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
+          ver_tag="${{ matrix.base }}-${{ matrix.pytorch }}-py${{ matrix.python }}-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
+
+          if [[ $ver_tag == ${{ env.LATEST_CUDA }} ]]; then
+              TAGS="${img_path}:latest, ${img_path}:latest-cuda, ${img_path}:$ver_tag"
+          elif [[ $ver_tag == ${{ env.LATEST_CUDA_JUPYTER }} ]]; then
+              TAGS="${img_path}:latest-jupyter, ${img_path}:latest-cuda-jupyter, ${img_path}:$ver_tag"
+          else
+              TAGS="${img_path}:$ver_tag"
+          fi
+          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
+      -
+        name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          context: build
+          build-args: |
+            IMAGE_BASE=ghcr.io/ai-dock/${{ matrix.base }}:${{ matrix.pytorch }}-py${{ matrix.python }}-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
+            PYTORCH_VERSION=${{ matrix.pytorch }}
+          push: true
+          provenance: false
+          tags: ${{ env.TAGS }}
+
+  amd-base:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        base:
+          - "pytorch"
+          - "jupyter-pytorch"
+        python:
+          - "3.10"
+        pytorch:
+          - "2.2.1"
+        rocm:
+          - "5.7"
+        level:
+          - "runtime"
+    steps:
+      -
+        name: Free Space
+        run: |
+          df -h
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf /usr/local/.ghcup
+          sudo rm -rf /usr/local/share/boost
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          df -h
+      -
+        name: Env Setter
+        run: |
+          echo "PACKAGE_NAME=${GITHUB_REPOSITORY,,}" >> ${GITHUB_ENV}
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Permissions fixes
+        run: |
+          reponame="$(basename ${GITHUB_REPOSITORY})"
+          target="${HOME}/work/${reponame}/${reponame}/build/COPY*"
+          chmod -R ug+rwX ${target}
+      -
+        name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      -
+        name: Set tags
+        run: |
+          img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
+          ver_tag="${{ matrix.base }}-${{ matrix.pytorch }}-py${{ matrix.python }}-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
+
+          if [[ $ver_tag == ${{ env.LATEST_ROCM }} ]]; then
+              TAGS="${img_path}:latest-rocm, ${img_path}:$ver_tag"
+          elif [[ $ver_tag == ${{ env.LATEST_ROCM_JUPYTER }} ]]; then
+              TAGS="${img_path}:latest-rocm-jupyter, ${img_path}:$ver_tag"
+          else
+              TAGS="${img_path}:$ver_tag"
+          fi
+          echo "TAGS=${TAGS}" >> ${GITHUB_ENV}
+      -
+        name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          context: build
+          build-args: |
+            IMAGE_BASE=ghcr.io/ai-dock/${{ matrix.base }}:${{ matrix.pytorch }}-py${{ matrix.python }}-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
+          push: true
+          provenance: false
+          tags: ${{ env.TAGS }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0e5f870
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+workspace
+*__pycache__
+build/COPY_ROOT_EXTRA/
+config/authorized_keys
+config/rclone
+tpdocs
+.env
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..124ba10
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,27 @@
+Custom Software License
+
+Copyright © 2022-present Robert Ballantyne, trading as AI-Dock. All rights reserved.
+
+Author and Licensor: Robert Ballantyne.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software") to use the Software for personal or commercial purposes, subject to the following conditions:
+
+1. Users may not modify the Software in any way that overrides the original code written by the author, except as explicitly instructed in the accompanying documentation provided by the author.
+
+2. Users may add additional code or modifications for their custom builds, provided that such additions do not override the original code written by the author.
+
+3. Distribution of the Software, including forks and source code, is permitted without explicit permission from the author. Hosting derivatives on a public registry, such as Docker Hub, is allowed, but users are not permitted to actively encourage the use of these derivatives by others without explicit permission from the author. Distribution of Docker images and templates derived from the Software is permitted only with explicit permission from the author. Permission may be revoked at any time without prior notice. To obtain permission for distribution of Docker images and templates, users must enter into a separate licensing agreement with the author.
+
+4. Users may not remove or alter any branding, trademarks, or copyright notices present in the Software, including hyperlinks to external resources such as the author's website or documentation, and links to third-party services. These hyperlinks and links shall remain intact and unaltered.
+
+5. Distribution of modified versions of the Software must prominently display a notice indicating that the Software has been modified from the original version and include appropriate attribution to the original author.
+
+6. Users may not engage in any activities that could lead to malicious imitation or misrepresentation of the Software, including but not limited to creating derivative works that attempt to pass off as the original Software or using the Software to mislead or deceive others.
+
+7. The author must ensure that the complete corresponding source code for the Software, including any modifications made by the author, remains publicly available at all times.
+
+8. Users who have been granted permission to modify and distribute the Software are responsible for ensuring that the complete corresponding source code for any modifications they make to the Software remains publicly available at all times when they distribute their versions of the Software. This requirement applies to both the original Software and any derivative works created based on the Software.
+
+9. This license applies only to the code originating from AI-Dock repositories, both inside and outside of containers. Other bundled software or dependencies should be viewed as separate entities and may be subject to their own respective licenses.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/NOTICE.md b/NOTICE.md
new file mode 100644
index 0000000..d251983
--- /dev/null
+++ b/NOTICE.md
@@ -0,0 +1,17 @@
+## Notice:
+
+I have chosen to apply a custom license to this software for the following reasons:
+
+- **Uniqueness of Containers:** Common open-source licenses may not adequately address the nuances of software distributed within containers. My custom license ensures clarity regarding the separation of my code from bundled software, thereby respecting the rights of other authors.
+
+- **Preservation of Source Code Integrity:** I am committed to maintaining the integrity of the source code while adhering to the spirit of open-source software. My custom license helps ensure transparency and accountability in my development practices.
+
+- **Funding and Control of Distribution:** Some of the funding for this project comes from maintaining control of distribution. This funding model wouldn't be possible without limiting distribution in certain ways, ultimately supporting the project's mission.
+
+- **Empowering Access:** Supported by controlled distribution, the mission of this project is to empower users with access to valuable tools and resources in the cloud, enabling them to utilize software that may otherwise require hardware resources beyond their reach.
+
+I welcome sponsorship from commercial entities utilizing this software, although it is not mandatory. Your support helps sustain the ongoing development and improvement of this project.
+
+You can sponsor this project at https://github.com/sponsors/ai-dock.
+
+Your understanding and support are greatly appreciated.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0ebd378
--- /dev/null
+++ b/README.md
@@ -0,0 +1,112 @@
+[![Docker Build](https://github.com/ai-dock/invokeai/actions/workflows/docker-build.yml/badge.svg)](https://github.com/ai-dock/invokeai/actions/workflows/docker-build.yml)
+
+# AI-Dock + Invoke AI Docker Image
+
+Run [Invoke AI](https://github.com/invoke-ai/InvokeAI) in a Docker container locally or in the cloud.
+
+>[!NOTE]
+>These images do not bundle models or third-party configurations. You should use a [provisioning script](#provisioning-script) to automatically configure your container. You can find examples in `config/provisioning`.
+
+## Documentation
+
+All AI-Dock containers share a common base which is designed to make running on cloud services such as [vast.ai](https://link.ai-dock.org/vast.ai) and [runpod.io](https://link.ai-dock.org/template) as straightforward and user friendly as possible.
+
+Common features and options are documented in the [base wiki](https://github.com/ai-dock/base-image/wiki) but any additional features unique to this image will be detailed below.
+
+
+#### Version Tags
+
+The `:latest` tag points to `:latest-cuda`.
+
+Tags follow these patterns:
+
+##### _CUDA_
+- `:pytorch-[pytorch-version]-py[python-version]-cuda-[x.x.x]-runtime-[ubuntu-version]`
+
+- `:latest-cuda` → `:pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04`
+
+- `:latest-cuda-jupyter` → `:jupyter-pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04`
+
+##### _ROCm_
+- `:pytorch-[pytorch-version]-py[python-version]-rocm-[x.x.x]-runtime-[ubuntu-version]`
+
+- `:latest-rocm` → `:pytorch-2.2.1-py3.10-rocm-5.7-runtime-22.04`
+
+- `:latest-rocm-jupyter` → `:jupyter-pytorch-2.2.1-py3.10-rocm-5.7-runtime-22.04`
+
+##### _CPU_
+- `:pytorch-[pytorch-version]-py[python-version]-cpu-[ubuntu-version]`
+
+- `:latest-cpu` → `:pytorch-2.2.1-py3.10-cpu-22.04`
+
+- `:latest-cpu-jupyter` → `:jupyter-pytorch-2.2.1-py3.10-cpu-22.04`
+
+Browse [here](https://github.com/ai-dock/invokeai/pkgs/container/invokeai) for an image suitable for your target environment.
+
+Supported Python versions: `3.10`
+
+Supported Pytorch versions: `2.2.1`
+
+Supported Platforms: `NVIDIA CUDA`, `AMD ROCm`, `CPU`
+
+## Additional Environment Variables
+
+| Variable                 | Description |
+| ------------------------ | ----------- |
+| `AUTO_UPDATE`            | Update Invoke AI on startup (default `true`) |
+| `INVOKEAI_VERSION`       | InvokeAI version tag (default `None`) |
+| `INVOKEAI_FLAGS`         | Startup flags |
+| `INVOKEAI_PORT_HOST`     | Web UI port (default `9090`) |
+| `INVOKEAI_URL`           | Override `$DIRECT_ADDRESS:port` with URL for Invoke AI service |
+| `INVOKEAI_*`             | Invoke AI environment configuration as described in the [project documentation](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/#environment-variables) |
+
+See the base environment variables [here](https://github.com/ai-dock/base-image/wiki/2.0-Environment-Variables) for more configuration options.
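+
+For illustration, a minimal local launch might look like the following sketch (not an official invocation — the port and credential values are placeholders taken from the defaults above and the bundled `docker-compose.yaml`):
+
+```bash
+docker run -d --gpus all \
+  -p 9090:9090 -p 1111:1111 \
+  -e WEB_USER=user -e WEB_PASSWORD=password \
+  -e INVOKEAI_PORT_HOST=9090 \
+  ghcr.io/ai-dock/invokeai:latest
+```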
+
+### Additional Micromamba Environments
+
+| Environment    | Packages                                   |
+| -------------- | ------------------------------------------ |
+| `invokeai`     | Invoke AI and dependencies                 |
+
+This micromamba environment will be activated on shell login.
+
+See the base micromamba environments [here](https://github.com/ai-dock/base-image/wiki/1.0-Included-Software#installed-micromamba-environments).
+
+
+## Additional Services
+
+The following services will be launched alongside the [default services](https://github.com/ai-dock/base-image/wiki/1.0-Included-Software) provided by the base image.
+
+### Invoke AI
+
+The service will launch on port `9090` unless you have specified an override with `INVOKEAI_PORT_HOST`.
+
+Invoke AI will be updated to the latest version on container start. You can pin a specific release by setting the `INVOKEAI_VERSION` variable.
+
+You can set startup flags by using variable `INVOKEAI_FLAGS`.
+
+To manage this service you can use `supervisorctl [start|stop|restart] invokeai`.
+
+>[!NOTE]
+>All services are password protected by default. See the [security](https://github.com/ai-dock/base-image/wiki#security) and [environment variables](https://github.com/ai-dock/base-image/wiki/2.0-Environment-Variables) documentation for more information.
+
+
+## Pre-Configured Templates
+
+**Vast.ai**
+
+- [Invoke AI:latest](https://link.ai-dock.org/template-vast-invokeai)
+
+- [Invoke AI:latest-jupyter](https://link.ai-dock.org/template-vast-invokeai-jupyter)
+
+---
+
+**Runpod.io**
+
+- [Invoke AI:latest](https://link.ai-dock.org/template-runpod-invokeai)
+
+- [Invoke AI:latest-jupyter](https://link.ai-dock.org/template-runpod-invokeai-jupyter)
+
+---
+
+_The author ([@robballantyne](https://github.com/robballantyne)) may be compensated if you sign up to services linked in this document. Testing multiple variants of GPU images in many different environments is both costly and time-consuming; this helps to offset costs._
\ No newline at end of file
diff --git a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/.gitkeep b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/invokeai.conf b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/invokeai.conf
new file mode 100644
index 0000000..568ce6f
--- /dev/null
+++ b/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/invokeai.conf
@@ -0,0 +1,20 @@
+[program:invokeai]
+user=$USER_NAME
+environment=PROC_NAME="%(program_name)s",USER=$USER_NAME,HOME=/home/$USER_NAME
+command=/opt/ai-dock/bin/supervisor-invokeai.sh
+process_name=%(program_name)s
+numprocs=1
+directory=/home/$USER_NAME
+priority=1500
+autostart=true
+startsecs=5
+startretries=3
+autorestart=true
+stopsignal=TERM
+stopwaitsecs=10
+stopasgroup=true
+killasgroup=true
+stdout_logfile=/var/log/supervisor/invokeai.log
+stdout_logfile_maxbytes=10MB
+stdout_logfile_backups=1
+redirect_stderr=true
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
new file mode 100755
index 0000000..5196d96
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
@@ -0,0 +1,17 @@
+#!/bin/false
+
+build_amd_main() {
+    build_amd_install_invokeai
+}
+
+build_amd_install_invokeai() {
+    # Mamba export does not include pip packages.
+    # We need to get torch again - todo find a better way?
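+    # (The invokeai env is cloned from the default env via `micromamba env export`,
+    # which drops pip-installed packages, so a ROCm-enabled torch build must be
+    # pulled back in from the ROCm wheel index.)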
+    micromamba -n invokeai run pip install \
+        --no-cache-dir \
+        --index-url https://download.pytorch.org/whl/rocm${ROCM_VERSION} \
+        torch==${PYTORCH_VERSION} torchvision torchaudio
+    /opt/ai-dock/bin/update-invokeai.sh
+}
+
+build_amd_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
new file mode 100755
index 0000000..a4d474c
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
@@ -0,0 +1,9 @@
+#!/bin/false
+
+# Tidy up and keep image small
+apt-get clean -y
+micromamba clean -ay
+
+fix-permissions.sh -o container
+rm /etc/ld.so.cache
+ldconfig
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
new file mode 100755
index 0000000..7cf42f4
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
@@ -0,0 +1,46 @@
+#!/bin/false
+
+source /opt/ai-dock/etc/environment.sh
+
+build_common_main() {
+    build_common_create_env
+    build_common_install_jupyter_kernels
+}
+
+build_common_create_env() {
+    apt-get update
+    $APT_INSTALL libgl1 libgoogle-perftools4
+    ln -sf $(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1) \
+        /lib/x86_64-linux-gnu/libtcmalloc.so
+    # A new pytorch env costs ~ 300Mb
+    exported_env=/tmp/${MAMBA_DEFAULT_ENV}.yaml
+    micromamba env export -n ${MAMBA_DEFAULT_ENV} > "${exported_env}"
+    $MAMBA_CREATE -n invokeai --file "${exported_env}"
+    mkdir -p $INVOKEAI_ROOT
+}
+
+build_common_install_jupyter_kernels() {
+    if [[ $IMAGE_BASE =~ "jupyter-pytorch" ]]; then
+        $MAMBA_INSTALL -n invokeai \
+            ipykernel \
+            ipywidgets
+
+        kernel_path=/usr/local/share/jupyter/kernels
+
+        # Add the often-present "Python3 (ipykernel)" as an invokeai alias
+        rm -rf ${kernel_path}/python3
+        dir="${kernel_path}/python3"
+        file="${dir}/kernel.json"
+        cp -rf ${kernel_path}/../_template ${dir}
+        sed -i 's/DISPLAY_NAME/'"Python3 (ipykernel)"'/g' ${file}
+        sed -i 's/PYTHON_MAMBA_NAME/'"invokeai"'/g' ${file}
+
+        dir="${kernel_path}/invokeai"
+        file="${dir}/kernel.json"
+        cp -rf ${kernel_path}/../_template ${dir}
+        sed -i 's/DISPLAY_NAME/'"Invoke AI"'/g' ${file}
+        sed -i 's/PYTHON_MAMBA_NAME/'"invokeai"'/g' ${file}
+    fi
+}
+
+build_common_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
new file mode 100755
index 0000000..84b3739
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
@@ -0,0 +1,11 @@
+#!/bin/false
+
+build_cpu_main() {
+    build_cpu_install_invokeai
+}
+
+build_cpu_install_invokeai() {
+    /opt/ai-dock/bin/update-invokeai.sh
+}
+
+build_cpu_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
new file mode 100755
index 0000000..81f25ae
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Must exit and fail to build if any command fails
+set -eo pipefail
+umask 002
+
+source /opt/ai-dock/bin/build/layer0/common.sh
+
+if [[ "$XPU_TARGET" == "NVIDIA_GPU" ]]; then
+    source /opt/ai-dock/bin/build/layer0/nvidia.sh
+elif [[ "$XPU_TARGET" == "AMD_GPU" ]]; then
+    source /opt/ai-dock/bin/build/layer0/amd.sh
+elif [[ "$XPU_TARGET" == "CPU" ]]; then
+    source /opt/ai-dock/bin/build/layer0/cpu.sh
+else
+    printf "No valid XPU_TARGET specified\n" >&2
+    exit 1
+fi
+
+$MAMBA_DEFAULT_RUN python /opt/ai-dock/tests/assert-torch-version.py
+
+source /opt/ai-dock/bin/build/layer0/clean.sh
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
new file mode 100755
index 0000000..6d1f08b
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
@@ -0,0 +1,17 @@
+#!/bin/false
+
+build_nvidia_main() {
+    build_nvidia_install_invokeai
+}
+
+build_nvidia_install_invokeai() {
+    micromamba run -n invokeai ${PIP_INSTALL} \
+        torch=="${PYTORCH_VERSION}" \
+        nvidia-ml-py3
+
+    micromamba install -n invokeai -c xformers xformers
+
+    /opt/ai-dock/bin/update-invokeai.sh
+}
+
+build_nvidia_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh b/build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh
new file mode 100755
index 0000000..8ac4994
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/get-cfqt-invokeai.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+service="invokeai"
+port=${INVOKEAI_PORT_HOST:-9090}
+
+if [[ -z $CF_QUICK_TUNNELS ]]; then
+    printf "\n** You have not enabled Cloudflare quick tunnels **\n\n"
+    printf "To enable, you can do the following:\n\n"
+    printf "1. export CF_QUICK_TUNNELS=true\n"
+    printf "2. supervisorctl restart %s\n\n" $service
+    exit 1
+fi
+
+if [[ -f /var/log/supervisor/quicktunnel-${service}.log ]]; then
+    grep -b0 -a0 'trycloudflare.com' /var/log/supervisor/quicktunnel-${service}.log
+    if [[ $? -gt 0 ]]; then
+        printf "\n** Something may have gone wrong setting up the %s tunnel **\n\n" $service
+        printf "To set up manually you can run the following command:\n\n"
+        printf "cloudflared tunnel --url localhost:%s > /var/log/supervisor/quicktunnel-%s.log 2>&1 &\n\n" $port $service
+    fi
+else
+    printf "** The %s tunnel has not yet started **\n\n" $service
+    if [[ -f /run/provisioning_script ]]; then
+        printf "The container is still being provisioned. Check the logs for progress (logtail.sh)\n\n"
+    else
+        printf "Please wait a moment and try again.\n\n"
+    fi
+fi
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh b/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh
new file mode 100755
index 0000000..051eade
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/10-default.sh
@@ -0,0 +1,27 @@
+#!/bin/false
+# This file will be sourced in init.sh
+
+function preflight_main() {
+    preflight_copy_notebook
+    preflight_update_invokeai
+    printf "%s" "${INVOKEAI_FLAGS}" > /etc/invokeai_flags.conf
+}
+
+function preflight_copy_notebook() {
+    if micromamba env list | grep 'jupyter' > /dev/null 2>&1; then
+        if [[ ! -f "${WORKSPACE}invokeai.ipynb" ]]; then
+            cp /usr/local/share/ai-dock/invokeai.ipynb ${WORKSPACE}
+        fi
+    fi
+}
+
+# Default to false until we can stabilize the update process
+function preflight_update_invokeai() {
+    if [[ ${AUTO_UPDATE,,} == "true" ]]; then
+        /opt/ai-dock/bin/update-invokeai.sh
+    else
+        printf "Skipping auto update (AUTO_UPDATE=false)\n"
+    fi
+}
+
+preflight_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/supervisor-invokeai.sh b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-invokeai.sh
new file mode 100755
index 0000000..153bcff
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/supervisor-invokeai.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+trap cleanup EXIT
+
+LISTEN_PORT=${INVOKEAI_PORT_LOCAL:-19090}
+METRICS_PORT=${INVOKEAI_METRICS_PORT:-29090}
+SERVICE_URL="${INVOKEAI_URL:-}"
+QUICKTUNNELS=true
+
+function cleanup() {
+    kill $(jobs -p) > /dev/null 2>&1
+    rm /run/http_ports/$PROXY_PORT > /dev/null 2>&1
+}
+
+function start() {
+    if [[ ! -v INVOKEAI_PORT || -z $INVOKEAI_PORT ]]; then
+        INVOKEAI_PORT=${INVOKEAI_PORT_HOST:-9090}
+    fi
+    PROXY_PORT=$INVOKEAI_PORT
+    SERVICE_NAME="Invoke AI"
+
+    file_content="$(
+        jq --null-input \
+            --arg listen_port "${LISTEN_PORT}" \
+            --arg metrics_port "${METRICS_PORT}" \
+            --arg proxy_port "${PROXY_PORT}" \
+            --arg proxy_secure "${PROXY_SECURE,,}" \
+            --arg service_name "${SERVICE_NAME}" \
+            --arg service_url "${SERVICE_URL}" \
+            '$ARGS.named'
+    )"
+
+    printf "%s" "$file_content" > /run/http_ports/$PROXY_PORT
+
+    # Delay launch until micromamba is ready
+    if [[ -f /run/workspace_sync || -f /run/container_config ]]; then
+        fuser -k -SIGTERM ${LISTEN_PORT}/tcp > /dev/null 2>&1 &
+        wait -n
+        /usr/bin/python3 /opt/ai-dock/fastapi/logviewer/main.py \
+            -p $LISTEN_PORT \
+            -r 5 \
+            -s "${SERVICE_NAME}" \
+            -t "Preparing ${SERVICE_NAME}" &
+        fastapi_pid=$!
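+
+        # Block here while provisioning/workspace sync completes; the log viewer
+        # above keeps the UI port responsive in the meantime.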
+        while [[ -f /run/workspace_sync || -f /run/container_config ]]; do
+            sleep 1
+        done
+
+        kill $fastapi_pid
+        wait $fastapi_pid 2>/dev/null
+    fi
+
+    fuser -k -SIGKILL ${LISTEN_PORT}/tcp > /dev/null 2>&1 &
+    wait -n
+
+    printf "Starting %s...\n" "${SERVICE_NAME}"
+
+    export INVOKEAI_HOST=127.0.0.1
+    export INVOKEAI_PORT=${LISTEN_PORT}
+
+    # InvokeAI fails to start when the invoke dir is owned by root despite our loose permissions
+    sudo find "$(readlink -f /opt/invokeai)" -not -user "$USER_NAME" -exec chown "${USER_NAME}:${USER_NAME}" {} \;
+
+    cd /opt/invokeai
+    micromamba run -n invokeai -e LD_PRELOAD=libtcmalloc.so invokeai-web
+}
+
+start 2>&1
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh b/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh
new file mode 100755
index 0000000..afdee8a
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/bin/update-invokeai.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+umask 002
+
+if [[ -n "${INVOKEAI_VERSION}" ]]; then
+    version="${INVOKEAI_VERSION}"
+else
+    version="$(curl -fsSL "https://api.github.com/repos/invoke-ai/InvokeAI/releases/latest" | jq -r '.tag_name' | sed 's/[^0-9\.\-]*//g')"
+fi
+
+# -v flag has priority
+while getopts v: flag
+do
+    case "${flag}" in
+        v) version="$OPTARG";;
+    esac
+done
+
+printf "Updating InvokeAI (${version:-latest})...\n"
+
+# Pin Torch to our image version
+micromamba run -n invokeai ${PIP_INSTALL} --use-pep517 \
+    torch==${PYTORCH_VERSION} \
+    InvokeAI${version:+==$version}
+
diff --git a/build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh b/build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh
new file mode 100644
index 0000000..ee4c5e1
--- /dev/null
+++ b/build/COPY_ROOT/opt/ai-dock/storage_monitor/etc/mappings.sh
@@ -0,0 +1,22 @@
+# Key is relative to $WORKSPACE/storage/
+
+# Can't link InvokeAI models yet. Needs manual scan of storage dir
+declare -A storage_map
+#storage_map["stable_diffusion/models/ckpt"]="/opt/stable-diffusion-webui/models/Stable-diffusion"
+#storage_map["stable_diffusion/models/codeformer"]="/opt/stable-diffusion-webui/models/Codeformer"
+#storage_map["stable_diffusion/models/controlnet"]="/opt/stable-diffusion-webui/models/ControlNet"
+#storage_map["stable_diffusion/models/deepbooru"]="/opt/stable-diffusion-webui/models/deepbooru"
+#storage_map["stable_diffusion/models/dreambooth"]="/opt/stable-diffusion-webui/models/dreambooth"
+#storage_map["stable_diffusion/models/esrgan"]="/opt/stable-diffusion-webui/models/ESRGAN"
+#storage_map["stable_diffusion/models/gfpgan"]="/opt/stable-diffusion-webui/models/GFPGAN"
+#storage_map["stable_diffusion/models/hypernetworks"]="/opt/stable-diffusion-webui/models/hypernetworks"
+#storage_map["stable_diffusion/models/insightface"]="/opt/stable-diffusion-webui/models/insightface"
+#storage_map["stable_diffusion/models/karlo"]="/opt/stable-diffusion-webui/models/karlo"
+#storage_map["stable_diffusion/models/ldsr"]="/opt/stable-diffusion-webui/models/LDSR"
+#storage_map["stable_diffusion/models/lora"]="/opt/stable-diffusion-webui/models/Lora"
+#storage_map["stable_diffusion/models/reactor"]="/opt/stable-diffusion-webui/models/reactor"
+#storage_map["stable_diffusion/models/swinIR"]="/opt/stable-diffusion-webui/models/swinIR"
+#storage_map["stable_diffusion/models/vae"]="/opt/stable-diffusion-webui/models/VAE"
+#storage_map["stable_diffusion/models/vae_approx"]="/opt/stable-diffusion-webui/models/VAE-approx"
+
+# Add more mappings for other repository directories as needed
\ No newline at end of file
diff --git a/build/COPY_ROOT/root/.gitkeep b/build/COPY_ROOT/root/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT/usr/.gitkeep b/build/COPY_ROOT/usr/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT/usr/local/share/ai-dock/invokeai.ipynb b/build/COPY_ROOT/usr/local/share/ai-dock/invokeai.ipynb
new file mode 100644
index 0000000..10015bd
--- /dev/null
+++ b/build/COPY_ROOT/usr/local/share/ai-dock/invokeai.ipynb
@@ -0,0 +1,73 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9f9fd726-736c-40f9-b4b3-d4439c6fe94f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Stop the Web UI service\n",
+    "\n",
+    "!supervisorctl stop invokeai"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "715b36cb-ae96-4887-acb9-fd393fa1c2d8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Start the Web UI service\n",
+    "\n",
+    "!jupyter-start-service.sh invokeai"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "df0afee2-8e20-4206-bccf-fecac674f481",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# View the live logs\n",
+    "\n",
+    "!logtail.sh"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9950e252-a1ad-4f7e-baef-0703b1cd3bd6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get secure UI links\n",
+    "\n",
+    "!cfqt-url.sh -p 1111"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Invoke AI",
+   "language": "python",
+   "name": "invokeai"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh b/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh
new file mode 100755
index 0000000..9ebb5e9
--- /dev/null
+++ b/build/COPY_ROOT_EXTRA/opt/ai-dock/bin/build/layer1/init.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+set -eo pipefail
+
+# Use this layer to add nodes and models
+
+MAMBA_PACKAGES=(
+    #"package1"
+    #"package2=version"
+    )
+
+PIP_PACKAGES=(
+    #"package1==version"
+    #"package2"
+    )
+
+
+NODES=(
+    #"https://github.com/mickr777/GPT2RandomPromptMaker"
+)
+
+CHECKPOINT_MODELS=(
+    #"https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt"
+    #"https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt"
+    #"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors"
+    #"https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors"
+)
+
+LORA_MODELS=(
+    #"https://civitai.com/api/download/models/16576"
+)
+
+VAE_MODELS=(
+    #"https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.safetensors"
+    #"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors"
+    #"https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors"
+)
+
+ESRGAN_MODELS=(
+    #"https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth"
+    #"https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth"
+    #"https://huggingface.co/Akumetsu971/SD_Anime_Futuristic_Armor/resolve/main/4x_NMKD-Siax_200k.pth"
+)
+
+CONTROLNET_MODELS=(
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_canny-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors"
+    #"https://huggingface.co/kohya-ss/ControlNet-diff-modules/resolve/main/diff_control_sd15_depth_fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_hed-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_mlsd-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_normal-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_seg-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_canny-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_color-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_depth-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_keypose-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_openpose-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_seg-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_sketch-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_style-fp16.safetensors"
+)
+
+### DO NOT EDIT BELOW HERE UNLESS YOU KNOW WHAT YOU ARE DOING ###
+
+function build_extra_start() {
+    source /opt/ai-dock/etc/environment.sh
+    build_extra_get_mamba_packages
+    build_extra_get_pip_packages
+    build_extra_get_nodes
+    build_extra_get_models \
+        "/opt/storage/stable_diffusion/models/ckpt" \
+        "${CHECKPOINT_MODELS[@]}"
+    build_extra_get_models \
+        "/opt/storage/stable_diffusion/models/lora" \
+        "${LORA_MODELS[@]}"
+    build_extra_get_models \
+        "/opt/storage/stable_diffusion/models/controlnet" \
+        "${CONTROLNET_MODELS[@]}"
+    build_extra_get_models \
+        "/opt/storage/stable_diffusion/models/vae" \
+        "${VAE_MODELS[@]}"
+    build_extra_get_models \
+        "/opt/storage/stable_diffusion/models/esrgan" \
+        "${ESRGAN_MODELS[@]}"
+
+    # Invoke has no exit/CI run mode so run it and wait until it's fully initialised
+    cd /opt/invokeai && \
+    micromamba run -n invokeai -e LD_PRELOAD=libtcmalloc.so invokeai-web > /tmp/invoke-ci.log 2>&1 &
+    pid=$!
+    wait_max=30
+    wait_current=0
+    init_string="Uvicorn running on"
+
+    # Until loop to continuously check if the string is found or maximum wait time is reached
+    until grep -qi "$init_string" /tmp/invoke-ci.log; do
+        if [[ $wait_current -ge $wait_max ]]; then
+            printf "Timed out waiting for InvokeAI to initialise\n"
+            kill $pid
+            exit 1
+        fi
+        printf "Waiting for InvokeAI initialization to complete...\n"
+        sleep 1
+        wait_current=$((wait_current + 1))
+    done
+
+    kill $pid
+
+    # Ensure pytorch hasn't been clobbered
+    $MAMBA_DEFAULT_RUN python /opt/ai-dock/tests/assert-torch-version.py || exit 1
+}
+
+function build_extra_get_mamba_packages() {
+    if [[ -n $MAMBA_PACKAGES ]]; then
+        $MAMBA_INSTALL -n invokeai ${MAMBA_PACKAGES[@]}
+    fi
+}
+
+function build_extra_get_pip_packages() {
+    if [[ -n $PIP_PACKAGES ]]; then
+        micromamba run -n invokeai $PIP_INSTALL ${PIP_PACKAGES[@]}
+    fi
+}
+
+function build_extra_get_nodes() {
+    for repo in "${NODES[@]}"; do
+        dir="${repo##*/}"
+        path="/opt/invokeai/nodes/${dir}"
+        requirements="${path}/requirements.txt"
+        if [[ -d $path ]]; then
+            if [[ ${AUTO_UPDATE,,} != "false" ]]; then
+                printf "Updating node: %s...\n" "${repo}"
+                ( cd "$path" && git pull )
+                if [[ -e $requirements ]]; then
+                    micromamba -n invokeai run ${PIP_INSTALL} -r "$requirements"
+                fi
+            fi
+        else
+            printf "Downloading node: %s...\n" "${repo}"
+            git clone "${repo}" "${path}" --recursive
+            if [[ -e $requirements ]]; then
+                micromamba -n invokeai run ${PIP_INSTALL} -r "${requirements}"
+            fi
+        fi
+    done
+}
+
+function build_extra_get_models() {
+    if [[ -n $2 ]]; then
+        dir="$1"
+        mkdir -p "$dir"
+        shift
+        arr=("$@")
+
+        printf "Downloading %s model(s) to %s...\n" "${#arr[@]}" "$dir"
+        for url in "${arr[@]}"; do
+            printf "Downloading: %s\n" "${url}"
+            build_extra_download "${url}" "${dir}"
+            printf "\n"
+        done
+    fi
+}
+
+# Download from $1 URL to $2 file path
+function build_extra_download() {
+    wget -qnc --content-disposition --show-progress -e dotbytes="${3:-4M}" -P "$2" "$1"
+}
+
+
+umask 002
+build_extra_start
+fix-permissions.sh -o container
+rm /etc/ld.so.cache
+ldconfig
\ No newline at end of file
diff --git a/build/COPY_ROOT_EXTRA/opt/serverless/handlers/.gitkeep b/build/COPY_ROOT_EXTRA/opt/serverless/handlers/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/serverless/workflows/.gitkeep b/build/COPY_ROOT_EXTRA/opt/serverless/workflows/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/ckpt/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/ckpt/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/codeformer/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/codeformer/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/controlnet/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/controlnet/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/deepbooru/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/deepbooru/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/deforum/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/deforum/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/dreambooth/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/dreambooth/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/esrgan/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/esrgan/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/gfpgan/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/gfpgan/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/hypernetworks/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/hypernetworks/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/insightface/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/insightface/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/karlo/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/karlo/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/ldsr/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/ldsr/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/lora/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/lora/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/reactor/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/reactor/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/swinIR/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/swinIR/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/vae/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/vae/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/vae_approx/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/stable_diffusion/models/vae_approx/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/build/Dockerfile b/build/Dockerfile
new file mode 100644
index 0000000..aaa1655
--- /dev/null
+++ b/build/Dockerfile
@@ -0,0 +1,33 @@
+# For build automation - Allows building from any ai-dock base image
+# Use a *cuda*base* image as default because pytorch brings the libs
+ARG IMAGE_BASE="ghcr.io/ai-dock/pytorch:2.1.1-py3.10-cuda-11.8.0-base-22.04"
+FROM ${IMAGE_BASE}
+
+LABEL org.opencontainers.image.source https://github.com/ai-dock/invokeai
+LABEL org.opencontainers.image.description "AI-Dock + Invoke AI docker image"
+LABEL maintainer="Rob Ballantyne "
+
+ENV IMAGE_SLUG="invokeai"
+ENV OPT_SYNC=invokeai
+
+ENV INVOKEAI_ROOT="/opt/invokeai"
+
+# Copy early so we can use scripts in the build - Changes to these files will invalidate the cache and cause a rebuild.
+COPY --chown=0:1111 ./COPY_ROOT/ /
+
+# Use build scripts to ensure we can build all targets from one Dockerfile in a single layer.
+# Don't put anything heavy in here - We can use multi-stage building above if necessary.
+
+ARG IMAGE_BASE
+RUN set -eo pipefail && /opt/ai-dock/bin/build/layer0/init.sh | tee /var/log/build.log
+
+# Must be set after layer0
+ENV MAMBA_DEFAULT_ENV=invokeai
+ENV MAMBA_DEFAULT_RUN="micromamba run -n ${MAMBA_DEFAULT_ENV}"
+
+# Copy overrides and models into later layers for fast rebuilds
+COPY --chown=0:1111 ./COPY_ROOT_EXTRA/ /
+RUN set -eo pipefail && /opt/ai-dock/bin/build/layer1/init.sh | tee -a /var/log/build.log
+
+# Keep init.sh as-is and place additional logic in /opt/ai-dock/bin/preflight.sh
+CMD ["init.sh"]
diff --git a/config/provisioning/default.sh b/config/provisioning/default.sh
new file mode 100755
index 0000000..e37a452
--- /dev/null
+++ b/config/provisioning/default.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+# This file will be executed by init.sh
+
+# https://raw.githubusercontent.com/ai-dock/invokeai/main/config/provisioning/default.sh
+
+## NOTICE ##
+## Invoke AI does not support directly downloading models.
+## You will need to manually scan your $WORKSPACE/storage/ directory
+
+### Edit the following arrays to suit your workflow - values must be quoted and separated by newlines or spaces.
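+### e.g. CHECKPOINT_MODELS=( "https://host-a/model-one.safetensors" "https://host-b/model-two.ckpt" ) - the URLs here are hypothetical placeholders.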
+
+DISK_GB_REQUIRED=30
+
+MAMBA_PACKAGES=(
+    #"package1"
+    #"package2=version"
+    )
+
+PIP_PACKAGES=(
+    #"bitsandbytes==0.41.2.post2"
+    )
+
+NODES=(
+    #"https://github.com/mickr777/GPT2RandomPromptMaker"
+)
+
+CHECKPOINT_MODELS=(
+    #"https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt"
+    #"https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt"
+    #"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors"
+    #"https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors"
+)
+
+LORA_MODELS=(
+    #"https://civitai.com/api/download/models/16576"
+)
+
+VAE_MODELS=(
+    #"https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.safetensors"
+    #"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors"
+    #"https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors"
+)
+
+ESRGAN_MODELS=(
+    #"https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth"
+    #"https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth"
+    #"https://huggingface.co/Akumetsu971/SD_Anime_Futuristic_Armor/resolve/main/4x_NMKD-Siax_200k.pth"
+)
+
+CONTROLNET_MODELS=(
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_canny-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_hed-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_mlsd-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_normal-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_seg-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_canny-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_color-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_depth-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_keypose-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_openpose-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_seg-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_sketch-fp16.safetensors"
+    #"https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/t2iadapter_style-fp16.safetensors"
+)
+
+
+### DO NOT EDIT BELOW HERE UNLESS YOU KNOW WHAT YOU ARE DOING ###
+
+function provisioning_start() {
+    source /opt/ai-dock/etc/environment.sh
+    DISK_GB_AVAILABLE=$(($(df --output=avail -m "${WORKSPACE}" | tail -n1) / 1000))
+    DISK_GB_USED=$(($(df --output=used -m "${WORKSPACE}" | tail -n1) / 1000))
+    DISK_GB_ALLOCATED=$(($DISK_GB_AVAILABLE + $DISK_GB_USED))
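+    # df -m reports 1M blocks; dividing by 1000 gives an approximate GB figure for the low-disk check below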
+    provisioning_print_header
+    provisioning_get_mamba_packages
+    provisioning_get_pip_packages
+    provisioning_get_nodes
+    provisioning_get_models \
+        "${WORKSPACE}/storage/stable_diffusion/models/ckpt" \
+        "${CHECKPOINT_MODELS[@]}"
+    provisioning_get_models \
+        "${WORKSPACE}/storage/stable_diffusion/models/lora" \
+        "${LORA_MODELS[@]}"
+    provisioning_get_models \
+        "${WORKSPACE}/storage/stable_diffusion/models/controlnet" \
+        "${CONTROLNET_MODELS[@]}"
+    provisioning_get_models \
+        "${WORKSPACE}/storage/stable_diffusion/models/vae" \
+        "${VAE_MODELS[@]}"
+    provisioning_get_models \
+        "${WORKSPACE}/storage/stable_diffusion/models/esrgan" \
+        "${ESRGAN_MODELS[@]}"
+
+    provisioning_print_end
+}
+
+function provisioning_get_mamba_packages() {
+    if [[ -n $MAMBA_PACKAGES ]]; then
+        $MAMBA_INSTALL -n invokeai ${MAMBA_PACKAGES[@]}
+    fi
+}
+
+function provisioning_get_pip_packages() {
+    if [[ -n $PIP_PACKAGES ]]; then
+        micromamba run -n invokeai $PIP_INSTALL ${PIP_PACKAGES[@]}
+    fi
+}
+
+function provisioning_get_nodes() {
+    for repo in "${NODES[@]}"; do
+        dir="${repo##*/}"
+        path="/opt/invokeai/nodes/${dir}"
+        requirements="${path}/requirements.txt"
+        if [[ -d $path ]]; then
+            if [[ ${AUTO_UPDATE,,} != "false" ]]; then
+                printf "Updating node: %s...\n" "${repo}"
+                ( cd "$path" && git pull )
+                if [[ -e $requirements ]]; then
+                    micromamba -n invokeai run ${PIP_INSTALL} -r "$requirements"
+                fi
+            fi
+        else
+            printf "Downloading node: %s...\n" "${repo}"
+            git clone "${repo}" "${path}" --recursive
+            if [[ -e $requirements ]]; then
+                micromamba -n invokeai run ${PIP_INSTALL} -r "${requirements}"
+            fi
+        fi
+    done
+}
+
+function provisioning_get_models() {
+    if [[ -z $2 ]]; then return 1; fi
+    dir="$1"
+    mkdir -p "$dir"
+    shift
+    if [[ $DISK_GB_ALLOCATED -ge $DISK_GB_REQUIRED ]]; then
+        arr=("$@")
+    else
+        printf "WARNING: Low disk space allocation - Only the first model will be downloaded!\n"
+        arr=("$1")
+    fi
+
+    printf "Downloading %s model(s) to %s...\n" "${#arr[@]}" "$dir"
+    for url in "${arr[@]}"; do
+        printf "Downloading: %s\n" "${url}"
+        provisioning_download "${url}" "${dir}"
+        printf "\n"
+    done
+}
+
+function provisioning_print_header() {
+    printf "\n##############################################\n#                                            #\n#          Provisioning container            #\n#                                            #\n#         This will take some time           #\n#                                            #\n# Your container will be ready on completion #\n#                                            #\n##############################################\n\n"
+    if [[ $DISK_GB_ALLOCATED -lt $DISK_GB_REQUIRED ]]; then
+        printf "WARNING: Your allocated disk size (%sGB) is below the recommended %sGB - Some models will not be downloaded\n" "$DISK_GB_ALLOCATED" "$DISK_GB_REQUIRED"
+    fi
+}
+
+function provisioning_print_end() {
+    printf "\nProvisioning complete: Invoke AI will start now\n\n"
+}
+
+# Download from $1 URL to $2 file path
+function provisioning_download() {
+    wget -qnc --content-disposition --show-progress -e dotbytes="${3:-4M}" -P "$2" "$1"
+}
+
+provisioning_start
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..9dd693e
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,72 @@
+version: "3.8"
+# Compose file build variables set in .env
+services:
+  supervisor:
+    platform: linux/amd64
+    build:
+      context: ./build
+      args:
+        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/jupyter-pytorch:2.2.1-py3.10-cuda-11.8.0-runtime-22.04}
+      tags:
+        - "ghcr.io/ai-dock/invokeai:${IMAGE_TAG:-jupyter-pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04}"
+
+    image: ghcr.io/ai-dock/invokeai:${IMAGE_TAG:-jupyter-pytorch-2.2.1-py3.10-cuda-11.8.0-runtime-22.04}
+
+    devices:
+      - "/dev/dri:/dev/dri"
+      # For AMD GPU
+      #- "/dev/kfd:/dev/kfd"
+
+    volumes:
+      # Workspace
+      - ./workspace:${WORKSPACE:-/workspace/}:rshared
+      # You can share /workspace/storage with other AI-Dock containers. See README
+      #- /path/to/common_storage:${WORKSPACE:-/workspace/}storage/:rshared
+      # Will echo to root-owned authorized_keys file;
+      # Avoids changing local file owner
+      - ./config/authorized_keys:/root/.ssh/authorized_keys_mount
+      - ./config/provisioning/default.sh:/opt/ai-dock/bin/provisioning.sh
+
+    ports:
+      # SSH available on host machine port 2222 to avoid conflict. Change to suit
+      - ${SSH_PORT_HOST:-2222}:${SSH_PORT_LOCAL:-22}
+      # Caddy port for service portal
+      - ${SERVICEPORTAL_PORT_HOST:-1111}:${SERVICEPORTAL_PORT_HOST:-1111}
+      # Invoke AI web interface
+      - ${INVOKEAI_PORT_HOST:-9090}:${INVOKEAI_PORT_HOST:-9090}
+      # Jupyter server
+      - ${JUPYTER_PORT_HOST:-8888}:${JUPYTER_PORT_HOST:-8888}
+      # Syncthing
+      - ${SYNCTHING_UI_PORT_HOST:-8384}:${SYNCTHING_UI_PORT_HOST:-8384}
+      - ${SYNCTHING_TRANSPORT_PORT_HOST:-22999}:${SYNCTHING_TRANSPORT_PORT_HOST:-22999}
+
+    environment:
+      # Don't enclose values in quotes
+      - DIRECT_ADDRESS=${DIRECT_ADDRESS:-127.0.0.1}
+      - DIRECT_ADDRESS_GET_WAN=${DIRECT_ADDRESS_GET_WAN:-false}
+      - WORKSPACE=${WORKSPACE:-/workspace}
+      - WORKSPACE_SYNC=${WORKSPACE_SYNC:-false}
+      - CF_TUNNEL_TOKEN=${CF_TUNNEL_TOKEN:-}
+      - CF_QUICK_TUNNELS=${CF_QUICK_TUNNELS:-true}
+      - WEB_ENABLE_AUTH=${WEB_ENABLE_AUTH:-true}
+      - WEB_USER=${WEB_USER:-user}
+      - WEB_PASSWORD=${WEB_PASSWORD:-password}
+      - SSH_PORT_HOST=${SSH_PORT_HOST:-2222}
+      - SSH_PORT_LOCAL=${SSH_PORT_LOCAL:-22}
+      - SERVICEPORTAL_PORT_HOST=${SERVICEPORTAL_PORT_HOST:-1111}
+      - SERVICEPORTAL_METRICS_PORT=${SERVICEPORTAL_METRICS_PORT:-21111}
+      - SERVICEPORTAL_URL=${SERVICEPORTAL_URL:-}
+      - INVOKEAI_VERSION=${INVOKEAI_VERSION:-}
+      - INVOKEAI_FLAGS=${INVOKEAI_FLAGS:-}
+      - INVOKEAI_PORT_HOST=${INVOKEAI_PORT_HOST:-9090}
+      - INVOKEAI_PORT_LOCAL=${INVOKEAI_PORT_LOCAL:-19090}
+      - INVOKEAI_METRICS_PORT=${INVOKEAI_METRICS_PORT:-29090}
+      - INVOKEAI_URL=${INVOKEAI_URL:-}
+      - JUPYTER_PORT_HOST=${JUPYTER_PORT_HOST:-8888}
+      - JUPYTER_METRICS_PORT=${JUPYTER_METRICS_PORT:-28888}
+      - JUPYTER_URL=${JUPYTER_URL:-}
+      - SERVERLESS=${SERVERLESS:-false}
+      - SYNCTHING_UI_PORT_HOST=${SYNCTHING_UI_PORT_HOST:-8384}
+      - SYNCTHING_TRANSPORT_PORT_HOST=${SYNCTHING_TRANSPORT_PORT_HOST:-22999}
+      - SYNCTHING_URL=${SYNCTHING_URL:-}
+      #- PROVISIONING_SCRIPT=${PROVISIONING_SCRIPT:-}
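+
+# To build and launch locally (a sketch; assumes Docker Compose v2 and an
+# optional .env file alongside this compose file for the variables above):
+#
+#   docker compose up -d --build
+#   docker compose logs -f supervisor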