From 301eb1c00cc8c0b6c280e12b6714ebc0e3c1b046 Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 15:29:57 -0700
Subject: [PATCH 01/11] wip

---
 jupyterhub/public-config.yaml | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/jupyterhub/public-config.yaml b/jupyterhub/public-config.yaml
index 72de6ab..64f7b89 100644
--- a/jupyterhub/public-config.yaml
+++ b/jupyterhub/public-config.yaml
@@ -28,6 +28,15 @@ singleuser:
     - name: shm-volume
       emptyDir:
         medium: Memory
+  extraFiles:
+    jupyter_server_config.json:
+      mountPath: /usr/local/etc/jupyter/jupyter_server_config.json
+      data:
+        # Allow JupyterLab to show the 'View -> Show Hidden Files' option
+        # in the menu. Defaults are not changed.
+        # https://github.com/jupyterlab/jupyterlab/issues/11304#issuecomment-945466766
+        ContentsManager:
+          allow_hidden: true
   profileList:
     - display_name: "CPU"
       slug: rocker
@@ -52,8 +61,13 @@ singleuser:
             kubespawner_override:
               mem_guarantee: 59861460992
               mem_limit: 59861460992
+          special:
+            display_name: Special-use (up to 120 GB RAM)
+            kubespawner_override:
+              mem_guarantee: 59861460992
+              mem_limit: 122861460992
       kubespawner_override:
-        image: zcr.thelio.carlboettiger.info/boettiger-lab/k8s:latest
+        image: "ghcr.io/boettiger-lab/k8s:latest"
         default_url: /lab # change to /rstudio to go straight to RStudio
         working_dir: /home/rstudio
     - display_name: "GPU"
@@ -62,7 +76,7 @@ singleuser:
       profile_options:
         resource_allocation: *profile_options_resource_allocation
       kubespawner_override:
-        image: zcr.thelio.carlboettiger.info/boettiger-lab/k8s-gpu:latest
+        image: "ghcr.io/boettiger-lab/k8s-gpu:latest"
         default_url: /lab # change to /rstudio to go straight to RStudio
         working_dir: /home/rstudio
     - display_name: "Pangeo"
@@ -85,6 +99,7 @@ singleuser:
         image: "{value}"
         choices: {}
 hub:
+  allowNamedServers: true
   config:
     # Shared settings. we don't need nvidia to be default
     KubeSpawner:

From 4d7a12a7374b0841078689b311aa9b0ea4c5fe9a Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 17:09:43 -0700
Subject: [PATCH 02/11] pytorch

---
 .github/workflows/pytorch-image.yml | 34 +++++++++++++++++++++++++++++
 images/Dockerfile.pytorch           |  4 ++++
 images/jupyter-ai.yml               |  7 ++++++
 3 files changed, 45 insertions(+)
 create mode 100644 .github/workflows/pytorch-image.yml
 create mode 100644 images/Dockerfile.pytorch
 create mode 100644 images/jupyter-ai.yml

diff --git a/.github/workflows/pytorch-image.yml b/.github/workflows/pytorch-image.yml
new file mode 100644
index 0000000..1b670da
--- /dev/null
+++ b/.github/workflows/pytorch-image.yml
@@ -0,0 +1,34 @@
+name: Docker GPU Image
+on:
+  workflow_dispatch: null
+  push:
+    paths: ['images/*']
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    permissions: write-all
+    steps:
+      # For biggish images, github actions runs out of disk space.
+      # So we cleanup some unwanted things in the disk image, and reclaim that space for our docker use
+      # https://github.com/actions/virtual-environments/issues/2606#issuecomment-772683150
+      # and https://github.com/easimon/maximize-build-space/blob/b4d02c14493a9653fe7af06cc89ca5298071c66e/action.yml#L104
+      # This gives us a total of about 52G of free space, which should be enough for now
+      - name: cleanup disk space
+        run: |
+          sudo rm -rf /usr/local/lib/android /usr/share/dotnet /opt/ghc
+          df -h
+      - uses: actions/checkout@v3
+      - name: Login to GitHub Container Registry
+        if: github.repository == 'boettiger-lab/k8s'
+        uses: docker/login-action@v1
+        with:
+          registry: ghcr.io
+          username: ${{github.actor}}
+          password: ${{secrets.GITHUB_TOKEN}}
+      - name: Build the Docker image
+        if: github.repository == 'boettiger-lab/k8s'
+        run: docker build images/ -f images/Dockerfile.pytorch --tag ghcr.io/boettiger-lab/pytorch:latest
+      - name: Publish
+        if: github.repository == 'boettiger-lab/k8s'
+        run: docker push ghcr.io/boettiger-lab/pytorch:latest
+
diff --git a/images/Dockerfile.pytorch b/images/Dockerfile.pytorch
new file mode 100644
index 0000000..5ffe3ec
--- /dev/null
+++ b/images/Dockerfile.pytorch
@@ -0,0 +1,4 @@
+FROM quay.io/jupyter/pytorch-notebook:cuda12-2024-04-22
+COPY jupyter-ai.yml environment.yml
+RUN conda env update --file environment.yml
+
diff --git a/images/jupyter-ai.yml b/images/jupyter-ai.yml
new file mode 100644
index 0000000..8c98215
--- /dev/null
+++ b/images/jupyter-ai.yml
@@ -0,0 +1,7 @@
+name: base
+channels:
+  - conda-forge
+dependencies:
+  - jupyterlab-myst
+  - jupyter-ai
+

From 565b4eb9144aa38dbf8d65536f174ad205b35eb5 Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 19:54:28 -0700
Subject: [PATCH 03/11] more stuff

---
 images/Dockerfile.pytorch | 8 ++++++++
 images/jupyter-ai.yml     | 3 +++
 2 files changed, 11 insertions(+)

diff --git a/images/Dockerfile.pytorch b/images/Dockerfile.pytorch
index 5ffe3ec..bc2f3b3 100644
--- a/images/Dockerfile.pytorch
+++ b/images/Dockerfile.pytorch
@@ -2,3 +2,11 @@ FROM quay.io/jupyter/pytorch-notebook:cuda12-2024-04-22
 COPY jupyter-ai.yml environment.yml
 RUN conda env update --file environment.yml
 
+USER root
+RUN curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/local/bin/ollama && chmod +x /usr/local/bin/ollama
+RUN curl -fsSL https://code-server.dev/install.sh | sh && rm -rf .cache
+RUN git config --system pull.rebase false && \
+    git config --system credential.helper 'cache --timeout=30000' && \
+    echo '"\e[5~": history-search-backward' >> /etc/inputrc && \
+    echo '"\e[6~": history-search-forward' >> /etc/inputrc
+
diff --git a/images/jupyter-ai.yml b/images/jupyter-ai.yml
index 8c98215..1a6f808 100644
--- a/images/jupyter-ai.yml
+++ b/images/jupyter-ai.yml
@@ -4,4 +4,7 @@ channels:
 dependencies:
   - jupyterlab-myst
   - jupyter-ai
+  - jupyter-tensorboard-proxy
+  - jupyter-resource-usage
+  - jupyter-vscode-proxy
 

From c69bf2ae0b8fd67387fc1e4d0de884713ceeed71 Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 20:27:45 -0700
Subject: [PATCH 04/11] update

---
 images/jupyter-ai.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/images/jupyter-ai.yml b/images/jupyter-ai.yml
index 1a6f808..3a7aca9 100644
--- a/images/jupyter-ai.yml
+++ b/images/jupyter-ai.yml
@@ -4,7 +4,8 @@ channels:
 dependencies:
   - jupyterlab-myst
   - jupyter-ai
-  - jupyter-tensorboard-proxy
   - jupyter-resource-usage
   - jupyter-vscode-proxy
+  - pip:
+    - jupyter-tensorboard-proxy
 
From 71a39ff1f98d703aa9da7dba25d8439a7dfcdbdd Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 20:32:57 -0700
Subject: [PATCH 05/11] rename action

---
 .github/workflows/pytorch-image.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pytorch-image.yml b/.github/workflows/pytorch-image.yml
index 1b670da..a7912c3 100644
--- a/.github/workflows/pytorch-image.yml
+++ b/.github/workflows/pytorch-image.yml
@@ -1,4 +1,4 @@
-name: Docker GPU Image
+name: PyTorch-notebook Image
 on:
   workflow_dispatch: null
   push:

From 21852a00c472bbd4a39eecd32db58df42e038f7e Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 21:10:32 -0700
Subject: [PATCH 06/11] here we go!

---
 images/Dockerfile.pytorch | 6 ++++--
 images/jupyter-ai.yml     | 1 +
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/images/Dockerfile.pytorch b/images/Dockerfile.pytorch
index bc2f3b3..3b241a5 100644
--- a/images/Dockerfile.pytorch
+++ b/images/Dockerfile.pytorch
@@ -1,6 +1,8 @@
-FROM quay.io/jupyter/pytorch-notebook:cuda12-2024-04-22
+FROM quay.io/jupyter/pytorch-notebook:cuda12-ubuntu-24.04
 COPY jupyter-ai.yml environment.yml
-RUN conda env update --file environment.yml
+
+RUN conda update -n base -c conda-forge conda && \
+    conda env update --file environment.yml
 
 USER root
 RUN curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/local/bin/ollama && chmod +x /usr/local/bin/ollama
diff --git a/images/jupyter-ai.yml b/images/jupyter-ai.yml
index 3a7aca9..2cc1106 100644
--- a/images/jupyter-ai.yml
+++ b/images/jupyter-ai.yml
@@ -6,6 +6,7 @@ dependencies:
   - jupyter-ai
   - jupyter-resource-usage
   - jupyter-vscode-proxy
+  - pip
   - pip:
     - jupyter-tensorboard-proxy
 

From fc8c5ab249d1984c574274c8ae999a2544cb3c2c Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 21:15:02 -0700
Subject: [PATCH 07/11] names

---
 .github/workflows/jupyter-image.yml |   2 +-
 github-actions/.gitignore           |   2 +
 github-actions/arc_values.yaml      | 132 ----------------------------
 jupyterhub/cirrus.sh                |   4 +-
 4 files changed, 5 insertions(+), 135 deletions(-)
 create mode 100644 github-actions/.gitignore
 delete mode 100644 github-actions/arc_values.yaml

diff --git a/.github/workflows/jupyter-image.yml b/.github/workflows/jupyter-image.yml
index c2d1280..7f54b8c 100644
--- a/.github/workflows/jupyter-image.yml
+++ b/.github/workflows/jupyter-image.yml
@@ -1,4 +1,4 @@
-name: Docker Image CI
+name: Jupyter Image
 on:
   workflow_dispatch: null
   push:
diff --git a/github-actions/.gitignore b/github-actions/.gitignore
new file mode 100644
index 0000000..487fe52
--- /dev/null
+++ b/github-actions/.gitignore
@@ -0,0 +1,2 @@
+helm-secrets.sh
+
diff --git a/github-actions/arc_values.yaml b/github-actions/arc_values.yaml
deleted file mode 100644
index 8e74317..0000000
--- a/github-actions/arc_values.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-# Default values for gha-runner-scale-set-controller.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-labels: {}
-
-# leaderElection will be enabled when replicaCount>1,
-# So, only one replica will in charge of reconciliation at a given time
-# leaderElectionId will be set to {{ define gha-runner-scale-set-controller.fullname }}.
-replicaCount: 1
-
-image:
-  repository: "ghcr.io/actions/gha-runner-scale-set-controller"
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-
-imagePullSecrets: []
-nameOverride: ""
-fullnameOverride: ""
-
-env:
-## Define environment variables for the controller pod
-# - name: "ENV_VAR_NAME_1"
-#   value: "ENV_VAR_VALUE_1"
-# - name: "ENV_VAR_NAME_2"
-#   valueFrom:
-#     secretKeyRef:
-#       key: ENV_VAR_NAME_2
-#       name: secret-name
-#       optional: true
-
-serviceAccount:
-  # Specifies whether a service account should be created for running the controller pod
-  create: true
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  # You can not use the default service account for this.
-  name: ""
-
-podAnnotations: {}
-
-podLabels: {}
-
-podSecurityContext: {}
-# fsGroup: 2000
-
-securityContext: {}
-# capabilities:
-#   drop:
-#   - ALL
-# readOnlyRootFilesystem: true
-# runAsNonRoot: true
-# runAsUser: 1000
-
-resources: {}
-## We usually recommend not to specify default resources and to leave this as a conscious
-## choice for the user. This also increases chances charts run on environments with little
-## resources, such as Minikube. If you do want to specify resources, uncomment the following
-## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-# limits:
-#   cpu: 100m
-#   memory: 128Mi
-# requests:
-#   cpu: 100m
-#   memory: 128Mi
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-topologySpreadConstraints: []
-
-# Mount volumes in the container.
-volumes: []
-volumeMounts: []
-
-# Leverage a PriorityClass to ensure your pods survive resource shortages
-# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
-# PriorityClass: system-cluster-critical
-priorityClassName: ""
-
-## If `metrics:` object is not provided, or commented out, the following flags
-## will be applied the controller-manager and listener pods with empty values:
-## `--metrics-addr`, `--listener-metrics-addr`, `--listener-metrics-endpoint`.
-## This will disable metrics.
-##
-## To enable metrics, uncomment the following lines.
-# metrics:
-#   controllerManagerAddr: ":8080"
-#   listenerAddr: ":8080"
-#   listenerEndpoint: "/metrics"
-
-flags:
-  ## Log level can be set here with one of the following values: "debug", "info", "warn", "error".
-  ## Defaults to "debug".
-  logLevel: "debug"
-  ## Log format can be set with one of the following values: "text", "json"
-  ## Defaults to "text"
-  logFormat: "text"
-
-  ## Restricts the controller to only watch resources in the desired namespace.
-  ## Defaults to watch all namespaces when unset.
-  # watchSingleNamespace: ""
-
-  ## Defines how the controller should handle upgrades while having running jobs.
-  ##
-  ## The strategies available are:
-  ## - "immediate": (default) The controller will immediately apply the change causing the
-  ##   recreation of the listener and ephemeral runner set. This can lead to an
-  ##   overprovisioning of runners, if there are pending / running jobs. This should not
-  ##   be a problem at a small scale, but it could lead to a significant increase of
-  ##   resources if you have a lot of jobs running concurrently.
-  ##
-  ## - "eventual": The controller will remove the listener and ephemeral runner set
-  ##   immediately, but will not recreate them (to apply changes) until all
-  ##   pending / running jobs have completed.
-  ##   This can lead to a longer time to apply the change but it will ensure
-  ##   that you don't have any overprovisioning of runners.
-  updateStrategy: "immediate"
-
-  ## Defines a list of prefixes that should not be propagated to internal resources.
-  ## This is useful when you have labels that are used for internal purposes and should not be propagated to internal resources.
-  ## See https://github.com/actions/actions-runner-controller/issues/3533 for more information.
-  ##
-  ## By default, all labels are propagated to internal resources
-  ## Labels that match prefix specified in the list are excluded from propagation.
-  # excludeLabelPropagationPrefixes:
-  #   - "argocd.argoproj.io/instance"
diff --git a/jupyterhub/cirrus.sh b/jupyterhub/cirrus.sh
index 3f75561..9e7d8a1 100755
--- a/jupyterhub/cirrus.sh
+++ b/jupyterhub/cirrus.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
-helm repo add jupyterhub https://hub.jupyter.org/helm-chart/
-helm repo update
+#helm repo add jupyterhub https://hub.jupyter.org/helm-chart/
+#helm repo update
 
 ## use your name for install name and namespace name
 helm upgrade --cleanup-on-fail \

From c597fb1463f4bfe46582e5b5c3417052e2b34a90 Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 21:15:42 -0700
Subject: [PATCH 08/11] conda+venv

---
 images/Dockerfile.dual | 57 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 images/Dockerfile.dual

diff --git a/images/Dockerfile.dual b/images/Dockerfile.dual
new file mode 100644
index 0000000..e3b7df4
--- /dev/null
+++ b/images/Dockerfile.dual
@@ -0,0 +1,57 @@
+## devcontainer-focused Rocker
+FROM ghcr.io/rocker-org/devcontainer/tidyverse:4.3
+
+## latest version of geospatial libs
+RUN /rocker_scripts/experimental/install_dev_osgeo.sh
+RUN apt-get update -qq && apt-get -y install vim texlive
+
+# standard python/jupyter setup
+ENV NB_USER=rstudio
+ENV VIRTUAL_ENV=/opt/venv
+ENV PATH=${VIRTUAL_ENV}/bin:${PATH}
+RUN wget https://github.com/rocker-org/rocker-versioned2/raw/master/scripts/install_jupyter.sh && \
+    bash -e install_jupyter.sh && \
+    rm install_jupyter.sh && \
+    chown ${NB_USER}:staff -R ${VIRTUAL_ENV}
+
+# Set up conda
+ENV CONDA_ENV=/opt/miniforge3
+ENV PATH=${PATH}:$CONDA_ENV/bin
+RUN curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" && \
+    bash Miniforge3-$(uname)-$(uname -m).sh -b -p ${CONDA_ENV} && \
+    chown ${NB_USER}:staff -R ${CONDA_ENV}
+
+# podman does not understand group permissions
+RUN chown ${NB_USER}:staff -R ${R_HOME}/site-library
+
+# some teaching preferences
+RUN git config --system pull.rebase false && \
+    git config --system credential.helper 'cache --timeout=36000'
+
+## codeserver
+RUN curl -fsSL https://code-server.dev/install.sh | sh
+
+
+## Openscapes-specific configs
+USER rstudio
+WORKDIR /home/rstudio
+RUN usermod -s /bin/bash rstudio
+
+# install into the default environment
+COPY requirements.txt requirements.txt
+RUN python -m pip install -r requirements.txt && rm requirements.txt
+COPY install.R install.R
+RUN Rscript install.R && rm install.R
+
+# Create a conda-based env and install into it without using conda init/conda activate
+# (this yaml file doesn't include everything from pangeo, consider a different one...)
+ENV MY_ENV=${CONDA_ENV}/envs/openscapes
+RUN wget https://github.com/NASA-Openscapes/corn/raw/main/ci/environment.yml && \
+    conda env create -p ${MY_ENV} -f environment.yml
+
+# This won't be the default environment but we register it
+RUN ${MY_ENV}/bin/python -m pip install ipykernel && \
+    ${MY_ENV}/bin/python -m ipykernel install --prefix /opt/venv --name=openscapes
+
+
+

From e11b66bed9ffaaab558819a1ce59ebc0008b26af Mon Sep 17 00:00:00 2001
From: Carl
Date: Fri, 20 Sep 2024 21:34:11 -0700
Subject: [PATCH 09/11] new ollama distribution mechanism

---
 images/Dockerfile.pytorch | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/images/Dockerfile.pytorch b/images/Dockerfile.pytorch
index 3b241a5..7435ac6 100644
--- a/images/Dockerfile.pytorch
+++ b/images/Dockerfile.pytorch
@@ -5,7 +5,8 @@ RUN conda update -n base -c conda-forge conda && \
     conda env update --file environment.yml
 
 USER root
-RUN curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/local/bin/ollama && chmod +x /usr/local/bin/ollama
+RUN curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz && tar -C /usr -xzf ollama-linux-amd64.tgz
+
 RUN curl -fsSL https://code-server.dev/install.sh | sh && rm -rf .cache
 RUN git config --system pull.rebase false && \
     git config --system credential.helper 'cache --timeout=30000' && \

From 6d6b323bab532aae2cf836bc0fb287e0c4d07ace Mon Sep 17 00:00:00 2001
From: Carl
Date: Wed, 25 Sep 2024 16:01:51 -0700
Subject: [PATCH 10/11] revised ollama

---
 images/Dockerfile.gpu | 3 +--
 images/jupyter-ai.yml | 1 +
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/images/Dockerfile.gpu b/images/Dockerfile.gpu
index c1a42b3..b5eb254 100644
--- a/images/Dockerfile.gpu
+++ b/images/Dockerfile.gpu
@@ -19,8 +19,7 @@ RUN curl -fsSL https://code-server.dev/install.sh | sh && rm -rf .cache
 #RUN wget https://github.com/coder/code-server/releases/download/v4.89.1/code-server_4.89.1_amd64.deb && dpkg -i code-server*.deb && rm code-server*.deb
 
 # ollama
-# RUN curl -fsSL https://ollama.com/install.sh | sh
-RUN curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/local/bin/ollama && chmod +x /usr/local/bin/ollama
+RUN curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz && tar -C /usr -xzf ollama-linux-amd64.tgz
 
 
 # some preferences
diff --git a/images/jupyter-ai.yml b/images/jupyter-ai.yml
index 2e476dc..da4f940 100644
--- a/images/jupyter-ai.yml
+++ b/images/jupyter-ai.yml
@@ -6,6 +6,7 @@ dependencies:
   - jupyter-ai
   - jupyter-resource-usage
   - jupyter-vscode-proxy
+  - pypdf
   - pip
   - pip:
     - jupyter-tensorboard-proxy

From 16a77fb1b94f3562ed6870cb1a469fbf9783d4d2 Mon Sep 17 00:00:00 2001
From: Carl
Date: Thu, 26 Sep 2024 08:42:26 -0700
Subject: [PATCH 11/11] new gpu image

---
 images/Dockerfile.gpu  | 54 +++++++++++++++---------------------------
 images/jupyter-ai.yml  |  2 ++
 images/rl-env.yml      | 20 ++++++++++++++++
 images/spatial-env.yml | 38 +++++++++++++++++++++++++++++
 4 files changed, 79 insertions(+), 35 deletions(-)
 create mode 100644 images/rl-env.yml
 create mode 100644 images/spatial-env.yml

diff --git a/images/Dockerfile.gpu b/images/Dockerfile.gpu
index b5eb254..2623353 100644
--- a/images/Dockerfile.gpu
+++ b/images/Dockerfile.gpu
@@ -1,51 +1,35 @@
-FROM docker.io/rocker/ml
+FROM quay.io/jupyter/pytorch-notebook:cuda12-ubuntu-24.04
 
-ENV NB_USER rstudio
-ENV VIRTUAL_ENV /opt/venv
-ENV PATH ${VIRTUAL_ENV}/bin:$PATH
+COPY jupyter-ai.yml environment.yml
+RUN conda update -n base -c conda-forge conda && \
+    conda env update --file environment.yml
 
-RUN /rocker_scripts/install_jupyter.sh
-RUN python3 -m pip install numpy
-RUN chown -R ${NB_USER}:staff ${VIRTUAL_ENV} && chmod -R g+rw ${VIRTUAL_ENV}
+USER root
+RUN curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz && tar -C /usr -xzf ollama-linux-amd64.tgz
 
-RUN /rocker_scripts/experimental/install_dev_osgeo.sh
-RUN chown -R ${NB_USER}:staff ${VIRTUAL_ENV} && chmod -R g+rw ${VIRTUAL_ENV}
+RUN curl -fsSL https://code-server.dev/install.sh | sh && rm -rf .cache
+RUN git config --system pull.rebase false && \
+    echo '"\e[5~": history-search-backward' >> /etc/inputrc && \
+    echo '"\e[6~": history-search-forward' >> /etc/inputrc
+
+#git config --system credential.helper 'cache --timeout=30000' && \
 
-COPY apt.txt apt.txt
-RUN apt-get update -qq && xargs sudo apt-get -y install < apt.txt
+RUN apt-get update -qq && apt-get -y install vim git-lfs
 
 
 # install codeserver
 RUN curl -fsSL https://code-server.dev/install.sh | sh && rm -rf .cache
-#RUN wget https://github.com/coder/code-server/releases/download/v4.89.1/code-server_4.89.1_amd64.deb && dpkg -i code-server*.deb && rm code-server*.deb
 
 
 # ollama
 RUN curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz && tar -C /usr -xzf ollama-linux-amd64.tgz
-
-# some preferences
-RUN git config --system pull.rebase false && \
-    git config --system credential.helper 'cache --timeout=30000' && \
-    echo '"\e[5~": history-search-backward' >> /etc/inputrc && \
-    echo '"\e[6~": history-search-forward' >> /etc/inputrc
-
 USER ${NB_USER}
-WORKDIR /home/${NB_USER}
-
-RUN usermod -s /bin/bash ${NB_USER}
-COPY spatial-requirements.txt /tmp/spatial-requirements.txt
-RUN python3 -m pip install --no-cache-dir -r /tmp/spatial-requirements.txt
-COPY rl-requirements.txt /tmp/rl-requirements.txt
-RUN python3 -m pip install --no-cache-dir -r /tmp/rl-requirements.txt
-COPY jupyter-requirements.txt /tmp/jupyter-requirements.txt
-RUN python3 -m pip install --no-cache-dir -r /tmp/jupyter-requirements.txt
-
-## Register the environment with ipykernel, mostly for vscode to find it
-RUN python3 -m ipykernel install --user --name=venv
+COPY spatial-env.yml spatial-env.yml
+RUN conda update -n base -c conda-forge conda && \
+    conda env update --file spatial-env.yml
+COPY rl-env.yml rl-env.yml
+RUN conda update -n base -c conda-forge conda && \
+    conda env update --file rl-env.yml
 
-#USER root
-#COPY install.R /tmp/install.R
-#RUN Rscript /tmp/install.R && rm /tmp/install.R && chown -R ${NB_USER}:staff ${R_HOME}/site-library
-#USER ${NB_USER}
 
 
diff --git a/images/jupyter-ai.yml b/images/jupyter-ai.yml
index 2e476dc..da4f940 100644
--- a/images/jupyter-ai.yml
+++ b/images/jupyter-ai.yml
@@ -2,12 +2,14 @@ name: base
 channels:
   - conda-forge
 dependencies:
+  - ibis-duckdb
   - jupyterlab-myst
   - jupyter-ai
   - jupyter-resource-usage
   - jupyter-vscode-proxy
   - pypdf
   - pip
+  - seaborn
   - pip:
     - jupyter-tensorboard-proxy
 
diff --git a/images/rl-env.yml b/images/rl-env.yml
new file mode 100644
index 0000000..52a9be6
--- /dev/null
+++ b/images/rl-env.yml
@@ -0,0 +1,20 @@
+name: base
+channels:
+  - conda-forge
+dependencies:
+  - darts
+  - gymnasium
+  - gputil
+  - huggingface_hub
+  - nox
+  - nvitop
+  - plotnine
+  - PyYaml
+  - pytest
+  - stable-baselines3
+  - sb3-contrib
+  - scikit-optimize
+  - pip:
+    - ray[rllib,tune]
+    - huggingface_sb3
+
diff --git a/images/spatial-env.yml b/images/spatial-env.yml
new file mode 100644
index 0000000..d104af8
--- /dev/null
+++ b/images/spatial-env.yml
@@ -0,0 +1,38 @@
+name: base
+channels:
+  - conda-forge
+dependencies:
+  - awscliv2
+  - distributed
+  - earthaccess
+  - fiona
+  - fsspec
+  - geopandas
+  - geocube
+  - leafmap[maplibregl]
+  - localtileserver
+  - mapclassify
+  - maplibre
+  - minio
+  - netCDF4
+  - odc-geo
+  - odc-stac
+  - planetary-computer
+  - pmtiles
+  - polars
+  - pyarrow
+  - pydeck
+  - pyogrio
+  - pystac
+  - pystac-client
+  - rasterio
+  - rasterstats
+  - requests
+  - rio-cogeo
+  - rioxarray
+  - stackstac
+  - streamlit
+  - tqdm
+  - xarray
+  - zarr
+
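
--
Editor's note (not part of the commits above): a minimal sketch of how the pieces touched by this series could be exercised by hand. The build/push pattern mirrors .github/workflows/pytorch-image.yml, the image tag ghcr.io/boettiger-lab/k8s-gpu:latest comes from jupyterhub/public-config.yaml, and the helm call follows the jupyterhub/cirrus.sh pattern. The --gpus flag, the port mapping, and the release/namespace names used here are illustrative assumptions, not values taken from the repository.

  # Build the GPU image from PATCH 11 locally (assumes Docker is installed)
  docker build images/ -f images/Dockerfile.gpu --tag ghcr.io/boettiger-lab/k8s-gpu:latest

  # Quick smoke test; --gpus all assumes the NVIDIA container toolkit is configured
  docker run --rm -it --gpus all -p 8888:8888 ghcr.io/boettiger-lab/k8s-gpu:latest

  # Deploy or upgrade the hub with the public config, following cirrus.sh
  # (release name "jupyterhub" and namespace "jupyterhub" are placeholders)
  helm upgrade --cleanup-on-fail --install jupyterhub jupyterhub/jupyterhub \
    --namespace jupyterhub --create-namespace \
    --values jupyterhub/public-config.yaml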