From ca2bf2d00d5de4f8cf13e3d0ef07063b8a4d1a59 Mon Sep 17 00:00:00 2001 From: Connor Nelson Date: Thu, 22 Feb 2024 22:48:42 +0000 Subject: [PATCH 1/4] Workspace: Use kubernetes --- Dockerfile | 3 +- challenge/Dockerfile | 1 + ctfd/requirements.txt | 1 + docker-compose.yml | 95 ++++++++++++++++----- dojo_plugin/api/v1/docker.py | 113 +++++++++++++++++++++++++ dojo_plugin/config.py | 8 ++ dojo_plugin/utils/__init__.py | 14 +-- etc/systemd/system/pwn.college.service | 4 +- kube/bin/mount | 7 ++ kube/k3s/registries.yaml | 4 + kube/manifests/nfs.yaml | 53 ++++++++++++ script/container-setup.sh | 9 +- script/dojo | 50 ++++++++--- 13 files changed, 313 insertions(+), 49 deletions(-) create mode 100755 kube/bin/mount create mode 100644 kube/k3s/registries.yaml create mode 100644 kube/manifests/nfs.yaml diff --git a/Dockerfile b/Dockerfile index 61fd59dd2..9074681f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -44,4 +44,5 @@ EXPOSE 22 EXPOSE 80 EXPOSE 443 WORKDIR /opt/pwn.college -CMD ["dojo", "start"] +ENTRYPOINT ["dojo"] +CMD ["start"] diff --git a/challenge/Dockerfile b/challenge/Dockerfile index 957eda12c..ca5b7f4c9 100644 --- a/challenge/Dockerfile +++ b/challenge/Dockerfile @@ -50,6 +50,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ curl socat sudo + tini vim wget unzip diff --git a/ctfd/requirements.txt b/ctfd/requirements.txt index 137f32998..05d37cb0a 100644 --- a/ctfd/requirements.txt +++ b/ctfd/requirements.txt @@ -1,5 +1,6 @@ # DOJO docker==6.1.2 +kubernetes==28.1.0 pyyaml==5.4.1 schema==0.7.5 bleach==6.1.0 diff --git a/docker-compose.yml b/docker-compose.yml index 475bdee6a..e95a67348 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,73 @@ version: '3.4' services: + kube-server: + container_name: kube-server + hostname: kube-server + image: rancher/k3s:latest + command: server --node-taint node-role.kubernetes.io/control-plane:NoSchedule + tmpfs: + - /run + - /var/run + ulimits: + nproc: 65535 + nofile: + soft: 65535 + hard: 
65535 + privileged: true + restart: always + environment: + - K3S_TOKEN=${K3S_TOKEN:?err} + - K3S_KUBECONFIG_OUTPUT=/output/kube.yaml + volumes: + - ./data/kube/kubeconfig:/output + - ./data/kube/node/server:/etc/rancher/node + - ./data/kube/server:/var/lib/rancher/k3s + - ./kube/k3s:/etc/rancher/k3s:ro + - ./kube/manifests:/var/lib/rancher/k3s/server/manifests/dojo:ro + - ./kube/bin:/usr/local/bin:ro + ports: + - 6443:6443 # Kubernetes API Server + # - 80:80 # Ingress controller port 80 + # - 443:443 # Ingress controller port 443 + extra_hosts: + - homes-nfs:10.43.0.20 + + kube-agent: + container_name: kube-agent + hostname: kube-agent + image: rancher/k3s:latest + tmpfs: + - /run + - /var/run + ulimits: + nproc: 65535 + nofile: + soft: 65535 + hard: 65535 + privileged: true + restart: always + environment: + - K3S_URL=https://kube-server:6443 + - K3S_TOKEN=${K3S_TOKEN:?err} + volumes: + - ./data/kube/node/agent:/etc/rancher/node + - ./data/kube/agent:/var/lib/rancher/k3s + - ./data/homes:/var/homes:shared + - ./kube/k3s:/etc/rancher/k3s:ro + - ./kube/bin:/usr/local/bin:ro + extra_hosts: + - homes-nfs:10.43.0.20 + + registry: + container_name: registry + image: registry:2 + restart: always + ports: + - "5000:5000" + volumes: + - ./data/registry:/var/lib/registry + challenge: build: context: ./challenge @@ -28,10 +95,13 @@ services: - INSTALL_XFCE=${INSTALL_XFCE:-${DEFAULT_INSTALL_SELECTION}} - UBUNTU_VERSION=${UBUNTU_VERSION} - DOJO_CHALLENGE=${DOJO_CHALLENGE} + image: localhost:5000/challenge platform: linux/amd64 entrypoint: /bin/true - networks: - - user_network + + volume_nfs: + build: ./volume_nfs + image: localhost:5000/volume-nfs ctfd: container_name: ctfd @@ -65,9 +135,10 @@ services: - ERROR_LOG=- - REVERSE_PROXY=true - SERVER_SENT_EVENTS=false - - SECRET_KEY=${SECRET_KEY} + - SECRET_KEY=${SECRET_KEY}' - DOJO_HOST=${DOJO_HOST} - HOST_DATA_PATH=/opt/pwn.college/data + - KUBECONFIG=/var/kubeconfig/kube.yaml - MAIL_SERVER=${MAIL_SERVER} - 
MAIL_PORT=${MAIL_PORT} - MAIL_USERNAME=${MAIL_USERNAME} @@ -89,6 +160,7 @@ services: - ./data/homes:/var/homes:shared - ./data/challenges:/var/challenges:ro - ./data/dojos:/var/dojos + - ./data/kube/kubeconfig:/var/kubeconfig:ro - ./data/ssh_host_keys/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro - ./index.html:/var/index.html:ro - ./user_firewall.allowed:/var/user_firewall.allowed:ro @@ -178,12 +250,6 @@ services: - ./nginx-proxy/etc/passwd:/etc/passwd:ro - ./data/homes:/var/homes:shared - /var/run/docker.sock:/tmp/${DOCKER_PSLR}/docker.sock:ro - networks: - default: - user_network: - aliases: - - nginx - ipv4_address: 10.0.0.3 nginx-certs: container_name: nginx_certs @@ -208,14 +274,3 @@ volumes: certs: acme: windows: - -networks: - user_network: - name: user_network - driver: bridge - ipam: - config: - - subnet: 10.0.0.0/8 - driver_opts: - com.docker.network.bridge.name: "user_network" - com.docker.network.bridge.enable_icc: "false" diff --git a/dojo_plugin/api/v1/docker.py b/dojo_plugin/api/v1/docker.py index 76d43da65..c6a4cc7af 100644 --- a/dojo_plugin/api/v1/docker.py +++ b/dojo_plugin/api/v1/docker.py @@ -6,6 +6,8 @@ import traceback import docker +import yaml +from kubernetes import client from flask import request from flask_restx import Namespace, Resource from CTFd.utils.user import get_current_user, is_admin @@ -23,6 +25,8 @@ def start_challenge(user, dojo_challenge, practice): + # TODO: Remove this docker-based implementation in favor of kube + def exec_run(cmd, *, shell=False, assert_success=True, user="root", **kwargs): if shell: cmd = f"""/bin/sh -c \" @@ -222,6 +226,115 @@ def initialize_container(): initialize_container() +def start_challenge(user, dojo_challenge, practice): + namespace = "default" + + def setup_home(): + # TODO-KUBE: we don't actually care about nosuid or random_home_path anymore + homes = pathlib.Path("/var/homes") + homefs = homes / "homefs" + user_data = homes / "data" / str(user.id) + user_nosuid = homes / "nosuid" / 
random_home_path(user) + + assert homefs.exists() + user_data.parent.mkdir(exist_ok=True) + user_nosuid.parent.mkdir(exist_ok=True) + + if not user_data.exists(): + # Shell out to `cp` in order to sparsely copy + subprocess.run(["cp", homefs, user_data], check=True) + + process = subprocess.run( + ["findmnt", "--output", "OPTIONS", user_nosuid], capture_output=True + ) + if b"nosuid" not in process.stdout: + subprocess.run( + ["mount", user_data, "-o", "nosuid,X-mount.mkdir", user_nosuid], + check=True, + ) + + def start_container(): + # TODO-KUBE: Latest forces a pull, we should make the pull async + image = "registry:5000/challenge:latest" + name = f"user-{user.id}" + # TODO-KUBE: Upstream changed the hostname + hostname = "-".join((dojo_challenge.module.id, dojo_challenge.id)) + if practice: + hostname = f"practice~{hostname}" + + container = client.V1Container( + name=name, + image=image, + command=["/bin/tini", "--", "/bin/sleep", "6h"], + volume_mounts=[ + client.V1VolumeMount(name="home", mount_path=f"/home/hacker"), + client.V1VolumeMount(name="kvm", mount_path="/dev/kvm"), + ], + resources=client.V1ResourceRequirements( + limits=dict(cpu="4000m", memory="4000Mi"), + requests=dict(cpu="100m", memory="100Mi"), + ), + # TODO-KUBE: seccomp + security_context=client.V1SecurityContext( + capabilities=client.V1Capabilities(add=["SYS_PTRACE"]), + ), + ) + + home_volume = client.V1Volume( + name="home", + nfs=client.V1NFSVolumeSource( + server="homes-nfs", + path=f"/nosuid/{random_home_path(user)}", + ) + ) + + kvm_volume = client.V1Volume( + name="kvm", + host_path=client.V1HostPathVolumeSource(path="/dev/kvm", type="CharDevice"), + ) + + pod = client.V1Pod( + api_version="v1", + kind="Pod", + metadata=client.V1ObjectMeta(name=name), + spec=client.V1PodSpec( + containers=[container], + volumes=[home_volume, kvm_volume], + hostname=hostname, + restart_policy="Never", + automount_service_account_token=False, + ), + ) + + service = client.V1Service( + 
api_version="v1", + kind="Service", + metadata=client.V1ObjectMeta(name=name), + spec=client.V1ServiceSpec( + selector={"app": name}, + ports=[client.V1ServicePort(port=80, target_port=80)], + type="ClusterIP" # or use "NodePort" or "LoadBalancer" as needed + ), + ) + + api_instance = client.CoreV1Api() + + try: + api_instance.delete_namespaced_pod( + name=name, + namespace=namespace, + body=client.V1DeleteOptions(grace_period_seconds=0), + ) + except client.exceptions.ApiException: + pass + + # TODO-KUBE: This is async, we might be in "ContainerCreating" state for a while before "Running" + api_instance.create_namespaced_pod(namespace=namespace, body=pod) + + setup_home() + start_container() + + @docker_namespace.route("") class RunDocker(Resource): @authed_only diff --git a/dojo_plugin/config.py b/dojo_plugin/config.py index 787c6de1f..82c821e07 100644 --- a/dojo_plugin/config.py +++ b/dojo_plugin/config.py @@ -6,6 +6,8 @@ import json import socket +import yaml +from kubernetes import config as kube_config from sqlalchemy.exc import IntegrityError from CTFd.models import db, Admins, Pages from CTFd.utils import config, set_config @@ -18,6 +20,12 @@ INDEX_HTML = pathlib.Path("/var/index.html").read_text() +kube_config_dict = yaml.safe_load(open(kube_config.KUBE_CONFIG_DEFAULT_LOCATION, "r")) +for cluster in kube_config_dict["clusters"]: + if cluster["name"] == "default": + cluster["cluster"]["server"] = "https://kube-server:6443" +kube_config.load_kube_config_from_dict(kube_config_dict) + def create_seccomp(): seccomp = json.load(pathlib.Path("/etc/docker/seccomp.json").open()) diff --git a/dojo_plugin/utils/__init__.py b/dojo_plugin/utils/__init__.py index f3fbb33a9..17d389025 100644 --- a/dojo_plugin/utils/__init__.py +++ b/dojo_plugin/utils/__init__.py @@ -16,6 +16,7 @@ import bleach import docker +from kubernetes import client from flask import current_app, Response, Markup, abort, g from itsdangerous.url_safe import URLSafeSerializer from CTFd.models import 
db, Solves, Challenges, Users @@ -36,12 +37,10 @@ def get_current_container(user=None): if not user: return None - docker_client = docker.from_env() - container_name = f"user_{user.id}" - + api_instance = client.CoreV1Api() try: - return docker_client.containers.get(container_name) - except docker.errors.NotFound: + return api_instance.read_namespaced_pod(name=f"user-{user.id}", namespace="default") + except client.rest.ApiException: return None @@ -95,7 +94,10 @@ def redirect_internal(redirect_uri, auth=None): def redirect_user_socket(user, port, url_path): assert user is not None - return redirect_internal(f"http://user_{user.id}:{port}/{url_path}") + api_instance = client.CoreV1Api() + user_ip = api_instance.read_namespaced_pod(name=f"user-{user.id}", namespace="default").status.pod_ip + # TODO-KUBE: Using Kube DNS instead would be nice + return redirect_internal(f"http://{user_ip}:{port}/{url_path}") def render_markdown(s): raw_html = build_markdown(s or "") diff --git a/etc/systemd/system/pwn.college.service b/etc/systemd/system/pwn.college.service index 1c63e17d5..82c3631f6 100644 --- a/etc/systemd/system/pwn.college.service +++ b/etc/systemd/system/pwn.college.service @@ -7,8 +7,8 @@ After=docker.service Type=oneshot RemainAfterExit=true WorkingDirectory=/opt/pwn.college -ExecStart=/usr/bin/docker compose --env-file=/opt/pwn.college/data/config.env up -d --build --remove-orphans -ExecStop=/usr/bin/docker compose down +ExecStart=/usr/bin/dojo up +ExecStop=/usr/bin/dojo down [Install] WantedBy=multi-user.target diff --git a/kube/bin/mount b/kube/bin/mount new file mode 100755 index 000000000..96e5b18fc --- /dev/null +++ b/kube/bin/mount @@ -0,0 +1,7 @@ +#!/bin/sh + +if [ "$1" = "-t" ] && [ "$2" = "nfs" ]; then + /bin/aux/mount "$@" -o vers=4.2,nosuid +else + /bin/aux/mount "$@" +fi diff --git a/kube/k3s/registries.yaml b/kube/k3s/registries.yaml new file mode 100644 index 000000000..d5f1105a8 --- /dev/null +++ b/kube/k3s/registries.yaml @@ -0,0 +1,4 @@ 
+mirrors: + "registry:5000": + endpoint: + - "http://registry:5000" diff --git a/kube/manifests/nfs.yaml b/kube/manifests/nfs.yaml new file mode 100644 index 000000000..84cbee33a --- /dev/null +++ b/kube/manifests/nfs.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-server +spec: + replicas: 1 + selector: + matchLabels: + role: nfs-server + template: + metadata: + labels: + role: nfs-server + spec: + containers: + - name: nfs-server + image: registry:5000/volume-nfs + ports: + - name: nfs + containerPort: 2049 + - name: mountd + containerPort: 20048 + - name: rpcbind + containerPort: 111 + securityContext: + privileged: true + volumeMounts: + - mountPath: /exports + mountPropagation: HostToContainer + name: homes + volumes: + - name: homes + hostPath: + path: /var/homes + type: DirectoryOrCreate + +--- + +apiVersion: v1 +kind: Service +metadata: + name: nfs-server +spec: + ports: + - name: nfs + port: 2049 + - name: mountd + port: 20048 + - name: rpcbind + port: 111 + clusterIP: 10.43.0.20 + selector: + role: nfs-server diff --git a/script/container-setup.sh b/script/container-setup.sh index b196d4d64..490625c85 100755 --- a/script/container-setup.sh +++ b/script/container-setup.sh @@ -19,6 +19,7 @@ define DOJO_ENV development define DOJO_CHALLENGE challenge-mini define WINDOWS_VM none define SECRET_KEY $(openssl rand -hex 16) +define K3S_TOKEN $(openssl rand -hex 16) define DOCKER_PSLR $(openssl rand -hex 16) define UBUNTU_VERSION 20.04 define INTERNET_FOR_ALL False @@ -80,11 +81,3 @@ mkdir -p $DOJO_DIR/data/logging sysctl -w kernel.pty.max=1048576 echo core > /proc/sys/kernel/core_pattern - -iptables -N DOCKER-USER -iptables -I DOCKER-USER -i user_network -j DROP -for host in $(cat $DOJO_DIR/user_firewall.allowed); do - iptables -I DOCKER-USER -i user_network -d $(host $host | awk '{print $NF; exit}') -j ACCEPT -done -iptables -I DOCKER-USER -i user_network -s 10.0.0.0/24 -m conntrack --ctstate NEW -j ACCEPT -iptables -I 
DOCKER-USER -i user_network -d 10.0.0.0/8 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT diff --git a/script/dojo b/script/dojo index e398c43cf..04954b414 100755 --- a/script/dojo +++ b/script/dojo @@ -14,11 +14,29 @@ DOCKER_ARGS=${DOCKER_ARGS:--i} [ -t 0 ] && DOCKER_ARGS="-t $DOCKER_ARGS" case "$ACTION" in + # HELP: up: create and start the dojo + "up") + dojo build + dojo compose up -d --remove-orphans + ;; + + # HELP: down: stop and remove the dojo + "down") + dojo compose down + ;; + + "build") + dojo sync + dojo compose build + dojo compose up -d registry + dojo compose push + ;; + # HELP: update: update dojo files (warning: does `git pull`), rebuild containers, and restart any changed services "update") git pull - dojo sync - dojo compose up -d --build + dojo build + dojo up ;; # HELP: sync: sync changed dojo files @@ -28,22 +46,20 @@ case "$ACTION" in cp -rv --preserve=timestamps dojo_theme /opt/CTFd/CTFd/themes/ ;; - # HELP: enter [ -s ] USER_ID: enter a user's running container. 
-s for entering as root + # HELP: enter USER_ID: enter a user's running container "enter") - USER_SWITCH="" - if [ "$1" == "-s" ] - then - USER_SWITCH="--user=root" - shift - fi DOJO_UID="$1" [ -n "${DOJO_UID//[0-9]}" ] && DOJO_UID=$( echo "select id from users where name='$DOJO_UID'" | - $0 db -s + $0 db -s ) - CONTAINER="user_$DOJO_UID" + CONTAINER="user-$DOJO_UID" shift - docker exec $DOCKER_ARGS $USER_SWITCH "$CONTAINER" bash + if [ -z "$USER" ]; then + dojo kubectl exec $DOCKER_ARGS "$CONTAINER" -- bash + else + dojo kubectl exec $DOCKER_ARGS --user="$USER" "$CONTAINER" -- bash + fi ;; # HELP: compose ARGS: run a docker compose command with the config.env file loaded @@ -51,11 +67,21 @@ case "$ACTION" in docker compose --env-file=/opt/pwn.college/data/config.env "$@" ;; + # HELP: bash: drop into a bash shell + "bash") + /bin/bash "$@" + ;; + # HELP: flask: drop into a flask shell in the ctfd container "flask") docker exec $DOCKER_ARGS ctfd flask shell "$@" ;; + # HELP: kubectl: control the workspace kubernetes cluster + "kubectl") + docker exec $DOCKER_ARGS -e KUBECONFIG=/output/kube.yaml kube-server kubectl "$@" + ;; + # HELP: db: launch a mysql client session, connected to the ctfd db "db") docker exec $DOCKER_ARGS db mysql -pctfd -Dctfd ctfd "$@" From bf84def68a25eca9c3a1acf4c06711d9bb705129 Mon Sep 17 00:00:00 2001 From: Connor Nelson Date: Fri, 12 Apr 2024 00:58:31 +0000 Subject: [PATCH 2/4] Workspace: Support vscode/desktop in kubernetes --- challenge/Dockerfile | 8 ++-- challenge/docker-entrypoint.sh | 10 ----- challenge/docker-initialize.sh | 1 - .../entrypoint.d/00_root_challenge_init.sh | 8 ++++ .../10_hacker_start_vm.sh} | 0 .../20_hacker_start_code_server.sh} | 2 +- .../31_hacker_configure_gui.sh} | 0 .../33_hacker_remove_tool_shortcuts.sh} | 0 .../38_hacker_start_windows_gui.sh} | 2 +- .../39_hacker_start_gui.sh} | 7 ++-- .../40_hacker_configure_pwntools.sh} | 0 .../41_hacker_fix_ipython_autocomplete.sh} | 0 challenge/entrypoint.sh | 13 +++++++ 
docker-compose.yml | 2 - dojo_plugin/api/v1/docker.py | 37 +++++++++++-------- dojo_plugin/pages/dojos.py | 8 ++-- dojo_plugin/pages/workspace.py | 4 +- dojo_plugin/utils/dojo.py | 4 +- 18 files changed, 59 insertions(+), 47 deletions(-) delete mode 100755 challenge/docker-entrypoint.sh delete mode 100755 challenge/docker-initialize.sh create mode 100755 challenge/entrypoint.d/00_root_challenge_init.sh rename challenge/{docker-entrypoint.d/10_start_vm.sh => entrypoint.d/10_hacker_start_vm.sh} (100%) rename challenge/{docker-entrypoint.d/20_start_code_server.sh => entrypoint.d/20_hacker_start_code_server.sh} (92%) rename challenge/{docker-entrypoint.d/31_configure_gui.sh => entrypoint.d/31_hacker_configure_gui.sh} (100%) rename challenge/{docker-entrypoint.d/33_remove_tool_shortcuts.sh => entrypoint.d/33_hacker_remove_tool_shortcuts.sh} (100%) rename challenge/{docker-entrypoint.d/38_start_windows_gui.sh => entrypoint.d/38_hacker_start_windows_gui.sh} (94%) rename challenge/{docker-entrypoint.d/39_start_gui.sh => entrypoint.d/39_hacker_start_gui.sh} (89%) rename challenge/{docker-entrypoint.d/40_configure_pwntools.sh => entrypoint.d/40_hacker_configure_pwntools.sh} (100%) rename challenge/{docker-entrypoint.d/41_fix_ipython_autocomplete.sh => entrypoint.d/41_hacker_fix_ipython_autocomplete.sh} (100%) create mode 100755 challenge/entrypoint.sh diff --git a/challenge/Dockerfile b/challenge/Dockerfile index ca5b7f4c9..da0330fbd 100644 --- a/challenge/Dockerfile +++ b/challenge/Dockerfile @@ -536,9 +536,8 @@ FROM builder-tools-pip-${INSTALL_TOOLS_PIP} as builder-tools-pip FROM builder-essentials as builder-pwn.college RUN mkdir /opt/pwn.college -COPY docker-initialize.sh /opt/pwn.college/docker-initialize.sh -COPY docker-entrypoint.d /opt/pwn.college/docker-entrypoint.d -COPY docker-entrypoint.sh /opt/pwn.college/docker-entrypoint.sh +COPY entrypoint.d /opt/pwn.college/entrypoint.d +COPY entrypoint.sh /opt/pwn.college/entrypoint.sh COPY setuid_interpreter.c 
/opt/pwn.college/setuid_interpreter.c COPY bash.bashrc /opt/pwn.college/bash.bashrc COPY vm /opt/pwn.college/vm @@ -690,5 +689,4 @@ RUN < /opt/pwn.college/build EOF -USER hacker -WORKDIR /home/hacker +ENTRYPOINT [ "/opt/pwn.college/entrypoint.sh" ] diff --git a/challenge/docker-entrypoint.sh b/challenge/docker-entrypoint.sh deleted file mode 100755 index a6b921c0f..000000000 --- a/challenge/docker-entrypoint.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -e - -mkdir -p /tmp/.dojo -exec >/tmp/.dojo/entrypoint.log 2>&1 - -for SCRIPT in /opt/pwn.college/docker-entrypoint.d/* -do - echo "[*] docker-entrypoint running script: $SCRIPT" - "$SCRIPT" -done diff --git a/challenge/docker-initialize.sh b/challenge/docker-initialize.sh deleted file mode 100755 index 1a2485251..000000000 --- a/challenge/docker-initialize.sh +++ /dev/null @@ -1 +0,0 @@ -#!/bin/sh diff --git a/challenge/entrypoint.d/00_root_challenge_init.sh b/challenge/entrypoint.d/00_root_challenge_init.sh new file mode 100755 index 000000000..61b81139f --- /dev/null +++ b/challenge/entrypoint.d/00_root_challenge_init.sh @@ -0,0 +1,8 @@ +chown hacker:hacker /home/hacker +chmod 755 /home/hacker + +if [ -x "/challenge/.init" ]; then + /challenge/.init +fi + +touch /opt/pwn.college/.initialized diff --git a/challenge/docker-entrypoint.d/10_start_vm.sh b/challenge/entrypoint.d/10_hacker_start_vm.sh similarity index 100% rename from challenge/docker-entrypoint.d/10_start_vm.sh rename to challenge/entrypoint.d/10_hacker_start_vm.sh diff --git a/challenge/docker-entrypoint.d/20_start_code_server.sh b/challenge/entrypoint.d/20_hacker_start_code_server.sh similarity index 92% rename from challenge/docker-entrypoint.d/20_start_code_server.sh rename to challenge/entrypoint.d/20_hacker_start_code_server.sh index e6ee0607d..95708c48f 100755 --- a/challenge/docker-entrypoint.d/20_start_code_server.sh +++ b/challenge/entrypoint.d/20_hacker_start_code_server.sh @@ -9,7 +9,7 @@ start-stop-daemon --start \ --startas 
/usr/bin/code-server \ -- \ --auth=none \ - --bind-addr=dojo-user:6080 \ + --bind-addr=0.0.0.0:6080 \ --extensions-dir=/opt/code-server/extensions \ --disable-telemetry \ >/tmp/.dojo/vnc/websockify-windows.log \ diff --git a/challenge/docker-entrypoint.d/39_start_gui.sh b/challenge/entrypoint.d/39_hacker_start_gui.sh similarity index 89% rename from challenge/docker-entrypoint.d/39_start_gui.sh rename to challenge/entrypoint.d/39_hacker_start_gui.sh index b2552a169..b16ee5a72 100755 --- a/challenge/docker-entrypoint.d/39_start_gui.sh +++ b/challenge/entrypoint.d/39_hacker_start_gui.sh @@ -2,9 +2,8 @@ mkdir -p /tmp/.dojo/vnc /home/hacker/.vnc -container_id="$(cat /.authtoken)" -password_interact="$(printf 'desktop-interact' | openssl dgst -sha256 -hmac "$container_id" | awk '{print $2}' | head -c 8)" -password_view="$(printf 'desktop-view' | openssl dgst -sha256 -hmac "$container_id" | awk '{print $2}' | head -c 8)" +password_interact="$(printf 'desktop-interact' | openssl dgst -sha256 -hmac "$DOJO_AUTH_TOKEN" | awk '{print $2}' | head -c 8)" +password_view="$(printf 'desktop-view' | openssl dgst -sha256 -hmac "$DOJO_AUTH_TOKEN" | awk '{print $2}' | head -c 8)" printf '%s\n%s\n' "$password_interact" "$password_view" | tigervncpasswd -f > /tmp/.dojo/vnc/passwd start-stop-daemon --start \ @@ -33,7 +32,7 @@ start-stop-daemon --start \ --startas /usr/bin/websockify \ -- \ --web /usr/share/novnc/ \ - dojo-user:6081 \ + 0.0.0.0:6081 \ --unix-target=/tmp/.dojo/vnc/socket \ >/tmp/.dojo/vnc/websockify.log \ diff --git a/challenge/docker-entrypoint.d/40_configure_pwntools.sh b/challenge/entrypoint.d/40_hacker_configure_pwntools.sh similarity index 100% rename from challenge/docker-entrypoint.d/40_configure_pwntools.sh rename to challenge/entrypoint.d/40_hacker_configure_pwntools.sh diff --git a/challenge/docker-entrypoint.d/41_fix_ipython_autocomplete.sh b/challenge/entrypoint.d/41_hacker_fix_ipython_autocomplete.sh similarity index 100% rename from 
challenge/docker-entrypoint.d/41_fix_ipython_autocomplete.sh rename to challenge/entrypoint.d/41_hacker_fix_ipython_autocomplete.sh diff --git a/challenge/entrypoint.sh b/challenge/entrypoint.sh new file mode 100755 index 000000000..60193665c --- /dev/null +++ b/challenge/entrypoint.sh @@ -0,0 +1,13 @@ +#!/bin/sh -e + +mkdir -p /run/dojo +exec >/run/dojo/entrypoint.log 2>&1 + +for script in /opt/pwn.college/entrypoint.d/* +do + user=$(basename "$script" | cut -d_ -f2) + echo "[*] running entrypoint script '$script' as user '$user'" + su "$user" -c "$script" +done + +exec /bin/tini -- /bin/sleep 6h diff --git a/docker-compose.yml b/docker-compose.yml index e95a67348..a14064f03 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,3 @@ -version: '3.4' - services: kube-server: container_name: kube-server diff --git a/dojo_plugin/api/v1/docker.py b/dojo_plugin/api/v1/docker.py index c6a4cc7af..5235f4b0a 100644 --- a/dojo_plugin/api/v1/docker.py +++ b/dojo_plugin/api/v1/docker.py @@ -220,7 +220,7 @@ def initialize_container(): flag = "practice" if practice else serialize_user_flag(user.id, dojo_challenge.challenge_id) insert_flag(flag) - auth_token = container.labels["dojo.auth_token"] + auth_token = container.metadata.annotations["dojo/auth_token"] insert_auth_token(auth_token) initialize_container() @@ -262,10 +262,18 @@ def start_container(): if practice: hostname = f"practice~{hostname}" + config = dict( + privileged=str(bool(practice)).lower(), + auth_token=os.urandom(32).hex(), + ) + container = client.V1Container( name=name, image=image, - command=["/bin/tini", "--", "/bin/sleep", "6h"], + env=[ + client.V1EnvVar(name=f"DOJO_{name.upper()}", value=value) + for name, value in config.items() + ], volume_mounts=[ client.V1VolumeMount(name="home", mount_path=f"/home/hacker"), client.V1VolumeMount(name="kvm", mount_path="/dev/kvm"), @@ -293,30 +301,29 @@ def start_container(): host_path=client.V1HostPathVolumeSource(path="/dev/kvm", type="CharDevice"), ) 
+ annotations = dict( + dojo_id=dojo_challenge.dojo.reference_id, + module_id=dojo_challenge.module.id, + challenge_id=dojo_challenge.id, + challenge_description=dojo_challenge.description, + user_id=str(user.id), + **config, + ) + annotations = {f"dojo/{k}": v for k, v in annotations.items()} + pod = client.V1Pod( api_version="v1", kind="Pod", - metadata=client.V1ObjectMeta(name=name), + metadata=client.V1ObjectMeta(name=name, annotations=annotations), spec=client.V1PodSpec( + hostname=hostname, containers=[container], volumes=[home_volume, kvm_volume], - hostname=hostname, restart_policy="Never", automount_service_account_token=False, ), ) - service = client.V1Service( - api_version="v1", - kind="Service", - metadata=client.V1ObjectMeta(name=name), - spec=client.V1ServiceSpec( - selector={"app": name}, - ports=[client.V1ServicePort(port=80, target_port=80)], - type="ClusterIP" # or use "NodePort" or "LoadBalancer" as needed - ), - ) - api_instance = client.CoreV1Api() try: diff --git a/dojo_plugin/pages/dojos.py b/dojo_plugin/pages/dojos.py index 92988f3d1..19bc4c3d4 100644 --- a/dojo_plugin/pages/dojos.py +++ b/dojo_plugin/pages/dojos.py @@ -136,10 +136,10 @@ def view_dojo_activity(dojo): actives = [] now = datetime.datetime.now() for container in containers: - user_id = container.labels["dojo.user_id"] - dojo_id = container.labels["dojo.dojo_id"] - module_id = container.labels["dojo.module_id"] - challenge_id = container.labels["dojo.challenge_id"] + user_id = container.metadata.annotations["dojo/user_id"] + dojo_id = container.metadata.annotations["dojo/dojo_id"] + module_id = container.metadata.annotations["dojo/module_id"] + challenge_id = container.metadata.annotations["dojo/challenge_id"] user = Users.query.filter_by(id=user_id).first() challenge = DojoChallenges.from_id(dojo_id, module_id, challenge_id).first() diff --git a/dojo_plugin/pages/workspace.py b/dojo_plugin/pages/workspace.py index b6e6c70a6..571324749 100644 --- 
a/dojo_plugin/pages/workspace.py +++ b/dojo_plugin/pages/workspace.py @@ -20,7 +20,7 @@ def container_password(container, *args): - key = container.labels["dojo.auth_token"].encode() + key = container.metadata.annotations["dojo/auth_token"].encode() message = "-".join(args).encode() return hmac.HMAC(key, message, "sha256").hexdigest() @@ -110,7 +110,7 @@ def forward_workspace(service, service_path=""): container = get_current_container(user) if not container: abort(404) - dojo = Dojos.from_id(container.labels["dojo.dojo_id"]).first() + dojo = Dojos.from_id(container.metadata.annotations["dojo/dojo_id"]).first() if not dojo.is_admin(): abort(403) diff --git a/dojo_plugin/utils/dojo.py b/dojo_plugin/utils/dojo.py index 6ae69dcfb..8bfad72a5 100644 --- a/dojo_plugin/utils/dojo.py +++ b/dojo_plugin/utils/dojo.py @@ -409,7 +409,7 @@ def get_current_dojo_challenge(user=None): return ( DojoChallenges.query - .filter(DojoChallenges.id == container.labels.get("dojo.challenge_id"), - DojoChallenges.dojo == Dojos.from_id(container.labels.get("dojo.dojo_id")).first()) + .filter(DojoChallenges.id == container.metadata.annotations.get("dojo/challenge_id"), + DojoChallenges.dojo == Dojos.from_id(container.metadata.annotations.get("dojo/dojo_id")).first()) .first() ) From f8907bcd6e9a4083dd247f6c57135cd3525f0890 Mon Sep 17 00:00:00 2001 From: Connor Nelson Date: Fri, 12 Apr 2024 20:20:36 +0000 Subject: [PATCH 3/4] Workspace: Add flag in kubernetes --- challenge/entrypoint.sh | 3 +++ docker-compose.yml | 2 +- dojo_plugin/api/v1/docker.py | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/challenge/entrypoint.sh b/challenge/entrypoint.sh index 60193665c..96f07420e 100755 --- a/challenge/entrypoint.sh +++ b/challenge/entrypoint.sh @@ -3,6 +3,9 @@ mkdir -p /run/dojo exec >/run/dojo/entrypoint.log 2>&1 +echo "$DOJO_FLAG" > /flag +unset DOJO_FLAG + for script in /opt/pwn.college/entrypoint.d/* do user=$(basename "$script" | cut -d_ -f2) diff --git a/docker-compose.yml 
b/docker-compose.yml index a14064f03..628283686 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -133,7 +133,7 @@ services: - ERROR_LOG=- - REVERSE_PROXY=true - SERVER_SENT_EVENTS=false - - SECRET_KEY=${SECRET_KEY}' + - SECRET_KEY=${SECRET_KEY} - DOJO_HOST=${DOJO_HOST} - HOST_DATA_PATH=/opt/pwn.college/data - KUBECONFIG=/var/kubeconfig/kube.yaml diff --git a/dojo_plugin/api/v1/docker.py b/dojo_plugin/api/v1/docker.py index 5235f4b0a..6d7177f83 100644 --- a/dojo_plugin/api/v1/docker.py +++ b/dojo_plugin/api/v1/docker.py @@ -262,7 +262,9 @@ def start_container(): if practice: hostname = f"practice~{hostname}" + flag = "practice" if practice else serialize_user_flag(user.id, dojo_challenge.challenge_id) config = dict( + flag=f"pwn.college{{{flag}}}", privileged=str(bool(practice)).lower(), auth_token=os.urandom(32).hex(), ) @@ -321,6 +323,7 @@ def start_container(): volumes=[home_volume, kvm_volume], restart_policy="Never", automount_service_account_token=False, + enable_service_links=False, ), ) From 8cd08b1baea510770a4432269d83da4fb4ae2b70 Mon Sep 17 00:00:00 2001 From: Connor Nelson Date: Thu, 18 Apr 2024 22:41:15 +0000 Subject: [PATCH 4/4] Workspace: Support ssh in kubernetes --- challenge/entrypoint.sh | 5 +++ docker-compose.yml | 5 ++- sshd/Dockerfile | 19 +++++------ sshd/auth.py | 37 ++++++++++++---------- sshd/enter.py | 70 ++++++++++++++++++++++++----------------- 5 files changed, 80 insertions(+), 56 deletions(-) diff --git a/challenge/entrypoint.sh b/challenge/entrypoint.sh index 96f07420e..172938a95 100755 --- a/challenge/entrypoint.sh +++ b/challenge/entrypoint.sh @@ -3,6 +3,11 @@ mkdir -p /run/dojo exec >/run/dojo/entrypoint.log 2>&1 +for var in $(env | grep -o '^KUBERNETES[^=]*') +do + unset "$var" +done + echo "$DOJO_FLAG" > /flag unset DOJO_FLAG diff --git a/docker-compose.yml b/docker-compose.yml index 628283686..b9f1bb228 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,6 +17,7 @@ services: environment: - 
K3S_TOKEN=${K3S_TOKEN:?err} - K3S_KUBECONFIG_OUTPUT=/output/kube.yaml + - K3S_KUBECONFIG_MODE=644 volumes: - ./data/kube/kubeconfig:/output - ./data/kube/node/server:/etc/rancher/node @@ -214,9 +215,11 @@ services: container_name: sshd build: context: ./sshd + environment: + - KUBECONFIG=/var/kubeconfig/kube.yaml volumes: - ./data/ssh_host_keys:/etc/ssh:ro - - /var/run/docker.sock:/var/run/docker.sock:ro + - ./data/kube/kubeconfig:/var/kubeconfig:ro ports: - "22:22" diff --git a/sshd/Dockerfile b/sshd/Dockerfile index 77ca7c8bb..12ad982b5 100644 --- a/sshd/Dockerfile +++ b/sshd/Dockerfile @@ -1,20 +1,21 @@ FROM alpine:latest RUN apk add --no-cache \ + build-base \ python3 \ + python3-dev \ py3-pip \ + mariadb-dev \ openssh-server-pam \ - docker-cli + kubectl -RUN pip3 install --break-system-packages docker +RUN pip3 install --break-system-packages \ + kubernetes \ + mysqlclient \ + pyyaml -RUN delgroup ping && \ - addgroup -g 999 docker && \ - adduser -S hacker -G docker -s /bin/sh && \ - mkdir -p /home/hacker/.docker && \ - echo '{ "detachKeys": "ctrl-q,ctrl-q" }' > /home/hacker/.docker/config.json - -RUN mkdir -p /run/sshd +RUN adduser -S hacker -s /bin/sh && \ + mkdir -p /run/sshd WORKDIR /opt/sshd COPY . . 
diff --git a/sshd/auth.py b/sshd/auth.py index ebafbc534..ebdf43924 100755 --- a/sshd/auth.py +++ b/sshd/auth.py @@ -1,35 +1,38 @@ #!/usr/bin/env python3 -import sys import pathlib +import sys -import docker - +from MySQLdb import connect, Error def error(msg): print(msg, file=sys.stderr) exit(1) - def main(): enter_path = pathlib.Path(__file__).parent.resolve() / "enter.py" - client = docker.from_env() + config = dict(user="ctfd", passwd="ctfd", host="db", db="ctfd") try: - container = client.containers.get("db") - except docker.errors.NotFound: - error("Error: ctfd is not running!") - - result = container.exec_run( - "mysql -pctfd -Dctfd -sNe 'select value, user_id from ssh_keys;'" - ) - if result.exit_code != 0: - error(f"Error: db query exited with code '{result.exit_code}'") - - for row in result.output.strip().split(b"\n"): - key, user_id = row.decode().split("\t") + db = connect(**config) + cursor = db.cursor() + except Error as e: + error(f"Error: Failed to connect to database: {e}") + + try: + cursor.execute("SELECT value, user_id FROM ssh_keys;") + rows = cursor.fetchall() + except Error as e: + error(f"Error: DB query failed: {e}") + + if not rows: + error("Error: No data returned from query") + + for key, user_id in rows: print(f'command="{enter_path} user_{user_id}" {key}') + cursor.close() + db.close() if __name__ == "__main__": main() diff --git a/sshd/enter.py b/sshd/enter.py index 00096aaff..919865711 100755 --- a/sshd/enter.py +++ b/sshd/enter.py @@ -1,17 +1,32 @@ #!/usr/bin/env python3 -import sys import os +import pathlib +import sys import time -import docker +import yaml +from kubernetes import client, config as kube_config +from kubernetes.client.rest import ApiException + +KUBECONFIG_PATH = pathlib.Path("/var/kubeconfig/kube.yaml") +KUBE_CONFIG_DEFAULT_PATH = pathlib.Path(os.path.expanduser(kube_config.KUBE_CONFIG_DEFAULT_LOCATION)) +kube_config_dict = yaml.safe_load(open(KUBECONFIG_PATH, "r")) +for cluster in kube_config_dict["clusters"]: + 
if cluster["name"] == "default": + cluster["cluster"]["server"] = "https://kube-server:6443" +KUBE_CONFIG_DEFAULT_PATH.parent.mkdir(parents=True, exist_ok=True) +kube_config.load_kube_config_from_dict(kube_config_dict) + +if not KUBE_CONFIG_DEFAULT_PATH.exists(): + yaml.dump(kube_config_dict, KUBE_CONFIG_DEFAULT_PATH.open("w")) def main(): original_command = os.getenv("SSH_ORIGINAL_COMMAND") tty = os.getenv("SSH_TTY") is not None simple = bool(not tty or original_command) - + def print(*args, **kwargs): if simple: return @@ -21,34 +36,32 @@ def print(*args, **kwargs): if len(sys.argv) != 2: print(f"{sys.argv[0]} <container_name>") exit(1) - container_name = sys.argv[1] + pod_name = sys.argv[1] - client = docker.from_env() + api_instance = client.CoreV1Api() try: - container = client.containers.get(container_name) - except docker.errors.NotFound: + pod = api_instance.read_namespaced_pod(pod_name, "default") + status = pod.status.phase + assert pod.status.phase not in ["Succeeded", "Failed", "Unknown"] + except ApiException: + status = "NotFound" + if status in ["NotFound", "Succeeded", "Failed", "Unknown"]: print("No active challenge session; start a challenge!") exit(1) attempts = 0 while attempts < 30: try: - container = client.containers.get(container_name) - status = container.status - except docker.errors.NotFound: - status = "uninitialized" - - if status == "running": - try: - container.get_archive("/opt/pwn.college/.initialized") - except docker.errors.NotFound: - status = "initializing" - - if status != "running": + pod = api_instance.read_namespaced_pod(pod_name, "default") + status = pod.status.phase + except ApiException as e: + status = "NotFound" + + if status not in ["Running"]: attempts += 1 - print("\033c", end="") # Clear the terminal when the user opens a new chall.
- print("\r", " " * 80, f"\rConnecting -- instance status: {status}", end="") + print("\033c", end="") + print("\r", " " * 80, f"\rConnecting -- status: {status}", end="") time.sleep(1) continue @@ -56,19 +69,18 @@ def print(*args, **kwargs): print("\r", " " * 80, "\rConnected!") if not os.fork(): - command = ["/bin/bash", "-c", original_command] if original_command else ["/bin/bash"] - os.execve( - "/usr/bin/docker", + command = ["/bin/sh", "-c", original_command] if original_command else ["/bin/bash"] + os.execv( + "/usr/bin/kubectl", [ - "docker", + "kubectl", "exec", "-it" if tty else "-i", - container_name, + "-q", + pod_name, + "--", *command, ], - { - "HOME": os.environ["HOME"], - }, ) else: