From 65fa357ac01933e2de6df0eb4fc5a339e5429043 Mon Sep 17 00:00:00 2001
From: Will Clark
Date: Tue, 10 Sep 2024 09:05:37 +0100
Subject: [PATCH] deploy commander using helm (#550)

* deploy commander using helm

* teardown commanders using helm, fix status

* stop scenario faster

* stop warnet faster

* fix scenarios_test

Use proper scenarios delete function
---
 resources/charts/commander/.helmignore        |  23 ++
 resources/charts/commander/Chart.yaml         |  24 ++
 .../charts/commander/templates/NOTES.txt      |   1 +
 .../charts/commander/templates/_helpers.tpl   |  60 +++++
 .../charts/commander/templates/configmap.yaml |  17 ++
 resources/charts/commander/templates/pod.yaml |  32 +++
 resources/charts/commander/values.yaml        |  78 +++++++
 src/warnet/constants.py                       |   1 +
 src/warnet/control.py                         | 208 +++++++++---------
 test/scenarios_test.py                        |   4 +-
 10 files changed, 339 insertions(+), 109 deletions(-)
 create mode 100644 resources/charts/commander/.helmignore
 create mode 100644 resources/charts/commander/Chart.yaml
 create mode 100644 resources/charts/commander/templates/NOTES.txt
 create mode 100644 resources/charts/commander/templates/_helpers.tpl
 create mode 100644 resources/charts/commander/templates/configmap.yaml
 create mode 100644 resources/charts/commander/templates/pod.yaml
 create mode 100644 resources/charts/commander/values.yaml

diff --git a/resources/charts/commander/.helmignore b/resources/charts/commander/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/resources/charts/commander/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/resources/charts/commander/Chart.yaml b/resources/charts/commander/Chart.yaml
new file mode 100644
index 000000000..202456e92
--- /dev/null
+++ b/resources/charts/commander/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: commander
+description: A Helm chart for a commander
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: 0.1.0
diff --git a/resources/charts/commander/templates/NOTES.txt b/resources/charts/commander/templates/NOTES.txt
new file mode 100644
index 000000000..29639a44e
--- /dev/null
+++ b/resources/charts/commander/templates/NOTES.txt
@@ -0,0 +1 @@
+Commander beginning their mission.
diff --git a/resources/charts/commander/templates/_helpers.tpl b/resources/charts/commander/templates/_helpers.tpl
new file mode 100644
index 000000000..9383f0ff9
--- /dev/null
+++ b/resources/charts/commander/templates/_helpers.tpl
@@ -0,0 +1,60 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "commander.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "commander.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "commander.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "commander.labels" -}}
+helm.sh/chart: {{ include "commander.chart" . }}
+{{ include "commander.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Values.podLabels }}
+{{ toYaml . }}
+{{- end }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "commander.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "commander.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "commander.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "commander.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/resources/charts/commander/templates/configmap.yaml b/resources/charts/commander/templates/configmap.yaml
new file mode 100644
index 000000000..9c45ea0d2
--- /dev/null
+++ b/resources/charts/commander/templates/configmap.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "commander.fullname" . }}-scenario
+  labels:
+    {{- include "commander.labels" . | nindent 4 }}
+binaryData:
+  scenario.py: {{ .Values.scenario }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "commander.fullname" . }}-warnet
+  labels:
+    {{- include "commander.labels" . | nindent 4 }}
+binaryData:
+  warnet.json: {{ .Values.warnet }}
diff --git a/resources/charts/commander/templates/pod.yaml b/resources/charts/commander/templates/pod.yaml
new file mode 100644
index 000000000..94c79205f
--- /dev/null
+++ b/resources/charts/commander/templates/pod.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ include "commander.fullname" . }}
+  labels:
+    {{- include "commander.labels" . | nindent 4 }}
+    app: {{ include "commander.name" . }}
+    mission: commander
+spec:
+  restartPolicy: {{ .Values.restartPolicy }}
+  containers:
+    - name: {{ .Chart.Name }}
+      image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+      imagePullPolicy: {{ .Values.image.pullPolicy }}
+      command: ["/bin/sh", "-c"]
+      args:
+        - |
+          python3 /scenario.py {{ .Values.args }}
+      volumeMounts:
+        - name: scenario
+          mountPath: /scenario.py
+          subPath: scenario.py
+        - name: warnet
+          mountPath: /warnet.json
+          subPath: warnet.json
+  volumes:
+    - name: scenario
+      configMap:
+        name: {{ include "commander.fullname" . }}-scenario
+    - name: warnet
+      configMap:
+        name: {{ include "commander.fullname" . }}-warnet
diff --git a/resources/charts/commander/values.yaml b/resources/charts/commander/values.yaml
new file mode 100644
index 000000000..fc7e8233d
--- /dev/null
+++ b/resources/charts/commander/values.yaml
@@ -0,0 +1,78 @@
+# Default values for commander.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+namespace: warnet
+
+restartPolicy: Never
+
+image:
+  repository: bitcoindevproject/warnet-commander
+  pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: "latest"
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+podLabels:
+  app: "warnet"
+  mission: "commander"
+
+podSecurityContext: {}
+
+securityContext: {}
+
+service:
+  type: ClusterIP
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# livenessProbe:
+#   exec:
+#     command:
+#       - pidof
+#       - commander
+#   failureThreshold: 3
+#   initialDelaySeconds: 5
+#   periodSeconds: 5
+#   successThreshold: 1
+#   timeoutSeconds: 1
+# readinessProbe:
+#   failureThreshold: 1
+#   periodSeconds: 1
+#   successThreshold: 1
+#   tcpSocket:
+#     port: 2323
+#   timeoutSeconds: 1
+
+# Additional volumes on the output Deployment definition.
+volumes: []
+# - name: foo
+#   secret:
+#     secretName: mysecret
+#     optional: false
+
+# Additional volumeMounts on the output Deployment definition.
+volumeMounts: []
+# - name: foo
+#   mountPath: "/etc/foo"
+#   readOnly: true
+
+port:
+
+scenario: ""
+
+warnet: ""
+
+args: ""
diff --git a/src/warnet/constants.py b/src/warnet/constants.py
index aa53b3484..bdd9dce9d 100644
--- a/src/warnet/constants.py
+++ b/src/warnet/constants.py
@@ -30,6 +30,7 @@
 # Helm charts
 BITCOIN_CHART_LOCATION = str(CHARTS_DIR.joinpath("bitcoincore"))
 FORK_OBSERVER_CHART = str(CHARTS_DIR.joinpath("fork-observer"))
+COMMANDER_CHART = str(CHARTS_DIR.joinpath("commander"))
 NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces")
 DEFAULT_NETWORK = Path("6_node_bitcoin")
 DEFAULT_NAMESPACES = Path("two_namespaces_two_users")
diff --git a/src/warnet/control.py b/src/warnet/control.py
index fa4679b77..203130f80 100644
--- a/src/warnet/control.py
+++ b/src/warnet/control.py
@@ -1,20 +1,20 @@
+import base64
 import json
-import os
-import tempfile
+import subprocess
 import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from pathlib import Path
 
 import click
 import inquirer
-import yaml
 from inquirer.themes import GreenPassion
 from rich import print
 from rich.console import Console
 from rich.prompt import Confirm, Prompt
 from rich.table import Table
 
+from .constants import COMMANDER_CHART
 from .k8s import (
-    apply_kubernetes_yaml,
-    delete_namespace,
     get_default_namespace,
     get_mission,
     get_pods,
@@ -77,16 +77,27 @@ def stop(scenario_name):
 
 
 def stop_scenario(scenario_name):
-    """Stop a single scenario"""
-    cmd = f"kubectl delete pod {scenario_name}"
+    """Stop a single scenario using Helm"""
+    # Stop the pod immediately (faster than uninstalling)
+    cmd = f"kubectl delete pod {scenario_name} --grace-period=0 --force"
     if stream_command(cmd):
         console.print(f"[bold green]Successfully stopped scenario: {scenario_name}[/bold green]")
     else:
         console.print(f"[bold red]Failed to stop scenario: {scenario_name}[/bold red]")
 
+    # Then uninstall via helm (non-blocking)
+    namespace = get_default_namespace()
+    command = f"helm uninstall {scenario_name} --namespace {namespace} --wait=false"
+
+    # Run the helm uninstall command in the background
+    subprocess.Popen(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+    console.print(
+        f"[bold yellow]Initiated helm uninstall for release: {scenario_name}[/bold yellow]"
+    )
+
 
 def stop_all_scenarios(scenarios):
-    """Stop all active scenarios"""
+    """Stop all active scenarios using Helm"""
     with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"):
         for scenario in scenarios:
             stop_scenario(scenario)
@@ -95,8 +106,8 @@
 
 def list_active_scenarios():
     """List all active scenarios"""
-    commanders = get_mission("commander")
-    if not commanders:
+    active_scenarios = get_active_scenarios()
+    if not active_scenarios:
         print("No active scenarios found.")
         return
 
@@ -105,43 +116,53 @@
     table.add_column("Name", style="cyan")
     table.add_column("Status", style="green")
 
-    for commander in commanders:
-        table.add_row(commander.metadata.name, commander.status.phase.lower())
+    for scenario in active_scenarios:
+        table.add_row(scenario, "deployed")
 
     console.print(table)
 
 
 @click.command()
 def down():
-    """Bring down a running warnet"""
-    with console.status("[bold yellow]Bringing down the warnet...[/bold yellow]"):
-        # Delete warnet-logging namespace
-        if delete_namespace("warnet-logging"):
-            console.print("[green]Warnet logging deleted[/green]")
-        else:
-            console.print("[red]Warnet logging NOT deleted[/red]")
-
-    # Uninstall tanks
-    tanks = get_mission("tank")
-    with console.status("[yellow]Uninstalling tanks...[/yellow]"):
-        for tank in tanks:
-            cmd = f"helm uninstall {tank.metadata.name} --namespace {get_default_namespace()}"
-            if stream_command(cmd):
-                console.print(f"[green]Uninstalled tank: {tank.metadata.name}[/green]")
-            else:
-                console.print(f"[red]Failed to uninstall tank: {tank.metadata.name}[/red]")
-
-    # Clean up scenarios and other pods
-    pods = get_pods()
-    with console.status("[yellow]Cleaning up remaining pods...[/yellow]"):
+    """Bring down a running warnet quickly"""
+    console.print("[bold yellow]Bringing down the warnet...[/bold yellow]")
+
+    namespaces = [get_default_namespace(), "warnet-logging"]
+
+    def uninstall_release(namespace, release_name):
+        cmd = f"helm uninstall {release_name} --namespace {namespace} --wait=false"
+        subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+        return f"Initiated uninstall for: {release_name} in namespace {namespace}"
+
+    def delete_pod(pod_name, namespace):
+        cmd = f"kubectl delete pod --ignore-not-found=true {pod_name} -n {namespace} --grace-period=0 --force"
+        subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+        return f"Initiated deletion of pod: {pod_name} in namespace {namespace}"
+
+    with ThreadPoolExecutor(max_workers=10) as executor:
+        futures = []
+
+        # Uninstall Helm releases
+        for namespace in namespaces:
+            command = f"helm list --namespace {namespace} -o json"
+            result = run_command(command)
+            if result:
+                releases = json.loads(result)
+                for release in releases:
+                    futures.append(executor.submit(uninstall_release, namespace, release["name"]))
+
+        # Delete remaining pods
+        pods = get_pods()
         for pod in pods.items:
-            cmd = f"kubectl delete pod --ignore-not-found=true {pod.metadata.name} -n {get_default_namespace()}"
-            if stream_command(cmd):
-                console.print(f"[green]Deleted pod: {pod.metadata.name}[/green]")
-            else:
-                console.print(f"[red]Failed to delete pod: {pod.metadata.name}[/red]")
+            futures.append(executor.submit(delete_pod, pod.metadata.name, pod.metadata.namespace))
+
+        # Wait for all tasks to complete and print results
+        for future in as_completed(futures):
+            console.print(f"[yellow]{future.result()}[/yellow]")
 
-    console.print("[bold green]Warnet has been brought down.[/bold green]")
+    console.print("[bold yellow]Teardown process initiated for all components.[/bold yellow]")
+    console.print("[bold yellow]Note: Some processes may continue in the background.[/bold yellow]")
+    console.print("[bold green]Warnet teardown process completed.[/bold green]")
 
 
 def get_active_network(namespace):
@@ -163,11 +184,11 @@
 @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED)
 def run(scenario_file: str, additional_args: tuple[str]):
     """Run a scenario from a file"""
-    scenario_path = os.path.abspath(scenario_file)
-    scenario_name = os.path.splitext(os.path.basename(scenario_path))[0]
+    scenario_path = Path(scenario_file).resolve()
+    scenario_name = scenario_path.stem
 
-    with open(scenario_path) as file:
-        scenario_text = file.read()
+    with open(scenario_path, "rb") as file:
+        scenario_data = base64.b64encode(file.read()).decode()
 
     name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}"
     namespace = get_default_namespace()
@@ -184,72 +205,45 @@
         }
         for tank in tankpods
     ]
-    kubernetes_objects = [
-        {
-            "apiVersion": "v1",
-            "kind": "ConfigMap",
-            "metadata": {
-                "name": "warnetjson",
"namespace": namespace, - }, - "data": {"warnet.json": json.dumps(tanks)}, - }, - { - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": "scenariopy", - "namespace": namespace, - }, - "data": {"scenario.py": scenario_text}, - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": name, - "namespace": namespace, - "labels": {"mission": "commander"}, - }, - "spec": { - "restartPolicy": "Never", - "containers": [ - { - "name": name, - "image": "bitcoindevproject/warnet-commander:latest", - "args": additional_args, - "volumeMounts": [ - { - "name": "warnetjson", - "mountPath": "warnet.json", - "subPath": "warnet.json", - }, - { - "name": "scenariopy", - "mountPath": "scenario.py", - "subPath": "scenario.py", - }, - ], - } - ], - "volumes": [ - {"name": "warnetjson", "configMap": {"name": "warnetjson"}}, - {"name": "scenariopy", "configMap": {"name": "scenariopy"}}, - ], - }, - }, - ] - with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: - yaml.dump_all(kubernetes_objects, temp_file) - temp_file_path = temp_file.name - if apply_kubernetes_yaml(temp_file_path): - print(f"Successfully started scenario: {scenario_name}") - print(f"Commander pod name: {name}") - else: - print(f"Failed to start scenario: {scenario_name}") + # Encode warnet data + warnet_data = base64.b64encode(json.dumps(tanks).encode()).decode() - os.unlink(temp_file_path) + try: + # Construct Helm command + helm_command = [ + "helm", + "upgrade", + "--install", + "--namespace", + namespace, + "--set", + f"fullnameOverride={name}", + "--set", + f"scenario={scenario_data}", + "--set", + f"warnet={warnet_data}", + ] + + # Add additional arguments + if additional_args: + helm_command.extend(["--set", f"args={' '.join(additional_args)}"]) + + helm_command.extend([name, COMMANDER_CHART]) + + # Execute Helm command + result = subprocess.run(helm_command, check=True, capture_output=True, text=True) + + if result.returncode == 0: + print(f"Successfully started scenario: {scenario_name}") + print(f"Commander pod name: {name}") + else: + print(f"Failed to start scenario: {scenario_name}") + print(f"Error: {result.stderr}") + + except subprocess.CalledProcessError as e: + print(f"Failed to start scenario: {scenario_name}") + print(f"Error: {e.stderr}") @click.command() diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 01521ff92..8be7f4a14 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -5,7 +5,7 @@ from test_base import TestBase -from warnet.k8s import delete_pod +from warnet.control import stop_scenario from warnet.process import run_command from warnet.status import _get_active_scenarios as scenarios_active @@ -80,7 +80,7 @@ def stop_scenario(self): running = scenarios_active() assert len(running) == 1, f"Expected one running scenario, got {len(running)}" assert running[0]["status"] == "running", "Scenario should be running" - delete_pod(running[0]["name"]) + stop_scenario(running[0]["name"]) self.wait_for_predicate(self.check_scenario_stopped) def check_scenario_stopped(self):