From af71a8189074f0e14554e0789dc5aa735a9a4d3b Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 22 Sep 2021 10:19:43 -0400 Subject: [PATCH 01/27] Initial commit * install hook has MVP functionality. It installs correctly and monitors the stateful until it is ready. Does not monitor/check other objs * includes basic test coverage Todo: * update-status hook * remove hook * all TODO items in code --- .flake8 | 9 + .github/workflows/integrate.yaml | 75 +++ .github/workflows/publish.yaml | 45 ++ .gitignore | 8 + .jujuignore | 3 + charmcraft.yaml | 24 + metadata.yaml | 22 + requirements.txt | 4 + src/charm.py | 131 +++++ test-integration-requirements.txt | 1 + test-requirements.txt | 4 + tests/integration/test_charm.py | 36 ++ .../input_full/metacontroller-crds-v1.yaml | 534 ++++++++++++++++++ .../input_full/metacontroller-rbac.yaml | 72 +++ .../input_full/metacontroller.yaml | 28 + .../input_simple/manifests.yaml | 17 + tests/unit/render_manifests/result_full.yaml | 17 + .../unit/render_manifests/result_simple.yaml | 17 + tests/unit/test_charm.py | 97 ++++ tox.ini | 31 + 20 files changed, 1175 insertions(+) create mode 100644 .flake8 create mode 100644 .github/workflows/integrate.yaml create mode 100644 .github/workflows/publish.yaml create mode 100644 .gitignore create mode 100644 .jujuignore create mode 100644 charmcraft.yaml create mode 100644 metadata.yaml create mode 100644 requirements.txt create mode 100755 src/charm.py create mode 100644 test-integration-requirements.txt create mode 100644 test-requirements.txt create mode 100644 tests/integration/test_charm.py create mode 100644 tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml create mode 100644 tests/unit/render_manifests/input_full/metacontroller-rbac.yaml create mode 100644 tests/unit/render_manifests/input_full/metacontroller.yaml create mode 100644 tests/unit/render_manifests/input_simple/manifests.yaml create mode 100644 tests/unit/render_manifests/result_full.yaml create mode 100644 tests/unit/render_manifests/result_simple.yaml create mode 100644 tests/unit/test_charm.py create mode 100644 tox.ini diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..8ef84fc --- /dev/null +++ b/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/.github/workflows/integrate.yaml b/.github/workflows/integrate.yaml new file mode 100644 index 0000000..c8d1e1e --- /dev/null +++ b/.github/workflows/integrate.yaml @@ -0,0 +1,75 @@ +name: CI + +on: + push: + branches: + - main + pull_request: + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Install dependencies + run: | + sudo apt update + sudo apt install tox + - name: Lint code + run: tox -vve lint + + unit: + name: Unit Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install dependencies + run: pip install tox + - name: Run unit tests + run: tox -vve unit + + integration: + name: Integration Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@main + with: + provider: microk8s + + # TODO: Remove once the actions-operator does this automatically + - name: Configure kubectl + run: | + sg microk8s -c "microk8s config > ~/.kube/config" + + - name: Test + run: | + sg microk8s -c "tox -vve integration -- --model testing" + + # On failure, capture debugging resources + - name: Get all + run: 
kubectl get all -A + if: failure() + + - name: Describe deployments + run: kubectl describe deployments -A + if: failure() + + - name: Describe replicasets + run: kubectl describe replicasets -A + if: failure() + + - name: Get juju status + run: juju status + if: failure() + + - name: Get application logs + run: kubectl logs -n testing --tail 1000 -ljuju-app=metacontroller-operator + if: failure() + + - name: Get application operator logs + run: kubectl logs -n testing --tail 1000 -ljuju-operator=metacontroller-operator + if: failure() diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 0000000..25e0251 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,45 @@ +name: Publish + +on: + push: + branches: + # TODO: Set this to 'main' when ready to publish + - master + +jobs: + charm: + name: Charm + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Install dependencies + run: | + set -eux + sudo snap install charm --classic + sudo pip3 install charmcraft= + sudo snap install yq + + - name: Publish charm + env: + CHARMSTORE_CREDENTIAL: ${{ secrets.CHARMSTORE_CREDENTIAL }} + run: | + set -eux + echo $CHARMSTORE_CREDENTIAL > ~/.go-cookies + + CS_URL="cs:~kubeflow-charmers/metacontroller-operator" + + # TODO: Make this loop over all resources + IMAGE=$(yq eval '.resources.noop.upstream-source' metadata.yaml) + charmcraft build + docker pull $IMAGE + charm push ./*.charm $CS_URL --resource oci-image=$IMAGE + + REVISION=$(charm show $CS_URL --channel unpublished --format yaml | yq e .id-revision.Revision -) + OCI_VERSION=$(charm list-resources $CS_URL --format yaml --channel unpublished | yq e .0.revision -) + charm release \ + --channel edge \ + --resource oci-image-$OCI_VERSION \ + $CS_URL-$REVISION diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..88985fe --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +venv/ +build/ +.tox/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/.jujuignore b/.jujuignore new file mode 100644 index 0000000..6ccd559 --- /dev/null +++ b/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/charmcraft.yaml b/charmcraft.yaml new file mode 100644 index 0000000..39408c9 --- /dev/null +++ b/charmcraft.yaml @@ -0,0 +1,24 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" +parts: +# TODO/DEBUG: Including the below kubectl and charm parts USUALLY causes an infinite loop during `charmcraft pack`. +# Omitting for now... + kubectl: + plugin: dump + source: . + override-pull: | + curl -sSLO https://dl.k8s.io/release/v1.22.1/bin/linux/amd64/kubectl + chmod +x kubectl + build-packages: + - curl +# charm: +# prime: +# - ./files/manifests/*.yaml diff --git a/metadata.yaml b/metadata.yaml new file mode 100644 index 0000000..682fb7c --- /dev/null +++ b/metadata.yaml @@ -0,0 +1,22 @@ +name: metacontroller-operator +display-name: Metacontroller-operator +description: | + Metacontroller is an add-on for Kubernetes to make it easier to write and deploy custom controllers, + reducing the need for boilerplate code. + + https://github.com/metacontroller/metacontroller +summary: An add-on for Kubernetes to write and deploy custom controllers from simple scripts + +# Do I need this at all? This would be if I'm maintaining things via pebble? 
+containers: + noop: + resource: noop + +resources: + noop: + type: oci-image + description: '' + upstream-source: alpine + +# TODO: I don't really need this oci-image, everything gets downloaded by k8s. This is really a config arg. Will refactor +# TODO: Try with recent metacontroller. This is 3 years old... Kubeflow Pipelines has a merged change with update diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..23f7285 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +ops >= 1.2.0 +jinja2 < 4 +oci-image<1.1.0 +kubernetes < 19.0.0 diff --git a/src/charm.py b/src/charm.py new file mode 100755 index 0000000..aa8f190 --- /dev/null +++ b/src/charm.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +import glob +import subprocess +import time + +from jinja2 import Environment, FileSystemLoader +import logging +from pathlib import Path + +from kubernetes import client, config +import kubernetes.client.exceptions +from oci_image import OCIImageResource, OCIImageResourceError +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.main import main +from ops.model import ActiveStatus, MaintenanceStatus + + +class MetacontrollerOperatorCharm(CharmBase): + """Charm the Metacontroller""" + + _stored = StoredState() + + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.noop_pebble_ready, self._noop_pebble_ready) + self.framework.observe(self.on.install, self._install) + # self.framework.observe(self.on.config_changed, self._reconcile) + + self.logger = logging.getLogger(__name__) + # TODO: Fix file imports and move ./src/files back to ./files + self._manifests_file_root = None + self.manifest_file_root = "./src/files/manifests/" + self.image = OCIImageResource(self, "oci-image") + + def _noop_pebble_ready(self, _): + self.logger.info("noop_pebble_ready fired") + + def _install(self, event): + self.logger.info(f"type(event) = {event}") + self.logger.info("Installing by instantiating Kubernetes objects") + self.unit.status = MaintenanceStatus("Instantiating Kubernetes objects") + _, manifests_str = self._render_manifests() + + self.logger.info("Applying manifests") + subprocess.run(["./kubectl", "apply", "-f-"], input=manifests_str.encode("utf-8"), check=True) + + # TODO: Encapsulate this to reuse in update-status + self.logger.info("Waiting for installed Kubernetes objects to be operational") + attempts = 0 + running = False + max_attempts = 20 + while (attempts < max_attempts) and (running is False): + self.logger.info(f"validation attempt {attempts}") + self.logger.info(f"polling statefulset") + try: + running = validate_statefulset(name="metacontroller", namespace=self.model.name) + self.logger.info(f"found statefulset running = {running}") + except kubernetes.client.exceptions.ApiException: + self.logger.info(f"got ApiException when looking for statefulset (likely does not exist)") + + # TODO: Add checks for other CRDs/services/etc (or, something generic that runs off the manifest) + + # TODO: Sleep on wins? 
+ attempts += 1 + time.sleep(10) + + self.logger.info("Done waiting for objects") + + self.logger.info("Manifests application complete") + self.unit.status = ActiveStatus() + + def _render_manifests(self) -> (list, str): + # Load and render all templates + self.logger.info(f"Rendering templates from {self.manifest_file_root}") + jinja_env = Environment(loader=FileSystemLoader(self.manifest_file_root)) + manifests = [] + for f in self._get_manifest_files(): + self.logger.info(f"Rendering template {f}") + f = Path(f).relative_to(self.manifest_file_root) + template = jinja_env.get_template(str(f)) + manifests.append( + template.render( + namespace=self.model.name, + image="metacontroller/metacontroller:v0.3.0" + ) + ) + + # Combine templates into a single string + manifests_str = "\n---\n".join(manifests) + + logging.info(f"rendered manifests_str = {manifests_str}") + + return manifests, manifests_str + + @property + def manifest_file_root(self): + return self._manifests_file_root + + @manifest_file_root.setter + def manifest_file_root(self, value): + self._manifests_file_root = Path(value) + + def _get_manifest_files(self) -> list: + """Returns a list of all manifest files""" + return glob.glob(str(self.manifest_file_root / "*.yaml")) + + +def init_app_client(app_client=None): + if app_client is None: + config.load_incluster_config() + app_client = client.AppsV1Api() + return app_client + + +def validate_statefulset(name, namespace, app_client=None): + """Returns true if a statefulset has its desired number of ready replicas, else False + + Raises a kubernetes.client.exceptions.ApiException from read_namespaced_stateful_set() + if the object cannot be found + """ + app_client = init_app_client(app_client) + ss = app_client.read_namespaced_stateful_set(name, namespace) + if ss.status.ready_replicas == ss.spec.replicas: + return True + else: + return False + + +if __name__ == "__main__": + main(MetacontrollerOperatorCharm) diff --git a/test-integration-requirements.txt b/test-integration-requirements.txt new file mode 100644 index 0000000..ed950d3 --- /dev/null +++ b/test-integration-requirements.txt @@ -0,0 +1 @@ +pytest-operator diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..926063f --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,4 @@ +-r requirements.txt +coverage +pytest +pytest-mock diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py new file mode 100644 index 0000000..dfdce72 --- /dev/null +++ b/tests/integration/test_charm.py @@ -0,0 +1,36 @@ +import logging +from pathlib import Path +import pytest +import yaml + +from pytest_operator.plugin import OpsTest + +logger = logging.getLogger(__name__) + +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +APP_NAME = "metacontroller-operator" + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test: OpsTest): + logger.info("Building charm") + built_charm_path = await ops_test.build_charm("./") + logger.info(f"Built charm {built_charm_path}") + + resources = {} + for resource_name, resource_data in METADATA["resources"].items(): + image_path = resource_data["upstream-source"] + resources[resource_name] = image_path + + logger.info(f"Deploying charm {APP_NAME} using resources '{resources}'") + + await ops_test.model.deploy( + entity_url=built_charm_path, + application_name=APP_NAME, + resources=resources + ) + + # TODO: There are more complete ways to do this - see other charms. This passes at incorrect times. 
+ await ops_test.model.wait_for_idle(timeout=60 * 60) + + # TODO: confirm it actually deployed correctly diff --git a/tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml b/tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml new file mode 100644 index 0000000..71892f4 --- /dev/null +++ b/tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml @@ -0,0 +1,534 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "api-approved.kubernetes.io": "unapproved, request not yet submitted" + name: compositecontrollers.metacontroller.k8s.io +spec: + group: metacontroller.k8s.io + names: + kind: CompositeController + listKind: CompositeControllerList + plural: compositecontrollers + shortNames: + - cc + - cctl + singular: compositecontroller + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + childResources: + items: + properties: + apiVersion: + type: string + resource: + type: string + updateStrategy: + properties: + method: + type: string + statusChecks: + properties: + conditions: + items: + properties: + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + type: object + type: object + required: + - apiVersion + - resource + type: object + type: array + generateSelector: + type: boolean + hooks: + properties: + customize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + finalize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + postUpdateChild: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + preUpdateChild: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + sync: + 
properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + type: object + parentResource: + properties: + apiVersion: + type: string + resource: + type: string + revisionHistory: + properties: + fieldPaths: + items: + type: string + type: array + type: object + required: + - apiVersion + - resource + type: object + resyncPeriodSeconds: + format: int32 + type: integer + required: + - parentResource + type: object + status: + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "api-approved.kubernetes.io": "unapproved, request not yet submitted" + name: controllerrevisions.metacontroller.k8s.io +spec: + group: metacontroller.k8s.io + names: + kind: ControllerRevision + listKind: ControllerRevisionList + plural: controllerrevisions + singular: controllerrevision + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + children: + items: + properties: + apiGroup: + type: string + kind: + type: string + names: + items: + type: string + type: array + required: + - apiGroup + - kind + - names + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + parentPatch: + type: object + required: + - metadata + - parentPatch + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "api-approved.kubernetes.io": "unapproved, request not yet submitted" + name: decoratorcontrollers.metacontroller.k8s.io +spec: + group: metacontroller.k8s.io + names: + kind: DecoratorController + listKind: DecoratorControllerList + plural: decoratorcontrollers + shortNames: + - dec + - decorators + singular: decoratorcontroller + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + attachments: + items: + properties: + apiVersion: + type: string + resource: + type: string + updateStrategy: + properties: + method: + type: string + type: object + required: + - apiVersion + - resource + type: object + type: array + hooks: + properties: + customize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + finalize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + sync: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + type: object + resources: + items: + properties: + annotationSelector: + properties: + matchAnnotations: + additionalProperties: + type: string + type: object + matchExpressions: + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + apiVersion: + type: string + labelSelector: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + resource: + type: string + required: + - apiVersion + - resource + type: object + type: array + resyncPeriodSeconds: + format: int32 + type: integer + required: + - resources + type: object + status: + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/tests/unit/render_manifests/input_full/metacontroller-rbac.yaml b/tests/unit/render_manifests/input_full/metacontroller-rbac.yaml new file mode 100644 index 0000000..f553ce7 --- /dev/null +++ b/tests/unit/render_manifests/input_full/metacontroller-rbac.yaml @@ -0,0 +1,72 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metacontroller + namespace: {{ namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metacontroller +rules: +- apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metacontroller +subjects: +- kind: ServiceAccount + name: metacontroller + namespace: {{ namespace }} +roleRef: + kind: ClusterRole + name: metacontroller + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aggregate-metacontroller-view + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: +- apiGroups: + - metacontroller.k8s.io + resources: + - compositecontrollers + - controllerrevisions + - decoratorcontrollers + verbs: + - get + - list + - watch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aggregate-metacontroller-edit + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: +- apiGroups: + - metacontroller.k8s.io + resources: + - controllerrevisions + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/tests/unit/render_manifests/input_full/metacontroller.yaml b/tests/unit/render_manifests/input_full/metacontroller.yaml new file mode 100644 index 0000000..eca0ea6 --- /dev/null +++ b/tests/unit/render_manifests/input_full/metacontroller.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app.kubernetes.io/name: metacontroller + name: metacontroller + namespace: {{ namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: metacontroller + serviceName: "" + template: + metadata: + labels: + app.kubernetes.io/name: metacontroller + spec: + serviceAccountName: metacontroller + containers: + - name: metacontroller + image: {{ image }} + command: ["/usr/bin/metacontroller"] + args: + - --zap-log-level=4 + - --discovery-interval=20s + volumeClaimTemplates: [] diff --git 
a/tests/unit/render_manifests/input_simple/manifests.yaml b/tests/unit/render_manifests/input_simple/manifests.yaml new file mode 100644 index 0000000..5171190 --- /dev/null +++ b/tests/unit/render_manifests/input_simple/manifests.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: metacontroller + namespace: {{ namespace }} +spec: + template: + spec: + containers: + - name: metacontroller + image: {{ image }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metacontroller + namespace: {{ namespace }} diff --git a/tests/unit/render_manifests/result_full.yaml b/tests/unit/render_manifests/result_full.yaml new file mode 100644 index 0000000..bcbe395 --- /dev/null +++ b/tests/unit/render_manifests/result_full.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: metacontroller + namespace: test-namespace +spec: + template: + spec: + containers: + - name: metacontroller + image: test-image +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metacontroller + namespace: test-namespace diff --git a/tests/unit/render_manifests/result_simple.yaml b/tests/unit/render_manifests/result_simple.yaml new file mode 100644 index 0000000..bcbe395 --- /dev/null +++ b/tests/unit/render_manifests/result_simple.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: metacontroller + namespace: test-namespace +spec: + template: + spec: + containers: + - name: metacontroller + image: test-image +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metacontroller + namespace: test-namespace diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py new file mode 100644 index 0000000..e7cb0ba --- /dev/null +++ b/tests/unit/test_charm.py @@ -0,0 +1,97 @@ +import logging +from pathlib import Path + +import pytest +import yaml + +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus + +from ops.testing import Harness +from charm import MetacontrollerOperatorCharm + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def harness(): + return Harness(MetacontrollerOperatorCharm) + + +def test_not_leader(harness): + harness.begin() + assert harness.charm.model.unit.status == WaitingStatus('Waiting for leadership') + + +def test_render_manifests(harness): + manifests_root = Path("./render_manifests/") + manifests_input_path = manifests_root / "input/" + manifests_rendered = manifests_root / "result.yaml" + + harness.add_oci_resource( + "oci-image", + { + "registrypath": "test-image", + "username": "", + "password": "", + }, + ) + harness.set_model_name("test-namespace") + + harness.begin() + harness.charm.manifest_file_root = manifests_input_path + logger.info("harness.charm: ") + logger.info(type(harness.charm)) + + assert harness.charm.manifest_file_root == manifests_input_path + + manifests, manifests_str = harness.charm._render_manifests() + logger.info(f"manifests_str: {manifests_str}") + expected_manifests_str = manifests_rendered.read_text() + + assert manifests_str.strip() == expected_manifests_str.strip() + + +@pytest.mark.parametrize( + "case", + [ + "simple", + "full", + ] +) +def test_install_minimal(harness, mocker, case): + subprocess_run = mocker.patch('subprocess.run') + + manifests_root = Path("./render_manifests/") + manifests_input_path = manifests_root / f"input_{case}/" + manifests_rendered = manifests_root / f"result_{case}.yaml" + + harness.add_oci_resource( + "oci-image", + { + "registrypath": "test-image", + "username": "", + "password": "", + }, + ) + 
harness.set_model_name("test-namespace") + + harness.begin() + harness.charm.manifest_file_root = manifests_input_path + + harness.charm._install("ignored_event") + logger.info(f"subprocess.run.call_args_list (after) = {subprocess_run.call_args_list}") + + expected_args = (['./kubectl', 'apply', '-f-'], ) + expected_kwarg_input = list(yaml.safe_load_all(manifests_rendered.read_text())) + + call_args = subprocess_run.call_args_list + actual_args = call_args[0].args + actual_kwarg_input = list(yaml.safe_load_all(call_args[0].kwargs['input'])) + + assert len(call_args) == 1 + + assert actual_args == expected_args + assert actual_kwarg_input == expected_kwarg_input + + +# TODO: Create test that "deploys" manifests then simulates the watchers diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..7c8e391 --- /dev/null +++ b/tox.ini @@ -0,0 +1,31 @@ +[flake8] +max-line-length = 100 + +[tox] +skipsdist = True + +[testenv] +setenv = + PYTHONPATH={toxinidir}/src:{toxinidir}/lib +deps = + -rrequirements.txt + -r test-requirements.txt + +[testenv:lint] +deps = + flake8 + black==20.8b1 +commands = + flake8 {toxinidir}/src {toxinidir}/tests + black --check {toxinidir}/src {toxinidir}/tests + +[testenv:unit] +commands = + pytest -vv --tb native --show-capture=no --log-cli-level=INFO {toxinidir}/tests/unit + +[testenv:integration] +deps = + {[testenv]deps} + -r test-integration-requirements.txt +commands = + pytest -vv --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration From 17010e70284366fb44f39a5524995bbcb8a55435 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 22 Sep 2021 11:36:20 -0400 Subject: [PATCH 02/27] feat: Add remove hook Hook fails because kubectl cannot access environment. Maybe juju sets up the remove hook env differently than install? --- src/charm.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/src/charm.py b/src/charm.py index aa8f190..1d96429 100755 --- a/src/charm.py +++ b/src/charm.py @@ -25,6 +25,7 @@ def __init__(self, *args): super().__init__(*args) self.framework.observe(self.on.noop_pebble_ready, self._noop_pebble_ready) self.framework.observe(self.on.install, self._install) + self.framework.observe(self.on.remove, self._remove) # self.framework.observe(self.on.config_changed, self._reconcile) self.logger = logging.getLogger(__name__) @@ -61,15 +62,34 @@ def _install(self, event): # TODO: Add checks for other CRDs/services/etc (or, something generic that runs off the manifest) - # TODO: Sleep on wins? - attempts += 1 - time.sleep(10) + if not running: + attempts += 1 + time.sleep(10) self.logger.info("Done waiting for objects") self.logger.info("Manifests application complete") self.unit.status = ActiveStatus() + def _remove(self, _): + """Remove charm""" + # TODO: How should we test this in our integration tests? + # Should I set a status or does Juju set one? + self.logger.info("Removing kubernetes objects") + + _, manifests_str = self._render_manifests() + completed_process = subprocess.run( + ["./kubectl", "delete", "-f-"], + input=manifests_str.encode("utf-8"), + stderr=subprocess.STDOUT, + stdout=subprocess.PIPE, + ) + if completed_process.returncode == 0: + self.logger.info("Kubernetes objects removed using kubectl") + else: + self.logger.error(f"Unable to remove kubernetes objects - there may be orphaned resources." 
+ f" kubectl exited with '{completed_process.stdout}'") + def _render_manifests(self) -> (list, str): # Load and render all templates self.logger.info(f"Rendering templates from {self.manifest_file_root}") From db1856ab670dd04a05c3379809b269dcecf88c17 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 22 Sep 2021 12:02:17 -0400 Subject: [PATCH 03/27] feat: encapsulate resource-check logic for reuse --- src/charm.py | 59 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/src/charm.py b/src/charm.py index 1d96429..cabe62e 100755 --- a/src/charm.py +++ b/src/charm.py @@ -48,28 +48,23 @@ def _install(self, event): # TODO: Encapsulate this to reuse in update-status self.logger.info("Waiting for installed Kubernetes objects to be operational") - attempts = 0 - running = False max_attempts = 20 - while (attempts < max_attempts) and (running is False): - self.logger.info(f"validation attempt {attempts}") - self.logger.info(f"polling statefulset") - try: - running = validate_statefulset(name="metacontroller", namespace=self.model.name) - self.logger.info(f"found statefulset running = {running}") - except kubernetes.client.exceptions.ApiException: - self.logger.info(f"got ApiException when looking for statefulset (likely does not exist)") - - # TODO: Add checks for other CRDs/services/etc (or, something generic that runs off the manifest) - - if not running: - attempts += 1 - time.sleep(10) - - self.logger.info("Done waiting for objects") - - self.logger.info("Manifests application complete") - self.unit.status = ActiveStatus() + for attempt in range(max_attempts): + self.logger.info(f"validation attempt {attempt}/{max_attempts}") + running = self._check_deployed_resources() + + if running is True: + self.logger.info("Resources detected as running") + self.logger.info("Install successful") + self.unit.status = ActiveStatus() + return + else: + sleeptime = 10 + self.logger.info(f"Sleeping {sleeptime}s") + time.sleep(sleeptime) + else: + self.unit.status = MaintenanceStatus("Some kubernetes resources missing/not ready") + return def _remove(self, _): """Remove charm""" @@ -113,6 +108,28 @@ def _render_manifests(self) -> (list, str): return manifests, manifests_str + def _check_deployed_resources(self, manifests=None): + """Check the status of all deployed resources, returning True if ok""" + # TODO: Add checks for other CRDs/services/etc (or, something generic that runs off the manifest) + # TODO: Check all resources automatically based on the manifest + if manifests is not None: + raise NotImplementedError("...") + # if manifests is None: + # manifests = self._render_manifests() + + resource_type = "statefulset" + name = "metacontroller" + namespace = self.model.name + self.logger.info(f"Checking {resource_type} {name} in {namespace}") + try: + running = validate_statefulset(name=name, namespace=namespace) + self.logger.info(f"found statefulset running = {running}") + except kubernetes.client.exceptions.ApiException: + self.logger.info(f"got ApiException when looking for statefulset (likely does not exist)") + running = False + + return running + @property def manifest_file_root(self): return self._manifests_file_root From 5e7d673c84a02f68e21e8fc5dde85beb2725fc17 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 22 Sep 2021 13:35:16 -0400 Subject: [PATCH 04/27] feat: add reconcile loop on update-status hook Current version will restore the state if it notices the statefulset is missing/incomplete --- src/charm.py | 27 
++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/src/charm.py b/src/charm.py index cabe62e..a8367da 100755 --- a/src/charm.py +++ b/src/charm.py @@ -26,6 +26,7 @@ def __init__(self, *args): self.framework.observe(self.on.noop_pebble_ready, self._noop_pebble_ready) self.framework.observe(self.on.install, self._install) self.framework.observe(self.on.remove, self._remove) + self.framework.observe(self.on.update_status, self._update_status) # self.framework.observe(self.on.config_changed, self._reconcile) self.logger = logging.getLogger(__name__) @@ -38,15 +39,21 @@ def _noop_pebble_ready(self, _): self.logger.info("noop_pebble_ready fired") def _install(self, event): - self.logger.info(f"type(event) = {event}") self.logger.info("Installing by instantiating Kubernetes objects") self.unit.status = MaintenanceStatus("Instantiating Kubernetes objects") _, manifests_str = self._render_manifests() self.logger.info("Applying manifests") - subprocess.run(["./kubectl", "apply", "-f-"], input=manifests_str.encode("utf-8"), check=True) - # TODO: Encapsulate this to reuse in update-status + completed_process = subprocess.run( + ["./kubectl", "apply", "-f-"], + input=manifests_str.encode("utf-8"), + stderr=subprocess.STDOUT, + stdout=subprocess.PIPE, + ) + self.logger.info(f"kubectl returned with code '{completed_process.returncode}'" + f" and output {completed_process.stdout}") + self.logger.info("Waiting for installed Kubernetes objects to be operational") max_attempts = 20 for attempt in range(max_attempts): @@ -66,6 +73,20 @@ def _install(self, event): self.unit.status = MaintenanceStatus("Some kubernetes resources missing/not ready") return + def _update_status(self, event): + self.logger.info(f"Comparing current state to desired state") + + running = self._check_deployed_resources() + if running is True: + self.logger.info("Resources are ok. Unit in ActiveStatus") + self.unit.status = ActiveStatus() + return + else: + self.logger.info("Resources are missing. Triggering install to reconcile resources") + self.unit.status = MaintenanceStatus("Missing kubernetes resources detected - reinstalling") + self._install(event) + return + def _remove(self, _): """Remove charm""" # TODO: How should we test this in our integration tests? 
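
The readiness polling built up in the patches above ultimately reduces to the validate_statefulset() comparison in src/charm.py: a StatefulSet is treated as ready when its reported ready replicas equal the desired replica count. A minimal, self-contained sketch of that check, exercised against a mocked Kubernetes AppsV1Api client so it runs without a cluster (the mock wiring below is illustrative only and not part of this patch series):

    # Sketch only: mirrors validate_statefulset() from src/charm.py, driven by a
    # mocked kubernetes AppsV1Api client instead of a live API server.
    from unittest.mock import MagicMock


    def validate_statefulset(name, namespace, app_client):
        # Ready when every desired replica reports ready.
        ss = app_client.read_namespaced_stateful_set(name, namespace)
        return ss.status.ready_replicas == ss.spec.replicas


    # Fake client returning a StatefulSet with 1/1 replicas ready.
    app_client = MagicMock()
    app_client.read_namespaced_stateful_set.return_value = MagicMock(
        status=MagicMock(ready_replicas=1),
        spec=MagicMock(replicas=1),
    )

    assert validate_statefulset("metacontroller", "testing", app_client) is True

Since the charm shells out to ./kubectl for apply/delete and only uses the Python kubernetes client for this readiness probe, the probe is the piece most amenable to unit testing with a fake client along these lines.
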
From 74fa19ac8958f78215fd1875a957afcb39849c64 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 22 Sep 2021 14:14:39 -0400 Subject: [PATCH 05/27] fix: tests --- src/charm.py | 47 +- tests/integration/test_charm.py | 7 +- .../input_full/metacontroller-crds-v1.yaml | 534 ------------------ .../input_full/metacontroller-rbac.yaml | 72 --- .../input_full/metacontroller.yaml | 28 - tests/unit/render_manifests/result_full.yaml | 17 - .../unit/render_manifests/result_simple.yaml | 2 +- tests/unit/test_charm.py | 53 +- 8 files changed, 59 insertions(+), 701 deletions(-) delete mode 100644 tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml delete mode 100644 tests/unit/render_manifests/input_full/metacontroller-rbac.yaml delete mode 100644 tests/unit/render_manifests/input_full/metacontroller.yaml delete mode 100644 tests/unit/render_manifests/result_full.yaml diff --git a/src/charm.py b/src/charm.py index a8367da..1a45539 100755 --- a/src/charm.py +++ b/src/charm.py @@ -9,11 +9,11 @@ from kubernetes import client, config import kubernetes.client.exceptions -from oci_image import OCIImageResource, OCIImageResourceError +from oci_image import OCIImageResource from ops.charm import CharmBase from ops.framework import StoredState from ops.main import main -from ops.model import ActiveStatus, MaintenanceStatus +from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus class MetacontrollerOperatorCharm(CharmBase): @@ -23,13 +23,20 @@ class MetacontrollerOperatorCharm(CharmBase): def __init__(self, *args): super().__init__(*args) + + if not self.unit.is_leader(): + self.model.unit.status = WaitingStatus("Waiting for leadership") + return + self.framework.observe(self.on.noop_pebble_ready, self._noop_pebble_ready) self.framework.observe(self.on.install, self._install) self.framework.observe(self.on.remove, self._remove) self.framework.observe(self.on.update_status, self._update_status) # self.framework.observe(self.on.config_changed, self._reconcile) + self.x = 1 self.logger = logging.getLogger(__name__) + self.y = 1 # TODO: Fix file imports and move ./src/files back to ./files self._manifests_file_root = None self.manifest_file_root = "./src/files/manifests/" @@ -51,8 +58,10 @@ def _install(self, event): stderr=subprocess.STDOUT, stdout=subprocess.PIPE, ) - self.logger.info(f"kubectl returned with code '{completed_process.returncode}'" - f" and output {completed_process.stdout}") + self.logger.info( + f"kubectl returned with code '{completed_process.returncode}'" + f" and output {completed_process.stdout}" + ) self.logger.info("Waiting for installed Kubernetes objects to be operational") max_attempts = 20 @@ -70,11 +79,13 @@ def _install(self, event): self.logger.info(f"Sleeping {sleeptime}s") time.sleep(sleeptime) else: - self.unit.status = MaintenanceStatus("Some kubernetes resources missing/not ready") + self.unit.status = MaintenanceStatus( + "Some kubernetes resources missing/not ready" + ) return def _update_status(self, event): - self.logger.info(f"Comparing current state to desired state") + self.logger.info("Comparing current state to desired state") running = self._check_deployed_resources() if running is True: @@ -82,8 +93,12 @@ def _update_status(self, event): self.unit.status = ActiveStatus() return else: - self.logger.info("Resources are missing. Triggering install to reconcile resources") - self.unit.status = MaintenanceStatus("Missing kubernetes resources detected - reinstalling") + self.logger.info( + "Resources are missing. 
Triggering install to reconcile resources" + ) + self.unit.status = MaintenanceStatus( + "Missing kubernetes resources detected - reinstalling" + ) self._install(event) return @@ -103,8 +118,10 @@ def _remove(self, _): if completed_process.returncode == 0: self.logger.info("Kubernetes objects removed using kubectl") else: - self.logger.error(f"Unable to remove kubernetes objects - there may be orphaned resources." - f" kubectl exited with '{completed_process.stdout}'") + self.logger.error( + f"Unable to remove kubernetes objects - there may be orphaned resources." + f" kubectl exited with '{completed_process.stdout}'" + ) def _render_manifests(self) -> (list, str): # Load and render all templates @@ -118,7 +135,7 @@ def _render_manifests(self) -> (list, str): manifests.append( template.render( namespace=self.model.name, - image="metacontroller/metacontroller:v0.3.0" + image="metacontroller/metacontroller:v0.3.0", ) ) @@ -131,8 +148,8 @@ def _render_manifests(self) -> (list, str): def _check_deployed_resources(self, manifests=None): """Check the status of all deployed resources, returning True if ok""" - # TODO: Add checks for other CRDs/services/etc (or, something generic that runs off the manifest) - # TODO: Check all resources automatically based on the manifest + # TODO: Add checks for other CRDs/services/etc + # TODO: ideally check all resources automatically based on the manifest if manifests is not None: raise NotImplementedError("...") # if manifests is None: @@ -146,7 +163,9 @@ def _check_deployed_resources(self, manifests=None): running = validate_statefulset(name=name, namespace=namespace) self.logger.info(f"found statefulset running = {running}") except kubernetes.client.exceptions.ApiException: - self.logger.info(f"got ApiException when looking for statefulset (likely does not exist)") + self.logger.info( + "got ApiException when looking for statefulset (likely does not exist)" + ) running = False return running diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index dfdce72..d814613 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -25,12 +25,11 @@ async def test_build_and_deploy(ops_test: OpsTest): logger.info(f"Deploying charm {APP_NAME} using resources '{resources}'") await ops_test.model.deploy( - entity_url=built_charm_path, - application_name=APP_NAME, - resources=resources + entity_url=built_charm_path, application_name=APP_NAME, resources=resources ) - # TODO: There are more complete ways to do this - see other charms. This passes at incorrect times. + # TODO: Replace this with a more accurate way of testing for success. 
+ # This passes sometimes when model is not ok await ops_test.model.wait_for_idle(timeout=60 * 60) # TODO: confirm it actually deployed correctly diff --git a/tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml b/tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml deleted file mode 100644 index 71892f4..0000000 --- a/tests/unit/render_manifests/input_full/metacontroller-crds-v1.yaml +++ /dev/null @@ -1,534 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - "api-approved.kubernetes.io": "unapproved, request not yet submitted" - name: compositecontrollers.metacontroller.k8s.io -spec: - group: metacontroller.k8s.io - names: - kind: CompositeController - listKind: CompositeControllerList - plural: compositecontrollers - shortNames: - - cc - - cctl - singular: compositecontroller - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - childResources: - items: - properties: - apiVersion: - type: string - resource: - type: string - updateStrategy: - properties: - method: - type: string - statusChecks: - properties: - conditions: - items: - properties: - reason: - type: string - status: - type: string - type: - type: string - required: - - type - type: object - type: array - type: object - type: object - required: - - apiVersion - - resource - type: object - type: array - generateSelector: - type: boolean - hooks: - properties: - customize: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - finalize: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - postUpdateChild: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - preUpdateChild: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - 
type: object - type: object - sync: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - type: object - parentResource: - properties: - apiVersion: - type: string - resource: - type: string - revisionHistory: - properties: - fieldPaths: - items: - type: string - type: array - type: object - required: - - apiVersion - - resource - type: object - resyncPeriodSeconds: - format: int32 - type: integer - required: - - parentResource - type: object - status: - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - "api-approved.kubernetes.io": "unapproved, request not yet submitted" - name: controllerrevisions.metacontroller.k8s.io -spec: - group: metacontroller.k8s.io - names: - kind: ControllerRevision - listKind: ControllerRevisionList - plural: controllerrevisions - singular: controllerrevision - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - children: - items: - properties: - apiGroup: - type: string - kind: - type: string - names: - items: - type: string - type: array - required: - - apiGroup - - kind - - names - type: object - type: array - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - parentPatch: - type: object - required: - - metadata - - parentPatch - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - "api-approved.kubernetes.io": "unapproved, request not yet submitted" - name: decoratorcontrollers.metacontroller.k8s.io -spec: - group: metacontroller.k8s.io - names: - kind: DecoratorController - listKind: DecoratorControllerList - plural: decoratorcontrollers - shortNames: - - dec - - decorators - singular: decoratorcontroller - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - attachments: - items: - properties: - apiVersion: - type: string - resource: - type: string - updateStrategy: - properties: - method: - type: string - type: object - required: - - apiVersion - - resource - type: object - type: array - hooks: - properties: - customize: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - finalize: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - sync: - properties: - webhook: - properties: - path: - type: string - service: - properties: - name: - type: string - namespace: - type: string - port: - format: int32 - type: integer - protocol: - type: string - required: - - name - - namespace - type: object - timeout: - type: string - url: - type: string - type: object - type: object - type: object - resources: - items: - properties: - annotationSelector: - properties: - matchAnnotations: - additionalProperties: - type: string - type: object - matchExpressions: - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - apiVersion: - type: string - labelSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - resource: - type: string - required: - - apiVersion - - resource - type: object - type: array - resyncPeriodSeconds: - format: int32 - type: integer - required: - - resources - type: object - status: - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/tests/unit/render_manifests/input_full/metacontroller-rbac.yaml b/tests/unit/render_manifests/input_full/metacontroller-rbac.yaml deleted file mode 100644 index f553ce7..0000000 --- a/tests/unit/render_manifests/input_full/metacontroller-rbac.yaml +++ /dev/null @@ -1,72 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metacontroller - namespace: {{ namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metacontroller -rules: -- apiGroups: - - "*" - resources: - - "*" - verbs: - - "*" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metacontroller -subjects: -- kind: ServiceAccount - name: metacontroller - namespace: {{ namespace }} -roleRef: - kind: ClusterRole - name: metacontroller - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: aggregate-metacontroller-view - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-view: "true" -rules: -- apiGroups: - - metacontroller.k8s.io - resources: - - compositecontrollers - - controllerrevisions - - decoratorcontrollers - verbs: - - get - - list - - watch ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: aggregate-metacontroller-edit - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" -rules: -- apiGroups: - - metacontroller.k8s.io - resources: - - controllerrevisions - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch diff --git a/tests/unit/render_manifests/input_full/metacontroller.yaml b/tests/unit/render_manifests/input_full/metacontroller.yaml deleted file mode 100644 index eca0ea6..0000000 --- a/tests/unit/render_manifests/input_full/metacontroller.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - app.kubernetes.io/name: metacontroller - name: metacontroller - namespace: {{ namespace }} -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: metacontroller - serviceName: "" - template: - metadata: - labels: - app.kubernetes.io/name: metacontroller - spec: - serviceAccountName: metacontroller - containers: - - name: metacontroller - image: {{ image }} - command: ["/usr/bin/metacontroller"] - args: - - --zap-log-level=4 - - --discovery-interval=20s - volumeClaimTemplates: [] diff --git a/tests/unit/render_manifests/result_full.yaml 
b/tests/unit/render_manifests/result_full.yaml deleted file mode 100644 index bcbe395..0000000 --- a/tests/unit/render_manifests/result_full.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: metacontroller - namespace: test-namespace -spec: - template: - spec: - containers: - - name: metacontroller - image: test-image ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metacontroller - namespace: test-namespace diff --git a/tests/unit/render_manifests/result_simple.yaml b/tests/unit/render_manifests/result_simple.yaml index bcbe395..3a7eef1 100644 --- a/tests/unit/render_manifests/result_simple.yaml +++ b/tests/unit/render_manifests/result_simple.yaml @@ -8,7 +8,7 @@ spec: spec: containers: - name: metacontroller - image: test-image + image: metacontroller/metacontroller:v0.3.0 --- apiVersion: v1 kind: ServiceAccount diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index e7cb0ba..3b56383 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -4,7 +4,7 @@ import pytest import yaml -from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.model import WaitingStatus from ops.testing import Harness from charm import MetacontrollerOperatorCharm @@ -19,33 +19,25 @@ def harness(): def test_not_leader(harness): harness.begin() - assert harness.charm.model.unit.status == WaitingStatus('Waiting for leadership') + assert harness.charm.model.unit.status == WaitingStatus("Waiting for leadership") def test_render_manifests(harness): + harness.set_leader(True) manifests_root = Path("./render_manifests/") - manifests_input_path = manifests_root / "input/" - manifests_rendered = manifests_root / "result.yaml" - - harness.add_oci_resource( - "oci-image", - { - "registrypath": "test-image", - "username": "", - "password": "", - }, - ) + manifests_input_path = manifests_root / "input_simple/" + # NOTE: imagename substitution is currently hard coded in charm.py, so updating image requires + # us to update result.yaml as well + manifests_rendered = manifests_root / "result_simple.yaml" + harness.set_model_name("test-namespace") harness.begin() harness.charm.manifest_file_root = manifests_input_path - logger.info("harness.charm: ") - logger.info(type(harness.charm)) assert harness.charm.manifest_file_root == manifests_input_path manifests, manifests_str = harness.charm._render_manifests() - logger.info(f"manifests_str: {manifests_str}") expected_manifests_str = manifests_rendered.read_text() assert manifests_str.strip() == expected_manifests_str.strip() @@ -55,38 +47,37 @@ def test_render_manifests(harness): "case", [ "simple", - "full", - ] + ], ) def test_install_minimal(harness, mocker, case): - subprocess_run = mocker.patch('subprocess.run') + harness.set_leader(True) + subprocess_run = mocker.patch("subprocess.run") + + # Mock check_deployed_resources so we don't try to load/use k8s api + check_deployed_resources_run = mocker.patch( + "charm.MetacontrollerOperatorCharm._check_deployed_resources", + return_value=True, + ) manifests_root = Path("./render_manifests/") manifests_input_path = manifests_root / f"input_{case}/" manifests_rendered = manifests_root / f"result_{case}.yaml" - harness.add_oci_resource( - "oci-image", - { - "registrypath": "test-image", - "username": "", - "password": "", - }, - ) harness.set_model_name("test-namespace") - harness.begin() harness.charm.manifest_file_root = manifests_input_path harness.charm._install("ignored_event") - logger.info(f"subprocess.run.call_args_list 
(after) = {subprocess_run.call_args_list}") + logger.info( + f"subprocess.run.call_args_list (after) = {subprocess_run.call_args_list}" + ) - expected_args = (['./kubectl', 'apply', '-f-'], ) + expected_args = (["./kubectl", "apply", "-f-"],) expected_kwarg_input = list(yaml.safe_load_all(manifests_rendered.read_text())) call_args = subprocess_run.call_args_list actual_args = call_args[0].args - actual_kwarg_input = list(yaml.safe_load_all(call_args[0].kwargs['input'])) + actual_kwarg_input = list(yaml.safe_load_all(call_args[0].kwargs["input"])) assert len(call_args) == 1 From 795323a13d5e1432f6c6852123786990492e7577 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 22 Sep 2021 15:54:14 -0400 Subject: [PATCH 06/27] fix: tests --- tests/unit/test_charm.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 3b56383..fc72501 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -23,15 +23,14 @@ def test_not_leader(harness): def test_render_manifests(harness): - harness.set_leader(True) - manifests_root = Path("./render_manifests/") + manifests_root = Path("./tests/unit/render_manifests/") manifests_input_path = manifests_root / "input_simple/" # NOTE: imagename substitution is currently hard coded in charm.py, so updating image requires # us to update result.yaml as well manifests_rendered = manifests_root / "result_simple.yaml" + harness.set_leader(True) harness.set_model_name("test-namespace") - harness.begin() harness.charm.manifest_file_root = manifests_input_path @@ -50,20 +49,20 @@ def test_render_manifests(harness): ], ) def test_install_minimal(harness, mocker, case): - harness.set_leader(True) subprocess_run = mocker.patch("subprocess.run") # Mock check_deployed_resources so we don't try to load/use k8s api - check_deployed_resources_run = mocker.patch( + mocker.patch( "charm.MetacontrollerOperatorCharm._check_deployed_resources", return_value=True, ) - manifests_root = Path("./render_manifests/") + manifests_root = Path("./tests/unit/render_manifests/") manifests_input_path = manifests_root / f"input_{case}/" manifests_rendered = manifests_root / f"result_{case}.yaml" harness.set_model_name("test-namespace") + harness.set_leader(True) harness.begin() harness.charm.manifest_file_root = manifests_input_path From 4887fb48cda3ab30b9fc56c271f71c98ffe87d61 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 27 Oct 2021 15:56:07 -0400 Subject: [PATCH 07/27] fix: add setuptools and pip to charmcraft parts for building --- charmcraft.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/charmcraft.yaml b/charmcraft.yaml index 39408c9..f14d412 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -19,6 +19,7 @@ parts: chmod +x kubectl build-packages: - curl -# charm: + charm: + charm-python-packages: [setuptools, pip] # Fixes jinja install during pack # prime: # - ./files/manifests/*.yaml From 75c974ec7cd0af0c87a113e5c104f229db887c96 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Thu, 28 Oct 2021 12:01:18 -0400 Subject: [PATCH 08/27] fix: add missing yaml resources --- .../manifests/metacontroller-crds-v1.yaml | 534 ++++++++++++++++++ src/files/manifests/metacontroller-rbac.yaml | 72 +++ src/files/manifests/metacontroller.yaml | 29 + 3 files changed, 635 insertions(+) create mode 100644 src/files/manifests/metacontroller-crds-v1.yaml create mode 100644 src/files/manifests/metacontroller-rbac.yaml create mode 100644 
src/files/manifests/metacontroller.yaml diff --git a/src/files/manifests/metacontroller-crds-v1.yaml b/src/files/manifests/metacontroller-crds-v1.yaml new file mode 100644 index 0000000..71892f4 --- /dev/null +++ b/src/files/manifests/metacontroller-crds-v1.yaml @@ -0,0 +1,534 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "api-approved.kubernetes.io": "unapproved, request not yet submitted" + name: compositecontrollers.metacontroller.k8s.io +spec: + group: metacontroller.k8s.io + names: + kind: CompositeController + listKind: CompositeControllerList + plural: compositecontrollers + shortNames: + - cc + - cctl + singular: compositecontroller + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + childResources: + items: + properties: + apiVersion: + type: string + resource: + type: string + updateStrategy: + properties: + method: + type: string + statusChecks: + properties: + conditions: + items: + properties: + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + type: object + type: object + required: + - apiVersion + - resource + type: object + type: array + generateSelector: + type: boolean + hooks: + properties: + customize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + finalize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + postUpdateChild: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + preUpdateChild: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + sync: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: 
string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + type: object + parentResource: + properties: + apiVersion: + type: string + resource: + type: string + revisionHistory: + properties: + fieldPaths: + items: + type: string + type: array + type: object + required: + - apiVersion + - resource + type: object + resyncPeriodSeconds: + format: int32 + type: integer + required: + - parentResource + type: object + status: + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "api-approved.kubernetes.io": "unapproved, request not yet submitted" + name: controllerrevisions.metacontroller.k8s.io +spec: + group: metacontroller.k8s.io + names: + kind: ControllerRevision + listKind: ControllerRevisionList + plural: controllerrevisions + singular: controllerrevision + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + children: + items: + properties: + apiGroup: + type: string + kind: + type: string + names: + items: + type: string + type: array + required: + - apiGroup + - kind + - names + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + parentPatch: + type: object + required: + - metadata + - parentPatch + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + "api-approved.kubernetes.io": "unapproved, request not yet submitted" + name: decoratorcontrollers.metacontroller.k8s.io +spec: + group: metacontroller.k8s.io + names: + kind: DecoratorController + listKind: DecoratorControllerList + plural: decoratorcontrollers + shortNames: + - dec + - decorators + singular: decoratorcontroller + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + attachments: + items: + properties: + apiVersion: + type: string + resource: + type: string + updateStrategy: + properties: + method: + type: string + type: object + required: + - apiVersion + - resource + type: object + type: array + hooks: + properties: + customize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + finalize: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + sync: + properties: + webhook: + properties: + path: + type: string + service: + properties: + name: + type: string + namespace: + type: string + port: + format: int32 + type: integer + protocol: + type: string + required: + - name + - namespace + type: object + timeout: + type: string + url: + type: string + type: object + type: object + type: object + resources: + items: + properties: + annotationSelector: + properties: + matchAnnotations: + additionalProperties: + type: string + type: object + matchExpressions: + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + apiVersion: + type: string + labelSelector: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + resource: + type: string + required: + - apiVersion + - resource + type: object + type: array + resyncPeriodSeconds: + format: int32 + type: integer + required: + - resources + type: object + status: + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/src/files/manifests/metacontroller-rbac.yaml b/src/files/manifests/metacontroller-rbac.yaml new file mode 100644 index 0000000..f553ce7 --- /dev/null +++ b/src/files/manifests/metacontroller-rbac.yaml @@ -0,0 +1,72 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metacontroller + namespace: {{ namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metacontroller +rules: +- apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metacontroller +subjects: +- kind: ServiceAccount + name: metacontroller + namespace: {{ namespace }} +roleRef: + kind: ClusterRole + name: metacontroller + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aggregate-metacontroller-view + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: +- apiGroups: + - metacontroller.k8s.io + resources: + - compositecontrollers + - controllerrevisions + - decoratorcontrollers + verbs: + - get + - list + - watch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aggregate-metacontroller-edit + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: +- apiGroups: + - metacontroller.k8s.io + resources: + - controllerrevisions + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/src/files/manifests/metacontroller.yaml b/src/files/manifests/metacontroller.yaml new file mode 100644 index 0000000..f7adbfd --- /dev/null +++ b/src/files/manifests/metacontroller.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app.kubernetes.io/name: metacontroller + name: metacontroller + namespace: {{ namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: metacontroller + serviceName: "" + template: + metadata: + labels: + app.kubernetes.io/name: metacontroller + spec: + serviceAccountName: metacontroller + containers: + - name: metacontroller + image: {{ image }} + command: ["/usr/bin/metacontroller"] + args: + - -v=4 + - --logtostderr + - --discovery-interval=20s + volumeClaimTemplates: [] From b9694a94224d5ea385e35e3a8fb9559516d3b125 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Mon, 15 Nov 2021 11:27:07 -0500 Subject: [PATCH 09/27] feat: Add LICENSE, minor fixes --- .flake8 | 9 -- 
.github/workflows/publish.yaml | 2 +- LICENSE | 201 ++++++++++++++++++++++++++++++++ src/charm.py | 7 +- tests/integration/test_charm.py | 5 + tests/unit/test_charm.py | 3 + tox.ini | 12 +- 7 files changed, 225 insertions(+), 14 deletions(-) delete mode 100644 .flake8 create mode 100644 LICENSE diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 8ef84fc..0000000 --- a/.flake8 +++ /dev/null @@ -1,9 +0,0 @@ -[flake8] -max-line-length = 99 -select: E,W,F,C,N -exclude: - venv - .git - build - dist - *.egg_info diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index 25e0251..cab417b 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -19,7 +19,7 @@ jobs: run: | set -eux sudo snap install charm --classic - sudo pip3 install charmcraft= + sudo pip3 install charmcraft sudo snap install yq - name: Publish charm diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/charm.py b/src/charm.py index 1a45539..34b5053 100755 --- a/src/charm.py +++ b/src/charm.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + import glob import subprocess import time @@ -34,9 +37,8 @@ def __init__(self, *args): self.framework.observe(self.on.update_status, self._update_status) # self.framework.observe(self.on.config_changed, self._reconcile) - self.x = 1 self.logger = logging.getLogger(__name__) - self.y = 1 + # TODO: Fix file imports and move ./src/files back to ./files self._manifests_file_root = None self.manifest_file_root = "./src/files/manifests/" @@ -104,7 +106,6 @@ def _update_status(self, event): def _remove(self, _): """Remove charm""" - # TODO: How should we test this in our integration tests? # Should I set a status or does Juju set one? self.logger.info("Removing kubernetes objects") diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index d814613..3e792c9 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -1,3 +1,6 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + import logging from pathlib import Path import pytest @@ -33,3 +36,5 @@ async def test_build_and_deploy(ops_test: OpsTest): await ops_test.model.wait_for_idle(timeout=60 * 60) # TODO: confirm it actually deployed correctly + +# TODO: Add test for charm removal diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index fc72501..9d80ae1 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1,3 +1,6 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + import logging from pathlib import Path diff --git a/tox.ini b/tox.ini index 7c8e391..01a2478 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,14 @@ [flake8] -max-line-length = 100 +max-line-length = 99 +select: E,W,F,C,N +copyright-check = True +copyright-author = Canonical Ltd. 
+exclude: + venv + .git + build + dist + *.egg_info [tox] skipsdist = True @@ -14,6 +23,7 @@ deps = [testenv:lint] deps = flake8 + flake8-copyright<0.3 black==20.8b1 commands = flake8 {toxinidir}/src {toxinidir}/tests From 838d57938e195dfc319afb4c814a4d431d64a714 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Mon, 15 Nov 2021 16:46:06 -0500 Subject: [PATCH 10/27] Misc updates from peer review --- .github/workflows/integrate.yaml | 16 ++++------- .github/workflows/publish.yaml | 48 ++++++++++++++++---------------- charmcraft.yaml | 5 ++-- src/charm.py | 2 -- tox.ini | 8 +----- 5 files changed, 32 insertions(+), 47 deletions(-) diff --git a/.github/workflows/integrate.yaml b/.github/workflows/integrate.yaml index c8d1e1e..6233494 100644 --- a/.github/workflows/integrate.yaml +++ b/.github/workflows/integrate.yaml @@ -15,10 +15,10 @@ jobs: - uses: actions/checkout@v2 - name: Install dependencies run: | + set -eux sudo apt update sudo apt install tox - - name: Lint code - run: tox -vve lint + - name: tox -vve lint unit: name: Unit Tests @@ -27,8 +27,7 @@ jobs: - uses: actions/checkout@v2 - name: Install dependencies run: pip install tox - - name: Run unit tests - run: tox -vve unit + - run: tox -vve unit integration: name: Integration Tests @@ -39,15 +38,10 @@ jobs: uses: charmed-kubernetes/actions-operator@main with: provider: microk8s - - # TODO: Remove once the actions-operator does this automatically - - name: Configure kubectl - run: | - sg microk8s -c "microk8s config > ~/.kube/config" + charmcraft-channel: latest/candidate - name: Test - run: | - sg microk8s -c "tox -vve integration -- --model testing" + run: sg microk8s -c "tox -vve integration -- --model testing" # On failure, capture debugging resources - name: Get all diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index cab417b..5dca142 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -3,8 +3,7 @@ name: Publish on: push: branches: - # TODO: Set this to 'main' when ready to publish - - master + - main jobs: charm: @@ -19,27 +18,28 @@ jobs: run: | set -eux sudo snap install charm --classic - sudo pip3 install charmcraft + sudo snap install charmcraft --classic --channel=latest-candidate sudo snap install yq - - name: Publish charm - env: - CHARMSTORE_CREDENTIAL: ${{ secrets.CHARMSTORE_CREDENTIAL }} - run: | - set -eux - echo $CHARMSTORE_CREDENTIAL > ~/.go-cookies - - CS_URL="cs:~kubeflow-charmers/metacontroller-operator" - - # TODO: Make this loop over all resources - IMAGE=$(yq eval '.resources.noop.upstream-source' metadata.yaml) - charmcraft build - docker pull $IMAGE - charm push ./*.charm $CS_URL --resource oci-image=$IMAGE - - REVISION=$(charm show $CS_URL --channel unpublished --format yaml | yq e .id-revision.Revision -) - OCI_VERSION=$(charm list-resources $CS_URL --format yaml --channel unpublished | yq e .0.revision -) - charm release \ - --channel edge \ - --resource oci-image-$OCI_VERSION \ - $CS_URL-$REVISION +# TODO: Publish to charmhub +# - name: Publish charm +# env: +# CHARMSTORE_CREDENTIAL: ${{ secrets.CHARMSTORE_CREDENTIAL }} +# run: | +# set -eux +# echo $CHARMSTORE_CREDENTIAL > ~/.go-cookies +# +# CS_URL="cs:~kubeflow-charmers/metacontroller-operator" +# +# # TODO: Make this loop over all resources +# IMAGE=$(yq eval '.resources.noop.upstream-source' metadata.yaml) +# charmcraft build +# docker pull $IMAGE +# charm push ./*.charm $CS_URL --resource oci-image=$IMAGE +# +# REVISION=$(charm show $CS_URL --channel unpublished --format yaml | yq e 
.id-revision.Revision -) +# OCI_VERSION=$(charm list-resources $CS_URL --format yaml --channel unpublished | yq e .0.revision -) +# charm release \ +# --channel edge \ +# --resource oci-image-$OCI_VERSION \ +# $CS_URL-$REVISION diff --git a/charmcraft.yaml b/charmcraft.yaml index f14d412..bc071ba 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -1,5 +1,3 @@ -# Learn more about charmcraft.yaml configuration at: -# https://juju.is/docs/sdk/charmcraft-config type: "charm" bases: - build-on: @@ -20,6 +18,7 @@ parts: build-packages: - curl charm: - charm-python-packages: [setuptools, pip] # Fixes jinja install during pack + # Fixes jinja install during pack. See https://github.com/canonical/charmcraft/issues/551 + charm-python-packages: [setuptools, pip] # prime: # - ./files/manifests/*.yaml diff --git a/src/charm.py b/src/charm.py index 34b5053..a39594c 100755 --- a/src/charm.py +++ b/src/charm.py @@ -22,8 +22,6 @@ class MetacontrollerOperatorCharm(CharmBase): """Charm the Metacontroller""" - _stored = StoredState() - def __init__(self, *args): super().__init__(*args) diff --git a/tox.ini b/tox.ini index 01a2478..f11d26c 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,8 @@ [flake8] -max-line-length = 99 +max-line-length = 100 select: E,W,F,C,N copyright-check = True copyright-author = Canonical Ltd. -exclude: - venv - .git - build - dist - *.egg_info [tox] skipsdist = True From aa0dcc37bf4070f547275d46462177dd992bff38 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Tue, 16 Nov 2021 11:25:43 -0500 Subject: [PATCH 11/27] Misc updates from peer review --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index f11d26c..41f2d75 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [flake8] max-line-length = 100 -select: E,W,F,C,N +--max-complexity copyright-check = True copyright-author = Canonical Ltd. From 92045d0089c495cceb8938784d3635428a2593dd Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Tue, 16 Nov 2021 13:52:42 -0500 Subject: [PATCH 12/27] Remove noop container Uses https://github.com/juju/juju/pull/13442 (landed in juju 2.9.18) --- metadata.yaml | 14 -------------- src/charm.py | 5 ----- tests/integration/test_charm.py | 10 ++++++++-- tox.ini | 2 +- 4 files changed, 9 insertions(+), 22 deletions(-) diff --git a/metadata.yaml b/metadata.yaml index 682fb7c..238c566 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -6,17 +6,3 @@ description: | https://github.com/metacontroller/metacontroller summary: An add-on for Kubernetes to write and deploy custom controllers from simple scripts - -# Do I need this at all? This would be if I'm maintaining things via pebble? -containers: - noop: - resource: noop - -resources: - noop: - type: oci-image - description: '' - upstream-source: alpine - -# TODO: I don't really need this oci-image, everything gets downloaded by k8s. This is really a config arg. Will refactor -# TODO: Try with recent metacontroller. This is 3 years old... 
Kubeflow Pipelines has a merged change with update diff --git a/src/charm.py b/src/charm.py index a39594c..5841920 100755 --- a/src/charm.py +++ b/src/charm.py @@ -14,7 +14,6 @@ import kubernetes.client.exceptions from oci_image import OCIImageResource from ops.charm import CharmBase -from ops.framework import StoredState from ops.main import main from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus @@ -29,7 +28,6 @@ def __init__(self, *args): self.model.unit.status = WaitingStatus("Waiting for leadership") return - self.framework.observe(self.on.noop_pebble_ready, self._noop_pebble_ready) self.framework.observe(self.on.install, self._install) self.framework.observe(self.on.remove, self._remove) self.framework.observe(self.on.update_status, self._update_status) @@ -42,9 +40,6 @@ def __init__(self, *args): self.manifest_file_root = "./src/files/manifests/" self.image = OCIImageResource(self, "oci-image") - def _noop_pebble_ready(self, _): - self.logger.info("noop_pebble_ready fired") - def _install(self, event): self.logger.info("Installing by instantiating Kubernetes objects") self.unit.status = MaintenanceStatus("Instantiating Kubernetes objects") diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 3e792c9..b818341 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -21,14 +21,17 @@ async def test_build_and_deploy(ops_test: OpsTest): logger.info(f"Built charm {built_charm_path}") resources = {} - for resource_name, resource_data in METADATA["resources"].items(): + for resource_name, resource_data in METADATA.get("resources", {}).items(): image_path = resource_data["upstream-source"] resources[resource_name] = image_path logger.info(f"Deploying charm {APP_NAME} using resources '{resources}'") await ops_test.model.deploy( - entity_url=built_charm_path, application_name=APP_NAME, resources=resources + entity_url=built_charm_path, + application_name=APP_NAME, + resources=resources, + trust=True, ) # TODO: Replace this with a more accurate way of testing for success. @@ -37,4 +40,7 @@ async def test_build_and_deploy(ops_test: OpsTest): # TODO: confirm it actually deployed correctly + # TODO: Add test for charm removal +# TODO: Add test that USES metacontroller for something (act on a namespace +# given particular metadata, similar to kfp?) diff --git a/tox.ini b/tox.ini index 41f2d75..673833a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [flake8] max-line-length = 100 ---max-complexity +max-complexity = True copyright-check = True copyright-author = Canonical Ltd. From 8f2744248677637e991452a47d3de5f2d727df77 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Tue, 16 Nov 2021 16:42:51 -0500 Subject: [PATCH 13/27] feat: Use lightkube instead of kubectl for creating objects WIP. 
Missing unit tests and not fully tested --- requirements.txt | 1 + src/charm.py | 166 ++++++++++++++++++++++++++------ tests/integration/test_charm.py | 15 ++- tox.ini | 4 +- 4 files changed, 148 insertions(+), 38 deletions(-) diff --git a/requirements.txt b/requirements.txt index 23f7285..5a41d2e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,4 @@ ops >= 1.2.0 jinja2 < 4 oci-image<1.1.0 kubernetes < 19.0.0 +lightkube diff --git a/src/charm.py b/src/charm.py index 5841920..0784656 100755 --- a/src/charm.py +++ b/src/charm.py @@ -5,6 +5,7 @@ import glob import subprocess import time +from typing import Optional from jinja2 import Environment, FileSystemLoader import logging @@ -12,11 +13,16 @@ from kubernetes import client, config import kubernetes.client.exceptions -from oci_image import OCIImageResource from ops.charm import CharmBase from ops.main import main from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus +import lightkube +from lightkube import codecs + + +METACONTROLLER_IMAGE = "metacontroller/metacontroller:v0.3.0" + class MetacontrollerOperatorCharm(CharmBase): """Charm the Metacontroller""" @@ -38,25 +44,23 @@ def __init__(self, *args): # TODO: Fix file imports and move ./src/files back to ./files self._manifests_file_root = None self.manifest_file_root = "./src/files/manifests/" - self.image = OCIImageResource(self, "oci-image") + + self._lightkube_client = None def _install(self, event): self.logger.info("Installing by instantiating Kubernetes objects") self.unit.status = MaintenanceStatus("Instantiating Kubernetes objects") - _, manifests_str = self._render_manifests() - self.logger.info("Applying manifests") + # TODO: catch error when this fails due to permissions and set appropriate "we aren't + # trusted" blocked status + # create rbac + self._create_rbac() - completed_process = subprocess.run( - ["./kubectl", "apply", "-f-"], - input=manifests_str.encode("utf-8"), - stderr=subprocess.STDOUT, - stdout=subprocess.PIPE, - ) - self.logger.info( - f"kubectl returned with code '{completed_process.returncode}'" - f" and output {completed_process.stdout}" - ) + # Create crds + self._create_crds() + + # deploy the controller + self._create_controller() self.logger.info("Waiting for installed Kubernetes objects to be operational") max_attempts = 20 @@ -99,25 +103,46 @@ def _update_status(self, event): def _remove(self, _): """Remove charm""" - # Should I set a status or does Juju set one? - self.logger.info("Removing kubernetes objects") - - _, manifests_str = self._render_manifests() - completed_process = subprocess.run( - ["./kubectl", "delete", "-f-"], - input=manifests_str.encode("utf-8"), - stderr=subprocess.STDOUT, - stdout=subprocess.PIPE, - ) - if completed_process.returncode == 0: - self.logger.info("Kubernetes objects removed using kubectl") - else: - self.logger.error( - f"Unable to remove kubernetes objects - there may be orphaned resources." 
- f" kubectl exited with '{completed_process.stdout}'" - ) + raise NotImplementedError() + + def _create_crds(self, if_exists=None): + self.logger.info("Applying manifests for CRDs") + objs = self._render_crds() + create_all_lightkube_objects(objs, if_exists=if_exists) + + def _create_rbac(self, if_exists=None): + self.logger.info("Applying manifests for RBAC") + objs = self._render_rbac() + create_all_lightkube_objects(objs, if_exists=if_exists) + + def _create_controller(self, if_exists=None): + self.logger.info("Applying manifests for controller") + objs = self._render_controller() + create_all_lightkube_objects(objs, if_exists=if_exists) + + def _render_yaml(self, yaml_filename: [str, Path]): + """Returns a list of lightkube k8s objects for a yaml file, rendered in charm context""" + context = { + "namespace": self.model.name, + "image": METACONTROLLER_IMAGE, + } + with open(self._manifests_file_root / yaml_filename) as f: + return codecs.load_all_yaml(f, context=context) + + def _render_crds(self): + """Returns a list of lightkube k8s objects for the CRDs, rendered in charm context""" + return self._render_yaml("metacontroller-crds-v1.yaml") + + def _render_rbac(self): + """Returns a list of lightkube k8s objects for the charm RBAC, rendered in charm context""" + return self._render_yaml("metacontroller-rbac.yaml") + + def _render_controller(self): + """Returns a list of lightkube k8s objects for the controller, rendered in charm context""" + return self._render_yaml("metacontroller.yaml") def _render_manifests(self) -> (list, str): + raise Exception("TODO: Remove this") # Load and render all templates self.logger.info(f"Rendering templates from {self.manifest_file_root}") jinja_env = Environment(loader=FileSystemLoader(self.manifest_file_root)) @@ -144,6 +169,7 @@ def _check_deployed_resources(self, manifests=None): """Check the status of all deployed resources, returning True if ok""" # TODO: Add checks for other CRDs/services/etc # TODO: ideally check all resources automatically based on the manifest + # TODO: Use lightkube here if manifests is not None: raise NotImplementedError("...") # if manifests is None: @@ -176,6 +202,12 @@ def _get_manifest_files(self) -> list: """Returns a list of all manifest files""" return glob.glob(str(self.manifest_file_root / "*.yaml")) + @property + def lightkube_client(self): + if not self._lightkube_client: + self._lightkube_client = lightkube.Client() + return self._lightkube_client + def init_app_client(app_client=None): if app_client is None: @@ -198,5 +230,77 @@ def validate_statefulset(name, namespace, app_client=None): return False +ALLOWED_IF_EXISTS = (None, "replace", "patch") + + +def _validate_if_exists(if_exists): + if if_exists in ALLOWED_IF_EXISTS: + return if_exists + else: + raise ValueError( + f"Invalid value for if_exists '{if_exists}'. 
Must be one of {ALLOWED_IF_EXISTS}" + ) + + +def _safe_load_file_to_text(filename: str): + """Returns the contents of filename if it is an existing file, else it returns filename""" + try: + text = Path(filename).read_text() + except FileNotFoundError: + text = filename + return text + + +def create_all_lightkube_objects( + objs: [list, tuple], + if_exists: [str, None] = None, + lightkube_client: lightkube.Client = None, + logger: Optional[logging.Logger] = None, +): + """Creates all k8s resources listed in a YAML file via lightkube + + Args: + objs (list, tuple): List of lightkube objects to create + if_exists (str): If an object already exists, do one of: + patch: Try to lightkube.patch the existing resource + replace: Try to lightkube.replace the existing resource (not yet implemented) + None: Do nothing (lightkube.core.exceptions.ApiError will be raised) + lightkube_client: Instantiated lightkube client or None + logger (Logger): (optional) logger to write log messages to + """ + _validate_if_exists(if_exists) + + if lightkube_client is None: + lightkube_client = lightkube.Client() + + for obj in objs: + try: + lightkube_client.create(obj) + except lightkube.core.exceptions.ApiError as e: + if if_exists is None: + raise e + else: + if logger: + logger.info( + f"Caught {e.status} when creating {obj.metadata.name}. " + f"Trying to {if_exists}" + ) + if if_exists == "replace": + raise NotImplementedError() + # Not sure what is wrong with this syntax but it wouldn't work + elif if_exists == "patch": + lightkube_client.patch( + type(obj), + obj.metadata.name, + obj.to_dict(), + patch_type=lightkube.types.PatchType.MERGE, + ) + else: + raise ValueError( + "This should not be reached. This is likely due to an " + "uncaught, invalid value for if_exists" + ) + + if __name__ == "__main__": main(MetacontrollerOperatorCharm) diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index b818341..ab8bed8 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -34,11 +34,16 @@ async def test_build_and_deploy(ops_test: OpsTest): trust=True, ) - # TODO: Replace this with a more accurate way of testing for success. 
- # This passes sometimes when model is not ok - await ops_test.model.wait_for_idle(timeout=60 * 60) - - # TODO: confirm it actually deployed correctly + apps = [APP_NAME] + await ops_test.model.wait_for_idle( + apps=apps, status="active", raise_on_blocked=True, timeout=300 + ) + for app_name in apps: + for i_unit, unit in enumerate(ops_test.model.applications[app_name].units): + assert ( + unit.workload_status == "active" + ), f"Application {app_name}.Unit {i_unit}.workload_status != active" + assert ops_test.model.applications[APP_NAME].units[0].workload_status == "active" # TODO: Add test for charm removal diff --git a/tox.ini b/tox.ini index 673833a..ba0bc5c 100644 --- a/tox.ini +++ b/tox.ini @@ -25,11 +25,11 @@ commands = [testenv:unit] commands = - pytest -vv --tb native --show-capture=no --log-cli-level=INFO {toxinidir}/tests/unit + pytest -vv --show-capture=no --log-cli-level=INFO {toxinidir}/tests/unit [testenv:integration] deps = {[testenv]deps} -r test-integration-requirements.txt commands = - pytest -vv --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration + pytest -vv --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration From 03838fa5264e27c760ccbc9dc3f24ddb4430fd19 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 17 Nov 2021 12:07:38 -0500 Subject: [PATCH 14/27] feat: unit tests for rendering yaml and creating k8s objects --- src/charm.py | 21 ++--- src/files/manifests/metacontroller.yaml | 2 +- .../simple_manifest_input.yaml} | 2 +- .../simple_manifest_result.yaml} | 4 +- tests/unit/test_charm.py | 83 ++++++++----------- 5 files changed, 45 insertions(+), 67 deletions(-) rename tests/unit/{render_manifests/input_simple/manifests.yaml => render_yaml/simple_manifest_input.yaml} (86%) rename tests/unit/{render_manifests/result_simple.yaml => render_yaml/simple_manifest_result.yaml} (74%) diff --git a/src/charm.py b/src/charm.py index 0784656..9a20f0a 100755 --- a/src/charm.py +++ b/src/charm.py @@ -108,23 +108,23 @@ def _remove(self, _): def _create_crds(self, if_exists=None): self.logger.info("Applying manifests for CRDs") objs = self._render_crds() - create_all_lightkube_objects(objs, if_exists=if_exists) + create_all_lightkube_objects(objs, if_exists=if_exists, lightkube_client=self.lightkube_client) def _create_rbac(self, if_exists=None): self.logger.info("Applying manifests for RBAC") objs = self._render_rbac() - create_all_lightkube_objects(objs, if_exists=if_exists) + create_all_lightkube_objects(objs, if_exists=if_exists, lightkube_client=self.lightkube_client) def _create_controller(self, if_exists=None): self.logger.info("Applying manifests for controller") objs = self._render_controller() - create_all_lightkube_objects(objs, if_exists=if_exists) + create_all_lightkube_objects(objs, if_exists=if_exists, lightkube_client=self.lightkube_client) def _render_yaml(self, yaml_filename: [str, Path]): """Returns a list of lightkube k8s objects for a yaml file, rendered in charm context""" context = { "namespace": self.model.name, - "image": METACONTROLLER_IMAGE, + "metacontroller_image": METACONTROLLER_IMAGE, } with open(self._manifests_file_root / yaml_filename) as f: return codecs.load_all_yaml(f, context=context) @@ -208,6 +208,10 @@ def lightkube_client(self): self._lightkube_client = lightkube.Client() return self._lightkube_client + @lightkube_client.setter + def lightkube_client(self, client): + self._lightkube_client = client + def init_app_client(app_client=None): if app_client is None: @@ 
-242,15 +246,6 @@ def _validate_if_exists(if_exists): ) -def _safe_load_file_to_text(filename: str): - """Returns the contents of filename if it is an existing file, else it returns filename""" - try: - text = Path(filename).read_text() - except FileNotFoundError: - text = filename - return text - - def create_all_lightkube_objects( objs: [list, tuple], if_exists: [str, None] = None, diff --git a/src/files/manifests/metacontroller.yaml b/src/files/manifests/metacontroller.yaml index f7adbfd..a020256 100644 --- a/src/files/manifests/metacontroller.yaml +++ b/src/files/manifests/metacontroller.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: metacontroller containers: - name: metacontroller - image: {{ image }} + image: {{ metacontroller-image }} command: ["/usr/bin/metacontroller"] args: - -v=4 diff --git a/tests/unit/render_manifests/input_simple/manifests.yaml b/tests/unit/render_yaml/simple_manifest_input.yaml similarity index 86% rename from tests/unit/render_manifests/input_simple/manifests.yaml rename to tests/unit/render_yaml/simple_manifest_input.yaml index 5171190..1aa693d 100644 --- a/tests/unit/render_manifests/input_simple/manifests.yaml +++ b/tests/unit/render_yaml/simple_manifest_input.yaml @@ -8,7 +8,7 @@ spec: spec: containers: - name: metacontroller - image: {{ image }} + image: {{ metacontroller_image }} --- apiVersion: v1 kind: ServiceAccount diff --git a/tests/unit/render_manifests/result_simple.yaml b/tests/unit/render_yaml/simple_manifest_result.yaml similarity index 74% rename from tests/unit/render_manifests/result_simple.yaml rename to tests/unit/render_yaml/simple_manifest_result.yaml index 3a7eef1..71e01b4 100644 --- a/tests/unit/render_manifests/result_simple.yaml +++ b/tests/unit/render_yaml/simple_manifest_result.yaml @@ -7,8 +7,8 @@ spec: template: spec: containers: - - name: metacontroller - image: metacontroller/metacontroller:v0.3.0 + - image: metacontroller/metacontroller:v0.3.0 + name: metacontroller --- apiVersion: v1 kind: ServiceAccount diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 9d80ae1..51b50a7 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -3,14 +3,15 @@ import logging from pathlib import Path +from unittest import mock -import pytest -import yaml - +from charm import MetacontrollerOperatorCharm +import lightkube.codecs +from lightkube.resources.apps_v1 import Deployment, StatefulSet +from lightkube.models.meta_v1 import ObjectMeta from ops.model import WaitingStatus - from ops.testing import Harness -from charm import MetacontrollerOperatorCharm +import pytest logger = logging.getLogger(__name__) @@ -25,66 +26,48 @@ def test_not_leader(harness): assert harness.charm.model.unit.status == WaitingStatus("Waiting for leadership") -def test_render_manifests(harness): - manifests_root = Path("./tests/unit/render_manifests/") - manifests_input_path = manifests_root / "input_simple/" - # NOTE: imagename substitution is currently hard coded in charm.py, so updating image requires - # us to update result.yaml as well - manifests_rendered = manifests_root / "result_simple.yaml" - +def test_render_yaml(harness): + manifests_root = Path("./tests/unit/render_yaml/") + unrendered_yaml_file = "simple_manifest_input.yaml" + expected_yaml_file = "simple_manifest_result.yaml" harness.set_leader(True) harness.set_model_name("test-namespace") harness.begin() - harness.charm.manifest_file_root = manifests_input_path - - assert harness.charm.manifest_file_root == manifests_input_path + harness.charm.manifest_file_root = 
manifests_root - manifests, manifests_str = harness.charm._render_manifests() - expected_manifests_str = manifests_rendered.read_text() + assert harness.charm.manifest_file_root == Path(manifests_root) + objs = harness.charm._render_yaml(unrendered_yaml_file) - assert manifests_str.strip() == expected_manifests_str.strip() + rendered_yaml_expected = ( + Path(manifests_root / expected_yaml_file).read_text().strip() + ) + assert lightkube.codecs.dump_all_yaml(objs).strip() == rendered_yaml_expected -@pytest.mark.parametrize( - "case", - [ - "simple", - ], -) -def test_install_minimal(harness, mocker, case): - subprocess_run = mocker.patch("subprocess.run") +def test_create_controller(harness, mocker): + namespace = "test-namespace" - # Mock check_deployed_resources so we don't try to load/use k8s api + mocked_render_controller_return = [ + Deployment(metadata=ObjectMeta(name="test-deployment", namespace=namespace)), + StatefulSet(metadata=ObjectMeta(name="test-deployment", namespace=namespace)), + ] mocker.patch( - "charm.MetacontrollerOperatorCharm._check_deployed_resources", - return_value=True, + "charm.MetacontrollerOperatorCharm._render_controller", + return_value=mocked_render_controller_return, ) - manifests_root = Path("./tests/unit/render_manifests/") - manifests_input_path = manifests_root / f"input_{case}/" - manifests_rendered = manifests_root / f"result_{case}.yaml" + mocked_lightkube_client = mock.Mock() + mocked_lightkube_client_expected_calls = [ + mock.call.create(obj) for obj in mocked_render_controller_return + ] - harness.set_model_name("test-namespace") harness.set_leader(True) + harness.set_model_name(namespace) harness.begin() - harness.charm.manifest_file_root = manifests_input_path - - harness.charm._install("ignored_event") - logger.info( - f"subprocess.run.call_args_list (after) = {subprocess_run.call_args_list}" - ) - - expected_args = (["./kubectl", "apply", "-f-"],) - expected_kwarg_input = list(yaml.safe_load_all(manifests_rendered.read_text())) - - call_args = subprocess_run.call_args_list - actual_args = call_args[0].args - actual_kwarg_input = list(yaml.safe_load_all(call_args[0].kwargs["input"])) - - assert len(call_args) == 1 + harness.charm.lightkube_client = mocked_lightkube_client - assert actual_args == expected_args - assert actual_kwarg_input == expected_kwarg_input + harness.charm._create_controller() + mocked_lightkube_client.assert_has_calls(mocked_lightkube_client_expected_calls) # TODO: Create test that "deploys" manifests then simulates the watchers From da5abfbc2ae6b26dbb9b722a8e68120ae921fbb0 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Wed, 17 Nov 2021 14:11:26 -0500 Subject: [PATCH 15/27] feat: add unit tests for install and update_status events --- src/charm.py | 17 +++++++---- tests/unit/test_charm.py | 63 ++++++++++++++++++++++++++++++++++------ 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/src/charm.py b/src/charm.py index 9a20f0a..bcbb87c 100755 --- a/src/charm.py +++ b/src/charm.py @@ -3,7 +3,6 @@ # See LICENSE file for licensing details. 
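The render/serialize round-trip that test_render_yaml relies on can also be exercised directly with lightkube's codecs; a small sketch under the assumption that a StringIO template behaves like the open file the charm passes:

import io

from lightkube import codecs

TEMPLATE = """
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metacontroller
  namespace: {{ namespace }}
"""

# load_all_yaml renders the Jinja context into the template and returns lightkube objects
objs = codecs.load_all_yaml(io.StringIO(TEMPLATE), context={"namespace": "test-namespace"})
assert objs[0].metadata.namespace == "test-namespace"

# dump_all_yaml turns the objects back into a YAML string, which is what the test
# compares against the expected result file
print(codecs.dump_all_yaml(objs))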
import glob -import subprocess import time from typing import Optional @@ -15,7 +14,7 @@ import kubernetes.client.exceptions from ops.charm import CharmBase from ops.main import main -from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus +from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus, BlockedStatus import lightkube from lightkube import codecs @@ -78,7 +77,7 @@ def _install(self, event): self.logger.info(f"Sleeping {sleeptime}s") time.sleep(sleeptime) else: - self.unit.status = MaintenanceStatus( + self.unit.status = BlockedStatus( "Some kubernetes resources missing/not ready" ) return @@ -108,17 +107,23 @@ def _remove(self, _): def _create_crds(self, if_exists=None): self.logger.info("Applying manifests for CRDs") objs = self._render_crds() - create_all_lightkube_objects(objs, if_exists=if_exists, lightkube_client=self.lightkube_client) + create_all_lightkube_objects( + objs, if_exists=if_exists, lightkube_client=self.lightkube_client + ) def _create_rbac(self, if_exists=None): self.logger.info("Applying manifests for RBAC") objs = self._render_rbac() - create_all_lightkube_objects(objs, if_exists=if_exists, lightkube_client=self.lightkube_client) + create_all_lightkube_objects( + objs, if_exists=if_exists, lightkube_client=self.lightkube_client + ) def _create_controller(self, if_exists=None): self.logger.info("Applying manifests for controller") objs = self._render_controller() - create_all_lightkube_objects(objs, if_exists=if_exists, lightkube_client=self.lightkube_client) + create_all_lightkube_objects( + objs, if_exists=if_exists, lightkube_client=self.lightkube_client + ) def _render_yaml(self, yaml_filename: [str, Path]): """Returns a list of lightkube k8s objects for a yaml file, rendered in charm context""" diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 51b50a7..e49209e 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -9,7 +9,7 @@ import lightkube.codecs from lightkube.resources.apps_v1 import Deployment, StatefulSet from lightkube.models.meta_v1 import ObjectMeta -from ops.model import WaitingStatus +from ops.model import WaitingStatus, ActiveStatus, BlockedStatus, MaintenanceStatus from ops.testing import Harness import pytest @@ -21,18 +21,24 @@ def harness(): return Harness(MetacontrollerOperatorCharm) +@pytest.fixture() +def harness_with_charm(harness): + harness.set_leader(True) + harness.set_model_name("test-namespace") + harness.begin() + return harness + + def test_not_leader(harness): harness.begin() assert harness.charm.model.unit.status == WaitingStatus("Waiting for leadership") -def test_render_yaml(harness): +def test_render_yaml(harness_with_charm): manifests_root = Path("./tests/unit/render_yaml/") unrendered_yaml_file = "simple_manifest_input.yaml" expected_yaml_file = "simple_manifest_result.yaml" - harness.set_leader(True) - harness.set_model_name("test-namespace") - harness.begin() + harness = harness_with_charm harness.charm.manifest_file_root = manifests_root assert harness.charm.manifest_file_root == Path(manifests_root) @@ -44,7 +50,7 @@ def test_render_yaml(harness): assert lightkube.codecs.dump_all_yaml(objs).strip() == rendered_yaml_expected -def test_create_controller(harness, mocker): +def test_create_controller(harness_with_charm, mocker): namespace = "test-namespace" mocked_render_controller_return = [ @@ -61,9 +67,7 @@ def test_create_controller(harness, mocker): mock.call.create(obj) for obj in mocked_render_controller_return ] - harness.set_leader(True) - 
harness.set_model_name(namespace) - harness.begin() + harness = harness_with_charm harness.charm.lightkube_client = mocked_lightkube_client harness.charm._create_controller() @@ -71,3 +75,44 @@ def test_create_controller(harness, mocker): # TODO: Create test that "deploys" manifests then simulates the watchers + + +@pytest.mark.parametrize( + "install_succeeded, expected_status", + ( + (True, ActiveStatus()), + (False, BlockedStatus("Some kubernetes resources missing/not ready")), + ) +) +def test_install(harness_with_charm, mocker, install_succeeded, expected_status): + mocker.patch("charm.MetacontrollerOperatorCharm._create_rbac") + mocker.patch("charm.MetacontrollerOperatorCharm._create_crds") + mocker.patch("charm.MetacontrollerOperatorCharm._create_controller") + mocker.patch("charm.MetacontrollerOperatorCharm._check_deployed_resources", return_value=install_succeeded) + mocker.patch("time.sleep") + + harness = harness_with_charm + harness.charm.on.install.emit() + harness.charm._create_rbac.assert_called_once() + harness.charm._create_crds.assert_called_once() + harness.charm._create_controller.assert_called_once() + assert harness.charm.model.unit.status == expected_status + + +@pytest.mark.parametrize( + "deployed_resources_status, expected_status, n_install_calls", + ( + (True, ActiveStatus(), 0), + (False, MaintenanceStatus("Missing kubernetes resources detected - reinstalling"), 1) + ) +) +def test_update_status(harness_with_charm, mocker, deployed_resources_status, expected_status, n_install_calls): + mocker.patch("charm.MetacontrollerOperatorCharm._install") + mocker.patch("charm.MetacontrollerOperatorCharm._check_deployed_resources", return_value=deployed_resources_status) + + harness = harness_with_charm + harness.charm.on.update_status.emit() + assert harness.charm.model.unit.status == expected_status + + # Assert install called correct number of times + assert harness.charm._install.call_count == n_install_calls From af06c847924ed4e13f472ef25235e1d96c66186a Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Thu, 18 Nov 2021 14:17:01 -0500 Subject: [PATCH 16/27] feat: refactor check_deployed_resources, add tests --- src/charm.py | 71 +++++++++--------------- tests/unit/test_charm.py | 115 +++++++++++++++++++++++++++++++++++---- 2 files changed, 131 insertions(+), 55 deletions(-) diff --git a/src/charm.py b/src/charm.py index bcbb87c..ff713e5 100755 --- a/src/charm.py +++ b/src/charm.py @@ -6,7 +6,6 @@ import time from typing import Optional -from jinja2 import Environment, FileSystemLoader import logging from pathlib import Path @@ -146,54 +145,29 @@ def _render_controller(self): """Returns a list of lightkube k8s objects for the controller, rendered in charm context""" return self._render_yaml("metacontroller.yaml") - def _render_manifests(self) -> (list, str): - raise Exception("TODO: Remove this") - # Load and render all templates - self.logger.info(f"Rendering templates from {self.manifest_file_root}") - jinja_env = Environment(loader=FileSystemLoader(self.manifest_file_root)) - manifests = [] - for f in self._get_manifest_files(): - self.logger.info(f"Rendering template {f}") - f = Path(f).relative_to(self.manifest_file_root) - template = jinja_env.get_template(str(f)) - manifests.append( - template.render( - namespace=self.model.name, - image="metacontroller/metacontroller:v0.3.0", - ) - ) - - # Combine templates into a single string - manifests_str = "\n---\n".join(manifests) + def _render_all_resources(self): + """Returns a list of lightkube8s objects for all 
resources, rendered in charm context""" + return self._render_rbac() + self._render_crds() + self._render_controller() - logging.info(f"rendered manifests_str = {manifests_str}") + def _check_deployed_resources(self): + """Check the status of all deployed resources, returning True if ok""" + expected_resources = self._render_all_resources() + found_resources = [None] * len(expected_resources) + for i, resource in enumerate(expected_resources): + self.logger.info(f"Checking for '{resource.metadata}'") + try: + found_resources[i] = get_k8s_obj(resource, self.lightkube_client) + except lightkube.core.exceptions.ApiError: + self.logger.info( + f"Cannot find k8s object for metadata '{resource.metadata}'" + ) - return manifests, manifests_str + found_all_resources = all(found_resources) - def _check_deployed_resources(self, manifests=None): - """Check the status of all deployed resources, returning True if ok""" - # TODO: Add checks for other CRDs/services/etc - # TODO: ideally check all resources automatically based on the manifest - # TODO: Use lightkube here - if manifests is not None: - raise NotImplementedError("...") - # if manifests is None: - # manifests = self._render_manifests() - - resource_type = "statefulset" - name = "metacontroller" - namespace = self.model.name - self.logger.info(f"Checking {resource_type} {name} in {namespace}") - try: - running = validate_statefulset(name=name, namespace=namespace) - self.logger.info(f"found statefulset running = {running}") - except kubernetes.client.exceptions.ApiException: - self.logger.info( - "got ApiException when looking for statefulset (likely does not exist)" - ) - running = False + # TODO: Assert the statefulset/deployments/pods are ready/have replicas. + # Might be able to use lightkube objects status subresource? 
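One possible way to address the readiness TODO above is to read the StatefulSet status through lightkube; a minimal sketch (the function name and example namespace are illustrative, not the charm's code):

import lightkube
from lightkube.resources.apps_v1 import StatefulSet


def statefulset_ready(name: str, namespace: str) -> bool:
    """Return True when the StatefulSet reports all desired replicas as ready."""
    client = lightkube.Client()
    ss = client.get(StatefulSet, name, namespace=namespace)
    # status.readyReplicas is unset (None) until at least one replica is ready
    return (ss.status.readyReplicas or 0) == ss.spec.replicas


# e.g. statefulset_ready("metacontroller", "kubeflow")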
- return running + return found_all_resources @property def manifest_file_root(self): @@ -302,5 +276,12 @@ def create_all_lightkube_objects( ) +def get_k8s_obj(obj, client=None): + if not client: + client = lightkube.Client() + + return client.get(type(obj), obj.metadata.name, namespace=obj.metadata.namespace) + + if __name__ == "__main__": main(MetacontrollerOperatorCharm) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index e49209e..899602e 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -8,6 +8,7 @@ from charm import MetacontrollerOperatorCharm import lightkube.codecs from lightkube.resources.apps_v1 import Deployment, StatefulSet +from lightkube.resources.apiextensions_v1 import CustomResourceDefinition from lightkube.models.meta_v1 import ObjectMeta from ops.model import WaitingStatus, ActiveStatus, BlockedStatus, MaintenanceStatus from ops.testing import Harness @@ -74,21 +75,21 @@ def test_create_controller(harness_with_charm, mocker): mocked_lightkube_client.assert_has_calls(mocked_lightkube_client_expected_calls) -# TODO: Create test that "deploys" manifests then simulates the watchers - - @pytest.mark.parametrize( "install_succeeded, expected_status", ( (True, ActiveStatus()), (False, BlockedStatus("Some kubernetes resources missing/not ready")), - ) + ), ) def test_install(harness_with_charm, mocker, install_succeeded, expected_status): mocker.patch("charm.MetacontrollerOperatorCharm._create_rbac") mocker.patch("charm.MetacontrollerOperatorCharm._create_crds") mocker.patch("charm.MetacontrollerOperatorCharm._create_controller") - mocker.patch("charm.MetacontrollerOperatorCharm._check_deployed_resources", return_value=install_succeeded) + mocker.patch( + "charm.MetacontrollerOperatorCharm._check_deployed_resources", + return_value=install_succeeded, + ) mocker.patch("time.sleep") harness = harness_with_charm @@ -102,13 +103,26 @@ def test_install(harness_with_charm, mocker, install_succeeded, expected_status) @pytest.mark.parametrize( "deployed_resources_status, expected_status, n_install_calls", ( - (True, ActiveStatus(), 0), - (False, MaintenanceStatus("Missing kubernetes resources detected - reinstalling"), 1) - ) + (True, ActiveStatus(), 0), + ( + False, + MaintenanceStatus("Missing kubernetes resources detected - reinstalling"), + 1, + ), + ), ) -def test_update_status(harness_with_charm, mocker, deployed_resources_status, expected_status, n_install_calls): +def test_update_status( + harness_with_charm, + mocker, + deployed_resources_status, + expected_status, + n_install_calls, +): mocker.patch("charm.MetacontrollerOperatorCharm._install") - mocker.patch("charm.MetacontrollerOperatorCharm._check_deployed_resources", return_value=deployed_resources_status) + mocker.patch( + "charm.MetacontrollerOperatorCharm._check_deployed_resources", + return_value=deployed_resources_status, + ) harness = harness_with_charm harness.charm.on.update_status.emit() @@ -116,3 +130,84 @@ def test_update_status(harness_with_charm, mocker, deployed_resources_status, ex # Assert install called correct number of times assert harness.charm._install.call_count == n_install_calls + + +def fake_get_k8s_obj_always_successful(obj, _): + """Mock of get_k8s_obj that always succeeds, returning the requested lightkube object""" + return obj + + +def fake_get_k8s_obj_crds_fail(obj, _): + """Mock of get_k8s_obj that fails if getting a crd, otherwise succeeds""" + # Not sure why lightkube doesn't use their obj.kind. 
Use isinstance as a fix + if isinstance(obj, lightkube.resources.apiextensions_v1.CustomResourceDefinition): + raise _FakeApiError() + else: + return obj + + +@pytest.fixture() +def mock_resources_ready(): + # TODO: When testing stateful sets for scale, add scale to this statefulset + return [ + StatefulSet(metadata=ObjectMeta(name="ss1", namespace="namespace")), + CustomResourceDefinition(metadata=ObjectMeta(name="crd1"), spec={}), + ] + + +@pytest.mark.parametrize( + "mock_get_k8s_obj,mock_resources_fixture,expected_are_resources_ok", + ( + (fake_get_k8s_obj_always_successful, "mock_resources_ready", True), + (fake_get_k8s_obj_crds_fail, "mock_resources_ready", False), + # (fake_get_k8s_statefulsets_have_no_replicas, mock_resources_not_ready, False), # TODO: Add when ss check enabled + ), +) +def test_check_deployed_resources( + request, + mocker, + harness_with_charm, + mock_get_k8s_obj, + mock_resources_fixture, + expected_are_resources_ok, +): + mocker.patch("charm.get_k8s_obj", side_effect=mock_get_k8s_obj) + + # Get the value of the fixture passed as a param, otherwise it is just the fixture itself + mock_resources = request.getfixturevalue(mock_resources_fixture) + mocker.patch( + "charm.MetacontrollerOperatorCharm._render_all_resources", + return_value=mock_resources, + ) + + harness = harness_with_charm + are_resources_ok = harness.charm._check_deployed_resources() + assert are_resources_ok == expected_are_resources_ok + + # TODO: Assert on the logs emitted? + + +class _FakeResponse: + """Used to fake an httpx response during testing only.""" + + def __init__(self, code): + self.code = code + self.name = "" + + def json(self): + reason = "" + if self.code == 409: + reason = "AlreadyExists" + return { + "apiVersion": 1, + "code": self.code, + "message": "broken", + "reason": reason, + } + + +class _FakeApiError(lightkube.core.exceptions.ApiError): + """Used to simulate an ApiError during testing.""" + + def __init__(self, code=401): + super().__init__(response=_FakeResponse(code)) From 4d1a478fa28d48e804ed465f6de22ec86f9a696c Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Thu, 18 Nov 2021 14:24:01 -0500 Subject: [PATCH 17/27] fix: flake8 max-complexity, linting problems --- src/charm.py | 1 - tests/unit/test_charm.py | 3 ++- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/charm.py b/src/charm.py index ff713e5..0fcc4fa 100755 --- a/src/charm.py +++ b/src/charm.py @@ -10,7 +10,6 @@ from pathlib import Path from kubernetes import client, config -import kubernetes.client.exceptions from ops.charm import CharmBase from ops.main import main from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus, BlockedStatus diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 899602e..ac36bd0 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -160,7 +160,8 @@ def mock_resources_ready(): ( (fake_get_k8s_obj_always_successful, "mock_resources_ready", True), (fake_get_k8s_obj_crds_fail, "mock_resources_ready", False), - # (fake_get_k8s_statefulsets_have_no_replicas, mock_resources_not_ready, False), # TODO: Add when ss check enabled + # (fake_get_k8s_statefulsets_have_no_replicas, mock_resources_not_ready, + # False), # TODO: Add when ss check enabled ), ) def test_check_deployed_resources( diff --git a/tox.ini b/tox.ini index ba0bc5c..86cbd36 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [flake8] max-line-length = 100 -max-complexity = True +max-complexity = 10 copyright-check = True copyright-author = 
Canonical Ltd. From 74f44d263b6728852550822c3704b91c20e2a2c8 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Thu, 18 Nov 2021 15:17:48 -0500 Subject: [PATCH 18/27] feat: add StatefulSet validation and tests --- src/charm.py | 43 ++++++++++++++++++++-------------------- tests/unit/test_charm.py | 29 +++++++++++++++++++++++---- 2 files changed, 46 insertions(+), 26 deletions(-) diff --git a/src/charm.py b/src/charm.py index 0fcc4fa..ed05b49 100755 --- a/src/charm.py +++ b/src/charm.py @@ -9,13 +9,13 @@ import logging from pathlib import Path -from kubernetes import client, config from ops.charm import CharmBase from ops.main import main from ops.model import ActiveStatus, WaitingStatus, MaintenanceStatus, BlockedStatus import lightkube from lightkube import codecs +from lightkube.resources.apps_v1 import StatefulSet METACONTROLLER_IMAGE = "metacontroller/metacontroller:v0.3.0" @@ -160,13 +160,11 @@ def _check_deployed_resources(self): self.logger.info( f"Cannot find k8s object for metadata '{resource.metadata}'" ) - found_all_resources = all(found_resources) - # TODO: Assert the statefulset/deployments/pods are ready/have replicas. - # Might be able to use lightkube objects status subresource? + statefulset_ok = validate_statefulsets(found_resources, self.logger) - return found_all_resources + return found_all_resources and statefulset_ok @property def manifest_file_root(self): @@ -191,25 +189,26 @@ def lightkube_client(self, client): self._lightkube_client = client -def init_app_client(app_client=None): - if app_client is None: - config.load_incluster_config() - app_client = client.AppsV1Api() - return app_client - +def validate_statefulsets(objs, logger=None): + """Returns True if all StatefulSets in objs have the expected number of readyReplicas else False -def validate_statefulset(name, namespace, app_client=None): - """Returns true if a statefulset has its desired number of ready replicas, else False - - Raises a kubernetes.client.exceptions.ApiException from read_namespaced_stateful_set() - if the object cannot be found + Optionally emits a message to logger for any StatefulSets that do not have their desired number + of replicas """ - app_client = init_app_client(app_client) - ss = app_client.read_namespaced_stateful_set(name, namespace) - if ss.status.ready_replicas == ss.spec.replicas: - return True - else: - return False + statefulset_not_ok = False + for obj in objs: + if isinstance(obj, StatefulSet): + readyReplicas = obj.status.readyReplicas + replicas_expected = obj.spec.replicas + if readyReplicas != replicas_expected: + statefulset_not_ok = True + if logger: + logger.info( + f"StatefulSet {obj.metadata.name} in namespace " + f"{obj.metadata.namespace} has {readyReplicas} readyReplicas, " + f"expected {replicas_expected}" + ) + return not statefulset_not_ok ALLOWED_IF_EXISTS = (None, "replace", "patch") diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index ac36bd0..2f2dc2a 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -10,6 +10,7 @@ from lightkube.resources.apps_v1 import Deployment, StatefulSet from lightkube.resources.apiextensions_v1 import CustomResourceDefinition from lightkube.models.meta_v1 import ObjectMeta +from lightkube.models.apps_v1 import StatefulSetSpec, StatefulSetStatus from ops.model import WaitingStatus, ActiveStatus, BlockedStatus, MaintenanceStatus from ops.testing import Harness import pytest @@ -148,20 +149,40 @@ def fake_get_k8s_obj_crds_fail(obj, _): @pytest.fixture() def mock_resources_ready(): - # TODO: 
When testing stateful sets for scale, add scale to this statefulset + """List of lightkube resources that are ok""" return [ - StatefulSet(metadata=ObjectMeta(name="ss1", namespace="namespace")), + StatefulSet( + metadata=ObjectMeta(name="ss1", namespace="namespace"), + spec=StatefulSetSpec(replicas=3, selector="", serviceName="", template=""), + status=StatefulSetStatus(replicas=3, readyReplicas=3), + ), CustomResourceDefinition(metadata=ObjectMeta(name="crd1"), spec={}), ] +@pytest.fixture() +def mock_resources_ss_not_ready(): + """ + List of lightkube resources that has a StatefulSet that does not have all replicas available + """ + return [ + StatefulSet( + metadata=ObjectMeta(name="ss1", namespace="namespace"), + spec=StatefulSetSpec(replicas=3, selector="", serviceName="", template=""), + status=StatefulSetStatus(replicas=0, readyReplicas=0), + ), + ] + + @pytest.mark.parametrize( "mock_get_k8s_obj,mock_resources_fixture,expected_are_resources_ok", ( + # Test where objects exist in k8s (fake_get_k8s_obj_always_successful, "mock_resources_ready", True), + # Test where objects do not exist in k8s (fake_get_k8s_obj_crds_fail, "mock_resources_ready", False), - # (fake_get_k8s_statefulsets_have_no_replicas, mock_resources_not_ready, - # False), # TODO: Add when ss check enabled + # Test with StatefulSet that does not have all replicas + (fake_get_k8s_obj_always_successful, "mock_resources_ss_not_ready", False), ), ) def test_check_deployed_resources( From 6e38a7cc6a2124db3bb1b497247107055ef0c084 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Thu, 18 Nov 2021 16:06:18 -0500 Subject: [PATCH 19/27] feat: add error message for when charm does not have trust Add tests for catch of trust-related error --- src/charm.py | 18 ++++++++- tests/integration/test_charm_no_trust.py | 50 ++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_charm_no_trust.py diff --git a/src/charm.py b/src/charm.py index ed05b49..d19e6b8 100755 --- a/src/charm.py +++ b/src/charm.py @@ -16,6 +16,7 @@ import lightkube from lightkube import codecs from lightkube.resources.apps_v1 import StatefulSet +from lightkube.core.exceptions import ApiError METACONTROLLER_IMAGE = "metacontroller/metacontroller:v0.3.0" @@ -51,7 +52,22 @@ def _install(self, event): # TODO: catch error when this fails due to permissions and set appropriate "we aren't # trusted" blocked status # create rbac - self._create_rbac() + try: + self._create_rbac() + except ApiError as e: + # Handle forbidden error as this likely means we don't have --trust + if e.status.code == 403: + self.logger.error( + "Received Forbidden (403) error from lightkube when creating required RBAC. " + "This may be due to the charm lacking permissions to create cluster-scoped " + "roles and resources. Charm must be deployed with `--trust`" + ) + self.unit.status = BlockedStatus( + "Cannot create required RBAC. Charm may not have `--trust`" + ) + return + else: + raise e # Create crds self._create_crds() diff --git a/tests/integration/test_charm_no_trust.py b/tests/integration/test_charm_no_trust.py new file mode 100644 index 0000000..0a84b42 --- /dev/null +++ b/tests/integration/test_charm_no_trust.py @@ -0,0 +1,50 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +from pathlib import Path +import pytest +import yaml + +from pytest_operator.plugin import OpsTest + + +logger = logging.getLogger(__name__) + +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +APP_NAME = "metacontroller-operator" + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy_without_trust(ops_test: OpsTest): + """Tests whether the Charm catches and messages that it was deployed without --trust""" + logger.info("Building charm") + built_charm_path = await ops_test.build_charm("./") + logger.info(f"Built charm {built_charm_path}") + + resources = {} + for resource_name, resource_data in METADATA.get("resources", {}).items(): + image_path = resource_data["upstream-source"] + resources[resource_name] = image_path + + logger.info( + f"Deploying charm {APP_NAME} using resources '{resources}' without `--trust`" + ) + + await ops_test.model.deploy( + entity_url=built_charm_path, + application_name=APP_NAME, + resources=resources, + trust=False, + ) + + apps = [APP_NAME] + await ops_test.model.wait_for_idle( + apps=apps, status="blocked", raise_on_blocked=False, timeout=300 + ) + + assert ops_test.model.applications[APP_NAME].units[0].workload_status == "blocked" + assert ( + ops_test.model.applications[APP_NAME].units[0].workload_status_message + == "Cannot create required RBAC. Charm may not have `--trust`" + ) From 64bebb667b05509dc3dca909890b87d3720ca4e0 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Thu, 18 Nov 2021 16:49:20 -0500 Subject: [PATCH 20/27] fix: typo in integrate.yaml --- .github/workflows/integrate.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integrate.yaml b/.github/workflows/integrate.yaml index 6233494..de8af4b 100644 --- a/.github/workflows/integrate.yaml +++ b/.github/workflows/integrate.yaml @@ -18,7 +18,7 @@ jobs: set -eux sudo apt update sudo apt install tox - - name: tox -vve lint + - run: tox -vve lint unit: name: Unit Tests From bfe2da4bdabce1a68c7c3aecfd8225cd3510ecb2 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Fri, 19 Nov 2021 10:07:27 -0500 Subject: [PATCH 21/27] fix: mock away lightkube client in unit tests client fails when kube config cannot be found on system (such as in GH runner) --- tests/unit/test_charm.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 2f2dc2a..5c1063c 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -193,6 +193,10 @@ def test_check_deployed_resources( mock_resources_fixture, expected_are_resources_ok, ): + # Mock away lightkube_client so it doesn't look for/fail on kubernetes config + mocked_client = mocker.patch("charm.MetacontrollerOperatorCharm.lightkube_client", new_callable=mock.PropertyMock) + mocked_client.return_value = None + mocker.patch("charm.get_k8s_obj", side_effect=mock_get_k8s_obj) # Get the value of the fixture passed as a param, otherwise it is just the fixture itself From 454e7a2d855327823cab48b18f8f99626a8ca999 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Fri, 19 Nov 2021 15:31:59 -0500 Subject: [PATCH 22/27] fix: update install process to handle resource conflicts Install now attempts to create all resources, failing over to patching any that already exist (and logging this to debug-log). 
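In isolation, that create-then-patch fallback looks roughly like the sketch below; it mirrors the helper's merge-patch call but is not a verbatim copy of it.

import lightkube
from lightkube.core.exceptions import ApiError
from lightkube.types import PatchType


def create_or_patch(client: lightkube.Client, obj):
    """Create obj; if it already exists (HTTP 409), fall back to a merge patch."""
    try:
        client.create(obj)
    except ApiError as e:
        if e.status.code != 409:
            raise
        client.patch(
            type(obj),
            obj.metadata.name,
            obj.to_dict(),
            namespace=obj.metadata.namespace,
            patch_type=PatchType.MERGE,
        )
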
This is not foolproof (it could overwrite existing resources) Also changed: * k8s resource names now slightly more unique, attempting to avoid conflicts * changed CI name for `test_build_and_deploy` to `test_build_and_deploy_with_trust` to make it easier to run just that test during debugging --- src/charm.py | 23 +++++++++++++------ .../manifests/metacontroller-crds-v1.yaml | 6 +++++ src/files/manifests/metacontroller-rbac.yaml | 18 +++++++++++---- src/files/manifests/metacontroller.yaml | 12 +++++----- tests/integration/test_charm.py | 2 +- 5 files changed, 42 insertions(+), 19 deletions(-) diff --git a/src/charm.py b/src/charm.py index d19e6b8..3aceea8 100755 --- a/src/charm.py +++ b/src/charm.py @@ -28,6 +28,10 @@ class MetacontrollerOperatorCharm(CharmBase): def __init__(self, *args): super().__init__(*args) + self._name = self.model.app.name + self._namespace = self.model.name + self._metacontroller_image = METACONTROLLER_IMAGE + if not self.unit.is_leader(): self.model.unit.status = WaitingStatus("Waiting for leadership") return @@ -46,6 +50,7 @@ def __init__(self, *args): self._lightkube_client = None def _install(self, event): + """Creates k8s resources required for the charm, patching over any existing ones it finds""" self.logger.info("Installing by instantiating Kubernetes objects") self.unit.status = MaintenanceStatus("Instantiating Kubernetes objects") @@ -62,6 +67,7 @@ def _install(self, event): "This may be due to the charm lacking permissions to create cluster-scoped " "roles and resources. Charm must be deployed with `--trust`" ) + self.logger.error(f"Error received: {str(e)}") self.unit.status = BlockedStatus( "Cannot create required RBAC. Charm may not have `--trust`" ) @@ -118,21 +124,21 @@ def _remove(self, _): """Remove charm""" raise NotImplementedError() - def _create_crds(self, if_exists=None): + def _create_crds(self, if_exists="patch"): self.logger.info("Applying manifests for CRDs") objs = self._render_crds() create_all_lightkube_objects( objs, if_exists=if_exists, lightkube_client=self.lightkube_client ) - def _create_rbac(self, if_exists=None): + def _create_rbac(self, if_exists="patch"): self.logger.info("Applying manifests for RBAC") objs = self._render_rbac() create_all_lightkube_objects( objs, if_exists=if_exists, lightkube_client=self.lightkube_client ) - def _create_controller(self, if_exists=None): + def _create_controller(self, if_exists="patch"): self.logger.info("Applying manifests for controller") objs = self._render_controller() create_all_lightkube_objects( @@ -142,8 +148,9 @@ def _create_controller(self, if_exists=None): def _render_yaml(self, yaml_filename: [str, Path]): """Returns a list of lightkube k8s objects for a yaml file, rendered in charm context""" context = { - "namespace": self.model.name, - "metacontroller_image": METACONTROLLER_IMAGE, + "app_name": self._name, + "namespace": self._namespace, + "metacontroller_image": self._metacontroller_image, } with open(self._manifests_file_root / yaml_filename) as f: return codecs.load_all_yaml(f, context=context) @@ -263,14 +270,16 @@ def create_all_lightkube_objects( for obj in objs: try: + if logger: + logger.info(f"Creating {obj.metadata.name}.") lightkube_client.create(obj) except lightkube.core.exceptions.ApiError as e: - if if_exists is None: + if e.status.code != 409 or if_exists is None: raise e else: if logger: logger.info( - f"Caught {e.status} when creating {obj.metadata.name}. " + f"Caught {e.status} when creating {obj.metadata.name} ({obj.metadata}). 
" f"Trying to {if_exists}" ) if if_exists == "replace": diff --git a/src/files/manifests/metacontroller-crds-v1.yaml b/src/files/manifests/metacontroller-crds-v1.yaml index 71892f4..c431bdb 100644 --- a/src/files/manifests/metacontroller-crds-v1.yaml +++ b/src/files/manifests/metacontroller-crds-v1.yaml @@ -5,6 +5,8 @@ kind: CustomResourceDefinition metadata: annotations: "api-approved.kubernetes.io": "unapproved, request not yet submitted" + labels: + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm name: compositecontrollers.metacontroller.k8s.io spec: group: metacontroller.k8s.io @@ -251,6 +253,8 @@ kind: CustomResourceDefinition metadata: annotations: "api-approved.kubernetes.io": "unapproved, request not yet submitted" + labels: + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm name: controllerrevisions.metacontroller.k8s.io spec: group: metacontroller.k8s.io @@ -313,6 +317,8 @@ kind: CustomResourceDefinition metadata: annotations: "api-approved.kubernetes.io": "unapproved, request not yet submitted" + labels: + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm name: decoratorcontrollers.metacontroller.k8s.io spec: group: metacontroller.k8s.io diff --git a/src/files/manifests/metacontroller-rbac.yaml b/src/files/manifests/metacontroller-rbac.yaml index f553ce7..d5717f0 100644 --- a/src/files/manifests/metacontroller-rbac.yaml +++ b/src/files/manifests/metacontroller-rbac.yaml @@ -1,13 +1,17 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: metacontroller + labels: + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm + name: {{ app_name }}-charm namespace: {{ namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metacontroller + labels: + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm + name: {{ namespace }}-{{ app_name }}-charm rules: - apiGroups: - "*" @@ -19,14 +23,16 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: metacontroller + name: {{ namespace }}-{{ app_name }}-charm + labels: + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm subjects: - kind: ServiceAccount - name: metacontroller + name: {{ app_name }}-charm namespace: {{ namespace }} roleRef: kind: ClusterRole - name: metacontroller + name: {{ namespace }}-{{ app_name }}-charm apiGroup: rbac.authorization.k8s.io --- kind: ClusterRole @@ -37,6 +43,7 @@ metadata: rbac.authorization.k8s.io/aggregate-to-admin: "true" rbac.authorization.k8s.io/aggregate-to-edit: "true" rbac.authorization.k8s.io/aggregate-to-view: "true" + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm rules: - apiGroups: - metacontroller.k8s.io @@ -56,6 +63,7 @@ metadata: labels: rbac.authorization.k8s.io/aggregate-to-admin: "true" rbac.authorization.k8s.io/aggregate-to-edit: "true" + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm rules: - apiGroups: - metacontroller.k8s.io diff --git a/src/files/manifests/metacontroller.yaml b/src/files/manifests/metacontroller.yaml index a020256..ca6616c 100644 --- a/src/files/manifests/metacontroller.yaml +++ b/src/files/manifests/metacontroller.yaml @@ -3,24 +3,24 @@ apiVersion: apps/v1 kind: StatefulSet metadata: labels: - app.kubernetes.io/name: metacontroller - name: metacontroller + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm + name: {{ app_name }}-charm namespace: {{ namespace }} spec: replicas: 1 selector: matchLabels: - app.kubernetes.io/name: metacontroller + app.kubernetes.io/name: {{ namespace }}-{{ app_name 
}}-charm serviceName: "" template: metadata: labels: - app.kubernetes.io/name: metacontroller + app.kubernetes.io/name: {{ namespace }}-{{ app_name }}-charm spec: - serviceAccountName: metacontroller + serviceAccountName: {{ app_name }}-charm containers: - name: metacontroller - image: {{ metacontroller-image }} + image: {{ metacontroller_image }} command: ["/usr/bin/metacontroller"] args: - -v=4 diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index ab8bed8..91e0d52 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -15,7 +15,7 @@ @pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest): +async def test_build_and_deploy_with_trust(ops_test: OpsTest): logger.info("Building charm") built_charm_path = await ops_test.build_charm("./") logger.info(f"Built charm {built_charm_path}") From 4200f8c7bf1256eefb6dea215463ef09d9011b3b Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Fri, 19 Nov 2021 15:37:24 -0500 Subject: [PATCH 23/27] fix: formatting --- tests/unit/test_charm.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 5c1063c..58690c9 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -194,7 +194,10 @@ def test_check_deployed_resources( expected_are_resources_ok, ): # Mock away lightkube_client so it doesn't look for/fail on kubernetes config - mocked_client = mocker.patch("charm.MetacontrollerOperatorCharm.lightkube_client", new_callable=mock.PropertyMock) + mocked_client = mocker.patch( + "charm.MetacontrollerOperatorCharm.lightkube_client", + new_callable=mock.PropertyMock, + ) mocked_client.return_value = None mocker.patch("charm.get_k8s_obj", side_effect=mock_get_k8s_obj) From 85c3af536c8afbf2ac292e922fbfb7591930c950 Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Fri, 19 Nov 2021 16:02:16 -0500 Subject: [PATCH 24/27] fix: disable _no_trust integration test Seems to be colliding with the regular deploy test, maybe because the second test starts before the first is fully torn down? Not sure, but both work in isolation locally. --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 86cbd36..9f9f29d 100644 --- a/tox.ini +++ b/tox.ini @@ -31,5 +31,6 @@ commands = deps = {[testenv]deps} -r test-integration-requirements.txt +# _no_trust failing in github runner, possibly because it starts before previous charm is torn down? 
Works in isolation locally commands = - pytest -vv --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration + pytest -vv --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration --ignore=tests/integration/test_charm_no_trust.py From 24ef72b4fb7fcd15d24ff0062b933e0c453b616d Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Fri, 19 Nov 2021 16:49:23 -0500 Subject: [PATCH 25/27] Refactor to address review comments * refactor _create and _render to minimize duplicated code * misc minor changes --- .github/workflows/integrate.yaml | 5 +- charmcraft.yaml | 10 ---- src/charm.py | 52 ++++++------------- .../simple_manifest_input.yaml | 0 .../simple_manifest_result.yaml | 0 tests/unit/test_charm.py | 23 ++++---- 6 files changed, 32 insertions(+), 58 deletions(-) rename tests/unit/{render_yaml => render_resource}/simple_manifest_input.yaml (100%) rename tests/unit/{render_yaml => render_resource}/simple_manifest_result.yaml (100%) diff --git a/.github/workflows/integrate.yaml b/.github/workflows/integrate.yaml index de8af4b..64a5268 100644 --- a/.github/workflows/integrate.yaml +++ b/.github/workflows/integrate.yaml @@ -38,6 +38,7 @@ jobs: uses: charmed-kubernetes/actions-operator@main with: provider: microk8s + channel: 1.21/stable charmcraft-channel: latest/candidate - name: Test @@ -61,9 +62,9 @@ jobs: if: failure() - name: Get application logs - run: kubectl logs -n testing --tail 1000 -ljuju-app=metacontroller-operator + run: kubectl logs -n testing --tail 1000 -lapp.kubernetes.io/name=metacontroller-operator if: failure() - name: Get application operator logs - run: kubectl logs -n testing --tail 1000 -ljuju-operator=metacontroller-operator + run: kubectl logs -n testing --tail 1000 -loperator.juju.is/name=metacontroller-operator if: failure() diff --git a/charmcraft.yaml b/charmcraft.yaml index bc071ba..41250d8 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -7,16 +7,6 @@ bases: - name: "ubuntu" channel: "20.04" parts: -# TODO/DEBUG: Including the below kubectl and charm parts USUALLY causes an infinite loop during `charmcraft pack`. -# Omitting for now... - kubectl: - plugin: dump - source: . - override-pull: | - curl -sSLO https://dl.k8s.io/release/v1.22.1/bin/linux/amd64/kubectl - chmod +x kubectl - build-packages: - - curl charm: # Fixes jinja install during pack. 
See https://github.com/canonical/charmcraft/issues/551 charm-python-packages: [setuptools, pip] diff --git a/src/charm.py b/src/charm.py index 3aceea8..4e88148 100755 --- a/src/charm.py +++ b/src/charm.py @@ -31,6 +31,11 @@ def __init__(self, *args): self._name = self.model.app.name self._namespace = self.model.name self._metacontroller_image = METACONTROLLER_IMAGE + self._resource_files = { + "crds": "metacontroller-crds-v1.yaml", + "rbac": "metacontroller-rbac.yaml", + "controller": "metacontroller.yaml", + } if not self.unit.is_leader(): self.model.unit.status = WaitingStatus("Waiting for leadership") @@ -58,7 +63,7 @@ def _install(self, event): # trusted" blocked status # create rbac try: - self._create_rbac() + self._create_resource("rbac") except ApiError as e: # Handle forbidden error as this likely means we don't have --trust if e.status.code == 403: @@ -76,10 +81,10 @@ def _install(self, event): raise e # Create crds - self._create_crds() + self._create_resource("crds") # deploy the controller - self._create_controller() + self._create_resource("controller") self.logger.info("Waiting for installed Kubernetes objects to be operational") max_attempts = 20 @@ -124,52 +129,29 @@ def _remove(self, _): """Remove charm""" raise NotImplementedError() - def _create_crds(self, if_exists="patch"): - self.logger.info("Applying manifests for CRDs") - objs = self._render_crds() - create_all_lightkube_objects( - objs, if_exists=if_exists, lightkube_client=self.lightkube_client - ) - - def _create_rbac(self, if_exists="patch"): - self.logger.info("Applying manifests for RBAC") - objs = self._render_rbac() - create_all_lightkube_objects( - objs, if_exists=if_exists, lightkube_client=self.lightkube_client - ) - - def _create_controller(self, if_exists="patch"): - self.logger.info("Applying manifests for controller") - objs = self._render_controller() + def _create_resource(self, resource_name, if_exists="patch"): + self.logger.info(f"Applying manifests for {resource_name}") + objs = self._render_resource(resource_name) create_all_lightkube_objects( objs, if_exists=if_exists, lightkube_client=self.lightkube_client ) - def _render_yaml(self, yaml_filename: [str, Path]): + def _render_resource(self, yaml_name: [str, Path]): """Returns a list of lightkube k8s objects for a yaml file, rendered in charm context""" context = { "app_name": self._name, "namespace": self._namespace, "metacontroller_image": self._metacontroller_image, } - with open(self._manifests_file_root / yaml_filename) as f: + with open(self._manifests_file_root / self._resource_files[yaml_name]) as f: return codecs.load_all_yaml(f, context=context) - def _render_crds(self): - """Returns a list of lightkube k8s objects for the CRDs, rendered in charm context""" - return self._render_yaml("metacontroller-crds-v1.yaml") - - def _render_rbac(self): - """Returns a list of lightkube k8s objects for the charm RBAC, rendered in charm context""" - return self._render_yaml("metacontroller-rbac.yaml") - - def _render_controller(self): - """Returns a list of lightkube k8s objects for the controller, rendered in charm context""" - return self._render_yaml("metacontroller.yaml") - def _render_all_resources(self): """Returns a list of lightkube8s objects for all resources, rendered in charm context""" - return self._render_rbac() + self._render_crds() + self._render_controller() + resources = [] + for resource_name in self._resource_files: + resources.extend(self._render_resource(resource_name)) + return resources def _check_deployed_resources(self): 
"""Check the status of all deployed resources, returning True if ok""" diff --git a/tests/unit/render_yaml/simple_manifest_input.yaml b/tests/unit/render_resource/simple_manifest_input.yaml similarity index 100% rename from tests/unit/render_yaml/simple_manifest_input.yaml rename to tests/unit/render_resource/simple_manifest_input.yaml diff --git a/tests/unit/render_yaml/simple_manifest_result.yaml b/tests/unit/render_resource/simple_manifest_result.yaml similarity index 100% rename from tests/unit/render_yaml/simple_manifest_result.yaml rename to tests/unit/render_resource/simple_manifest_result.yaml diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 58690c9..b2a4637 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -36,16 +36,17 @@ def test_not_leader(harness): assert harness.charm.model.unit.status == WaitingStatus("Waiting for leadership") -def test_render_yaml(harness_with_charm): - manifests_root = Path("./tests/unit/render_yaml/") +def test_render_resource(harness_with_charm): + manifests_root = Path("./tests/unit/render_resource/") unrendered_yaml_file = "simple_manifest_input.yaml" expected_yaml_file = "simple_manifest_result.yaml" harness = harness_with_charm harness.charm.manifest_file_root = manifests_root + harness.charm._resource_files = {"test_yaml": unrendered_yaml_file} assert harness.charm.manifest_file_root == Path(manifests_root) - objs = harness.charm._render_yaml(unrendered_yaml_file) + objs = harness.charm._render_resource("test_yaml") rendered_yaml_expected = ( Path(manifests_root / expected_yaml_file).read_text().strip() ) @@ -60,7 +61,7 @@ def test_create_controller(harness_with_charm, mocker): StatefulSet(metadata=ObjectMeta(name="test-deployment", namespace=namespace)), ] mocker.patch( - "charm.MetacontrollerOperatorCharm._render_controller", + "charm.MetacontrollerOperatorCharm._render_resource", return_value=mocked_render_controller_return, ) @@ -72,7 +73,7 @@ def test_create_controller(harness_with_charm, mocker): harness = harness_with_charm harness.charm.lightkube_client = mocked_lightkube_client - harness.charm._create_controller() + harness.charm._create_resource("dummy-name") mocked_lightkube_client.assert_has_calls(mocked_lightkube_client_expected_calls) @@ -84,20 +85,20 @@ def test_create_controller(harness_with_charm, mocker): ), ) def test_install(harness_with_charm, mocker, install_succeeded, expected_status): - mocker.patch("charm.MetacontrollerOperatorCharm._create_rbac") - mocker.patch("charm.MetacontrollerOperatorCharm._create_crds") - mocker.patch("charm.MetacontrollerOperatorCharm._create_controller") + mocker.patch("charm.MetacontrollerOperatorCharm._create_resource") mocker.patch( "charm.MetacontrollerOperatorCharm._check_deployed_resources", return_value=install_succeeded, ) mocker.patch("time.sleep") + expected_calls = [ + mock.call(resource_name) for resource_name in ["rbac", "crds", "controller"] + ] + harness = harness_with_charm harness.charm.on.install.emit() - harness.charm._create_rbac.assert_called_once() - harness.charm._create_crds.assert_called_once() - harness.charm._create_controller.assert_called_once() + harness.charm._create_resource.assert_has_calls(expected_calls) assert harness.charm.model.unit.status == expected_status From 198dd8cd597c0c5329ea63888e3f298cca62f5cc Mon Sep 17 00:00:00 2001 From: Andrew Scribner Date: Tue, 23 Nov 2021 09:46:07 -0500 Subject: [PATCH 26/27] Refactor to address review comments Includes: * Removing properties for simplicity * Using tenacity.retry 
instead of while loop for checking resources
* Using CheckFailed exception for raising errors in helpers
* Scheduled weekly integration CI
---
 .github/workflows/integrate.yaml |   2 +
 requirements.txt                 |   7 +-
 src/charm.py                     | 170 ++++++++++++++++++-------------
 tests/unit/test_charm.py         | 121 ++++++++++++++--------
 4 files changed, 186 insertions(+), 114 deletions(-)

diff --git a/.github/workflows/integrate.yaml b/.github/workflows/integrate.yaml
index 64a5268..2c54810 100644
--- a/.github/workflows/integrate.yaml
+++ b/.github/workflows/integrate.yaml
@@ -5,6 +5,8 @@ on:
     branches:
       - main
   pull_request:
+  schedule:
+    - cron: '0 8 * * TUE'
 
 jobs:
   lint:
diff --git a/requirements.txt b/requirements.txt
index 5a41d2e..b335dcd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
-ops >= 1.2.0
-jinja2 < 4
-oci-image<1.1.0
+ops < 1.3.0
+jinja2 < 3.1
+oci-image < 1.1.0
 kubernetes < 19.0.0
 lightkube
+tenacity < 9
diff --git a/src/charm.py b/src/charm.py
index 4e88148..982827f 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -3,7 +3,6 @@
 # See LICENSE file for licensing details.
 
 import glob
-import time
 from typing import Optional
 
 import logging
@@ -17,6 +16,12 @@ from lightkube import codecs
 from lightkube.resources.apps_v1 import StatefulSet
 from lightkube.core.exceptions import ApiError
 
+from tenacity import (
+    Retrying,
+    retry_if_exception_type,
+    stop_after_delay,
+    wait_exponential,
+)
 
 METACONTROLLER_IMAGE = "metacontroller/metacontroller:v0.3.0"
 
@@ -28,15 +33,6 @@ class MetacontrollerOperatorCharm(CharmBase):
     def __init__(self, *args):
         super().__init__(*args)
 
-        self._name = self.model.app.name
-        self._namespace = self.model.name
-        self._metacontroller_image = METACONTROLLER_IMAGE
-        self._resource_files = {
-            "crds": "metacontroller-crds-v1.yaml",
-            "rbac": "metacontroller-rbac.yaml",
-            "controller": "metacontroller.yaml",
-        }
-
         if not self.unit.is_leader():
             self.model.unit.status = WaitingStatus("Waiting for leadership")
             return
@@ -44,23 +40,29 @@ def __init__(self, *args):
         self.framework.observe(self.on.install, self._install)
         self.framework.observe(self.on.remove, self._remove)
         self.framework.observe(self.on.update_status, self._update_status)
-        # self.framework.observe(self.on.config_changed, self._reconcile)
 
-        self.logger = logging.getLogger(__name__)
+        self.logger: logging.Logger = logging.getLogger(__name__)
+
+        self._name: str = self.model.app.name
+        self._namespace: str = self.model.name
+        self._metacontroller_image: str = METACONTROLLER_IMAGE
+        self._resource_files: dict = {
+            "crds": "metacontroller-crds-v1.yaml",
+            "rbac": "metacontroller-rbac.yaml",
+            "controller": "metacontroller.yaml",
+        }
 
         # TODO: Fix file imports and move ./src/files back to ./files
-        self._manifests_file_root = None
-        self.manifest_file_root = "./src/files/manifests/"
+        self._manifest_file_root: Path = Path("./src/files/manifests/")
 
-        self._lightkube_client = None
+        self._lightkube_client: Optional[lightkube.Client] = None
+        self._max_time_checking_resources = 150
 
     def _install(self, event):
         """Creates k8s resources required for the charm, patching over any existing ones it finds"""
         self.logger.info("Installing by instantiating Kubernetes objects")
         self.unit.status = MaintenanceStatus("Instantiating Kubernetes objects")
 
-        # TODO: catch error when this fails due to permissions and set appropriate "we aren't
-        # trusted" blocked status
         # create rbac
         try:
             self._create_resource("rbac")
@@ -80,42 +82,45 @@ def _install(self, event):
             else:
                 raise e
 
-        # Create crds
         self._create_resource("crds")
 
-        # deploy the controller
         self._create_resource("controller")
 
         self.logger.info("Waiting for installed Kubernetes objects to be operational")
-        max_attempts = 20
-        for attempt in range(max_attempts):
-            self.logger.info(f"validation attempt {attempt}/{max_attempts}")
-            running = self._check_deployed_resources()
-
-            if running is True:
-                self.logger.info("Resources detected as running")
-                self.logger.info("Install successful")
-                self.unit.status = ActiveStatus()
-                return
-            else:
-                sleeptime = 10
-                self.logger.info(f"Sleeping {sleeptime}s")
-                time.sleep(sleeptime)
-        else:
+
+        self.logger.info(
+            f"Checking status of charm deployment in Kubernetes "
+            f"(retrying for maximum {self._max_time_checking_resources}s)"
+        )
+        try:
+            for attempt in Retrying(
+                retry=retry_if_exception_type(CheckFailed),
+                stop=stop_after_delay(max_delay=self._max_time_checking_resources),
+                wait=wait_exponential(multiplier=0.1, min=0.1, max=15),
+                reraise=True,
+            ):
+                with attempt:
+                    self.logger.info(
+                        f"Trying attempt {attempt.retry_state.attempt_number}"
+                    )
+                    self._check_deployed_resources()
+        except CheckFailed:
             self.unit.status = BlockedStatus(
-                "Some kubernetes resources missing/not ready"
+                "Some Kubernetes resources did not start correctly during install"
             )
             return
 
+        self.logger.info("Resources detected as running")
+        self.logger.info("Install successful")
+        self.unit.status = ActiveStatus()
+        return
+
     def _update_status(self, event):
         self.logger.info("Comparing current state to desired state")
-        running = self._check_deployed_resources()
-        if running is True:
-            self.logger.info("Resources are ok. Unit in ActiveStatus")
-            self.unit.status = ActiveStatus()
-            return
-        else:
+        try:
+            self._check_deployed_resources()
+        except CheckFailed:
             self.logger.info(
                 "Resources are missing. Triggering install to reconcile resources"
             )
@@ -125,6 +130,10 @@ def _update_status(self, event):
             self._install(event)
             return
 
+        self.logger.info("Resources are ok. Unit in ActiveStatus")
+        self.unit.status = ActiveStatus()
+        return
+
     def _remove(self, _):
         """Remove charm"""
         raise NotImplementedError()
@@ -143,7 +152,7 @@ def _render_resource(self, yaml_name: [str, Path]):
             "namespace": self._namespace,
             "metacontroller_image": self._metacontroller_image,
         }
-        with open(self._manifests_file_root / self._resource_files[yaml_name]) as f:
+        with open(self._manifest_file_root / self._resource_files[yaml_name]) as f:
             return codecs.load_all_yaml(f, context=context)
 
     def _render_all_resources(self):
@@ -154,34 +163,46 @@ def _render_all_resources(self):
         return resources
 
     def _check_deployed_resources(self):
-        """Check the status of all deployed resources, returning True if ok"""
+        """Check the status of deployed resources, returning True if ok else raising CheckFailed
+
+        All abnormalities are captured in logs
+        """
         expected_resources = self._render_all_resources()
         found_resources = [None] * len(expected_resources)
+        errors = []
+
+        self.logger.info("Checking for expected resources")
         for i, resource in enumerate(expected_resources):
-            self.logger.info(f"Checking for '{resource.metadata}'")
             try:
-                found_resources[i] = get_k8s_obj(resource, self.lightkube_client)
+                found_resources[i] = self.lightkube_client.get(
+                    type(resource),
+                    resource.metadata.name,
+                    namespace=resource.metadata.namespace,
+                )
             except lightkube.core.exceptions.ApiError:
-                self.logger.info(
+                errors.append(
                     f"Cannot find k8s object for metadata '{resource.metadata}'"
                 )
 
-        found_all_resources = all(found_resources)
-
-        statefulset_ok = validate_statefulsets(found_resources, self.logger)
-        return found_all_resources and statefulset_ok
+        self.logger.info("Checking readiness of found StatefulSets")
+        statefulsets_ok, statefulsets_errors = validate_statefulsets(found_resources)
+        errors.extend(statefulsets_errors)
 
-    @property
-    def manifest_file_root(self):
-        return self._manifests_file_root
+        # Log any errors
+        for err in errors:
+            self.logger.info(err)
 
-    @manifest_file_root.setter
-    def manifest_file_root(self, value):
-        self._manifests_file_root = Path(value)
+        if len(errors) == 0:
+            return True
+        else:
+            raise CheckFailed(
+                "Some Kubernetes resources missing/not ready. See logs for details",
+                WaitingStatus,
+            )
 
     def _get_manifest_files(self) -> list:
         """Returns a list of all manifest files"""
-        return glob.glob(str(self.manifest_file_root / "*.yaml"))
+        return glob.glob(str(self._manifest_file_root / "*.yaml"))
 
     @property
     def lightkube_client(self):
@@ -194,26 +215,29 @@ def lightkube_client(self, client):
         self._lightkube_client = client
 
 
-def validate_statefulsets(objs, logger=None):
+def validate_statefulsets(objs):
     """Returns True if all StatefulSets in objs have the expected number of readyReplicas else False
 
     Optionally emits a message to logger for any StatefulSets that do not have their desired number
     of replicas
+
+    Returns: Tuple of (Success [Boolean], Errors [list of str error messages])
     """
-    statefulset_not_ok = False
+    errors = []
+
     for obj in objs:
         if isinstance(obj, StatefulSet):
             readyReplicas = obj.status.readyReplicas
             replicas_expected = obj.spec.replicas
            if readyReplicas != replicas_expected:
-                statefulset_not_ok = True
-                if logger:
-                    logger.info(
-                        f"StatefulSet {obj.metadata.name} in namespace "
-                        f"{obj.metadata.namespace} has {readyReplicas} readyReplicas, "
-                        f"expected {replicas_expected}"
-                    )
-    return not statefulset_not_ok
+                message = (
+                    f"StatefulSet {obj.metadata.name} in namespace "
+                    f"{obj.metadata.namespace} has {readyReplicas} readyReplicas, "
+                    f"expected {replicas_expected}"
+                )
+                errors.append(message)
+
+    return len(errors) == 0, errors
 
 
 ALLOWED_IF_EXISTS = (None, "replace", "patch")
@@ -281,11 +305,15 @@ def create_all_lightkube_objects(
         )
 
 
-def get_k8s_obj(obj, client=None):
-    if not client:
-        client = lightkube.Client()
+class CheckFailed(Exception):
+    """Raise this exception if one of the checks in main fails."""
+
+    def __init__(self, msg, status_type=None):
+        super().__init__()
 
-    return client.get(type(obj), obj.metadata.name, namespace=obj.metadata.namespace)
+        self.msg = msg
+        self.status_type = status_type
+        self.status = status_type(msg)
 
 
 if __name__ == "__main__":
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index b2a4637..5797b04 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -1,11 +1,12 @@
 # Copyright 2021 Canonical Ltd.
 # See LICENSE file for licensing details.
+from contextlib import nullcontext as does_not_raise
 import logging
 from pathlib import Path
 from unittest import mock
 
-from charm import MetacontrollerOperatorCharm
+from charm import MetacontrollerOperatorCharm, CheckFailed
 import lightkube.codecs
 from lightkube.resources.apps_v1 import Deployment, StatefulSet
 from lightkube.resources.apiextensions_v1 import CustomResourceDefinition
@@ -37,18 +38,18 @@ def test_not_leader(harness):
 
 
 def test_render_resource(harness_with_charm):
-    manifests_root = Path("./tests/unit/render_resource/")
+    manifest_root = Path("./tests/unit/render_resource/")
     unrendered_yaml_file = "simple_manifest_input.yaml"
     expected_yaml_file = "simple_manifest_result.yaml"
 
     harness = harness_with_charm
-    harness.charm.manifest_file_root = manifests_root
+    harness.charm._manifest_file_root = manifest_root
     harness.charm._resource_files = {"test_yaml": unrendered_yaml_file}
-    assert harness.charm.manifest_file_root == Path(manifests_root)
+    assert harness.charm._manifest_file_root == Path(manifest_root)
 
     objs = harness.charm._render_resource("test_yaml")
     rendered_yaml_expected = (
-        Path(manifests_root / expected_yaml_file).read_text().strip()
+        Path(manifest_root / expected_yaml_file).read_text().strip()
     )
 
     assert lightkube.codecs.dump_all_yaml(objs).strip() == rendered_yaml_expected
@@ -77,18 +78,29 @@ def test_create_controller(harness_with_charm, mocker):
     mocked_lightkube_client.assert_has_calls(mocked_lightkube_client_expected_calls)
 
 
+def returns_true(*args, **kwargs):
+    return True
+
+
 @pytest.mark.parametrize(
-    "install_succeeded, expected_status",
+    "install_side_effect, expected_charm_status",
     (
-        (True, ActiveStatus()),
-        (False, BlockedStatus("Some kubernetes resources missing/not ready")),
+        (returns_true, ActiveStatus()),
+        (
+            CheckFailed("", BlockedStatus),
+            BlockedStatus(
+                "Some Kubernetes resources did not start correctly during install"
+            ),
+        ),
     ),
 )
-def test_install(harness_with_charm, mocker, install_succeeded, expected_status):
+def test_install(
+    harness_with_charm, mocker, install_side_effect, expected_charm_status
+):
     mocker.patch("charm.MetacontrollerOperatorCharm._create_resource")
     mocker.patch(
         "charm.MetacontrollerOperatorCharm._check_deployed_resources",
-        return_value=install_succeeded,
+        side_effect=install_side_effect,
     )
     mocker.patch("time.sleep")
 
@@ -97,17 +109,22 @@ def test_install(harness_with_charm, mocker, install_succeeded, expected_status)
     ]
 
     harness = harness_with_charm
+
+    # Fail fast
+    harness.charm._max_time_checking_resources = 1.5
+
     harness.charm.on.install.emit()
+
     harness.charm._create_resource.assert_has_calls(expected_calls)
-    assert harness.charm.model.unit.status == expected_status
+    assert harness.charm.model.unit.status == expected_charm_status
 
 
 @pytest.mark.parametrize(
-    "deployed_resources_status, expected_status, n_install_calls",
+    "check_deployed_resources_side_effect, expected_status, n_install_calls",
     (
-        (True, ActiveStatus(), 0),
+        (returns_true, ActiveStatus(), 0),
         (
-            False,
+            CheckFailed("", WaitingStatus),
             MaintenanceStatus("Missing kubernetes resources detected - reinstalling"),
             1,
         ),
@@ -116,14 +133,14 @@ def test_update_status(
     harness_with_charm,
     mocker,
-    deployed_resources_status,
+    check_deployed_resources_side_effect,
     expected_status,
     n_install_calls,
 ):
     mocker.patch("charm.MetacontrollerOperatorCharm._install")
     mocker.patch(
         "charm.MetacontrollerOperatorCharm._check_deployed_resources",
-        return_value=deployed_resources_status,
+        side_effect=check_deployed_resources_side_effect,
     )
 
     harness = harness_with_charm
@@ -149,7 +166,7 @@ def fake_get_k8s_obj_crds_fail(obj, _):
 
 
 @pytest.fixture()
-def mock_resources_ready():
+def resources_that_are_ok():
     """List of lightkube resources that are ok"""
     return [
         StatefulSet(
@@ -162,7 +179,20 @@
 
 
 @pytest.fixture()
-def mock_resources_ss_not_ready():
+def resources_with_apierror():
+    """List of lightkube resources that includes an exception"""
+    return [
+        StatefulSet(
+            metadata=ObjectMeta(name="ss1", namespace="namespace"),
+            spec=StatefulSetSpec(replicas=3, selector="", serviceName="", template=""),
+            status=StatefulSetStatus(replicas=3, readyReplicas=3),
+        ),
+        _FakeApiError(),
+    ]
+
+
+@pytest.fixture()
+def resources_with_unready_statefulset():
     """
     List of lightkube resources that has a StatefulSet that does not have all replicas available
     """
@@ -176,45 +206,56 @@
 
 
 @pytest.mark.parametrize(
-    "mock_get_k8s_obj,mock_resources_fixture,expected_are_resources_ok",
+    "render_all_resources_side_effect_fixture,lightkube_get_side_effect_fixture,expected_result",
     (
         # Test where objects exist in k8s
-        (fake_get_k8s_obj_always_successful, "mock_resources_ready", True),
+        ("resources_that_are_ok", "resources_that_are_ok", does_not_raise()),
         # Test where objects do not exist in k8s
-        (fake_get_k8s_obj_crds_fail, "mock_resources_ready", False),
+        (
+            "resources_that_are_ok",
+            "resources_with_apierror",
+            pytest.raises(CheckFailed),
+        ),
         # Test with StatefulSet that does not have all replicas
-        (fake_get_k8s_obj_always_successful, "mock_resources_ss_not_ready", False),
+        (
+            "resources_with_unready_statefulset",
+            "resources_with_unready_statefulset",
+            pytest.raises(CheckFailed),
+        ),
     ),
 )
 def test_check_deployed_resources(
     request,
     mocker,
     harness_with_charm,
-    mock_get_k8s_obj,
-    mock_resources_fixture,
-    expected_are_resources_ok,
+    render_all_resources_side_effect_fixture,
+    lightkube_get_side_effect_fixture,
+    expected_result,
 ):
-    # Mock away lightkube_client so it doesn't look for/fail on kubernetes config
-    mocked_client = mocker.patch(
-        "charm.MetacontrollerOperatorCharm.lightkube_client",
-        new_callable=mock.PropertyMock,
-    )
-    mocked_client.return_value = None
-
-    mocker.patch("charm.get_k8s_obj", side_effect=mock_get_k8s_obj)
-
+    # Mock _render_all_resources
     # Get the value of the fixture passed as a param, otherwise it is just the fixture itself
-    mock_resources = request.getfixturevalue(mock_resources_fixture)
+    # Resources for render_all_resources are assigned to return_value so it returns the entire list
+    # of resources in a single call
+    render_all_resources_side_effect = request.getfixturevalue(
+        render_all_resources_side_effect_fixture
+    )
     mocker.patch(
         "charm.MetacontrollerOperatorCharm._render_all_resources",
-        return_value=mock_resources,
+        return_value=render_all_resources_side_effect,
    )
 
-    harness = harness_with_charm
-    are_resources_ok = harness.charm._check_deployed_resources()
-    assert are_resources_ok == expected_are_resources_ok
+    # Mock charm's lightkube client
+    # Resources for mock of client.get are assigned to side_effect so a single resource is returned
+    # for each call
+    lightkube_get_side_effect_fixture = request.getfixturevalue(
+        lightkube_get_side_effect_fixture
+    )
+    mock_client = mock.MagicMock()
+    mock_client.get.side_effect = lightkube_get_side_effect_fixture
 
-    # TODO: Assert on the logs emitted?
+    harness = harness_with_charm
+    with expected_result:
+        harness.charm._check_deployed_resources()
 
 
 class _FakeResponse:

From 29d85268915187b069e0f4762599b96efb3427eb Mon Sep 17 00:00:00 2001
From: Andrew Scribner
Date: Tue, 23 Nov 2021 10:15:55 -0500
Subject: [PATCH 27/27] fix: test_check_deployed_resources

Restore assignment of mock object that was removed by mistake
---
 tests/unit/test_charm.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index 5797b04..df6176a 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -111,7 +111,7 @@ def test_install(
     harness = harness_with_charm
 
     # Fail fast
-    harness.charm._max_time_checking_resources = 1.5
+    harness.charm._max_time_checking_resources = 0.5
 
     harness.charm.on.install.emit()
 
@@ -254,6 +254,8 @@ def test_check_deployed_resources(
     mock_client.get.side_effect = lightkube_get_side_effect_fixture
 
     harness = harness_with_charm
+    harness.charm._lightkube_client = mock_client
+
     with expected_result:
         harness.charm._check_deployed_resources()
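
For readers following the src/charm.py diff above, here is a minimal stand-alone sketch of the retry pattern it adopts: a custom CheckFailed exception plus tenacity's Retrying iterator in place of the old while/sleep loop. This is an illustration under assumptions, not charm code; the names check_ready, wait_until_ready, and MAX_WAIT_S are invented for the example, while the tenacity calls mirror the ones the patch itself imports.

# Minimal sketch of the retry flow used in _install (illustrative only).
# check_ready(), wait_until_ready() and MAX_WAIT_S are invented names, not part of the charm.
from tenacity import Retrying, retry_if_exception_type, stop_after_delay, wait_exponential


class CheckFailed(Exception):
    """Raised by a readiness check that did not pass."""


MAX_WAIT_S = 150  # overall time budget; the charm keeps an equivalent value on the instance


def check_ready() -> None:
    """Stand-in for a readiness check: raise CheckFailed while resources are not ready."""
    raise CheckFailed("resources not ready yet")


def wait_until_ready() -> bool:
    """Retry check_ready() with exponential backoff until it passes or the budget runs out."""
    try:
        for attempt in Retrying(
            retry=retry_if_exception_type(CheckFailed),  # only retry our own exception
            stop=stop_after_delay(MAX_WAIT_S),           # give up after MAX_WAIT_S seconds
            wait=wait_exponential(multiplier=0.1, min=0.1, max=15),
            reraise=True,                                # surface CheckFailed, not RetryError
        ):
            with attempt:
                check_ready()
    except CheckFailed:
        return False  # a caller would set a blocked status here
    return True       # a caller would set an active status here

Using reraise=True keeps the final failure typed as CheckFailed, which is what lets the charm catch one exception type and translate it into a unit status.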
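Patch 27/27 only restores one assignment, but that line is what keeps the unit test offline: the charm constructs its lightkube client lazily, so a MagicMock must be assigned to the private attribute before the property is first read. The sketch below restates that idea with stand-in names (Operator, Client, test_uses_injected_client are invented for the example and are not the charm's real classes).

# Stand-alone illustration of injecting a fake client before a lazy property builds a real one.
from unittest import mock


class Client:
    """Stand-in for lightkube.Client, which would need a kubeconfig and a live cluster."""

    def get(self, kind, name, namespace=None):
        raise RuntimeError("would talk to a real cluster")


class Operator:
    """Mimics a lazily-constructed lightkube_client property."""

    def __init__(self):
        self._lightkube_client = None

    @property
    def lightkube_client(self):
        if not self._lightkube_client:
            self._lightkube_client = Client()  # built on first access
        return self._lightkube_client


def test_uses_injected_client():
    op = Operator()
    fake = mock.MagicMock()
    fake.get.return_value = "fake-object"
    op._lightkube_client = fake  # inject before the property is first read
    assert op.lightkube_client.get("StatefulSet", "metacontroller") == "fake-object"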