From 66f97f7f421ee1b874ceacea26b804cb9e2ca693 Mon Sep 17 00:00:00 2001
From: Ayoub Nasr
Date: Tue, 3 Sep 2024 18:17:51 +0200
Subject: [PATCH] e2e tests for cert-manager

---
 tests/kube_utils.py                      | 123 +++++++++++++++--------
 tests/post/features/cert_manager.feature |  40 ++++++++
 tests/post/features/sanity.feature       |   3 +
 tests/post/steps/conftest.py             |  17 +++-
 tests/post/steps/test_cert_manager.py    |  89 ++++++++++++++++
 5 files changed, 229 insertions(+), 43 deletions(-)
 create mode 100644 tests/post/features/cert_manager.feature
 create mode 100644 tests/post/steps/test_cert_manager.py

diff --git a/tests/kube_utils.py b/tests/kube_utils.py
index cda0097bab..883f5d56dd 100644
--- a/tests/kube_utils.py
+++ b/tests/kube_utils.py
@@ -80,6 +80,18 @@
       storage: {size}
 """
 
+DEFAULT_SS_CLUSTERISSUER = """
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: {name}
+  labels:
+    app.kubernetes.io/name: cert-manager
+    app.kubernetes.io/managed-by: metalk8s
+spec:
+  selfSigned: {{}}
+"""
+
 # }}}
 
 # See https://kubernetes.io/docs/concepts/architecture/nodes/#condition
@@ -224,6 +236,48 @@ def _check_deletion_marker():
             name="checking that {} {} is marked for deletion".format(self._kind, name),
         )
 
+    def wait_for_status(self, name, status, wait_for_key=None):
+        def _wait_for_status():
+            obj = self.get(name)
+            assert obj is not None, f"{self._kind} not found"
+
+            actual_status = obj.get("status")
+            assert actual_status, f"Unexpected status: expected {status}, got none"
+
+            phase = self.compute_phase(actual_status)
+            assert phase == status, "Unexpected status: expected {}, got {}".format(
+                status, phase
+            )
+
+            if wait_for_key is not None:
+                assert (
+                    wait_for_key in actual_status.keys()
+                ), f"{self._kind} status.{wait_for_key} has not been reconciled"
+
+            return obj
+
+        return utils.retry(
+            _wait_for_status,
+            times=24,
+            wait=5,  # wait for 2 minutes
+            name=f"waiting for {self._kind} {name} to become {status}",
+        )
+
+    @staticmethod
+    def compute_phase(status):
+        for condition in status.get("conditions", []):
+            if condition["type"] != "Ready":
+                continue
+            if condition["status"] == "True":
+                return "Available"
+            elif condition["status"] == "False":
+                return "Failed"
+            elif condition["status"] == "Unknown":
+                return condition["reason"]
+            else:
+                assert False, "invalid condition status: {}".format(condition["status"])
+        return ""
+
     def list(self):
         """Return a list of existing objects."""
         return self._client.get(namespace=self._namespace).items
@@ -268,48 +322,6 @@ def _create(self, body):
         )
         self._client.create(body=body)
 
-    def wait_for_status(self, name, status, wait_for_device_name=False):
-        def _wait_for_status():
-            volume = self.get(name)
-            assert volume is not None, "Volume not found"
-
-            actual_status = volume.get("status")
-            assert actual_status, f"Unexpected status expected {status}, got none"
-
-            phase = self.compute_phase(actual_status)
-            assert phase == status, "Unexpected status: expected {}, got {}".format(
-                status, phase
-            )
-
-            if wait_for_device_name:
-                assert (
-                    "deviceName" in actual_status.keys()
-                ), "Volume status.deviceName has not been reconciled"
-
-            return volume
-
-        return utils.retry(
-            _wait_for_status,
-            times=24,
-            wait=5,  # wait for 2mn
-            name="waiting for Volume {} to become {}".format(name, status),
-        )
-
-    @staticmethod
-    def compute_phase(volume_status):
-        for condition in volume_status.get("conditions", []):
-            if condition["type"] != "Ready":
-                continue
-            if condition["status"] == "True":
-                return "Available"
-            elif condition["status"] == "False":
return "Failed" - elif condition["status"] == "Unknown": - return condition["reason"] - else: - assert False, "invalid condition status: {}".format(condition["status"]) - return "" - @staticmethod def get_error(volume_status): for condition in volume_status.get("conditions", []): @@ -396,4 +408,31 @@ def __init__(self, k8s_client): ) +# }}} +# SecretClient {{{ + + +class SecretClient(Client): + def __init__(self, k8s_client, namespace="metalk8s-certs"): + super().__init__(k8s_client, kind="Secret", namespace=namespace) + + +# }}} +# CertificateClient {{{ + + +class CertificateClient(Client): + def __init__(self, k8s_client, namespace="metalk8s-certs"): + super().__init__(k8s_client, kind="Certificate", namespace=namespace) + + +# }}} +# ClusterIssuerClient {{{ + + +class ClusterIssuerClient(Client): + def __init__(self, k8s_client): + super().__init__(k8s_client, kind="ClusterIssuer") + + # }}} diff --git a/tests/post/features/cert_manager.feature b/tests/post/features/cert_manager.feature new file mode 100644 index 0000000000..2f4702d346 --- /dev/null +++ b/tests/post/features/cert_manager.feature @@ -0,0 +1,40 @@ +@post @local @ci @cert +Feature: CertManager + Scenario: Create a self-signed ClusterIssuer + Given the Kubernetes API is available + When we create the following ClusterIssuer: + apiVersion: cert-manager.io/v1 + kind: ClusterIssuer + metadata: + name: test-selfsigned-issuer + labels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/managed-by: metalk8s + spec: + selfSigned: {} + Then the 'test-selfsigned-issuer' ClusterIssuer is 'Available' + + Scenario: Create a Certificate Authority + Given the Kubernetes API is available + And a 'test-selfsigned-issuer' self-signed ClusterIssuer exists + When we create the following Certificate: + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + name: test-root-ca + labels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/managed-by: metalk8s + namespace: metalk8s-certs + spec: + isCA: true + commonName: Metalk8s-CA + secretName: test-root-ca + duration: 86400h + renewBefore: 2160h + issuerRef: + name: test-selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io + Then the 'test-root-ca' Certificate is 'Available' + And the 'test-root-ca' Secret has the correct fields diff --git a/tests/post/features/sanity.feature b/tests/post/features/sanity.feature index 90c55485d4..0f8bf17226 100644 --- a/tests/post/features/sanity.feature +++ b/tests/post/features/sanity.feature @@ -42,6 +42,9 @@ Feature: Cluster Sanity Checks | metalk8s-monitoring | prometheus-operator-operator | | metalk8s-monitoring | thanos-query | | metalk8s-ui | metalk8s-ui | + | metalk8s-certs | cert-manager | + | metalk8s-certs | cert-manager-cainjector | + | metalk8s-certs | cert-manager-webhook | Scenario Outline: DaemonSet has desired Pods ready Then the DaemonSet in the namespace has all desired Pods ready diff --git a/tests/post/steps/conftest.py b/tests/post/steps/conftest.py index 65ab46a4bf..3f42035bf4 100644 --- a/tests/post/steps/conftest.py +++ b/tests/post/steps/conftest.py @@ -38,6 +38,21 @@ def sc_client(k8s_client): return kube_utils.StorageClassClient(k8s_client) +@pytest.fixture +def clusterissuer_client(k8s_client): + return kube_utils.ClusterIssuerClient(k8s_client) + + +@pytest.fixture +def cert_client(k8s_client): + return kube_utils.CertificateClient(k8s_client) + + +@pytest.fixture +def secret_client(k8s_client): + return kube_utils.SecretClient(k8s_client) + + # }}} # Helpers {{{ @@ -111,7 +126,7 @@ def 
     try:
         yield volume_client.wait_for_status(
-            name, "Available", wait_for_device_name=True
+            name, "Available", wait_for_key="deviceName"
         )
     finally:
         volume_client.delete(name, sync=True)
diff --git a/tests/post/steps/test_cert_manager.py b/tests/post/steps/test_cert_manager.py
new file mode 100644
index 0000000000..d5f6b6fa1f
--- /dev/null
+++ b/tests/post/steps/test_cert_manager.py
@@ -0,0 +1,89 @@
+import pytest
+from pytest_bdd import given, when, then, scenario, parsers
+
+from tests import kube_utils
+
+# Fixture {{{
+
+
+@pytest.fixture(scope="function")
+def context():
+    return {}
+
+
+@pytest.fixture
+def teardown(cert_client, secret_client, clusterissuer_client):
+    yield
+    cert_client.delete_all(sync=True, prefix="test-")
+    secret_client.delete_all(sync=True, prefix="test-")
+    clusterissuer_client.delete_all(sync=True, prefix="test-")
+
+
+# }}}
+# Scenarios {{{
+
+
+@scenario("../features/cert_manager.feature", "Create a self-signed ClusterIssuer")
+def test_create_self_signed_issuer(host):
+    pass
+
+
+@scenario("../features/cert_manager.feature", "Create a Certificate Authority")
+def test_create_certificate_authority(host, teardown):
+    pass
+
+
+# }}}
+# Given {{{
+
+
+@given(parsers.parse("a '{name}' self-signed ClusterIssuer exists"))
+def ss_clusterissuer_exists(name, clusterissuer_client):
+    if clusterissuer_client.get(name) is None:
+        clusterissuer_client.create_from_yaml(
+            kube_utils.DEFAULT_SS_CLUSTERISSUER.format(name=name)
+        )
+        clusterissuer_client.wait_for_status(name, "Available")
+
+
+# }}}
+# When {{{
+
+
+@when(parsers.parse("we create the following ClusterIssuer:\n{body}"))
+def create_clusterissuer(body, clusterissuer_client):
+    clusterissuer_client.create_from_yaml(body)
+
+
+@when(parsers.parse("we create the following Certificate:\n{body}"))
+def create_certificate(body, cert_client):
+    cert_client.create_from_yaml(body)
+
+
+# }}}
+# Then {{{
+
+
+@then(parsers.parse("the '{name}' ClusterIssuer is '{status}'"))
+def check_clusterissuer_status(name, status, clusterissuer_client):
+    clusterissuer_client.wait_for_status(name, status)
+
+
+@then(parsers.parse("the '{name}' Certificate is '{status}'"))
+def check_certificate_status(name, status, cert_client):
+    cert_client.wait_for_status(name, status)
+
+
+@then(parsers.parse("the '{name}' Secret has the correct fields"))
+def check_secret_fields(name, secret_client):
+    secret = secret_client.get(name)
+    assert secret is not None, "Secret {} not found".format(name)
+    for field in ["ca.crt", "tls.crt", "tls.key"]:
+        assert field in secret["data"].keys(), "missing {} field in Secret data".format(
+            field
+        )
+
+
+# }}}
+# Helpers {{{
+# }}}
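
Reviewer note, not part of the patch: a minimal sketch of the contract behind the
generalized wait_for_status/compute_phase pair added above, using only code the
patch introduces. The status dicts are made-up examples and `example` is a
hypothetical helper; `k8s_client` is the existing test fixture.

    from tests import kube_utils

    # compute_phase maps the "Ready" condition onto a phase string:
    # "True" -> "Available", "False" -> "Failed", "Unknown" -> the
    # condition's reason, and a missing Ready condition -> "".
    ready = {"conditions": [{"type": "Ready", "status": "True"}]}
    assert kube_utils.Client.compute_phase(ready) == "Available"

    pending = {"conditions": [{"type": "Ready", "status": "Unknown", "reason": "Pending"}]}
    assert kube_utils.Client.compute_phase(pending) == "Pending"

    def example(k8s_client):
        # wait_for_status polls get(name) up to 24 times at 5s intervals
        # (2 minutes) until compute_phase(status) matches; wait_for_key
        # additionally requires a status key to be reconciled, replacing
        # the old Volume-specific wait_for_device_name flag.
        cert_client = kube_utils.CertificateClient(k8s_client)
        return cert_client.wait_for_status("test-root-ca", "Available")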