diff --git a/config.yaml b/config.yaml
index d04ce667..a42d677e 100644
--- a/config.yaml
+++ b/config.yaml
@@ -83,6 +83,14 @@ options:
     description: |
       Comma separated authorization modes. Allowed values are
       "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow".
+  authorization-webhook-config-file:
+    type: string
+    default: ""
+    description: |
+      Contents of the authorization webhook configuration passed to
+      kube-apiserver via --authorization-webhook-config-file. For more
+      information, refer to the upstream documentation at
+      https://kubernetes.io/docs/reference/access-authn-authz/webhook/
   channel:
     type: string
     default: "1.29/stable"
diff --git a/metadata.yaml b/metadata.yaml
index 643b9fac..0a9a059e 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -49,6 +49,8 @@ requires:
     interface: ceph-client
     description: |
       [Deprecated] LP:2048692 only necessary for upgrades from < 1.29
+  keystone-credentials:
+    interface: keystone-credentials
   certificates:
     interface: tls-certificates
   dns-provider:
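Note for operators: the new option carries the webhook configuration contents, not a path; the charm writes them to authz-webhook-conf.yaml itself and points kube-apiserver at that file (see src/auth_webhook.py and src/charm.py below). It could be set with something like `juju config kubernetes-control-plane authorization-webhook-config-file="$(cat authz-webhook-config.yaml)"`, where the file is a kubeconfig-format webhook config as described in the upstream docs linked above. A sketch with placeholder values (the endpoint, certificate paths, and names below are illustrative only, not part of this PR):

    # authz-webhook-config.yaml -- illustrative placeholder values
    apiVersion: v1
    kind: Config
    clusters:
    - name: authz-webhook
      cluster:
        certificate-authority: /etc/kubernetes/authz-ca.crt  # CA for the webhook server
        server: https://authz.example.com/authorize          # remote authorizer endpoint
    users:
    - name: kube-apiserver
      user:
        client-certificate: /etc/kubernetes/authz-client.crt # apiserver's client cert
        client-key: /etc/kubernetes/authz-client.key
    current-context: webhook
    contexts:
    - name: webhook
      context:
        cluster: authz-webhook
        user: kube-apiserver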
diff --git a/src/auth_webhook.py b/src/auth_webhook.py
index cbca0455..7ea58c41 100644
--- a/src/auth_webhook.py
+++ b/src/auth_webhook.py
@@ -6,18 +6,21 @@
 import string
 import tempfile
 from base64 import b64decode, b64encode
+from pathlib import Path
 from subprocess import CalledProcessError, check_call, check_output
+from typing import Optional
 
 import charms.contextual_status as status
 import yaml
 from jinja2 import Environment, FileSystemLoader
-from kubectl import kubectl
+from kubectl import kubectl, kubectl_get
 from ops import MaintenanceStatus
 
 auth_secret_ns = "kube-system"
 auth_secret_type = "juju.is/token-auth"
 auth_webhook_root = "/root/cdk/auth-webhook"
 auth_webhook_conf = os.path.join(auth_webhook_root, "auth-webhook-conf.yaml")
+authz_webhook_conf = Path(auth_webhook_root) / "authz-webhook-conf.yaml"
 auth_webhook_exe = os.path.join(auth_webhook_root, "auth-webhook.py")
 # wokeignore:rule=master
 auth_webhook_svc_name = "cdk.master.auth-webhook"
@@ -27,11 +30,39 @@
 log = logging.getLogger(__name__)
 
 
-def configure(
-    charm_dir, aws_iam_endpoint=None, custom_authn_endpoint=None, keystone_endpoint=None
-):
+def _uplift_keystone_endpoint() -> Optional[str]:
+    """Uplift the keystone auth service from a cdk-addons installation."""
+    try:
+        keystone_auth_service = kubectl_get(
+            "service", "-n", "kube-system", "k8s-keystone-auth-service", "--ignore-not-found=true"
+        )
+    except (FileNotFoundError, CalledProcessError) as e:
+        log.info("No k8s-keystone-auth-service to uplift: error %s", e)
+        return None
+    labels = keystone_auth_service.get("metadata", {}).get("labels", {})
+    if labels.get("cdk-addons") != "true":
+        log.info("No cdk-addons based k8s-keystone-auth-service to uplift")
+        return None
+    if not (spec := keystone_auth_service.get("spec")):
+        log.error("No spec found for k8s-keystone-auth-service")
+        return None
+    cluster_ip, port = spec.get("clusterIP"), spec.get("ports")[0].get("port")
+    if not cluster_ip or not port:
+        log.error("No clusterIP or port found for k8s-keystone-auth-service")
+        return None
+    return f"https://{cluster_ip}:{port}/webhook"
+
+
+def _uplift_aws_iam_endpoint() -> Optional[str]:
+    log.warning("TODO: AWS IAM auth is not yet supported for uplift")
+    return None
+
+
+def configure(charm_dir, custom_authn_endpoint=None, custom_authz_config_file=None):
     """Render auth webhook templates and start the related service."""
     status.add(MaintenanceStatus("Configuring auth webhook"))
+    keystone_endpoint = _uplift_keystone_endpoint()
+    aws_iam_endpoint = _uplift_aws_iam_endpoint()
 
     # Set the number of gunicorn workers based on our core count. (2*cores)+1 is
     # recommended: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
@@ -71,6 +102,8 @@ def configure(
     render("auth-webhook.service", auth_webhook_svc, context)
     restart()
 
+    authz_webhook_conf.write_text(custom_authz_config_file or "")
+
 
 def create_token(uid, username, groups=[]):
     token = get_token(username)
diff --git a/src/cdk_addons.py b/src/cdk_addons.py
index 583c6d01..1e1614ba 100644
--- a/src/cdk_addons.py
+++ b/src/cdk_addons.py
@@ -5,9 +5,9 @@
 from subprocess import CalledProcessError, check_call, check_output
 
 import charms.contextual_status as status
+import tenacity
 from kubectl import get_service_ip, kubectl, kubectl_get
 from ops import BlockedStatus
-from tenacity import retry, stop_after_delay, wait_exponential
 
 kubeconfig_dir = "/root/snap/cdk-addons/common"
 kubeconfig_path = f"{kubeconfig_dir}/kubeconfig"
@@ -20,7 +20,12 @@ class CdkAddons:
     def __init__(self, charm):
         self.charm = charm
 
-    @retry(stop=stop_after_delay(60), wait=wait_exponential())
+    @tenacity.retry(
+        reraise=True,
+        stop=tenacity.stop_after_delay(60),
+        wait=tenacity.wait_exponential(),
+        before=tenacity.before_log(log, logging.WARNING),
+    )
     def apply(self):
         """Apply addons."""
         check_call(["cdk-addons.apply"])
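Compared with the old one-liner, the decorator here (and the one on kubectl() in src/kubectl.py below) changes the retry semantics: reraise=True surfaces the final underlying exception instead of tenacity.RetryError, and before_log records a warning before each attempt. A minimal standalone sketch of the pattern; the function and exception type are illustrative, not part of this PR:

    import logging

    import tenacity

    log = logging.getLogger(__name__)


    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(RuntimeError),  # retry only this error type
        reraise=True,  # re-raise the last RuntimeError, not tenacity.RetryError
        stop=tenacity.stop_after_delay(60),  # give up 60s after the first attempt
        wait=tenacity.wait_exponential(),  # exponential backoff between attempts
        before=tenacity.before_log(log, logging.WARNING),  # log each attempt
    )
    def flaky_call():
        """Stand-in for a call that may fail transiently."""
        raise RuntimeError("transient failure")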
diff --git a/src/charm.py b/src/charm.py
index e356106f..6baa4b76 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -118,6 +118,7 @@ def configure_apiserver(self):
             privileged=self.model.config["allow-privileged"],
             service_cidr=self.model.config["service-cidr"],
             external_cloud_provider=self.external_cloud_provider,
+            authz_webhook_conf_file=auth_webhook.authz_webhook_conf,
         )
 
     def configure_apiserver_kubelet_api_admin(self):
@@ -128,11 +129,20 @@
     def configure_auth_webhook(self):
         auth_webhook.configure(
             charm_dir=self.charm_dir,
             custom_authn_endpoint=self.model.config["authn-webhook-endpoint"],
-            # TODO: aws iam, keystone
-            # aws_iam_endpoint=???,
-            # keystone_endpoint=???
+            custom_authz_config_file=self.model.config["authorization-webhook-config-file"],
         )
 
+    def warn_keystone_management(self):
+        relations = self.model.relations.get("keystone-credentials")
+        if relations and any(r.units for r in relations):
+            log.warning(
+                "------------------------------------------------------------\n"
+                "Keystone credential relation is no longer managed\n"
+                "Please remove the relation and manage keystone manually\n"
+                "Run `juju remove-relation kubernetes-control-plane:keystone-credentials keystone`"
+            )
+            status.add(ops.BlockedStatus("Keystone credential relation is no longer managed"))
+
     def configure_container_runtime(self):
         if not self.container_runtime.relations:
             status.add(BlockedStatus("Missing container-runtime integration"))
@@ -491,6 +501,7 @@ def reconcile(self, event):
         self.write_etcd_client_credentials()
         self.write_service_account_key()
         self.configure_auth_webhook()
+        self.warn_keystone_management()
         self.configure_loadbalancers()
         if self.api_dependencies_ready():
             self.encryption_at_rest.prepare()
""" - cfg = "/home/ubuntu/config" if external else "/root/.kube/config" + cfg = Path("/home/ubuntu/config" if external else "/root/.kube/config") + if not cfg.exists(): + raise FileNotFoundError(f"kubeconfig not found at {cfg}") command = ["kubectl", f"--kubeconfig={cfg}", *args] log.info("Executing {}".format(command)) try: @@ -32,8 +71,3 @@ def kubectl(*args, external=False): f"Command failed: {command}\nreturncode: {e.returncode}\nstdout: {e.output.decode()}" ) raise - - -def kubectl_get(*args): - output = kubectl("get", "-o", "json", *args) - return json.loads(output) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 5ee43bcb..230eada8 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -4,6 +4,7 @@ # Learn more about testing at: https://juju.is/docs/sdk/testing import json +from pathlib import Path from unittest.mock import call, patch import ops @@ -145,7 +146,7 @@ def test_active( assert harness.model.unit.status == ActiveStatus() auth_webhook_configure.assert_called_once_with( - charm_dir=harness.charm.charm_dir, custom_authn_endpoint="" + charm_dir=harness.charm.charm_dir, custom_authn_endpoint="", custom_authz_config_file="" ) configure_apiserver.assert_called_once_with( advertise_address="10.0.0.10", @@ -159,6 +160,7 @@ def test_active( privileged="auto", service_cidr="10.152.183.0/24", external_cloud_provider=harness.charm.external_cloud_provider, + authz_webhook_conf_file=Path("/root/cdk/auth-webhook/authz-webhook-conf.yaml"), ) configure_apiserver_kubelet_api_admin.assert_called_once_with() configure_controller_manager.assert_called_once_with( diff --git a/tests/unit/test_kubectl.py b/tests/unit/test_kubectl.py index b2bd753a..a01de55a 100644 --- a/tests/unit/test_kubectl.py +++ b/tests/unit/test_kubectl.py @@ -1,17 +1,69 @@ -from unittest.mock import patch +import subprocess +import unittest.mock as mock import kubectl +import pytest +import tenacity -def test_kubectl(): +@pytest.fixture(params=["/root/.kube/config", "/home/ubuntu/config"]) +def kubeconfig(request): + with mock.patch("pathlib.Path.exists") as exists: + exists.return_value = True + yield request.param, (request.param == "/home/ubuntu/config") + + +@mock.patch("pathlib.Path.exists") +def test_kubectl_no_kubeconfig(exists): + """Verify kubectl fails immediately when there's no kubeconfig.""" + exists.return_value = False + kubectl.kubectl.retry.wait = tenacity.wait_none() + kubectl.kubectl.retry.stop = tenacity.stop_after_attempt(3) + with pytest.raises(FileNotFoundError): + kubectl.kubectl("get", "svc", "my-service") + + +@pytest.mark.usefixtures("kubeconfig") +def test_kubectl_retried(): + """Verify kubectl retries on failure.""" + with mock.patch("kubectl.check_output") as check_output: + kubectl.kubectl.retry.wait = tenacity.wait_none() + kubectl.kubectl.retry.stop = tenacity.stop_after_attempt(3) + check_output.side_effect = subprocess.CalledProcessError( + 1, "kubectl", b"stdout", b"stderr" + ) + with pytest.raises(subprocess.CalledProcessError): + kubectl.kubectl("get", "svc", "my-service") + assert check_output.call_count == 3 + + +def test_kubectl_external(kubeconfig): """Verify kubectl uses the appropriate kubeconfig files.""" - int_cfg = "--kubeconfig=/root/.kube/config" - ext_cfg = "--kubeconfig=/home/ubuntu/config" + path, external = kubeconfig + + with mock.patch("kubectl.check_output") as check_output: + kubectl.kubectl("apply", "-f", "test.yaml", external=external) + check_output.assert_called_once_with( + ["kubectl", f"--kubeconfig={path}", "apply", 
"-f", "test.yaml"] + ) + + +def test_kubectl_get(): + """Verify kubectl_get parses kubectl results.""" + with mock.patch("kubectl.kubectl") as m_kubectl: + m_kubectl.return_value = '{"kind": "Service", "metadata": {"name": "my-service"}}' + value = kubectl.kubectl_get("svc", "my-service") + m_kubectl.assert_called_once_with("get", "-o", "json", "svc", "my-service") + assert value == {"kind": "Service", "metadata": {"name": "my-service"}} + + m_kubectl.return_value = "" + value = kubectl.kubectl_get("svc", "my-service") + assert value == {} - with patch("kubectl.check_output") as mock: - kubectl.kubectl() - assert int_cfg in mock.call_args.args[0] - with patch("kubectl.check_output") as mock: - kubectl.kubectl(external=True) - assert ext_cfg in mock.call_args.args[0] +def test_get_service_ip(): + """Verify get_service_ip parses kubectl results.""" + with mock.patch("kubectl.kubectl_get") as m_kubectl_get: + m_kubectl_get.return_value = {"kind": "Service", "spec": {"clusterIP": "1.2.3.4"}} + value = kubectl.get_service_ip("my-service", "my-namespace") + assert value == "1.2.3.4"