diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py index 7d7549a..e7c325b 100644 --- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py +++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py @@ -218,7 +218,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 16 +LIBPATCH = 18 logger = logging.getLogger(__name__) @@ -615,7 +615,7 @@ def _replace_template_fields( # noqa: C901 if datasources or not existing_templates: panels = dict_content["panels"] - # Go through all of the panels. If they have a datasource set, AND it's one + # Go through all the panels. If they have a datasource set, AND it's one # that we can convert to ${lokids} or ${prometheusds}, by stripping off the # ${} templating and comparing the name to the list we built, replace it, # otherwise, leave it alone. @@ -710,7 +710,7 @@ def _inject_labels(content: str, topology: dict, transformer: "CosTool") -> str: if "panels" not in dict_content.keys(): return json.dumps(dict_content) - # Go through all of the panels and inject topology labels + # Go through all the panels and inject topology labels # Panels may have more than one 'target' where the expressions live, so that must be # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle # expressions with range queries including variables. Exclude these. @@ -924,7 +924,7 @@ def __init__( If you would like to use relation name other than `grafana-dashboard`, you will need to specify the relation name via the `relation_name` argument when instantiating the :class:`GrafanaDashboardProvider` object. - However, it is strongly advised to keep the the default relation name, + However, it is strongly advised to keep the default relation name, so that people deploying your charm will have a consistent experience with all other charms that provide Grafana dashboards. 
@@ -1053,7 +1053,7 @@ def _update_all_dashboards_from_dir( # Path.glob uses fnmatch on the backend, which is pretty limited, so use a # custom function for the filter def _is_dashboard(p: Path) -> bool: - return p.is_file and p.name.endswith((".json", ".json.tmpl", ".tmpl")) + return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl")) for path in filter(_is_dashboard, Path(self._dashboards_path).glob("*")): # path = Path(path) @@ -1105,7 +1105,7 @@ def _reinitialize_dashboard_data(self, inject_dropdowns: bool = True) -> None: del stored_dashboard_templates[dashboard_id] self._stored.dashboard_templates = stored_dashboard_templates - # With all of the file-based dashboards cleared out, force a refresh + # With all the file-based dashboards cleared out, force a refresh # of relation data if self._charm.unit.is_leader(): for dashboard_relation in self._charm.model.relations[self._relation_name]: @@ -1155,7 +1155,7 @@ def _content_to_dashboard_object(self, content: str, inject_dropdowns: bool = Tr return { "charm": self._charm.meta.name, "content": content, - "juju_topology": self._juju_topology, + "juju_topology": self._juju_topology if inject_dropdowns else {}, "inject_dropdowns": inject_dropdowns, } @@ -1752,7 +1752,7 @@ def _maybe_get_builtin_dashboards(self, event: RelationEvent) -> Dict: if dashboards_path: def _is_dashboard(p: Path) -> bool: - return p.is_file and p.name.endswith((".json", ".json.tmpl", ".tmpl")) + return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl")) for path in filter(_is_dashboard, Path(dashboards_path).glob("*")): # path = Path(path) diff --git a/lib/charms/observability_libs/v0/juju_topology.py b/lib/charms/observability_libs/v0/juju_topology.py index ef4ec58..e68e93f 100644 --- a/lib/charms/observability_libs/v0/juju_topology.py +++ b/lib/charms/observability_libs/v0/juju_topology.py @@ -75,7 +75,7 @@ LIBID = "bced1658f20f49d28b88f61f83c2d232" LIBAPI = 0 -LIBPATCH = 3 +LIBPATCH = 4 class 
InvalidUUIDError(Exception): @@ -94,8 +94,8 @@ def __init__( model: str, model_uuid: str, application: str, - unit: str = None, - charm_name: str = None, + unit: Optional[str] = None, + charm_name: Optional[str] = None, ): """Build a JujuTopology object. @@ -181,7 +181,10 @@ def from_dict(cls, data: dict): ) def as_dict( - self, *, remapped_keys: Dict[str, str] = None, excluded_keys: List[str] = None + self, + *, + remapped_keys: Optional[Dict[str, str]] = None, + excluded_keys: Optional[List[str]] = None, ) -> OrderedDict: """Format the topology information into an ordered dict. diff --git a/lib/charms/observability_libs/v0/metrics_endpoint_discovery.py b/lib/charms/observability_libs/v0/metrics_endpoint_discovery.py new file mode 100644 index 0000000..6a5fdeb --- /dev/null +++ b/lib/charms/observability_libs/v0/metrics_endpoint_discovery.py @@ -0,0 +1,225 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +"""# MetricsEndpointDiscovery Library. + +This library provides functionality for discovering metrics endpoints exposed +by applications deployed to a Kubernetes cluster. + +It comprises: +- A custom event and event source for handling metrics endpoint changes. +- Logic to observe cluster events and emit the events as appropriate. + +## Using the Library + +### Handling Events + +To ensure that your charm can react to changing metrics endpoint events, +use the CharmEvents extension. 
+```python +import json + +from charms.observability_libs.v0.metrics_endpoint_discovery import ( + MetricsEndpointChangeCharmEvents, + MetricsEndpointObserver +) + +class MyCharm(CharmBase): + + on = MetricsEndpointChangeCharmEvents() + + def __init__(self, *args): + super().__init__(*args) + + self._observer = MetricsEndpointObserver(self, {"app.kubernetes.io/name": ["grafana-k8s"]}) + self.framework.observe(self.on.metrics_endpoint_change, self._on_endpoints_change) + + def _on_endpoints_change(self, event): + self.unit.status = ActiveStatus(json.dumps(event.discovered)) +``` +""" + +import json +import logging +import os +import signal +import subprocess +import sys +from typing import Dict, Iterable + +from lightkube import Client +from lightkube.resources.core_v1 import Pod +from ops.charm import CharmBase, CharmEvents +from ops.framework import EventBase, EventSource, Object + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "a141d5620152466781ed83aafb948d03" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 2 + +# File path where metrics endpoint change data is written for exchange +# between the discovery process and the materialised event. +PAYLOAD_FILE_PATH = "/tmp/metrics-endpoint-payload.json" + +# File path for the spawned discovery process to write logs. 
+LOG_FILE_PATH = "/var/log/discovery.log" + + +class MetricsEndpointChangeEvent(EventBase): + """A custom event for metrics endpoint changes.""" + + def __init__(self, handle): + super().__init__(handle) + + with open(PAYLOAD_FILE_PATH, "r") as f: + self._discovered = json.loads(f.read()) + + def snapshot(self): + """Save the event payload data.""" + return {"payload": self._discovered} + + def restore(self, snapshot): + """Restore the event payload data.""" + self._discovered = {} + + if snapshot: + self._discovered = snapshot["payload"] + + @property + def discovered(self): + """Return the payload of detected endpoint changes for this event.""" + return self._discovered + + +class MetricsEndpointChangeCharmEvents(CharmEvents): + """A CharmEvents extension for metrics endpoint changes. + + Includes :class:`MetricsEndpointChangeEvent` in those that can be handled. + """ + + metrics_endpoint_change = EventSource(MetricsEndpointChangeEvent) + + +class MetricsEndpointObserver(Object): + """Observes changing metrics endpoints in the cluster. + + Observed endpoint changes cause :class:`MetricsEndpointChangeEvent` to be emitted. + """ + + def __init__(self, charm: CharmBase, labels: Dict[str, Iterable]): + """Constructor for MetricsEndpointObserver. + + Args: + charm: the charm that is instantiating the library. + labels: dictionary of label/value to be observed for changing metrics endpoints. + """ + super().__init__(charm, "metrics-endpoint-observer") + + self._charm = charm + self._observer_pid = 0 + + self._labels = labels + + def start_observer(self): + """Start the metrics endpoint observer running in a new process.""" + self.stop_observer() + + logging.info("Starting metrics endpoint observer process") + + # We need to trick Juju into thinking that we are not running + # in a hook context, as Juju will disallow use of juju-run. 
+ new_env = os.environ.copy() + if "JUJU_CONTEXT_ID" in new_env: + new_env.pop("JUJU_CONTEXT_ID") + + pid = subprocess.Popen( + [ + "/usr/bin/python3", + "lib/charms/observability_libs/v{}/metrics_endpoint_discovery.py".format(LIBAPI), + json.dumps(self._labels), + "/var/lib/juju/tools/{}/juju-run".format(self.unit_tag), + self._charm.unit.name, + self._charm.charm_dir, + ], + stdout=open(LOG_FILE_PATH, "a"), + stderr=subprocess.STDOUT, + env=new_env, + ).pid + + self._observer_pid = pid + logging.info("Started metrics endpoint observer process with PID {}".format(pid)) + + def stop_observer(self): + """Stop the running observer process if we have previously started it.""" + if not self._observer_pid: + return + + try: + os.kill(self._observer_pid, signal.SIGINT) + msg = "Stopped running metrics endpoint observer process with PID {}" + logging.info(msg.format(self._observer_pid)) + except OSError: + pass + + @property + def unit_tag(self): + """Juju-style tag identifying the unit being run by this charm.""" + unit_num = self._charm.unit.name.split("/")[-1] + return "unit-{}-{}".format(self._charm.app.name, unit_num) + + +def write_payload(payload): + """Write the input event data to event payload file.""" + with open(PAYLOAD_FILE_PATH, "w") as f: + f.write(json.dumps(payload)) + + +def dispatch(run_cmd, unit, charm_dir): + """Use the input juju-run command to dispatch a :class:`MetricsEndpointChangeEvent`.""" + dispatch_sub_cmd = "JUJU_DISPATCH_PATH=hooks/metrics_endpoint_change {}/dispatch" + subprocess.run([run_cmd, "-u", unit, dispatch_sub_cmd.format(charm_dir)]) + + +def main(): + """Main watch and dispatch loop. + + Watch the input k8s service names. When changes are detected, write the + observed data to the payload file, and dispatch the change event. 
+ """ + labels, run_cmd, unit, charm_dir = sys.argv[1:] + + client = Client() + labels = json.loads(labels) + + for change, entity in client.watch(Pod, namespace="*", labels=labels): + meta = entity.metadata + metrics_path = "" + if entity.metadata.annotations.get("prometheus.io/path", ""): + metrics_path = entity.metadata.annotations.get("prometheus.io/path", "") + + target_ports = [] + for c in filter(lambda c: c.ports is not None, entity.spec.containers): + for p in filter(lambda p: p.name == "metrics", c.ports): + target_ports.append("*:{}".format(p.containerPort)) + + payload = { + "change": change, + "namespace": meta.namespace, + "name": meta.name, + "path": metrics_path, + "targets": target_ports or ["*:80"], + } + + write_payload(payload) + dispatch(run_cmd, unit, charm_dir) + + +if __name__ == "__main__": + main() diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py index 282ab00..b458795 100644 --- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py +++ b/lib/charms/observability_libs/v1/kubernetes_service_patch.py @@ -146,7 +146,7 @@ def setUp(self, *unused): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 5 ServiceType = Literal["ClusterIP", "LoadBalancer"] @@ -158,11 +158,11 @@ def __init__( self, charm: CharmBase, ports: List[ServicePort], - service_name: str = None, + service_name: Optional[str] = None, service_type: ServiceType = "ClusterIP", - additional_labels: dict = None, - additional_selectors: dict = None, - additional_annotations: dict = None, + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, *, refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, ): @@ -213,11 +213,11 @@ def __init__( def _service_object( self, ports: List[ServicePort], - 
service_name: str = None, + service_name: Optional[str] = None, service_type: ServiceType = "ClusterIP", - additional_labels: dict = None, - additional_selectors: dict = None, - additional_annotations: dict = None, + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, ) -> Service: """Creates a valid Service representation. diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index 70b7f1e..92ac450 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -255,7 +255,11 @@ def _on_scrape_targets_changed(self, event): - a single rule format, which is a simplified subset of the official format, comprising a single alert rule per file, using the same YAML fields. -The file name must have the `.rule` extension. +The file name must have one of the following extensions: +- `.rule` +- `.rules` +- `.yml` +- `.yaml` An example of the contents of such a file in the custom single rule format is shown below. 
@@ -333,7 +337,7 @@ def _on_scrape_targets_changed(self, event): import tempfile from collections import defaultdict from pathlib import Path -from typing import Dict, List, Optional, Tuple, Union +from typing import Callable, Dict, List, Optional, Tuple, Union from urllib.parse import urlparse import yaml @@ -350,7 +354,7 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 24 +LIBPATCH = 25 logger = logging.getLogger(__name__) @@ -939,7 +943,9 @@ def _from_dir(self, dir_path: Path, recursive: bool) -> List[dict]: alert_groups = [] # type: List[dict] # Gather all alerts into a list of groups - for file_path in self._multi_suffix_glob(dir_path, [".rule", ".rules"], recursive): + for file_path in self._multi_suffix_glob( + dir_path, [".rule", ".rules", ".yml", ".yaml"], recursive + ): alert_groups_from_file = self._from_file(dir_path, file_path) if alert_groups_from_file: logger.debug("Reading alert rule from %s", file_path) @@ -1377,7 +1383,8 @@ def __init__( jobs=None, alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, - external_url: str = None, + external_url: str = "", + lookaside_jobs_callable: Callable = None, ): """Construct a metrics provider for a Prometheus charm. @@ -1484,6 +1491,11 @@ def __init__( will be observed to re-set scrape job data (IP address and others) external_url: an optional argument that represents an external url that can be generated by an Ingress or a Proxy. + lookaside_jobs_callable: an optional `Callable` which should be invoked + when the job configuration is built as a secondary mapping. The callable + should return a `List[Dict]` which is syntactically identical to the + `jobs` parameter, but can be updated out of step initialization of + this library without disrupting the 'global' job spec. 
Raises: RelationNotFoundError: If there is no relation in the charm's metadata.yaml @@ -1523,13 +1535,14 @@ def __init__( external_url if urlparse(external_url).scheme else ("http://" + external_url) ) self.external_url = external_url + self._lookaside_jobs = lookaside_jobs_callable events = self._charm.on[self._relation_name] self.framework.observe(events.relation_changed, self._on_relation_changed) if not refresh_event: # FIXME remove once podspec charms are verified. - # `self._set_scrape_job_spec()` is called every re-init so this should not be needed. + # `self.set_scrape_job_spec()` is called every re-init so this should not be needed. if len(self._charm.meta.containers) == 1: if "kubernetes" in self._charm.meta.series: # This is a podspec charm @@ -1552,7 +1565,7 @@ def __init__( refresh_event = [refresh_event] for ev in refresh_event: - self.framework.observe(ev, self._set_scrape_job_spec) + self.framework.observe(ev, self.set_scrape_job_spec) # Update relation data every reinit. If instead we used event hooks then observing only # relation-joined would not be sufficient: @@ -1563,7 +1576,7 @@ def __init__( # - The ingerss-ready custom event is currently emitted prematurely and cannot be relied # upon: https://github.com/canonical/traefik-k8s-operator/issues/78 # NOTE We may still end up waiting for update-status before changes are applied. 
- self._set_scrape_job_spec() + self.set_scrape_job_spec() def _on_relation_changed(self, event): """Check for alert rule messages in the relation data before moving on.""" @@ -1579,7 +1592,12 @@ def _on_relation_changed(self, event): else: self.on.alert_rule_status_changed.emit(valid=valid, errors=errors) - def _set_scrape_job_spec(self, _=None): + def update_scrape_job_spec(self, jobs): + """Update scrape job specification.""" + self._jobs = PrometheusConfig.sanitize_scrape_configs(jobs) + self.set_scrape_job_spec() + + def set_scrape_job_spec(self, _=None): """Ensure scrape target information is made available to prometheus. When a metrics provider charm is related to a prometheus charm, the @@ -1664,7 +1682,11 @@ def _scrape_jobs(self) -> list: A list of dictionaries, where each dictionary specifies a single scrape job for Prometheus. """ - return self._jobs if self._jobs else [DEFAULT_JOB] + jobs = self._jobs if self._jobs else [DEFAULT_JOB] + if callable(self._lookaside_jobs): + return jobs + PrometheusConfig.sanitize_scrape_configs(self._lookaside_jobs()) + else: + return jobs @property def _scrape_metadata(self) -> dict: diff --git a/requirements-fmt.txt b/requirements-fmt.txt index 53094cd..6666852 100644 --- a/requirements-fmt.txt +++ b/requirements-fmt.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.8 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile --resolver=backtracking ./requirements-fmt.in @@ -18,5 +18,3 @@ platformdirs==2.5.4 # via black tomli==2.0.1 # via black -typing-extensions==4.4.0 - # via black diff --git a/requirements-integration.txt b/requirements-integration.txt index 81bb48f..152ad47 100644 --- a/requirements-integration.txt +++ b/requirements-integration.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.8 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile 
--resolver=backtracking ./requirements-integration.in @@ -292,8 +292,10 @@ wcwidth==0.2.5 # via prompt-toolkit websocket-client==1.4.2 # via kubernetes -websockets==7.0 - # via juju +websockets==10.4 + # via + # -r ./requirements.txt + # juju yarl==1.8.2 # via aiohttp diff --git a/requirements-lint.txt b/requirements-lint.txt index 01133f9..5019afb 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.8 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile --resolver=backtracking ./requirements-lint.in @@ -58,10 +58,6 @@ tomli==2.0.1 # -r ./requirements-fmt.txt # black # pyproject-flake8 -typing-extensions==4.4.0 - # via - # -r ./requirements-fmt.txt - # black # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/requirements-unit.txt b/requirements-unit.txt index a0ac1ff..43c1273 100644 --- a/requirements-unit.txt +++ b/requirements-unit.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.8 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile --resolver=backtracking ./requirements-unit.in @@ -145,6 +145,8 @@ urllib3==1.26.13 # via # -r ./requirements.txt # requests +websockets==10.4 + # via -r ./requirements.txt # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/requirements.in b/requirements.in index 193a655..295195a 100644 --- a/requirements.in +++ b/requirements.in @@ -5,3 +5,4 @@ ops oci-image serialized-data-interface tenacity +websockets diff --git a/requirements.txt b/requirements.txt index 2f40ceb..d464182 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.8 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile 
--resolver=backtracking ./requirements.in @@ -81,6 +81,8 @@ tenacity==8.1.0 # via -r ./requirements.in urllib3==1.26.13 # via requests +websockets==10.4 + # via -r ./requirements.in # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/src/charm.py b/src/charm.py index 12ca3e2..b1dc4ae 100755 --- a/src/charm.py +++ b/src/charm.py @@ -16,6 +16,10 @@ from charmed_kubeflow_chisme.lightkube.batch import delete_many from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider from charms.istio_pilot.v0.istio_gateway_info import GatewayRelationError, GatewayRequirer +from charms.observability_libs.v0.metrics_endpoint_discovery import ( + MetricsEndpointChangeCharmEvents, + MetricsEndpointObserver, +) from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider from lightkube import ApiError @@ -46,6 +50,9 @@ class SeldonCoreOperator(CharmBase): _stored = StoredState() + # using CharmEvents extension to handle metrics endpoint changes + on = MetricsEndpointChangeCharmEvents() + def __init__(self, *args): """Initialize charm and setup the container.""" super().__init__(*args) @@ -65,7 +72,7 @@ def __init__(self, *args): self._container = self.unit.get_container(self._container_name) # generate certs - self._stored.set_default(**self._gen_certs()) + self._stored.set_default(**self._gen_certs(), targets={}) # setup context to be used for updating K8S resources self._context = { @@ -87,17 +94,6 @@ def __init__(self, *args): service_name=f"{self.model.app.name}", ) - # setup events - self.framework.observe(self.on.upgrade_charm, self.main) - self.framework.observe(self.on.config_changed, self.main) - self.framework.observe(self.on.leader_elected, self.main) - self.framework.observe(self.on.seldon_core_pebble_ready, self.main) - - for rel in self.model.relations.keys(): - 
self.framework.observe(self.on[rel].relation_changed, self.main) - self.framework.observe(self.on.install, self._on_install) - self.framework.observe(self.on.remove, self._on_remove) - # Prometheus related config self.prometheus_provider = MetricsEndpointProvider( charm=self, @@ -108,6 +104,11 @@ def __init__(self, *args): "static_configs": [{"targets": ["*:{}".format(self.config["metrics-port"])]}], } ], + lookaside_jobs_callable=self._return_list_of_running_models, + ) + # metrics endpoint observer setup + self.metrics_server_observer = MetricsEndpointObserver( + self, {"seldon-deployment-id": None} ) # Dashboard related config (Grafana) @@ -116,6 +117,18 @@ def __init__(self, *args): relation_name="grafana-dashboard", ) + # setup events + self.framework.observe(self.on.upgrade_charm, self.main) + self.framework.observe(self.on.config_changed, self.main) + self.framework.observe(self.on.leader_elected, self.main) + self.framework.observe(self.on.seldon_core_pebble_ready, self.main) + + for rel in self.model.relations.keys(): + self.framework.observe(self.on[rel].relation_changed, self.main) + self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.remove, self._on_remove) + self.framework.observe(self.on.metrics_endpoint_change, self._on_metrics_endpoint_change) + @property def container(self): """Return container.""" @@ -426,6 +439,20 @@ def _get_istio_gateway(self): istio_gateway = gateway_info["gateway_namespace"] + "/" + gateway_info["gateway_name"] return istio_gateway + def _on_metrics_endpoint_change(self, event): + """Populate ports of discovered targets.""" + self._stored.targets["ports"].append(event.discovered["targets"]) + self.prometheus_provider.set_scrape_job_spec() + + def _return_list_of_running_models(self): + """Return running models based on stored targets.""" + models_list = [] + if self._stored.targets is not None and len(self._stored.targets) != 0: + models_list = [ + {"running-models": [{"targets": [p for 
p in self._stored.targets["ports"]]}]} + ] + return models_list + def main(self, _) -> None: """Perform all required actions the Charm.""" try: