From 9a6cf43c6e0b11d3cf5951572a1ccb47cded7729 Mon Sep 17 00:00:00 2001 From: Michael Edgar Date: Mon, 20 May 2024 13:08:43 -0400 Subject: [PATCH] Use YAML configuration for Kafka connections, improve local runtime support (#749) * Load cluster configuration from YAML * Add local build and compose scripts/configurations * Documentation updates * Cleanup config model, test updates, add instructions for running locally * Optionally deploy Prometheus ingress, set Prometheus URL in compose * Use host network for compose, document service account setup * Update root README instructions for running locally --------- Signed-off-by: Michael Edgar --- .github/workflows/release.yml | 4 +- .gitignore | 6 +- Makefile | 39 +++++ README.md | 60 ++++++- api/pom.xml | 4 +- .../console/api/ClientFactory.java | 153 ++++++++++++------ .../console/api/config/ConsoleConfig.java | 14 ++ .../api/config/KafkaClusterConfig.java | 79 +++++++++ .../console/api/config/KafkaConfig.java | 28 ++++ .../api/service/KafkaClusterService.java | 10 +- api/src/main/resources/application.properties | 7 +- .../console/api/KafkaClustersResourceIT.java | 9 +- .../console/api/RecordsResourceIT.java | 9 +- .../console/api/TopicsResourceIT.java | 9 +- .../KafkaUnsecuredResourceManager.java | 46 ++++-- compose.yaml | 27 ++++ console-config-example.yaml | 28 ++++ install/001-deploy-prometheus.sh | 7 + install/003-install-console.sh | 6 + install/README.md | 54 ++----- .../resources/console/console.deployment.yaml | 51 +++--- .../console-prometheus.ingress.yaml | 6 +- .../console-prometheus.service.yaml | 11 ++ ui/Dockerfile | 10 +- ui/messages/en.json | 2 +- 25 files changed, 516 insertions(+), 163 deletions(-) create mode 100644 Makefile create mode 100644 api/src/main/java/com/github/eyefloaters/console/api/config/ConsoleConfig.java create mode 100644 api/src/main/java/com/github/eyefloaters/console/api/config/KafkaClusterConfig.java create mode 100644 api/src/main/java/com/github/eyefloaters/console/api/config/KafkaConfig.java create mode 100644 compose.yaml create mode 100644 console-config-example.yaml create mode 100644 install/resources/prometheus/console-prometheus.service.yaml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e5b7ed1d3..e855afa88 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,6 +46,7 @@ jobs: export QUARKUS_CONTAINER_IMAGE_USERNAME="${{ secrets.IMAGE_REPO_USERNAME }}" export QUARKUS_CONTAINER_IMAGE_PASSWORD="${{ secrets.IMAGE_REPO_PASSWORD }}" export QUARKUS_CONTAINER_IMAGE_PUSH="true" + export QUARKUS_CONTAINER_IMAGE_ADDITIONAL_TAGS=latest export GIT_REVISION=$(git rev-parse --short release) # Build and push the release images using the commit tagged in `release:prepare` mvn -B -P docker release:perform -f api/pom.xml @@ -63,7 +64,8 @@ jobs: context: ui/ push: true tags: | - ${{ secrets.IMAGE_REPO_HOSTNAME }}/${{ secrets.IMAGE_REPO_NAMESPACE }}/ui:${{steps.metadata.outputs.current-version}} + ${{ secrets.IMAGE_REPO_HOSTNAME }}/${{ secrets.IMAGE_REPO_NAMESPACE }}/console-ui:${{steps.metadata.outputs.current-version}} + ${{ secrets.IMAGE_REPO_HOSTNAME }}/${{ secrets.IMAGE_REPO_NAMESPACE }}/console-ui:latest - name: Push Release Tag run: | diff --git a/.gitignore b/.gitignore index 990c1aeb5..2193d1bb4 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,10 @@ release.properties .factorypath .vscode - # env files .env* + +# User-provided and generated compose configurations +/compose-runtime.env +/console-config.yaml +/compose.env diff --git 
a/Makefile b/Makefile new file mode 100644 index 000000000..d2019234a --- /dev/null +++ b/Makefile @@ -0,0 +1,39 @@ + +.PHONY: container-image-api container-image-ui container-images + +include *compose.env + +CONSOLE_API_IMAGE ?= quay.io/eyefloaters/console-api:latest +CONSOLE_UI_IMAGE ?= quay.io/eyefloaters/console-ui:latest +CONSOLE_UI_NEXTAUTH_SECRET ?= $(shell openssl rand -base64 32) +CONSOLE_METRICS_PROMETHEUS_URL ?= +CONTAINER_RUNTIME ?= $(shell which podman || which docker) + +container-image-api: + mvn package -f api/pom.xml -Pdocker -DskipTests -Dquarkus.container-image.image=$(CONSOLE_API_IMAGE) + +container-image-api-push: container-image-api + $(CONTAINER_RUNTIME) push $(CONSOLE_API_IMAGE) + +container-image-ui: + $(CONTAINER_RUNTIME) build -t $(CONSOLE_UI_IMAGE) ./ui -f ./ui/Dockerfile + +container-image-ui-push: container-image-ui + $(CONTAINER_RUNTIME) push $(CONSOLE_UI_IMAGE) + +container-images: container-image-api container-image-ui + +container-images-push: container-image-api-push container-image-ui-push + +compose-up: + > compose-runtime.env + echo "CONSOLE_API_IMAGE=$(CONSOLE_API_IMAGE)" >> compose-runtime.env + echo "CONSOLE_API_SERVICE_ACCOUNT_TOKEN=$(CONSOLE_API_SERVICE_ACCOUNT_TOKEN)" >> compose-runtime.env + echo "CONSOLE_API_KUBERNETES_API_SERVER_URL=$(CONSOLE_API_KUBERNETES_API_SERVER_URL)" >> compose-runtime.env + echo "CONSOLE_UI_IMAGE=$(CONSOLE_UI_IMAGE)" >> compose-runtime.env + echo "CONSOLE_UI_NEXTAUTH_SECRET=$(CONSOLE_UI_NEXTAUTH_SECRET)" >> compose-runtime.env + echo "CONSOLE_METRICS_PROMETHEUS_URL=$(CONSOLE_METRICS_PROMETHEUS_URL)" >> compose-runtime.env + $(CONTAINER_RUNTIME) compose --env-file compose-runtime.env up -d + +compose-down: + $(CONTAINER_RUNTIME) compose --env-file compose-runtime.env down diff --git a/README.md b/README.md index 0cc59472c..291745075 100644 --- a/README.md +++ b/README.md @@ -6,10 +6,68 @@ It is composed of two main parts: - a REST API backend developed with Java and [Quarkus](https://quarkus.io/) - a user interface (UI) built with [Next.js](https://nextjs.org/) and [PatternFly](https://patternfly.org) -## Installing +#### Roadmap / Goals + +The future goals of this project are to provide a user interface to interact with and manage additional data streaming components such as: + +- [Apicurio Registry](https://www.apicur.io/registry/) for message serialization and de-serialization + validation +- [Kroxylicious](https://kroxylicious.io/) +- [Apache Flink](https://flink.apache.org/) + +Contributions and discussions around use cases for these (and other relevant) components are both welcome and encouraged. + +## Running the Application + +The console application may either be run in a Kubernetes cluster or locally to try it out. + +### Install to Kubernetes Please refer to the [installation README](./install/README.md) file for detailed information about how to install the latest release of the console in a Kubernetes cluster. +### Run locally + +Running the console locally requires the use of a remote or locally-running Kubernetes cluster that hosts the Strimzi Kafka operator +and any Apache Kafka™ clusters that will be accessed from the console. To get started, you will need to provide a console configuration +file and credentials to connect to the Kubernetes cluster where Strimzi and Kafka are available. + +1. Using the [console-config-example.yaml](./console-config-example.yaml) file as an example, create your own configuration + in a file `console-config.yaml` in the repository root.
The `compose.yaml` file expects this location to be used and + any difference in name or location requires an adjustment to the compose file. + +2. Install the prerequisite software into the Kubernetes cluster. This step assumes none have yet been installed. + ```shell + ./install/000-install-dependency-operators.sh + ./install/001-deploy-prometheus.sh + ./install/002-deploy-console-kafka.sh + ``` + Note that the Prometheus instance will be available at `http://console-prometheus.<cluster-domain>` when this step + completes. + +3. Provide the Prometheus endpoint, the API server endpoint, and the service account token that you would like to use to connect to the Kubernetes cluster. These may be placed in a `compose.env` file that will be detected when starting the console. + ``` + CONSOLE_API_SERVICE_ACCOUNT_TOKEN=<service-account-token> + CONSOLE_API_KUBERNETES_API_SERVER_URL=https://my-kubernetes-api.example.com:6443 + CONSOLE_METRICS_PROMETHEUS_URL=http://console-prometheus.<cluster-domain> + ``` + The service account token may be obtained using the `kubectl create token` command. For example, to create a service account + named "console-server" (from [console-server.serviceaccount.yaml](./install/resources/console/console-server.serviceaccount.yaml)) + with the correct permissions and a token that expires in 1 year ([yq](https://github.com/mikefarah/yq/releases) required): + ```shell + export NAMESPACE=<namespace> + kubectl apply -n ${NAMESPACE} -f ./install/resources/console/console-server.clusterrole.yaml + kubectl apply -n ${NAMESPACE} -f ./install/resources/console/console-server.serviceaccount.yaml + yq '.subjects[0].namespace = strenv(NAMESPACE)' ./install/resources/console/console-server.clusterrolebinding.yaml | kubectl apply -n ${NAMESPACE} -f - + kubectl create token console-server -n ${NAMESPACE} --duration=$((365*24))h + ``` + +4. By default, the provided configuration will use the latest console release container images. If you would like to + build your own images with changes you've made locally, you may also set the `CONSOLE_API_IMAGE` and `CONSOLE_UI_IMAGE` + in your `compose.env` and build them with `make container-images`. + +5. Start the environment with `make compose-up`. + +6. When finished with the local console process, you may run `make compose-down` to clean up. + ## Contributing We welcome contributions of all forms. Please see the [CONTRIBUTING](./CONTRIBUTING.md) file for how to get started.
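Putting steps 3 and 4 of the "Run locally" instructions above together, a complete `compose.env` might look like the following sketch. All values are illustrative placeholders (the token, API server URL, and cluster domain must come from your own cluster), and the image overrides are only needed when you build your own images:

```shell
# compose.env -- example values only, replace the placeholders with your own
CONSOLE_API_SERVICE_ACCOUNT_TOKEN=<service-account-token>
CONSOLE_API_KUBERNETES_API_SERVER_URL=https://my-kubernetes-api.example.com:6443
CONSOLE_METRICS_PROMETHEUS_URL=http://console-prometheus.apps.example.com

# Optional: only set these when building and running locally built images
CONSOLE_API_IMAGE=quay.io/example/console-api:dev
CONSOLE_UI_IMAGE=quay.io/example/console-ui:dev
```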
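With `console-config.yaml` and `compose.env` in place, the workflow in steps 4 through 6 reduces to a few `make` targets. This is a sketch assuming podman or docker with compose support is available and that locally built images are wanted rather than the released ones:

```shell
# Build the console-api and console-ui images locally (optional; skip to use the release images)
make container-images

# Generate compose-runtime.env from compose.env and start both containers on the host network
make compose-up

# The UI is served on http://localhost:3005 and the API on http://localhost:8080 (see compose.yaml)

# Tear everything down when finished
make compose-down
```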
diff --git a/api/pom.xml b/api/pom.xml index 70e4ee73a..c4a6c3123 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -6,7 +6,7 @@ com.github.eyefloaters console-api jar - 0.0.36-SNAPSHOT + 0.1.0-SNAPSHOT UTF-8 @@ -322,6 +322,8 @@ + true + false src/main/docker/Dockerfile diff --git a/api/src/main/java/com/github/eyefloaters/console/api/ClientFactory.java b/api/src/main/java/com/github/eyefloaters/console/api/ClientFactory.java index 4ea361127..dde6d27f9 100644 --- a/api/src/main/java/com/github/eyefloaters/console/api/ClientFactory.java +++ b/api/src/main/java/com/github/eyefloaters/console/api/ClientFactory.java @@ -1,9 +1,13 @@ package com.github.eyefloaters.console.api; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Path; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -42,6 +46,10 @@ import org.eclipse.microprofile.config.inject.ConfigProperty; import org.jboss.logging.Logger; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.github.eyefloaters.console.api.config.ConsoleConfig; +import com.github.eyefloaters.console.api.config.KafkaClusterConfig; import com.github.eyefloaters.console.api.service.KafkaClusterService; import com.github.eyefloaters.console.api.support.TrustAllCertificateManager; @@ -68,7 +76,6 @@ @ApplicationScoped public class ClientFactory { - static final String KAFKA_CONFIG_PREFIX = "console.kafka"; static final String NO_SUCH_KAFKA_MESSAGE = "Requested Kafka cluster %s does not exist or is not configured"; private final Function noSuchKafka = clusterName -> new NotFoundException(NO_SUCH_KAFKA_MESSAGE.formatted(clusterName)); @@ -80,8 +87,8 @@ public class ClientFactory { Config config; @Inject - @ConfigProperty(name = KAFKA_CONFIG_PREFIX) - Optional> clusterNames; + @ConfigProperty(name = "console.config-path") + Optional configPath; @Inject SharedIndexInformer kafkaInformer; @@ -118,8 +125,34 @@ public class ClientFactory { @Named("kafkaAdminFilter") UnaryOperator kafkaAdminFilter = UnaryOperator.identity(); - private Map clusterNames() { - return clusterNames.orElseGet(Collections::emptyMap); + @Produces + @ApplicationScoped + public ConsoleConfig produceConsoleConfig() { + return configPath.map(Path::of) + .map(Path::toUri) + .map(uri -> { + try { + return uri.toURL(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }) + .filter(Objects::nonNull) + .map(url -> { + log.infof("Loading console configuration from %s", url); + + ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); + + try (InputStream stream = url.openStream()) { + return mapper.readValue(stream, ConsoleConfig.class); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }) + .orElseGet(() -> { + log.infof("Console configuration not specified"); + return new ConsoleConfig(); + }); } /** @@ -153,7 +186,7 @@ public Supplier kafkaResourceSupplier() { @Produces @ApplicationScoped - Map getAdmins(Function, Admin> adminBuilder) { + Map getAdmins(ConsoleConfig consoleConfig, Function, Admin> adminBuilder) { final Map adminClients = new HashMap<>(); kafkaInformer.addEventHandlerWithResyncPeriod(new ResourceEventHandler() { @@ -168,12 +201,11 @@ public void onUpdate(Kafka oldKafka, Kafka newKafka) { private void put(Kafka kafka, String eventType) { String 
clusterKey = Cache.metaNamespaceKeyFunc(kafka); - clusterNames().entrySet() - .stream() - .filter(e -> clusterKey.equals(e.getValue())) - .findFirst() + consoleConfig.getKafka() + .getCluster(clusterKey) .map(e -> { - var configs = buildConfig(AdminClientConfig.configNames(), e.getKey(), "admin", kafka); + var configs = buildConfig(AdminClientConfig.configNames(), e, "admin", e::getAdminProperties, kafka); + if (truststoreRequired(configs)) { log.warnf(""" %s Admin client for Kafka cluster %s failed. Connection \ @@ -183,7 +215,11 @@ private void put(Kafka kafka, String eventType) { .formatted(eventType, kafka.getStatus().getClusterId())); return null; } else { - logConfig("Admin[key=%s, id=%s]".formatted(e.getKey(), kafka.getStatus().getClusterId()), configs); + logConfig("Admin[name=%s, namespace=%s, id=%s]".formatted( + e.getName(), + e.getNamespace(), + kafka.getStatus().getClusterId()), + configs); return adminBuilder.apply(configs); } }) @@ -236,18 +272,19 @@ public void adminClientDisposer(@Disposes Supplier client, Map> consumerSupplier(Supplier cluster) { + public Supplier> consumerSupplier(ConsoleConfig consoleConfig, Supplier cluster) { String clusterKey = Cache.metaNamespaceKeyFunc(cluster.get()); - return clusterNames().entrySet() - .stream() - .filter(e -> clusterKey.equals(e.getValue())) + return consoleConfig.getKafka() + .getCluster(clusterKey) .>>map(e -> { + Set configNames = ConsumerConfig.configNames().stream() // Do not allow a group Id to be set for this application .filter(Predicate.not(ConsumerConfig.GROUP_ID_CONFIG::equals)) .collect(Collectors.toSet()); - var configs = buildConfig(configNames, e.getKey(), "consumer", cluster.get()); + + var configs = buildConfig(configNames, e, "consumer", e::getConsumerProperties, cluster.get()); configs.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false"); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); @@ -255,12 +292,14 @@ public Supplier> consumerSupplier(Supplier clust configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); configs.put(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 5000); - logConfig("Consumer[" + e.getKey() + ']', configs); + logConfig("Consumer[name=%s, namespace=%s]".formatted( + e.getName(), + e.getNamespace()), + configs); @SuppressWarnings("resource") // no resource leak - client closed by disposer Consumer client = new KafkaConsumer<>(configs); return () -> client; }) - .findFirst() .orElseThrow(() -> noSuchKafka.apply(cluster.get().getStatus().getClusterId())); } @@ -270,14 +309,13 @@ public void consumerDisposer(@Disposes Supplier> consum @Produces @RequestScoped - public Supplier> producerSupplier(Supplier cluster) { + public Supplier> producerSupplier(ConsoleConfig consoleConfig, Supplier cluster) { String clusterKey = Cache.metaNamespaceKeyFunc(cluster.get()); - return clusterNames().entrySet() - .stream() - .filter(e -> clusterKey.equals(e.getValue())) + return consoleConfig.getKafka() + .getCluster(clusterKey) .>>map(e -> { - var configs = buildConfig(ProducerConfig.configNames(), e.getKey(), "producer", cluster.get()); + var configs = buildConfig(ProducerConfig.configNames(), e, "producer", e::getProducerProperties, cluster.get()); configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); 
configs.put(ProducerConfig.ACKS_CONFIG, "all"); @@ -285,12 +323,14 @@ public Supplier> producerSupplier(Supplier clust configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false); configs.put(ProducerConfig.RETRIES_CONFIG, 0); - logConfig("Producer[" + e.getKey() + ']', configs); + logConfig("Producer[name=%s, namespace=%s]".formatted( + e.getName(), + e.getNamespace()), + configs); @SuppressWarnings("resource") // no resource leak - client closed by disposer Producer client = new KafkaProducer<>(configs); return () -> client; }) - .findFirst() .orElseThrow(() -> noSuchKafka.apply(cluster.get().getStatus().getClusterId())); } @@ -298,16 +338,33 @@ public void producerDisposer(@Disposes Supplier> produc producer.get().close(); } - Map buildConfig(Set configNames, String clusterKey, String clientType, Kafka cluster) { + Map buildConfig(Set configNames, + KafkaClusterConfig config, + String clientType, + Supplier> clientProperties, + Kafka cluster) { + Map cfg = configNames .stream() - .map(configName -> getClusterConfig(clusterKey, clientType, configName) - .or(() -> getDefaultConfig(clusterKey, clientType, configName)) + .map(configName -> Optional.ofNullable(clientProperties.get().get(configName)) + .or(() -> Optional.ofNullable(config.getProperties().get(configName))) + .or(() -> getDefaultConfig(clientType, configName)) .map(configValue -> Map.entry(configName, configValue))) .filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + if (!cfg.containsKey(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)) { + Optional.ofNullable(cluster.getStatus()) + .map(KafkaStatus::getListeners) + .map(Collection::stream) + .orElseGet(Stream::empty) + .filter(listener -> listener.getName().equals(config.getListener())) + .map(ListenerStatus::getBootstrapServers) + .findFirst() + .ifPresent(bootstrapServers -> cfg.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)); + } + if (truststoreRequired(cfg)) { if (trustManager.isResolvable()) { trustManager.get().trustClusterCertificate(cfg); @@ -316,10 +373,17 @@ Map buildConfig(Set configNames, String clusterKey, Stri .map(KafkaStatus::getListeners) .map(Collection::stream) .orElseGet(Stream::empty) - .filter(listener -> cfg.getOrDefault(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "") - .toString() - .contains(listener.getBootstrapServers())) + .filter(listener -> { + if (listener.getName().equals(config.getListener())) { + return true; + } + + return cfg.getOrDefault(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "") + .toString() + .contains(listener.getBootstrapServers()); + }) .map(ListenerStatus::getCertificates) + .filter(Objects::nonNull) .filter(Predicate.not(Collection::isEmpty)) .findFirst() .ifPresent(certificates -> { @@ -332,28 +396,13 @@ Map buildConfig(Set configNames, String clusterKey, Stri return cfg; } - Optional getClusterConfig(String clusterKey, String clientType, String configName) { - String clientSpecificKey = "%s.%s.%s.%s".formatted(KAFKA_CONFIG_PREFIX, clusterKey, clientType, configName); - String generalKey = "%s.%s.%s".formatted(KAFKA_CONFIG_PREFIX, clusterKey, configName); + Optional getDefaultConfig(String clientType, String configName) { + String clientSpecificKey = "console.kafka.%s.%s".formatted(clientType, configName); + String generalKey = "console.kafka.%s".formatted(configName); return config.getOptionalValue(clientSpecificKey, String.class) .or(() -> config.getOptionalValue(generalKey, String.class)) - .map(cfg -> { - log.tracef("OVERRIDE config 
%s for cluster %s", configName, clusterKey); - return unquote(cfg); - }); - } - - Optional getDefaultConfig(String clusterKey, String clientType, String configName) { - String clientSpecificKey = "kafka.%s.%s".formatted(clientType, configName); - String generalKey = "kafka.%s".formatted(configName); - - return config.getOptionalValue(clientSpecificKey, String.class) - .or(() -> config.getOptionalValue(generalKey, String.class)) - .map(cfg -> { - log.tracef("DEFAULT config %s for cluster %s", configName, clusterKey); - return unquote(cfg); - }); + .map(this::unquote); } String unquote(String cfg) { diff --git a/api/src/main/java/com/github/eyefloaters/console/api/config/ConsoleConfig.java b/api/src/main/java/com/github/eyefloaters/console/api/config/ConsoleConfig.java new file mode 100644 index 000000000..a64a263f2 --- /dev/null +++ b/api/src/main/java/com/github/eyefloaters/console/api/config/ConsoleConfig.java @@ -0,0 +1,14 @@ +package com.github.eyefloaters.console.api.config; + +public class ConsoleConfig { + + KafkaConfig kafka = new KafkaConfig(); + + public KafkaConfig getKafka() { + return kafka; + } + + public void setKafka(KafkaConfig kafka) { + this.kafka = kafka; + } +} diff --git a/api/src/main/java/com/github/eyefloaters/console/api/config/KafkaClusterConfig.java b/api/src/main/java/com/github/eyefloaters/console/api/config/KafkaClusterConfig.java new file mode 100644 index 000000000..766e4600c --- /dev/null +++ b/api/src/main/java/com/github/eyefloaters/console/api/config/KafkaClusterConfig.java @@ -0,0 +1,79 @@ +package com.github.eyefloaters.console.api.config; + +import java.util.HashMap; +import java.util.Map; + +import com.fasterxml.jackson.annotation.JsonIgnore; + +public class KafkaClusterConfig { + + private String name; + private String namespace; + private String listener; + private Map properties = new HashMap<>(); + private Map adminProperties = new HashMap<>(); + private Map consumerProperties = new HashMap<>(); + private Map producerProperties = new HashMap<>(); + + @JsonIgnore + public String clusterKey() { + return "%s/%s".formatted(namespace, name); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getNamespace() { + return namespace; + } + + public void setNamespace(String namespace) { + this.namespace = namespace; + } + + public String getListener() { + return listener; + } + + public void setListener(String listener) { + this.listener = listener; + } + + public Map getProperties() { + return properties; + } + + public void setProperties(Map properties) { + this.properties = properties; + } + + public Map getAdminProperties() { + return adminProperties; + } + + public void setAdminProperties(Map adminProperties) { + this.adminProperties = adminProperties; + } + + public Map getConsumerProperties() { + return consumerProperties; + } + + public void setConsumerProperties(Map consumerProperties) { + this.consumerProperties = consumerProperties; + } + + public Map getProducerProperties() { + return producerProperties; + } + + public void setProducerProperties(Map producerProperties) { + this.producerProperties = producerProperties; + } + +} diff --git a/api/src/main/java/com/github/eyefloaters/console/api/config/KafkaConfig.java b/api/src/main/java/com/github/eyefloaters/console/api/config/KafkaConfig.java new file mode 100644 index 000000000..087b39229 --- /dev/null +++ b/api/src/main/java/com/github/eyefloaters/console/api/config/KafkaConfig.java @@ -0,0 +1,28 @@ +package 
com.github.eyefloaters.console.api.config; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import com.fasterxml.jackson.annotation.JsonIgnore; + +public class KafkaConfig { + + List clusters = new ArrayList<>(); + + @JsonIgnore + public Optional getCluster(String clusterKey) { + return clusters.stream() + .filter(k -> k.clusterKey().equals(clusterKey)) + .findFirst(); + } + + public List getClusters() { + return clusters; + } + + public void setClusters(List clusters) { + this.clusters = clusters; + } + +} diff --git a/api/src/main/java/com/github/eyefloaters/console/api/service/KafkaClusterService.java b/api/src/main/java/com/github/eyefloaters/console/api/service/KafkaClusterService.java index ff757c4d5..4c9644cfe 100644 --- a/api/src/main/java/com/github/eyefloaters/console/api/service/KafkaClusterService.java +++ b/api/src/main/java/com/github/eyefloaters/console/api/service/KafkaClusterService.java @@ -3,7 +3,6 @@ import java.util.Collection; import java.util.Comparator; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletionStage; @@ -18,10 +17,10 @@ import org.apache.kafka.clients.admin.DescribeClusterOptions; import org.apache.kafka.clients.admin.DescribeClusterResult; import org.apache.kafka.common.KafkaFuture; -import org.eclipse.microprofile.config.inject.ConfigProperty; import org.jboss.logging.Logger; import com.github.eyefloaters.console.api.Annotations; +import com.github.eyefloaters.console.api.config.ConsoleConfig; import com.github.eyefloaters.console.api.model.Condition; import com.github.eyefloaters.console.api.model.KafkaCluster; import com.github.eyefloaters.console.api.model.KafkaListener; @@ -44,8 +43,6 @@ @ApplicationScoped public class KafkaClusterService { - static final String KAFKA_CONFIG_PREFIX = "console.kafka"; - @Inject Logger logger; @@ -53,8 +50,7 @@ public class KafkaClusterService { SharedIndexInformer kafkaInformer; @Inject - @ConfigProperty(name = KAFKA_CONFIG_PREFIX) - Optional> clusterNames; + ConsoleConfig consoleConfig; @Inject Supplier clientSupplier; @@ -103,7 +99,7 @@ KafkaCluster toKafkaCluster(Kafka kafka) { // Identify that the cluster is configured with connection information String clusterKey = Cache.metaNamespaceKeyFunc(kafka); - cluster.setConfigured(clusterNames.map(names -> names.containsValue(clusterKey)).orElse(false)); + cluster.setConfigured(consoleConfig.getKafka().getCluster(clusterKey).isPresent()); return cluster; } diff --git a/api/src/main/resources/application.properties b/api/src/main/resources/application.properties index a3c294f4b..244dc8633 100644 --- a/api/src/main/resources/application.properties +++ b/api/src/main/resources/application.properties @@ -41,9 +41,6 @@ quarkus.swagger-ui.title=Console API quarkus.log.category."org.apache.kafka".level=WARN -quarkus.jacoco.reuse-data-file=true -quarkus.jacoco.report=false - quarkus.container-image.labels."org.opencontainers.image.version"=${quarkus.application.version} quarkus.container-image.labels."org.opencontainers.image.revision"=${git.revision} @@ -57,8 +54,8 @@ quarkus.container-image.labels."org.opencontainers.image.revision"=${git.revisio # eligible for access from a CDI `Instance`. 
quarkus.arc.unremovable-types=com.github.eyefloaters.console.api.** -kafka.admin.request.timeout.ms=10000 -kafka.admin.default.api.timeout.ms=10000 +console.kafka.admin.request.timeout.ms=10000 +console.kafka.admin.default.api.timeout.ms=10000 ######## #%dev.quarkus.http.auth.proactive=false diff --git a/api/src/test/java/com/github/eyefloaters/console/api/KafkaClustersResourceIT.java b/api/src/test/java/com/github/eyefloaters/console/api/KafkaClustersResourceIT.java index bdaf4d72f..120bdb588 100644 --- a/api/src/test/java/com/github/eyefloaters/console/api/KafkaClustersResourceIT.java +++ b/api/src/test/java/com/github/eyefloaters/console/api/KafkaClustersResourceIT.java @@ -36,6 +36,7 @@ import org.junit.jupiter.params.provider.CsvSource; import org.mockito.Mockito; +import com.github.eyefloaters.console.api.config.ConsoleConfig; import com.github.eyefloaters.console.api.model.ListFetchParams; import com.github.eyefloaters.console.api.service.KafkaClusterService; import com.github.eyefloaters.console.api.support.ErrorCategory; @@ -102,6 +103,9 @@ class KafkaClustersResourceIT { @Inject KafkaClusterService kafkaClusterService; + @Inject + ConsoleConfig consoleConfig; + @DeploymentManager.InjectDeploymentManager DeploymentManager deployments; @@ -117,7 +121,10 @@ class KafkaClustersResourceIT { void setup() throws IOException { kafkaContainer = deployments.getKafkaContainer(); bootstrapServers = URI.create(kafkaContainer.getBootstrapServers()); - randomBootstrapServers = URI.create(config.getValue("console.kafka.testk2.bootstrap.servers", String.class)); + randomBootstrapServers = URI.create(consoleConfig.getKafka() + .getCluster("default/test-kafka2") + .map(k -> k.getProperties().get("bootstrap.servers")) + .orElseThrow()); utils = new TestHelper(bootstrapServers, config, null); diff --git a/api/src/test/java/com/github/eyefloaters/console/api/RecordsResourceIT.java b/api/src/test/java/com/github/eyefloaters/console/api/RecordsResourceIT.java index 50ecabee0..6a2dbfbc5 100644 --- a/api/src/test/java/com/github/eyefloaters/console/api/RecordsResourceIT.java +++ b/api/src/test/java/com/github/eyefloaters/console/api/RecordsResourceIT.java @@ -34,6 +34,7 @@ import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.ValueSource; +import com.github.eyefloaters.console.api.config.ConsoleConfig; import com.github.eyefloaters.console.api.service.RecordService; import com.github.eyefloaters.console.kafka.systemtest.TestPlainProfile; import com.github.eyefloaters.console.kafka.systemtest.deployment.DeploymentManager; @@ -70,6 +71,9 @@ class RecordsResourceIT { @Inject Config config; + @Inject + ConsoleConfig consoleConfig; + @Inject KubernetesClient client; @@ -85,7 +89,10 @@ class RecordsResourceIT { @BeforeEach void setup() throws IOException { URI bootstrapServers = URI.create(deployments.getExternalBootstrapServers()); - URI randomBootstrapServers = URI.create(config.getValue("console.kafka.testk2.bootstrap.servers", String.class)); + URI randomBootstrapServers = URI.create(consoleConfig.getKafka() + .getCluster("default/test-kafka2") + .map(k -> k.getProperties().get("bootstrap.servers")) + .orElseThrow()); topicUtils = new TopicHelper(bootstrapServers, config, null); topicUtils.deleteAllTopics(); diff --git a/api/src/test/java/com/github/eyefloaters/console/api/TopicsResourceIT.java b/api/src/test/java/com/github/eyefloaters/console/api/TopicsResourceIT.java index d1e0e61c7..a36fc1d03 100644 --- 
a/api/src/test/java/com/github/eyefloaters/console/api/TopicsResourceIT.java +++ b/api/src/test/java/com/github/eyefloaters/console/api/TopicsResourceIT.java @@ -64,6 +64,7 @@ import org.skyscreamer.jsonassert.JSONAssert; import org.skyscreamer.jsonassert.JSONCompareMode; +import com.github.eyefloaters.console.api.config.ConsoleConfig; import com.github.eyefloaters.console.kafka.systemtest.TestPlainProfile; import com.github.eyefloaters.console.kafka.systemtest.deployment.DeploymentManager; import com.github.eyefloaters.console.kafka.systemtest.utils.ConsumerUtils; @@ -118,6 +119,9 @@ class TopicsResourceIT { @Inject Config config; + @Inject + ConsoleConfig consoleConfig; + @Inject KubernetesClient client; @@ -142,7 +146,10 @@ class TopicsResourceIT { @BeforeEach void setup() throws IOException { bootstrapServers1 = URI.create(deployments.getExternalBootstrapServers()); - URI randomBootstrapServers = URI.create(config.getValue("console.kafka.testk2.bootstrap.servers", String.class)); + URI randomBootstrapServers = URI.create(consoleConfig.getKafka() + .getCluster("default/test-kafka2") + .map(k -> k.getProperties().get("bootstrap.servers")) + .orElseThrow()); topicUtils = new TopicHelper(bootstrapServers1, config, null); topicUtils.deleteAllTopics(); diff --git a/api/src/test/java/com/github/eyefloaters/console/kafka/systemtest/deployment/KafkaUnsecuredResourceManager.java b/api/src/test/java/com/github/eyefloaters/console/kafka/systemtest/deployment/KafkaUnsecuredResourceManager.java index f3c5472aa..91edb02cf 100644 --- a/api/src/test/java/com/github/eyefloaters/console/kafka/systemtest/deployment/KafkaUnsecuredResourceManager.java +++ b/api/src/test/java/com/github/eyefloaters/console/kafka/systemtest/deployment/KafkaUnsecuredResourceManager.java @@ -1,19 +1,22 @@ package com.github.eyefloaters.console.kafka.systemtest.deployment; +import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; import java.net.ServerSocket; import java.net.URI; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; import java.util.Map; import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.common.security.auth.SecurityProtocol; import io.quarkus.test.common.QuarkusTestResourceLifecycleManager; public class KafkaUnsecuredResourceManager extends KafkaResourceManager implements QuarkusTestResourceLifecycleManager { ServerSocket randomSocket; + File configFile; @Override public Map start() { @@ -30,16 +33,39 @@ public Map start() { URI randomBootstrapServers = URI.create("dummy://localhost:" + randomSocket.getLocalPort()); + try { + configFile = File.createTempFile("console-test-config-", ".yaml"); + configFile.deleteOnExit(); + + Files.writeString(configFile.toPath(), """ + kafka: + clusters: + - name: test-kafka1 + namespace: default + properties: + bootstrap.servers: %s + - name: test-kafka2 + namespace: default + properties: + bootstrap.servers: %s + - name: test-kafka3 + namespace: default + # listener is named and bootstrap.servers not set (will be retrieved from Kafka CR) + listener: listener0 + properties: + security.protocol: SSL + """.formatted( + externalBootstrap, + randomBootstrapServers.toString(), + externalBootstrap), + StandardOpenOption.WRITE); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + return Map.ofEntries( Map.entry(profile + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, externalBootstrap), - Map.entry(profile + "console.kafka.testk1", "default/test-kafka1"), - Map.entry(profile + 
"console.kafka.testk1." + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, externalBootstrap), - Map.entry(profile + "console.kafka.testk2", "default/test-kafka2"), - Map.entry(profile + "console.kafka.testk2." + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, randomBootstrapServers.toString()), - // Placeholder configuration to allow for a CR to be added named test-kafka3 that will proxy to test-kafka1 - Map.entry(profile + "console.kafka.testk3", "default/test-kafka3"), - Map.entry(profile + "console.kafka.testk3." + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SSL.name), - Map.entry(profile + "console.kafka.testk3." + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, externalBootstrap)); + Map.entry(profile + "console.config-path", configFile.getAbsolutePath())); } @Override @@ -53,5 +79,7 @@ public void stop() { throw new UncheckedIOException(e); } } + + configFile.delete(); } } diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 000000000..ca69d1eaa --- /dev/null +++ b/compose.yaml @@ -0,0 +1,27 @@ +--- +version: '3.9' + +services: + console-api: + image: ${CONSOLE_API_IMAGE} + container_name: console-api + network_mode: host + volumes: + - ${PWD}/console-config.yaml:/deployments/console-config.yaml:z + environment: + CONSOLE_CONFIG_PATH: /deployments/console-config.yaml + QUARKUS_KUBERNETES_CLIENT_API_SERVER_URL: ${CONSOLE_API_KUBERNETES_API_SERVER_URL} + QUARKUS_KUBERNETES_CLIENT_TRUST_CERTS: "true" + QUARKUS_KUBERNETES_CLIENT_TOKEN: ${CONSOLE_API_SERVICE_ACCOUNT_TOKEN} + + console-ui: + image: ${CONSOLE_UI_IMAGE} + container_name: console-ui + network_mode: host + environment: + HOSTNAME: localhost + PORT: 3005 + CONSOLE_METRICS_PROMETHEUS_URL: ${CONSOLE_METRICS_PROMETHEUS_URL} + NEXTAUTH_SECRET: ${CONSOLE_UI_NEXTAUTH_SECRET} + NEXTAUTH_URL: http://localhost:3005 + BACKEND_URL: http://localhost:8080/ diff --git a/console-config-example.yaml b/console-config-example.yaml new file mode 100644 index 000000000..369e6b851 --- /dev/null +++ b/console-config-example.yaml @@ -0,0 +1,28 @@ +kafka: + clusters: + - name: my-kafka1 # name of the Strimzi Kafka CR + namespace: my-namespace1 # namespace of the Strimzi Kafka CR + listener: "secure" # name of the listener to use for connections from the console + # `properties` contains keys/values to use for any Kafka connection + properties: + security.protocol: SASL_SSL + sasl.mechanism: SCRAM-SHA-512 + bootstrap.servers: my-kafka1.cloud.example.com:9093 # optional, if omitted the bootstrap servers from the Strimzi Kafka CR are used + sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka1-user" password="sCr@m!"; + # `adminProperties` contains keys/values to use for Admin client Kafka connections. + # Properties specified here override properties of the same name in `properties` + adminProperties: {} + # `consumerProperties` contains keys/values to use for Consumer client Kafka connections. + # Properties specified here override properties of the same name in `properties` + consumerProperties: {} + # `producerProperties` contains keys/values to use for Producer client Kafka connections. 
+ # Properties specified here override properties of the same name in `properties` + producerProperties: {} + + - name: my-kafka2 + namespace: my-namespace2 + listener: "secure" + properties: + security.protocol: SASL_SSL + sasl.mechanism: SCRAM-SHA-512 + sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka2-user" password="sCr@m!"; diff --git a/install/001-deploy-prometheus.sh b/install/001-deploy-prometheus.sh index 465b3fecd..e33e0d670 100755 --- a/install/001-deploy-prometheus.sh +++ b/install/001-deploy-prometheus.sh @@ -6,6 +6,7 @@ CONSOLE_INSTALL_PATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)" RESOURCE_PATH=${CONSOLE_INSTALL_PATH}/resources export NAMESPACE="${1?Please provide the deployment namespace}" +export CLUSTER_DOMAIN="${2:-}" source ${CONSOLE_INSTALL_PATH}/_common.sh if ! ${KUBE} get crd prometheuses.monitoring.coreos.com >/dev/null ; then @@ -26,3 +27,9 @@ ${KUBE} apply -n ${NAMESPACE} -f ${RESOURCE_PATH}/prometheus/kubernetes-scrape-c echo -e "${INFO} Apply Prometheus instance" ${KUBE} apply -n ${NAMESPACE} -f ${RESOURCE_PATH}/prometheus/console-prometheus.prometheus.yaml +${KUBE} apply -n ${NAMESPACE} -f ${RESOURCE_PATH}/prometheus/console-prometheus.service.yaml + +if [ -n "${CLUSTER_DOMAIN}" ] ; then + # Replace env variables + ${YQ} '(.. | select(tag == "!!str")) |= envsubst(ne)' ${RESOURCE_PATH}/prometheus/console-prometheus.ingress.yaml | ${KUBE} apply -n ${NAMESPACE} -f - +fi diff --git a/install/003-install-console.sh b/install/003-install-console.sh index 9440f313c..232ba2e76 100755 --- a/install/003-install-console.sh +++ b/install/003-install-console.sh @@ -7,6 +7,7 @@ RESOURCE_PATH=${CONSOLE_INSTALL_PATH}/resources export NAMESPACE="${1?Please provide the deployment namespace}" export CLUSTER_DOMAIN="${2?Please provide the base domain name for Kafka listener ingress}" +export CONSOLE_CONFIG="${3?Please provide the path to a console configuration YAML. See console-config-example.yaml for sample}" source ${CONSOLE_INSTALL_PATH}/_common.sh @@ -76,6 +77,11 @@ else echo -e "${WARN} Console Credential secret console-ui-secrets already exists, nothing applied" fi +${KUBE} create secret generic console-config -n ${NAMESPACE} \ + --dry-run=client \ + --from-file=console-config.yaml="${CONSOLE_CONFIG}" \ + -o yaml | ${KUBE} apply -n ${NAMESPACE} -f - + if ${KUBE} get deployment console -n ${NAMESPACE} 1>/dev/null 2>&1 ; then ${KUBE} scale --replicas=0 deployment/console -n ${NAMESPACE} fi diff --git a/install/README.md b/install/README.md index 74520ed56..ca5a342c6 100644 --- a/install/README.md +++ b/install/README.md @@ -7,7 +7,7 @@ are available on the `PATH`. ## Prerequisites -### Strimzi & Prometheus +### 1. Strimzi & Prometheus The console requires that the Strimzi Kafka Operator is installed and available in the cluster before deployment. Strimzi may be installed either using Operator Lifecycle Manager (OLM, preferred) or directly @@ -19,21 +19,25 @@ is not available, the cluster metrics graphs on the Kafka cluster overview scree Users who do not previously have Strimzi and Promethus installed may use the `000-install-dependency-operators.sh` and `001-deploy-prometheus.sh` scripts to bootstrap the environment. The scripts will install either the community-supported -or commercially supported (i.e. AMQ Streams) version of the two operators using OLM and deploy a Prometheus instance -configured to scrape metrics from any Kafka clusters deployed by Strimzi within the cluster. +or commercially supported (i.e. 
streams for Apache Kafka) version of the two operators using OLM and deploy a Prometheus instance +configured to scrape metrics from Kafka clusters deployed by Strimzi within the cluster. ```shell 000-install-dependency-operators.sh ${TARGET_NAMESPACE} -001-deploy-prometheus.sh ${TARGET_NAMESPACE} +001-deploy-prometheus.sh ${TARGET_NAMESPACE} ${CLUSTER_DOMAIN} ``` -### Apache Kafka Cluster +### 2. Apache Kafka Cluster Once the two prerequisite components have been installed, the demo Kafka cluster may be created using the `002-deploy-console-kafka.sh` script. This script will create a Strimzi `Kafka` custom resource as well as a `KafkaUser` custom resource for a user to access the cluster. Additionally, the Kafka cluster will be configured via a ConfigMap to export metrics in the way expected by the Prometheus instance created earlier. +```shell +002-deploy-console-kafka.sh ${TARGET_NAMESPACE} ${CLUSTER_DOMAIN} +``` + ### Authorization In order to allow the necessary access for the console to function, a minimum level of authorization must be configured @@ -45,47 +49,19 @@ of ACL types are: 1. `READ`, `DESCRIBE`, `DESCRIBE_CONFIGS` for all `TOPIC` resources 1. `READ`, `DESCRIBE` for all `GROUP` resources -## Installation +## 3. Installation With the prerequisites met, the console can be deployed using the `003-install-console.sh` script. This script will create the role, role binding, service account, services, and ingress (or route in OpenShift) necessary to run the console. Finally, the console deployment is applied to the Kubernetes/OpenShift cluster. A link to access the application will be printed to the script's output if no errors are encountered. -The configurations used by the console to connect to Kafka may be customized by altering the environment variables -for the `console-api` container in `resources/console/console.deployment.yaml`. The format used for the variables -is as follows. - -Configurations that apply to all Kafka connections should use the format `KAFKA_CONFIG_WITH_UNDERSCORES`. For example, -if all clusters are configured to use `SASL_SSL` for the Kafka `security.protocol` property, you may set env -`KAFKA_SECURITY_PROTOCOL` to `SASL_SSL`. - -Each individual cluster must be configured with a variable like `CONSOLE_KAFKA_CLUSTER1` where `CLUSTER1` is a unique -name or identifier for each cluster and the value of the env is the `${namespace}/${name}` of the `Kafka` CR that -represents the cluster. - -Configurations that apply to an individual Kafka connection should use the format `CONSOLE_KAFKA_CLUSTER1_CONFIG_WITH_UNDERSCORES`. -Using the example above, if you would like to configure one cluster to use `SASL_SSL` for the Kafka `security.protocol` property, -the following env settings would be needed: - -```yaml -- name: CONSOLE_KAFKA_EXAMPLE - value: example-ns/console-kafka -- name: CONSOLE_KAFKA_EXAMPLE_SECURITY_PROTOCOL - value: SASL_SSL -- name: CONSOLE_KAFKA_EXAMPLE_BOOTSTRAP_SERVERS - value: bootstrap.console-kafka.example.com:443 -``` - -As always, configuration properties that contain sensitive information may be mounted from a `Secret`. For example, to -set the `sasl.jaas.config` property, you could use an env entry such as the following. +The configurations used by the console to connect to Kafka may be customized by providing your own configuration. +See [console-config-example.yaml](../console-config-example.yaml) in the repository root for an example. 
The path to +the customized configuration must be provided as the third argument of `003-install-console.sh` -```yaml -- name: CONSOLE_KAFKA_EXAMPLE_SASL_JAAS_CONFIG - valueFrom: - secretKeyRef: - name: console-kafka-user1 - key: sasl.jaas.config +```shell +003-install-console.sh ${TARGET_NAMESPACE} ${CLUSTER_DOMAIN} ${CONSOLE_CONFIG} ``` ## References diff --git a/install/resources/console/console.deployment.yaml b/install/resources/console/console.deployment.yaml index 5fd9e7787..6e7cc0a11 100644 --- a/install/resources/console/console.deployment.yaml +++ b/install/resources/console/console.deployment.yaml @@ -15,56 +15,43 @@ spec: spec: serviceAccountName: console-server volumes: - - emptyDir: {} - name: cache + - name: cache + emptyDir: {} + - name: config + secret: + secretName: console-config containers: ### API - name: console-api - image: quay.io/eyefloaters/console-api:0.0.35 + image: quay.io/eyefloaters/console-api:latest + imagePullPolicy: Always ports: - containerPort: 8080 - # Adjust KAFKA and CONSOLE_KAFKA variables to match your Kafka cluster + volumeMounts: + - name: config + mountPath: /deployments/console-config.yaml + subPath: console-config.yaml env: - - name: KAFKA_SECURITY_PROTOCOL - value: SASL_SSL - - name: KAFKA_SASL_MECHANISM - value: SCRAM-SHA-512 - - name: CONSOLE_KAFKA_EXAMPLE - value: ${NAMESPACE}/console-kafka - - name: CONSOLE_KAFKA_EXAMPLE_BOOTSTRAP_SERVERS - # Replace with your Kafka's boostrap address - value: bootstrap.console-kafka.${CLUSTER_DOMAIN}:443 - - name: CONSOLE_KAFKA_EXAMPLE_SASL_JAAS_CONFIG - valueFrom: - secretKeyRef: - name: console-kafka-user1 - key: sasl.jaas.config + - name: CONSOLE_CONFIG_PATH + value: /deployments/console-config.yaml ### User Interface - name: console-ui - image: quay.io/eyefloaters/ui:0.0.35 + image: quay.io/eyefloaters/console-ui:latest + imagePullPolicy: Always + ports: + - containerPort: 3000 volumeMounts: - - mountPath: /app/.next/cache - name: cache + - name: cache + mountPath: /app/.next/cache env: - name: NEXTAUTH_SECRET valueFrom: secretKeyRef: name: console-ui-secrets key: NEXTAUTH_SECRET - - name: SESSION_SECRET - valueFrom: - secretKeyRef: - name: console-ui-secrets - key: SESSION_SECRET - name: NEXTAUTH_URL value: 'https://${CONSOLE_HOSTNAME}' - name: BACKEND_URL value: 'http://127.0.0.1:8080' - name: CONSOLE_METRICS_PROMETHEUS_URL value: 'http://prometheus-operated.${NAMESPACE}.svc.cluster.local:9090' - - name: CONSOLE_MODE - value: read-only - - name: LOG_LEVEL - value: info - ports: - - containerPort: 3000 diff --git a/install/resources/prometheus/console-prometheus.ingress.yaml b/install/resources/prometheus/console-prometheus.ingress.yaml index 094fc06f1..4b008a8f2 100644 --- a/install/resources/prometheus/console-prometheus.ingress.yaml +++ b/install/resources/prometheus/console-prometheus.ingress.yaml @@ -4,11 +4,11 @@ metadata: name: console-prometheus-ingress annotations: nginx.ingress.kubernetes.io/backend-protocol: HTTP - route.openshift.io/termination: edge + route.openshift.io/termination: none spec: defaultBackend: service: - name: prometheus-operated + name: console-prometheus port: number: 9090 rules: @@ -18,6 +18,6 @@ spec: - pathType: ImplementationSpecific backend: service: - name: prometheus-operated + name: console-prometheus port: number: 9090 diff --git a/install/resources/prometheus/console-prometheus.service.yaml b/install/resources/prometheus/console-prometheus.service.yaml new file mode 100644 index 000000000..e5e8e8518 --- /dev/null +++ 
b/install/resources/prometheus/console-prometheus.service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: console-prometheus +spec: + type: ClusterIP + ports: + - port: 9090 + targetPort: 9090 + selector: + prometheus: console-prometheus diff --git a/ui/Dockerfile b/ui/Dockerfile index 60b884530..1d3605535 100644 --- a/ui/Dockerfile +++ b/ui/Dockerfile @@ -4,14 +4,8 @@ ARG console_mode=read-only FROM registry.access.redhat.com/ubi9/nodejs-18 AS deps USER 0 WORKDIR /app -RUN npm install -g yarn -COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./ -RUN \ - if [ -f yarn.lock ]; then yarn --frozen-lockfile; \ - elif [ -f package-lock.json ]; then npm ci --omit=dev; \ - elif [ -f pnpm-lock.yaml ]; then yarn global add pnpm && pnpm i --frozen-lockfile; \ - else echo "Lockfile not found." && exit 1; \ - fi +COPY package.json package-lock.json* ./ +RUN npm ci --omit=dev FROM registry.access.redhat.com/ubi9/nodejs-18 AS builder USER 0 diff --git a/ui/messages/en.json b/ui/messages/en.json index fbe6a6ec4..b19de044c 100644 --- a/ui/messages/en.json +++ b/ui/messages/en.json @@ -290,7 +290,7 @@ "kafka_version": "Kafka version", "project": "Project", "connection_not_configured": "Connection not configured", - "brokers_link": "Brokers" + "brokers_link": "brokers" }, "ColumnsModal": { "title": "Manage columns",