From da9cc56bb7ebe8b1341b18e99274513a571353d5 Mon Sep 17 00:00:00 2001 From: Michael Edgar Date: Mon, 10 Jun 2024 08:00:39 -0400 Subject: [PATCH] Authorization, audit logging, optional OIDC authN, UI error handling Signed-off-by: Michael Edgar --- README.md | 5 + api/pom.xml | 18 +- .../console/api/BrokersResource.java | 5 + .../streamshub/console/api/ClientFactory.java | 117 ++-- .../console/api/ConsumerGroupsResource.java | 11 + .../console/api/KafkaClustersResource.java | 11 +- .../console/api/KafkaRebalancesResource.java | 85 +++ .../console/api/RecordsResource.java | 7 + .../console/api/TopicsResource.java | 26 +- .../client/ForbiddenExceptionHandler.java | 34 ++ .../client/JsonProcessingExceptionMapper.java | 2 +- .../console/api/model/ConsumerGroup.java | 7 +- .../console/api/model/KafkaRebalance.java | 13 +- .../streamshub/console/api/model/Topic.java | 17 +- .../security/AuthorizationInterceptor.java | 175 ++++++ .../console/api/security/Authorized.java | 18 + .../ConsoleAuthenticationMechanism.java | 489 +++++++++++++++ .../api/security/ConsolePermission.java | 169 ++++++ .../security/OidcTenantConfigResolver.java | 51 ++ .../api/security/PermissionService.java | 69 +++ .../api/security/ResourcePrivilege.java | 20 + .../security/SaslJaasConfigCredential.java | 40 ++ .../api/service/ConsumerGroupService.java | 91 ++- .../api/service/KafkaClusterService.java | 8 +- .../api/service/KafkaRebalanceService.java | 16 + .../console/api/service/RecordService.java | 44 +- .../api/service/TopicDescribeService.java | 533 +++++++++++++++++ .../console/api/service/TopicService.java | 484 ++------------- api/src/main/resources/application.properties | 9 +- .../console/api/BrokersResourceIT.java | 2 + .../console/api/ConsumerGroupsResourceIT.java | 2 + .../console/api/KafkaClustersResourceIT.java | 47 +- .../api/KafkaClustersResourceNoK8sIT.java | 1 + .../api/KafkaClustersResourceOidcIT.java | 340 +++++++++++ .../api/KafkaRebalancesResourceIT.java | 8 +- .../api/KafkaRebalancesResourceOidcIT.java | 281 +++++++++ .../console/api/RecordsResourceIT.java | 10 + .../console/api/TopicsResourceIT.java | 74 +++ .../console/api/TopicsResourceOidcIT.java | 557 ++++++++++++++++++ .../kafka/systemtest/TestPlainProfile.java | 15 +- .../deployment/KeycloakResourceManager.java | 58 ++ .../kafka/systemtest/utils/TokenUtils.java | 33 +- .../streamshub/console/test/LogCapture.java | 93 +++ .../streamshub/console/test/TestHelper.java | 11 + .../console/test/VarargsAggregator.java | 26 + .../resources/keycloak/console-realm.json | 131 ++++ common/pom.xml | 9 + .../console/config/ConsoleConfig.java | 39 ++ .../console/config/KafkaClusterConfig.java | 18 + .../console/config/KafkaConfig.java | 10 + .../console/config/KubernetesConfig.java | 3 + .../console/config/SchemaRegistryConfig.java | 3 + .../console/config/security/Audit.java | 39 ++ .../console/config/security/AuditConfig.java | 17 + .../config/security/GlobalSecurityConfig.java | 20 + .../console/config/security/OidcConfig.java | 59 ++ .../console/config/security/Privilege.java | 41 ++ .../config/security/ResourceTypes.java | 179 ++++++ .../console/config/security/RoleConfig.java | 38 ++ .../console/config/security/RuleConfig.java | 54 ++ .../config/security/SecurityConfig.java | 46 ++ .../config/security/SubjectConfig.java | 46 ++ .../console/config/ConsoleConfigTest.java | 128 ++++ console-config-example.yaml | 54 -- examples/console-config.yaml | 135 +++++ .../020-ClusterRole-console-dex.yaml | 12 + .../030-ClusterRoleBinding-console-dex.yaml | 13 + 
.../dex-openshift/040-Secret-console-dex.yaml | 53 ++ .../050-Deployment-console-dex.yaml | 62 ++ .../060-Service-console-dex.yaml | 14 + .../070-Ingress-console-dex.yaml | 24 + examples/dex-openshift/README.md | 55 ++ operator/pom.xml | 1 - .../src/main/resources/application.properties | 9 + .../dependents/console.deployment.yaml | 9 +- pom.xml | 5 + ui/Dockerfile | 1 + ui/api/api.ts | 175 +++++- ui/api/consumerGroups/actions.ts | 189 ++---- ui/api/consumerGroups/schema.ts | 64 +- ui/api/kafka/actions.ts | 113 ++-- ui/api/kafka/schema.ts | 2 +- ui/api/messages/actions.ts | 148 +++-- ui/api/messages/schema.ts | 4 +- ui/api/nodes/actions.ts | 24 +- ui/api/rebalance/actions.ts | 93 ++- ui/api/topics/actions.ts | 146 ++--- ui/api/topics/schema.ts | 36 +- .../nodes/[nodeId]/NodeBreadcrumb.tsx | 16 +- .../topics/[topicId]/TopicBreadcrumb.tsx | 4 +- .../kafka/[kafkaId]/@header/KafkaHeader.tsx | 13 +- .../consumer-groups/[groupId]/page.tsx | 7 +- .../@header/nodes/[nodeId]/NodeHeader.tsx | 21 +- .../kafka/[kafkaId]/@header/nodes/page.tsx | 2 +- .../@header/nodes/rebalances/page.tsx | 2 +- .../@header/overview/ConnectButton.tsx | 6 +- .../kafka/[kafkaId]/@header/overview/page.tsx | 10 +- .../@header/topics/[topicId]/TopicHeader.tsx | 13 +- .../kafka/[kafkaId]/@header/topics/page.tsx | 17 +- .../@modal/topics/[topicId]/delete/page.tsx | 11 +- .../kafka/[kafkaId]/ClusterLinks.tsx | 2 +- .../consumer-groups/ConsumerGroupsTable.tsx | 20 +- .../consumer-groups/[groupId]/LagTable.tsx | 4 +- .../[groupId]/MembersTable.tsx | 14 +- .../consumer-groups/[groupId]/page.tsx | 17 +- .../[groupId]/reset-offset/Dryrun.tsx | 4 +- .../reset-offset/ResetConsumerOffset.tsx | 52 +- .../[groupId]/reset-offset/page.tsx | 28 +- .../kafka/[kafkaId]/consumer-groups/page.tsx | 15 +- .../(authorized)/kafka/[kafkaId]/layout.tsx | 11 +- .../nodes/[nodeId]/configuration/page.tsx | 10 +- .../kafka/[kafkaId]/nodes/page.tsx | 5 +- .../rebalances/ConnectedRebalancesTable.tsx | 4 +- .../nodes/rebalances/[rebalanceId]/page.tsx | 7 +- .../kafka/[kafkaId]/nodes/rebalances/page.tsx | 12 +- .../overview/ConnectedClusterCard.tsx | 5 +- .../ConnectedTopicsPartitionsCard.tsx | 22 +- .../kafka/[kafkaId]/overview/page.tsx | 5 +- .../(authorized)/kafka/[kafkaId]/page.tsx | 2 +- .../kafka/[kafkaId]/topics/(page)/page.tsx | 10 +- .../[topicId]/configuration/ConfigTable.tsx | 33 +- .../topics/[topicId]/configuration/page.tsx | 16 +- .../consumer-groups/ConsumerGroupsTable.tsx | 8 +- .../topics/[topicId]/consumer-groups/page.tsx | 16 +- .../messages/ConnectedMessagesTable.tsx | 113 ++-- .../topics/[topicId]/messages/page.tsx | 18 +- .../[topicId]/partitions/PartitionsTable.tsx | 11 +- .../topics/[topicId]/partitions/page.tsx | 12 +- .../[kafkaId]/topics/create/CreateTopic.tsx | 46 +- .../kafka/[kafkaId]/topics/create/Errors.tsx | 8 +- .../[kafkaId]/topics/create/StepDetails.tsx | 8 +- .../[kafkaId]/topics/create/StepOptions.tsx | 13 +- .../[kafkaId]/topics/create/StepReview.tsx | 11 +- .../kafka/[kafkaId]/topics/create/page.tsx | 14 +- .../create/topicMutateErrorToFieldError.ts | 24 +- ui/app/[locale]/(authorized)/kafka/page.tsx | 2 - ui/app/[locale]/(public)/(home)/page.tsx | 30 +- .../(public)/kafka/[kafkaId]/login/page.tsx | 3 +- ui/app/[locale]/layout.tsx | 4 - ui/app/api/auth/[...nextauth]/anonymous.ts | 1 - ui/app/api/auth/[...nextauth]/auth-options.ts | 66 ++- ui/app/api/auth/[...nextauth]/keycloak.ts | 144 ----- ui/app/api/auth/[...nextauth]/oauth-token.ts | 2 - ui/app/api/auth/[...nextauth]/oidc.ts | 195 ++++++ 
ui/app/api/auth/[...nextauth]/scram.ts | 2 - ui/app/api/auth/oidc/layout.tsx | 15 + ui/app/api/auth/oidc/signin/page.tsx | 23 + ui/app/config/route.ts | 15 + ui/app/layout.tsx | 4 +- ui/components/ClusterConnectionDetails.tsx | 2 +- ui/components/ClusterOverview/ClusterCard.tsx | 6 +- .../ClusterOverview/TopicsPartitionsCard.tsx | 46 +- ui/components/ClustersTable.tsx | 28 +- ui/components/Format/Number.tsx | 4 +- ui/components/NoDataErrorState.tsx | 46 ++ ui/components/ReconciliationPausedBanner.tsx | 6 +- ui/components/ReconciliationProvider.tsx | 2 +- ui/components/TopicsTable/TopicsTable.tsx | 34 +- ui/environment.d.ts | 3 - ui/middleware.ts | 21 +- ui/package-lock.json | 102 ++-- ui/package.json | 2 + ui/utils/config.ts | 56 ++ ui/utils/env.ts | 14 +- 164 files changed, 6178 insertions(+), 1875 deletions(-) create mode 100644 api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/Authorized.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java create mode 100644 api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java create mode 100644 api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceOidcIT.java create mode 100644 api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceOidcIT.java create mode 100644 api/src/test/java/com/github/streamshub/console/api/TopicsResourceOidcIT.java create mode 100644 api/src/test/java/com/github/streamshub/console/kafka/systemtest/deployment/KeycloakResourceManager.java create mode 100644 api/src/test/java/com/github/streamshub/console/test/LogCapture.java create mode 100644 api/src/test/java/com/github/streamshub/console/test/VarargsAggregator.java create mode 100644 api/src/test/resources/keycloak/console-realm.json create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/Audit.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/AuditConfig.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/Privilege.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/ResourceTypes.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java create mode 100644 common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java create mode 100644 
common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java delete mode 100644 console-config-example.yaml create mode 100644 examples/console-config.yaml create mode 100644 examples/dex-openshift/020-ClusterRole-console-dex.yaml create mode 100644 examples/dex-openshift/030-ClusterRoleBinding-console-dex.yaml create mode 100644 examples/dex-openshift/040-Secret-console-dex.yaml create mode 100644 examples/dex-openshift/050-Deployment-console-dex.yaml create mode 100644 examples/dex-openshift/060-Service-console-dex.yaml create mode 100644 examples/dex-openshift/070-Ingress-console-dex.yaml create mode 100644 examples/dex-openshift/README.md delete mode 100644 ui/app/api/auth/[...nextauth]/keycloak.ts create mode 100644 ui/app/api/auth/[...nextauth]/oidc.ts create mode 100644 ui/app/api/auth/oidc/layout.tsx create mode 100644 ui/app/api/auth/oidc/signin/page.tsx create mode 100644 ui/app/config/route.ts create mode 100644 ui/components/NoDataErrorState.tsx create mode 100644 ui/utils/config.ts diff --git a/README.md b/README.md index 5b223055c..7c09f6301 100644 --- a/README.md +++ b/README.md @@ -56,14 +56,19 @@ Prometheus is an optional dependency of the console if cluster metrics are to be - User-supplied Prometheus instances - Private Prometheus instance for each `Console`. The operator creates a managed Prometheus deployment for use only by the console. +#### OIDC Provider +The console may be configured to use an OpenID Connect (OIDC) provider for user authentication. An example using [dex](https://dexidp.io/) for OIDC with an OpenShift identity provider is available in [examples/dex-openshift](./examples/dex-openshift). + ### Deploy the operator with OLM The preferred way to deploy the console is using the Operator Lifecycle Manager, or OLM. The sample install files in `install/operator-olm` will install the operator with cluster-wide scope. This means that `Console` instances may be created in any namespace. If you wish to limit the scope of the operator, the `OperatorGroup` resource may be modified to specify only the namespace that should be watched by the operator. This example will create the operator's OLM resources in the `default` namespace. Modify the `NAMESPACE` variable according to your needs. + ```shell export NAMESPACE=default cat install/operator-olm/*.yaml | envsubst | kubectl apply -n ${NAMESPACE} -f - ``` + #### Console Custom Resource Example Once the operator is ready, you may then create a `Console` resource in the namespace where the console should be deployed. This example `Console` is based on the example Apache Kafka® cluster deployed above in the [prerequisites section](#prerequisites). Also see [examples/console/010-Console-example.yaml](examples/console/010-Console-example.yaml). 
```yaml diff --git a/api/pom.xml b/api/pom.xml index 9765d680f..7cd793517 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -88,6 +88,10 @@ io.quarkus quarkus-apicurio-registry-avro + + io.quarkus + quarkus-oidc + io.smallrye.common smallrye-common-annotation @@ -335,13 +339,19 @@ - default-report-integration + report-aggregate - report-integration + report-aggregate + verify - ${project.build.directory}/jacoco-quarkus.exec - XML + true + + com/github/streamshub/console/config/**/*Builder.class + com/github/streamshub/console/config/**/*Fluent.class + com/github/streamshub/console/config/**/*Nested.class + + ${project.reporting.outputDirectory}/jacoco diff --git a/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java b/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java index 7109aba39..bcf004311 100644 --- a/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/BrokersResource.java @@ -16,7 +16,10 @@ import org.eclipse.microprofile.openapi.annotations.tags.Tag; import com.github.streamshub.console.api.model.ConfigEntry; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.BrokerService; +import com.github.streamshub.console.config.security.Privilege; @Path("/api/kafkas/{clusterId}/nodes") @Tag(name = "Kafka Cluster Resources") @@ -32,6 +35,8 @@ public class BrokersResource { @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) public CompletionStage describeConfigs( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git a/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java b/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java index 240543c97..0a164817b 100644 --- a/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java +++ b/api/src/main/java/com/github/streamshub/console/api/ClientFactory.java @@ -1,6 +1,5 @@ package com.github.streamshub.console.api; -import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -46,14 +45,13 @@ import org.apache.kafka.common.config.SslConfigs; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; -import org.apache.kafka.common.security.plain.PlainLoginModule; -import org.apache.kafka.common.security.scram.ScramLoginModule; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.eclipse.microprofile.config.Config; import org.jboss.logging.Logger; import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.streamshub.console.api.security.SaslJaasConfigCredential; import com.github.streamshub.console.api.service.MetricsService; import com.github.streamshub.console.api.support.Holder; import com.github.streamshub.console.api.support.KafkaContext; @@ -67,6 +65,7 @@ import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.cache.Cache; +import io.quarkus.security.identity.SecurityIdentity; import 
io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaClusterSpec; import io.strimzi.api.kafka.model.kafka.KafkaSpec; @@ -96,20 +95,11 @@ public class ClientFactory { public static final String SCRAM_SHA256 = "SCRAM-SHA-256"; public static final String SCRAM_SHA512 = "SCRAM-SHA-512"; - private static final String BEARER = "Bearer "; private static final String STRIMZI_OAUTH_CALLBACK = "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler"; - private static final String SASL_OAUTH_CONFIG_TEMPLATE = OAuthBearerLoginModule.class.getName() - + " required" - + " oauth.access.token=\"%s\" ;"; - private static final String BASIC = "Basic "; - private static final String BASIC_TEMPLATE = "%s required username=\"%%s\" password=\"%%s\" ;"; - private static final String SASL_PLAIN_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(PlainLoginModule.class.getName()); - private static final String SASL_SCRAM_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(ScramLoginModule.class.getName()); - - static final String NO_SUCH_KAFKA_MESSAGE = "Requested Kafka cluster %s does not exist or is not configured"; - private final Function noSuchKafka = - clusterName -> new NotFoundException(NO_SUCH_KAFKA_MESSAGE.formatted(clusterName)); + public static final String NO_SUCH_KAFKA_MESSAGE = "Requested Kafka cluster %s does not exist or is not configured"; + public static final Function NO_SUCH_KAFKA = + clusterId -> new NotFoundException(NO_SUCH_KAFKA_MESSAGE.formatted(clusterId)); @Inject Logger log; @@ -439,7 +429,7 @@ void disposeKafkaContexts(@Disposes Map contexts) { log.infof("Closing all known KafkaContexts"); contexts.values().parallelStream().forEach(context -> { - log.infof("Closing KafkaContext %s", Cache.metaNamespaceKeyFunc(context.resource())); + log.infof("Closing KafkaContext %s", context.clusterId()); try { context.close(); } catch (Exception e) { @@ -464,6 +454,7 @@ void disposeKafkaContexts(@Disposes Map contexts) { @Produces @RequestScoped public KafkaContext produceKafkaContext(Map contexts, + SecurityIdentity identity, UnaryOperator filter, Function, Admin> adminBuilder) { @@ -473,22 +464,29 @@ public KafkaContext produceKafkaContext(Map contexts, return KafkaContext.EMPTY; } - return Optional.ofNullable(contexts.get(clusterId)) - .map(ctx -> { - if (ctx.admin() == null) { - /* - * Admin may be null if credentials were not given in the - * configuration. The user must provide the login secrets - * in the request in that case. - */ - var adminConfigs = maybeAuthenticate(ctx, Admin.class); - var admin = adminBuilder.apply(adminConfigs); - return new KafkaContext(ctx, filter.apply(admin)); - } + KafkaContext ctx = contexts.get(clusterId); - return ctx; - }) - .orElseThrow(() -> noSuchKafka.apply(clusterId)); + if (ctx == null) { + throw NO_SUCH_KAFKA.apply(clusterId); + } + + if (ctx.admin() == null) { + /* + * Admin may be null if credentials were not given in the + * configuration. The user must provide the login secrets + * in the request in that case. + * + * The identity should already carry the SASL credentials + * at this point (set in ConsoleAuthenticationMechanism), + * so here we will only retrieve them (if applicable) and + * set them in the admin configuration map. 
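+             *
+             * For example, with SASL/PLAIN the credential resolves to a JAAS entry of
+             * the form (username and password shown here are placeholders):
+             *
+             *   org.apache.kafka.common.security.plain.PlainLoginModule required
+             *     username="<user>" password="<password>" ;
+             *
+             * equivalent to the templates previously assembled inline in this class.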
+ */ + var adminConfigs = maybeAuthenticate(identity, ctx, Admin.class); + var admin = adminBuilder.apply(adminConfigs); + return new KafkaContext(ctx, filter.apply(admin)); + } + + return ctx; } public void disposeKafkaContext(@Disposes KafkaContext context, Map contexts) { @@ -505,8 +503,8 @@ public void disposeKafkaContext(@Disposes KafkaContext context, Map consumerSupplier(KafkaContext context) { - var configs = maybeAuthenticate(context, Consumer.class); + public Consumer consumerSupplier(SecurityIdentity identity, KafkaContext context) { + var configs = maybeAuthenticate(identity, context, Consumer.class); return new KafkaConsumer<>( configs, @@ -520,8 +518,8 @@ public void disposeConsumer(@Disposes Consumer consumer) @Produces @RequestScoped - public Producer producerSupplier(KafkaContext context) { - var configs = maybeAuthenticate(context, Producer.class); + public Producer producerSupplier(SecurityIdentity identity, KafkaContext context) { + var configs = maybeAuthenticate(identity, context, Producer.class); return new KafkaProducer<>( configs, context.schemaRegistryContext().keySerializer(), @@ -532,13 +530,13 @@ public void disposeProducer(@Disposes Producer producer) producer.close(); } - Map maybeAuthenticate(KafkaContext context, Class clientType) { + Map maybeAuthenticate(SecurityIdentity identity, KafkaContext context, Class clientType) { Map configs = context.configs(clientType); if (configs.containsKey(SaslConfigs.SASL_MECHANISM) && !configs.containsKey(SaslConfigs.SASL_JAAS_CONFIG)) { configs = new HashMap<>(configs); - configureAuthentication(context.saslMechanism(clientType), configs); + configureAuthentication(identity, context.saslMechanism(clientType), configs); } return configs; @@ -697,23 +695,25 @@ void logConfig(String clientType, Map config) { } } - void configureAuthentication(String saslMechanism, Map configs) { + void configureAuthentication(SecurityIdentity identity, String saslMechanism, Map configs) { + SaslJaasConfigCredential credential = identity.getCredential(SaslJaasConfigCredential.class); + switch (saslMechanism) { case OAUTHBEARER: - configureOAuthBearer(configs); + configureOAuthBearer(credential, configs); break; case PLAIN: - configureBasic(configs, SASL_PLAIN_CONFIG_TEMPLATE); + configureBasic(credential, configs); break; case SCRAM_SHA256, SCRAM_SHA512: - configureBasic(configs, SASL_SCRAM_CONFIG_TEMPLATE); + configureBasic(credential, configs); break; default: throw new NotAuthorizedException("Unknown"); } } - void configureOAuthBearer(Map configs) { + void configureOAuthBearer(SaslJaasConfigCredential credential, Map configs) { log.trace("SASL/OAUTHBEARER enabled"); configs.putIfAbsent(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, STRIMZI_OAUTH_CALLBACK); @@ -721,39 +721,12 @@ void configureOAuthBearer(Map configs) { // May still cause warnings to be logged when token will expire in less than SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS. 
configs.putIfAbsent(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS, "0"); - String jaasConfig = getAuthorization(BEARER) - .map(SASL_OAUTH_CONFIG_TEMPLATE::formatted) - .orElseThrow(() -> new NotAuthorizedException(BEARER.trim())); - - configs.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig); + configs.put(SaslConfigs.SASL_JAAS_CONFIG, credential.value()); } - void configureBasic(Map configs, String template) { + void configureBasic(SaslJaasConfigCredential credential, Map configs) { log.trace("SASL/SCRAM enabled"); - - String jaasConfig = getBasicAuthentication() - .map(template::formatted) - .orElseThrow(() -> new NotAuthorizedException(BASIC.trim())); - - configs.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig); - } - - Optional getBasicAuthentication() { - return getAuthorization(BASIC) - .map(Base64.getDecoder()::decode) - .map(String::new) - .filter(authn -> authn.indexOf(':') >= 0) - .map(authn -> new String[] { - authn.substring(0, authn.indexOf(':')), - authn.substring(authn.indexOf(':') + 1) - }) - .filter(userPass -> !userPass[0].isEmpty() && !userPass[1].isEmpty()); - } - - Optional getAuthorization(String scheme) { - return Optional.ofNullable(headers.getHeaderString(HttpHeaders.AUTHORIZATION)) - .filter(header -> header.regionMatches(true, 0, scheme, 0, scheme.length())) - .map(header -> header.substring(scheme.length())); + configs.put(SaslConfigs.SASL_JAAS_CONFIG, credential.value()); } private static final Pattern BOUNDARY_QUOTES = Pattern.compile("(^[\"'])|([\"']$)"); diff --git a/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java b/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java index 813c55d04..6ef2d56a7 100644 --- a/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/ConsumerGroupsResource.java @@ -36,11 +36,14 @@ import com.github.streamshub.console.api.model.ConsumerGroup; import com.github.streamshub.console.api.model.ConsumerGroupFilterParams; import com.github.streamshub.console.api.model.ListFetchParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.ConsumerGroupService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -67,6 +70,8 @@ public class ConsumerGroupsResource { @APIResponseSchema(ConsumerGroup.ListResponse.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public CompletionStage listConsumerGroups( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -132,6 +137,8 @@ public CompletionStage listConsumerGroups( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) public CompletionStage describeConsumerGroup( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -200,6 +207,8 @@ public CompletionStage describeConsumerGroup( node = { 
"data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public CompletionStage patchConsumerGroup( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -244,6 +253,8 @@ public CompletionStage patchConsumerGroup( @Path("{groupId}") @DELETE @APIResponseSchema(responseCode = "204", value = Void.class) + @Authorized + @ResourcePrivilege(Privilege.DELETE) public CompletionStage deleteConsumerGroup( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git a/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java b/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java index ee260d9f9..b84951ac6 100644 --- a/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/KafkaClustersResource.java @@ -32,11 +32,14 @@ import com.github.streamshub.console.api.model.KafkaCluster; import com.github.streamshub.console.api.model.ListFetchParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.KafkaClusterService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -63,6 +66,8 @@ public class KafkaClustersResource { @APIResponseSchema(KafkaCluster.KafkaClusterDataList.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public Response listClusters( @QueryParam(KafkaCluster.FIELDS_PARAM) @DefaultValue(KafkaCluster.Fields.LIST_DEFAULT) @@ -121,6 +126,8 @@ public Response listClusters( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) public CompletionStage describeCluster( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -194,6 +201,8 @@ public CompletionStage describeCluster( node = { "data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public Response patchCluster( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -205,7 +214,7 @@ public Response patchCluster( // Return all fields requestedFields.accept(Arrays.asList(KafkaCluster.Fields.DESCRIBE_DEFAULT.split(",\\s*"))); - var result = clusterService.patchCluster(clusterId, clusterData.getData()); + var result = clusterService.patchCluster(clusterData.getData()); var responseEntity = new KafkaCluster.KafkaClusterData(result); return Response.ok(responseEntity).build(); diff --git a/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java b/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java index a70026149..4027e87d2 100644 --- a/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java +++ 
b/api/src/main/java/com/github/streamshub/console/api/KafkaRebalancesResource.java @@ -31,11 +31,14 @@ import com.github.streamshub.console.api.model.KafkaRebalance; import com.github.streamshub.console.api.model.KafkaRebalanceFilterParams; import com.github.streamshub.console.api.model.ListFetchParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.KafkaRebalanceService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -62,6 +65,8 @@ public class KafkaRebalancesResource { @APIResponseSchema(KafkaRebalance.RebalanceDataList.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public Response listRebalances( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -143,6 +148,84 @@ public Response listRebalances( return Response.ok(responseEntity).build(); } + @Path("{rebalanceId}") + @GET + @Produces(MediaType.APPLICATION_JSON) + @APIResponseSchema(KafkaRebalance.RebalanceData.class) + @APIResponse(responseCode = "500", ref = "ServerError") + @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) + public Response getRebalance( + @Parameter(description = "Cluster identifier") + @PathParam("clusterId") + String clusterId, + + @PathParam("rebalanceId") + @Parameter(description = "Rebalance identifier") + String rebalanceId, + + @QueryParam(KafkaRebalance.FIELDS_PARAM) + @DefaultValue(KafkaRebalance.Fields.DESCRIBE_DEFAULT) + @StringEnumeration( + source = KafkaRebalance.FIELDS_PARAM, + allowedValues = { + KafkaRebalance.Fields.NAME, + KafkaRebalance.Fields.NAMESPACE, + KafkaRebalance.Fields.CREATION_TIMESTAMP, + KafkaRebalance.Fields.STATUS, + KafkaRebalance.Fields.MODE, + KafkaRebalance.Fields.BROKERS, + KafkaRebalance.Fields.GOALS, + KafkaRebalance.Fields.SKIP_HARD_GOAL_CHECK, + KafkaRebalance.Fields.REBALANCE_DISK, + KafkaRebalance.Fields.EXCLUDED_TOPICS, + KafkaRebalance.Fields.CONCURRENT_PARTITION_MOVEMENTS_PER_BROKER, + KafkaRebalance.Fields.CONCURRENT_INTRABROKER_PARTITION_MOVEMENTS, + KafkaRebalance.Fields.CONCURRENT_LEADER_MOVEMENTS, + KafkaRebalance.Fields.REPLICATION_THROTTLE, + KafkaRebalance.Fields.REPLICA_MOVEMENT_STRATEGIES, + KafkaRebalance.Fields.SESSION_ID, + KafkaRebalance.Fields.OPTIMIZATION_RESULT, + KafkaRebalance.Fields.CONDITIONS, + }, + payload = ErrorCategory.InvalidQueryParameter.class) + @Parameter( + description = FieldFilter.FIELDS_DESCR, + explode = Explode.FALSE, + schema = @Schema( + type = SchemaType.ARRAY, + implementation = String.class, + enumeration = { + KafkaRebalance.Fields.NAME, + KafkaRebalance.Fields.NAMESPACE, + KafkaRebalance.Fields.CREATION_TIMESTAMP, + KafkaRebalance.Fields.STATUS, + KafkaRebalance.Fields.MODE, + KafkaRebalance.Fields.BROKERS, + KafkaRebalance.Fields.GOALS, + KafkaRebalance.Fields.SKIP_HARD_GOAL_CHECK, + KafkaRebalance.Fields.REBALANCE_DISK, + KafkaRebalance.Fields.EXCLUDED_TOPICS, + KafkaRebalance.Fields.CONCURRENT_PARTITION_MOVEMENTS_PER_BROKER, + 
KafkaRebalance.Fields.CONCURRENT_INTRABROKER_PARTITION_MOVEMENTS, + KafkaRebalance.Fields.CONCURRENT_LEADER_MOVEMENTS, + KafkaRebalance.Fields.REPLICATION_THROTTLE, + KafkaRebalance.Fields.REPLICA_MOVEMENT_STRATEGIES, + KafkaRebalance.Fields.SESSION_ID, + KafkaRebalance.Fields.OPTIMIZATION_RESULT, + KafkaRebalance.Fields.CONDITIONS, + })) + List fields) { + + requestedFields.accept(fields); + + var result = rebalanceService.getRebalance(rebalanceId); + var responseEntity = new KafkaRebalance.RebalanceData(result); + + return Response.ok(responseEntity).build(); + } + @Path("{rebalanceId}") @PATCH @Consumes(MediaType.APPLICATION_JSON) @@ -158,6 +241,8 @@ public Response listRebalances( node = { "data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public Response patchRebalance( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git a/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java b/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java index 5e3f6fca0..3bec9b3ab 100644 --- a/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/RecordsResource.java @@ -34,11 +34,14 @@ import com.github.streamshub.console.api.model.KafkaRecord; import com.github.streamshub.console.api.model.RecordFilterParams; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import com.github.streamshub.console.api.service.RecordService; import com.github.streamshub.console.api.support.ErrorCategory; import com.github.streamshub.console.api.support.FieldFilter; import com.github.streamshub.console.api.support.KafkaUuid; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; @Path("/api/kafkas/{clusterId}/topics/{topicId}/records") @Tag(name = "Kafka Cluster Resources") @@ -71,6 +74,8 @@ public class RecordsResource { @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public Response consumeRecords( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -152,6 +157,8 @@ public Response consumeRecords( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.CREATE) public Response produceRecord( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git a/api/src/main/java/com/github/streamshub/console/api/TopicsResource.java b/api/src/main/java/com/github/streamshub/console/api/TopicsResource.java index 17f3386e0..f52670baf 100644 --- a/api/src/main/java/com/github/streamshub/console/api/TopicsResource.java +++ b/api/src/main/java/com/github/streamshub/console/api/TopicsResource.java @@ -42,6 +42,8 @@ import com.github.streamshub.console.api.model.Topic; import com.github.streamshub.console.api.model.TopicFilterParams; import com.github.streamshub.console.api.model.TopicPatch; +import com.github.streamshub.console.api.security.Authorized; +import com.github.streamshub.console.api.security.ResourcePrivilege; import 
com.github.streamshub.console.api.service.ConsumerGroupService; import com.github.streamshub.console.api.service.TopicService; import com.github.streamshub.console.api.support.ErrorCategory; @@ -50,6 +52,7 @@ import com.github.streamshub.console.api.support.KafkaUuid; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.StringEnumeration; +import com.github.streamshub.console.config.security.Privilege; import io.xlate.validation.constraints.Expression; @@ -83,7 +86,8 @@ public class TopicsResource { @APIResponse(responseCode = "201", description = "New topic successfully created", content = @Content(schema = @Schema(implementation = NewTopic.NewTopicDocument.class))) - public CompletionStage createTopic( + // authorization checked by TopicService + public Response createTopic( @Parameter(description = "Cluster identifier") @PathParam("clusterId") String clusterId, @@ -107,18 +111,19 @@ public CompletionStage createTopic( final UriBuilder location = uriInfo.getRequestUriBuilder(); final boolean validateOnly = Boolean.TRUE.equals(topic.meta("validateOnly")); + var entity = new NewTopic.NewTopicDocument(topicService.createTopic(topic.getData().getAttributes(), validateOnly)); - return topicService.createTopic(topic.getData().getAttributes(), validateOnly) - .thenApply(NewTopic.NewTopicDocument::new) - .thenApply(entity -> Response.status(validateOnly ? Status.OK : Status.CREATED) - .entity(entity) - .location(location.path(entity.getData().getId()).build())) - .thenApply(Response.ResponseBuilder::build); + return Response.status(validateOnly ? Status.OK : Status.CREATED) + .entity(entity) + .location(location.path(entity.getData().getId()).build()) + .build(); } @Path("{topicId}") @DELETE @APIResponseSchema(responseCode = "204", value = Void.class) + @Authorized + @ResourcePrivilege(Privilege.DELETE) public CompletionStage deleteTopic( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -138,6 +143,8 @@ public CompletionStage deleteTopic( @APIResponseSchema(Topic.ListResponse.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.LIST) public CompletionStage listTopics( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -221,6 +228,8 @@ public CompletionStage listTopics( @APIResponse(responseCode = "404", ref = "NotFound") @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + @Authorized + @ResourcePrivilege(Privilege.GET) public CompletionStage describeTopic( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -293,6 +302,7 @@ public CompletionStage describeTopic( @APIResponseSchema(ConsumerGroup.ListResponse.class) @APIResponse(responseCode = "500", ref = "ServerError") @APIResponse(responseCode = "504", ref = "ServerTimeout") + // authorization checked by ConsumerGroupService public CompletionStage listTopicConsumerGroups( @Parameter(description = "Cluster identifier") @PathParam("clusterId") @@ -362,6 +372,8 @@ public CompletionStage listTopicConsumerGroups( node = { "data", "id" }, payload = ErrorCategory.InvalidResource.class, validationAppliesTo = ConstraintTarget.PARAMETERS) + @Authorized + @ResourcePrivilege(Privilege.UPDATE) public CompletionStage patchTopic( @Parameter(description = "Cluster identifier") @PathParam("clusterId") diff --git 
a/api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java b/api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java new file mode 100644 index 000000000..800249665 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/errors/client/ForbiddenExceptionHandler.java @@ -0,0 +1,34 @@ +package com.github.streamshub.console.api.errors.client; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.ws.rs.ForbiddenException; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.ext.Provider; + +import com.github.streamshub.console.api.model.ErrorResponse; +import com.github.streamshub.console.api.support.ErrorCategory; + +@Provider +@ApplicationScoped +public class ForbiddenExceptionHandler extends AbstractClientExceptionHandler { + + public ForbiddenExceptionHandler() { + super(ErrorCategory.NotAuthorized.class, "Insufficient permissions to resource or action", (String) null); + } + + @Override + public boolean handlesException(Throwable thrown) { + return thrown instanceof ForbiddenException; + } + + @Override + public Response toResponse(ForbiddenException exception) { + var responseBuilder = Response.status(category.getHttpStatus()) + .entity(new ErrorResponse(buildErrors(exception))); + + exception.getResponse().getHeaders().forEach((k, v) -> + responseBuilder.header(k, exception.getResponse().getHeaderString(k))); + + return responseBuilder.build(); + } +} \ No newline at end of file diff --git a/api/src/main/java/com/github/streamshub/console/api/errors/client/JsonProcessingExceptionMapper.java b/api/src/main/java/com/github/streamshub/console/api/errors/client/JsonProcessingExceptionMapper.java index 29e5220ad..4a83345e4 100644 --- a/api/src/main/java/com/github/streamshub/console/api/errors/client/JsonProcessingExceptionMapper.java +++ b/api/src/main/java/com/github/streamshub/console/api/errors/client/JsonProcessingExceptionMapper.java @@ -36,7 +36,7 @@ public List buildErrors(JsonProcessingException exception) { } else { error = category.createError("Unable to process JSON", exception, null); } - LOGGER.debugf("error=%s", error); + LOGGER.debugf("error=%s, exception=%s", error, exception.getMessage()); return List.of(error); } diff --git a/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java b/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java index 793f0adb7..851f1a1de 100644 --- a/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java +++ b/api/src/main/java/com/github/streamshub/console/api/model/ConsumerGroup.java @@ -34,7 +34,8 @@ @JsonFilter("fieldFilter") public class ConsumerGroup { - public static final String FIELDS_PARAM = "fields[consumerGroups]"; + public static final String API_TYPE = "consumerGroups"; + public static final String FIELDS_PARAM = "fields[" + API_TYPE + "]"; public static final class Fields { public static final String STATE = "state"; @@ -118,7 +119,7 @@ public ConsumerGroupDocument(ConsumerGroup attributes) { payload = ErrorCategory.InvalidResource.class) @Expression( when = "self.type != null", - value = "self.type == 'consumerGroups'", + value = "self.type == '" + API_TYPE + "'", message = "resource type conflicts with operation", node = "type", payload = ErrorCategory.ResourceConflict.class) @@ -135,7 +136,7 @@ public ConsumerGroupResource(String id, String type, ConsumerGroup attributes) { * Used by list and describe */ public ConsumerGroupResource(ConsumerGroup 
attributes) { - super(attributes.groupId, "consumerGroups", attributes); + super(attributes.groupId, API_TYPE, attributes); if (attributes.errors != null) { addMeta("errors", attributes.errors); diff --git a/api/src/main/java/com/github/streamshub/console/api/model/KafkaRebalance.java b/api/src/main/java/com/github/streamshub/console/api/model/KafkaRebalance.java index c4cf514a5..3d15c41c8 100644 --- a/api/src/main/java/com/github/streamshub/console/api/model/KafkaRebalance.java +++ b/api/src/main/java/com/github/streamshub/console/api/model/KafkaRebalance.java @@ -98,7 +98,18 @@ MODE, comparing(KafkaRebalance::mode, nullsLast(String::compareTo)), + STATUS + ", " + MODE + ", " + BROKERS + ", " - + GOALS; + + GOALS + ", " + + SKIP_HARD_GOAL_CHECK + ", " + + REBALANCE_DISK + ", " + + EXCLUDED_TOPICS + ", " + + CONCURRENT_PARTITION_MOVEMENTS_PER_BROKER + ", " + + CONCURRENT_INTRABROKER_PARTITION_MOVEMENTS + ", " + + CONCURRENT_LEADER_MOVEMENTS + ", " + + REPLICATION_THROTTLE + ", " + + REPLICA_MOVEMENT_STRATEGIES + ", " + + SESSION_ID + ", " + + OPTIMIZATION_RESULT + ", " + + CONDITIONS; private Fields() { // Prevent instances diff --git a/api/src/main/java/com/github/streamshub/console/api/model/Topic.java b/api/src/main/java/com/github/streamshub/console/api/model/Topic.java index 84ddd26ce..d8d36f276 100644 --- a/api/src/main/java/com/github/streamshub/console/api/model/Topic.java +++ b/api/src/main/java/com/github/streamshub/console/api/model/Topic.java @@ -33,7 +33,8 @@ @Schema(name = "Topic") public class Topic extends RelatableResource { - public static final String FIELDS_PARAM = "fields[topics]"; + public static final String API_TYPE = "topics"; + public static final String FIELDS_PARAM = "fields[" + API_TYPE + "]"; public static final class Fields { public static final String NAME = "name"; @@ -197,7 +198,7 @@ public Integer numPartitions() { return null; } - return partitions.getOptionalPrimary().map(Collection::size).orElse(0); + return partitions.getOptionalPrimary().map(Collection::size).orElse(null); } @Schema(readOnly = true, description = """ @@ -209,6 +210,10 @@ When support for tiered storage (KIP-405) is available, this property may also include the size of remote replica storage. 
""") public BigInteger getTotalLeaderLogBytes() { + if (partitions == null) { + return null; + } + return partitions.getOptionalPrimary() .map(Collection::stream) .map(p -> p.map(PartitionInfo::leaderLocalStorage) @@ -223,11 +228,11 @@ public BigInteger getTotalLeaderLogBytes() { @JsonFilter("fieldFilter") static class Relationships { @JsonProperty - final DataList consumerGroups = new DataList<>(); + DataList consumerGroups = new DataList<>(); } public Topic(String name, boolean internal, String id) { - super(id, "topics", new Attributes(name, internal), new Relationships()); + super(id, API_TYPE, new Attributes(name, internal), new Relationships()); } public static Topic fromTopicListing(org.apache.kafka.clients.admin.TopicListing listing) { @@ -358,6 +363,10 @@ public DataList consumerGroups() { return relationships.consumerGroups; } + public void consumerGroups(DataList consumerGroups) { + relationships.consumerGroups = consumerGroups; + } + public boolean partitionsOnline() { return attributes.partitions.getOptionalPrimary() .map(Collection::stream) diff --git a/api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java b/api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java new file mode 100644 index 000000000..b8b3e65b7 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/AuthorizationInterceptor.java @@ -0,0 +1,175 @@ +package com.github.streamshub.console.api.security; + +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.UnaryOperator; + +import jakarta.annotation.Priority; +import jakarta.enterprise.context.Dependent; +import jakarta.inject.Inject; +import jakarta.interceptor.AroundInvoke; +import jakarta.interceptor.Interceptor; +import jakarta.interceptor.InvocationContext; +import jakarta.ws.rs.ForbiddenException; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.core.PathSegment; +import jakarta.ws.rs.core.UriInfo; + +import org.apache.kafka.common.errors.UnknownTopicIdException; +import org.jboss.logging.Logger; + +import com.github.streamshub.console.api.ClientFactory; +import com.github.streamshub.console.api.service.TopicDescribeService; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.KafkaClusterConfig; +import com.github.streamshub.console.config.security.ResourceTypes; + +import io.quarkus.security.identity.SecurityIdentity; + +@Authorized +@Priority(1) +@Interceptor +@Dependent +public class AuthorizationInterceptor { + + @Inject + Logger logger; + + @Inject + Map contexts; + + @Inject + SecurityIdentity securityIdentity; + + @Inject + UriInfo requestUri; + + @Inject + TopicDescribeService topicDescribe; + + @AroundInvoke + Object authorize(InvocationContext context) throws Exception { + ResourcePrivilege authz = context.getMethod().getAnnotation(ResourcePrivilege.class); + StringBuilder resource = new StringBuilder(); + List resourceNames = new ArrayList<>(1); + + setResource(resource, resourceNames); + + var requiredPermission = new ConsolePermission(resource.toString(), resourceNames, authz.value()); + + boolean allow = securityIdentity.checkPermission(requiredPermission) + .subscribeAsCompletionStage() + .get(); + + if (!allow) { + throw new ForbiddenException("Access denied"); + } + + return context.proceed(); + } + + /** + * Pull the resource type and resource name from the request URI path 
to be used
+     * to determine authorization. The path is transformed as follows.
+     *
+     * Given a resource path `/api/kafkas/xyz/topics/abc/records`:
+     *
+     *   1. Skip the leading `/api` segment
+     *   2. Append segments `kafkas/xyz/topics` to the resource type
+     *   3. Use segment `abc` as the resource name
+     *   4. Append segment `/records` to the resource type
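+     *
+     * For example, assuming cluster ID `xyz` maps to the configured cluster name
+     * `my-cluster` and topic ID `abc` resolves to the topic name `my-topic`, listing
+     * records under that topic requires a permission roughly equivalent to
+     * new ConsolePermission("kafkas/my-cluster/topics/records", List.of("my-topic"), Privilege.LIST).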
+ * + *

+ * For a principal to be authorized to access the resource, they must be a member + * of a role with access to `kafkas` `xyz` (named or all `kafkas`), and further + * with access to resource `topics/records` `abc` (named or all `topics/records`). + * + * @param resource target resource type builder + * @param resourceNames collection to hold the resource name + */ + private void setResource(StringBuilder resource, List resourceNames) { + var segments = requestUri.getPathSegments(); + var segmentCount = segments.size(); + + // skip the first segment `/api` + String kafkas = segments.get(1).getPath(); + resource.append(kafkas); + + if (segmentCount > 2) { + String kafkaId = segments.get(2).getPath(); + String kafkaName = Optional.ofNullable(contexts.get(kafkaId)) + .map(KafkaContext::clusterConfig) + .map(KafkaClusterConfig::getName) + .orElseThrow(() -> ClientFactory.NO_SUCH_KAFKA.apply(kafkaId)); + + /* + * For URLs like `/api/kafkas/123`, the Kafka ID is the resource name + * and is configured at the top-level `security` key in the console's + * configuration. Otherwise, the Kafka ID is appended to the resource + * path and the configuration originates from the Kafka-level `security` + * key, scoped to the Kafka cluster under which it is specified. + */ + + if (segmentCount > 3) { + resource.append('/'); + resource.append(kafkaName); + } else { + resourceNames.add(kafkaName); + } + } + + setKafkaResource(resource, resourceNames, segments); + } + + private void setKafkaResource(StringBuilder resource, List resourceNames, List segments) { + int segmentCount = segments.size(); + UnaryOperator converter = UnaryOperator.identity(); + + for (int s = 3; s < segmentCount; s++) { + String segment = segments.get(s).getPath(); + + if (s == 4) { + resourceNames.add(converter.apply(segment)); + } else { + if (s == 3) { + if (ResourceTypes.Kafka.TOPICS.value().equals(segment)) { + converter = this::topicName; + } else if (ResourceTypes.Kafka.REBALANCES.value().equals(segment)) { + converter = this::rebalanceName; + } + } + resource.append('/'); + resource.append(segment); + } + } + } + + /** + * Attempt to cross-reference the topic ID to the topic name which is used to + * configure topic-level authorization. + */ + private String topicName(String topicId) { + return topicDescribe.topicNameForId(topicId).toCompletableFuture().join() + .orElseThrow(() -> new UnknownTopicIdException("No such topic: " + topicId)); + } + + /** + * Extract the Kafka Rebalance name from the encoded rebalanceId. 
+ */ + private String rebalanceName(String rebalanceId) { + String decodedId = new String(Base64.getUrlDecoder().decode(rebalanceId)); + String[] idElements = decodedId.split("/"); + + if (idElements.length != 2) { + throw new NotFoundException("No such rebalance: " + rebalanceId); + } + + return idElements[1]; + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/Authorized.java b/api/src/main/java/com/github/streamshub/console/api/security/Authorized.java new file mode 100644 index 000000000..4fc482335 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/Authorized.java @@ -0,0 +1,18 @@ +package com.github.streamshub.console.api.security; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import jakarta.interceptor.InterceptorBinding; + +/** + * Binding annotation to mark methods that should be intercepted by the + * {@link AuthorizationInterceptor}. + */ +@InterceptorBinding +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.TYPE, ElementType.METHOD }) +public @interface Authorized { +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java b/api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java new file mode 100644 index 000000000..853c2b76b --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/ConsoleAuthenticationMechanism.java @@ -0,0 +1,489 @@ +package com.github.streamshub.console.api.security; + +import java.io.IOException; +import java.security.Permission; +import java.security.Principal; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.inject.Alternative; +import jakarta.inject.Inject; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MediaType; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; +import org.eclipse.microprofile.jwt.JsonWebToken; +import org.jboss.logging.Logger; +import org.jose4j.jwt.JwtClaims; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.streamshub.console.api.ClientFactory; +import com.github.streamshub.console.api.model.Error; +import com.github.streamshub.console.api.model.ErrorResponse; +import com.github.streamshub.console.api.support.ErrorCategory; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Audit; +import com.github.streamshub.console.config.security.AuditConfig; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.SecurityConfig; +import com.github.streamshub.console.config.security.SubjectConfig; + +import io.quarkus.oidc.runtime.OidcAuthenticationMechanism; +import io.quarkus.oidc.runtime.OidcJwtCallerPrincipal; +import 
io.quarkus.security.AuthenticationFailedException; +import io.quarkus.security.credential.Credential; +import io.quarkus.security.identity.IdentityProviderManager; +import io.quarkus.security.identity.SecurityIdentity; +import io.quarkus.security.identity.request.AnonymousAuthenticationRequest; +import io.quarkus.security.identity.request.AuthenticationRequest; +import io.quarkus.security.identity.request.TokenAuthenticationRequest; +import io.quarkus.security.identity.request.UsernamePasswordAuthenticationRequest; +import io.quarkus.security.runtime.QuarkusPrincipal; +import io.quarkus.security.runtime.QuarkusSecurityIdentity; +import io.quarkus.vertx.http.runtime.security.ChallengeData; +import io.quarkus.vertx.http.runtime.security.HttpAuthenticationMechanism; +import io.smallrye.mutiny.Uni; +import io.vertx.core.MultiMap; +import io.vertx.ext.web.RoutingContext; + +@Alternative +@Priority(1) +@ApplicationScoped +public class ConsoleAuthenticationMechanism implements HttpAuthenticationMechanism { + + public static final String OAUTHBEARER = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; + public static final String PLAIN = "PLAIN"; + public static final String SCRAM_SHA256 = "SCRAM-SHA-256"; + public static final String SCRAM_SHA512 = "SCRAM-SHA-512"; + + private static final String BEARER = "Bearer "; + private static final String BASIC = "Basic "; + + private static final SecurityIdentity ANONYMOUS = QuarkusSecurityIdentity.builder() + .setAnonymous(true) + .setPrincipal(new QuarkusPrincipal("ANONYMOUS")) + .build(); + + @Inject + Logger log; + + @Inject + ObjectMapper mapper; + + @Inject + ConsoleConfig consoleConfig; + + @Inject + Map contexts; + + @Inject + OidcAuthenticationMechanism oidc; + + boolean oidcEnabled() { + return Objects.nonNull(consoleConfig.getSecurity().getOidc()); + } + + @Override + public Uni authenticate(RoutingContext context, IdentityProviderManager identityProviderManager) { + if (oidcEnabled()) { + return oidc.authenticate(context, identityProviderManager) + .map(identity -> augmentIdentity(context, identity)) + .onFailure().invoke(this::maybeLogAuthenticationFailure); + } + + String clusterId = getClusterId(context); + + if (clusterId == null) { + return Uni.createFrom().item(createAnonymousIdentity(null)); + } + + var ctx = contexts.get(clusterId); + + if (ctx == null) { + // No Kafka context to establish identity, become anonymous + return Uni.createFrom().item(createAnonymousIdentity(null)); + } + + String saslMechanism = ctx.saslMechanism(Admin.class); + + if (ctx.admin() != null || saslMechanism.isEmpty()) { + // Admin credentials already given or there is no SASL authentication needed + return Uni.createFrom().item(createAnonymousIdentity(ctx)); + } + + var identity = createIdentity(ctx, context.request().headers(), saslMechanism); + + if (identity != null) { + return Uni.createFrom().item(identity); + } + + return Uni.createFrom().failure(new AuthenticationFailedException()); + } + + @Override + public Uni sendChallenge(RoutingContext context) { + return getChallenge(context).map(challengeData -> { + if (challengeData == null) { + return false; + } + + var response = context.response(); + response.setStatusCode(challengeData.status); + + if (challengeData.headerName != null) { + response.headers().set(challengeData.headerName, challengeData.headerContent); + } + + response.headers().set(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON); + + try { + response.send(mapper.writeValueAsString(((PayloadChallengeData) challengeData).payload)); + } catch 
(IOException e) { + log.warnf(e, "Failed to serialize challenge response body: %s", e.getMessage()); + } + + return true; + }); + } + + @Override + public Uni getChallenge(RoutingContext context) { + if (oidcEnabled()) { + return oidc.getChallenge(context) + .map(data -> { + var category = ErrorCategory.get(ErrorCategory.NotAuthenticated.class); + Error error = category.createError("Authentication credentials missing or invalid", null, null); + var responseBody = new ErrorResponse(List.of(error)); + return new PayloadChallengeData(data, responseBody); + }); + } + + String clusterId = getClusterId(context); + + if (clusterId == null) { + return Uni.createFrom().nullItem(); + } + + var ctx = contexts.get(clusterId); + + if (ctx == null) { + return Uni.createFrom().nullItem(); + } + + String saslMechanism = ctx.saslMechanism(Admin.class); + String scheme = getAuthorizationScheme(saslMechanism); + ChallengeData challenge; + + if (scheme != null) { + var category = ErrorCategory.get(ErrorCategory.NotAuthenticated.class); + Error error = category.createError("Authentication credentials missing or invalid", null, null); + var responseBody = new ErrorResponse(List.of(error)); + challenge = new PayloadChallengeData(401, "WWW-Authenticate", scheme, responseBody); + } else { + log.warnf("Access not permitted to cluster %s with unknown SASL mechanism '%s'", + clusterId, saslMechanism); + var category = ErrorCategory.get(ErrorCategory.ResourceNotFound.class); + Error error = category.createError(ClientFactory.NO_SUCH_KAFKA_MESSAGE.formatted(clusterId), null, null); + var responseBody = new ErrorResponse(List.of(error)); + challenge = new PayloadChallengeData(404, null, null, responseBody); + } + + return Uni.createFrom().item(challenge); + } + + @Override + public Set> getCredentialTypes() { + if (oidcEnabled()) { + return oidc.getCredentialTypes(); + } + + return Set.of( + AnonymousAuthenticationRequest.class, + TokenAuthenticationRequest.class, + UsernamePasswordAuthenticationRequest.class + ); + } + + private String getClusterId(RoutingContext context) { + Pattern p = Pattern.compile("/api/kafkas/([^/]+)(?:/.*)?"); + Matcher m = p.matcher(context.normalizedPath()); + if (m.matches()) { + return m.group(1); + } + return null; + } + + private String getAuthorizationScheme(String saslMechanism) { + switch (saslMechanism) { + case OAUTHBEARER: + return BEARER.trim(); + case PLAIN, SCRAM_SHA256, SCRAM_SHA512: + return BASIC.trim(); + default: + return null; + } + } + + private SecurityIdentity createAnonymousIdentity(KafkaContext ctx) { + return createIdentity(ctx, ANONYMOUS); + } + + private SecurityIdentity augmentIdentity(RoutingContext context, SecurityIdentity identity) { + if (identity != null) { + String clusterId = getClusterId(context); + var ctx = clusterId != null ? 
contexts.get(clusterId) : null; + return createIdentity(ctx, identity); + } + throw new AuthenticationFailedException(); + } + + private SecurityIdentity createIdentity(KafkaContext ctx, SecurityIdentity source) { + var builder = QuarkusSecurityIdentity.builder(source); + addRoleChecker(ctx, builder, source.getPrincipal()); + return builder.build(); + } + + private SecurityIdentity createIdentity(KafkaContext ctx, MultiMap headers, String saslMechanism) { + switch (saslMechanism) { + case OAUTHBEARER: + return createOAuthIdentity(ctx, headers); + case PLAIN: + return createBasicIdentity(ctx, headers, SaslJaasConfigCredential::forPlainLogin); + case SCRAM_SHA256, SCRAM_SHA512: + return createBasicIdentity(ctx, headers, SaslJaasConfigCredential::forScramLogin); + default: + return null; + } + } + + private SecurityIdentity createOAuthIdentity(KafkaContext ctx, MultiMap headers) { + return getAuthorization(headers, BEARER) + .map(accessToken -> { + var builder = QuarkusSecurityIdentity.builder(); + builder.addCredential(SaslJaasConfigCredential.forOAuthLogin(accessToken)); + Principal principal; + + try { + var claims = JwtClaims.parse(accessToken); + principal = new OidcJwtCallerPrincipal(claims, null); + } catch (Exception e) { + log.infof("JWT access token could not be parsed: %s", e.getMessage()); + principal = new QuarkusPrincipal("UNKNOWN"); + } + + builder.setPrincipal(principal); + addRoleChecker(ctx, builder, principal); + return builder.build(); + }) + .orElse(null); + } + + private SecurityIdentity createBasicIdentity(KafkaContext ctx, MultiMap headers, BiFunction credentialBuilder) { + return getBasicAuthentication(headers) + .map(userpass -> { + var builder = QuarkusSecurityIdentity.builder(); + var principal = new QuarkusPrincipal(userpass[0]); + builder.addCredential(credentialBuilder.apply(userpass[0], userpass[1])); + builder.setPrincipal(principal); + addRoleChecker(ctx, builder, principal); + return builder.build(); + }) + .orElse(null); + } + + private void addRoleChecker(KafkaContext ctx, QuarkusSecurityIdentity.Builder builder, Principal principal) { + var globalSecurity = consoleConfig.getSecurity(); + Optional clusterSecurity = ctx != null + ? 
Optional.of(ctx.clusterConfig().getSecurity()) + : Optional.empty(); + + var auditRules = mergeAuditRules( + getAuditRules(globalSecurity.getAudit(), ""), + clusterSecurity.map(c -> getAuditRules(c.getAudit(), "kafkas/" + ctx.clusterConfig().getName() + '/')) + .orElseGet(Collections::emptyMap) + ); + + if (globalSecurity.getRoles().isEmpty() + && clusterSecurity.map(cs -> cs.getRoles().isEmpty()).orElse(true)) { + // No roles are defined - allow everything + builder.addPermissionChecker(requiredPermission -> { + auditLog(principal, requiredPermission, true, auditRules.get(requiredPermission)); + return Uni.createFrom().item(true); + }); + + return; + } + + Stream globalSubjects = globalSecurity.getSubjects().stream(); + Stream clusterSubjects = clusterSecurity.map(cs -> cs.getSubjects().stream()) + .orElseGet(Stream::empty); + + List roleNames = Stream.concat(clusterSubjects, globalSubjects) + .filter(sub -> matchesPrincipal(sub, principal)) + .flatMap(sub -> sub.getRoleNames().stream()) + .distinct() + .toList(); + + Stream globalPermissions = getPermissions(globalSecurity, roleNames, ""); + Stream clusterPermissions = clusterSecurity + .map(cs -> getPermissions(cs, roleNames, "kafkas/" + ctx.clusterConfig().getName() + '/')) + .orElseGet(Stream::empty); + + List possessedPermissions = Stream.concat(globalPermissions, clusterPermissions).toList(); + + builder.addPermissionChecker(requiredPermission -> { + boolean allowed = possessedPermissions + .stream() + .anyMatch(possessed -> possessed.implies(requiredPermission)); + + auditLog(principal, requiredPermission, allowed, auditRules.get(requiredPermission)); + return Uni.createFrom().item(allowed); + }); + } + + private void auditLog(Principal principal, Permission required, boolean allowed, Audit audit) { + if (audit != null && audit.logResult(allowed)) { + log.infof("%s %s %s", principal.getName(), allowed ? "allowed" : "denied", required); + } else { + log.tracef("%s %s %s", principal.getName(), allowed ? 
"allowed" : "denied", required); + } + } + + private void maybeLogAuthenticationFailure(Throwable t) { + if (t.getCause() instanceof org.jose4j.jwt.consumer.InvalidJwtException ije) { + log.debugf("Invalid JWT: %s", ije.getErrorDetails()); + } + } + + private boolean matchesPrincipal(SubjectConfig subjectConfig, Principal principal) { + String claimName = subjectConfig.getClaim(); + List include = subjectConfig.getInclude(); + + if (claimName == null) { + return include.contains(principal.getName()); + } else if (principal instanceof JsonWebToken jwt) { + Object claim = jwt.getClaim(claimName); + + if (claim instanceof String) { + return include.contains(claim); + } + + // array claim, like set/list of groups + if (claim instanceof Collection values) { + for (Object value : values) { + if (include.contains(value)) { + return true; + } + } + } + } + + return false; + } + + private Stream getPermissions(SecurityConfig security, Collection roleNames, String resourcePrefix) { + return security.getRoles() + .stream() + .filter(role -> roleNames.contains(role.getName())) + .flatMap(role -> role.getRules().stream()) + .flatMap(rule -> { + List rulePermissions = new ArrayList<>(); + Privilege[] actions = rule.getPrivileges().toArray(Privilege[]::new); + + for (var resource : rule.getResources()) { + rulePermissions.add(new ConsolePermission( + resourcePrefix + resource, + rule.getResourceNames(), + actions + )); + } + + return rulePermissions.stream(); + }); + } + + private Map mergeAuditRules(Map global, Map cluster) { + return Stream.concat(global.entrySet().stream(), cluster.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private Map getAuditRules(List audits, String resourcePrefix) { + return audits.stream().flatMap(rule -> { + Map auditRules = new HashMap<>(); + Set actions = rule.getPrivileges().stream().flatMap(p -> p.expand().stream()).collect(Collectors.toSet()); + + for (var action : actions) { + for (var resource : rule.getResources()) { + if (rule.getResourceNames().isEmpty()) { + auditRules.put( + new ConsolePermission( + resourcePrefix + resource, + Collections.emptySet(), + action), + rule.getDecision()); + } else { + for (String name : rule.getResourceNames()) { + auditRules.put( + new ConsolePermission( + resourcePrefix + resource, + Collections.singleton(name), + action), + rule.getDecision()); + } + } + } + } + + return auditRules.entrySet().stream(); + }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private Optional getBasicAuthentication(MultiMap headers) { + return getAuthorization(headers, BASIC) + .map(Base64.getDecoder()::decode) + .map(String::new) + .filter(authn -> authn.indexOf(':') >= 0) + .map(authn -> new String[] { + authn.substring(0, authn.indexOf(':')), + authn.substring(authn.indexOf(':') + 1) + }) + .filter(userPass -> !userPass[0].isEmpty() && !userPass[1].isEmpty()); + } + + private Optional getAuthorization(MultiMap headers, String scheme) { + return Optional.ofNullable(headers.get(HttpHeaders.AUTHORIZATION)) + .filter(header -> header.regionMatches(true, 0, scheme, 0, scheme.length())) + .map(header -> header.substring(scheme.length())); + } + + private static class PayloadChallengeData extends ChallengeData { + public final Object payload; + + public PayloadChallengeData(int status, CharSequence headerName, String headerContent, Object payload) { + super(status, headerName, headerContent); + this.payload = payload; + } + + public PayloadChallengeData(ChallengeData data, 
Object payload) { + super(data.status, data.headerName, data.headerContent); + this.payload = payload; + } + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java b/api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java new file mode 100644 index 000000000..3bbc9aae4 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/ConsolePermission.java @@ -0,0 +1,169 @@ +package com.github.streamshub.console.api.security; + +import java.security.Permission; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import com.github.streamshub.console.config.security.Privilege; + +import static java.util.function.Predicate.not; + +public class ConsolePermission extends Permission { + + private static final long serialVersionUID = 1L; + public static final String ACTIONS_SEPARATOR = ","; + + private String resource; + private Collection resourceNames; + private final Set actions; + + public ConsolePermission(String resource, Privilege... actions) { + super("console"); + this.resource = resource; + this.resourceNames = Collections.emptySet(); + this.actions = checkActions(actions); + } + + public ConsolePermission(String resource, Collection resourceNames, Privilege... actions) { + super("console"); + this.resource = resource; + this.resourceNames = resourceNames; + this.actions = checkActions(actions); + } + + private static Set checkActions(Privilege[] actions) { + Objects.requireNonNull(actions); + + if (actions.length == 0) { + throw new IllegalArgumentException("actions must not be zero length"); + } + + Set validActions = new HashSet<>(actions.length, 1); + + for (Privilege action : actions) { + validActions.add(Objects.requireNonNull(action)); + } + + return Collections.unmodifiableSet(validActions); + } + + ConsolePermission resourceName(String resourceName) { + this.resourceNames = Collections.singleton(resourceName); + return this; + } + + @Override + public boolean implies(Permission other) { + if (other instanceof ConsolePermission requiredPermission) { + if (!getName().equals(requiredPermission.getName())) { + return false; + } + + return implies(requiredPermission); + } else { + return false; + } + } + + boolean implies(ConsolePermission requiredPermission) { + if (resourceDenied(requiredPermission)) { + return false; + } + + if (actions.contains(Privilege.ALL)) { + // all actions possessed + return true; + } + + for (Privilege action : requiredPermission.actions) { + if (actions.contains(action)) { + // has at least one of required actions + return true; + } + } + + return false; + } + + boolean resourceDenied(ConsolePermission requiredPermission) { + /* + * The action requires a permission unrelated to this configured + * permission. + * E.g. consumerGroups versus topics + */ + if (!requiredPermission.resource.equals(resource)) { + return true; + } + + if (resourceNames.isEmpty()) { + /* + * Configuration does not specify any resource names, so + * access to any is allowed. + */ + return false; + } + + if (requiredPermission.resourceNames.isEmpty()) { + /* + * Configuration specifies named resources, but this request + * has no resource name. I.e., the request is for an index/list + * end point. The permission is granted here, but individual + * resources in the list response may be filtered later. 
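To make the resource-name matching in this class concrete, here is a minimal sketch (an editor's illustration, not part of the patch) that uses only the public constructors and the implies logic defined in this file; the cluster name, topic names, and privileges are invented for the example, and java.util.List plus the Privilege enum from this patch are assumed to be imported:

    // A permission granted by configuration: GET/LIST on topics whose names
    // start with "orders-" in the cluster named "my-cluster". The
    // "kafkas/<cluster>/" prefix mirrors how the authentication mechanism
    // prefixes cluster-scoped resources when building permissions.
    ConsolePermission possessed = new ConsolePermission(
            "kafkas/my-cluster/topics", List.of("orders-*"), Privilege.GET, Privilege.LIST);

    // A permission required by a request: GET the topic named "orders-eu".
    ConsolePermission required = new ConsolePermission(
            "kafkas/my-cluster/topics", List.of("orders-eu"), Privilege.GET);

    // true: the resources match, "orders-eu" matches the "orders-*" prefix
    // wildcard, and GET is among the possessed privileges.
    boolean allowed = possessed.implies(required);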
+ */ + return false; + } + + /* + * Deny when any of the required names are not given in configuration. + */ + return requiredPermission.resourceNames.stream().anyMatch(not(this::matchesResourceName)); + } + + boolean matchesResourceName(String requiredName) { + if (resourceNames.contains(requiredName)) { + return true; + } + + return resourceNames.stream() + .filter(n -> n.endsWith("*")) + .map(n -> n.substring(0, n.length() - 1)) + .anyMatch(requiredName::startsWith); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof ConsolePermission other)) { + return false; + } + + return getName().equals(other.getName()) + && resource.equals(other.resource) + && actions.equals(other.actions); + } + + @Override + public int hashCode() { + return Objects.hash(getName(), resource, actions); + } + + @Override + public String toString() { + return getName() + ":" + resource + ":" + resourceNames + ":" + actions; + } + + /** + * @return null if no actions were specified, or actions joined together with the {@link #ACTIONS_SEPARATOR} + */ + @Override + public String getActions() { + return actions.isEmpty() ? null : actions.stream().map(Enum::name).collect(Collectors.joining(ACTIONS_SEPARATOR)); + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java b/api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java new file mode 100644 index 000000000..f91189fa0 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/OidcTenantConfigResolver.java @@ -0,0 +1,51 @@ +package com.github.streamshub.console.api.security; + +import java.util.List; + +import jakarta.annotation.PostConstruct; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import com.github.streamshub.console.config.ConsoleConfig; + +import io.quarkus.oidc.OidcRequestContext; +import io.quarkus.oidc.OidcTenantConfig; +import io.quarkus.oidc.TenantConfigResolver; +import io.smallrye.mutiny.Uni; +import io.vertx.ext.web.RoutingContext; + +/** + * This class is discovered and used by the Quarkus OIDC framework. The purpose + * is to create an OIDC tenant from the ConsoleConfig (sourced from YAML) that + * is provided to the console by the user directly or via the operator. 
+ */ +@ApplicationScoped +public class OidcTenantConfigResolver implements TenantConfigResolver { + + @Inject + ConsoleConfig consoleConfig; + + OidcTenantConfig oidcConfig; + + @PostConstruct + void initialize() { + oidcConfig = new OidcTenantConfig(); + var oidc = consoleConfig.getSecurity().getOidc(); + + oidcConfig.setTenantId(oidc.getTenantId()); + oidcConfig.setDiscoveryEnabled(true); + oidcConfig.setAuthServerUrl(oidc.getAuthServerUrl()); + oidcConfig.setRoles(OidcTenantConfig.Roles.fromClaimPath(List.of("groups"))); + + if (oidc.getIssuer() != null) { + oidcConfig.getToken().setIssuer(oidc.getIssuer()); + } + } + + @Override + public Uni resolve(RoutingContext routingContext, + OidcRequestContext requestContext) { + return Uni.createFrom().item(oidcConfig); + } + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java b/api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java new file mode 100644 index 000000000..711044777 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/PermissionService.java @@ -0,0 +1,69 @@ +package com.github.streamshub.console.api.security; + +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import jakarta.enterprise.context.RequestScoped; +import jakarta.inject.Inject; +import jakarta.ws.rs.ForbiddenException; + +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.ResourceTypes; + +import io.quarkus.security.identity.SecurityIdentity; + +@RequestScoped +public class PermissionService { + + private static final Set KAFKA_SUBRESOURCES = Stream.of(ResourceTypes.Kafka.values()) + .map(v -> v.value()) + .collect(Collectors.toSet()); + + @Inject + SecurityIdentity securityIdentity; + + @Inject + KafkaContext kafkaContext; + + private String resolveResource(String resource) { + if (KAFKA_SUBRESOURCES.contains(resource)) { + resource = "kafkas/" + kafkaContext.clusterConfig().getName() + '/' + resource; + } + return resource; + } + + private boolean checkPermission(ConsolePermission required) { + return securityIdentity.checkPermission(required) + .subscribeAsCompletionStage() + .join(); + } + + public Predicate permitted(String resource, Privilege privilege, Function name) { + ConsolePermission required = new ConsolePermission(resolveResource(resource), privilege); + + return (T item) -> { + required.resourceName(name.apply(item)); + return checkPermission(required); + }; + } + + public boolean permitted(String resource, Privilege privilege, String name) { + return checkPermission(new ConsolePermission(resolveResource(resource), List.of(name), privilege)); + } + + public void assertPermitted(String resource, Privilege privilege, String name) { + if (!permitted(resource, privilege, name)) { + throw forbidden(resource, privilege, name); + } + } + + public ForbiddenException forbidden(String resource, Privilege privilege, String name) { + return new ForbiddenException("Access denied: resource={%s} privilege:{%s}, resourceName:{%s}" + .formatted(resource, privilege, name)); + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java b/api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java new file mode 100644 index 
000000000..da592bb32 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/ResourcePrivilege.java @@ -0,0 +1,20 @@ +package com.github.streamshub.console.api.security; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import com.github.streamshub.console.config.security.Privilege; + +/** + * Method annotation used by the {@link AuthorizationInterceptor} to declare + * the privilege a principal must be granted to execute the annotated method. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface ResourcePrivilege { + + Privilege value() default Privilege.ALL; + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java b/api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java new file mode 100644 index 000000000..be87105b5 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/security/SaslJaasConfigCredential.java @@ -0,0 +1,40 @@ +package com.github.streamshub.console.api.security; + +import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; +import org.apache.kafka.common.security.plain.PlainLoginModule; +import org.apache.kafka.common.security.scram.ScramLoginModule; + +import io.quarkus.security.credential.Credential; + +public class SaslJaasConfigCredential implements Credential { + + private static final String SASL_OAUTH_CONFIG_TEMPLATE = OAuthBearerLoginModule.class.getName() + + " required" + + " oauth.access.token=\"%s\" ;"; + + private static final String BASIC_TEMPLATE = "%s required username=\"%%s\" password=\"%%s\" ;"; + private static final String SASL_PLAIN_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(PlainLoginModule.class.getName()); + private static final String SASL_SCRAM_CONFIG_TEMPLATE = BASIC_TEMPLATE.formatted(ScramLoginModule.class.getName()); + + public static SaslJaasConfigCredential forOAuthLogin(String accessToken) { + return new SaslJaasConfigCredential(SASL_OAUTH_CONFIG_TEMPLATE.formatted(accessToken)); + } + + public static SaslJaasConfigCredential forPlainLogin(String username, String password) { + return new SaslJaasConfigCredential(SASL_PLAIN_CONFIG_TEMPLATE.formatted(username, password)); + } + + public static SaslJaasConfigCredential forScramLogin(String username, String password) { + return new SaslJaasConfigCredential(SASL_SCRAM_CONFIG_TEMPLATE.formatted(username, password)); + } + + private final String value; + + private SaslJaasConfigCredential(String value) { + this.value = value; + } + + public String value() { + return value; + } +} diff --git a/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java b/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java index 075c81e10..2b068d76b 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/ConsumerGroupService.java @@ -51,6 +51,7 @@ import com.github.streamshub.console.api.model.PartitionId; import com.github.streamshub.console.api.model.PartitionInfo; import com.github.streamshub.console.api.model.Topic; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.ConsumerGroupValidation; import com.github.streamshub.console.api.support.FetchFilterPredicate; import 
com.github.streamshub.console.api.support.KafkaContext; @@ -58,6 +59,7 @@ import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.UnknownTopicIdPatch; import com.github.streamshub.console.api.support.ValidationProxy; +import com.github.streamshub.console.config.security.Privilege; @ApplicationScoped public class ConsumerGroupService { @@ -88,7 +90,10 @@ public class ConsumerGroupService { KafkaContext kafkaContext; @Inject - TopicService topicService; + PermissionService permissionService; + + @Inject + TopicDescribeService topicService; @Inject ValidationProxy validationService; @@ -111,7 +116,10 @@ public CompletionStage> listConsumerGroups(String topicId, L .exceptionally(error -> { throw (RuntimeException) UnknownTopicIdPatch.apply(error, CompletionException::new); }) - .thenComposeAsync(unused -> listConsumerGroupMembership(List.of(topicId)), asyncExec) + .thenComposeAsync(topic -> { + permissionService.assertPermitted(Topic.API_TYPE, Privilege.GET, topic.name()); + return listConsumerGroupMembership(List.of(topicId)); + }, asyncExec) .thenComposeAsync(topicGroups -> { if (topicGroups.containsKey(topicId)) { return listConsumerGroups(topicGroups.get(topicId), includes, listSupport); @@ -120,7 +128,9 @@ public CompletionStage> listConsumerGroups(String topicId, L }, asyncExec); } - CompletionStage> listConsumerGroups(List groupIds, List includes, ListRequestContext listSupport) { + private CompletionStage> listConsumerGroups(List groupIds, + List includes, ListRequestContext listSupport) { + Admin adminClient = kafkaContext.admin(); Set states = listSupport.filters() @@ -142,11 +152,12 @@ CompletionStage> listConsumerGroups(List groupIds, L .inStates(states)) .valid() .toCompletionStage() - .thenApply(groups -> groups.stream() + .thenApplyAsync(groups -> groups.stream() .filter(group -> groupIds.isEmpty() || groupIds.contains(group.groupId())) - .map(ConsumerGroup::fromKafkaModel) - .toList()) - .thenApply(list -> list.stream() + .filter(permissionService.permitted(ConsumerGroup.API_TYPE, Privilege.LIST, ConsumerGroupListing::groupId)) + .map(ConsumerGroup::fromKafkaModel), + threadContext.currentContextExecutor()) + .thenApply(groups -> groups .filter(listSupport) .map(listSupport::tally) .filter(listSupport::betweenCursors) @@ -154,7 +165,9 @@ CompletionStage> listConsumerGroups(List groupIds, L .dropWhile(listSupport::beforePageBegin) .takeWhile(listSupport::pageCapacityAvailable) .toList()) - .thenCompose(groups -> augmentList(adminClient, groups, includes)); + .thenComposeAsync( + groups -> augmentList(adminClient, groups, includes), + threadContext.currentContextExecutor()); } public CompletionStage describeConsumerGroup(String requestGroupId, List includes) { @@ -162,7 +175,9 @@ public CompletionStage describeConsumerGroup(String requestGroupI String groupId = preprocessGroupId(requestGroupId); return assertConsumerGroupExists(adminClient, groupId) - .thenCompose(nothing -> describeConsumerGroups(adminClient, List.of(groupId), includes)) + .thenComposeAsync( + nothing -> describeConsumerGroups(adminClient, List.of(groupId), includes), + threadContext.currentContextExecutor()) .thenApply(groups -> groups.get(groupId)) .thenApply(result -> result.getOrThrow(CompletionException::new)); } @@ -174,13 +189,18 @@ public CompletionStage>> listConsumerGroupMembership(Co .inStates(Set.of( ConsumerGroupState.STABLE, ConsumerGroupState.PREPARING_REBALANCE, - ConsumerGroupState.COMPLETING_REBALANCE))) + 
ConsumerGroupState.COMPLETING_REBALANCE, + ConsumerGroupState.EMPTY))) .valid() .toCompletionStage() - .thenApply(groups -> groups.stream().map(ConsumerGroup::fromKafkaModel).toList()) - .thenCompose(groups -> augmentList(adminClient, groups, List.of( + .thenApplyAsync(groups -> groups.stream() + .filter(permissionService.permitted(ConsumerGroup.API_TYPE, Privilege.LIST, ConsumerGroupListing::groupId)) + .map(ConsumerGroup::fromKafkaModel).toList(), + threadContext.currentContextExecutor()) + .thenComposeAsync(groups -> augmentList(adminClient, groups, List.of( ConsumerGroup.Fields.MEMBERS, - ConsumerGroup.Fields.OFFSETS))) + ConsumerGroup.Fields.OFFSETS)), + threadContext.currentContextExecutor()) .thenApply(list -> list.stream() .map(group -> Map.entry( group.getGroupId(), @@ -225,11 +245,14 @@ CompletionStage assertConsumerGroupExists(Admin adminClient, String groupI return adminClient.listConsumerGroups() .all() .toCompletionStage() - .thenAccept(listing -> { - if (listing.stream().map(ConsumerGroupListing::groupId).noneMatch(groupId::equals)) { + .thenAcceptAsync(listing -> { + if (listing.stream() + .filter(permissionService.permitted(ConsumerGroup.API_TYPE, Privilege.GET, ConsumerGroupListing::groupId)) + .map(ConsumerGroupListing::groupId) + .noneMatch(groupId::equals)) { throw new GroupIdNotFoundException("No such consumer group: " + groupId); } - }); + }, threadContext.currentContextExecutor()); } CompletionStage> alterConsumerGroupOffsets(Admin adminClient, String groupId, ConsumerGroup patch, boolean dryRun) { @@ -328,7 +351,7 @@ CompletionStage> alterConsumerGroupOffsets(Admin adminCl ) .thenApply(nothing1 -> targetOffsets); }) - .thenCompose(alterRequest -> { + .thenComposeAsync(alterRequest -> { if (dryRun) { return alterConsumerGroupOffsetsDryRun(adminClient, groupId, alterRequest) .thenApply(Optional::of); @@ -336,12 +359,12 @@ CompletionStage> alterConsumerGroupOffsets(Admin adminCl return alterConsumerGroupOffsets(adminClient, groupId, alterRequest) .thenApply(nothing -> Optional.empty()); } - }); + }, threadContext.currentContextExecutor()); } CompletionStage alterConsumerGroupOffsetsDryRun(Admin adminClient, String groupId, Map alterRequest) { - var pendingTopicsIds = fetchTopicIdMap(adminClient); + var pendingTopicsIds = fetchTopicIdMap(); return describeConsumerGroups(adminClient, List.of(groupId), Collections.emptyList()) .thenApply(groups -> groups.get(groupId)) @@ -387,7 +410,7 @@ CompletableFuture alterConsumerGroupOffsets(Admin adminClient, String grou return allOf(offsetResults.values()); } - Map> getListOffsetsResults( + private Map> getListOffsetsResults( Set partitions, ListOffsetsResult topicOffsetsResult) { @@ -434,7 +457,7 @@ public CompletionStage deleteConsumerGroup(String requestGroupId) { .toCompletionStage(); } - CompletionStage> augmentList(Admin adminClient, List list, List includes) { + private CompletionStage> augmentList(Admin adminClient, List list, List includes) { Map groups = list.stream().collect(Collectors.toMap(ConsumerGroup::getGroupId, Function.identity())); CompletableFuture describePromise; @@ -450,11 +473,15 @@ CompletionStage> augmentList(Admin adminClient, List list); } - void mergeDescriptions(ConsumerGroup group, Either description) { + private void mergeDescriptions(ConsumerGroup group, Either description) { if (description.isPrimaryEmpty()) { Throwable thrown = description.getAlternate(); Error error = new Error("Unable to describe consumer group", thrown.getMessage(), thrown); group.addError(error); + group.setMembers(null); + 
group.setOffsets(null); + group.setCoordinator(null); + group.setAuthorizedOperations(null); } else { ConsumerGroup describedGroup = description.getPrimary(); group.setMembers(describedGroup.getMembers()); @@ -464,14 +491,14 @@ void mergeDescriptions(ConsumerGroup group, Either des } } - CompletionStage>> describeConsumerGroups( + private CompletionStage>> describeConsumerGroups( Admin adminClient, Collection groupIds, List includes) { Map> result = new LinkedHashMap<>(groupIds.size()); - var pendingTopicsIds = fetchTopicIdMap(adminClient); + var pendingTopicsIds = fetchTopicIdMap(); var pendingDescribes = adminClient.describeConsumerGroups(groupIds, new DescribeConsumerGroupsOptions() @@ -482,7 +509,10 @@ CompletionStage>> describeConsumerG .map(entry -> entry.getValue() .toCompletionStage() - .thenCombine(pendingTopicsIds, ConsumerGroup::fromKafkaModel) + .thenCombineAsync(pendingTopicsIds, (description, topicIds) -> { + permissionService.assertPermitted(ConsumerGroup.API_TYPE, Privilege.GET, description.groupId()); + return ConsumerGroup.fromKafkaModel(description, topicIds); + }, threadContext.currentContextExecutor()) .handle((consumerGroup, error) -> { result.put(entry.getKey(), Either.of( Optional.ofNullable(consumerGroup), @@ -513,13 +543,13 @@ CompletionStage>> describeConsumerG }); } - CompletableFuture> fetchTopicIdMap(Admin adminClient) { - return topicService.listTopics(adminClient, true) + private CompletableFuture> fetchTopicIdMap() { + return topicService.listTopics(true, true) .thenApply(topics -> topics.stream() .collect(Collectors.toMap(TopicListing::name, l -> l.topicId().toString()))); } - CompletableFuture fetchOffsets(Admin adminClient, Map groups, Map topicIds) { + private CompletableFuture fetchOffsets(Admin adminClient, Map groups, Map topicIds) { var groupOffsetsRequest = groups.keySet() .stream() .collect(Collectors.toMap(Function.identity(), key -> ALL_GROUP_PARTITIONS)); @@ -549,6 +579,7 @@ CompletableFuture fetchOffsets(Admin adminClient, Map topicIds.containsKey(topicPartition.topic())) .collect(Collectors.toMap(Function.identity(), key -> LATEST_TOPIC_OFFSETS)); var topicOffsetsResult = adminClient.listOffsets(topicOffsetsRequest); @@ -571,7 +602,7 @@ CompletableFuture fetchOffsets(Admin adminClient, Map topicIds, Map> topicOffsets, Map groupOffsets, @@ -612,7 +643,7 @@ void addOffsets(ConsumerGroup group, } } - static String preprocessGroupId(String groupId) { + private static String preprocessGroupId(String groupId) { return "+".equals(groupId) ? 
"" : groupId; } } diff --git a/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java b/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java index b6963e5d9..e9bf53f20 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/KafkaClusterService.java @@ -34,10 +34,12 @@ import com.github.streamshub.console.api.model.KafkaCluster; import com.github.streamshub.console.api.model.KafkaListener; import com.github.streamshub.console.api.model.Node; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.Holder; import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Privilege; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.KubernetesClient; @@ -96,6 +98,9 @@ public class KafkaClusterService { */ KafkaContext kafkaContext; + @Inject + PermissionService permissionService; + boolean listUnconfigured = false; Predicate includeAll = k -> listUnconfigured; @@ -127,6 +132,7 @@ public List listClusters(ListRequestContext listSupp .toList(); return Stream.concat(configuredClusters.values().stream(), otherClusters.stream()) + .filter(permissionService.permitted(KafkaCluster.API_TYPE, Privilege.LIST, KafkaCluster::name)) .map(listSupport::tally) .filter(listSupport::betweenCursors) .sorted(listSupport.getSortComparator()) @@ -159,7 +165,7 @@ public CompletionStage describeCluster(List fields) { .thenApply(this::setManaged); } - public KafkaCluster patchCluster(String id, KafkaCluster cluster) { + public KafkaCluster patchCluster(KafkaCluster cluster) { Kafka resource = kafkaContext.resource(); if (resource != null) { diff --git a/api/src/main/java/com/github/streamshub/console/api/service/KafkaRebalanceService.java b/api/src/main/java/com/github/streamshub/console/api/service/KafkaRebalanceService.java index 28ddaf71c..c2fcb8446 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/KafkaRebalanceService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/KafkaRebalanceService.java @@ -18,9 +18,12 @@ import com.github.streamshub.console.api.model.Condition; import com.github.streamshub.console.api.model.KafkaRebalance; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.ResourceTypes; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.informers.cache.Cache; @@ -47,11 +50,18 @@ public class KafkaRebalanceService { @Inject KafkaContext kafkaContext; + @Inject + PermissionService permissionService; + public List listRebalances(ListRequestContext listSupport) { final Map statuses = new HashMap<>(); listSupport.meta().put("summary", Map.of("statuses", statuses)); return rebalanceResources() + .filter(permissionService.permitted( + ResourceTypes.Kafka.REBALANCES.value(), + Privilege.LIST, + r -> r.getMetadata().getName())) .map(this::toKafkaRebalance) 
.map(rebalance -> tallyStatus(statuses, rebalance)) .filter(listSupport) @@ -63,6 +73,12 @@ public List listRebalances(ListRequestContext li .toList(); } + public KafkaRebalance getRebalance(String id) { + return findRebalance(id) + .map(this::toKafkaRebalance) + .orElseThrow(() -> new NotFoundException("No such Kafka rebalance resource")); + } + public KafkaRebalance patchRebalance(String id, KafkaRebalance rebalance) { return findRebalance(id) .map(resource -> { diff --git a/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java b/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java index 6f0d963a9..5784e7a73 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/RecordService.java @@ -4,7 +4,6 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -25,8 +24,6 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.kafka.clients.admin.ListTopicsOptions; -import org.apache.kafka.clients.admin.TopicListing; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; @@ -35,7 +32,6 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InvalidPartitionsException; import org.apache.kafka.common.errors.UnknownTopicIdException; import org.apache.kafka.common.header.Header; @@ -75,6 +71,9 @@ public class RecordService { @Inject ThreadContext threadContext; + @Inject + TopicDescribeService topicService; + public List consumeRecords(String topicId, Integer partition, Long offset, @@ -94,9 +93,19 @@ public List consumeRecords(String topicId, return Collections.emptyList(); } + var beginningOffsets = consumer.beginningOffsets(assignments); var endOffsets = consumer.endOffsets(assignments); // End offset of zero means the partition has not been written to - don't bother reading them - assignments.removeIf(assignment -> endOffsets.get(assignment) == 0); + assignments.removeIf(assignment -> { + long endOffset = endOffsets.get(assignment); + + if (endOffset == 0) { + return true; + } + + long beginningOffset = beginningOffsets.get(assignment); + return endOffset - beginningOffset == 0; + }); if (assignments.isEmpty()) { return Collections.emptyList(); @@ -107,7 +116,7 @@ public List consumeRecords(String topicId, if (timestamp != null) { seekToTimestamp(consumer, assignments, timestamp); } else { - seekToOffset(consumer, assignments, endOffsets, offset, limit); + seekToOffset(consumer, assignments, beginningOffsets, endOffsets, offset, limit); } if (assignments.isEmpty()) { @@ -230,18 +239,8 @@ Optional schemaMeta(JsonApiRelationship schemaRelationship, String key) } String topicNameForId(String topicId) { - Uuid kafkaTopicId = Uuid.fromString(topicId); - - return kafkaContext.admin() - .listTopics(new ListTopicsOptions().listInternal(true)) - .listings() - .toCompletionStage() - .thenApply(Collection::stream) - .thenApply(listings -> listings - .filter(topic -> kafkaTopicId.equals(topic.topicId())) - .findFirst() - .map(TopicListing::name) - .orElseThrow(() -> noSuchTopic(topicId))) + 
return topicService.topicNameForId(topicId) + .thenApply(topic -> topic.orElseThrow(() -> noSuchTopic(topicId))) .toCompletableFuture() .join(); } @@ -274,8 +273,11 @@ void seekToTimestamp(Consumer consumer, List consumer, List assignments, Map endOffsets, Long offset, int limit) { - var beginningOffsets = consumer.beginningOffsets(assignments); + void seekToOffset(Consumer consumer, List assignments, + Map beginningOffsets, + Map endOffsets, + Long offset, int limit) { + Iterator cursor = assignments.iterator(); while (cursor.hasNext()) { @@ -474,12 +476,14 @@ public ConsumerRecords next() { if (total >= limit) { // Consumed `limit` records for this partition + LOGGER.tracef("Consumed %d records (more than limit %d) from partition %s", total, limit, partition); assignments.remove(partition); } else if (consumed > 0) { long maxOffset = partitionRecords.stream().mapToLong(ConsumerRecord::offset).max().getAsLong() + 1; if (maxOffset >= endOffsets.get(partition)) { // Reached the end of the partition + LOGGER.tracef("Reached end of partition %s at offset %s", partition, maxOffset); assignments.remove(partition); } } diff --git a/api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java b/api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java new file mode 100644 index 000000000..27a0ffc39 --- /dev/null +++ b/api/src/main/java/com/github/streamshub/console/api/service/TopicDescribeService.java @@ -0,0 +1,533 @@ +package com.github.streamshub.console.api.service; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import jakarta.inject.Named; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DescribeLogDirsOptions; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.ListOffsetsOptions; +import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; +import org.apache.kafka.clients.admin.ListTopicsOptions; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.admin.TopicListing; +import org.apache.kafka.common.TopicCollection; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.config.ConfigResource; +import org.eclipse.microprofile.context.ThreadContext; +import org.jboss.logging.Logger; + +import com.github.streamshub.console.api.model.Either; +import com.github.streamshub.console.api.model.Identifier; +import com.github.streamshub.console.api.model.OffsetInfo; +import com.github.streamshub.console.api.model.PartitionId; +import com.github.streamshub.console.api.model.PartitionInfo; +import com.github.streamshub.console.api.model.ReplicaLocalStorage; +import com.github.streamshub.console.api.model.Topic; +import com.github.streamshub.console.api.security.PermissionService; +import 
com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.api.support.KafkaOffsetSpec; +import com.github.streamshub.console.api.support.ListRequestContext; +import com.github.streamshub.console.api.support.UnknownTopicIdPatch; +import com.github.streamshub.console.config.security.Privilege; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.topic.KafkaTopic; + +@ApplicationScoped +public class TopicDescribeService { + + private static final List DEFAULT_OFFSET_SPECS = + List.of(OffsetSpec.earliest(), OffsetSpec.latest(), OffsetSpec.maxTimestamp()); + private static final Predicate CONFIG_SORT = + Pattern.compile("^-?configs\\..+$").asMatchPredicate(); + private static final Set REQUIRE_DESCRIBE = Set.of( + Topic.Fields.PARTITIONS, + Topic.Fields.NUM_PARTITIONS, + Topic.Fields.AUTHORIZED_OPERATIONS, + Topic.Fields.TOTAL_LEADER_LOG_BYTES, + Topic.Fields.STATUS); + private static final Set REQUIRE_PARTITIONS = Set.of( + Topic.Fields.PARTITIONS, + Topic.Fields.NUM_PARTITIONS, + Topic.Fields.TOTAL_LEADER_LOG_BYTES, + Topic.Fields.STATUS); + + @Inject + Logger logger; + + /** + * ThreadContext of the request thread. This is used to execute asynchronous + * tasks to allow access to request-scoped beans such as an injected + * {@linkplain Admin Admin client} + */ + @Inject + ThreadContext threadContext; + + @Inject + KafkaContext kafkaContext; + + @Inject + @Named("KafkaTopics") + Map>> managedTopics; + + @Inject + KubernetesClient k8s; + + @Inject + PermissionService permissionService; + + @Inject + ConfigService configService; + + @Inject + ConsumerGroupService consumerGroupService; + + public CompletionStage> listTopics(List fields, String offsetSpec, ListRequestContext listSupport) { + List fetchList = new ArrayList<>(fields); + + if (listSupport.getSortEntries().stream().anyMatch(CONFIG_SORT)) { + fetchList.add(Topic.Fields.CONFIGS); + } + + Admin adminClient = kafkaContext.admin(); + final Map statuses = new HashMap<>(); + final AtomicInteger partitionCount = new AtomicInteger(0); + + listSupport.meta().put("summary", Map.of( + "statuses", statuses, + "totalPartitions", partitionCount)); + + return listTopics(true, true) + .thenApply(list -> list.stream().map(Topic::fromTopicListing).toList()) + .thenComposeAsync( + list -> augmentList(adminClient, list, fetchList, offsetSpec), + threadContext.currentContextExecutor()) + .thenApply(list -> list.stream() + .filter(listSupport) + .map(topic -> tallySummary(statuses, partitionCount, topic)) + .map(listSupport::tally) + .filter(listSupport::betweenCursors) + .sorted(listSupport.getSortComparator()) + .dropWhile(listSupport::beforePageBegin) + .takeWhile(listSupport::pageCapacityAvailable)) + .thenApplyAsync( + topics -> topics.map(this::setManaged).toList(), + threadContext.currentContextExecutor()); + } + + private Topic tallySummary(Map statuses, AtomicInteger partitionCount, Topic topic) { + statuses.compute(topic.status(), (k, v) -> v == null ? 
1 : v + 1); + + Integer numPartitions = topic.getAttributes().numPartitions(); + //numPartitions may be null if it was not included in the requested fields + if (numPartitions != null) { + partitionCount.addAndGet(numPartitions); + } + + return topic; + } + + CompletableFuture> listTopics(boolean listInternal, boolean checkAuthorization) { + Admin adminClient = kafkaContext.admin(); + Predicate authorizationFilter; + + if (checkAuthorization) { + authorizationFilter = permissionService.permitted(Topic.API_TYPE, Privilege.LIST, TopicListing::name); + } else { + authorizationFilter = x -> true; + } + + return adminClient + .listTopics(new ListTopicsOptions().listInternal(listInternal)) + .listings() + .toCompletionStage() + .thenApplyAsync(topics -> topics.stream() + .filter(authorizationFilter) + .toList(), threadContext.currentContextExecutor()) + .toCompletableFuture(); + } + + public CompletionStage> topicNameForId(String topicId) { + Uuid kafkaTopicId = Uuid.fromString(topicId); + + return listTopics(true, false) + .thenApply(listings -> listings.stream() + .filter(topic -> kafkaTopicId.equals(topic.topicId())) + .findFirst() + .map(TopicListing::name)); + } + + public CompletionStage describeTopic(String topicId, List fields, String offsetSpec) { + Admin adminClient = kafkaContext.admin(); + Uuid id = Uuid.fromString(topicId); + + CompletableFuture describePromise = describeTopics(adminClient, List.of(id), fields, offsetSpec) + .thenApply(result -> result.get(id)) + .thenApply(result -> result.getOrThrow(CompletionException::new)) + .thenApplyAsync(this::setManaged, threadContext.currentContextExecutor()) + .toCompletableFuture(); + + return describePromise.thenComposeAsync(topic -> { + var topics = Map.of(id, topic); + + return CompletableFuture.allOf( + maybeDescribeConfigs(adminClient, topics, fields), + maybeFetchConsumerGroups(topics, fields)) + .thenApply(nothing -> topic); + }, threadContext.currentContextExecutor()); + } + + Topic setManaged(Topic topic) { + topic.addMeta("managed", getManagedTopic(topic.name()) + .map(kafkaTopic -> Boolean.TRUE) + .orElse(Boolean.FALSE)); + return topic; + } + + Optional getManagedTopic(String topicName) { + return Optional.ofNullable(kafkaContext.resource()) + .map(Kafka::getMetadata) + .flatMap(kafkaMeta -> Optional.ofNullable(managedTopics.get(kafkaMeta.getNamespace())) + .map(clustersInNamespace -> clustersInNamespace.get(kafkaMeta.getName())) + .map(topicsInCluster -> topicsInCluster.get(topicName)) + // Do not consider topics without a status set by Strimzi as managed + .filter(topic -> Objects.nonNull(topic.getStatus())) + .filter(this::isManaged)); + } + + boolean isManaged(KafkaTopic topic) { + return Optional.of(topic) + .map(KafkaTopic::getMetadata) + .map(ObjectMeta::getAnnotations) + .map(annotations -> annotations.getOrDefault("strimzi.io/managed", "true")) + .map(managed -> !"false".equals(managed)) + .orElse(true); + } + + CompletionStage> augmentList(Admin adminClient, List list, List fields, String offsetSpec) { + Map topics = list.stream().collect(Collectors.toMap(t -> Uuid.fromString(t.getId()), Function.identity())); + CompletableFuture configPromise = maybeDescribeConfigs(adminClient, topics, fields); + CompletableFuture describePromise = maybeDescribeTopics(adminClient, topics, fields, offsetSpec); + CompletableFuture consumerGroupPromise = maybeFetchConsumerGroups(topics, fields); + + return CompletableFuture.allOf(configPromise, describePromise, consumerGroupPromise) + .thenApply(nothing -> list); + } + + 
CompletableFuture maybeDescribeConfigs(Admin adminClient, Map topics, List fields) { + if (fields.contains(Topic.Fields.CONFIGS)) { + Map topicIds = new HashMap<>(); + List keys = topics.values().stream() + .filter(e -> permissionService.permitted(Topic.API_TYPE, Privilege.GET, e.name())) + .map(topic -> { + topicIds.put(topic.name(), Uuid.fromString(topic.getId())); + return topic.name(); + }) + .map(name -> new ConfigResource(ConfigResource.Type.TOPIC, name)) + .toList(); + + return configService.describeConfigs(adminClient, keys) + .thenAccept(configs -> + configs.forEach((name, either) -> topics.get(topicIds.get(name)).addConfigs(either))) + .toCompletableFuture(); + } + + return CompletableFuture.completedFuture(null); + } + + private CompletableFuture maybeDescribeTopics(Admin adminClient, Map topics, List fields, String offsetSpec) { + if (REQUIRE_DESCRIBE.stream().anyMatch(fields::contains)) { + Collection topicIds = topics.entrySet().stream() + .filter(e -> permissionService.permitted(Topic.API_TYPE, Privilege.GET, e.getValue().name())) + .map(Map.Entry::getKey) + .toList(); + + return describeTopics(adminClient, topicIds, fields, offsetSpec) + .thenApply(descriptions -> { + descriptions.forEach((id, either) -> { + if (REQUIRE_PARTITIONS.stream().anyMatch(fields::contains)) { + topics.get(id).addPartitions(either); + } + if (fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)) { + topics.get(id).addAuthorizedOperations(either); + } + }); + + return null; + }) + .toCompletableFuture(); + } + + return CompletableFuture.completedFuture(null); + } + + private CompletableFuture maybeFetchConsumerGroups(Map topics, List fields) { + if (!fields.contains(Topic.Fields.CONSUMER_GROUPS)) { + return CompletableFuture.completedFuture(null); + } + + List searchTopics = topics.entrySet().stream() + .filter(e -> permissionService.permitted(Topic.API_TYPE, Privilege.GET, e.getValue().name())) + .map(Map.Entry::getKey) + .map(Uuid::toString) + .toList(); + + return consumerGroupService + .listConsumerGroupMembership(searchTopics) + .thenAccept(consumerGroups -> + topics.forEach((topicId, topic) -> { + String idString = topicId.toString(); + + if (searchTopics.contains(idString)) { + var topicGroups = consumerGroups.getOrDefault(idString, Collections.emptyList()); + var identifiers = topicGroups.stream().map(g -> new Identifier("consumerGroups", g)).toList(); + topic.consumerGroups().data().addAll(identifiers); + topic.consumerGroups().addMeta("count", identifiers.size()); + } else { + topic.consumerGroups(null); + } + })) + .toCompletableFuture(); + } + + /* package */ CompletionStage>> describeTopics( + Admin adminClient, + Collection topicIds, + List fields, + String offsetSpec) { + + Map> result = new LinkedHashMap<>(topicIds.size()); + TopicCollection request = TopicCollection.ofTopicIds(topicIds); + DescribeTopicsOptions options = new DescribeTopicsOptions() + .includeAuthorizedOperations(fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)); + + var pendingDescribes = adminClient.describeTopics(request, options) + .topicIdValues() + .entrySet() + .stream() + .map(entry -> + entry.getValue() + .toCompletionStage() + .handleAsync((description, error) -> { + if (error == null && !permissionService.permitted(Topic.API_TYPE, Privilege.GET, description.name())) { + error = permissionService.forbidden(Topic.API_TYPE, Privilege.GET, description.name()); + } + result.put( + entry.getKey(), + Either.of(description, + UnknownTopicIdPatch.apply(error, Function.identity()), + Topic::fromTopicDescription)); 
+ return null; + }, threadContext.currentContextExecutor())) + .map(CompletionStage::toCompletableFuture) + .toArray(CompletableFuture[]::new); + + return CompletableFuture.allOf(pendingDescribes) + .thenCompose(nothing -> CompletableFuture.allOf( + listOffsets(adminClient, result, offsetSpec).toCompletableFuture(), + describeLogDirs(adminClient, result).toCompletableFuture() + )) + .thenApply(nothing -> result); + } + + private CompletionStage listOffsets(Admin adminClient, Map> topics, String offsetSpec) { + Map topicIds = new HashMap<>(topics.size()); + var onlineTopics = topics.entrySet() + .stream() + .filter(topic -> topic.getValue() + .getOptionalPrimary() + .map(Topic::partitionsOnline) + .orElse(false)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + var pendingOffsets = getRequestOffsetSpecs(offsetSpec) + .stream() + .map(reqOffsetSpec -> topicPartitionLeaders(onlineTopics, topicIds) + .keySet() + .stream() + .collect(Collectors.toMap(Function.identity(), ignored -> reqOffsetSpec))) + .flatMap(request -> listOffsets(adminClient, onlineTopics, topicIds, request)) + .map(CompletionStage::toCompletableFuture) + .toArray(CompletableFuture[]::new); + + return CompletableFuture.allOf(pendingOffsets); + } + + private List getRequestOffsetSpecs(String offsetSpec) { + List specs = new ArrayList<>(DEFAULT_OFFSET_SPECS); + + // Never null, defaults to latest + switch (offsetSpec) { // NOSONAR + case KafkaOffsetSpec.EARLIEST, KafkaOffsetSpec.LATEST, KafkaOffsetSpec.MAX_TIMESTAMP: + break; + default: + specs.add(OffsetSpec.forTimestamp(Instant.parse(offsetSpec).toEpochMilli())); + break; + } + + return specs; + } + + /** + * Build of map of {@linkplain PartitionId}s to the partition leader node ID. + * Concurrently, a map of topic names to topic identifiers is constructed to + * support cross referencing the {@linkplain PartitionId} keys (via + * {@linkplain PartitionId#topicId()}) back to the topic's {@linkplain Uuid}. + * This allows easy access of the topics located in the topics map provided to + * this method and is particularly useful for Kafka operations that still + * require topic name. 
+ * + * @param topics map of topics (keyed by Id) + * @param topicIds map of topic names to topic Ids, modified by this method + * @return map of {@linkplain PartitionId}s to the partition leader node ID + */ + private Map topicPartitionLeaders(Map> topics, Map topicIds) { + return topics.entrySet() + .stream() + .filter(entry -> entry.getValue().isPrimaryPresent()) + .map(entry -> { + var topic = entry.getValue().getPrimary(); + topicIds.put(topic.name(), entry.getKey()); + return topic; + }) + .filter(topic -> topic.partitions().isPrimaryPresent()) + .flatMap(topic -> topic.partitions().getPrimary() + .stream() + .filter(PartitionInfo::online) + .map(partition -> { + var key = new PartitionId(topic.getId(), topic.name(), partition.getPartition()); + return Map.entry(key, partition.getLeaderId()); + })) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private String getOffsetKey(OffsetSpec spec) { + if (spec instanceof OffsetSpec.EarliestSpec) { + return KafkaOffsetSpec.EARLIEST; + } + if (spec instanceof OffsetSpec.LatestSpec) { + return KafkaOffsetSpec.LATEST; + } + if (spec instanceof OffsetSpec.MaxTimestampSpec) { + return KafkaOffsetSpec.MAX_TIMESTAMP; + } + return "timestamp"; + } + + private Stream> listOffsets( + Admin adminClient, + Map> topics, + Map topicIds, + Map request) { + + var kafkaRequest = request.entrySet() + .stream() + .map(e -> Map.entry(e.getKey().toKafkaModel(), e.getValue())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + var result = adminClient.listOffsets(kafkaRequest, new ListOffsetsOptions() + .timeoutMs(5000)); + + return kafkaRequest.entrySet() + .stream() + .map(entry -> result.partitionResult(entry.getKey()) + .toCompletionStage() + .handle((offsetResult, error) -> { + addOffset(topics.get(topicIds.get(entry.getKey().topic())).getPrimary(), + entry.getKey().partition(), + getOffsetKey(entry.getValue()), + offsetResult, + error); + return null; + })); + + } + + private void addOffset(Topic topic, int partitionNo, String key, ListOffsetsResultInfo result, Throwable error) { + topic.partitions() + .getPrimary() + .stream() + .filter(partition -> partition.getPartition() == partitionNo) + .findFirst() + .ifPresent(partition -> partition.addOffset(key, either(result, error))); + } + + private Either either(ListOffsetsResultInfo result, Throwable error) { + Function transformer = offsetInfo -> { + Instant timestamp = offsetInfo.timestamp() != -1 ? 
Instant.ofEpochMilli(offsetInfo.timestamp()) : null; + return new OffsetInfo(offsetInfo.offset(), timestamp, offsetInfo.leaderEpoch().orElse(null)); + }; + + return Either.of(result, error, transformer); + } + + private CompletionStage describeLogDirs(Admin adminClient, Map> topics) { + Map topicIds = new HashMap<>(topics.size()); + + var topicPartitionReplicas = topicPartitionLeaders(topics, topicIds); + var nodeIds = topicPartitionReplicas.values().stream().distinct().toList(); + var logDirs = adminClient.describeLogDirs(nodeIds, new DescribeLogDirsOptions() + .timeoutMs(5000)) + .descriptions(); + + var pendingInfo = topicPartitionReplicas.entrySet() + .stream() + .map(e -> { + var topicPartition = e.getKey().toKafkaModel(); + int nodeId = e.getValue(); + var partitionInfo = topics.get(topicIds.get(topicPartition.topic())) + .getPrimary() + .partitions() + .getPrimary() + .stream() + .filter(p -> p.getPartition() == topicPartition.partition()) + .findFirst(); + + return logDirs.get(nodeId).toCompletionStage().handle((nodeLogDirs, error) -> { + if (error != null) { + partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.ofAlternate(error))); + } else { + nodeLogDirs.values() + .stream() + .map(dir -> dir.replicaInfos()) + .map(replicas -> replicas.get(topicPartition)) + .filter(Objects::nonNull) + .map(org.apache.kafka.clients.admin.ReplicaInfo.class::cast) + .map(ReplicaLocalStorage::fromKafkaModel) + .forEach(replicaInfo -> partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.of(replicaInfo)))); + } + + return null; + }); + }) + .map(CompletionStage::toCompletableFuture) + .toArray(CompletableFuture[]::new); + + return CompletableFuture.allOf(pendingInfo); + } + +} diff --git a/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java b/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java index a92cd3f25..d8b67655d 100644 --- a/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java +++ b/api/src/main/java/com/github/streamshub/console/api/service/TopicService.java @@ -1,92 +1,52 @@ package com.github.streamshub.console.api.service; -import java.time.Instant; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; import java.util.function.Predicate; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import jakarta.inject.Named; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.CreatePartitionsOptions; import org.apache.kafka.clients.admin.CreateTopicsOptions; import org.apache.kafka.clients.admin.CreateTopicsResult; -import org.apache.kafka.clients.admin.DescribeLogDirsOptions; -import org.apache.kafka.clients.admin.DescribeTopicsOptions; -import org.apache.kafka.clients.admin.ListOffsetsOptions; -import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; -import org.apache.kafka.clients.admin.ListTopicsOptions; import 
org.apache.kafka.clients.admin.NewPartitionReassignment; -import org.apache.kafka.clients.admin.OffsetSpec; -import org.apache.kafka.clients.admin.TopicListing; import org.apache.kafka.common.TopicCollection; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.UnknownTopicIdException; import org.eclipse.microprofile.context.ThreadContext; import org.jboss.logging.Logger; -import com.github.streamshub.console.api.model.Either; -import com.github.streamshub.console.api.model.Identifier; import com.github.streamshub.console.api.model.NewTopic; -import com.github.streamshub.console.api.model.OffsetInfo; -import com.github.streamshub.console.api.model.PartitionId; -import com.github.streamshub.console.api.model.PartitionInfo; -import com.github.streamshub.console.api.model.ReplicaLocalStorage; import com.github.streamshub.console.api.model.Topic; import com.github.streamshub.console.api.model.TopicPatch; +import com.github.streamshub.console.api.security.PermissionService; import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.KafkaOffsetSpec; import com.github.streamshub.console.api.support.ListRequestContext; import com.github.streamshub.console.api.support.TopicValidation; -import com.github.streamshub.console.api.support.UnknownTopicIdPatch; import com.github.streamshub.console.api.support.ValidationProxy; +import com.github.streamshub.console.config.security.Privilege; -import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.KubernetesClient; import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.topic.KafkaTopic; import static org.apache.kafka.clients.admin.NewPartitions.increaseTo; @ApplicationScoped public class TopicService { - private static final List DEFAULT_OFFSET_SPECS = - List.of(OffsetSpec.earliest(), OffsetSpec.latest(), OffsetSpec.maxTimestamp()); - private static final Predicate CONFIG_SORT = - Pattern.compile("^-?configs\\..+$").asMatchPredicate(); - private static final Set REQUIRE_DESCRIBE = Set.of( - Topic.Fields.PARTITIONS, - Topic.Fields.NUM_PARTITIONS, - Topic.Fields.AUTHORIZED_OPERATIONS, - Topic.Fields.TOTAL_LEADER_LOG_BYTES, - Topic.Fields.STATUS); - private static final Set REQUIRE_PARTITIONS = Set.of( - Topic.Fields.PARTITIONS, - Topic.Fields.NUM_PARTITIONS, - Topic.Fields.TOTAL_LEADER_LOG_BYTES, - Topic.Fields.STATUS); - @Inject Logger logger; @@ -105,19 +65,19 @@ public class TopicService { KafkaContext kafkaContext; @Inject - @Named("KafkaTopics") - Map>> managedTopics; + KubernetesClient k8s; @Inject - KubernetesClient k8s; + PermissionService permissionService; @Inject ConfigService configService; @Inject - ConsumerGroupService consumerGroupService; + TopicDescribeService topicDescribe; - public CompletionStage createTopic(NewTopic topic, boolean validateOnly) { + public NewTopic createTopic(NewTopic topic, boolean validateOnly) { + permissionService.assertPermitted(Topic.API_TYPE, Privilege.CREATE, topic.name()); Kafka kafka = kafkaContext.resource(); Admin adminClient = kafkaContext.admin(); @@ -152,81 +112,17 @@ public CompletionStage createTopic(NewTopic topic, boolean validateOnl return result.all() .thenApply(nothing -> NewTopic.fromKafkaModel(topicName, result)) - .toCompletionStage(); + .toCompletionStage() + .toCompletableFuture() + .join(); } public CompletionStage> listTopics(List fields, String offsetSpec, 
ListRequestContext listSupport) { - List fetchList = new ArrayList<>(fields); - - if (listSupport.getSortEntries().stream().anyMatch(CONFIG_SORT)) { - fetchList.add(Topic.Fields.CONFIGS); - } - - Admin adminClient = kafkaContext.admin(); - final Map statuses = new HashMap<>(); - final AtomicInteger partitionCount = new AtomicInteger(0); - - listSupport.meta().put("summary", Map.of( - "statuses", statuses, - "totalPartitions", partitionCount)); - - return listTopics(adminClient, true) - .thenApply(list -> list.stream().map(Topic::fromTopicListing).toList()) - .thenComposeAsync( - list -> augmentList(adminClient, list, fetchList, offsetSpec), - threadContext.currentContextExecutor()) - .thenApply(list -> list.stream() - .filter(listSupport) - .map(topic -> tallySummary(statuses, partitionCount, topic)) - .map(listSupport::tally) - .filter(listSupport::betweenCursors) - .sorted(listSupport.getSortComparator()) - .dropWhile(listSupport::beforePageBegin) - .takeWhile(listSupport::pageCapacityAvailable)) - .thenApplyAsync( - topics -> topics.map(this::setManaged).toList(), - threadContext.currentContextExecutor()); - } - - Topic tallySummary(Map statuses, AtomicInteger partitionCount, Topic topic) { - statuses.compute(topic.status(), (k, v) -> v == null ? 1 : v + 1); - - Integer numPartitions = topic.getAttributes().numPartitions(); - //numPartitions may be null if it was not included in the requested fields - if (numPartitions != null) { - partitionCount.addAndGet(numPartitions); - } - - return topic; - } - - CompletableFuture> listTopics(Admin adminClient, boolean listInternal) { - return adminClient - .listTopics(new ListTopicsOptions().listInternal(listInternal)) - .listings() - .thenApply(topics -> topics.stream().toList()) - .toCompletionStage() - .toCompletableFuture(); + return topicDescribe.listTopics(fields, offsetSpec, listSupport); } public CompletionStage describeTopic(String topicId, List fields, String offsetSpec) { - Admin adminClient = kafkaContext.admin(); - Uuid id = Uuid.fromString(topicId); - - CompletableFuture describePromise = describeTopics(adminClient, List.of(id), fields, offsetSpec) - .thenApply(result -> result.get(id)) - .thenApply(result -> result.getOrThrow(CompletionException::new)) - .thenApplyAsync(this::setManaged, threadContext.currentContextExecutor()) - .toCompletableFuture(); - - return describePromise.thenComposeAsync(topic -> { - var topics = Map.of(id, topic); - - return CompletableFuture.allOf( - maybeDescribeConfigs(adminClient, topics, fields), - maybeFetchConsumerGroups(topics, fields)) - .thenApply(nothing -> topic); - }, threadContext.currentContextExecutor()); + return topicDescribe.describeTopic(topicId, fields, offsetSpec); } /** @@ -246,14 +142,30 @@ public CompletionStage patchTopic(String topicId, TopicPatch patch, boolea return describeTopic(topicId, List.of(Topic.Fields.CONFIGS), KafkaOffsetSpec.LATEST) .thenApply(topic -> validationService.validate(new TopicValidation.TopicPatchInputs(kafka, topic, patch))) .thenApply(TopicValidation.TopicPatchInputs::topic) - .thenComposeAsync(topic -> getManagedTopic(topic.name()) + .thenComposeAsync(topic -> topicDescribe.getManagedTopic(topic.name()) .map(kafkaTopic -> patchManagedTopic()) .orElseGet(() -> patchUnmanagedTopic(topic, patch, validateOnly)), - threadContext.currentContextExecutor()); + threadContext.currentContextExecutor()); + } + + public CompletionStage deleteTopic(String topicId) { + Admin adminClient = kafkaContext.admin(); + Uuid id = Uuid.fromString(topicId); + + return 
topicDescribe.topicNameForId(topicId).thenComposeAsync(topicName -> { + if (topicName.isPresent()) { + return adminClient.deleteTopics(TopicCollection.ofTopicIds(List.of(id))) + .topicIdValues() + .get(id) + .toCompletionStage(); + } + + throw new UnknownTopicIdException("No such topic: " + topicId); + }, threadContext.currentContextExecutor()); } // Modifications disabled for now - CompletionStage patchManagedTopic(/*KafkaTopic topic, TopicPatch patch, boolean validateOnly*/) { + private CompletionStage patchManagedTopic(/*KafkaTopic topic, TopicPatch patch, boolean validateOnly*/) { return CompletableFuture.completedStage(null); // if (validateOnly) { // NOSONAR // return CompletableFuture.completedStage(null); @@ -282,7 +194,7 @@ CompletionStage patchManagedTopic(/*KafkaTopic topic, TopicPatch patch, bo // return CompletableFuture.runAsync(() -> k8s.resource(modifiedTopic).serverSideApply()); } - CompletionStage patchUnmanagedTopic(Topic topic, TopicPatch patch, boolean validateOnly) { + private CompletionStage patchUnmanagedTopic(Topic topic, TopicPatch patch, boolean validateOnly) { List> pending = new ArrayList<>(); pending.add(maybeCreatePartitions(topic, patch, validateOnly)); @@ -309,7 +221,7 @@ CompletionStage patchUnmanagedTopic(Topic topic, TopicPatch patch, boolean }); } - CompletableFuture maybeCreatePartitions(Topic topic, TopicPatch topicPatch, boolean validateOnly) { + private CompletableFuture maybeCreatePartitions(Topic topic, TopicPatch topicPatch, boolean validateOnly) { int currentNumPartitions = topic.partitions().getPrimary().size(); int newNumPartitions = Optional.ofNullable(topicPatch.numPartitions()).orElse(currentNumPartitions); @@ -326,7 +238,7 @@ CompletableFuture maybeCreatePartitions(Topic topic, TopicPatch topicPatch return CompletableFuture.completedFuture(null); } - CompletionStage createPartitions(String topicName, int totalCount, List> newAssignments, boolean validateOnly) { + private CompletionStage createPartitions(String topicName, int totalCount, List> newAssignments, boolean validateOnly) { Admin adminClient = kafkaContext.admin(); org.apache.kafka.clients.admin.NewPartitions newPartitions; @@ -345,7 +257,7 @@ CompletionStage createPartitions(String topicName, int totalCount, List

  • > maybeAlterPartitionAssignments(Topic topic, TopicPatch topicPatch) { + private List> maybeAlterPartitionAssignments(Topic topic, TopicPatch topicPatch) { int currentNumPartitions = topic.partitions().getPrimary().size(); var alteredAssignments = IntStream.range(0, currentNumPartitions) @@ -381,7 +293,7 @@ List> maybeAlterPartitionAssignments(Topic topic, TopicP .toList(); } - void logPartitionReassignments(Topic topic, + private void logPartitionReassignments(Topic topic, Map> alteredAssignments) { StringBuilder changes = new StringBuilder(); @@ -417,331 +329,11 @@ void logPartitionReassignments(Topic topic, changes); } - CompletableFuture maybeAlterConfigs(Topic topic, TopicPatch topicPatch, boolean validateOnly) { + private CompletableFuture maybeAlterConfigs(Topic topic, TopicPatch topicPatch, boolean validateOnly) { return Optional.ofNullable(topicPatch.configs()) .filter(Predicate.not(Map::isEmpty)) .map(configs -> configService.alterConfigs(ConfigResource.Type.TOPIC, topic.name(), configs, validateOnly) .toCompletableFuture()) .orElseGet(() -> CompletableFuture.completedFuture(null)); } - - public CompletionStage deleteTopic(String topicId) { - Admin adminClient = kafkaContext.admin(); - Uuid id = Uuid.fromString(topicId); - - return adminClient.deleteTopics(TopicCollection.ofTopicIds(List.of(id))) - .topicIdValues() - .get(id) - .toCompletionStage(); - } - - Topic setManaged(Topic topic) { - topic.addMeta("managed", getManagedTopic(topic.name()) - .map(kafkaTopic -> Boolean.TRUE) - .orElse(Boolean.FALSE)); - return topic; - } - - Optional getManagedTopic(String topicName) { - return Optional.ofNullable(kafkaContext.resource()) - .map(Kafka::getMetadata) - .flatMap(kafkaMeta -> Optional.ofNullable(managedTopics.get(kafkaMeta.getNamespace())) - .map(clustersInNamespace -> clustersInNamespace.get(kafkaMeta.getName())) - .map(topicsInCluster -> topicsInCluster.get(topicName)) - // Do not consider topics without a status set by Strimzi as managed - .filter(topic -> Objects.nonNull(topic.getStatus())) - .filter(this::isManaged)); - } - - boolean isManaged(KafkaTopic topic) { - return Optional.of(topic) - .map(KafkaTopic::getMetadata) - .map(ObjectMeta::getAnnotations) - .map(annotations -> annotations.getOrDefault("strimzi.io/managed", "true")) - .map(managed -> !"false".equals(managed)) - .orElse(true); - } - - CompletionStage> augmentList(Admin adminClient, List list, List fields, String offsetSpec) { - Map topics = list.stream().collect(Collectors.toMap(t -> Uuid.fromString(t.getId()), Function.identity())); - CompletableFuture configPromise = maybeDescribeConfigs(adminClient, topics, fields); - CompletableFuture describePromise = maybeDescribeTopics(adminClient, topics, fields, offsetSpec); - CompletableFuture consumerGroupPromise = maybeFetchConsumerGroups(topics, fields); - - return CompletableFuture.allOf(configPromise, describePromise, consumerGroupPromise) - .thenApply(nothing -> list); - } - - CompletableFuture maybeDescribeConfigs(Admin adminClient, Map topics, List fields) { - if (fields.contains(Topic.Fields.CONFIGS)) { - Map topicIds = new HashMap<>(); - List keys = topics.values().stream() - .map(topic -> { - topicIds.put(topic.name(), Uuid.fromString(topic.getId())); - return topic.name(); - }) - .map(name -> new ConfigResource(ConfigResource.Type.TOPIC, name)) - .toList(); - - return configService.describeConfigs(adminClient, keys) - .thenAccept(configs -> - configs.forEach((name, either) -> topics.get(topicIds.get(name)).addConfigs(either))) - 
.toCompletableFuture(); - } - - return CompletableFuture.completedFuture(null); - } - - CompletableFuture maybeDescribeTopics(Admin adminClient, Map topics, List fields, String offsetSpec) { - if (REQUIRE_DESCRIBE.stream().anyMatch(fields::contains)) { - return describeTopics(adminClient, topics.keySet(), fields, offsetSpec) - .thenApply(descriptions -> { - descriptions.forEach((id, either) -> { - if (REQUIRE_PARTITIONS.stream().anyMatch(fields::contains)) { - topics.get(id).addPartitions(either); - } - if (fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)) { - topics.get(id).addAuthorizedOperations(either); - } - }); - - return null; - }) - .toCompletableFuture(); - } - - return CompletableFuture.completedFuture(null); - } - - CompletableFuture maybeFetchConsumerGroups(Map topics, List fields) { - CompletionStage>> pendingConsumerGroups; - - if (fields.contains(Topic.Fields.CONSUMER_GROUPS)) { - var topicIds = topics.keySet().stream().map(Uuid::toString).toList(); - pendingConsumerGroups = consumerGroupService.listConsumerGroupMembership(topicIds); - } else { - pendingConsumerGroups = CompletableFuture.completedStage(Collections.emptyMap()); - } - - return pendingConsumerGroups.thenAccept(consumerGroups -> - consumerGroups.entrySet() - .stream() - .forEach(e -> { - Topic topic = topics.get(Uuid.fromString(e.getKey())); - var identifiers = e.getValue().stream().map(g -> new Identifier("consumerGroups", g)).toList(); - topic.consumerGroups().data().addAll(identifiers); - topic.consumerGroups().addMeta("count", identifiers.size()); - })) - .toCompletableFuture(); - } - - CompletionStage>> describeTopics( - Admin adminClient, - Collection topicIds, - List fields, - String offsetSpec) { - - Map> result = new LinkedHashMap<>(topicIds.size()); - TopicCollection request = TopicCollection.ofTopicIds(topicIds); - DescribeTopicsOptions options = new DescribeTopicsOptions() - .includeAuthorizedOperations(fields.contains(Topic.Fields.AUTHORIZED_OPERATIONS)); - - var pendingDescribes = adminClient.describeTopics(request, options) - .topicIdValues() - .entrySet() - .stream() - .map(entry -> - entry.getValue().toCompletionStage().handle((description, error) -> { - error = UnknownTopicIdPatch.apply(error, Function.identity()); - - result.put( - entry.getKey(), - Either.of(description, error, Topic::fromTopicDescription)); - return null; - })) - .map(CompletionStage::toCompletableFuture) - .toArray(CompletableFuture[]::new); - - return CompletableFuture.allOf(pendingDescribes) - .thenCompose(nothing -> CompletableFuture.allOf( - listOffsets(adminClient, result, offsetSpec).toCompletableFuture(), - describeLogDirs(adminClient, result).toCompletableFuture() - )) - .thenApply(nothing -> result); - } - - CompletionStage listOffsets(Admin adminClient, Map> topics, String offsetSpec) { - Map topicIds = new HashMap<>(topics.size()); - var onlineTopics = topics.entrySet() - .stream() - .filter(topic -> topic.getValue() - .getOptionalPrimary() - .map(Topic::partitionsOnline) - .orElse(false)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - var pendingOffsets = getRequestOffsetSpecs(offsetSpec) - .stream() - .map(reqOffsetSpec -> topicPartitionLeaders(onlineTopics, topicIds) - .keySet() - .stream() - .collect(Collectors.toMap(Function.identity(), ignored -> reqOffsetSpec))) - .flatMap(request -> listOffsets(adminClient, onlineTopics, topicIds, request)) - .map(CompletionStage::toCompletableFuture) - .toArray(CompletableFuture[]::new); - - return CompletableFuture.allOf(pendingOffsets); - 
} - - List getRequestOffsetSpecs(String offsetSpec) { - List specs = new ArrayList<>(DEFAULT_OFFSET_SPECS); - - // Never null, defaults to latest - switch (offsetSpec) { // NOSONAR - case KafkaOffsetSpec.EARLIEST, KafkaOffsetSpec.LATEST, KafkaOffsetSpec.MAX_TIMESTAMP: - break; - default: - specs.add(OffsetSpec.forTimestamp(Instant.parse(offsetSpec).toEpochMilli())); - break; - } - - return specs; - } - - /** - * Build of map of {@linkplain PartitionId}s to the partition leader node ID. - * Concurrently, a map of topic names to topic identifiers is constructed to - * support cross referencing the {@linkplain PartitionId} keys (via - * {@linkplain PartitionId#topicId()}) back to the topic's {@linkplain Uuid}. - * This allows easy access of the topics located in the topics map provided to - * this method and is particularly useful for Kafka operations that still - * require topic name. - * - * @param topics map of topics (keyed by Id) - * @param topicIds map of topic names to topic Ids, modified by this method - * @return map of {@linkplain PartitionId}s to the partition leader node ID - */ - Map topicPartitionLeaders(Map> topics, Map topicIds) { - return topics.entrySet() - .stream() - .filter(entry -> entry.getValue().isPrimaryPresent()) - .map(entry -> { - var topic = entry.getValue().getPrimary(); - topicIds.put(topic.name(), entry.getKey()); - return topic; - }) - .filter(topic -> topic.partitions().isPrimaryPresent()) - .flatMap(topic -> topic.partitions().getPrimary() - .stream() - .filter(PartitionInfo::online) - .map(partition -> { - var key = new PartitionId(topic.getId(), topic.name(), partition.getPartition()); - return Map.entry(key, partition.getLeaderId()); - })) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - } - - String getOffsetKey(OffsetSpec spec) { - if (spec instanceof OffsetSpec.EarliestSpec) { - return KafkaOffsetSpec.EARLIEST; - } - if (spec instanceof OffsetSpec.LatestSpec) { - return KafkaOffsetSpec.LATEST; - } - if (spec instanceof OffsetSpec.MaxTimestampSpec) { - return KafkaOffsetSpec.MAX_TIMESTAMP; - } - return "timestamp"; - } - - Stream> listOffsets( - Admin adminClient, - Map> topics, - Map topicIds, - Map request) { - - var kafkaRequest = request.entrySet() - .stream() - .map(e -> Map.entry(e.getKey().toKafkaModel(), e.getValue())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - var result = adminClient.listOffsets(kafkaRequest, new ListOffsetsOptions() - .timeoutMs(5000)); - - return kafkaRequest.entrySet() - .stream() - .map(entry -> result.partitionResult(entry.getKey()) - .toCompletionStage() - .handle((offsetResult, error) -> { - addOffset(topics.get(topicIds.get(entry.getKey().topic())).getPrimary(), - entry.getKey().partition(), - getOffsetKey(entry.getValue()), - offsetResult, - error); - return null; - })); - - } - - void addOffset(Topic topic, int partitionNo, String key, ListOffsetsResultInfo result, Throwable error) { - topic.partitions() - .getPrimary() - .stream() - .filter(partition -> partition.getPartition() == partitionNo) - .findFirst() - .ifPresent(partition -> partition.addOffset(key, either(result, error))); - } - - Either either(ListOffsetsResultInfo result, Throwable error) { - Function transformer = offsetInfo -> { - Instant timestamp = offsetInfo.timestamp() != -1 ? 
Instant.ofEpochMilli(offsetInfo.timestamp()) : null; - return new OffsetInfo(offsetInfo.offset(), timestamp, offsetInfo.leaderEpoch().orElse(null)); - }; - - return Either.of(result, error, transformer); - } - - CompletionStage describeLogDirs(Admin adminClient, Map> topics) { - Map topicIds = new HashMap<>(topics.size()); - - var topicPartitionReplicas = topicPartitionLeaders(topics, topicIds); - var nodeIds = topicPartitionReplicas.values().stream().distinct().toList(); - var logDirs = adminClient.describeLogDirs(nodeIds, new DescribeLogDirsOptions() - .timeoutMs(5000)) - .descriptions(); - - var pendingInfo = topicPartitionReplicas.entrySet() - .stream() - .map(e -> { - var topicPartition = e.getKey().toKafkaModel(); - int nodeId = e.getValue(); - var partitionInfo = topics.get(topicIds.get(topicPartition.topic())) - .getPrimary() - .partitions() - .getPrimary() - .stream() - .filter(p -> p.getPartition() == topicPartition.partition()) - .findFirst(); - - return logDirs.get(nodeId).toCompletionStage().handle((nodeLogDirs, error) -> { - if (error != null) { - partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.ofAlternate(error))); - } else { - nodeLogDirs.values() - .stream() - .map(dir -> dir.replicaInfos()) - .map(replicas -> replicas.get(topicPartition)) - .filter(Objects::nonNull) - .map(org.apache.kafka.clients.admin.ReplicaInfo.class::cast) - .map(ReplicaLocalStorage::fromKafkaModel) - .forEach(replicaInfo -> partitionInfo.ifPresent(p -> p.setReplicaLocalStorage(nodeId, Either.of(replicaInfo)))); - } - - return null; - }); - }) - .map(CompletionStage::toCompletableFuture) - .toArray(CompletableFuture[]::new); - - return CompletableFuture.allOf(pendingInfo); - } } diff --git a/api/src/main/resources/application.properties b/api/src/main/resources/application.properties index 16e9fee69..5d4494bc9 100644 --- a/api/src/main/resources/application.properties +++ b/api/src/main/resources/application.properties @@ -1,7 +1,7 @@ quarkus.http.access-log.enabled=true quarkus.http.record-request-start-time=true # Default access-log pattern with `%u` removed. 
Due to the mixing of Quarkus and Vert.x authorization, the user authenticated cannot be obtained at this time -quarkus.http.access-log.pattern=%{REMOTE_HOST} %l "%{REQUEST_LINE}" %{RESPONSE_CODE} %{RESPONSE_TIME}ms %{BYTES_SENT} +quarkus.http.access-log.pattern=%{REMOTE_USER} %{REMOTE_HOST} %l "%{REQUEST_LINE}" %{RESPONSE_CODE} %{RESPONSE_TIME}ms %{BYTES_SENT} quarkus.http.access-log.exclude-pattern=(?:/health(/live|/ready|/started)?|/metrics) quarkus.http.non-application-root-path=${quarkus.http.root-path} quarkus.http.http2=false @@ -16,8 +16,8 @@ quarkus.http.cors.access-control-allow-credentials=true quarkus.http.header."Strict-Transport-Security".value=max-age=31536000 quarkus.http.auth.basic=false -#quarkus.http.auth.permission."oidc".policy=authenticated -#quarkus.http.auth.permission."oidc".paths=/api/* +quarkus.http.auth.permission."oidc".policy=permit +quarkus.http.auth.permission."oidc".paths=/api/* # See https://quarkus.io/guides/kafka-dev-services # Enable when using quarkus-kafka-client @@ -78,13 +78,16 @@ console.kafka.admin.default.api.timeout.ms=10000 %dev.quarkus.kubernetes-client.trust-certs=true %dev.quarkus.log.category."io.vertx.core.impl.BlockedThreadChecker".level=OFF %dev.quarkus.log.category."com.github.streamshub.console".level=DEBUG +%dev.quarkus.log.category."io.quarkus.oidc.runtime".level=INFO # %dev.quarkus.apicurio-registry.devservices.enabled=true # %dev.apicurio.rest.client.disable-auto-basepath-append=true # %dev.quarkus.apicurio-registry.devservices.image-name=quay.io/apicurio/apicurio-registry-mem:2.6.x-release ######## +#%testplain.quarkus.http.test-timeout=600s %testplain.quarkus.devservices.enabled=true +%testplain.quarkus.keycloak.devservices.enabled=false %testplain.quarkus.kubernetes-client.devservices.enabled=true %testplain.quarkus.kubernetes-client.devservices.override-kubeconfig=true %testplain.quarkus.apicurio-registry.devservices.image-name=quay.io/apicurio/apicurio-registry-mem:2.6.x-release diff --git a/api/src/test/java/com/github/streamshub/console/api/BrokersResourceIT.java b/api/src/test/java/com/github/streamshub/console/api/BrokersResourceIT.java index b778af6a4..3ffbb3e05 100644 --- a/api/src/test/java/com/github/streamshub/console/api/BrokersResourceIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/BrokersResourceIT.java @@ -63,6 +63,8 @@ void setup() { utils = new TestHelper(bootstrapServers, config, null); client.resources(Kafka.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); + utils.apply(client, new KafkaBuilder() .withNewMetadata() .withName("test-kafka1") diff --git a/api/src/test/java/com/github/streamshub/console/api/ConsumerGroupsResourceIT.java b/api/src/test/java/com/github/streamshub/console/api/ConsumerGroupsResourceIT.java index 4dbd2ceac..ec0a574dc 100644 --- a/api/src/test/java/com/github/streamshub/console/api/ConsumerGroupsResourceIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/ConsumerGroupsResourceIT.java @@ -125,6 +125,8 @@ void setup() throws IOException { utils = new TestHelper(bootstrapServers, config, null); client.resources(Kafka.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); + utils.apply(client, utils.buildKafkaResource("test-kafka1", utils.getClusterId(), bootstrapServers)); // Wait for the informer cache to be populated with all Kafka CRs diff --git a/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceIT.java b/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceIT.java index 
5264979d5..c698434af 100644 --- a/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceIT.java @@ -2,13 +2,13 @@ import java.io.IOException; import java.net.URI; +import java.util.Arrays; import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.IntStream; @@ -30,7 +30,6 @@ import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.security.scram.ScramLoginModule; import org.eclipse.microprofile.config.Config; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -45,6 +44,8 @@ import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.config.ConsoleConfig; import com.github.streamshub.console.config.KafkaClusterConfig; +import com.github.streamshub.console.config.security.GlobalSecurityConfigBuilder; +import com.github.streamshub.console.config.security.Privilege; import com.github.streamshub.console.kafka.systemtest.TestPlainProfile; import com.github.streamshub.console.kafka.systemtest.deployment.DeploymentManager; import com.github.streamshub.console.test.AdminClientSpy; @@ -68,7 +69,6 @@ import static com.github.streamshub.console.test.TestHelper.whenRequesting; import static java.util.Objects.isNull; -import static java.util.function.Predicate.not; import static org.awaitility.Awaitility.await; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; @@ -95,7 +95,7 @@ class KafkaClustersResourceIT { * List of Kafka clusters that are always created. test-kafkaY is configured in {@link TestPlainProfile} * but has no associated Strimzi Kafka CR. 
*/ - private static final List STATIC_KAFKAS = List.of("test-kafka1", "test-kafka2", "test-kafkaY"); + static final List STATIC_KAFKAS = List.of("test-kafka1", "test-kafka2", "test-kafkaY"); @Inject Config config; @@ -138,6 +138,7 @@ void setup() throws IOException { utils = new TestHelper(bootstrapServers, config, null); client.resources(Kafka.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); utils.apply(client, new KafkaBuilder(utils.buildKafkaResource("test-kafka1", utils.getClusterId(), bootstrapServers, new KafkaListenerAuthenticationCustomBuilder() @@ -173,16 +174,6 @@ void setup() throws IOException { kafkaClusterService.setListUnconfigured(false); } - @AfterEach - void teardown() throws IOException { - client.resources(Kafka.class).inAnyNamespace() - .list() - .getItems() - .stream() - .filter(not(k -> Set.of("test-kafka1", "test-kafka2").contains(k.getMetadata().getName()))) - .forEach(k -> client.resource(k).delete()); - } - @Test void testListClusters() { String k1Bootstrap = bootstrapServers.getHost() + ":" + bootstrapServers.getPort(); @@ -207,6 +198,34 @@ void testListClusters() { .body("data.find { it.attributes.name == 'test-kafkaY'}.attributes.listeners", is(nullValue())); } + @Test + void testListClustersWithAnonymousLimited() { + List visibleClusters = Arrays.asList("test-kafka1", "test-kafkaY"); + + consoleConfig.setSecurity(new GlobalSecurityConfigBuilder() + .addNewRole() + .withName("unauthenticated") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.LIST) + .withResourceNames(visibleClusters) + .endRule() + .endRole() + .addNewSubject() + // ANONYMOUS is a special subject name for unauthenticated requests + .withInclude("ANONYMOUS") + .withRoleNames("unauthenticated") + .endSubject() + .build()); + + whenRequesting(req -> req.param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get()) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(visibleClusters.size())) + .body("data.attributes.name", containsInAnyOrder(visibleClusters.toArray(String[]::new))); + } + @Test void testListClustersWithInformerError() { SharedIndexInformer informer = Mockito.mock(); diff --git a/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceNoK8sIT.java b/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceNoK8sIT.java index 4dc3047d5..33f188254 100644 --- a/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceNoK8sIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceNoK8sIT.java @@ -66,6 +66,7 @@ void setup() throws IOException { .map(k -> k.getProperties().get("bootstrap.servers")) .orElseThrow()); + consoleConfig.clearSecurity(); utils = new TestHelper(bootstrapServers, config, null); clusterId1 = consoleConfig.getKafka().getCluster("test-kafka1").get().getId(); diff --git a/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceOidcIT.java b/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceOidcIT.java new file mode 100644 index 000000000..100196817 --- /dev/null +++ b/api/src/test/java/com/github/streamshub/console/api/KafkaClustersResourceOidcIT.java @@ -0,0 +1,340 @@ +package com.github.streamshub.console.api; + +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import jakarta.inject.Inject; +import 
jakarta.ws.rs.core.Response.Status; + +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.eclipse.microprofile.config.Config; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import com.github.streamshub.console.api.model.KafkaCluster; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.KafkaClusterConfig; +import com.github.streamshub.console.config.security.GlobalSecurityConfigBuilder; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.kafka.systemtest.TestPlainProfile; +import com.github.streamshub.console.kafka.systemtest.deployment.DeploymentManager; +import com.github.streamshub.console.kafka.systemtest.utils.TokenUtils; +import com.github.streamshub.console.test.AdminClientSpy; +import com.github.streamshub.console.test.TestHelper; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.informers.cache.Cache; +import io.quarkus.test.common.http.TestHTTPEndpoint; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.test.container.StrimziKafkaContainer; + +import static com.github.streamshub.console.test.TestHelper.whenRequesting; +import static org.awaitility.Awaitility.await; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +@QuarkusTest +@TestHTTPEndpoint(KafkaClustersResource.class) +@TestProfile(TestPlainProfile.class) +class KafkaClustersResourceOidcIT { + + @Inject + Config config; + + @Inject + KubernetesClient client; + + @Inject + Map configuredContexts; + + @Inject + ConsoleConfig consoleConfig; + + @DeploymentManager.InjectDeploymentManager + DeploymentManager deployments; + + TestHelper utils; + TokenUtils tokens; + + StrimziKafkaContainer kafkaContainer; + String clusterId1; + URI bootstrapServers; + + @BeforeEach + void setup() throws IOException { + kafkaContainer = deployments.getKafkaContainer(); + bootstrapServers = URI.create(kafkaContainer.getBootstrapServers()); + + utils = new TestHelper(bootstrapServers, config, null); + tokens = new TokenUtils(config); + + client.resources(Kafka.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); + + Kafka kafka1 = new KafkaBuilder(utils.buildKafkaResource("test-kafka1", utils.getClusterId(), bootstrapServers)) + .editOrNewStatus() + .addNewCondition() + .withType("Ready") + .withStatus("True") + .endCondition() + .addNewKafkaNodePool() + .withName("my-node-pool") + .endKafkaNodePool() + .endStatus() + .build(); + + utils.apply(client, kafka1); + + // Second cluster is offline/non-existent + URI randomBootstrapServers = URI.create(consoleConfig.getKafka() + .getCluster("default/test-kafka2") + .map(k -> k.getProperties().get("bootstrap.servers")) + .orElseThrow()); + + utils.apply(client, new KafkaBuilder(utils.buildKafkaResource("test-kafka2", UUID.randomUUID().toString(), randomBootstrapServers)) + .editOrNewStatus() + .addNewCondition() + .withType("NotReady") + .withStatus("True") + .endCondition() + 
.endStatus() + .build()); + + // Wait for the added cluster to be configured in the context map + await().atMost(10, TimeUnit.SECONDS) + .until(() -> configuredContexts.values() + .stream() + .map(KafkaContext::clusterConfig) + .map(KafkaClusterConfig::clusterKey) + .anyMatch(Cache.metaNamespaceKeyFunc(kafka1)::equals)); + + clusterId1 = consoleConfig.getKafka().getCluster("default/test-kafka1").get().getId(); + } + + @Test + void testListClustersWithNoRolesDefined() { + consoleConfig.setSecurity(oidcSecurity().build()); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get()) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(KafkaClustersResourceIT.STATIC_KAFKAS.size())) + .body("data.attributes.name", containsInAnyOrder(KafkaClustersResourceIT.STATIC_KAFKAS.toArray(String[]::new))); + } + + @Test + void testListClustersWithFullAccess() { + // alice is a developer and developers may list all kafkas + consoleConfig.setSecurity(oidcSecurity() + .addNewRole() + .withName("developer") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.LIST) + .endRule() + .endRole() + .addNewSubject() + .withInclude("alice") + .withRoleNames("developer") + .endSubject() + .build()); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get()) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(KafkaClustersResourceIT.STATIC_KAFKAS.size())) + .body("data.attributes.name", containsInAnyOrder(KafkaClustersResourceIT.STATIC_KAFKAS.toArray(String[]::new))); + } + + @Test + void testListClustersUnauthenticated() { + // alice is a developer and developers may list all kafkas + consoleConfig.setSecurity(oidcSecurity() + .addNewRole() + .withName("developer") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.LIST) + .endRule() + .endRole() + .build()); + + whenRequesting(req -> req + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get()) + .assertThat() + .statusCode(is(Status.UNAUTHORIZED.getStatusCode())) + .body("errors.size()", is(1)) + .body("errors.status", contains("401")) + .body("errors.code", contains("4011")); + } + + @Test + void testListClustersWithReducedAccess() { + List visibleClusters = Arrays.asList("test-kafka1", "test-kafkaY"); + + // alice is a developer and developers may only list two of three clusters + consoleConfig.setSecurity(oidcSecurity() + .addNewRole() + .withName("developer") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.LIST) + .withResourceNames(visibleClusters) + .endRule() + .endRole() + .addNewSubject() + .withInclude("alice") + .withRoleNames("developer") + .endSubject() + .build()); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get()) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(visibleClusters.size())) + .body("data.attributes.name", containsInAnyOrder(visibleClusters.toArray(String[]::new))); + } + + @Test + void testDescribeClusterWithAdminAccess() { + // make alice an admin + consoleConfig.setSecurity(oidcSecurity() + .addNewRole() + .withName("admin") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.ALL) + .endRule() + .endRole() + .addNewSubject() + .withInclude("alice") + 
.withRoleNames("admin") + .endSubject() + .build()); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get("{clusterId}", clusterId1)) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())); + } + + @Test + void testDescribeClusterWithoutAdminAccess() { + consoleConfig.setSecurity(oidcSecurity() + .addNewRole() + .withName("admin") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.ALL) + .endRule() + .endRole() + .addNewSubject() + .withInclude("alice") + .withRoleNames("developer") + .endSubject() + .build()); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get("{clusterId}", clusterId1)) + .assertThat() + .statusCode(is(Status.FORBIDDEN.getStatusCode())); + } + + @ParameterizedTest + // alice and bob are on team-a; susan is only on team-b + @CsvSource({ + "alice, OK", + "bob, OK", + "susan, FORBIDDEN", + }) + void testDescribeClusterWithGroupAccess(String username, Status expectedStatus) { + // team-a group is given admin access Kafkas + consoleConfig.setSecurity(oidcSecurity() + .addNewRole() + .withName("admin-a") + .addNewRule() + .withResources("kafkas") + .withPrivileges(Privilege.ALL) + .endRule() + .endRole() + .addNewSubject() + // here we define the subject in terms of group membership + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("admin-a") + .endSubject() + .build()); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken(username)) + .param("fields[" + KafkaCluster.API_TYPE + "]", "name") + .get("{clusterId}", clusterId1)) + .assertThat() + .statusCode(is(expectedStatus.getStatusCode())); + } + + // Helper methods + + GlobalSecurityConfigBuilder oidcSecurity() { + return new GlobalSecurityConfigBuilder() + .withNewOidc() + .withClientId("console-client") + .withClientSecret("console-client-secret") + .withAuthServerUrl(config.getValue("console.test.oidc-url", String.class)) + .withIssuer(config.getValue("console.test.oidc-issuer", String.class)) + .endOidc(); + } + + static Map mockAdminClient() { + return mockAdminClient(Map.of(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.PLAINTEXT.name)); + } + + static Map mockAdminClient(Map overrides) { + Map clientConfig = new HashMap<>(); + + AdminClientSpy.install(config -> { + clientConfig.putAll(config); + + Map newConfig = new HashMap<>(config); + newConfig.putAll(overrides); + return newConfig; + }, client -> { /* No-op */ }); + + return clientConfig; + } +} diff --git a/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceIT.java b/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceIT.java index d2835be2f..aa321b747 100644 --- a/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceIT.java @@ -14,7 +14,6 @@ import jakarta.ws.rs.core.Response.Status; import org.eclipse.microprofile.config.Config; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -121,6 +120,7 @@ void setup() { client.resources(Kafka.class).inAnyNamespace().delete(); client.resources(KafkaRebalance.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); utils.apply(client, new KafkaBuilder(utils.buildKafkaResource("test-kafka1", 
utils.getClusterId(), bootstrapServers)) .editSpec() @@ -160,12 +160,6 @@ void setup() { clusterId2 = consoleConfig.getKafka().getCluster("default/test-kafka2").get().getId(); } - @AfterEach - void tearDown() { - client.resources(Kafka.class).inAnyNamespace().delete(); - client.resources(KafkaRebalance.class).inAnyNamespace().delete(); - } - @Test void testListRebalancesIncludesAllowedActions() { whenRequesting(req -> req.get("", clusterId1)) diff --git a/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceOidcIT.java b/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceOidcIT.java new file mode 100644 index 000000000..87604d2ad --- /dev/null +++ b/api/src/test/java/com/github/streamshub/console/api/KafkaRebalancesResourceOidcIT.java @@ -0,0 +1,281 @@ +package com.github.streamshub.console.api; + +import java.net.URI; +import java.time.Instant; +import java.util.Arrays; +import java.util.UUID; + +import jakarta.inject.Inject; +import jakarta.ws.rs.core.Response.Status; + +import org.eclipse.microprofile.config.Config; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.SecurityConfigBuilder; +import com.github.streamshub.console.kafka.systemtest.TestPlainProfile; +import com.github.streamshub.console.kafka.systemtest.deployment.DeploymentManager; +import com.github.streamshub.console.kafka.systemtest.utils.TokenUtils; +import com.github.streamshub.console.test.TestHelper; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.quarkus.test.common.http.TestHTTPEndpoint; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import io.strimzi.api.ResourceLabels; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.rebalance.KafkaRebalance; +import io.strimzi.api.kafka.model.rebalance.KafkaRebalanceBuilder; +import io.strimzi.api.kafka.model.rebalance.KafkaRebalanceMode; +import io.strimzi.api.kafka.model.rebalance.KafkaRebalanceState; +import io.strimzi.test.container.StrimziKafkaContainer; + +import static com.github.streamshub.console.test.TestHelper.whenRequesting; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +@QuarkusTest +@TestHTTPEndpoint(KafkaRebalancesResource.class) +@TestProfile(TestPlainProfile.class) +class KafkaRebalancesResourceOidcIT { + + @Inject + Config config; + + @Inject + KubernetesClient client; + + @Inject + ConsoleConfig consoleConfig; + + @DeploymentManager.InjectDeploymentManager + DeploymentManager deployments; + + TestHelper utils; + TokenUtils tokens; + + StrimziKafkaContainer kafkaContainer; + String clusterId1; + String clusterId2; + URI bootstrapServers; + URI randomBootstrapServers; + + static KafkaRebalance buildRebalance(int sequence, String clusterName, KafkaRebalanceMode mode, KafkaRebalanceState state) { + var builder = new KafkaRebalanceBuilder() + .withNewMetadata() + .withName("rebalance-" + sequence) + .withNamespace("default") + .endMetadata() + .withNewSpec() + .withMode(mode) + .endSpec(); + + if (clusterName != null) { + builder.editMetadata() + .addToLabels(ResourceLabels.STRIMZI_CLUSTER_LABEL, clusterName) + 
.endMetadata(); + } + + if (state != null) { + builder = builder + .withNewStatus() + .addNewCondition() + .withType(state.name()) + .withStatus("True") + .withLastTransitionTime(Instant.now().toString()) + .endCondition() + .addToOptimizationResult("intraBrokerDataToMoveMB", "0") + .endStatus(); + } + + return builder.build(); + } + + @BeforeEach + void setup() { + kafkaContainer = deployments.getKafkaContainer(); + bootstrapServers = URI.create(kafkaContainer.getBootstrapServers()); + randomBootstrapServers = URI.create(consoleConfig.getKafka() + .getCluster("default/test-kafka2") + .map(k -> k.getProperties().get("bootstrap.servers")) + .orElseThrow()); + + utils = new TestHelper(bootstrapServers, config, null); + tokens = new TokenUtils(config); + + client.resources(Kafka.class).inAnyNamespace().delete(); + client.resources(KafkaRebalance.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); + + utils.apply(client, new KafkaBuilder(utils.buildKafkaResource("test-kafka1", utils.getClusterId(), bootstrapServers)) + .editSpec() + .withNewCruiseControl() + // empty + .endCruiseControl() + .endSpec() + .build()); + + // Second cluster is offline/non-existent + utils.apply(client, new KafkaBuilder(utils.buildKafkaResource("test-kafka2", UUID.randomUUID().toString(), randomBootstrapServers)) + .editOrNewStatus() + .addNewCondition() + .withType("NotReady") + .withStatus("True") + .endCondition() + .endStatus() + .build()); + + int r = 0; + + // No cluster name - MUST BE FIRST for "Not found" test + utils.apply(client, buildRebalance(r++, null, KafkaRebalanceMode.FULL, null)); + + for (String clusterName : Arrays.asList("test-kafka1", "test-kafka2", "test-kafka3")) { + for (KafkaRebalanceMode mode : KafkaRebalanceMode.values()) { + // No status + utils.apply(client, buildRebalance(r++, clusterName, mode, null)); + + for (KafkaRebalanceState state : KafkaRebalanceState.values()) { + utils.apply(client, buildRebalance(r++, clusterName, mode, state)); + } + } + } + + clusterId1 = consoleConfig.getKafka().getCluster("default/test-kafka1").get().getId(); + clusterId2 = consoleConfig.getKafka().getCluster("default/test-kafka2").get().getId(); + } + + @ParameterizedTest + @CsvSource({ + "alice, a", + // bob is on both teams, not used for this test + "susan, b", + }) + void testListRebalancesWithPerTeamKafkaClusterAccess(String username, String team) { + int total = KafkaRebalanceMode.values().length * (KafkaRebalanceState.values().length + 1); + + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .addNewSubject() + .withClaim("groups") + .withInclude("team-b") + .withRoleNames("dev-b") + .endSubject() + .build()); + + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("rebalances") + .withPrivileges(Privilege.LIST) + .endRule() + .endRole() + .build()); + }); + + consoleConfig.getKafka().getClusterById(clusterId2).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-b") + .addNewRule() + .withResources("rebalances") + .withPrivileges(Privilege.LIST) + .endRule() + .endRole() + .build()); + }); + + String allowedId = "a".equals(team) ? clusterId1 : clusterId2; + String forbiddenId = "a".equals(team) ? 
clusterId2 : clusterId1; + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken(username)) + .param("page[size]", total) + .get("", allowedId)) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(total)); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken(username)) + .get("", forbiddenId)) + .assertThat() + .statusCode(is(Status.FORBIDDEN.getStatusCode())); + } + + @Test + void testListRebalancesWithUnrelatedRoleAccess() { + // alice is granted access to topics, but not rebalances + + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withInclude("alice") + .withRoleNames("developer") + .endSubject() + .build()); + + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("developer") + .addNewRule() + .withResources("topics") + .withPrivileges(Privilege.ALL) + .endRule() + .endRole() + .build()); + }); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .get("", clusterId1)) + .assertThat() + .statusCode(is(Status.FORBIDDEN.getStatusCode())); + } + + @Test + void testListRebalancesWithMissingPrivilege() { + // alice can get and update rebalances, but she may not list them + + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withInclude("alice") + .withRoleNames("developer") + .endSubject() + .build()); + + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("developer") + .addNewRule() + .withResources("rebalances") + .withPrivileges(Privilege.GET, Privilege.UPDATE) + .endRule() + .endRole() + .build()); + }); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .get("", clusterId1)) + .assertThat() + .statusCode(is(Status.FORBIDDEN.getStatusCode())); + } + +} diff --git a/api/src/test/java/com/github/streamshub/console/api/RecordsResourceIT.java b/api/src/test/java/com/github/streamshub/console/api/RecordsResourceIT.java index 231505a0e..f48f13ad6 100644 --- a/api/src/test/java/com/github/streamshub/console/api/RecordsResourceIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/RecordsResourceIT.java @@ -41,6 +41,7 @@ import com.github.streamshub.console.api.support.KafkaContext; import com.github.streamshub.console.api.support.serdes.RecordData; import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.KafkaClusterConfig; import com.github.streamshub.console.kafka.systemtest.TestPlainProfile; import com.github.streamshub.console.kafka.systemtest.deployment.DeploymentManager; import com.github.streamshub.console.test.RecordHelper; @@ -110,11 +111,20 @@ void setup() throws IOException { recordUtils = new RecordHelper(bootstrapServers, config, null); client.resources(Kafka.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); utils.apply(client, utils.buildKafkaResource("test-kafka1", utils.getClusterId(), bootstrapServers)); // Second cluster is offline/non-existent utils.apply(client, utils.buildKafkaResource("test-kafka2", UUID.randomUUID().toString(), randomBootstrapServers)); + // Wait for the context map to be populated with all Kafka configurations + await().atMost(10, TimeUnit.SECONDS).until(() -> kafkaContexts.values() + .stream() + .map(KafkaContext::clusterConfig) + .map(KafkaClusterConfig::getName) + .toList() + .containsAll(List.of("test-kafka1", "test-kafka2"))); + 
clusterId1 = consoleConfig.getKafka().getCluster("default/test-kafka1").get().getId(); clusterId2 = consoleConfig.getKafka().getCluster("default/test-kafka2").get().getId(); } diff --git a/api/src/test/java/com/github/streamshub/console/api/TopicsResourceIT.java b/api/src/test/java/com/github/streamshub/console/api/TopicsResourceIT.java index 3f3f2dac3..6cf4aacdf 100644 --- a/api/src/test/java/com/github/streamshub/console/api/TopicsResourceIT.java +++ b/api/src/test/java/com/github/streamshub/console/api/TopicsResourceIT.java @@ -18,6 +18,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import java.util.logging.Level; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -55,23 +56,33 @@ import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; import org.json.JSONException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.aggregator.AggregateWith; import org.junit.jupiter.params.provider.CsvFileSource; import org.junit.jupiter.params.provider.CsvSource; import org.mockito.stubbing.Answer; import org.skyscreamer.jsonassert.JSONAssert; import org.skyscreamer.jsonassert.JSONCompareMode; +import com.github.streamshub.console.api.security.ConsoleAuthenticationMechanism; import com.github.streamshub.console.api.support.Holder; import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.security.Audit; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.ResourceTypes; +import com.github.streamshub.console.config.security.SecurityConfigBuilder; import com.github.streamshub.console.kafka.systemtest.TestPlainProfile; import com.github.streamshub.console.kafka.systemtest.deployment.DeploymentManager; import com.github.streamshub.console.kafka.systemtest.utils.ConsumerUtils; import com.github.streamshub.console.test.AdminClientSpy; +import com.github.streamshub.console.test.LogCapture; import com.github.streamshub.console.test.TestHelper; import com.github.streamshub.console.test.TopicHelper; +import com.github.streamshub.console.test.VarargsAggregator; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.KubernetesClient; @@ -89,13 +100,17 @@ import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasProperty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; @@ -114,6 +129,11 @@ @TestProfile(TestPlainProfile.class) class TopicsResourceIT { + static LogCapture auditLogCapture = LogCapture.with(logRecord -> logRecord + .getLoggerName() + 
.equals(ConsoleAuthenticationMechanism.class.getName()), + Level.INFO); + @Inject Config config; @@ -141,6 +161,16 @@ class TopicsResourceIT { URI bootstrapServers1; String clusterId2; + @BeforeAll + static void initialize() { + auditLogCapture.register(); + } + + @AfterAll + static void cleanup() { + auditLogCapture.deregister(); + } + @BeforeEach void setup() throws IOException { bootstrapServers1 = URI.create(deployments.getExternalBootstrapServers()); @@ -158,6 +188,9 @@ void setup() throws IOException { client.resources(Kafka.class).inAnyNamespace().delete(); client.resources(KafkaTopic.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); + + auditLogCapture.records().clear(); utils.apply(client, utils.buildKafkaResource(clusterName1, utils.getClusterId(), bootstrapServers1)); // Second cluster is offline/non-existent @@ -725,6 +758,47 @@ void testListTopicsWithNumPartitions() { .body("data.attributes.numPartitions", containsInAnyOrder(1, 2, 3, 4, 5, 6, 7, 8, 9)); } + + @ParameterizedTest + @CsvSource({ + "name, LIST", + // numPartitions requires an additional describe + "'name,numPartitions', LIST, GET" + }) + void testListTopicsWithAuditLogging(String fields, @AggregateWith(VarargsAggregator.class) Privilege... privilegesAudited) { + String topicName = UUID.randomUUID().toString(); + topicUtils.createTopics(clusterId1, List.of(topicName), 1); + + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(clusterConfig -> { + clusterConfig.setSecurity(new SecurityConfigBuilder() + .addNewAudit() + .withDecision(Audit.ALL) + .withResources(ResourceTypes.Kafka.TOPICS.value()) + .withPrivileges(privilegesAudited) + .endAudit() + .build()); + }); + + whenRequesting(req -> req + .queryParam("fields[topics]", fields) + .get("", clusterId1)) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(1)); + + var auditLogs = auditLogCapture.records(); + final String auditTmpl = "ANONYMOUS allowed console:kafkas/test-kafka1/topics:[%s]:[%s]"; + + assertThat(auditLogs, not(hasItem(hasProperty("message", containsString("denied"))))); + assertThat(auditLogs, hasItem(both(hasProperty("message", containsString(auditTmpl.formatted("", Privilege.LIST)))) + .and(hasProperty("level", equalTo(Level.INFO))))); + + for (var p : privilegesAudited) { + assertThat(auditLogs, hasItem(both(hasProperty("message", containsString(auditTmpl.formatted(topicName, p)))) + .and(hasProperty("level", equalTo(Level.INFO))))); + } + } + @Test void testListTopicsWithManagedTopic() { String topic1 = "t1-" + UUID.randomUUID().toString(); diff --git a/api/src/test/java/com/github/streamshub/console/api/TopicsResourceOidcIT.java b/api/src/test/java/com/github/streamshub/console/api/TopicsResourceOidcIT.java new file mode 100644 index 000000000..0905a61df --- /dev/null +++ b/api/src/test/java/com/github/streamshub/console/api/TopicsResourceOidcIT.java @@ -0,0 +1,557 @@ +package com.github.streamshub.console.api; + +import java.io.IOException; +import java.net.URI; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; + +import jakarta.inject.Inject; +import jakarta.inject.Named; +import jakarta.json.Json; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response.Status; + +import org.eclipse.microprofile.config.Config; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.aggregator.AggregateWith; +import org.junit.jupiter.params.provider.CsvSource; + +import com.github.streamshub.console.api.security.ConsoleAuthenticationMechanism; +import com.github.streamshub.console.api.support.KafkaContext; +import com.github.streamshub.console.config.ConsoleConfig; +import com.github.streamshub.console.config.KafkaClusterConfig; +import com.github.streamshub.console.config.security.Audit; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.ResourceTypes; +import com.github.streamshub.console.config.security.SecurityConfigBuilder; +import com.github.streamshub.console.kafka.systemtest.TestPlainProfile; +import com.github.streamshub.console.kafka.systemtest.deployment.DeploymentManager; +import com.github.streamshub.console.kafka.systemtest.utils.ConsumerUtils; +import com.github.streamshub.console.kafka.systemtest.utils.TokenUtils; +import com.github.streamshub.console.test.LogCapture; +import com.github.streamshub.console.test.TestHelper; +import com.github.streamshub.console.test.TopicHelper; +import com.github.streamshub.console.test.VarargsAggregator; + +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.informers.cache.Cache; +import io.quarkus.test.common.http.TestHTTPEndpoint; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.topic.KafkaTopic; + +import static com.github.streamshub.console.test.TestHelper.whenRequesting; +import static org.awaitility.Awaitility.await; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +@QuarkusTest +@TestHTTPEndpoint(TopicsResource.class) +@TestProfile(TestPlainProfile.class) +class TopicsResourceOidcIT { + + static LogCapture auditLogCapture = LogCapture.with(logRecord -> logRecord + .getLoggerName() + .equals(ConsoleAuthenticationMechanism.class.getName()), + Level.INFO); + + @Inject + Config config; + + @Inject + ConsoleConfig consoleConfig; + + @Inject + KubernetesClient client; + + @Inject + Map configuredContexts; + + @Inject + @Named("KafkaTopics") + Map>> managedTopics; + + @DeploymentManager.InjectDeploymentManager + DeploymentManager deployments; + + TestHelper utils; + TopicHelper topicUtils; + ConsumerUtils groupUtils; + TokenUtils tokens; + + final String clusterName1 = "test-kafka1"; + String clusterId1; + URI bootstrapServers1; + String clusterId2; + + @BeforeAll + static void initialize() { + auditLogCapture.register(); + } + + @AfterAll + static void cleanup() { + auditLogCapture.deregister(); + } + + @BeforeEach + void setup() throws IOException { + bootstrapServers1 = URI.create(deployments.getExternalBootstrapServers()); + URI randomBootstrapServers = URI.create(consoleConfig.getKafka() + 
.getCluster("default/test-kafka2") + .map(k -> k.getProperties().get("bootstrap.servers")) + .orElseThrow()); + + topicUtils = new TopicHelper(bootstrapServers1, config, null); + topicUtils.deleteAllTopics(); + + groupUtils = new ConsumerUtils(config, null); + + utils = new TestHelper(bootstrapServers1, config, null); + tokens = new TokenUtils(config); + + client.resources(Kafka.class).inAnyNamespace().delete(); + client.resources(KafkaTopic.class).inAnyNamespace().delete(); + consoleConfig.clearSecurity(); + + var kafka1 = utils.apply(client, utils.buildKafkaResource(clusterName1, utils.getClusterId(), bootstrapServers1)); + // Second cluster is offline/non-existent + utils.apply(client, utils.buildKafkaResource("test-kafka2", UUID.randomUUID().toString(), randomBootstrapServers)); + + // Wait for the added cluster to be configured in the context map + await().atMost(10, TimeUnit.SECONDS) + .until(() -> configuredContexts.values() + .stream() + .map(KafkaContext::clusterConfig) + .map(KafkaClusterConfig::clusterKey) + .anyMatch(Cache.metaNamespaceKeyFunc(kafka1)::equals)); + + clusterId1 = consoleConfig.getKafka().getCluster("default/test-kafka1").get().getId(); + clusterId2 = consoleConfig.getKafka().getCluster("default/test-kafka2").get().getId(); + } + + @ParameterizedTest + @CsvSource({ + "alice, a", + // bob is on both teams, not used for this test + "susan, b", + }) + void testListTopicsWithForbiddenFieldsNull(String username, String team) { + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .addNewSubject() + .withClaim("groups") + .withInclude("team-b") + .withRoleNames("dev-b") + .endSubject() + .build()); + + /* + * Both teams may list all topics, but may only describe their own team's topics + * and may only list/get their own groups. 
+ */ + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("topics") + .withPrivileges(Privilege.LIST) + .endRule() + .addNewRule() + .withResources("topics") + .withResourceNames("a-*") + .withPrivileges(Privilege.GET) + .endRule() + .addNewRule() + .withResources("consumerGroups") + .withResourceNames("ga-*") + .withPrivileges(Privilege.LIST, Privilege.GET) + .endRule() + .endRole() + .addNewRole() + .withName("dev-b") + .addNewRule() + .withResources("topics") + .withPrivileges(Privilege.LIST) + .endRule() + .addNewRule() + .withResources("topics") + .withResourceNames("b-*") + .withPrivileges(Privilege.GET) + .endRule() + .addNewRule() + .withResources("consumerGroups") + .withResourceNames("gb-*") + .withPrivileges(Privilege.LIST, Privilege.GET) + .endRule() + .endRole() + .build()); + }); + + String topicA = "a-" + UUID.randomUUID().toString(); + String topicB = "b-" + UUID.randomUUID().toString(); + + String groupA = "ga-" + UUID.randomUUID().toString(); + String groupB = "gb-" + UUID.randomUUID().toString(); + + String clientA = "ca-" + UUID.randomUUID().toString(); + String clientB = "cb-" + UUID.randomUUID().toString(); + + String allowedTopic; + String allowedGroup; + String forbiddenTopic; + + if ("a".equals(team)) { + allowedTopic = topicA; + allowedGroup = groupA; + forbiddenTopic = topicB; + } else { + allowedTopic = topicB; + allowedGroup = groupB; + forbiddenTopic = topicA; + } + + try (var consumerA = groupUtils.consume(groupA, topicA, clientA, 2, false); + var consumerB = groupUtils.consume(groupB, topicB, clientB, 2, false)) { + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken(username)) + .queryParam("fields[topics]", "name,partitions,configs,consumerGroups") + .get("", clusterId1)) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", is(2)) + .body("data.attributes.name", containsInAnyOrder(topicA, topicB)) + .body("data.find { it.attributes.name == '" + allowedTopic + "'}.attributes", allOf( + hasEntry(is("partitions"), not(nullValue())), + hasEntry(is("configs"), not(nullValue())) + )) + .body("data.find { it.attributes.name == '" + allowedTopic + "'}.relationships", allOf( + hasEntry( + is("consumerGroups"), + hasEntry( + is("data"), + contains(allOf( + hasEntry(equalTo("type"), equalTo("consumerGroups")), + hasEntry(equalTo("id"), equalTo(allowedGroup)) + )) + ) + ) + )) + .body("data.find { it.attributes.name == '" + forbiddenTopic + "'}.attributes", allOf( + hasEntry(is("partitions"), nullValue()), + hasEntry(is("configs"), nullValue()) + )) + .body("data.find { it.attributes.name == '" + forbiddenTopic + "'}.relationships", allOf( + hasEntry(is("consumerGroups"), nullValue()) + )); + } + } + + + @ParameterizedTest + @CsvSource({ + "name, LIST", + // numPartitions requires an additional describe + "'name,numPartitions', LIST, GET" + }) + void testListTopicsWithAuditLogging(String fields, @AggregateWith(VarargsAggregator.class) Privilege... 
privilegesAudited) { + String allowedTopic = "a-" + UUID.randomUUID().toString(); + String deniedTopic = "b-" + UUID.randomUUID().toString(); + topicUtils.createTopics(clusterId1, List.of(allowedTopic, deniedTopic), 1); + + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .build()); + + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(clusterConfig -> { + clusterConfig.setSecurity(new SecurityConfigBuilder() + .addNewAudit() + .withDecision(Audit.ALL) + .withResources(ResourceTypes.Kafka.TOPICS.value()) + .withPrivileges(privilegesAudited) + .endAudit() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("topics") + .withResourceNames("a-*") + .withPrivileges(Privilege.LIST, Privilege.GET) + .endRule() + .endRole() + .build()); + }); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .queryParam("fields[topics]", fields) + .get("", clusterId1)) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.size()", equalTo(1)); + + var auditLogs = auditLogCapture.records(); + final String auditTmpl = "alice %s console:kafkas/test-kafka1/topics:[%s]:[%s]"; + + assertThat(auditLogs, hasItem(both(hasProperty("message", containsString( + auditTmpl.formatted("allowed", "", Privilege.LIST)))) + .and(hasProperty("level", equalTo(Level.INFO))))); + assertThat(auditLogs, hasItem(both(hasProperty("message", containsString( + auditTmpl.formatted("denied", deniedTopic, Privilege.LIST)))) + .and(hasProperty("level", equalTo(Level.INFO))))); + + for (var p : privilegesAudited) { + assertThat(auditLogs, hasItem(both(hasProperty("message", containsString(auditTmpl.formatted("allowed", allowedTopic, p)))) + .and(hasProperty("level", equalTo(Level.INFO))))); + } + } + + @ParameterizedTest + @CsvSource({ + "alice, a", + // bob is on both teams, not used for this test + "susan, b", + }) + void testDescribeTopicWithForbiddenFieldsNull(String username, String team) { + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .addNewSubject() + .withClaim("groups") + .withInclude("team-b") + .withRoleNames("dev-b") + .endSubject() + .build()); + + /* + * Both teams may only describe their own topics. 
+ */ + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("topics") + .withResourceNames("a-*") + .withPrivileges(Privilege.GET) + .endRule() + .endRole() + .addNewRole() + .withName("dev-b") + .addNewRule() + .withResources("topics") + .withResourceNames("b-*") + .withPrivileges(Privilege.GET) + .endRule() + .endRole() + .build()); + }); + + String topicA = "a-" + UUID.randomUUID().toString(); + String topicB = "b-" + UUID.randomUUID().toString(); + var topicIds = topicUtils.createTopics(clusterId1, List.of(topicA, topicB), 1); + + String allowedTopic; + String forbiddenTopic; + + if ("a".equals(team)) { + allowedTopic = topicA; + forbiddenTopic = topicB; + } else { + allowedTopic = topicB; + forbiddenTopic = topicA; + } + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken(username)) + .queryParam("fields[topics]", "name,partitions,configs") + .get("{topicId}", clusterId1, topicIds.get(allowedTopic))) + .assertThat() + .statusCode(is(Status.OK.getStatusCode())) + .body("data.attributes.name", is(allowedTopic)) + .body("data.attributes", allOf( + hasEntry(is("partitions"), not(nullValue())), + hasEntry(is("configs"), not(nullValue())) + )); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken(username)) + .queryParam("fields[topics]", "name,partitions,configs") + .get("{topicId}", clusterId1, topicIds.get(forbiddenTopic))) + .assertThat() + .statusCode(is(Status.FORBIDDEN.getStatusCode())); + } + + @ParameterizedTest + @CsvSource({ + "a-, CREATED", + "b-, FORBIDDEN", + }) + void testCreateTopicWithAuthorization(String topicPrefix, Status expectedStatus) { + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .build()); + + // alice's team may only create topics starting with `a-` + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("topics") + .withResourceNames("a-*") + .withPrivileges(Privilege.CREATE) + .endRule() + .endRole() + .build()); + }); + + String topicName = topicPrefix + UUID.randomUUID().toString(); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .body(Json.createObjectBuilder() + .add("data", Json.createObjectBuilder() + .add("type", "topics") + .add("attributes", Json.createObjectBuilder() + .add("name", topicName) + .add("numPartitions", 3) + .add("replicationFactor", 1))) + .build() + .toString()) + .post("", clusterId1)) + .assertThat() + .statusCode(is(expectedStatus.getStatusCode())); + } + + @ParameterizedTest + @CsvSource({ + "a-, NO_CONTENT", + "b-, FORBIDDEN", + }) + void testDeleteTopicWithAuthorization(String topicPrefix, Status expectedStatus) { + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .build()); + + // alice's team may only delete topics starting with `a-` + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("topics") + .withResourceNames("a-*") + .withPrivileges(Privilege.DELETE) + .endRule() + .endRole() + .build()); + 
}); + + String topicName = topicPrefix + UUID.randomUUID().toString(); + Map topicIds = topicUtils.createTopics(clusterId1, List.of(topicName), 2); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .delete("{topicId}", clusterId1, topicIds.get(topicName))) + .assertThat() + .statusCode(is(expectedStatus.getStatusCode())); + } + + @ParameterizedTest + @CsvSource({ + "a-, NO_CONTENT", + "b-, FORBIDDEN", + }) + void testPatchTopicWithAuthorization(String topicPrefix, Status expectedStatus) { + consoleConfig.setSecurity(utils.oidcSecurity() + .addNewSubject() + .withClaim("groups") + .withInclude("team-a") + .withRoleNames("dev-a") + .endSubject() + .build()); + + // alice's team may only update topics starting with `a-` + // UPDATE requires GET: old version of topic required for validations + consoleConfig.getKafka().getClusterById(clusterId1).ifPresent(cfg -> { + cfg.setSecurity(new SecurityConfigBuilder() + .addNewRole() + .withName("dev-a") + .addNewRule() + .withResources("topics") + .withResourceNames("a-*") + .withPrivileges(Privilege.GET, Privilege.UPDATE) + .endRule() + .endRole() + .build()); + }); + + String topicName = topicPrefix + UUID.randomUUID().toString(); + Map topicIds = topicUtils.createTopics(clusterId1, List.of(topicName), 1); + + whenRequesting(req -> req + .auth() + .oauth2(tokens.getToken("alice")) + .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .body(Json.createObjectBuilder() + .add("data", Json.createObjectBuilder() + .add("id", topicIds.get(topicName)) + .add("type", "topics") + .add("attributes", Json.createObjectBuilder() + .add("numPartitions", 2) // adding partition + ) + ) + .build() + .toString()) + .patch("{topicId}", clusterId1, topicIds.get(topicName))) + .assertThat() + .statusCode(is(expectedStatus.getStatusCode())); + } + +} diff --git a/api/src/test/java/com/github/streamshub/console/kafka/systemtest/TestPlainProfile.java b/api/src/test/java/com/github/streamshub/console/kafka/systemtest/TestPlainProfile.java index 7310a2f75..a8422d0e2 100644 --- a/api/src/test/java/com/github/streamshub/console/kafka/systemtest/TestPlainProfile.java +++ b/api/src/test/java/com/github/streamshub/console/kafka/systemtest/TestPlainProfile.java @@ -5,10 +5,12 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.StandardOpenOption; +import java.util.Collections; import java.util.List; import java.util.Map; import com.github.streamshub.console.kafka.systemtest.deployment.KafkaUnsecuredResourceManager; +import com.github.streamshub.console.kafka.systemtest.deployment.KeycloakResourceManager; import com.github.streamshub.console.kafka.systemtest.deployment.StrimziCrdResourceManager; import io.quarkus.test.junit.QuarkusTestProfile; @@ -19,6 +21,14 @@ public class TestPlainProfile implements QuarkusTestProfile { public static final int MAX_PARTITIONS = 100; public static final int EXCESSIVE_PARTITIONS = 101; + static { + /* + * Requires JDK 11.0.4+. If the `Host` header is not set, Keycloak will + * generate tokens with an issuer URI containing localhost:. 
+ */ + System.setProperty("jdk.httpclient.allowRestrictedHeaders", "host"); + } + @Override public String getConfigProfile() { return PROFILE; @@ -27,8 +37,9 @@ public String getConfigProfile() { @Override public List testResources() { return List.of( - new TestResourceEntry(StrimziCrdResourceManager.class), - new TestResourceEntry(KafkaUnsecuredResourceManager.class, Map.of("profile", PROFILE))); + new TestResourceEntry(StrimziCrdResourceManager.class, Collections.emptyMap(), true), + new TestResourceEntry(KeycloakResourceManager.class, Collections.emptyMap(), true), + new TestResourceEntry(KafkaUnsecuredResourceManager.class, Map.of("profile", PROFILE), true)); } @Override diff --git a/api/src/test/java/com/github/streamshub/console/kafka/systemtest/deployment/KeycloakResourceManager.java b/api/src/test/java/com/github/streamshub/console/kafka/systemtest/deployment/KeycloakResourceManager.java new file mode 100644 index 000000000..407797fc9 --- /dev/null +++ b/api/src/test/java/com/github/streamshub/console/kafka/systemtest/deployment/KeycloakResourceManager.java @@ -0,0 +1,58 @@ +package com.github.streamshub.console.kafka.systemtest.deployment; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.time.Duration; +import java.util.Map; + +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.builder.Transferable; + +import io.quarkus.test.common.QuarkusTestResourceLifecycleManager; + +public class KeycloakResourceManager implements QuarkusTestResourceLifecycleManager { + + GenericContainer keycloak; + + @Override + @SuppressWarnings("resource") + public Map start() { + byte[] realmConfig; + + try (InputStream stream = getClass().getResourceAsStream("/keycloak/console-realm.json")) { + realmConfig = stream.readAllBytes(); + } catch (IOException ioe) { + throw new UncheckedIOException(ioe); + } + + keycloak = new GenericContainer<>("quay.io/keycloak/keycloak:26.0") + .withLogConsumer(new Slf4jLogConsumer(LoggerFactory.getLogger("systemtests.keycloak"), true)) + .withExposedPorts(8080) + .withEnv(Map.of( + "KC_BOOTSTRAP_ADMIN_USERNAME", "admin", + "KC_BOOTSTRAP_ADMIN_PASSWORD", "admin", + "PROXY_ADDRESS_FORWARDING", "true")) + .withCopyToContainer( + Transferable.of(realmConfig), + "/opt/keycloak/data/import/console-realm.json") + .withCommand("start", "--hostname=localhost", "--http-enabled=true", "--import-realm") + .waitingFor(Wait.forHttp("/realms/console-authz").withStartupTimeout(Duration.ofMinutes(1))); + + keycloak.start(); + + String urlTemplate = "http://localhost:%d/realms/console-authz"; + var oidcUrl = urlTemplate.formatted(keycloak.getMappedPort(8080)); + return Map.of( + "console.test.oidc-url", oidcUrl, + "console.test.oidc-issuer", urlTemplate.formatted(8080)); + } + + @Override + public void stop() { + keycloak.stop(); + } +} diff --git a/api/src/test/java/com/github/streamshub/console/kafka/systemtest/utils/TokenUtils.java b/api/src/test/java/com/github/streamshub/console/kafka/systemtest/utils/TokenUtils.java index 9788914bd..9af0a021f 100644 --- a/api/src/test/java/com/github/streamshub/console/kafka/systemtest/utils/TokenUtils.java +++ b/api/src/test/java/com/github/streamshub/console/kafka/systemtest/utils/TokenUtils.java @@ -6,7 +6,6 @@ import java.net.http.HttpClient; import java.net.http.HttpRequest; import 
java.net.http.HttpResponse; -import java.net.http.HttpResponse.BodyHandlers; import java.util.UUID; import jakarta.json.Json; @@ -14,7 +13,6 @@ import jakarta.json.JsonReader; import jakarta.ws.rs.core.HttpHeaders; -import org.apache.kafka.common.config.SaslConfigs; import org.eclipse.microprofile.config.Config; import io.restassured.http.Header; @@ -24,7 +22,7 @@ public class TokenUtils { final String tokenEndpoint; public TokenUtils(Config config) { - this.tokenEndpoint = config.getValue(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, String.class); + this.tokenEndpoint = config.getValue("console.test.oidc-url", String.class) + "/protocol/openid-connect/token"; } public Header authorizationHeader(String username) { @@ -44,30 +42,33 @@ public String getToken(String username) { } public JsonObject getTokenObject(String username) { - final String payload = String.format("grant_type=password&username=%1$s&password=%1$s-password&client_id=kafka-cli", username); + final String form = String.format("grant_type=password&" + + "username=%1$s&" + + "password=%1$s-password&" + + "client_id=console-client", username); - /* - * Requires JDK 11.0.4+. If the `Host` header is not set, Keycloak will - * generate tokens with an issuer URI containing localhost:. - */ - System.setProperty("jdk.httpclient.allowRestrictedHeaders", "host"); + HttpClient client = HttpClient.newBuilder().build(); HttpRequest request = HttpRequest.newBuilder() .uri(URI.create(tokenEndpoint)) - .header("Host", "keycloak:8080") + .header("Host", "localhost:8080") .header("Content-Type", "application/x-www-form-urlencoded") - .POST(HttpRequest.BodyPublishers.ofString(payload)) + .POST(HttpRequest.BodyPublishers.ofString(form)) .build(); try { - HttpResponse response = HttpClient - .newBuilder() - .build() - .send(request, BodyHandlers.ofString()); + HttpResponse response = client.send(request, java.net.http.HttpResponse.BodyHandlers.ofString()); + JsonObject payload; try (JsonReader reader = Json.createReader(new StringReader(response.body()))) { - return reader.readObject(); + payload = reader.readObject(); } + + if (response.statusCode() != 200) { + throw new RuntimeException(payload.toString()); + } + + return payload; } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } diff --git a/api/src/test/java/com/github/streamshub/console/test/LogCapture.java b/api/src/test/java/com/github/streamshub/console/test/LogCapture.java new file mode 100644 index 000000000..5aa9f9748 --- /dev/null +++ b/api/src/test/java/com/github/streamshub/console/test/LogCapture.java @@ -0,0 +1,93 @@ +package com.github.streamshub.console.test; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogManager; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +public class LogCapture { + + private static final Logger ROOT; + + private final InMemoryLogHandler inMemoryLogHandler; + + static { + ROOT = LogManager.getLogManager().getLogger(""); + } + + public static LogCapture none() { + return new LogCapture(); + } + + public static LogCapture with(Predicate predicate) { + return LogCapture.with(predicate, Level.INFO); + } + + public static LogCapture with(Predicate predicate, Level logLevel) { + LogCapture capture = new LogCapture(predicate); + return capture.setLevel(logLevel); + } + + private LogCapture() { + // Capture nothing by default + inMemoryLogHandler = new 
InMemoryLogHandler(r -> false); + } + + private LogCapture(Predicate predicate) { + inMemoryLogHandler = new InMemoryLogHandler(predicate); + } + + public void register() { + ROOT.addHandler(inMemoryLogHandler); + } + + public void deregister() { + ROOT.removeHandler(inMemoryLogHandler); + } + + private LogCapture setLevel(Level newLevel) { + ROOT.setLevel(newLevel); + inMemoryLogHandler.setLevel(newLevel); + return this; + } + + public List records() { + return inMemoryLogHandler.records; + } + + private static class InMemoryLogHandler extends Handler { + + InMemoryLogHandler(Predicate predicate) { + if (predicate == null) { + throw new IllegalArgumentException("Parameter 'predicate' may not be null"); + } + setFilter(predicate::test); + setLevel(Level.FINE); + } + + final List records = new ArrayList<>(); + + @Override + public void publish(LogRecord rec) { + if (!isLoggable(rec)) { + return; + } + + records.add(rec); + } + + @Override + public void flush() { + // Nothing to flush + } + + @Override + public void close() throws SecurityException { + records.clear(); + } + } +} diff --git a/api/src/test/java/com/github/streamshub/console/test/TestHelper.java b/api/src/test/java/com/github/streamshub/console/test/TestHelper.java index afc1935bb..c4be837a0 100644 --- a/api/src/test/java/com/github/streamshub/console/test/TestHelper.java +++ b/api/src/test/java/com/github/streamshub/console/test/TestHelper.java @@ -18,6 +18,7 @@ import org.jboss.logging.Logger; import com.github.streamshub.console.api.Annotations; +import com.github.streamshub.console.config.security.GlobalSecurityConfigBuilder; import com.github.streamshub.console.kafka.systemtest.utils.ClientsConfig; import io.fabric8.kubernetes.client.CustomResource; @@ -164,4 +165,14 @@ public static ValidatableResponse whenRequesting(Function parameterType = context.getParameter().getType(); + Class componentType = parameterType.getComponentType(); + + return IntStream.range(context.getIndex(), accessor.size()) + .mapToObj(index -> accessor.get(index, componentType)) + .toArray(size -> (Object[]) Array.newInstance(componentType, size)); + } +} diff --git a/api/src/test/resources/keycloak/console-realm.json b/api/src/test/resources/keycloak/console-realm.json new file mode 100644 index 000000000..18d2e667b --- /dev/null +++ b/api/src/test/resources/keycloak/console-realm.json @@ -0,0 +1,131 @@ +{ + "realm": "console-authz", + "accessTokenLifespan": 60, + "ssoSessionIdleTimeout": 864000, + "ssoSessionMaxLifespan": 864000, + "enabled": true, + "sslRequired": "external", + "roles": { + "realm": [ + { + "name": "Dev Team A", + "description": "Developer on Dev Team A" + }, + { + "name": "Dev Team B", + "description": "Developer on Dev Team B" + }, + { + "name": "Ops Team", + "description": "Operations team member" + } + ], + "client": { + "console-client": [] + } + }, + "groups" : [ + { + "name" : "team-a", + "path" : "/team-a" + }, { + "name" : "team-b", + "path" : "/team-b" + }, { + "name" : "team-c", + "path" : "/team-c" + } + ], + "users": [ + { + "username" : "alice", + "createdTimestamp": 1, + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "Alice", + "lastName" : "User", + "email" : "alice@streamshub.io", + "credentials" : [ { + "type" : "password", + "secretData" : "{\"value\":\"KqABIiReBuRWbP4pBct3W067pNvYzeN7ILBV+8vT8nuF5cgYs2fdl2QikJT/7bGTW/PBXg6CYLwJQFYrBK9MWg==\",\"salt\":\"EPgscX9CQz7UnuZDNZxtMw==\"}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\"}" + } ], + 
"disableableCredentialTypes" : [ ], + "realmRoles" : [ "offline_access", "uma_authorization" ], + "clientRoles" : { + "account" : [ "view-profile", "manage-account" ] + }, + "groups" : [ "/team-a", "/team-c" ] + }, { + "username" : "susan", + "createdTimestamp": 1, + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "Susan", + "lastName" : "User", + "email" : "susan@streamshub.io", + "credentials" : [ { + "type" : "password", + "value" : "susan-password" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "offline_access", "uma_authorization" ], + "clientRoles" : { + "account" : [ "view-profile", "manage-account" ] + }, + "groups" : [ "/team-b" ] + }, { + "username" : "bob", + "createdTimestamp": 1, + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "Bob", + "lastName" : "User", + "email" : "bob@streamshub.io", + "credentials" : [ { + "type" : "password", + "secretData" : "{\"value\":\"QhK0uLsKuBDrMm9Z9XHvq4EungecFRnktPgutfjKtgVv2OTPd8D390RXFvJ8KGvqIF8pdoNxHYQyvDNNwMORpg==\",\"salt\":\"yxkgwEyTnCGLn42Yr9GxBQ==\"}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\"}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "offline_access", "uma_authorization" ], + "clientRoles" : { + "account" : [ "view-profile", "manage-account" ] + }, + "groups" : [ "/team-a", "/team-b" ] + } + ], + "clients": [ + { + "clientId": "console-client", + "enabled": true, + "clientAuthenticatorType": "client-secret", + "secret": "console-client-secret", + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "fullScopeAllowed": true, + "protocolMappers": [{ + "name": "Groups Mapper", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups" + } + }] + } + ] +} \ No newline at end of file diff --git a/common/pom.xml b/common/pom.xml index 4b5c95b8a..e0e7c238e 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -37,6 +37,10 @@ io.xlate validators + + io.sundr + builder-annotations + org.junit.jupiter @@ -54,6 +58,11 @@ test true + + org.hamcrest + hamcrest-core + test + diff --git a/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java b/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java index 75717bc3c..fb28ea99a 100644 --- a/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java +++ b/common/src/main/java/com/github/streamshub/console/config/ConsoleConfig.java @@ -9,7 +9,12 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.github.streamshub.console.config.security.GlobalSecurityConfig; +import com.github.streamshub.console.config.security.ResourceTypes; +import com.github.streamshub.console.config.security.ResourceTypes.ValidResourceTypes; +import com.github.streamshub.console.config.security.SecurityConfig; +import io.sundr.builder.annotations.Buildable; import io.xlate.validation.constraints.Expression; @Expression( @@ -35,10 +40,15 @@ .allMatch(registry -> registryNames.contains(registry)) """) 
@JsonInclude(Include.NON_NULL) +@Buildable(editableEnabled = false) public class ConsoleConfig { KubernetesConfig kubernetes = new KubernetesConfig(); + @Valid + @ValidResourceTypes(type = ResourceTypes.Global.class) + GlobalSecurityConfig security = new GlobalSecurityConfig(); + @Valid List metricsSources = new ArrayList<>(); @@ -60,6 +70,27 @@ public boolean hasUniqueRegistryNames() { return Named.uniqueNames(schemaRegistries); } + /** + * Specifying security subjects local to a Kafka cluster is not allowed when global OIDC + * security is enabled. + */ + @JsonIgnore + @AssertTrue(message = "Security subjects must not be specified for Kafka clusters when OIDC security is used") + public boolean isWithoutOidcOrKafkaClusterSubjectsEmpty() { + if (security.getOidc() == null) { + return true; + } + + return kafka.getClusters().stream().allMatch(k -> k.getSecurity().getSubjects().isEmpty()); + } + + // testing + @JsonIgnore + public void clearSecurity() { + security = new GlobalSecurityConfig(); + kafka.getClusters().forEach(k -> k.setSecurity(new SecurityConfig())); + } + public KubernetesConfig getKubernetes() { return kubernetes; } @@ -68,6 +99,14 @@ public void setKubernetes(KubernetesConfig kubernetes) { this.kubernetes = kubernetes; } + public GlobalSecurityConfig getSecurity() { + return security; + } + + public void setSecurity(GlobalSecurityConfig security) { + this.security = security; + } + public List getMetricsSources() { return metricsSources; } diff --git a/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java b/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java index 7197764ee..a51dfb20e 100644 --- a/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java +++ b/common/src/main/java/com/github/streamshub/console/config/KafkaClusterConfig.java @@ -3,13 +3,20 @@ import java.util.LinkedHashMap; import java.util.Map; +import jakarta.validation.Valid; import jakarta.validation.constraints.NotBlank; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.github.streamshub.console.config.security.ResourceTypes; +import com.github.streamshub.console.config.security.SecurityConfig; +import com.github.streamshub.console.config.security.ResourceTypes.ValidResourceTypes; + +import io.sundr.builder.annotations.Buildable; @JsonInclude(Include.NON_NULL) +@Buildable(editableEnabled = false) public class KafkaClusterConfig implements Named { private String id; @@ -17,6 +24,9 @@ public class KafkaClusterConfig implements Named { private String name; private String namespace; private String listener; + @Valid + @ValidResourceTypes(type = ResourceTypes.Kafka.class) + private SecurityConfig security = new SecurityConfig(); /** * Name of a configured metrics source used by this Kafka cluster */ @@ -66,6 +76,14 @@ public void setNamespace(String namespace) { this.namespace = namespace; } + public SecurityConfig getSecurity() { + return security; + } + + public void setSecurity(SecurityConfig security) { + this.security = security; + } + public String getListener() { return listener; } diff --git a/common/src/main/java/com/github/streamshub/console/config/KafkaConfig.java b/common/src/main/java/com/github/streamshub/console/config/KafkaConfig.java index 7c16ca926..0d1d26930 100644 --- a/common/src/main/java/com/github/streamshub/console/config/KafkaConfig.java +++ 
b/common/src/main/java/com/github/streamshub/console/config/KafkaConfig.java @@ -11,7 +11,10 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; +import io.sundr.builder.annotations.Buildable; + @JsonInclude(Include.NON_NULL) +@Buildable(editableEnabled = false) public class KafkaConfig { @Valid @@ -30,6 +33,13 @@ public Optional getCluster(String clusterKey) { .findFirst(); } + @JsonIgnore + public Optional getClusterById(String clusterId) { + return clusters.stream() + .filter(k -> clusterId.equals(k.getId())) + .findFirst(); + } + public List getClusters() { return clusters; } diff --git a/common/src/main/java/com/github/streamshub/console/config/KubernetesConfig.java b/common/src/main/java/com/github/streamshub/console/config/KubernetesConfig.java index 38229655c..6cf0311c3 100644 --- a/common/src/main/java/com/github/streamshub/console/config/KubernetesConfig.java +++ b/common/src/main/java/com/github/streamshub/console/config/KubernetesConfig.java @@ -1,5 +1,8 @@ package com.github.streamshub.console.config; +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) public class KubernetesConfig { boolean enabled = true; diff --git a/common/src/main/java/com/github/streamshub/console/config/SchemaRegistryConfig.java b/common/src/main/java/com/github/streamshub/console/config/SchemaRegistryConfig.java index 999459091..e191f0694 100644 --- a/common/src/main/java/com/github/streamshub/console/config/SchemaRegistryConfig.java +++ b/common/src/main/java/com/github/streamshub/console/config/SchemaRegistryConfig.java @@ -5,7 +5,10 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; +import io.sundr.builder.annotations.Buildable; + @JsonInclude(Include.NON_NULL) +@Buildable(editableEnabled = false) public class SchemaRegistryConfig implements Named { @NotBlank(message = "Schema registry `name` is required") diff --git a/common/src/main/java/com/github/streamshub/console/config/security/Audit.java b/common/src/main/java/com/github/streamshub/console/config/security/Audit.java new file mode 100644 index 000000000..e682374ab --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/Audit.java @@ -0,0 +1,39 @@ +package com.github.streamshub.console.config.security; + +import java.util.Locale; + +import com.fasterxml.jackson.annotation.JsonCreator; + +public enum Audit { + + ALLOWED { + @Override + public boolean logResult(boolean allowed) { + return allowed; + } + }, + + DENIED { + @Override + public boolean logResult(boolean allowed) { + return !allowed; + } + }, + + ALL { + @Override + public boolean logResult(boolean allowed) { + return true; + } + }; + + public abstract boolean logResult(boolean allowed); + + @JsonCreator + public static Audit forValue(String value) { + if ("*".equals(value)) { + return ALL; + } + return valueOf(value.toUpperCase(Locale.ROOT)); + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/AuditConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/AuditConfig.java new file mode 100644 index 000000000..a7f3758ec --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/AuditConfig.java @@ -0,0 +1,17 @@ +package com.github.streamshub.console.config.security; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class AuditConfig extends RuleConfig { + + 
Audit decision; + + public Audit getDecision() { + return decision; + } + + public void setDecision(Audit decision) { + this.decision = decision; + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java new file mode 100644 index 000000000..a2602deb4 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/GlobalSecurityConfig.java @@ -0,0 +1,20 @@ +package com.github.streamshub.console.config.security; + +import jakarta.validation.Valid; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class GlobalSecurityConfig extends SecurityConfig { + + @Valid + private OidcConfig oidc; + + public OidcConfig getOidc() { + return oidc; + } + + public void setOidc(OidcConfig oidc) { + this.oidc = oidc; + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java new file mode 100644 index 000000000..c916e623e --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/OidcConfig.java @@ -0,0 +1,59 @@ +package com.github.streamshub.console.config.security; + +import jakarta.validation.constraints.NotBlank; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class OidcConfig { + + private String tenantId = "streamshub-console"; + @NotBlank + private String authServerUrl; + private String issuer; + @NotBlank + private String clientId; + @NotBlank + private String clientSecret; + + public String getTenantId() { + return tenantId; + } + + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } + + public String getAuthServerUrl() { + return authServerUrl; + } + + public void setAuthServerUrl(String authServerUrl) { + this.authServerUrl = authServerUrl; + } + + public String getIssuer() { + return issuer; + } + + public void setIssuer(String issuer) { + this.issuer = issuer; + } + + public String getClientId() { + return clientId; + } + + public void setClientId(String clientId) { + this.clientId = clientId; + } + + public String getClientSecret() { + return clientSecret; + } + + public void setClientSecret(String clientSecret) { + this.clientSecret = clientSecret; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/Privilege.java b/common/src/main/java/com/github/streamshub/console/config/security/Privilege.java new file mode 100644 index 000000000..d18dc8c72 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/Privilege.java @@ -0,0 +1,41 @@ +package com.github.streamshub.console.config.security; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Locale; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import com.fasterxml.jackson.annotation.JsonCreator; + +public enum Privilege { + + CREATE, + DELETE, + GET, + LIST, + UPDATE, + ALL { + @Override + public Set expand() { + return ALL_EXPANDED; + } + }; + + private static final Set ALL_EXPANDED = Arrays.stream(Privilege.values()) + .filter(Predicate.not(ALL::equals)) + .collect(Collectors.toSet()); + + @JsonCreator + public static Privilege forValue(String value) { + if ("*".equals(value)) { + return ALL; + } + return valueOf(value.toUpperCase(Locale.ROOT)); + } + + public 
Set expand() { + return Collections.singleton(this); + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/ResourceTypes.java b/common/src/main/java/com/github/streamshub/console/config/security/ResourceTypes.java new file mode 100644 index 000000000..46290d5d7 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/ResourceTypes.java @@ -0,0 +1,179 @@ +package com.github.streamshub.console.config.security; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import jakarta.validation.Constraint; +import jakarta.validation.ConstraintValidator; +import jakarta.validation.ConstraintValidatorContext; +import jakarta.validation.ConstraintValidatorContext.ConstraintViolationBuilder.ContainerElementNodeBuilderDefinedContext; +import jakarta.validation.Payload; + +public class ResourceTypes { + + interface ResourceType { + String value(); + + default Set> expand() { + return Set.of(this); + } + } + + public static > E forValue(String value, Class type) { + for (var v : type.getEnumConstants()) { + if (v.value().equals(value)) { + return v; + } + } + return null; + } + + public enum Global implements ResourceType { + KAFKAS("kafkas"), + ALL("*") { + @Override + public Set> expand() { + return ALL_EXPANDED; + } + }; + + private static final Set> ALL_EXPANDED = Arrays.stream(Global.values()) + .filter(Predicate.not(ALL::equals)) + .collect(Collectors.toSet()); + + private String value; + + private Global(String value) { + this.value = value; + } + + @Override + public String value() { + return value; + } + } + + public enum Kafka implements ResourceType { + CONSUMER_GROUPS("consumerGroups"), + NODE_CONFIGS("nodes/configs"), + REBALANCES("rebalances"), + TOPICS("topics"), + TOPIC_RECORDS("topics/records"), + ALL("*") { + @Override + public Set> expand() { + return ALL_EXPANDED; + } + }; + + private static final Set> ALL_EXPANDED = Arrays.stream(Kafka.values()) + .filter(Predicate.not(ALL::equals)) + .collect(Collectors.toSet()); + + private String value; + + private Kafka(String value) { + this.value = value; + } + + @Override + public String value() { + return value; + } + } + + @Target(ElementType.FIELD) + @Retention(RetentionPolicy.RUNTIME) + @Constraint(validatedBy = ValidResourceTypes.Validator.class) + @Documented + public @interface ValidResourceTypes { + static final String MESSAGE = "Invalid resource"; + + String message() default MESSAGE; + + Class[] groups() default {}; + + Class[] payload() default {}; + + @SuppressWarnings("rawtypes") + Class type(); + + static class Validator implements ConstraintValidator { + @SuppressWarnings("rawtypes") + private Class type; + + @Override + public void initialize(ValidResourceTypes constraintAnnotation) { + this.type = constraintAnnotation.type(); + } + + @Override + public boolean isValid(SecurityConfig value, ConstraintValidatorContext context) { + AtomicBoolean valid = new AtomicBoolean(true); + int i = -1; + + for (var role : value.getRoles()) { + i++; + int j = -1; + for (var rule : role.getRules()) { + j++; + + // Setup a violation builder in case it is needed + var builder = context.buildConstraintViolationWithTemplate(MESSAGE) + 
.addContainerElementNode("roles", List.class, 0) + .addContainerElementNode("rules", List.class, 0) + .inIterable().atIndex(i) + .addContainerElementNode("resources", List.class, 0) + .inIterable().atIndex(j); + + validate(rule, context, valid, builder); + } + } + + i = -1; + for (var auditRule : value.getAudit()) { + i++; + + // Setup a violation builder in case it is needed + var builder = context.buildConstraintViolationWithTemplate(MESSAGE) + .addContainerElementNode("audit", List.class, 0) + .addContainerElementNode("resources", List.class, 0) + .inIterable().atIndex(i); + + validate(auditRule, context, valid, builder); + } + + return valid.get(); + } + + @SuppressWarnings("unchecked") + private void validate(RuleConfig value, ConstraintValidatorContext context, AtomicBoolean valid, ContainerElementNodeBuilderDefinedContext builder) { + int i = -1; + + for (var resource : value.getResources()) { + i++; + if (ResourceTypes.forValue(resource, type) == null) { + valid.set(false); + context.disableDefaultConstraintViolation(); + + builder.addContainerElementNode("", List.class, 0) + .inIterable() + .atIndex(i); + + builder.addConstraintViolation(); + } + } + + } + } + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java new file mode 100644 index 000000000..03a6bf6ad --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/RoleConfig.java @@ -0,0 +1,38 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +import jakarta.validation.Valid; +import jakarta.validation.constraints.NotBlank; +import jakarta.validation.constraints.NotEmpty; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class RoleConfig { + + @NotBlank + private String name; + + @Valid + @NotEmpty + private List rules = new ArrayList<>(); + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public List getRules() { + return rules; + } + + public void setRules(List rules) { + this.rules = rules; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java new file mode 100644 index 000000000..aae9d35a5 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/RuleConfig.java @@ -0,0 +1,54 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +import jakarta.validation.constraints.NotEmpty; +import jakarta.validation.constraints.NotNull; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class RuleConfig { + + /** + * Resources to which this rule applies (required) + */ + @NotEmpty + List<@NotNull String> resources = new ArrayList<>(); + + /** + * Specific resource names to which this rule applies (optional) + */ + List<@NotNull String> resourceNames = new ArrayList<>(); + + /** + * Privileges/actions that may be performed for subjects having this rule + */ + @NotEmpty + List<@NotNull Privilege> privileges = new ArrayList<>(); + + public List getResources() { + return resources; + } + + public void setResources(List resources) { + this.resources = resources; + } + + public List getResourceNames() { + return resourceNames; + } + + public 
void setResourceNames(List resourceNames) { + this.resourceNames = resourceNames; + } + + public List getPrivileges() { + return privileges; + } + + public void setPrivileges(List privileges) { + this.privileges = privileges; + } +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java new file mode 100644 index 000000000..bb60fc311 --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/SecurityConfig.java @@ -0,0 +1,46 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +import jakarta.validation.Valid; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class SecurityConfig { + + @Valid + private List subjects = new ArrayList<>(); + + @Valid + private List roles = new ArrayList<>(); + + @Valid + private List audit = new ArrayList<>(); + + public List getSubjects() { + return subjects; + } + + public void setSubjects(List subjects) { + this.subjects = subjects; + } + + public List getRoles() { + return roles; + } + + public void setRoles(List roles) { + this.roles = roles; + } + + public List getAudit() { + return audit; + } + + public void setAudit(List audit) { + this.audit = audit; + } + +} diff --git a/common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java b/common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java new file mode 100644 index 000000000..3aee5ae2f --- /dev/null +++ b/common/src/main/java/com/github/streamshub/console/config/security/SubjectConfig.java @@ -0,0 +1,46 @@ +package com.github.streamshub.console.config.security; + +import java.util.ArrayList; +import java.util.List; + +import jakarta.validation.constraints.NotEmpty; +import jakarta.validation.constraints.NotNull; + +import io.sundr.builder.annotations.Buildable; + +@Buildable(editableEnabled = false) +public class SubjectConfig { + + private String claim; + + @NotEmpty + private List<@NotNull String> include = new ArrayList<>(); + + @NotEmpty + private List<@NotNull String> roleNames = new ArrayList<>(); + + public String getClaim() { + return claim; + } + + public void setClaim(String claim) { + this.claim = claim; + } + + public List getInclude() { + return include; + } + + public void setInclude(List include) { + this.include = include; + } + + public List getRoleNames() { + return roleNames; + } + + public void setRoleNames(List roleNames) { + this.roleNames = roleNames; + } + +} diff --git a/common/src/test/java/com/github/streamshub/console/config/ConsoleConfigTest.java b/common/src/test/java/com/github/streamshub/console/config/ConsoleConfigTest.java index 41c81e829..82f6e73f1 100644 --- a/common/src/test/java/com/github/streamshub/console/config/ConsoleConfigTest.java +++ b/common/src/test/java/com/github/streamshub/console/config/ConsoleConfigTest.java @@ -10,6 +10,16 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import com.github.streamshub.console.config.security.Audit; +import com.github.streamshub.console.config.security.GlobalSecurityConfigBuilder; +import com.github.streamshub.console.config.security.Privilege; +import com.github.streamshub.console.config.security.ResourceTypes; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; 
+import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasProperty; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -159,4 +169,122 @@ void testMetricsSourceNamesUniquePassesValidation() { assertTrue(violations.isEmpty()); } + + @Test + void testKnownResourceTypesPassValidation() { + config.setSecurity(new GlobalSecurityConfigBuilder() + .addNewAudit() + .withDecision(Audit.ALLOWED) + .withResources(ResourceTypes.Global.KAFKAS.value()) + .withPrivileges(Privilege.forValue("*")) + .endAudit() + .addNewRole() + .withName("role1") + .addNewRule() + .withResources(ResourceTypes.Global.KAFKAS.value()) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .endRole() + .build()); + + config.getKafka().getClusters().add(new KafkaClusterConfigBuilder() + .withName("kafka1") + .withNewSecurity() + .addNewAudit() + .withDecision(Audit.ALLOWED) + .withResources(ResourceTypes.Kafka.ALL.value()) + .withPrivileges(Privilege.forValue("*")) + .endAudit() + .addNewRole() + .withName("role1") + .addNewRule() + .withResources(ResourceTypes.Kafka.ALL.value()) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .endRole() + .endSecurity() + .build()); + + var violations = validator.validate(config); + assertTrue(violations.isEmpty(), () -> String.valueOf(violations)); + } + + @Test + void testKnownResourceTypesFailValidation() { + String unknownResource = "unknown"; + + config.setSecurity(new GlobalSecurityConfigBuilder() + .addNewAudit() + .withDecision(Audit.ALLOWED) + .withResources( + ResourceTypes.Global.KAFKAS.value(), + unknownResource) + .withPrivileges(Privilege.forValue("*")) + .endAudit() + .addNewRole() + .withName("role1") + .addNewRule() + .withResources(ResourceTypes.Global.KAFKAS.value()) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .addNewRule() + .withResources( + unknownResource, + ResourceTypes.Global.KAFKAS.value()) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .endRole() + .build()); + + config.getKafka().getClusters().add(new KafkaClusterConfigBuilder() + .withName("kafka1") + .withNewSecurity() + .addNewAudit() + .withDecision(Audit.ALLOWED) + .withResources(ResourceTypes.Kafka.ALL.value()) + .withPrivileges(Privilege.forValue("CREATE")) + .endAudit() + .addNewAudit() + .withDecision(Audit.DENIED) + .withResources(unknownResource) + .withPrivileges(Privilege.forValue("DELETE")) + .endAudit() + .addNewAudit() + .withDecision(Audit.ALL) + .withResources(ResourceTypes.Kafka.CONSUMER_GROUPS.value(), unknownResource) + .withPrivileges(Privilege.forValue("UPDATE")) + .endAudit() + .addNewRole() + .withName("role1") + .addNewRule() + .withResources(ResourceTypes.Kafka.NODE_CONFIGS.value()) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .addNewRule() + .withResources(unknownResource, ResourceTypes.Kafka.ALL.value()) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .endRole() + .addNewRole() + .withName("role2") + .addNewRule() + .withResources(ResourceTypes.Kafka.CONSUMER_GROUPS.value(), unknownResource) + .withPrivileges(Privilege.forValue("*")) + .endRule() + .endRole() + .endSecurity() + .build()); + + var violations = validator.validate(config); + assertEquals(6, violations.size(), () -> String.valueOf(violations)); + assertThat(violations, everyItem(hasProperty("message", equalTo("Invalid resource")))); + + var propertyPaths = violations.stream().map(v -> v.getPropertyPath().toString()).toList(); + assertThat(propertyPaths, 
hasItem(equalTo("security.audit[0].resources[1]"))); + assertThat(propertyPaths, hasItem(equalTo("security.roles[0].rules[1].resources[0]"))); + assertThat(propertyPaths, hasItem(equalTo("kafka.clusters[0].security.audit[1].resources[0]"))); + assertThat(propertyPaths, hasItem(equalTo("kafka.clusters[0].security.audit[2].resources[1]"))); + assertThat(propertyPaths, hasItem(equalTo("kafka.clusters[0].security.roles[0].rules[1].resources[0]"))); + assertThat(propertyPaths, hasItem(equalTo("kafka.clusters[0].security.roles[1].rules[0].resources[1]"))); + } } diff --git a/console-config-example.yaml b/console-config-example.yaml deleted file mode 100644 index 26df27c20..000000000 --- a/console-config-example.yaml +++ /dev/null @@ -1,54 +0,0 @@ -kubernetes: - # enable/disable use of Kubernetes to obtain additional information from Strimzi - # Kafka and KafkaTopic custom resources. Enabled by default - enabled: true - -metricsSources: - # Array of Prometheus API servers that my be referenced by Kafka cluster configurations - # for metrics retrieval to render graphs in the UI and provide other information based - # on the cluster metrics - - name: cluster-monitoring - type: openshift-monitoring - url: https://thanos-querier-openshift-monitoring.cloud.example.com - - name: my-custom-prometheus - type: standalone - url: http://my-custom-prometheus.cloud2.example.com - -schemaRegistries: - # Array of Apicurio Registries that my be referenced by Kafka cluster configurations - # to resolve Avro or Protobuf schemas for topic message browsing - - name: "my-apicurio-registry" - url: "http://registry.exampl.com/apis/registry/v2/" - -kafka: - clusters: - - name: my-kafka1 # name of the Strimzi Kafka CR - namespace: my-namespace1 # namespace of the Strimzi Kafka CR (optional) - id: my-kafka1-id # value to be used as an identifier for the cluster. Must be specified when namespace is not. - listener: "secure" # name of the listener to use for connections from the console - metricsSource: cluster-monitoring - schemaRegistry: "my-apicurio-registry" # name of the schema registry to use with this Kafka (optional) - # `properties` contains keys/values to use for any Kafka connection - properties: - security.protocol: SASL_SSL - sasl.mechanism: SCRAM-SHA-512 - bootstrap.servers: my-kafka1.cloud.example.com:9093 # optional, if omitted the bootstrap servers from the Strimzi Kafka CR are used - sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka1-user" password="sCr@m!"; - # `adminProperties` contains keys/values to use for Admin client Kafka connections. - # Properties specified here override properties of the same name in `properties` - adminProperties: {} - # `consumerProperties` contains keys/values to use for Consumer client Kafka connections. - # Properties specified here override properties of the same name in `properties` - consumerProperties: {} - # `producerProperties` contains keys/values to use for Producer client Kafka connections. 
- # Properties specified here override properties of the same name in `properties` - producerProperties: {} - - - name: my-kafka2 - namespace: my-namespace2 - listener: "secure" - metricsSource: my-custom-prometheus - properties: - security.protocol: SASL_SSL - sasl.mechanism: SCRAM-SHA-512 - sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka2-user" password="sCr@m!"; diff --git a/examples/console-config.yaml b/examples/console-config.yaml new file mode 100644 index 000000000..75470d3fd --- /dev/null +++ b/examples/console-config.yaml @@ -0,0 +1,135 @@ +kubernetes: + # enable/disable use of Kubernetes to obtain additional information from Strimzi + # Kafka and KafkaTopic custom resources. Enabled by default + enabled: true + +metricsSources: + # Array of Prometheus API servers that may be referenced by Kafka cluster configurations + # for metrics retrieval to render graphs in the UI and provide other information based + # on the cluster metrics + - name: cluster-monitoring + type: openshift-monitoring + url: https://thanos-querier-openshift-monitoring.cloud.example.com + - name: my-custom-prometheus + type: standalone + url: http://my-custom-prometheus.cloud2.example.com + +schemaRegistries: + # Array of Apicurio Registries that may be referenced by Kafka cluster configurations + # to resolve Avro or Protobuf schemas for topic message browsing + - name: "my-apicurio-registry" + url: "http://registry.example.com/apis/registry/v2/" + +security: + # OpenID Connect security configuration. When OIDC is in use, all Kafka clusters must have credentials provided + # (if security is enabled for the cluster). Additionally, `security.subjects` may not be used at the cluster + # level when OIDC is in use. + oidc: + authServerUrl: 'https://my-oidc.example.com' + clientId: streamshub-console-client + clientSecret: ${console.client.secret} + + subjects: + # Subjects and their roles may be specified in terms of JWT claims or their subject name (user1, user200 below). + # Using claims is only supported when OIDC security is enabled. + - claim: groups + include: + - team-a + - team-b + roleNames: + - developers + - claim: groups + include: + - team-c + roleNames: + - administrators + - include: + # When no claim is specified, the subject's name will be used. For JWT, this is typically the `preferred_username`, + # `upn`, or `sub` claim. When using per-Kafka authentication credentials, this will be the user name used to + # authenticate. + - user1 + - user200 + roleNames: + - administrators + + # Roles and associated rules for global resources (currently only Kafka clusters) are given here in the `security.roles` + # section. Rules for Kafka-scoped resources are specified within the cluster configuration section below. That is, + # at paths `kafka.clusters[].security.rules[]`. + roles: + # developers may perform any operation with clusters 'a' and 'b'. + - name: developers + rules: + - resources: + - kafkas + - resourceNames: + - dev-cluster-a + - dev-cluster-b + - privileges: + - '*' + # administrators may operate on any (unspecified) Kafka clusters + - name: administrators + rules: + - resources: + - kafkas + - privileges: + - '*' + +kafka: + clusters: + - name: my-kafka1 # name of the Strimzi Kafka CR + namespace: my-namespace1 # namespace of the Strimzi Kafka CR (optional) + id: my-kafka1-id # value to be used as an identifier for the cluster. Must be specified when namespace is not.
+ listener: "secure" # name of the listener to use for connections from the console + metricsSource: cluster-monitoring + schemaRegistry: "my-apicurio-registry" # name of the schema registry to use with this Kafka (optional) + # `properties` contains keys/values to use for any Kafka connection + properties: + security.protocol: SASL_SSL + sasl.mechanism: SCRAM-SHA-512 + bootstrap.servers: my-kafka1.cloud.example.com:9093 # optional, if omitted the bootstrap servers from the Strimzi Kafka CR are used + sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka1-user" password="sCr@m!"; + # `adminProperties` contains keys/values to use for Admin client Kafka connections. + # Properties specified here override properties of the same name in `properties` + adminProperties: {} + # `consumerProperties` contains keys/values to use for Consumer client Kafka connections. + # Properties specified here override properties of the same name in `properties` + consumerProperties: {} + # `producerProperties` contains keys/values to use for Producer client Kafka connections. + # Properties specified here override properties of the same name in `properties` + producerProperties: {} + security: + roles: + # developers may only list and view some resources + - name: developers + rules: + - resources: + - topics + - topics/records + - consumerGroups + - rebalances + - privileges: + - get + - list + + # administrators may list, view, and update an expanded set of resources + - name: administrators + rules: + - resources: + - topics + - topics/records + - consumerGroups + - rebalances + - nodes/configs + - privileges: + - get + - list + - update + + - name: my-kafka2 + namespace: my-namespace2 + listener: "secure" + metricsSource: my-custom-prometheus + properties: + security.protocol: SASL_SSL + sasl.mechanism: SCRAM-SHA-512 + sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka2-user" password="sCr@m!"; diff --git a/examples/dex-openshift/020-ClusterRole-console-dex.yaml b/examples/dex-openshift/020-ClusterRole-console-dex.yaml new file mode 100644 index 000000000..426f08c90 --- /dev/null +++ b/examples/dex-openshift/020-ClusterRole-console-dex.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: console-dex +rules: +- apiGroups: ["dex.coreos.com"] # API group created by dex + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create"] # To manage its own resources, dex must be able to create customresourcedefinitions diff --git a/examples/dex-openshift/030-ClusterRoleBinding-console-dex.yaml b/examples/dex-openshift/030-ClusterRoleBinding-console-dex.yaml new file mode 100644 index 000000000..1a972c829 --- /dev/null +++ b/examples/dex-openshift/030-ClusterRoleBinding-console-dex.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: console-dex +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: console-dex +subjects: +- kind: ServiceAccount + name: console-dex # Service account assigned to the dex pod + namespace: ${NAMESPACE} # The namespace dex is running in diff --git a/examples/dex-openshift/040-Secret-console-dex.yaml b/examples/dex-openshift/040-Secret-console-dex.yaml new file mode 100644 index 000000000..d0ba0e24b --- /dev/null +++ b/examples/dex-openshift/040-Secret-console-dex.yaml @@ -0,0 +1,53 @@ +--- 
+kind: Secret +apiVersion: v1 +metadata: + name: console-dex +stringData: + config.yaml: | + issuer: https://console-dex.${CLUSTER_DOMAIN} + + storage: + type: kubernetes + config: + inCluster: true + + web: + http: 0.0.0.0:5556 + + oauth2: + skipApprovalScreen: true + + staticClients: + - id: streamshub-console + name: 'StreamsHub Console' + secret: ${STATIC_CLIENT_SECRET} + redirectURIs: + - 'https://example-console.${CLUSTER_DOMAIN}/api/auth/callback/oidc' + - 'http://localhost:3000/api/auth/callback/oidc' + + connectors: + - type: openshift + id: openshift + name: OpenShift + config: + # OpenShift API, e.g. `https://api.example.com:6443` + # Get from kubectl via `kubectl config view --minify=true --flatten=false -o json | jq -r .clusters[0].cluster.server` + issuer: ${CLUSTER_APISERVER} + + # OpenShift root CA + rootCA: /etc/ssl/openshift.pem + + # Communicate to OpenShift without validating SSL certificates + insecureCA: false + + # Credentials can be string literals or pulled from the environment. + clientID: ${${X}OPENSHIFT_OAUTH_CLIENT_ID} + clientSecret: ${${X}OPENSHIFT_OAUTH_CLIENT_SECRET} + + # Optional list of required groups a user must be a member of + groups: [] + + # Redirect to the console's callback following authentication + redirectURI: https://console-dex.${CLUSTER_DOMAIN}/callback + diff --git a/examples/dex-openshift/050-Deployment-console-dex.yaml b/examples/dex-openshift/050-Deployment-console-dex.yaml new file mode 100644 index 000000000..3ac625eab --- /dev/null +++ b/examples/dex-openshift/050-Deployment-console-dex.yaml @@ -0,0 +1,62 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: console-dex +spec: + replicas: 1 + selector: + matchLabels: + app: console-dex + template: + metadata: + labels: + app: console-dex + spec: + serviceAccountName: console-dex + volumes: + - name: config + secret: + secretName: console-dex + items: + - key: config.yaml + path: config.yaml + - name: openshift-ca + configMap: + name: kube-root-ca.crt + items: + - key: ca.crt + path: openshift.pem + containers: + - image: ghcr.io/dexidp/dex:v2.32.0 + name: dex + command: ["/usr/local/bin/dex", "serve", "/etc/dex/cfg/config.yaml"] + + ports: + - name: http + containerPort: 5556 + + volumeMounts: + - name: config + mountPath: /etc/dex/cfg + - name: openshift-ca + mountPath: /etc/ssl/openshift.pem + subPath: openshift.pem + + env: + - name: OPENSHIFT_OAUTH_CLIENT_ID + valueFrom: + secretKeyRef: + name: console-dex-secrets + key: DEX_CLIENT_ID + - name: OPENSHIFT_OAUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: console-dex-secrets + key: DEX_CLIENT_SECRET + + readinessProbe: + httpGet: + path: /healthz + port: 5556 + scheme: HTTP diff --git a/examples/dex-openshift/060-Service-console-dex.yaml b/examples/dex-openshift/060-Service-console-dex.yaml new file mode 100644 index 000000000..bd0fc34af --- /dev/null +++ b/examples/dex-openshift/060-Service-console-dex.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: console-dex +spec: + type: ClusterIP + ports: + - name: dex + port: 5556 + protocol: TCP + targetPort: http + selector: + app: console-dex diff --git a/examples/dex-openshift/070-Ingress-console-dex.yaml b/examples/dex-openshift/070-Ingress-console-dex.yaml new file mode 100644 index 000000000..709a2077a --- /dev/null +++ b/examples/dex-openshift/070-Ingress-console-dex.yaml @@ -0,0 +1,24 @@ +--- +kind: Ingress +apiVersion: networking.k8s.io/v1 +metadata: + name: console-dex + annotations: + nginx.ingress.kubernetes.io/backend-protocol: 
HTTP + route.openshift.io/termination: edge +spec: + defaultBackend: + service: + name: console-dex + port: + number: 5556 + rules: + - host: console-dex.${CLUSTER_DOMAIN} + http: + paths: + - pathType: ImplementationSpecific + backend: + service: + name: console-dex + port: + number: 5556 diff --git a/examples/dex-openshift/README.md b/examples/dex-openshift/README.md new file mode 100644 index 000000000..39702d0d7 --- /dev/null +++ b/examples/dex-openshift/README.md @@ -0,0 +1,55 @@ +# OpenShift Authentication + +This directory contains resources to deploy a [dex](https://dexidp.io/) server to support authentication in the console +using OpenShift's platform identity provider. Other identity providers [supported by dex](https://dexidp.io/docs/connectors/) +may also work but require modification to the [040-Secret-console-dex.yaml](./040-Secret-console-dex.yaml) file. + +Deploying these resources requires several parameters to be set in the environment: +- `NAMESPACE`: namespace in which to deploy dex (may be the same namespace used for the console deployment) +- `CLUSTER_DOMAIN`: base domain used by your Kubernetes cluster. This will be used to configure dex's JWT issuer, dex's own ingress domain name, and the redirect URL to the console application. The example resources assume that a console instance is hosted at `https://example-console.${CLUSTER_DOMAIN}/api/auth/callback/oidc`. +- `CLUSTER_APISERVER`: API server endpoint for the Kubernetes cluster +- `STATIC_CLIENT_SECRET`: client secret used by the console to interact with dex as an OAuth2 client + +The following script is based on the [dex OpenShift documentation](https://dexidp.io/docs/connectors/openshift/). + +```shell +export NAMESPACE=streams-console-dex +export CLUSTER_DOMAIN=apps-crc.testing +export CLUSTER_APISERVER=$(kubectl config view --minify=true --flatten=false -o json | jq -r .clusters[0].cluster.server) +export STATIC_CLIENT_SECRET="$(tr -dc A-Za-z0-9 io.sundr builder-annotations - 0.200.0 jakarta.validation diff --git a/operator/src/main/resources/application.properties index 846d7ff3b..a7f397819 100644 --- a/operator/src/main/resources/application.properties +++ b/operator/src/main/resources/application.properties @@ -25,6 +25,15 @@ quarkus.vertx.classpath-resolving=false %build.quarkus.container-image.labels."org.opencontainers.image.version"=${quarkus.application.version} %build.quarkus.container-image.labels."org.opencontainers.image.revision"=${git.revision} +# Do not gather coverage information for generated builder types +quarkus.jacoco.excludes=\ +com/github/streamshub/console/config/**/*Builder.class,\ +com/github/streamshub/console/config/**/*Fluent.class,\ +com/github/streamshub/console/config/**/*Nested.class,\ +com/github/streamshub/console/api/v1alpha1/**/*Builder.class,\ +com/github/streamshub/console/api/v1alpha1/**/*Fluent.class,\ +com/github/streamshub/console/api/v1alpha1/**/*Nested.class + # set to true to automatically apply CRDs to the cluster when they get regenerated %dev.quarkus.operator-sdk.crd.apply=true %test.quarkus.operator-sdk.crd.apply=true diff --git a/operator/src/main/resources/com/github/streamshub/console/dependents/console.deployment.yaml b/operator/src/main/resources/com/github/streamshub/console/dependents/console.deployment.yaml index 6f5759998..cf36e14cf 100644 --- a/operator/src/main/resources/com/github/streamshub/console/dependents/console.deployment.yaml +++ 
b/operator/src/main/resources/com/github/streamshub/console/dependents/console.deployment.yaml @@ -75,8 +75,11 @@ spec: - name: console-ui image: quay.io/streamshub/console-ui volumeMounts: - - mountPath: /app/.next/cache - name: cache + - name: cache + mountPath: /app/.next/cache + - name: config + mountPath: /app/console-config.yaml + subPath: console-config.yaml ports: - containerPort: 3000 name: http @@ -90,6 +93,8 @@ spec: value: 'https://${CONSOLE_HOSTNAME}' - name: BACKEND_URL value: 'http://127.0.0.1:8080' + - name: CONSOLE_CONFIG_PATH + value: /app/console-config.yaml - name: CONSOLE_MODE value: read-only - name: LOG_LEVEL diff --git a/pom.xml b/pom.xml index db8a82153..a67f50903 100644 --- a/pom.xml +++ b/pom.xml @@ -126,6 +126,11 @@ validators 1.4.2 + + io.sundr + builder-annotations + 0.200.0 + diff --git a/ui/Dockerfile b/ui/Dockerfile index a5a15084a..953e4e24f 100644 --- a/ui/Dockerfile +++ b/ui/Dockerfile @@ -18,6 +18,7 @@ USER 1001 EXPOSE 3000 ENV NODE_ENV=production +ENV HOSTNAME=0.0.0.0 ENV PORT=3000 CMD ["node", "server.js"] diff --git a/ui/api/api.ts b/ui/api/api.ts index 52377c1eb..f185b7053 100644 --- a/ui/api/api.ts +++ b/ui/api/api.ts @@ -1,19 +1,167 @@ import { getUser } from "@/utils/session"; import { z } from "zod"; +import { logger } from "@/utils/logger"; -export async function getHeaders(): Promise> { - const user = await getUser(); +const log = logger.child({ module: "api" }); + +const SERVER_ROOT = !process.env.BACKEND_URL?.endsWith("/") ? + process.env.BACKEND_URL : + process.env.BACKEND_URL.substring(0, process.env.BACKEND_URL.length - 1); + +export function sortParam( + sortField: string | undefined, + order: string | undefined +) { + if (sortField) { + return (order === "asc" ? "-" : "") + sortField; + } + return undefined; +} + +export function filterEq(value: string | undefined) { + if (value) { + return `eq,${value}`; + } + return undefined; +} + +export function filterGte(value: string | undefined) { + if (value) { + return `gte,${value}`; + } + return undefined; +} + +export function filterLike(value: string | undefined) { + if (value) { + return `like,*${value}*`; + } + return undefined; +} + +export function filterIn(values: string[] | undefined) { + if (values?.length ?? -1 > 0) { + return `in,${values?.join(",")}`; + } + return undefined; +} + +export async function getHeaders(anonymous?: boolean): Promise> { + const user = anonymous? null : await getUser(); let headers: Record = { Accept: "application/json", "Content-Type": "application/json", }; - if (user.authorization) { + if (user?.authorization) { headers["Authorization"] = user.authorization; } return headers; } -export const ApiError = z.object({ +export async function fetchData( + path: string, + query: URLSearchParams | string, + parser: (json: any) => T, + anonymous?: boolean, + options?: { cache?: "no-store" | "force-cache", next?: { revalidate: false | 0 | number } } +) : Promise> { + + const queryString = query?.toString() ?? ""; + const url = `${SERVER_ROOT}${path}${queryString.length > 0 ? "?" 
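Editor's note: the filter helpers introduced in `ui/api/api.ts` above translate UI filter state into the backend's `<operator>,<operand>` filter expressions. A rough sketch (not part of the patch; the filter values are made up) of what they produce when combined with `filterUndefinedFromObj`:

```typescript
import { filterEq, filterIn, filterLike } from "@/api/api";
import { filterUndefinedFromObj } from "@/utils/filterUndefinedFromObj";

// Hypothetical filter values, only to show the generated expressions.
const sp = new URLSearchParams(
  filterUndefinedFromObj({
    "filter[id]": filterEq("b1GM6LbVQOCs0SQkJ9VSqg"),  // "eq,b1GM6LbVQOCs0SQkJ9VSqg"
    "filter[name]": filterLike("orders"),              // "like,*orders*"
    "filter[status]": filterIn(["Ready", "Offline"]),  // "in,Ready,Offline"
    "filter[visibility]": filterIn([]),                // undefined, dropped by filterUndefinedFromObj
  }),
);

// sp.toString() yields the URL-encoded JSON:API filter query string sent to the backend.
```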
+ queryString : ""}`; + + const response = await fetch( + url, { + headers: await getHeaders(anonymous), + ...options + }); + const rawData = await response.json(); + + if (response.ok) { + log.debug(rawData, `fetch ${url} response`); + + return { + payload: parser(rawData), + timestamp: new Date(), + }; + } else { + log.info(rawData, `fetch ${url} response`); + + return { + errors: ApiErrorResponse.parse(rawData).errors, + timestamp: new Date(), + }; + } +} + +export async function postData( + path: string, + body: any, + parser: (json: any) => T, +) : Promise> { + + const url = `${SERVER_ROOT}${path}`; + + const response = await fetch( + url, { + method: 'POST', + headers: await getHeaders(), + body: JSON.stringify(body), + }); + + const rawData = await response.text(); + + if (response.ok) { + log.debug(rawData, `patch ${url} response`); + + return { + payload: rawData.length > 0 ? parser(JSON.parse(rawData)) : null, + timestamp: new Date(), + }; + } else { + log.info(rawData, `patch ${url} response`); + + return { + errors: ApiErrorResponse.parse(JSON.parse(rawData)).errors, + timestamp: new Date(), + }; + } +} + +export async function patchData( + path: string, + body: any, + parser: (json: any) => T, +) : Promise> { + + const url = `${SERVER_ROOT}${path}`; + + const response = await fetch( + url, { + method: 'PATCH', + headers: await getHeaders(), + body: JSON.stringify(body), + }); + + const rawData = await response.text(); + + if (response.ok) { + log.debug(rawData, `patch ${url} response`); + + return { + payload: rawData.length > 0 ? parser(JSON.parse(rawData)) : null, + timestamp: new Date(), + }; + } else { + log.info(rawData, `patch ${url} response`); + + return { + errors: ApiErrorResponse.parse(JSON.parse(rawData)).errors, + timestamp: new Date(), + }; + } +} + +export const ApiErrorSchema = z.object({ meta: z.object({ type: z.string() }).optional(), // z.map(z.string(), z.string()), id: z.string().optional(), status: z.string().optional(), @@ -22,9 +170,22 @@ export const ApiError = z.object({ detail: z.string(), source: z .object({ - pointer: z.string(), - parameter: z.string(), - header: z.string(), + pointer: z.string().optional(), + parameter: z.string().optional(), + header: z.string().optional(), }) .optional(), }); + +export type ApiError = z.infer; + +export const ApiErrorResponse = z.object({ + meta: z.object({}).nullable().optional(), + errors: z.array(ApiErrorSchema), +}); + +export type ApiResponse = { + errors?: ApiError[]; + payload?: T | null; + timestamp: Date; +}; diff --git a/ui/api/consumerGroups/actions.ts b/ui/api/consumerGroups/actions.ts index b1cc24743..aa1d0c1d8 100644 --- a/ui/api/consumerGroups/actions.ts +++ b/ui/api/consumerGroups/actions.ts @@ -1,35 +1,23 @@ "use server"; -import { getHeaders } from "@/api/api"; +import { fetchData, patchData, sortParam, ApiResponse } from "@/api/api"; import { ConsumerGroup, - ConsumerGroupDryrunResponseSchema, ConsumerGroupResponseSchema, ConsumerGroupsResponse, ConsumerGroupsResponseSchema, ConsumerGroupState, - DryrunResponse, - UpdateConsumerGroupErrorSchema, } from "@/api/consumerGroups/schema"; import { filterUndefinedFromObj } from "@/utils/filterUndefinedFromObj"; -import { logger } from "@/utils/logger"; - -const log = logger.child({ module: "consumergroup-api" }); export async function getConsumerGroup( kafkaId: string, groupId: string, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/consumerGroups/${groupId}`; - const res = await fetch(url, { - headers: await 
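Editor's note: every refactored action now resolves to the `ApiResponse` envelope instead of throwing or returning bare data. A minimal sketch (assumed caller code, not part of the patch) of how a server component might branch on it:

```typescript
import { getConsumerGroup } from "@/api/consumerGroups/actions";

async function loadConsumerGroup(kafkaId: string, groupId: string) {
  const response = await getConsumerGroup(kafkaId, groupId);

  if (response.errors) {
    // JSON:API error objects parsed by ApiErrorResponse; surface them to the UI instead of crashing
    response.errors.forEach((e) => console.error(e.detail));
    return null;
  }

  // payload is the parsed ConsumerGroup (may be null for empty response bodies)
  return response.payload ?? null;
}
```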
getHeaders(), - next: { - tags: [`consumer-group-${kafkaId}-${groupId}`], - }, - }); - log.debug({ url }, "getConsumerGroup"); - const rawData = await res.json(); - log.debug({ url, rawData }, "getConsumerGroup response"); - return ConsumerGroupResponseSchema.parse(rawData).data; +): Promise> { + return fetchData( + `/api/kafkas/${kafkaId}/consumerGroups/${groupId}`, + "", + (rawData) => ConsumerGroupResponseSchema.parse(rawData).data + ); } export async function getConsumerGroups( @@ -43,44 +31,28 @@ export async function getConsumerGroups( sort?: string; sortDir?: string; }, -): Promise { - try { - const sp = new URLSearchParams( - filterUndefinedFromObj({ - "fields[consumerGroups]": - params.fields ?? "state,simpleConsumerGroup,members,offsets", - "filter[id]": params.id ? `eq,${params.id}` : undefined, - // TODO: pass filter from UI - "filter[state]": - params.state && params.state.length > 0 - ? `in,${params.state.join(",")}` - : undefined, - "page[size]": params.pageSize, - "page[after]": params.pageCursor, - sort: params.sort - ? (params.sortDir !== "asc" ? "-" : "") + params.sort +): Promise> { + const sp = new URLSearchParams( + filterUndefinedFromObj({ + "fields[consumerGroups]": + params.fields ?? "state,simpleConsumerGroup,members,offsets", + "filter[id]": params.id ? `eq,${params.id}` : undefined, + // TODO: pass filter from UI + "filter[state]": + params.state && params.state.length > 0 + ? `in,${params.state.join(",")}` : undefined, - }), - ); - const cgQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/consumerGroups?${cgQuery}`; - const res = await fetch(url, { - headers: await getHeaders(), - next: { - tags: [`consumer-groups`], - }, - }); - log.debug({ url }, "getConsumerGroups"); - if (res.status === 200) { - const rawData = await res.json(); - log.debug({ url, rawData }, "getConsumerGroups response"); - return ConsumerGroupsResponseSchema.parse(rawData); - } - } catch (err) { - log.error(err, "getConsumerGroups"); - throw new Error("getConsumerGroups: couldn't connect with backend"); - } - return null; + "page[size]": params.pageSize, + "page[after]": params.pageCursor, + sort: sortParam(params.sort, params.sortDir), + }), + ); + + return fetchData( + `/api/kafkas/${kafkaId}/consumerGroups`, + sp, + (rawData) => ConsumerGroupsResponseSchema.parse(rawData), + ); } export async function getTopicConsumerGroups( @@ -92,30 +64,20 @@ export async function getTopicConsumerGroups( sort?: string; sortDir?: string; }, -): Promise { +): Promise> { const sp = new URLSearchParams( filterUndefinedFromObj({ - "fields[consumerGroups]": - "state,simpleConsumerGroup,members,offsets,authorizedOperations,coordinator,partitionAssignor", + "fields[consumerGroups]": "state,simpleConsumerGroup,members,offsets,coordinator,partitionAssignor", "page[size]": params.pageSize, "page[after]": params.pageCursor, - sort: params.sort - ? (params.sortDir !== "asc" ? 
"-" : "") + params.sort - : undefined, + sort: sortParam(params.sort, params.sortDir), }), ); - const cgQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/topics/${topicId}/consumerGroups?${cgQuery}`; - const res = await fetch(url, { - headers: await getHeaders(), - next: { - tags: [`consumer-group-${topicId}`], - }, - }); - log.debug({ url }, "getTopicConsumerGroups"); - const rawData = await res.json(); - log.debug({ url, rawData }, "getTopicConsumerGroups response"); - return ConsumerGroupsResponseSchema.parse(rawData); + return fetchData( + `/api/kafkas/${kafkaId}/topics/${topicId}/consumerGroups`, + sp, + (rawData) => ConsumerGroupsResponseSchema.parse(rawData) + ); } export async function updateConsumerGroup( @@ -127,71 +89,22 @@ export async function updateConsumerGroup( offset: string | number; metadata?: string; }>, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/consumerGroups/${consumerGroupId}`; - const body = { - data: { - type: "consumerGroups", - id: consumerGroupId, - attributes: { - offsets, + dryRun?: boolean, +): Promise> { + return patchData( + `/api/kafkas/${kafkaId}/consumerGroups/${consumerGroupId}`, + { + meta: { + dryRun: dryRun, }, - }, - }; - - log.debug({ url, body }, "calling updateConsumerGroup"); - - try { - const res = await fetch(url, { - headers: await getHeaders(), - method: "PATCH", - body: JSON.stringify(body), - }); - - log.debug({ status: res.status }, "updateConsumerGroup response"); - - if (res.status === 204) { - return true; - } else { - const rawData = await res.json(); - return UpdateConsumerGroupErrorSchema.parse(rawData); - } - } catch (e) { - log.error(e, "updateConsumerGroup unknown error"); - console.error("Unknown error occurred:", e); - return false; - } -} - -export async function getDryrunResult( - kafkaId: string, - consumerGroupId: string, - offsets: Array<{ - topicId: string; - partition?: number; - offset: string | number; - metadata?: string; - }>, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/consumerGroups/${consumerGroupId}`; - const body = { - meta: { - dryRun: true, - }, - data: { - type: "consumerGroups", - id: consumerGroupId, - attributes: { - offsets, + data: { + type: "consumerGroups", + id: consumerGroupId, + attributes: { + offsets, + }, }, }, - }; - const res = await fetch(url, { - headers: await getHeaders(), - method: "PATCH", - body: JSON.stringify(body), - }); - const rawData = await res.json(); - log.debug({ url, rawData }, "getConsumerGroup response"); - return ConsumerGroupDryrunResponseSchema.parse(rawData).data; + (rawData) => dryRun ? 
ConsumerGroupResponseSchema.parse(rawData).data : undefined, + ); } diff --git a/ui/api/consumerGroups/schema.ts b/ui/api/consumerGroups/schema.ts index 23e175766..49546d4e8 100644 --- a/ui/api/consumerGroups/schema.ts +++ b/ui/api/consumerGroups/schema.ts @@ -1,4 +1,4 @@ -import { ApiError } from "@/api/api"; +import { ApiErrorSchema } from "@/api/api"; import { NodeSchema } from "@/api/kafka/schema"; import { z } from "zod"; @@ -14,7 +14,7 @@ const ConsumerGroupStateSchema = z.union([ ]); const OffsetAndMetadataSchema = z.object({ - topicId: z.string(), + topicId: z.string().optional(), topicName: z.string(), partition: z.number(), offset: z.number(), @@ -23,11 +23,14 @@ const OffsetAndMetadataSchema = z.object({ metadata: z.string(), leaderEpoch: z.number().optional(), }); +export type OffsetAndMetadata = z.infer; + const PartitionKeySchema = z.object({ - topicId: z.string(), + topicId: z.string().optional(), topicName: z.string(), partition: z.number(), }); + const MemberDescriptionSchema = z.object({ memberId: z.string(), groupInstanceId: z.string().nullable().optional(), @@ -35,29 +38,23 @@ const MemberDescriptionSchema = z.object({ host: z.string(), assignments: z.array(PartitionKeySchema).optional(), }); +export type MemberDescription = z.infer; + export const ConsumerGroupSchema = z.object({ id: z.string(), type: z.literal("consumerGroups"), attributes: z.object({ simpleConsumerGroup: z.boolean().optional(), state: ConsumerGroupStateSchema, - members: z.array(MemberDescriptionSchema).optional(), + members: z.array(MemberDescriptionSchema).nullable().optional(), partitionAssignor: z.string().nullable().optional(), coordinator: NodeSchema.nullable().optional(), - authorizedOperations: z.array(z.string()).nullable().optional(), - offsets: z.array(OffsetAndMetadataSchema).optional(), - errors: z.array(ApiError).optional(), + authorizedOperations: z.array(z.string()).nullable().nullable().optional(), + offsets: z.array(OffsetAndMetadataSchema).nullable().optional(), + errors: z.array(ApiErrorSchema).optional(), }), }); -const DryrunOffsetSchema = z.object({ - topicId: z.string(), - topicName: z.string(), - partition: z.number(), - offset: z.number(), - metadata: z.string(), -}); - export type ConsumerGroup = z.infer; export type ConsumerGroupState = z.infer; @@ -77,33 +74,6 @@ export const ConsumerGroupsResponseSchema = z.object({ data: z.array(ConsumerGroupSchema), }); -export const DryrunSchema = z.object({ - id: z.string(), - type: z.literal("consumerGroups"), - attributes: z.object({ - state: ConsumerGroupStateSchema, - members: z.array(MemberDescriptionSchema).optional(), - offsets: z.array(DryrunOffsetSchema).optional(), - }), -}); - -export const UpdateConsumerGroupErrorSchema = z.object({ - errors: z.array( - z.object({ - id: z.string(), - status: z.string(), - code: z.string(), - title: z.string(), - detail: z.string(), - source: z - .object({ - pointer: z.string().optional(), - }) - .optional(), - }), - ), -}); - export type ConsumerGroupsResponse = z.infer< typeof ConsumerGroupsResponseSchema >; @@ -114,13 +84,3 @@ export const ConsumerGroupResponseSchema = z.object({ export type ConsumerGroupResponse = z.infer< typeof ConsumerGroupsResponseSchema >; - -export type UpdateConsumerGroupErrorSchema = z.infer< - typeof UpdateConsumerGroupErrorSchema ->; - -export const ConsumerGroupDryrunResponseSchema = z.object({ - data: DryrunSchema, -}); - -export type DryrunResponse = z.infer; diff --git a/ui/api/kafka/actions.ts b/ui/api/kafka/actions.ts index 560d3a5aa..1947eac0a 100644 
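Editor's note: with the dry-run path folded into `updateConsumerGroup`, the reset-offset flow can preview and then apply the same request. A sketch of the assumed two-step usage (cluster, group, topic id, and offset values are illustrative):

```typescript
import { updateConsumerGroup } from "@/api/consumerGroups/actions";

async function resetOffsets(kafkaId: string, groupId: string) {
  const offsets = [{ topicId: "b1GM6LbVQOCs0SQkJ9VSqg", partition: 0, offset: 0 }];

  // 1. Dry run: the backend echoes the projected group state in the payload.
  const preview = await updateConsumerGroup(kafkaId, groupId, offsets, true);
  if (preview.errors) {
    return preview.errors;
  }
  console.log(preview.payload?.attributes.offsets);

  // 2. Apply: the backend replies 204 No Content, so payload is undefined here.
  const applied = await updateConsumerGroup(kafkaId, groupId, offsets);
  return applied.errors ?? [];
}
```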
--- a/ui/api/kafka/actions.ts +++ b/ui/api/kafka/actions.ts @@ -1,5 +1,6 @@ "use server"; -import { getHeaders } from "@/api/api"; + +import { fetchData, patchData, ApiResponse, getHeaders } from "@/api/api"; import { ClusterDetail, ClusterList, @@ -10,30 +11,21 @@ import { logger } from "@/utils/logger"; const log = logger.child({ module: "kafka-api" }); -export async function getKafkaClusters(): Promise { - const sp = new URLSearchParams({ - "fields[kafkas]": "name,namespace,kafkaVersion", - sort: "name", - }); - const kafkaClustersQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas?${kafkaClustersQuery}`; - try { - const res = await fetch(url, { - headers: { - Accept: "application/json", - "Content-Type": "application/json", - }, +export async function getKafkaClusters(anonymous?: boolean): Promise> { + return fetchData( + "/api/kafkas", + new URLSearchParams({ + "fields[kafkas]": "name,namespace,kafkaVersion", + sort: "name", + }), + (rawData: any) => ClustersResponseSchema.parse(rawData).data, + anonymous, + { next: { - revalidate: 30, - }, - }); - const rawData = await res.json(); - log.trace(rawData, "getKafkaClusters response"); - return ClustersResponseSchema.parse(rawData).data; - } catch (err) { - log.error(err, "getKafkaClusters"); - throw new Error("getKafkaClusters: couldn't connect with backend"); - } + revalidate: 60 + } + }, + ); } export async function getKafkaCluster( @@ -41,58 +33,37 @@ export async function getKafkaCluster( params?: { fields?: string; } -): Promise { - const sp = new URLSearchParams({ - "fields[kafkas]": - params?.fields ?? "name,namespace,creationTimestamp,status,kafkaVersion,nodes,controller,authorizedOperations,listeners,conditions,nodePools,cruiseControlEnabled", - }); - const kafkaClusterQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas/${clusterId}?${kafkaClusterQuery}`; - try { - const res = await fetch(url, { - headers: await getHeaders(), - }); - if (res.status === 200) { - const rawData = await res.json(); - log.trace(rawData, "getKafkaCluster response"); - return ClusterResponse.parse(rawData).data; - } - return null; - } catch (err) { - log.error({ err, clusterId }, "getKafkaCluster"); - throw new Error("getKafkaCluster: couldn't connect with backend"); - } +): Promise> { + return fetchData( + `/api/kafkas/${clusterId}`, + new URLSearchParams({ + "fields[kafkas]": params?.fields ?? 
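Editor's note: `getKafkaClusters` now takes an `anonymous` flag so public pages can list clusters before a session exists; no `Authorization` header is attached in that case (see `getHeaders` above). A small sketch of the assumed call site:

```typescript
import { getKafkaClusters } from "@/api/kafka/actions";

export async function listClusterNames(): Promise<string[]> {
  // anonymous=true skips session lookup and sends an unauthenticated request
  const response = await getKafkaClusters(true);
  return (response.payload ?? []).map((cluster) => cluster.attributes.name);
}
```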
+ "name,namespace,creationTimestamp,status,kafkaVersion,nodes,controller,listeners,conditions,nodePools,cruiseControlEnabled", + }), + (rawData: any) => ClusterResponse.parse(rawData).data, + undefined, + { + cache: "no-store", + }, + ); } export async function updateKafkaCluster( clusterId: string, reconciliationPaused?: boolean, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${clusterId}`; - const body = { - data: { - type: "kafkas", - id: clusterId, - meta: { - reconciliationPaused: reconciliationPaused, +): Promise> { + return patchData( + `/api/kafkas/${clusterId}`, + { + data: { + type: "kafkas", + id: clusterId, + meta: { + reconciliationPaused: reconciliationPaused, + }, + attributes: {}, }, - attributes: {}, }, - }; - - try { - const res = await fetch(url, { - headers: await getHeaders(), - method: "PATCH", - body: JSON.stringify(body), - }); - - if (res.status === 200) { - return true; - } else { - return false; - } - } catch (e) { - return false; - } + (_: any) => undefined + ); } diff --git a/ui/api/kafka/schema.ts b/ui/api/kafka/schema.ts index 1509b8d8f..64a4d2052 100644 --- a/ui/api/kafka/schema.ts +++ b/ui/api/kafka/schema.ts @@ -55,7 +55,7 @@ const ClusterDetailSchema = z.object({ kafkaVersion: z.string().nullable().optional(), nodes: z.array(NodeSchema), controller: NodeSchema, - authorizedOperations: z.array(z.string()), + authorizedOperations: z.array(z.string()).optional(), cruiseControlEnabled: z.boolean().optional(), listeners: z .array( diff --git a/ui/api/messages/actions.ts b/ui/api/messages/actions.ts index 8fdd78886..af9008518 100644 --- a/ui/api/messages/actions.ts +++ b/ui/api/messages/actions.ts @@ -1,16 +1,47 @@ "use server"; -import { getHeaders } from "@/api/api"; +import { fetchData, filterGte, ApiResponse } from "@/api/api"; import { Message, MessageApiResponse } from "@/api/messages/schema"; import { filterUndefinedFromObj } from "@/utils/filterUndefinedFromObj"; import { logger } from "@/utils/logger"; +const RECORD_FIELDS = "partition,offset,timestamp,timestampType,headers,key,keySchema,value,valueSchema,size"; const log = logger.child({ module: "messages-api" }); -export type GetTopicMessagesReturn = { - messages?: Message[]; - ts?: Date; - error?: "topic-not-found" | "unknown"; -}; +function getTimestampFilter( + filter: + | { + type: "offset"; + value: number; + } + | { + type: "timestamp"; + value: string; + } + | { + type: "epoch"; + value: number; + } + | undefined +) { + let timestamp: string | undefined; + try { + if (filter?.type === "epoch") { + const maybeEpoch = filter.value; + const maybeDate = Number.isInteger(maybeEpoch) + ? maybeEpoch * 1000 + : filter.value; + const date = maybeDate ? new Date(maybeDate) : undefined; + timestamp = date?.toISOString(); + } + if (filter?.type === "timestamp") { + const maybeDate = filter.value; + const date = maybeDate ? new Date(maybeDate) : undefined; + timestamp = date?.toISOString(); + } + } catch {} + + return timestamp; +} export async function getTopicMessages( kafkaId: string, @@ -36,56 +67,33 @@ export async function getTopicMessages( | undefined; maxValueLength?: number; }, -): Promise { - let timestamp: string | undefined; - try { - if (params.filter?.type === "epoch") { - const maybeEpoch = params.filter.value; - const maybeDate = Number.isInteger(maybeEpoch) - ? maybeEpoch * 1000 - : params.filter.value; - const date = maybeDate ? 
new Date(maybeDate) : undefined; - timestamp = date?.toISOString(); - } - if (params.filter?.type === "timestamp") { - const maybeDate = params.filter.value; - const date = maybeDate ? new Date(maybeDate) : undefined; - timestamp = date?.toISOString(); - } - } catch {} +): Promise> { const sp = new URLSearchParams( filterUndefinedFromObj({ - "fields[records]": - "partition,offset,timestamp,timestampType,headers,key,keySchema,value,valueSchema,size", + "fields[records]": RECORD_FIELDS, "filter[partition]": params.partition, "filter[offset]": params.filter?.type === "offset" ? "gte," + params.filter?.value : undefined, - "filter[timestamp]": timestamp ? "gte," + timestamp : undefined, + "filter[timestamp]": filterGte(getTimestampFilter(params.filter)), "page[size]": params.pageSize, // maxValueLength: Math.min(params.maxValueLength || 150, 50000), }), ); - const consumeRecordsQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/topics/${topicId}/records?${consumeRecordsQuery}`; - log.info( - { url, query: Object.fromEntries(sp.entries()), params }, - "Fetching topic messages", + + const response = fetchData( + `/api/kafkas/${kafkaId}/topics/${topicId}/records`, + sp, + (rawData) => MessageApiResponse.parse(rawData).data ); - const res = await fetch(url, { - headers: await getHeaders(), - next: { tags: [`messages-${topicId}`] }, - }); - const rawData = await res.json(); - log.trace({ rawData }, "Received messages"); - try { - const messages = MessageApiResponse.parse(rawData).data; - const query = params.query?.toLowerCase(); - const where = params.where; - if (query !== undefined && query !== null && query.length > 0) { - const filteredMessages = messages.filter( + return response.then(resp => { + const query = params.query?.toLowerCase() ?? ""; + + if (resp.payload && query.length > 0) { + const where = params.where; + const filteredMessages = resp.payload.filter( (m) => ((where === "key" || where === undefined) && m.attributes.key?.toLowerCase().includes(query)) || @@ -94,29 +102,13 @@ export async function getTopicMessages( ((where === "headers" || where === undefined) && JSON.stringify(m.attributes.headers).toLowerCase().includes(query)), ); + log.trace({ filteredMessages, query: params.query }, "Filtered messages"); - return { messages: filteredMessages, ts: new Date() }; - } else { - return { messages: messages, ts: new Date() }; + resp.payload = filteredMessages; } - } catch (e) { - log.error( - { error: e, status: res.status, message: rawData, url }, - "Error fetching message", - ); - if (res.status === 404) { - return { - messages: [], - ts: new Date(), - error: "topic-not-found", - }; - } - return { - messages: [], - ts: new Date(), - error: "unknown", - }; - } + + return resp; + }); } export async function getTopicMessage( @@ -126,20 +118,20 @@ export async function getTopicMessage( partition: number; offset: number; }, -): Promise { - log.debug({ kafkaId, topicId, params }, "getTopicMessage"); - const { messages } = await getTopicMessages(kafkaId, topicId, { - pageSize: 1, - partition: params.partition, - query: undefined, - filter: { - type: "offset", - value: params.offset, - }, - maxValueLength: 50000, +): Promise> { + const sp = new URLSearchParams({ + "fields[records]": RECORD_FIELDS, + "filter[partition]": String(params.partition), + "filter[offset]": "gte," + params.offset, + "page[size]": "1", }); - log.debug({ liveMessages: messages }, "getTopicMessage response"); - - return messages?.length === 1 ? 
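Editor's note: to make the record-filter handling concrete, here is a rough sketch (assumed usage, illustrative values) of fetching records newer than a Unix-epoch timestamp; `getTimestampFilter` converts the epoch seconds to an ISO string and `filterGte` wraps it in the backend's `gte,` expression:

```typescript
import { getTopicMessages } from "@/api/messages/actions";

async function recentMessages(kafkaId: string, topicId: string) {
  const since = Math.floor(Date.parse("2024-06-10T00:00:00Z") / 1000); // epoch seconds

  const response = await getTopicMessages(kafkaId, topicId, {
    pageSize: 50,
    query: undefined,
    filter: { type: "epoch", value: since },
  });

  return response.payload ?? [];
}
```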
messages[0] : undefined; + return fetchData( + `/api/kafkas/${kafkaId}/topics/${topicId}/records`, + sp, + (rawData) => { + const messages = MessageApiResponse.parse(rawData).data; + return messages.length === 1 ? messages[0] : undefined; + } + ); } diff --git a/ui/api/messages/schema.ts b/ui/api/messages/schema.ts index 4c2170278..239fcdfc9 100644 --- a/ui/api/messages/schema.ts +++ b/ui/api/messages/schema.ts @@ -1,11 +1,11 @@ import { z } from "zod"; -import { ApiError } from "@/api/api"; +import { ApiErrorSchema } from "@/api/api"; const RelatedSchema = z.object({ meta: z.object({ artifactType: z.string().optional(), name: z.string().optional(), - errors: z.array(ApiError).optional(), + errors: z.array(ApiErrorSchema).optional(), }).nullable().optional(), links: z.object({ content: z.string(), diff --git a/ui/api/nodes/actions.ts b/ui/api/nodes/actions.ts index c458c602b..78c79734a 100644 --- a/ui/api/nodes/actions.ts +++ b/ui/api/nodes/actions.ts @@ -1,23 +1,15 @@ "use server"; -import { getHeaders } from "@/api/api"; -import { ConfigResponseSchema, NodeConfig } from "@/api/nodes/schema"; -import { logger } from "@/utils/logger"; -const log = logger.child({ module: "api-topics" }); +import { fetchData, ApiResponse } from "@/api/api"; +import { ConfigResponseSchema, NodeConfig } from "@/api/nodes/schema"; export async function getNodeConfiguration( kafkaId: string, nodeId: number | string, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/nodes/${nodeId}/configs`; - log.debug({ url }, "Fetching node configuration"); - const res = await fetch(url, { - headers: await getHeaders(), - - next: { tags: [`node-${nodeId}`] }, - }); - const rawData = await res.json(); - log.trace(rawData, "Node configuration response"); - const data = ConfigResponseSchema.parse(rawData); - return data.data; +): Promise> { + return fetchData( + `/api/kafkas/${kafkaId}/nodes/${nodeId}/configs`, + "", + (rawData) => ConfigResponseSchema.parse(rawData).data + ); } diff --git a/ui/api/rebalance/actions.ts b/ui/api/rebalance/actions.ts index b0a4c1e24..0d7bf3179 100644 --- a/ui/api/rebalance/actions.ts +++ b/ui/api/rebalance/actions.ts @@ -1,17 +1,15 @@ "use server"; -import { logger } from "@/utils/logger"; + +import { fetchData, patchData, sortParam, filterIn, ApiResponse } from "@/api/api"; import { + RebalanceMode, RebalanceResponse, RebalanceResponseSchema, RebalanceSchema, - RebalancesResponse, RebalanceStatus, + RebalancesResponse, } from "./schema"; import { filterUndefinedFromObj } from "@/utils/filterUndefinedFromObj"; -import { getHeaders } from "@/api/api"; -import { RebalanceMode } from "./schema"; - -const log = logger.child({ module: "rebalance-api" }); export async function getRebalancesList( kafkaId: string, @@ -24,69 +22,54 @@ export async function getRebalancesList( sort?: string; sortDir?: string; }, -): Promise { +): Promise> { const sp = new URLSearchParams( filterUndefinedFromObj({ "fields[kafkaRebalances]": "name,namespace,creationTimestamp,status,mode,brokers,optimizationResult", "filter[name]": params.name ? `like,*${params.name}*` : undefined, - "filter[status]": - params.status && params.status.length > 0 - ? `in,${params.status.join(",")}` - : undefined, - "filter[mode]": - params.mode && params.mode.length > 0 - ? `in,${params.mode.join(",")}` - : undefined, + "filter[status]": filterIn(params.status), + "filter[mode]": filterIn(params.mode), "page[size]": params.pageSize, "page[after]": params.pageCursor, - sort: params.sort - ? (params.sortDir !== "asc" ? 
"-" : "") + params.sort - : undefined, + sort: sortParam(params.sort, params.sortDir), }), ); - const rebalanceQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/rebalances?${rebalanceQuery}`; - const res = await fetch(url, { - headers: await getHeaders(), - next: { - tags: ["rebalances"], - }, - }); + return fetchData( + `/api/kafkas/${kafkaId}/rebalances`, + sp, + (rawData) => RebalanceResponseSchema.parse(rawData), + ); +} - log.debug({ url }, "getRebalanceList"); - const rawData = await res.json(); - log.trace({ url, rawData }, "getRebalanceList response"); - return RebalanceResponseSchema.parse(rawData); +export async function getRebalance( + kafkaId: string, + rebalanceId: string, +): Promise> { + return fetchData( + `/api/kafkas/${kafkaId}/rebalances/${rebalanceId}`, + "", + (rawData) => RebalanceSchema.parse(rawData.data) + ); } -export async function getRebalanceDetails( +export async function patchRebalance( kafkaId: string, rebalanceId: string, - action?: string, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/rebalances/${rebalanceId}`; - const decodedRebalanceId = decodeURIComponent(rebalanceId); - const body = { - data: { - type: "kafkaRebalances", - id: decodedRebalanceId, - meta: { - action: action, + action: string, +): Promise> { + return patchData( + `/api/kafkas/${kafkaId}/rebalances/${rebalanceId}`, + { + data: { + type: "kafkaRebalances", + id: decodeURIComponent(rebalanceId), + meta: { + action: action, + }, + attributes: {}, }, - attributes: {}, }, - }; - log.debug({ url }, "Fetching rebalance details"); - const res = await fetch(url, { - headers: await getHeaders(), - method: "PATCH", - body: JSON.stringify(body), - }); - if (action) { - return res.ok; - } else { - const rawData = await res.json(); - return RebalanceSchema.parse(rawData.data); - } + (rawData) => RebalanceSchema.parse(rawData.data) + ); } diff --git a/ui/api/topics/actions.ts b/ui/api/topics/actions.ts index 49ff91af1..49ad4a4f0 100644 --- a/ui/api/topics/actions.ts +++ b/ui/api/topics/actions.ts @@ -1,5 +1,5 @@ "use server"; -import { getHeaders } from "@/api/api"; +import { fetchData, patchData, postData, getHeaders, ApiResponse, filterEq, filterIn, filterLike, sortParam } from "@/api/api"; import { getKafkaCluster } from "@/api/kafka/actions"; import { describeTopicsQuery, @@ -7,8 +7,6 @@ import { Topic, TopicCreateResponse, TopicCreateResponseSchema, - TopicMutateError, - TopicMutateResponseErrorSchema, TopicResponse, TopicsResponse, TopicsResponseSchema, @@ -33,18 +31,15 @@ export async function getTopics( sortDir?: string; includeHidden?: boolean; }, -): Promise { +): Promise> { const sp = new URLSearchParams( filterUndefinedFromObj({ "fields[topics]": params.fields ?? "name,status,visibility,numPartitions,totalLeaderLogBytes,consumerGroups", - "filter[id]": params.id ? `eq,${params.id}` : undefined, - "filter[name]": params.name ? `like,*${params.name}*` : undefined, - "filter[status]": - params.status && params.status.length > 0 - ? `in,${params.status.join(",")}` - : undefined, + "filter[id]": filterEq(params.id), + "filter[name]": filterLike(params.name), + "filter[status]": filterIn(params.status), "filter[visibility]": params.includeHidden ? 
"in,external,internal" : "eq,external", @@ -60,38 +55,23 @@ export async function getTopics( : undefined, }), ); - const topicsQuery = sp.toString(); - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/topics?${topicsQuery}&`; - const res = await fetch(url, { - headers: await getHeaders(), - next: { - tags: ["topics"], - }, - }); - log.debug({ url }, "getTopics"); - const rawData = await res.json(); - log.trace({ url, rawData }, "getTopics response"); - return TopicsResponseSchema.parse(rawData); + + return fetchData( + `/api/kafkas/${kafkaId}/topics`, + sp, + (rawData: any) => TopicsResponseSchema.parse(rawData), + ); } export async function getTopic( kafkaId: string, topicId: string, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/topics/${topicId}?${describeTopicsQuery}`; - const res = await fetch(url, { - headers: await getHeaders(), - next: { - tags: [`topic-${topicId}`], - }, - }); - const rawData = await res.json(); - log.trace(rawData, "getTopic"); - try { - return TopicResponse.parse(rawData).data; - } catch { - return null; - } +): Promise> { + return fetchData( + `/api/kafkas/${kafkaId}/topics/${topicId}`, + describeTopicsQuery, + (rawData: any) => TopicResponse.parse(rawData).data, + ); } export async function createTopic( @@ -101,33 +81,25 @@ export async function createTopic( replicationFactor: number, configs: NewConfigMap, validateOnly = false, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/topics`; - const body = { - meta: { - validateOnly, - }, - data: { - type: "topics", - attributes: { - name, - numPartitions, - replicationFactor, - configs: filterUndefinedFromObj(configs), +): Promise> { + return postData( + `/api/kafkas/${kafkaId}/topics`, + { + meta: { + validateOnly, + }, + data: { + type: "topics", + attributes: { + name, + numPartitions, + replicationFactor, + configs: filterUndefinedFromObj(configs), + }, }, }, - }; - log.debug({ url, body }, "calling createTopic"); - const res = await fetch(url, { - headers: await getHeaders(), - method: "POST", - body: JSON.stringify(body), - }); - const rawData = await res.json(); - log.debug({ url, rawData }, "createTopic response"); - const response = TopicCreateResponseSchema.parse(rawData); - log.debug(response, "createTopic response parsed"); - return response; + (rawData) => TopicCreateResponseSchema.parse(rawData) + ); } export async function updateTopic( @@ -136,37 +108,22 @@ export async function updateTopic( numPartitions?: number, replicationFactor?: number, configs?: NewConfigMap, -): Promise { - const url = `${process.env.BACKEND_URL}/api/kafkas/${kafkaId}/topics/${topicId}`; - const body = { - data: { - type: "topics", - id: topicId, - attributes: { - numPartitions, - replicationFactor, - configs: filterUndefinedFromObj(configs || {}), +): Promise> { + return patchData( + `/api/kafkas/${kafkaId}/topics/${topicId}`, + { + data: { + type: "topics", + id: topicId, + attributes: { + numPartitions, + replicationFactor, + configs: filterUndefinedFromObj(configs ?? 
{}), + }, }, }, - }; - log.debug({ url, body }, "calling updateTopic"); - const res = await fetch(url, { - headers: await getHeaders(), - method: "PATCH", - body: JSON.stringify(body), - }); - log.debug({ status: res.status }, "updateTopic response"); - try { - if (res.status === 204) { - return true; - } else { - const rawData = await res.json(); - return TopicMutateResponseErrorSchema.parse(rawData); - } - } catch (e) { - log.error(e, "updateTopic unknown error"); - } - return false; + _ => undefined + ); } export async function deleteTopic( @@ -208,10 +165,11 @@ export async function getViewedTopics(): Promise { } export async function setTopicAsViewed(kafkaId: string, topicId: string) { - log.debug({ kafkaId, topicId }, "setTopicAsViewed"); - const cluster = await getKafkaCluster(kafkaId); - const topic = await getTopic(kafkaId, topicId); + log.trace({ kafkaId, topicId }, "setTopicAsViewed"); + const cluster = (await getKafkaCluster(kafkaId)).payload; + const topic = (await getTopic(kafkaId, topicId)).payload; const viewedTopics = await getViewedTopics(); + if (cluster && topic) { const viewedTopic: ViewedTopic = { kafkaId, diff --git a/ui/api/topics/schema.ts b/ui/api/topics/schema.ts index ac8ebe79a..93f36b174 100644 --- a/ui/api/topics/schema.ts +++ b/ui/api/topics/schema.ts @@ -1,4 +1,4 @@ -import { ApiError } from "@/api/api"; +import { ApiErrorSchema } from "@/api/api"; import { z } from "zod"; export const describeTopicsQuery = encodeURI( @@ -24,7 +24,7 @@ const PartitionSchema = z.object({ nodeId: z.number(), nodeRack: z.string().optional(), inSync: z.boolean(), - localStorage: ApiError.or( + localStorage: ApiErrorSchema.or( z.object({ size: z.number(), offsetLag: z.number(), @@ -65,6 +65,7 @@ const TopicStatusSchema = z.union([ z.literal("UnderReplicated"), z.literal("PartiallyOffline"), z.literal("Offline"), + z.literal("Unknown"), ]); export type TopicStatus = z.infer; const TopicSchema = z.object({ @@ -78,15 +79,16 @@ const TopicSchema = z.object({ status: TopicStatusSchema.optional(), visibility: z.string().optional(), partitions: z.array(PartitionSchema).optional(), - numPartitions: z.number().optional(), + numPartitions: z.number().optional().nullable(), authorizedOperations: z.array(z.string()), configs: ConfigMapSchema, totalLeaderLogBytes: z.number().optional().nullable(), }), relationships: z.object({ consumerGroups: z.object({ + meta: z.record(z.any()).optional(), data: z.array(z.any()), - }).optional(), + }).optional().nullable(), }), }); export const TopicResponse = z.object({ @@ -126,6 +128,7 @@ export const TopicsResponseSchema = z.object({ UnderReplicated: z.number().optional(), PartiallyOffline: z.number().optional(), Offline: z.number().optional(), + Unknown: z.number().optional(), }), totalPartitions: z.number(), }), @@ -139,30 +142,11 @@ export const TopicsResponseSchema = z.object({ data: z.array(TopicListSchema), }); export type TopicsResponse = z.infer; -const TopicCreateResponseSuccessSchema = z.object({ + +export const TopicCreateResponseSchema = z.object({ data: z.object({ id: z.string(), }), }); -export const TopicMutateResponseErrorSchema = z.object({ - errors: z.array( - z.object({ - id: z.string(), - status: z.string(), - code: z.string(), - title: z.string(), - detail: z.string(), - source: z - .object({ - pointer: z.string().optional(), - }) - .optional(), - }), - ), -}); -export const TopicCreateResponseSchema = z.union([ - TopicCreateResponseSuccessSchema, - TopicMutateResponseErrorSchema, -]); -export type TopicMutateError = z.infer; + export 
type TopicCreateResponse = z.infer; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/nodes/[nodeId]/NodeBreadcrumb.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/nodes/[nodeId]/NodeBreadcrumb.tsx index e3f8945b6..419231d67 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/nodes/[nodeId]/NodeBreadcrumb.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/nodes/[nodeId]/NodeBreadcrumb.tsx @@ -3,7 +3,6 @@ import { KafkaNodeParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/nod import { BreadcrumbLink } from "@/components/Navigation/BreadcrumbLink"; import { BreadcrumbItem } from "@/libs/patternfly/react-core"; import { Skeleton } from "@patternfly/react-core"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; export async function NodeBreadcrumb({ @@ -33,13 +32,10 @@ async function ConnectedNodeBreadcrumb({ }: { params: KafkaNodeParams; }) { - const cluster = await getKafkaCluster(kafkaId); - if (!cluster) { - notFound(); - } - const node = cluster.attributes.nodes.find((n) => `${n.id}` === nodeId); - if (!node) { - notFound(); - } - return node.id; + return (await getKafkaCluster(kafkaId))?. + payload?. + attributes. + nodes. + find((n) => `${n.id}` === nodeId)?. + id ?? "-"; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/topics/[topicId]/TopicBreadcrumb.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/topics/[topicId]/TopicBreadcrumb.tsx index 28df70810..08681a6b9 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/topics/[topicId]/TopicBreadcrumb.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@activeBreadcrumb/topics/[topicId]/TopicBreadcrumb.tsx @@ -16,7 +16,7 @@ async function ConnectedTopicBreadcrumb({ }: { params: KafkaTopicParams; }) { - const topic = await getTopic(kafkaId, topicId); + const response = await getTopic(kafkaId, topicId); return [ , - {topic?.attributes.name} + { response.payload?.attributes.name ?? 
topicId } , ]; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/KafkaHeader.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/KafkaHeader.tsx index 7238100f2..719c9bfc8 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/KafkaHeader.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/KafkaHeader.tsx @@ -1,9 +1,7 @@ import { getKafkaCluster } from "@/api/kafka/actions"; -import { getTopics } from "@/api/topics/actions"; import { KafkaParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/kafka.params"; import { AppHeader } from "@/components/AppHeader"; import { Skeleton } from "@patternfly/react-core"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; export function KafkaHeader({ params: { kafkaId } }: { params: KafkaParams }) { @@ -19,10 +17,11 @@ async function ConnectedKafkaHeader({ }: { params: KafkaParams; }) { - const cluster = await getKafkaCluster(kafkaId); - if (!cluster) { - notFound(); + const cluster = (await getKafkaCluster(kafkaId))?.payload; + + if (cluster) { + return ; } - const topics = await getTopics(kafkaId, { pageSize: 1 }); - return ; + + return ; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/consumer-groups/[groupId]/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/consumer-groups/[groupId]/page.tsx index 1bd4d1fcc..63ad86200 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/consumer-groups/[groupId]/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/consumer-groups/[groupId]/page.tsx @@ -24,8 +24,11 @@ async function ConnectedAppHeader({ }: { params: KafkaConsumerGroupMembersParams; }) { - const cg = await getConsumerGroup(kafkaId, groupId); - const disabled = cg.attributes.state !== "EMPTY"; + const disabled = (await getConsumerGroup(kafkaId, groupId))?. + payload?. + attributes. + state !== "EMPTY"; + return
    ; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/[nodeId]/NodeHeader.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/[nodeId]/NodeHeader.tsx index cfcbe6165..c4885645e 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/[nodeId]/NodeHeader.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/[nodeId]/NodeHeader.tsx @@ -1,8 +1,6 @@ -import { getKafkaCluster } from "@/api/kafka/actions"; import { KafkaNodeParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/kafkaNode.params"; import { AppHeader } from "@/components/AppHeader"; import { Skeleton } from "@patternfly/react-core"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; export async function NodeHeader({ @@ -14,7 +12,7 @@ export async function NodeHeader({ }> - + Broker {nodeId} } navigation={ @@ -34,20 +32,3 @@ export async function NodeHeader({ /> ); } - -async function ConnectedNodeHeader({ - params: { kafkaId, nodeId }, -}: { - params: KafkaNodeParams; -}) { - const cluster = await getKafkaCluster(kafkaId); - if (!cluster) { - notFound(); - } - - const node = cluster.attributes.nodes.find((n) => `${n.id}` === nodeId); - if (!node) { - notFound(); - } - return <>Broker {node.id}; -} diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/page.tsx index 3c50b82ad..30a7b20ba 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/nodes/page.tsx @@ -26,7 +26,7 @@ export default function NodesHeader({ params }: { params: KafkaParams }) { } async function ConnectedHeader({ params }: { params: KafkaParams }) { - const cluster = await getKafkaCluster(params.kafkaId); + const cluster = (await getKafkaCluster(params.kafkaId))?.payload; return (
    { try { - const success = await updateKafkaCluster(clusterId, pausedState); + const response = await updateKafkaCluster(clusterId, pausedState); - if (success) { + if (response.errors) { + console.log("Unknown error occurred", response.errors); + } else { setReconciliationPaused(pausedState); setIsModalOpen(false); } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/overview/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/overview/page.tsx index 864b990b9..b2c1c4cf5 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/overview/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/overview/page.tsx @@ -3,7 +3,6 @@ import { KafkaParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/kafka.p import { AppHeader } from "@/components/AppHeader"; import { useTranslations } from "next-intl"; import { getKafkaCluster } from "@/api/kafka/actions"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; export default function Header({ @@ -25,15 +24,12 @@ async function ConnectedHeader({ }: { params: KafkaParams; }) { - const cluster = await getKafkaCluster(kafkaId); - if (!cluster) { - notFound(); - } - + const cluster = (await getKafkaCluster(kafkaId))?.payload; + return ( ); } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/topics/[topicId]/TopicHeader.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/topics/[topicId]/TopicHeader.tsx index 9f789a4c1..ca7ec27d9 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/topics/[topicId]/TopicHeader.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@header/topics/[topicId]/TopicHeader.tsx @@ -1,4 +1,3 @@ -import { getKafkaCluster } from "@/api/kafka/actions"; import { getTopic } from "@/api/topics/actions"; import { KafkaTopicParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/kafkaTopic.params"; import { AppHeader } from "@/components/AppHeader"; @@ -13,7 +12,6 @@ import { Spinner, } from "@/libs/patternfly/react-core"; import { Skeleton } from "@patternfly/react-core"; -import { notFound } from "next/navigation"; import { ReactNode, Suspense } from "react"; export type TopicHeaderProps = { @@ -98,11 +96,14 @@ async function ConnectedTopicHeader({ showRefresh?: boolean; portal: ReactNode; }) { - const cluster = await getKafkaCluster(kafkaId); - if (!cluster) { - notFound(); + const response = await getTopic(kafkaId, topicId); + + if (response.errors) { + return ; } - const topic = await getTopic(cluster.id, topicId); + + const topic = response.payload; + return ( ; + } + + const topics = response.payload!; + return (
    ); diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@modal/topics/[topicId]/delete/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@modal/topics/[topicId]/delete/page.tsx index c02cdea9f..876d172d1 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@modal/topics/[topicId]/delete/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/@modal/topics/[topicId]/delete/page.tsx @@ -1,22 +1,29 @@ import { deleteTopic, getTopic } from "@/api/topics/actions"; import { DeleteTopicModal } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/@modal/topics/[topicId]/delete/DeleteTopicModal"; import { KafkaTopicParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/kafkaTopic.params"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export default async function DeletePage({ params: { kafkaId, topicId }, }: { params: KafkaTopicParams; }) { - const topic = await getTopic(kafkaId, topicId); + const response = await getTopic(kafkaId, topicId); + + if (response.errors) { + return ; + } async function onDelete() { "use server"; await deleteTopic(kafkaId, topicId); } + const topic = response.payload!; + return ( ); diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/ClusterLinks.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/ClusterLinks.tsx index 00fb29196..76afd68c2 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/ClusterLinks.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/ClusterLinks.tsx @@ -40,6 +40,6 @@ export function ClusterLinks({ kafkaId }: { kafkaId: string }) { } async function ClusterName({ kafkaId }: { kafkaId: string }) { - const cluster = await getKafkaCluster(kafkaId); + const cluster = (await getKafkaCluster(kafkaId))?.payload; return cluster?.attributes.name ?? `Cluster ${kafkaId}`; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/ConsumerGroupsTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/ConsumerGroupsTable.tsx index 3b52d9a71..9aa49eeaf 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/ConsumerGroupsTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/ConsumerGroupsTable.tsx @@ -193,30 +193,28 @@ export function ConsumerGroupsTable({ ); case "topics": - const allTopics: { topicId: string; topicName: string }[] = []; + const allTopics: Record = {}; row.attributes.members ?.flatMap((m) => m.assignments ?? []) .forEach((a) => - allTopics.push({ topicId: a.topicId, topicName: a.topicName }), + allTopics[a.topicName] = a.topicId, ); row.attributes.offsets?.forEach((a) => - allTopics.push({ topicId: a.topicId, topicName: a.topicName }), + allTopics[a.topicName] = a.topicId, ); return ( - {Array.from(new Set(allTopics.map((a) => a.topicName))).map( - (topic, idx) => ( + {Object.entries(allTopics).map( + ([topicName, topicId]) => ( t.topicName === topic)!.topicId - }`} + href={topicId ? 
`/kafka/${kafkaId}/topics/${topicId}` : "#"} > - {topic} + {topicName} - ), + ) )} diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/LagTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/LagTable.tsx index c6862f615..a3106651c 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/LagTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/LagTable.tsx @@ -1,4 +1,4 @@ -import { ConsumerGroup } from "@/api/consumerGroups/schema"; +import { OffsetAndMetadata } from "@/api/consumerGroups/schema"; import { Number } from "@/components/Format/Number"; import { ResponsiveTable } from "@/components/Table"; import { Tooltip } from "@/libs/patternfly/react-core"; @@ -12,7 +12,7 @@ export function LagTable({ offsets, }: { kafkaId: string; - offsets: ConsumerGroup["attributes"]["offsets"]; + offsets: OffsetAndMetadata[] | undefined; }) { const t = useTranslations("MemberTable"); diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/MembersTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/MembersTable.tsx index da783a67b..11444a987 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/MembersTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/MembersTable.tsx @@ -1,5 +1,5 @@ "use client"; -import { ConsumerGroup } from "@/api/consumerGroups/schema"; +import { ConsumerGroup, MemberDescription, OffsetAndMetadata } from "@/api/consumerGroups/schema"; import { LagTable } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/LagTable"; import { Number } from "@/components/Format/Number"; import { ResponsiveTable } from "@/components/Table"; @@ -16,7 +16,7 @@ export function MembersTable({ }: { kafkaId: string; consumerGroup?: ConsumerGroup; - refresh?: () => Promise; + refresh?: () => Promise; }) { const t = useTranslations("MemberTable"); const [consumerGroup, setConsumerGroup] = useState(initialData); @@ -25,12 +25,14 @@ export function MembersTable({ if (refresh) { interval = setInterval(async () => { const cg = await refresh(); - setConsumerGroup(cg); + if (cg != null) { + setConsumerGroup(cg); + } }, 5000); } return () => clearInterval(interval); }, [refresh]); - let members: ConsumerGroup["attributes"]["members"] | undefined = undefined; + let members: MemberDescription[] | undefined = undefined; if (consumerGroup) { if (consumerGroup.attributes.members?.length === 0) { @@ -47,7 +49,7 @@ export function MembersTable({ }, ]; } else { - members = consumerGroup.attributes.members; + members = consumerGroup.attributes.members ?? 
[]; } } return ( @@ -130,7 +132,7 @@ export function MembersTable({ return true; }} getExpandedRow={({ row }) => { - const offsets: ConsumerGroup["attributes"]["offsets"] = + const offsets: OffsetAndMetadata[] | undefined = row.assignments?.map((a) => ({ ...a, ...consumerGroup!.attributes.offsets?.find( diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/page.tsx index 91d700364..9f42b2876 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/page.tsx @@ -4,8 +4,8 @@ import { KafkaConsumerGroupMembersParams } from "@/app/[locale]/(authorized)/kaf import { MembersTable } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/MembersTable"; import { KafkaParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/kafka.params"; import { PageSection } from "@/libs/patternfly/react-core"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export async function generateMetadata(props: { params: { kafkaId: string, groupId: string} }) { const t = await getTranslations(); @@ -36,15 +36,18 @@ async function ConnectedMembersTable({ }: { params: KafkaParams & { groupId: string }; }) { + const response = await getConsumerGroup(kafkaId, groupId); + + if (response.errors) { + return ; + } + async function refresh() { "use server"; const res = await getConsumerGroup(kafkaId, groupId); - return res; + return res?.payload ?? null; } - const consumerGroup = await getConsumerGroup(kafkaId, groupId); - if (!consumerGroup) { - notFound(); - } - return ; + const consumerGroup = response.payload!; + return ; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/Dryrun.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/Dryrun.tsx index edf78cd00..c9cce2957 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/Dryrun.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/Dryrun.tsx @@ -25,8 +25,9 @@ import { SidebarPanel, Stack, StackItem, + TextContent, + Text, } from "@/libs/patternfly/react-core"; -import { TextContent, Text } from "@/libs/patternfly/react-core"; import { useTranslations } from "next-intl"; import { DownloadIcon } from "@/libs/patternfly/react-icons"; @@ -34,7 +35,6 @@ export type NewOffset = { topicName: string; partition: number; offset: number | string; - topicId: string; metadata?: string; }; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/ResetConsumerOffset.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/ResetConsumerOffset.tsx index 5891f4410..1fd65b0a5 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/ResetConsumerOffset.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/ResetConsumerOffset.tsx @@ -8,11 +8,7 @@ import { partitionSelection, } from "../types"; import { useRouter } from "@/i18n/routing"; -import { - getDryrunResult, - updateConsumerGroup, -} from "@/api/consumerGroups/actions"; -import { UpdateConsumerGroupErrorSchema } from "@/api/consumerGroups/schema"; +import { updateConsumerGroup } from 
"@/api/consumerGroups/actions"; import { Dryrun } from "./Dryrun"; import { LoadingPage } from "./LoadingPage"; import { ResetOffset } from "./ResetOffset"; @@ -68,7 +64,7 @@ export function ResetConsumerOffset({ const [error, setError] = useState(); - const [newoffsetData, setNewOffsetData] = useState([]); + const [newOffsetData, setNewOffsetData] = useState([]); const [showDryRun, setShowDryRun] = useState(false); @@ -208,13 +204,27 @@ export function ResetConsumerOffset({ const openDryrun = async () => { const uniqueOffsets = generateOffsets(); - const res = await getDryrunResult( + const response = await updateConsumerGroup( kafkaId, consumerGroupName, uniqueOffsets, + true // dryRun ); - setNewOffsetData(res?.attributes?.offsets ?? []); - setShowDryRun(true); + + if (response.payload) { + const res = response.payload; + const offsets: Offset[] = Array.from(res.attributes?.offsets ?? []).map(o => { + return { + topicId: o.topicId!, + topicName: o.topicName, + partition: o.partition, + offset: o.offset, + } + }); + + setNewOffsetData(offsets); + setShowDryRun(true); + } }; const closeDryrun = () => { @@ -240,12 +250,20 @@ export function ResetConsumerOffset({ try { const uniqueOffsets = generateOffsets(); - const success = await updateConsumerGroup( + const response = await updateConsumerGroup( kafkaId, consumerGroupName, uniqueOffsets, ); - if (success === true) { + + if (response.errors) { + const errorMessages = response.errors.map((err) => err.detail); + const errorMessage = + errorMessages.length > 0 + ? errorMessages[0] + : "Failed to update consumer group"; + setError(errorMessage); + } else { closeResetOffset(); addAlert({ title: t("ConsumerGroupsTable.reset_offset_submitted_successfully", { @@ -253,16 +271,6 @@ export function ResetConsumerOffset({ }), variant: "success", }); - } else { - const errorMessages = - (success as UpdateConsumerGroupErrorSchema)?.errors.map( - (err) => err.detail, - ) || []; - const errorMessage = - errorMessages.length > 0 - ? errorMessages[0] - : "Failed to update consumer group"; - setError(errorMessage); } } catch (e: unknown) { setError("Unknown error occurred"); @@ -278,7 +286,7 @@ export function ResetConsumerOffset({ ) : showDryRun ? 
( diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/page.tsx index 7b5619827..da032e3ee 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/reset-offset/page.tsx @@ -3,9 +3,9 @@ import { getConsumerGroup } from "@/api/consumerGroups/actions"; import { KafkaConsumerGroupMembersParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/[groupId]/KafkaConsumerGroupMembers.params"; import { KafkaParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/kafka.params"; import { PageSection } from "@/libs/patternfly/react-core"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; import { ResetConsumerOffset } from "./ResetConsumerOffset"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export async function generateMetadata(props: { params: { kafkaId: string, groupId: string} }) { const t = await getTranslations(); @@ -44,22 +44,38 @@ async function ConnectedResetOffset({ }: { params: KafkaParams & { groupId: string }; }) { - const consumerGroup = await getConsumerGroup(kafkaId, groupId); - if (!consumerGroup) { - notFound(); + const response = await getConsumerGroup(kafkaId, groupId); + + if (response.errors) { + return ; } + const consumerGroup = response.payload!; + const topics = consumerGroup.attributes.offsets?.map((o) => ({ topicId: o.topicId, topicName: o.topicName, partition: o.partition, - })) || []; + })) ?? []; + + const undescribedTopics = topics + .filter((topic) => topic.topicId === undefined) + .map((topic) => topic.topicName); + + if (undescribedTopics.length > 0) { + const distinct = new Set(undescribedTopics); + return ; + } const topicDetails = topics.map((topic) => ({ - topicId: topic.topicId, + topicId: topic.topicId!, topicName: topic.topicName, })); + const partitions = topics.map((t) => t.partition); return ( diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/page.tsx index b809b8dae..a6b566449 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/consumer-groups/page.tsx @@ -10,7 +10,7 @@ import { import { ConsumerGroupState } from "@/api/consumerGroups/schema"; import { ConnectedConsumerGroupTable } from "./ConnectedConsumerGroupTable"; import { stringToInt } from "@/utils/stringToInt"; -import { notFound } from "next/navigation"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export async function generateMetadata() { const t = await getTranslations(); @@ -101,18 +101,19 @@ async function AsyncConsumerGroupTable({ } & KafkaParams) { async function refresh() { "use server"; - const consumerGroup = await getConsumerGroups(kafkaId, { + const consumerGroup = (await getConsumerGroups(kafkaId, { id, sort: sortMap[sort], sortDir, pageSize, pageCursor, state, - }); + }))?.payload; + return consumerGroup?.data ?? 
[]; } - const consumerGroups = await getConsumerGroups(kafkaId, { + const response = await getConsumerGroups(kafkaId, { id, sort: sortMap[sort], sortDir, @@ -121,10 +122,12 @@ async function AsyncConsumerGroupTable({ state, }); - if (!consumerGroups) { - notFound(); + if (response.errors) { + return ; } + const consumerGroups = response.payload!; + const nextPageQuery = consumerGroups.links.next ? new URLSearchParams(consumerGroups.links.next) : undefined; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/layout.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/layout.tsx index 75f24c23c..b2b696192 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/layout.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/layout.tsx @@ -11,8 +11,8 @@ import { getServerSession } from "next-auth"; import { useTranslations } from "next-intl"; import { PropsWithChildren, ReactNode, Suspense } from "react"; import { KafkaParams } from "./kafka.params"; -import { notFound } from "next/navigation"; import { getKafkaCluster } from "@/api/kafka/actions"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export default async function AsyncLayout({ children, @@ -28,13 +28,14 @@ export default async function AsyncLayout({ }>) { const authOptions = await getAuthOptions(); const session = await getServerSession(authOptions); + const response = await getKafkaCluster(kafkaId); - const cluster = await getKafkaCluster(kafkaId); - - if (!cluster) { - notFound(); + if (response.errors) { + return ; } + const cluster = response.payload; + return ( - + - ); + ) : ; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/page.tsx index 04c1bf299..7227a993f 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/page.tsx @@ -32,7 +32,6 @@ function nodeRangeMetric( return parseFloat(range?.[range?.length - 1]?.[1] ?? "0"); } - export default function NodesPage({ params }: { params: KafkaParams }) { return ( @@ -43,9 +42,9 @@ export default function NodesPage({ params }: { params: KafkaParams }) { async function ConnectedNodes({ params }: { params: KafkaParams }) { const t = await getTranslations(); - const cluster = await getKafkaCluster(params.kafkaId, { + const cluster = (await getKafkaCluster(params.kafkaId, { fields: 'name,namespace,creationTimestamp,status,kafkaVersion,nodes,controller,authorizedOperations,listeners,conditions,metrics' - }); + })).payload; const metrics = cluster?.attributes.metrics; const nodes: Node[] = (cluster?.attributes.nodes ?? 
[]).map((node) => { diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/ConnectedRebalancesTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/ConnectedRebalancesTable.tsx index 2336bc797..374411010 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/ConnectedRebalancesTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/ConnectedRebalancesTable.tsx @@ -22,7 +22,7 @@ import { RebalanceStatus, } from "@/api/rebalance/schema"; import { ValidationModal } from "./ValidationModal"; -import { getRebalanceDetails } from "@/api/rebalance/actions"; +import { patchRebalance } from "@/api/rebalance/actions"; import { useAlert } from "@/components/AlertContext"; export type ConnectedReabalancesTableProps = { @@ -141,7 +141,7 @@ export function ConnectedReabalancesTable({ : t("refresh_alert"); const onConfirm = async () => { - await getRebalanceDetails(kafkaId, RebalanceId, approvalStatus); + await patchRebalance(kafkaId, RebalanceId, approvalStatus); setModalOpen(false); addAlert({ title: alertMessage, diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/[rebalanceId]/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/[rebalanceId]/page.tsx index 10ebda27d..ff66dabc4 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/[rebalanceId]/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/[rebalanceId]/page.tsx @@ -3,7 +3,7 @@ import { PageSection } from "@/libs/patternfly/react-core"; import { Suspense } from "react"; import { KafkaRebalanceParams } from "./KafkaRebalance.params"; import { OptimizationProposal } from "./OptimizationProposal"; -import { getRebalanceDetails } from "@/api/rebalance/actions"; +import { getRebalance } from "@/api/rebalance/actions"; export default function OptimizationProposalPage({ params, @@ -44,9 +44,10 @@ async function ConnectedOptimizationProposal({ }: { params: KafkaParams & { rebalanceId: string }; }) { - const rebalanceDetails = await getRebalanceDetails(kafkaId, rebalanceId); + const response = await getRebalance(kafkaId, rebalanceId); - if (rebalanceDetails && typeof rebalanceDetails !== "boolean") { + if (response.payload) { + const rebalanceDetails = response.payload; const { optimizationResult, sessionId } = rebalanceDetails.attributes; return ( diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/page.tsx index abead0946..9ef8338f2 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/nodes/rebalances/page.tsx @@ -7,7 +7,9 @@ import { Suspense } from "react"; import { ConnectedReabalancesTable } from "./ConnectedRebalancesTable"; import { getRebalancesList } from "@/api/rebalance/actions"; import { RebalanceMode, RebalanceStatus } from "@/api/rebalance/schema"; -export const dynamic = "force-dynamic"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; + +//export const dynamic = "force-dynamic"; export async function generateMetadata() { const t = await getTranslations(); @@ -103,7 +105,7 @@ async function AsyncReabalanceTable({ status: RebalanceStatus[] | undefined; mode: RebalanceMode[] | undefined; } & KafkaParams) { - const rebalance = await getRebalancesList(kafkaId, { + const response = await getRebalancesList(kafkaId, { name, sort: sortMap[sort], sortDir, @@ -113,6 +115,12 @@ async 
function AsyncReabalanceTable({ mode, }); + if (response.errors) { + return ; + } + + const rebalance = response.payload!; + const nextPageQuery = rebalance.links.next ? new URLSearchParams(rebalance.links.next) : undefined; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedClusterCard.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedClusterCard.tsx index bf336bbcd..660a6db05 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedClusterCard.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedClusterCard.tsx @@ -1,5 +1,6 @@ import { ConsumerGroupsResponse } from "@/api/consumerGroups/schema"; import { ClusterDetail } from "@/api/kafka/schema"; +import { ApiResponse } from "@/api/api"; import { ClusterCard } from "@/components/ClusterOverview/ClusterCard"; export async function ConnectedClusterCard({ @@ -7,7 +8,7 @@ export async function ConnectedClusterCard({ consumerGroups, }: { cluster: Promise; - consumerGroups: Promise; + consumerGroups: Promise>; }) { const res = await cluster; @@ -27,7 +28,7 @@ export async function ConnectedClusterCard({ ); } const groupCount = await consumerGroups.then( - (grpResp) => grpResp?.meta.page.total ?? 0, + (grpResp) => grpResp.errors ? undefined : grpResp.payload?.meta.page.total ?? 0, ); const brokersTotal = res?.attributes.metrics?.values?.["broker_state"]?.length ?? 0; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedTopicsPartitionsCard.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedTopicsPartitionsCard.tsx index 4b8b6edea..f58e90518 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedTopicsPartitionsCard.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/ConnectedTopicsPartitionsCard.tsx @@ -1,21 +1,19 @@ +import { ApiResponse } from "@/api/api"; import { TopicsResponse } from "@/api/topics/schema"; import { TopicsPartitionsCard } from "@/components/ClusterOverview/TopicsPartitionsCard"; export async function ConnectedTopicsPartitionsCard({ data, }: { - data: Promise; + data: Promise>; }) { - const summary = (await data).meta.summary; - - if (!summary) { - return null; - } - - const totalPartitions = summary.totalPartitions; - const totalReplicated = summary.statuses.FullyReplicated ?? 0; - const totalUnderReplicated = (summary.statuses.UnderReplicated ?? 0) + (summary.statuses.PartiallyOffline ?? 0); - const totalOffline = summary.statuses.Offline ?? 0; + const summary = (await data).payload?.meta?.summary; + const totalPartitions = summary?.totalPartitions ?? 0; + const totalReplicated = summary?.statuses.FullyReplicated ?? 0; + const totalUnderReplicated = (summary?.statuses.UnderReplicated ?? 0) + (summary?.statuses.PartiallyOffline ?? 0); + const totalOffline = summary?.statuses.Offline ?? 0; + const totalUnknown = summary?.statuses.Unknown ?? 
0; + const errors = (await data).errors; return ( ); } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/page.tsx index a2860d520..631360e11 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/overview/page.tsx @@ -18,10 +18,11 @@ export async function generateMetadata() { }; } -export default function OverviewPage({ params }: { params: KafkaParams }) { +export default async function OverviewPage({ params }: { params: KafkaParams }) { const kafkaCluster = getKafkaCluster(params.kafkaId, { fields: 'name,namespace,creationTimestamp,status,kafkaVersion,nodes,controller,authorizedOperations,listeners,conditions,metrics' - }); + }).then(r => r.payload ?? null); + const topics = getTopics(params.kafkaId, { fields: "status", pageSize: 1 }); const consumerGroups = getConsumerGroups(params.kafkaId, { fields: "state" }); const viewedTopics = getViewedTopics().then((topics) => diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/page.tsx index 2d31d7503..7b5dc8be1 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/page.tsx @@ -1,7 +1,7 @@ import { KafkaParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/kafka.params"; import { redirect } from "@/i18n/routing"; -export const dynamic = "force-dynamic"; +//export const dynamic = "force-dynamic"; export default function KafkaRoot({ params }: { params: KafkaParams }) { redirect(`/kafka/${params.kafkaId}/overview`); diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/(page)/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/(page)/page.tsx index 9e7d88d57..18be7aebe 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/(page)/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/(page)/page.tsx @@ -10,8 +10,9 @@ import { PageSection } from "@/libs/patternfly/react-core"; import { stringToInt } from "@/utils/stringToInt"; import { Suspense } from "react"; import { ConnectedTopicsTable } from "./ConnectedTopicsTable"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; -export const dynamic = "force-dynamic"; +//export const dynamic = "force-dynamic"; export async function generateMetadata() { const t = await getTranslations(); @@ -111,7 +112,7 @@ async function AsyncTopicsTable({ includeHidden: boolean; status: TopicStatus[] | undefined; } & KafkaParams) { - const topics = await getTopics(kafkaId, { + const response = await getTopics(kafkaId, { id, name, sort: sortMap[sort], @@ -122,6 +123,11 @@ async function AsyncTopicsTable({ status, }); + if (response.errors) { + return ; + } + + const topics = response.payload!; const nextPageCursor = topics.links.next ? 
`after:${new URLSearchParams(topics.links.next).get("page[after]")}` : undefined; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/ConfigTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/ConfigTable.tsx index 88a05120f..5b7981d93 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/ConfigTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/ConfigTable.tsx @@ -1,6 +1,7 @@ "use client"; -import { Topic, TopicMutateError } from "@/api/topics/schema"; -import { Error } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors"; +import { ApiResponse, ApiError } from "@/api/api"; +import { Topic } from "@/api/topics/schema"; +import { Errors } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors"; import { topicMutateErrorToFieldError } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/topicMutateErrorToFieldError"; import { Number } from "@/components/Format/Number"; import { ResponsiveTableProps, TableView } from "@/components/Table"; @@ -35,7 +36,7 @@ export function ConfigTable({ }: { topic: Topic | undefined; onSaveProperty: - | ((name: string, value: string) => Promise) + | ((name: string, value: string) => Promise>) | undefined; }) { const t = useTranslations(); @@ -113,11 +114,9 @@ export function ConfigTable({ Record >({}); const [options, setOptions] = useState>({}); - const [error, setError] = useState( - undefined, - ); + const [errors, setErrors] = useState(); const fieldError = topicMutateErrorToFieldError( - error, + errors, true, Object.keys(topic?.attributes.configs || {}), ); @@ -222,12 +221,12 @@ export function ConfigTable({ return ( <> - {error && !fieldError && ( + {errors && !fieldError && ( - + )} ({ ...isEditing, - [name]: undefined, + [name]: "editing", })); } else { - if (res !== false) { - setError(res); - } else { - setError("unknown"); - } setIsEditing((isEditing) => ({ ...isEditing, - [name]: "editing", + [name]: undefined, })); } }} diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/page.tsx index cac8ced21..06cd53aaa 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/configuration/page.tsx @@ -1,11 +1,12 @@ import { getTranslations } from "next-intl/server"; +import { ApiResponse } from "@/api/api"; import { getTopic, updateTopic } from "@/api/topics/actions"; import { KafkaTopicParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/kafkaTopic.params"; import { PageSection } from "@/libs/patternfly/react-core"; -import { redirect } from "@/i18n/routing"; import { isReadonly } from "@/utils/env"; import { Suspense } from "react"; import { ConfigTable } from "./ConfigTable"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export async function generateMetadata() { const t = await getTranslations(); @@ -36,19 +37,22 @@ async function ConnectedTopicConfiguration({ }: { params: KafkaTopicParams; }) { - const topic = await getTopic(kafkaId, topicId); + const response = await getTopic(kafkaId, topicId); - if (!topic) { - redirect(`/kafka/${kafkaId}`); - return null; + if (response.errors) { + return ; } + const topic = response.payload!; + async function onSaveProperty(name: string, value: string) { "use 
server"; + if (isReadonly) { // silently ignore attempt to change a property value in read-only mode - return true; + return Promise.resolve({ payload: undefined } as ApiResponse); } + return updateTopic(kafkaId, topicId, undefined, undefined, { [name]: { value, diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/ConsumerGroupsTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/ConsumerGroupsTable.tsx index ea59f4932..6c0ce1ca3 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/ConsumerGroupsTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/ConsumerGroupsTable.tsx @@ -20,8 +20,8 @@ export function ConsumerGroupsTable({ kafkaId: string; page: number; total: number; - consumerGroups: ConsumerGroup[] | undefined; - refresh: (() => Promise) | undefined; + consumerGroups?: ConsumerGroup[]; + refresh?: (() => Promise); }) { const t = useTranslations(); const [consumerGroups, setConsumerGroups] = useState(initialData); @@ -30,7 +30,9 @@ export function ConsumerGroupsTable({ if (refresh) { interval = setInterval(async () => { const consumerGroups = await refresh(); - setConsumerGroups(consumerGroups); + if (consumerGroups != null) { + setConsumerGroups(consumerGroups); + } }, 5000); } return () => clearInterval(interval); diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/page.tsx index 7e869101d..d25767cb4 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/consumer-groups/page.tsx @@ -3,8 +3,8 @@ import { getTopicConsumerGroups } from "@/api/consumerGroups/actions"; import { ConsumerGroupsTable } from "./ConsumerGroupsTable"; import { KafkaTopicParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/kafkaTopic.params"; import { PageSection } from "@/libs/patternfly/react-core"; -import { notFound } from "next/navigation"; import { Suspense } from "react"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export async function generateMetadata() { const t = await getTranslations(); @@ -34,8 +34,6 @@ export default function ConsumerGroupsPage({ kafkaId={kafkaId} page={1} total={0} - consumerGroups={undefined} - refresh={undefined} /> } > @@ -63,17 +61,21 @@ async function ConnectedConsumerGroupsPage({ async function refresh() { "use server"; const res = await getTopicConsumerGroups(kafkaId, topicId, searchParams); - return res.data; + return res.payload?.data ?? 
null; } - const consumerGroups = await getTopicConsumerGroups( + const response = await getTopicConsumerGroups( kafkaId, topicId, searchParams, ); - if (!consumerGroups) { - notFound(); + + if (response.errors) { + return ; } + + const consumerGroups = response.payload!; + return ( = { + payload: undefined, + errors: undefined, + timestamp: new Date(0), +}; + export function ConnectedMessagesTable({ kafkaId, topicId, @@ -41,21 +43,17 @@ export function ConnectedMessagesTable({ }) { const [params, sp] = useParseSearchParams(); const updateUrl = useFilterParams(sp); - const router = useRouter(); const { limit, partition, query, where, offset, timestamp, epoch, _ } = params; const [selectedMessage, setOptimisticSelectedMessage] = useOptimistic< Message | undefined >(serverSelectedMessage); - const [{ messages, ts, error }, setMessages] = - useState({ - messages: undefined, - ts: undefined, - }); + const [messages, setMessages] = useState(EMPTY_MESSAGES); function onSearch({ query, from, limit, partition }: SearchParams) { - setMessages({ messages: undefined, ts: undefined, error: undefined }); + setMessages(EMPTY_MESSAGES); + const newQuery = { query: query?.value, where: query?.where, @@ -105,23 +103,23 @@ export function ConnectedMessagesTable({ return undefined; })(); - const { - messages: newMessages = [], - ts, - error, - } = await getTopicMessages(kafkaId, topicId, { + const response = await getTopicMessages(kafkaId, topicId, { pageSize: limit === "continuously" ? 50 : (limit ?? 50), query, where, partition, filter, }); - if (error) { - setMessages({ messages: newMessages, ts, error }); + + if (response.errors) { + setMessages({ + errors: response.errors, + timestamp: response.timestamp, + }); } else { setMessages({ - messages: newMessages, - ts, + payload: response.payload ?? undefined, + timestamp: response.timestamp, }); } }, @@ -145,23 +143,24 @@ export function ConnectedMessagesTable({ _, // when clicking search multiple times, the search parameters remain the same but a timestamp is added to _. We listen for changes to _ to know we have to trigger a new fetch ]); - const onUpdates = useCallback((newMessages: Message[], ts: Date) => { + const onUpdates = useCallback((newMessages: ApiResponse) => { startTransition(() => - setMessages(({ messages = [] }) => { - const messagesToAdd = newMessages.filter( + setMessages((prevMessages) => { + const messagesToAdd = newMessages.payload?.filter( (m) => - !messages.find( + !prevMessages.payload?.find( (m2) => m2.attributes.offset === m.attributes.offset && m2.attributes.partition === m.attributes.partition, ), - ); + ) ?? []; return { - messages: Array.from(new Set([...messagesToAdd, ...messages])).slice( + payload: Array.from(new Set([...messagesToAdd, ...prevMessages.payload ?? 
[]])).slice( 0, 100, ), - ts, + errors: newMessages.errors, + timestamp: newMessages.timestamp, }; }), ); @@ -170,7 +169,9 @@ export function ConnectedMessagesTable({ const isFiltered = partition || epoch || offset || timestamp || query; switch (true) { - case messages === undefined: + case messages.errors !== undefined: + return ; + case messages.payload === undefined: return ( ); - case !isFiltered && messages && messages?.length === 0: + case !isFiltered && messages.payload && messages.payload?.length === 0: return ; - case error === "topic-not-found": - return ( - - router.push("../")} /> - - ); default: return ( <> void; + onUpdates: (response: ApiResponse) => void; }) { const previousTs = useRef(new Date().toISOString()); const isFetching = useRef(false); @@ -251,7 +246,7 @@ function Refresher({ let t: ReturnType | undefined; async function appendMessages() { - const { messages: newMessages = [], error } = await getTopicMessages( + const response = await getTopicMessages( kafkaId, topicId, { @@ -265,39 +260,28 @@ function Refresher({ }, }, ); - if (!error) { - const sortedMessages = newMessages - .sort( + if (!response.errors) { + const sortedMessages = response.payload + ?.sort( (a, b) => new Date(b.attributes.timestamp).getTime() - new Date(a.attributes.timestamp).getTime(), ) - .sort((a, b) => b.attributes.offset - a.attributes.offset); + .sort((a, b) => b.attributes.offset - a.attributes.offset) ?? []; return { - messages: sortedMessages, - ts: sortedMessages[0]?.attributes.timestamp, + payload: sortedMessages, + timestamp: response.timestamp, }; } } async function tick() { - // console.log("tick", { - // ts: Date.now(), - // fetching: isFetching.current, - // kafkaId, - // topicId, - // partition, - // query, - // where, - // }); if (!isFetching.current && !isPaused) { isFetching.current = true; const res = await appendMessages(); if (!isPaused && res) { - if (res.ts) { - previousTs.current = res.ts; - } - onUpdates(res.messages, new Date()); + previousTs.current = res.timestamp.toISOString(); + onUpdates(res); } isFetching.current = false; } @@ -307,13 +291,6 @@ function Refresher({ void tick(); return () => { - // console.log("destroy", { - // kafkaId, - // topicId, - // partition, - // query, - // where, - // }); clearInterval(t); t = undefined; }; diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/messages/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/messages/page.tsx index ea3758e8d..9799129bc 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/messages/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/messages/page.tsx @@ -2,12 +2,12 @@ import { getTranslations } from "next-intl/server"; import { getTopicMessage } from "@/api/messages/actions"; import { getTopic } from "@/api/topics/actions"; import { KafkaTopicParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/kafkaTopic.params"; -import { redirect } from "@/i18n/routing"; import { ConnectedMessagesTable } from "./ConnectedMessagesTable"; import { MessagesSearchParams, parseSearchParams } from "./parseSearchParams"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; -export const revalidate = 0; -export const dynamic = "force-dynamic"; +//export const revalidate = 0; +//export const dynamic = "force-dynamic"; export async function generateMetadata() { const t = await getTranslations(); @@ -24,11 +24,13 @@ export default async function ConnectedMessagesPage({ params: KafkaTopicParams; 
searchParams: MessagesSearchParams; }) { - const topic = await getTopic(kafkaId, topicId); - if (!topic) { - redirect(`/kafka/${kafkaId}`); - return null; + const response = await getTopic(kafkaId, topicId); + + if (response.errors) { + return ; } + + const topic = response.payload!; const { selectedOffset, selectedPartition } = parseSearchParams(searchParams); const selectedMessage = @@ -36,7 +38,7 @@ export default async function ConnectedMessagesPage({ ? await getTopicMessage(kafkaId, topicId, { offset: selectedOffset, partition: selectedPartition, - }) + }).then(resp => resp.payload ?? undefined) : undefined; return ( diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/PartitionsTable.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/PartitionsTable.tsx index 88e2119b7..4b303f86c 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/PartitionsTable.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/PartitionsTable.tsx @@ -85,13 +85,18 @@ export function PartitionsTable({ sort: (typeof SortColumns)[number]; dir: "asc" | "desc"; }>({ sort: "id", dir: "asc" }); + useEffect(() => { let interval: ReturnType; + if (initialData) { interval = setInterval(async () => { - const topic = await getTopic(kafkaId, initialData.id); - if (topic) { - setTopic(topic); + const response = await getTopic(kafkaId, initialData.id); + + if (response.errors) { + console.warn("Failed to reload topic", { kafkaId, topicId: initialData.id }); + } else { + setTopic(response.payload!); } }, 30000); } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/page.tsx index c547f0342..f49fbf2f5 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/[topicId]/partitions/page.tsx @@ -1,9 +1,9 @@ import { getTranslations } from "next-intl/server"; import { getTopic } from "@/api/topics/actions"; import { KafkaTopicParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/kafkaTopic.params"; -import { redirect } from "@/i18n/routing"; import { Suspense } from "react"; import { PartitionsTable } from "./PartitionsTable"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export async function generateMetadata() { const t = await getTranslations(); @@ -28,10 +28,12 @@ export default function PartitionsPage({ } async function ConnectedPartitions({ kafkaId, topicId }: KafkaTopicParams) { - const topic = await getTopic(kafkaId, topicId); - if (!topic) { - redirect(`/kafka/${kafkaId}`); - return null; + const response = await getTopic(kafkaId, topicId); + + if (response.errors) { + return ; } + + const topic = response.payload!; return ; } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/CreateTopic.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/CreateTopic.tsx index 01aa10f23..5787ea40d 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/CreateTopic.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/CreateTopic.tsx @@ -1,9 +1,10 @@ "use client"; + +import { ApiResponse, ApiError } from "@/api/api"; import { ConfigMap, NewConfigMap, TopicCreateResponse, - TopicMutateError, } from "@/api/topics/schema"; import { StepDetails } from 
"@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepDetails"; import { StepOptions } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepOptions"; @@ -36,7 +37,7 @@ export function CreateTopic({ replicas: number, options: NewConfigMap, validateOnly: boolean, - ) => Promise; + ) => Promise>; }) { const t = useTranslations(); const router = useRouter(); @@ -46,24 +47,28 @@ export function CreateTopic({ const [options, setOptions] = useState({}); const [pending, startTransition] = useTransition(); const [loading, setLoading] = useState(false); - const [error, setError] = useState( + const [errors, setErrors] = useState( undefined, ); const save = useCallback(async () => { try { setLoading(true); - setError(undefined); - const result = await onSave(name, partitions, replicas, options, false); + setErrors(undefined); + const response = await onSave(name, partitions, replicas, options, false); + startTransition(() => { - if ("errors" in result) { - setError(result); + if (response.errors) { + setErrors(response.errors); } else { - router.push(`/kafka/${kafkaId}/topics/${result.data.id}`); + router.push(`/kafka/${kafkaId}/topics/${response.payload?.data.id}`); } }); } catch (e: unknown) { - setError("unknown"); + setErrors([{ + title: "Unknown error", + detail: String(e), + }]); } finally { setLoading(false); } @@ -73,17 +78,20 @@ export function CreateTopic({ async (success: () => void) => { try { setLoading(true); - setError(undefined); - const result = await onSave(name, partitions, replicas, options, true); + setErrors(undefined); + const response = await onSave(name, partitions, replicas, options, true); startTransition(() => { - if ("errors" in result) { - setError(result); + if (response.errors) { + setErrors(response.errors); } else { success(); } }); } catch (e: unknown) { - setError("unknown"); + setErrors([{ + title: "Unknown error", + detail: String(e), + }]); } finally { setLoading(false); } @@ -91,7 +99,7 @@ export function CreateTopic({ [name, onSave, options, partitions, replicas], ); - const formInvalid = error !== undefined; + const formInvalid = errors !== undefined; return ( @@ -115,7 +123,7 @@ export function CreateTopic({ onNameChange={setName} onPartitionsChange={setPartitions} onReplicasChange={setReplicas} - error={error} + errors={errors} /> validate(success)} loading={pending || loading} primaryLabel={t("CreateTopic.next")} @@ -135,7 +143,7 @@ export function CreateTopic({ options={options} initialOptions={initialOptions} onChange={setOptions} - error={error} + errors={errors} /> diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors.tsx index 5e7e6c6b2..962423462 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors.tsx @@ -1,9 +1,9 @@ -import { TopicMutateError } from "@/api/topics/schema"; +import { ApiError } from "@/api/api"; import { Alert, Text, TextContent } from "@patternfly/react-core"; -export function Error({ error }: { error: TopicMutateError | "unknown" }) { - return error !== "unknown" ? ( - error.errors.map((e, idx) => ( +export function Errors({ errors }: { errors: ApiError[] | undefined }) { + return errors !== undefined ? 
( + errors.map((e, idx) => ( {e.detail} diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepDetails.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepDetails.tsx index 3fdc6db90..c9b721e2e 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepDetails.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepDetails.tsx @@ -1,4 +1,4 @@ -import { TopicMutateError } from "@/api/topics/schema"; +import { ApiError } from "@/api/api"; import { FieldName } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/FieldName"; import { FieldPartitions } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/FieldPartitions"; import { FieldReplicas } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/FieldReplicas"; @@ -15,7 +15,7 @@ export function StepDetails({ onNameChange, onPartitionsChange, onReplicasChange, - error, + errors, }: { name: string; partitions: number; @@ -24,7 +24,7 @@ export function StepDetails({ onNameChange: (name: string) => void; onPartitionsChange: (name: number) => void; onReplicasChange: (name: number) => void; - error: TopicMutateError | "unknown" | undefined; + errors: ApiError[] | undefined; }) { const nameInvalid = { length: name.trim().length < 3, @@ -33,7 +33,7 @@ export function StepDetails({ }; const partitionsInvalid = partitions <= 0; const replicasInvalid = replicas <= 0 || replicas > maxReplicas; - const fieldError = topicMutateErrorToFieldError(error, false, [ + const fieldError = topicMutateErrorToFieldError(errors, false, [ "name", "numPartitions", "replicationFactor", diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepOptions.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepOptions.tsx index 62aafab42..256ee3077 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepOptions.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepOptions.tsx @@ -1,6 +1,7 @@ -import { ConfigMap, NewConfigMap, TopicMutateError } from "@/api/topics/schema"; +import { ApiError } from "@/api/api"; +import { ConfigMap, NewConfigMap } from "@/api/topics/schema"; import { ConfigTable } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/ConfigTable"; -import { Error } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors"; +import { Errors } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors"; import { topicMutateErrorToFieldError } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/topicMutateErrorToFieldError"; import { Text, TextContent, Title } from "@patternfly/react-core"; import { useTranslations } from "next-intl"; @@ -9,22 +10,22 @@ export function StepOptions({ options, initialOptions, onChange, - error, + errors, }: { options: NewConfigMap; initialOptions: Readonly; onChange: (options: NewConfigMap) => void; - error: TopicMutateError | "unknown" | undefined; + errors: ApiError[] | undefined; }) { const t = useTranslations(); const fieldError = topicMutateErrorToFieldError( - error, + errors, true, Object.keys(initialOptions), ); return ( <> - {error && !fieldError && } + {errors && !fieldError && } {t("CreateTopic.step_option_title")} {t("CreateTopic.step_option_description")} diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepReview.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepReview.tsx index 493b0eb0d..e056632b3 100644 --- 
a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepReview.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/StepReview.tsx @@ -1,5 +1,6 @@ -import { ConfigMap, NewConfigMap, TopicMutateError } from "@/api/topics/schema"; -import { Error } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors"; +import { ApiError } from "@/api/api"; +import { ConfigMap, NewConfigMap } from "@/api/topics/schema"; +import { Errors } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/Errors"; import { ReviewTable } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/ReviewTable"; import { Number } from "@/components/Format/Number"; import { @@ -21,14 +22,14 @@ export function StepReview({ replicas, options, initialOptions, - error, + errors, }: { name: string; partitions: number; replicas: number; options: NewConfigMap; initialOptions: ConfigMap; - error: TopicMutateError | "unknown" | undefined; + errors: ApiError[] | undefined; }) { const t = useTranslations(); const optionEntries = Object.entries(options); @@ -40,7 +41,7 @@ export function StepReview({ {t("CreateTopic.topic_details")} - {error && } + {errors && } diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/page.tsx b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/page.tsx index cb1f8a44a..8dde2cdee 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/page.tsx @@ -5,7 +5,7 @@ import { KafkaParams } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/kafka.p import { CreateTopic } from "@/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/CreateTopic"; import { redirect } from "@/i18n/routing"; import { isReadonly } from "@/utils/env"; -import { notFound } from "next/navigation"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; export default async function CreateTopicPage({ params: { kafkaId }, @@ -13,14 +13,18 @@ export default async function CreateTopicPage({ params: KafkaParams; }) { if (isReadonly) { - redirect(`/kafka/${kafkaId}`); + redirect(`/kafka/${kafkaId}/topics`); return; } - const cluster = await getKafkaCluster(kafkaId); - if (!cluster) { - return notFound(); + + const response = (await getKafkaCluster(kafkaId)); + + if (response.errors) { + return ; } + const cluster = response.payload!; + async function onSave( name: string, partitions: number, diff --git a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/topicMutateErrorToFieldError.ts b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/topicMutateErrorToFieldError.ts index 88f839ab0..a5a4d2f96 100644 --- a/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/topicMutateErrorToFieldError.ts +++ b/ui/app/[locale]/(authorized)/kafka/[kafkaId]/topics/create/topicMutateErrorToFieldError.ts @@ -1,18 +1,22 @@ -import { TopicMutateError } from "@/api/topics/schema"; +import { ApiError } from "@/api/api"; export function topicMutateErrorToFieldError( - error: TopicMutateError | "unknown" | undefined, + errors: ApiError[] | undefined, isConfig: boolean, fields: string[], ) { - if (error && error !== "unknown" && error.errors.length > 0) { - const field = error.errors[0].source?.pointer?.split("/")[isConfig ? 4 : 3]; - if (field && fields.includes(field)) { - return { - field, - error: error.errors[0].detail, - }; - } + if (errors) { + const fieldErrors = errors.map(error => { + const field = error.source?.pointer?.split("/")[isConfig ? 
4 : 3]; + if (field && fields.includes(field)) { + return { + field, + error: error.detail, + }; + } + }); + + return fieldErrors.length > 0 ? fieldErrors[0] : undefined } return undefined; } diff --git a/ui/app/[locale]/(authorized)/kafka/page.tsx b/ui/app/[locale]/(authorized)/kafka/page.tsx index 892b4f4c5..d18be9638 100644 --- a/ui/app/[locale]/(authorized)/kafka/page.tsx +++ b/ui/app/[locale]/(authorized)/kafka/page.tsx @@ -1,5 +1,3 @@ -import { getKafkaClusters } from "@/api/kafka/actions"; -import { RedirectOnLoad } from "@/components/Navigation/RedirectOnLoad"; import { redirect } from "@/i18n/routing"; export default function Page({}) { diff --git a/ui/app/[locale]/(public)/(home)/page.tsx b/ui/app/[locale]/(public)/(home)/page.tsx index 35806a74f..c2a24f9e8 100644 --- a/ui/app/[locale]/(public)/(home)/page.tsx +++ b/ui/app/[locale]/(public)/(home)/page.tsx @@ -30,6 +30,12 @@ import { isProductizedBuild } from "@/utils/env"; import { getTranslations } from "next-intl/server"; import { Suspense } from "react"; import styles from "./home.module.css"; +import config from '@/utils/config'; +import { logger } from "@/utils/logger"; +import { getAuthOptions } from "@/app/api/auth/[...nextauth]/auth-options"; +import { getServerSession } from "next-auth"; + +const log = logger.child({ module: "home" }); export async function generateMetadata() { const t = await getTranslations(); @@ -41,16 +47,30 @@ export async function generateMetadata() { export default async function Home() { const t = await getTranslations(); - const allClusters = await getKafkaClusters(); + log.trace("fetching known Kafka clusters...") + const allClusters = (await getKafkaClusters())?.payload ?? []; + log.trace(`fetched ${allClusters.length ?? 0} Kafka clusters`) const productName = t("common.product"); const brand = t("common.brand"); + log.trace("fetching configuration") + let cfg = await config(); + log.trace(`fetched configuration: ${cfg ? 'yes' : 'no'}`); + let oidcCfg = cfg?.security?.oidc ?? null; + let oidcEnabled = !!oidcCfg; + let username: string | undefined; + + if (oidcEnabled) { + const authOptions = await getAuthOptions(); + const session = await getServerSession(authOptions); + username = (session?.user?.name ?? session?.user?.email) ?? undefined; + } - if (allClusters.length === 1) { + if (allClusters.length === 1 && !oidcEnabled) { return ; } return ( - +
    @@ -84,8 +104,8 @@ export default async function Home() { - }> - + }> + diff --git a/ui/app/[locale]/(public)/kafka/[kafkaId]/login/page.tsx b/ui/app/[locale]/(public)/kafka/[kafkaId]/login/page.tsx index 62a41118e..9138c5d5f 100644 --- a/ui/app/[locale]/(public)/kafka/[kafkaId]/login/page.tsx +++ b/ui/app/[locale]/(public)/kafka/[kafkaId]/login/page.tsx @@ -18,7 +18,8 @@ export default async function SignIn({ searchParams?: { callbackUrl?: string }; params: { kafkaId?: string }; }) { - const clusters = await getKafkaClusters(); + const clusters = (await getKafkaClusters())?.payload ?? []; + const cluster = clusters.find((c) => c.id === params.kafkaId); if (cluster) { const authMethod = cluster.meta.authentication; diff --git a/ui/app/[locale]/layout.tsx b/ui/app/[locale]/layout.tsx index 99013a6cb..7953b5d78 100644 --- a/ui/app/[locale]/layout.tsx +++ b/ui/app/[locale]/layout.tsx @@ -26,7 +26,3 @@ export async function generateMetadata({ title: t("title"), }; } - -// export function generateStaticParams() { -// return [{ locale: "en" }]; -// } diff --git a/ui/app/api/auth/[...nextauth]/anonymous.ts b/ui/app/api/auth/[...nextauth]/anonymous.ts index a8e95b652..55a29c5dd 100644 --- a/ui/app/api/auth/[...nextauth]/anonymous.ts +++ b/ui/app/api/auth/[...nextauth]/anonymous.ts @@ -1,4 +1,3 @@ -import { AuthOptions } from "next-auth"; import CredentialsProvider from "next-auth/providers/credentials"; import { Provider } from "next-auth/providers/index"; diff --git a/ui/app/api/auth/[...nextauth]/auth-options.ts b/ui/app/api/auth/[...nextauth]/auth-options.ts index 5c70fcf43..5aa089cc1 100644 --- a/ui/app/api/auth/[...nextauth]/auth-options.ts +++ b/ui/app/api/auth/[...nextauth]/auth-options.ts @@ -1,11 +1,13 @@ import { getKafkaClusters } from "@/api/kafka/actions"; import { ClusterList } from "@/api/kafka/schema"; import { logger } from "@/utils/logger"; -import { AuthOptions } from "next-auth"; +import { AuthOptions, Session } from "next-auth"; +import { JWT } from "next-auth/jwt"; import { Provider } from "next-auth/providers/index"; import { makeAnonymous } from "./anonymous"; import { makeOauthTokenProvider } from "./oauth-token"; import { makeScramShaProvider } from "./scram"; +import oidcSource from "./oidc"; const log = logger.child({ module: "auth" }); @@ -24,25 +26,49 @@ function makeAuthOption(cluster: ClusterList): Provider { } export async function getAuthOptions(): Promise { - // retrieve the authentication method required by the default Kafka cluster - const clusters = await getKafkaClusters(); - const providers = clusters.map(makeAuthOption); - log.trace({ providers }, "getAuthOptions"); - return { - providers, - callbacks: { - async jwt({ token, user }) { - if (user) { - token.authorization = user.authorization; - } - return token; + let providers: Provider[]; + log.trace("fetching the oidcSource"); + let oidc = await oidcSource(); + + if (oidc.isEnabled()) { + log.debug("OIDC is enabled"); + providers = [ oidc.provider! ]; + return { + providers, + pages: { + signIn: "/api/auth/oidc/signin", }, - async session({ session, token, user }) { - // Send properties to the client, like an access_token and user id from a provider. 
- session.authorization = token.authorization; + callbacks: { + async jwt({ token, account }: { token: JWT, account: any }) { + return oidc.jwt({ token, account }); + }, + async session({ session, token }: { session: Session, token: JWT }) { + return oidc.session({ session, token }); + } + } + } + } else { + log.debug("OIDC is disabled"); + // retrieve the authentication method required by the default Kafka cluster + const clusters = (await getKafkaClusters(true))?.payload ?? []; + providers = clusters.map(makeAuthOption); + log.trace({ providers }, "getAuthOptions"); + return { + providers, + callbacks: { + async jwt({ token, user }) { + if (user) { + token.authorization = user.authorization; + } + return token; + }, + async session({ session, token, user }) { + // Send properties to the client, like an access_token and user id from a provider. + session.authorization = token.authorization; - return session; + return session; + }, }, - }, - }; -} \ No newline at end of file + }; + } +} diff --git a/ui/app/api/auth/[...nextauth]/keycloak.ts b/ui/app/api/auth/[...nextauth]/keycloak.ts deleted file mode 100644 index 957868765..000000000 --- a/ui/app/api/auth/[...nextauth]/keycloak.ts +++ /dev/null @@ -1,144 +0,0 @@ -import { logger } from "@/utils/logger"; -import { AuthOptions, Session, TokenSet } from "next-auth"; -import { JWT } from "next-auth/jwt"; -import { Provider } from "next-auth/providers/index"; -import KeycloakProvider from "next-auth/providers/keycloak"; - -const log = logger.child({ module: "keycloak" }); - -export function makeOauthProvider( - clientId: string, - clientSecret: string, - issuer: string, -): Provider { - const provider = KeycloakProvider({ - clientId, - clientSecret, - issuer, - }); - - let _tokenEndpoint: string | undefined = undefined; - - async function getTokenEndpoint() { - if (provider && provider.wellKnown) { - const kc = await fetch(provider.wellKnown); - const res = await kc.json(); - _tokenEndpoint = res.token_endpoint; - } - return _tokenEndpoint; - } - - async function refreshToken(token: JWT): Promise { - try { - const tokenEndpoint = await getTokenEndpoint(); - if (!provider) { - log.error("Invalid Keycloak configuratio"); - throw token; - } - if (!tokenEndpoint) { - log.error("Invalid Keycloak wellKnow"); - throw token; - } - let tokenExpiration = new Date( - (typeof token?.expires_at === "number" ? token.expires_at : 0) * 1000, - ); - log.trace({ tokenExpiration }, "Token expiration"); - - if (Date.now() < tokenExpiration.getTime()) { - log.trace(token, "Token not yet expired"); - return token; - } else { - log.trace(token, "Token has expired"); - let refresh_token = - typeof token.refresh_token === "string" ? token.refresh_token : ""; - - const params = { - client_id: provider.options!.clientId, - client_secret: provider.options!.clientSecret, - grant_type: "refresh_token", - refresh_token: refresh_token, - }; - - log.trace( - { - url: tokenEndpoint, - }, - "Refreshing token", - ); - - const response = await fetch(tokenEndpoint, { - headers: { "Content-Type": "application/x-www-form-urlencoded" }, - body: new URLSearchParams(params), - method: "POST", - }); - - const refreshToken: TokenSet = await response.json(); - if (!response.ok) { - throw new Error(response.statusText); - } - log.trace(refreshToken, "Got refresh token"); - - let expires_in = - typeof refreshToken.expires_in === "number" - ? 
refreshToken.expires_in - : -1; - - const newToken: JWT = { - ...token, // Keep the previous token properties - access_token: refreshToken.access_token, - expires_at: Math.floor(Date.now() / 1000 + expires_in), - // Fall back to old refresh token, but note that - // many providers may only allow using a refresh token once. - refresh_token: refreshToken.refresh_token ?? token.refresh_token, - }; - log.trace(newToken, "New token"); - return newToken; - } - } catch (error: unknown) { - if (typeof error === "string") { - log.error({ message: error }, "Error refreshing access token"); - } else if (error instanceof Error) { - log.error(error, "Error refreshing access token"); - } else { - log.error("Unknown error refreshing access token"); - } - // The error property will be used client-side to handle the refresh token error - return { ...token, error: "RefreshAccessTokenError" as const }; - } - } - - return provider; - - // return { - // providers: [provider], - // callbacks: { - // async jwt({ token, account }: { token: JWT; account: any }) { - // // Persist the OAuth access_token and or the user id to the token right after signin - // if (account) { - // log.trace("account present, saving new token"); - // // Save the access token and refresh token in the JWT on the initial login - // return { - // access_token: account.access_token, - // expires_at: account.expires_at, - // refresh_token: account.refresh_token, - // email: token.email, - // name: token.name, - // picture: token.picture, - // sub: token.sub, - // }; - // } - // - // return refreshToken(token); - // }, - // async session({ session, token }: { session: Session; token: JWT }) { - // // Send properties to the client, like an access_token from a provider. - // log.trace(token, "Creating session from token"); - // return { - // ...session, - // error: token.error, - // accessToken: token.access_token, - // }; - // }, - // }, - // }; -} diff --git a/ui/app/api/auth/[...nextauth]/oauth-token.ts b/ui/app/api/auth/[...nextauth]/oauth-token.ts index b31cabeeb..f09bd7376 100644 --- a/ui/app/api/auth/[...nextauth]/oauth-token.ts +++ b/ui/app/api/auth/[...nextauth]/oauth-token.ts @@ -1,6 +1,4 @@ -import { getKafkaClusters } from "@/api/kafka/actions"; import { logger } from "@/utils/logger"; -import { AuthOptions } from "next-auth"; import CredentialsProvider from "next-auth/providers/credentials"; import { Provider } from "next-auth/providers/index"; diff --git a/ui/app/api/auth/[...nextauth]/oidc.ts b/ui/app/api/auth/[...nextauth]/oidc.ts new file mode 100644 index 000000000..c8b77c622 --- /dev/null +++ b/ui/app/api/auth/[...nextauth]/oidc.ts @@ -0,0 +1,195 @@ +import { logger } from "@/utils/logger"; +import { Session, TokenSet } from "next-auth"; +import { JWT } from "next-auth/jwt"; +import { OAuthConfig } from "next-auth/providers/index"; +import config from '@/utils/config'; +import { redirect } from 'next/navigation' + +const log = logger.child({ module: "oidc" }); + +class OpenIdConnect { + + provider: OAuthConfig | null; + + constructor( + authServerUrl: string | null, + clientId: string | null, + clientSecret: string | null + ) { + if (clientId && clientSecret && authServerUrl) { + this.provider = { + id: "oidc", + name: "OpenID Connect Provider", + type: "oauth", + clientId: clientId, + clientSecret: clientSecret, + wellKnown: `${authServerUrl}/.well-known/openid-configuration`, + authorization: { params: { scope: "openid email profile groups" } }, + idToken: true, + profile(profile) { + return { + id: profile.sub, + name: 
profile.name ?? profile.preferred_username, + email: profile.email, + image: profile.image, + } + }, + } + } else { + this.provider = null; + } + } + + isEnabled() { + return this.provider != null; + } + + async getTokenEndpoint() { + const discoveryEndpoint: string = this.provider!.wellKnown!; + let _tokenEndpoint: string | undefined = undefined; + + log.trace(`wellKnown endpoint: ${discoveryEndpoint}`); + const response = await fetch(discoveryEndpoint); + const discovery = await response.json(); + + _tokenEndpoint = discovery.token_endpoint; + log.trace(`token endpoint: ${_tokenEndpoint}`); + + return _tokenEndpoint; + } + + isExpired(token: JWT): boolean { + let tokenExpiration = new Date( + (typeof token?.expires_at === "number" ? token.expires_at : 0) * 1000, + ); + + let remainingMs = tokenExpiration.getTime() - Date.now(); + + if (remainingMs > 30000) { + log.trace(`Token expires at ${tokenExpiration.toISOString()}, time remaining: ${remainingMs}ms`); + return false; + } + + if (remainingMs > 0) { + log.trace(`Token expires at ${tokenExpiration.toISOString()}, time remaining: ${remainingMs}ms (less than 30s)`); + } else { + log.trace(`Token expired at ${tokenExpiration.toISOString()}`); + } + + return true; + } + + async refreshToken(token: JWT): Promise { + let refresh_token = typeof token.refresh_token === "string" + ? token.refresh_token + : undefined; + + if (refresh_token === undefined) { + return { + error: "Refresh token not available, expiring session" + } + } + + const params = { + client_id: this.provider!.clientId!, + client_secret: this.provider!.clientSecret!, + grant_type: "refresh_token", + refresh_token: refresh_token, + }; + + const tokenEndpoint = await this.getTokenEndpoint(); + + if (!tokenEndpoint) { + return { + error: "Invalid OIDC wellKnown" + } + } + + log.trace({ url: tokenEndpoint, params: JSON.stringify(params) }, "Refreshing token"); + + const response = await fetch(tokenEndpoint, { + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + body: new URLSearchParams(params), + method: "POST", + }); + + const responseBody = await response.text(); + + if (!response.ok) { + log.debug(responseBody, "Bad token response"); + return { + error: responseBody + } + } + + const refreshToken: TokenSet = JSON.parse(responseBody); + log.trace(refreshToken, "Got refresh token"); + + let expires_in = + typeof refreshToken.expires_in === "number" + ? refreshToken.expires_in + : -1; + + const newToken: JWT = { + ...token, // Keep the previous token properties + access_token: refreshToken.access_token, + expires_at: Math.floor(Date.now() / 1000 + expires_in), + // Fall back to old refresh token, but note that + // many providers may only allow using a refresh token once. + refresh_token: refreshToken.refresh_token ?? 
token.refresh_token, + }; + + log.trace(newToken, "New token"); + return newToken; + } + + async jwt({ token, account }: { token: JWT, account: any }) { + // Persist the OAuth access_token and or the user id to the token right after signin + log.trace("jwt callback invoked") + + if (account) { + log.trace(`account ${JSON.stringify(account)} present, saving new token: ${JSON.stringify(token)}`); + // Save the access token and refresh token in the JWT on the initial login + return { + access_token: account.access_token, + expires_at: account.expires_at, + refresh_token: account.refresh_token, + email: token.email, + name: token.name, + picture: token.picture, + sub: token.sub, + }; + } + + if (this.isExpired(token)) { + return this.refreshToken(token); + } + + return token; + }; + + async session({ session, token }: { session: Session, token: JWT }) { + if (token.error) { + session.expires = new Date(0).toISOString(); + return session; + } + // Send properties to the client, like an access_token from a provider. + log.trace(token, "Updating session with token"); + return { + ...session, + error: token.error, + accessToken: token.access_token, + authorization: `Bearer ${token.access_token}`, + }; + }; +} + +export default async function oidcSource() { + let oidcConfig = (await config())?.security?.oidc; + + return new OpenIdConnect( + oidcConfig?.authServerUrl ?? null, + oidcConfig?.clientId ?? null, + oidcConfig?.clientSecret ?? null + ); +}; diff --git a/ui/app/api/auth/[...nextauth]/scram.ts b/ui/app/api/auth/[...nextauth]/scram.ts index 3bcbd5eb0..b24e6f8c9 100644 --- a/ui/app/api/auth/[...nextauth]/scram.ts +++ b/ui/app/api/auth/[...nextauth]/scram.ts @@ -1,5 +1,3 @@ -import { getKafkaClusters } from "@/api/kafka/actions"; -import { AuthOptions } from "next-auth"; import CredentialsProvider from "next-auth/providers/credentials"; import { Provider } from "next-auth/providers/index"; diff --git a/ui/app/api/auth/oidc/layout.tsx b/ui/app/api/auth/oidc/layout.tsx new file mode 100644 index 000000000..c373a1bac --- /dev/null +++ b/ui/app/api/auth/oidc/layout.tsx @@ -0,0 +1,15 @@ +"use client"; + +import { SessionProvider } from "next-auth/react"; + +interface Props { + children: React.ReactNode +} + +export default function AuthLayout(props: Props) { + return ( + + { props.children } + + ) +} diff --git a/ui/app/api/auth/oidc/signin/page.tsx b/ui/app/api/auth/oidc/signin/page.tsx new file mode 100644 index 000000000..c5127228d --- /dev/null +++ b/ui/app/api/auth/oidc/signin/page.tsx @@ -0,0 +1,23 @@ +"use client"; + +import { signIn, useSession } from "next-auth/react"; +import { useRouter } from 'next/navigation' +import { useEffect } from "react"; + +export default function SignIn() { + const router = useRouter() + const { status } = useSession() + + useEffect(() => { + if (status === 'unauthenticated') { + signIn('oidc') + } + else if (status === 'authenticated') { + router.push('/') + } + }, [ router, status ]) + + return ( +
    + ) +} diff --git a/ui/app/config/route.ts b/ui/app/config/route.ts new file mode 100644 index 000000000..973a7efb6 --- /dev/null +++ b/ui/app/config/route.ts @@ -0,0 +1,15 @@ +import config from '@/utils/config'; + +export const dynamic = "force-dynamic"; + +/* + * This route serves as an endpoint for middleware.js to fetch whether + * OIDC security is enabled or not. + */ +export async function GET() { + const oidcEnabled = await config().then(cfg => cfg.security?.oidc != null); + + return Response.json({ + "oidc": oidcEnabled, + }); +} diff --git a/ui/app/layout.tsx b/ui/app/layout.tsx index 23f2be2ee..a93dfba45 100644 --- a/ui/app/layout.tsx +++ b/ui/app/layout.tsx @@ -4,8 +4,8 @@ type Props = { children: ReactNode; }; -export const fetchCache = "force-no-store"; -export const dynamic = "force-dynamic"; +//export const fetchCache = "force-no-store"; +//export const dynamic = "force-dynamic"; // Since we have a `not-found.tsx` page on the root, a layout file // is required, even if it's just passing children through. diff --git a/ui/components/ClusterConnectionDetails.tsx b/ui/components/ClusterConnectionDetails.tsx index c2d934ebe..b4d22c193 100644 --- a/ui/components/ClusterConnectionDetails.tsx +++ b/ui/components/ClusterConnectionDetails.tsx @@ -20,7 +20,7 @@ export async function ClusterConnectionDetails({ clusterId: string; }) { const t = useTranslations(); - const data = await getKafkaCluster(clusterId); + const data = (await getKafkaCluster(clusterId))?.payload; if (!data) { return null; } diff --git a/ui/components/ClusterOverview/ClusterCard.tsx b/ui/components/ClusterOverview/ClusterCard.tsx index a65178eb0..56510509c 100644 --- a/ui/components/ClusterOverview/ClusterCard.tsx +++ b/ui/components/ClusterOverview/ClusterCard.tsx @@ -76,9 +76,11 @@ export function ClusterCard({ } try { - const success = await updateKafkaCluster(kafkaId, false); + const response = await updateKafkaCluster(kafkaId, false); - if (success) { + if (response.errors) { + console.log("Unknown error occurred", response.errors); + } else { setReconciliationPaused(false); } } catch (e: unknown) { diff --git a/ui/components/ClusterOverview/TopicsPartitionsCard.tsx b/ui/components/ClusterOverview/TopicsPartitionsCard.tsx index 18eedf7a1..4e3951dae 100644 --- a/ui/components/ClusterOverview/TopicsPartitionsCard.tsx +++ b/ui/components/ClusterOverview/TopicsPartitionsCard.tsx @@ -21,12 +21,16 @@ import { } from "@/libs/patternfly/react-icons"; import { Link } from "@/i18n/routing"; import { useTranslations } from "next-intl"; +import { ApiError } from "@/api/api"; +import { NoDataErrorState } from "@/components/NoDataErrorState"; type TopicsPartitionsCardProps = { topicsReplicated: number; topicsUnderReplicated: number; topicsOffline: number; + topicsUnknown: number; partitions: number; + errors?: ApiError[]; }; export function TopicsPartitionsCard({ @@ -34,27 +38,21 @@ export function TopicsPartitionsCard({ topicsReplicated, topicsUnderReplicated, topicsOffline, + topicsUnknown, partitions, + errors, }: | ({ isLoading: false } & TopicsPartitionsCardProps) | ({ isLoading: true; } & Partial<{ [key in keyof TopicsPartitionsCardProps]?: undefined }>)) { const t = useTranslations(); - return ( - - - {t("ClusterOverview.view_all_topics")} - - ), - }} - > - {t("ClusterOverview.topic_header")} - - + let cardBody; + + if (errors) { + cardBody = + } else { + cardBody = ( - {" "} + {" "} {t("ClusterOverview.total_topics")} @@ -183,6 +181,24 @@ export function TopicsPartitionsCard({ + ); + } + + return ( + + + 
{t("ClusterOverview.view_all_topics")} + + ), + }} + > + {t("ClusterOverview.topic_header")} + + + { cardBody } ); diff --git a/ui/components/ClustersTable.tsx b/ui/components/ClustersTable.tsx index 9fc35d60c..103d737a3 100644 --- a/ui/components/ClustersTable.tsx +++ b/ui/components/ClustersTable.tsx @@ -6,6 +6,7 @@ import { ResponsiveTable } from "@/components/Table"; import { Truncate } from "@/libs/patternfly/react-core"; import { TableVariant } from "@/libs/patternfly/react-table"; import { useTranslations } from "next-intl"; +import { Link } from "@/i18n/routing"; const columns = [ "name", @@ -13,14 +14,28 @@ const columns = [ "namespace", "authentication", "login", -] as const; +]; export function ClustersTable({ clusters, + authenticated, }: { clusters: ClusterList[] | undefined; + authenticated: boolean }) { const t = useTranslations(); + const columns = authenticated ? [ + "name", + "version", + "namespace", + ] as const : [ + "name", + "version", + "namespace", + "authentication", + "login", + ] as const; + return ( - + {authenticated + ? + + + : + } ); case "version": @@ -87,8 +107,8 @@ export function ClustersTable({ case "login": return ( - - Login to cluster + + { authenticated ? "View" : "Login to cluster" } ); diff --git a/ui/components/Format/Number.tsx b/ui/components/Format/Number.tsx index b587973dc..6241cbf8c 100644 --- a/ui/components/Format/Number.tsx +++ b/ui/components/Format/Number.tsx @@ -2,8 +2,8 @@ import { useFormatter } from "next-intl"; -export function Number({ value }: { value: string | number | undefined }) { +export function Number({ value }: { value: string | number | null | undefined }) { const formatter = useFormatter(); value = typeof value === "string" ? parseInt(value, 10) : value; - return value !== undefined && !isNaN(value) ? formatter.number(value) : "-"; + return value !== undefined && value !== null && !isNaN(value) ? formatter.number(value) : "-"; } diff --git a/ui/components/NoDataErrorState.tsx b/ui/components/NoDataErrorState.tsx new file mode 100644 index 000000000..d7fdec062 --- /dev/null +++ b/ui/components/NoDataErrorState.tsx @@ -0,0 +1,46 @@ +"use client"; +import { + EmptyState, + EmptyStateBody, + EmptyStateIcon, + Title, +} from "@patternfly/react-core"; +import { + ErrorCircleOIcon, + BanIcon +} from "@patternfly/react-icons"; +import { ApiError } from '@/api/api'; + +export function NoDataErrorState({ errors }: { errors: ApiError[] }) { + let errorIcon; + + switch (errors[0].status ?? 
'400') { + case '401': + case '403': + errorIcon = BanIcon; + break; + default: + errorIcon = ErrorCircleOIcon; + break; + } + + return ( + + + + { errors[0].title } + + + <> + { errors.map(err => { + return ( + <> + {err.title}: {err.detail} {err.code && <>({err.code})} + + ); + })} + + + + ); +} diff --git a/ui/components/ReconciliationPausedBanner.tsx b/ui/components/ReconciliationPausedBanner.tsx index 3e5b09580..1f836a8ab 100644 --- a/ui/components/ReconciliationPausedBanner.tsx +++ b/ui/components/ReconciliationPausedBanner.tsx @@ -19,9 +19,11 @@ export function ReconciliationPausedBanner({ kafkaId }: { kafkaId: string }) { const resumeReconciliation = async () => { try { - const success = await updateKafkaCluster(kafkaId, false); + const response = await updateKafkaCluster(kafkaId, false); - if (success) { + if (response.errors) { + console.log("Unknown error occurred", response.errors); + } else { setReconciliationPaused(false); } } catch (e: unknown) { diff --git a/ui/components/ReconciliationProvider.tsx b/ui/components/ReconciliationProvider.tsx index bc5ef0c6e..ab6661422 100644 --- a/ui/components/ReconciliationProvider.tsx +++ b/ui/components/ReconciliationProvider.tsx @@ -25,7 +25,7 @@ export function ReconciliationProvider({ const fetchReconciliationState = async () => { try { - const cluster = await getKafkaCluster(kafkaId); + const cluster = (await getKafkaCluster(kafkaId))?.payload; const reconciliationPaused = cluster?.meta?.reconciliationPaused ?? false; setReconciliationPaused(reconciliationPaused); diff --git a/ui/components/TopicsTable/TopicsTable.tsx b/ui/components/TopicsTable/TopicsTable.tsx index 3934b4bd5..787774863 100644 --- a/ui/components/TopicsTable/TopicsTable.tsx +++ b/ui/components/TopicsTable/TopicsTable.tsx @@ -57,6 +57,14 @@ const StatusLabel: Record = {  Partially offline ), + Unknown: ( + <> + + + +  Unknown + + ), Offline: ( <> @@ -198,25 +206,25 @@ export function TopicsTable({ case "consumerGroups": return ( - - - + {row.relationships.consumerGroups?.meta?.count !== undefined ? ( + + + + ) : ( + + )} ); case "partitions": return ( - + {row.attributes.numPartitions !== null ? 
( + + + + ) : ( - + )} ); case "storage": diff --git a/ui/environment.d.ts b/ui/environment.d.ts index af85ca3a9..eee1dff76 100644 --- a/ui/environment.d.ts +++ b/ui/environment.d.ts @@ -3,9 +3,6 @@ namespace NodeJS { NEXTAUTH_URL: string; NEXTAUTH_SECRET: string; BACKEND_URL: string; - KEYCLOAK_CLIENTID?: string; - KEYCLOAK_CLIENTSECRET?: string; - NEXT_PUBLIC_KEYCLOAK_URL?: string; NEXT_PUBLIC_PRODUCTIZED_BUILD?: "true" | "false"; LOG_LEVEL?: "fatal" | "error" | "warn" | "info" | "debug" | "trace"; CONSOLE_MODE?: "read-only" | "read-write"; diff --git a/ui/middleware.ts b/ui/middleware.ts index 8b5909822..58d9e3fa0 100644 --- a/ui/middleware.ts +++ b/ui/middleware.ts @@ -2,7 +2,6 @@ import { locales, routing } from "@/i18n/routing"; import withAuth from "next-auth/middleware"; import createIntlMiddleware from "next-intl/middleware"; import { NextRequest, NextResponse } from "next/server"; - import { logger } from "@/utils/logger"; const log = logger.child({ module: "middleware" }); @@ -24,7 +23,7 @@ const authMiddleware = withAuth( authorized: ({ token }) => token != null, }, pages: { - signIn: `/kafka/1/login`, + //signIn: `/kafka/1/login`, }, }, ) as any; @@ -44,16 +43,22 @@ const protectedPathnameRegex = RegExp( ); export default async function middleware(req: NextRequest) { + /* + * Next.js middleware doesn't support reading files, so here we make a (cached) + * call to the /config endpoint within the same application :( + */ + let oidcEnabled = await fetch(`http://127.0.0.1:${process.env.PORT}/config`, { cache: "force-cache" }) + .then(cfg => cfg.json()) + .then(cfg => cfg["oidc"]); + const requestPath = req.nextUrl.pathname; - const isPublicPage = publicPathnameRegex.test(requestPath); - const isProtectedPage = protectedPathnameRegex.test(requestPath); + const isPublicPage = !oidcEnabled && publicPathnameRegex.test(requestPath); + const isProtectedPage = oidcEnabled || protectedPathnameRegex.test(requestPath); if (isPublicPage) { - log.trace({ requestPath: requestPath }, "public page"); return intlMiddleware(req); } else if (isProtectedPage) { - log.trace({ requestPath: requestPath }, "protected page"); - return (authMiddleware as any)(req); + return (authMiddleware)(req); } else { log.debug( { @@ -70,5 +75,5 @@ export default async function middleware(req: NextRequest) { export const config = { // Skip all paths that should not be internationalized. This example skips the // folders "api", "healthz", "_next" and all files with an extension (e.g. 
favicon.ico) - matcher: ["/((?!api|healthz|_next|.*\\..*).*)"], + matcher: ["/((?!api|config|healthz|_next|.*\\..*).*)"], }; diff --git a/ui/package-lock.json b/ui/package-lock.json index f77faae49..dfbde2112 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -35,6 +35,7 @@ "eslint-import-resolver-typescript": "^3.7.0", "eslint-plugin-storybook": "^0.11.1", "iron-session": "^8.0.4", + "js-yaml": "^4.1.0", "next": "^14.2.20", "next-auth": "^4.24.11", "next-intl": "^3.26.1", @@ -60,6 +61,7 @@ "@storybook/react": "^8.4.7", "@storybook/test": "^8.0.0", "@storybook/test-runner": "^0.20.1", + "@types/js-yaml": "^4.0.9", "pino-pretty": "^13.0.0", "playwright": "^1.45.2", "prettier": "^3.4.2", @@ -2549,11 +2551,6 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@eslint/eslintrc/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, "node_modules/@eslint/eslintrc/node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -2568,17 +2565,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@eslint/eslintrc/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/@eslint/eslintrc/node_modules/type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -3201,6 +3187,15 @@ "node": ">=8" } }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", @@ -3214,6 +3209,19 @@ "node": ">=8" } }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", @@ -7565,6 +7573,13 @@ "@types/istanbul-lib-report": "*" } }, + "node_modules/@types/js-yaml": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz", + "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", @@ -8506,13 +8521,9 @@ "dev": true }, "node_modules/argparse": { - "version": 
"1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "dependencies": { - "sprintf-js": "~1.0.2" - } + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/aria-query": { "version": "5.3.0", @@ -11420,11 +11431,6 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, "node_modules/eslint/node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -11439,17 +11445,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/eslint/node_modules/type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -14835,13 +14830,12 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" @@ -16611,12 +16605,6 @@ } } }, - "node_modules/postcss-loader/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, "node_modules/postcss-loader/node_modules/cosmiconfig": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", @@ -16643,18 +16631,6 @@ } } }, - "node_modules/postcss-loader/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/postcss-loader/node_modules/semver": { "version": "7.6.2", "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", diff --git a/ui/package.json b/ui/package.json index 6dc56fcd5..9b9d17452 100644 --- a/ui/package.json +++ b/ui/package.json @@ -26,6 +26,7 @@ "@stdlib/string-truncate": "^0.2.2", 
"@stdlib/string-truncate-middle": "^0.2.2", "@tanstack/react-virtual": "^3.11.1", + "@types/js-yaml": "^4.0.9", "@types/node": "22.10.2", "@types/react": "18.3.12", "@types/react-dom": "18.3.1", @@ -40,6 +41,7 @@ "eslint-import-resolver-typescript": "^3.7.0", "eslint-plugin-storybook": "^0.11.1", "iron-session": "^8.0.4", + "js-yaml": "^4.1.0", "next": "^14.2.20", "next-auth": "^4.24.11", "next-intl": "^3.26.1", diff --git a/ui/utils/config.ts b/ui/utils/config.ts new file mode 100644 index 000000000..6c299a61f --- /dev/null +++ b/ui/utils/config.ts @@ -0,0 +1,56 @@ +"use server"; + +import fs from 'fs'; +import * as yaml from 'js-yaml'; +import { logger } from "@/utils/logger"; + +const log = logger.child({ module: "utils" }); + +export interface OidcConfig { + authServerUrl: string | null; + clientId: string | null; + clientSecret: string | null; +} + +export interface GlobalSecurityConfig { + oidc: OidcConfig | null; +} + +export interface ConsoleConfig { + security: GlobalSecurityConfig | null; +} + +async function getOrLoadConfig(): Promise { + let consoleConfig: ConsoleConfig = (globalThis as any).consoleConfig; + + if (consoleConfig === undefined) { + if (!process.env.CONSOLE_CONFIG_PATH) { + log.warn("console configuration path variable CONSOLE_CONFIG_PATH is not set, configuration not loaded"); + consoleConfig = { security: { oidc: null } }; + } else { + const fileContents = fs.readFileSync(process.env.CONSOLE_CONFIG_PATH, 'utf8'); + const cfg = yaml.load(fileContents) as ConsoleConfig; + + consoleConfig = { + security: { + oidc: cfg.security?.oidc == null ? null : { + authServerUrl: cfg.security?.oidc?.authServerUrl ?? null, + clientId: cfg.security?.oidc?.clientId ?? null, + clientSecret: cfg.security?.oidc?.clientSecret ?? null, + } + } + }; + log.info(`console configuration loaded from ${process.env.CONSOLE_CONFIG_PATH}: ${JSON.stringify(consoleConfig)}`); + } + + (globalThis as any).consoleConfig = consoleConfig; + } else { + log.trace(`console configuration reused from globalThis: ${JSON.stringify(consoleConfig)}`); + } + + return consoleConfig; +} + +export default async function config(): Promise { + return getOrLoadConfig(); +} diff --git a/ui/utils/env.ts b/ui/utils/env.ts index c413be6c4..14ac8a2f8 100644 --- a/ui/utils/env.ts +++ b/ui/utils/env.ts @@ -1,17 +1,5 @@ export const isReadonly = (() => { - if (process.env.CONSOLE_MODE !== "read-write") { - return true; - } - - if ( - process.env.NEXT_PUBLIC_KEYCLOAK_URL && - process.env.KEYCLOAK_CLIENTID && - process.env.KEYCLOAK_CLIENTSECRET - ) { - return false; - } - - return true; + return process.env.CONSOLE_MODE === "read-only"; })(); export const isProductizedBuild = process.env.NEXT_PUBLIC_PRODUCTIZED_BUILD === "true";