From 6880268cdd100372abea18dbbed89934806d2a4f Mon Sep 17 00:00:00 2001
From: Mike Rostermund
Date: Mon, 6 May 2024 08:54:55 +0200
Subject: [PATCH] Bump minimum supported version to LogScale 1.118.0 (#792)

---
 .github/workflows/ci.yaml                     |  22 +-
 api/v1alpha1/humiocluster_types.go            |   2 +
 .../crds/core.humio.com_humioclusters.yaml    |   6 +-
 .../bases/core.humio.com_humioclusters.yaml   |   6 +-
 controllers/humiocluster_controller.go        |  60 +-----
 controllers/humiocluster_defaults.go          | 190 +++++++-----------
 controllers/humiocluster_defaults_test.go     |  68 +------
 controllers/humiocluster_pods.go              |  50 -----
 controllers/humiocluster_version.go           |   4 +-
 .../clusters/humiocluster_controller_test.go  | 110 +---------
 controllers/suite/clusters/suite_test.go      |  18 +-
 controllers/suite/common.go                   |   8 +-
 controllers/suite/resources/suite_test.go     |   2 +-
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 images/helper/go.mod                          |   2 +-
 images/helper/go.sum                          |   4 +-
 images/helper/main.go                         |  11 +-
 pkg/helpers/helpers.go                        |  43 ----
 pkg/humio/client.go                           |  36 ----
 pkg/humio/client_mock.go                      |  92 ++------
 21 files changed, 160 insertions(+), 580 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 16841e45..b056a4b8 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -39,6 +39,17 @@ jobs:
       - uses: actions/setup-go@v4
         with:
           go-version: '1.22.2'
+      - name: Run Gosec Security Scanner
+        run: |
+          export PATH=$PATH:$(go env GOPATH)/bin
+          go get github.com/securego/gosec/cmd/gosec
+          go install github.com/securego/gosec/cmd/gosec
+          gosec ./...
+      - name: Run Staticcheck
+        uses: dominikh/staticcheck-action@v1.3.1
+        with:
+          version: "2023.1.7"
+          install-go: false
       - name: operator image
         run: make docker-build-operator IMG=humio/humio-operator:${{ github.sha }}
       - name: helper image
@@ -67,14 +78,3 @@ jobs:
           container_tag: ${{ github.sha }}
         env:
           FALCON_CLIENT_SECRET: "${{ secrets.FALCON_CLIENT_SECRET }}"
-      - name: Run Gosec Security Scanner
-        run: |
-          export PATH=$PATH:$(go env GOPATH)/bin
-          go get github.com/securego/gosec/cmd/gosec
-          go install github.com/securego/gosec/cmd/gosec
-          gosec ./...
-      - name: Run Staticcheck
-        uses: dominikh/staticcheck-action@v1.3.1
-        with:
-          version: "2023.1.7"
-          install-go: false
diff --git a/api/v1alpha1/humiocluster_types.go b/api/v1alpha1/humiocluster_types.go
index 73c920d3..cefe57be 100644
--- a/api/v1alpha1/humiocluster_types.go
+++ b/api/v1alpha1/humiocluster_types.go
@@ -51,10 +51,12 @@ const (
 type HumioClusterSpec struct {
 	// AutoRebalancePartitions will enable auto-rebalancing of both digest and storage partitions assigned to humio cluster nodes.
 	// If all Kubernetes worker nodes are located in the same availability zone, you must set DisableInitContainer to true to use auto rebalancing of partitions.
+	// Deprecated: No longer needed as of 1.89.0, since partition and segment distribution is now automatically managed by LogScale itself.
 	AutoRebalancePartitions bool `json:"autoRebalancePartitions,omitempty"`
 	// TargetReplicationFactor is the desired number of replicas of both storage and ingest partitions
 	TargetReplicationFactor int `json:"targetReplicationFactor,omitempty"`
 	// StoragePartitionsCount is the desired number of storage partitions
+	// Deprecated: No longer needed as LogScale now automatically redistributes segments
 	StoragePartitionsCount int `json:"storagePartitionsCount,omitempty"`
 	// DigestPartitionsCount is the desired number of digest partitions
 	DigestPartitionsCount int `json:"digestPartitionsCount,omitempty"`
diff --git a/charts/humio-operator/crds/core.humio.com_humioclusters.yaml b/charts/humio-operator/crds/core.humio.com_humioclusters.yaml
index 87625fa9..579b2e04 100644
--- a/charts/humio-operator/crds/core.humio.com_humioclusters.yaml
+++ b/charts/humio-operator/crds/core.humio.com_humioclusters.yaml
@@ -830,6 +830,7 @@ spec:
                 description: |-
                   AutoRebalancePartitions will enable auto-rebalancing of both digest and storage partitions assigned to humio cluster nodes.
                   If all Kubernetes worker nodes are located in the same availability zone, you must set DisableInitContainer to true to use auto rebalancing of partitions.
+                  Deprecated: No longer needed as of 1.89.0, since partition and segment distribution is now automatically managed by LogScale itself.
                 type: boolean
               commonEnvironmentVariables:
                 description: |-
@@ -14102,8 +14103,9 @@ spec:
                     type: object
                 type: array
               storagePartitionsCount:
-                description: StoragePartitionsCount is the desired number of storage
-                  partitions
+                description: |-
+                  StoragePartitionsCount is the desired number of storage partitions
+                  Deprecated: No longer needed as LogScale now automatically redistributes segments
                 type: integer
               targetReplicationFactor:
                 description: TargetReplicationFactor is the desired number of replicas
diff --git a/config/crd/bases/core.humio.com_humioclusters.yaml b/config/crd/bases/core.humio.com_humioclusters.yaml
index 87625fa9..579b2e04 100644
--- a/config/crd/bases/core.humio.com_humioclusters.yaml
+++ b/config/crd/bases/core.humio.com_humioclusters.yaml
@@ -830,6 +830,7 @@ spec:
                 description: |-
                   AutoRebalancePartitions will enable auto-rebalancing of both digest and storage partitions assigned to humio cluster nodes.
                   If all Kubernetes worker nodes are located in the same availability zone, you must set DisableInitContainer to true to use auto rebalancing of partitions.
+                  Deprecated: No longer needed as of 1.89.0, since partition and segment distribution is now automatically managed by LogScale itself.
                 type: boolean
              commonEnvironmentVariables:
                 description: |-
@@ -14102,8 +14103,9 @@ spec:
                     type: object
                 type: array
              storagePartitionsCount:
-                description: StoragePartitionsCount is the desired number of storage
-                  partitions
+                description: |-
+                  StoragePartitionsCount is the desired number of storage partitions
+                  Deprecated: No longer needed as LogScale now automatically redistributes segments
                 type: integer
               targetReplicationFactor:
                 description: TargetReplicationFactor is the desired number of replicas
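Both CRD files above are generated from the Go types in api/v1alpha1/humiocluster_types.go, which is why the new deprecation wording appears three times in this patch: once in the doc comment and once in each generated copy of the CRD. A minimal sketch of the pattern, assuming the standard kubebuilder/controller-gen flow this repository follows (the field stays in the spec so existing manifests keep validating, and the doc comment is copied verbatim into the CRD description; `ExampleSpec` is a hypothetical type written for illustration, not part of this PR):

```go
// ExampleSpec illustrates the deprecation pattern used in HumioClusterSpec:
// the field is kept so that clusters which already set it continue to pass
// CRD validation, while the "Deprecated:" doc comment both warns readers of
// the Go API and ends up in the generated CRD's description.
type ExampleSpec struct {
	// StoragePartitionsCount is the desired number of storage partitions
	// Deprecated: No longer needed as LogScale now automatically redistributes segments
	StoragePartitionsCount int `json:"storagePartitionsCount,omitempty"`
}
```

Regenerating the manifests (typically `make manifests` in kubebuilder layouts) rewrites both copies of the CRD from the same source, which is what keeps charts/humio-operator/crds/ and config/crd/bases/ changing in lockstep here.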
diff --git a/controllers/humiocluster_controller.go b/controllers/humiocluster_controller.go
index d75a0431..95688653 100644
--- a/controllers/humiocluster_controller.go
+++ b/controllers/humiocluster_controller.go
@@ -343,11 +343,6 @@ func (r *HumioClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 		}
 	}
 
-	if err = r.ensurePartitionsAreBalanced(hc, cluster.Config(), req); err != nil {
-		return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
-			withMessage(err.Error()))
-	}
-
 	for _, fun := range []ctxHumioClusterFunc{
 		r.cleanupUnusedTLSCertificates,
 		r.cleanupUnusedTLSSecrets,
@@ -1429,19 +1424,19 @@ func (r *HumioClusterReconciler) ensureOrphanedPvcsAreDeleted(ctx context.Contex
 	if err != nil {
 		return r.logErrorAndReturn(err, "failed to list pvcs")
 	}
-	for _, pvc := range pvcList {
-		pvcOrphaned, err := r.isPvcOrphaned(ctx, hnp, hc, pvc)
+	for idx := range pvcList {
+		pvcOrphaned, err := r.isPvcOrphaned(ctx, hnp, hc, pvcList[idx])
 		if err != nil {
 			return r.logErrorAndReturn(err, "could not check if pvc is orphaned")
 		}
 		if pvcOrphaned {
-			if pvc.DeletionTimestamp == nil {
+			if pvcList[idx].DeletionTimestamp == nil {
 				r.Log.Info(fmt.Sprintf("node cannot be found for pvc. deleting pvc %s as "+
-					"dataVolumePersistentVolumeClaimPolicy is set to %s", pvc.Name,
+					"dataVolumePersistentVolumeClaimPolicy is set to %s", pvcList[idx].Name,
 					humiov1alpha1.HumioPersistentVolumeReclaimTypeOnNodeDelete))
-				err = r.Client.Delete(ctx, &pvc)
+				err = r.Client.Delete(ctx, &pvcList[idx])
 				if err != nil {
-					return r.logErrorAndReturn(err, fmt.Sprintf("cloud not delete pvc %s", pvc.Name))
+					return r.logErrorAndReturn(err, fmt.Sprintf("could not delete pvc %s", pvcList[idx].Name))
 				}
 			}
 		}
@@ -1555,49 +1550,6 @@ func (r *HumioClusterReconciler) ensureLicense(ctx context.Context, hc *humiov1a
 	return reconcile.Result{}, nil
 }
 
-func (r *HumioClusterReconciler) ensurePartitionsAreBalanced(hc *humiov1alpha1.HumioCluster, config *humioapi.Config, req reconcile.Request) error {
-	humioVersion, _ := HumioVersionFromString(NewHumioNodeManagerFromHumioCluster(hc).GetImage())
-	if ok, _ := humioVersion.AtLeast(HumioVersionWithAutomaticPartitionManagement); ok {
-		return nil
-	}
-
-	if !hc.Spec.AutoRebalancePartitions {
-		r.Log.Info("partition auto-rebalancing not enabled, skipping")
-		return nil
-	}
-
-	currentClusterInfo, err := r.HumioClient.GetClusters(config, req)
-	if err != nil {
-		return r.logErrorAndReturn(err, "could not get cluster info")
-	}
-
-	suggestedStorageLayout, err := r.HumioClient.SuggestedStoragePartitions(config, req)
-	if err != nil {
-		return r.logErrorAndReturn(err, "could not get suggested storage layout")
-	}
-	currentStorageLayoutInput := helpers.MapStoragePartition(currentClusterInfo.StoragePartitions, helpers.ToStoragePartitionInput)
-	if !reflect.DeepEqual(currentStorageLayoutInput, suggestedStorageLayout) {
-		r.Log.Info(fmt.Sprintf("triggering update of storage partitions to use suggested layout, current: %#+v, suggested: %#+v", currentClusterInfo.StoragePartitions, suggestedStorageLayout))
-		if err = r.HumioClient.UpdateStoragePartitionScheme(config, req, suggestedStorageLayout); err != nil {
-			return r.logErrorAndReturn(err, "could not update storage partition scheme")
-		}
-	}
-
-	suggestedIngestLayout, err := r.HumioClient.SuggestedIngestPartitions(config, req)
-	if err != nil {
-		return r.logErrorAndReturn(err, "could not get suggested ingest layout")
-	}
-	currentIngestLayoutInput := helpers.MapIngestPartition(currentClusterInfo.IngestPartitions, helpers.ToIngestPartitionInput)
-	if !reflect.DeepEqual(currentIngestLayoutInput, suggestedIngestLayout) {
-		r.Log.Info(fmt.Sprintf("triggering update of ingest partitions to use suggested layout, current: %#+v, suggested: %#+v", currentClusterInfo.IngestPartitions, suggestedIngestLayout))
-		if err = r.HumioClient.UpdateIngestPartitionScheme(config, req, suggestedIngestLayout); err != nil {
-			return r.logErrorAndReturn(err, "could not update ingest partition scheme")
-		}
-	}
-
-	return nil
-}
 
 func (r *HumioClusterReconciler) ensureService(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error {
 	r.Log.Info("ensuring service")
 	existingService, err := kubernetes.GetService(ctx, r, hnp.GetNodePoolName(), hnp.GetNamespace())
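The function removed above predates automatic partition management: before LogScale 1.89.0 the operator had to fetch suggested storage and ingest partition layouts and push them back to the cluster on every reconcile. With the minimum supported version now raised to 1.118.0, every supported release handles partition and segment distribution itself, so the whole code path and the version gate in front of it can go, leaving the now-deprecated AutoRebalancePartitions field as a no-op. The one version constant that survives is HumioVersionMinimumSupported. A minimal sketch of the remaining floor-check pattern, reusing the HumioVersionFromString and AtLeast helpers seen in the deleted code (their (value, error) shapes are inferred from those call sites, and checkImageMeetsMinimum is a hypothetical helper written for illustration, not code from this PR):

```go
// checkImageMeetsMinimum parses the LogScale version out of the configured
// container image and rejects anything below the single remaining version
// constant, HumioVersionMinimumSupported ("1.118.0").
func checkImageMeetsMinimum(hnp *HumioNodePool) error {
	humioVersion, err := HumioVersionFromString(hnp.GetImage())
	if err != nil {
		return fmt.Errorf("could not parse version from image %q: %w", hnp.GetImage(), err)
	}
	ok, err := humioVersion.AtLeast(HumioVersionMinimumSupported)
	if err != nil {
		return fmt.Errorf("could not compare versions: %w", err)
	}
	if !ok {
		return fmt.Errorf("image %q is below the minimum supported LogScale version %s",
			hnp.GetImage(), HumioVersionMinimumSupported)
	}
	return nil
}
```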
diff --git a/controllers/humiocluster_defaults.go b/controllers/humiocluster_defaults.go
index c617c104..696c0594 100644
--- a/controllers/humiocluster_defaults.go
+++ b/controllers/humiocluster_defaults.go
@@ -33,10 +33,9 @@ import (
 )
 
 const (
-	Image                        = "humio/humio-core:1.100.0"
+	Image                        = "humio/humio-core:1.131.1"
 	HelperImage                  = "humio/humio-operator-helper:3568eb1e7041beaf70d48e71a3d5fc6c8cfb9a6f"
 	targetReplicationFactor      = 2
-	storagePartitionsCount       = 24
 	digestPartitionsCount        = 24
 	HumioPort                    = 8080
 	elasticPort                  = 9200
@@ -44,7 +43,6 @@ const (
 	ExtraKafkaPropertiesFilename = "extra-kafka-properties.properties"
 	ViewGroupPermissionsFilename = "view-group-permissions.json"
 	RolePermissionsFilename      = "role-permissions.json"
-	nodeUUIDPrefix               = "humio_"
 	HumioContainerName           = "humio"
 	AuthContainerName            = "humio-auth"
 	InitContainerName            = "humio-init"
@@ -81,7 +79,6 @@ type HumioNodePool struct {
 	viewGroupPermissions string // Deprecated: Replaced by rolePermissions
 	rolePermissions      string
 	targetReplicationFactor int
-	storagePartitionsCount  int
 	digestPartitionsCount   int
 	path                    string
 	ingress                 humiov1alpha1.HumioClusterIngressSpec
@@ -123,7 +120,6 @@ func NewHumioNodeManagerFromHumioCluster(hc *humiov1alpha1.HumioCluster) *HumioN
 		Affinity:                        hc.Spec.Affinity,
 		SidecarContainers:               hc.Spec.SidecarContainers,
 		ExtraKafkaConfigs:               hc.Spec.ExtraKafkaConfigs,
-		NodeUUIDPrefix:                  hc.Spec.NodeUUIDPrefix,
 		ExtraHumioVolumeMounts:          hc.Spec.ExtraHumioVolumeMounts,
 		ExtraVolumes:                    hc.Spec.ExtraVolumes,
 		HumioServiceAccountAnnotations:  hc.Spec.HumioServiceAccountAnnotations,
@@ -144,7 +140,6 @@ func NewHumioNodeManagerFromHumioCluster(hc *humiov1alpha1.HumioCluster) *HumioN
 		viewGroupPermissions:    hc.Spec.ViewGroupPermissions,
 		rolePermissions:         hc.Spec.RolePermissions,
 		targetReplicationFactor: hc.Spec.TargetReplicationFactor,
-		storagePartitionsCount:  hc.Spec.StoragePartitionsCount,
 		digestPartitionsCount:   hc.Spec.DigestPartitionsCount,
 		path:                    hc.Spec.Path,
 		ingress:                 hc.Spec.Ingress,
@@ -187,7 +182,6 @@ func NewHumioNodeManagerFromHumioNodePool(hc *humiov1alpha1.HumioCluster, hnp *h
 		Affinity:                        hnp.Affinity,
 		SidecarContainers:               hnp.SidecarContainers,
 		ExtraKafkaConfigs:               hnp.ExtraKafkaConfigs,
-		NodeUUIDPrefix:                  hnp.NodeUUIDPrefix,
 		ExtraHumioVolumeMounts:          hnp.ExtraHumioVolumeMounts,
 		ExtraVolumes:                    hnp.ExtraVolumes,
 		HumioServiceAccountAnnotations:  hnp.HumioServiceAccountAnnotations,
@@ -208,7 +202,6 @@ func NewHumioNodeManagerFromHumioNodePool(hc *humiov1alpha1.HumioCluster, hnp *h
 		viewGroupPermissions:    hc.Spec.ViewGroupPermissions,
 		rolePermissions:         hc.Spec.RolePermissions,
 		targetReplicationFactor: hc.Spec.TargetReplicationFactor,
-		storagePartitionsCount:  hc.Spec.StoragePartitionsCount,
 		digestPartitionsCount:   hc.Spec.DigestPartitionsCount,
 		path:                    hc.Spec.Path,
 		ingress:                 hc.Spec.Ingress,
 	}
 }
 
-func (hnp HumioNodePool) GetClusterName() string {
+func (hnp *HumioNodePool) GetClusterName() string {
 	return hnp.clusterName
 }
 
-func (hnp HumioNodePool) GetNodePoolName() string {
+func (hnp *HumioNodePool) GetNodePoolName() string {
 	if hnp.nodePoolName == "" {
 		return hnp.GetClusterName()
 	}
 	return strings.Join([]string{hnp.GetClusterName(), hnp.nodePoolName}, "-")
 }
 
-func (hnp HumioNodePool) GetNamespace() string {
+func (hnp *HumioNodePool) GetNamespace() string {
 	return hnp.namespace
 }
 
-func (hnp HumioNodePool) GetHostname() string {
+func (hnp *HumioNodePool) GetHostname() string {
 	return hnp.hostname
 }
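From here down, the patch flips the remaining value receivers on HumioNodePool methods to pointer receivers. The motivation is not spelled out in the commit, but two readings fit the surrounding context: the type already had pointer-receiver methods (GetImage and SetHumioClusterNodePoolRevisionAnnotation appear with `*HumioNodePool` in the unchanged context lines), so this unifies the method set, and it avoids copying the sizeable struct on every getter call. A toy before/after sketch (the `example` type is illustrative only, not the real HumioNodePool):

```go
// example stands in for a large struct such as HumioNodePool.
type example struct {
	name string
}

// Before: a value receiver copies the whole struct on every call,
// and mixes receiver kinds once pointer-receiver mutators exist:
//   func (e example) Name() string { return e.name }

// After: pointer receivers throughout, matching the existing mutators.
func (e *example) Name() string { return e.name }

// Mutators like this one already required a pointer receiver; the
// getters are simply brought in line with it.
func (e *example) SetName(n string) { e.name = n }
```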
@@ -246,44 +239,37 @@ func (hnp *HumioNodePool) GetImage() string {
 	return Image
 }
 
-func (hnp HumioNodePool) GetImageSource() *humiov1alpha1.HumioImageSource {
+func (hnp *HumioNodePool) GetImageSource() *humiov1alpha1.HumioImageSource {
 	return hnp.humioNodeSpec.ImageSource
 }
 
-func (hnp HumioNodePool) GetHelperImage() string {
+func (hnp *HumioNodePool) GetHelperImage() string {
 	if hnp.humioNodeSpec.HelperImage != "" {
 		return hnp.humioNodeSpec.HelperImage
 	}
 	return HelperImage
 }
 
-func (hnp HumioNodePool) GetImagePullSecrets() []corev1.LocalObjectReference {
+func (hnp *HumioNodePool) GetImagePullSecrets() []corev1.LocalObjectReference {
 	return hnp.humioNodeSpec.ImagePullSecrets
 }
 
-func (hnp HumioNodePool) GetImagePullPolicy() corev1.PullPolicy {
+func (hnp *HumioNodePool) GetImagePullPolicy() corev1.PullPolicy {
 	return hnp.humioNodeSpec.ImagePullPolicy
 }
 
-func (hnp HumioNodePool) GetEnvironmentVariablesSource() []corev1.EnvFromSource {
+func (hnp *HumioNodePool) GetEnvironmentVariablesSource() []corev1.EnvFromSource {
 	return hnp.humioNodeSpec.EnvironmentVariablesSource
 }
 
-func (hnp HumioNodePool) GetTargetReplicationFactor() int {
+func (hnp *HumioNodePool) GetTargetReplicationFactor() int {
 	if hnp.targetReplicationFactor != 0 {
 		return hnp.targetReplicationFactor
 	}
 	return targetReplicationFactor
 }
 
-func (hnp HumioNodePool) GetStoragePartitionsCount() int {
-	if hnp.storagePartitionsCount != 0 {
-		return hnp.storagePartitionsCount
-	}
-	return storagePartitionsCount
-}
-
-func (hnp HumioNodePool) GetDigestPartitionsCount() int {
+func (hnp *HumioNodePool) GetDigestPartitionsCount() int {
 	if hnp.digestPartitionsCount != 0 {
 		return hnp.digestPartitionsCount
 	}
@@ -298,7 +284,7 @@ func (hnp *HumioNodePool) SetHumioClusterNodePoolRevisionAnnotation(newRevision
 	hnp.clusterAnnotations[revisionKey] = strconv.Itoa(newRevision)
 }
 
-func (hnp HumioNodePool) GetHumioClusterNodePoolRevisionAnnotation() (string, int) {
+func (hnp *HumioNodePool) GetHumioClusterNodePoolRevisionAnnotation() (string, int) {
 	annotations := map[string]string{}
 	if len(hnp.clusterAnnotations) > 0 {
 		annotations = hnp.clusterAnnotations
@@ -315,11 +301,11 @@ func (hnp HumioNodePool) GetHumioClusterNodePoolRevisionAnnotation() (string, in
 	return podAnnotationKey, existingRevision
 }
 
-func (hnp HumioNodePool) GetIngress() humiov1alpha1.HumioClusterIngressSpec {
+func (hnp *HumioNodePool) GetIngress() humiov1alpha1.HumioClusterIngressSpec {
 	return hnp.ingress
 }
 
-func (hnp HumioNodePool) GetEnvironmentVariables() []corev1.EnvVar {
+func (hnp *HumioNodePool) GetEnvironmentVariables() []corev1.EnvVar {
 	envVars := make([]corev1.EnvVar, len(hnp.humioNodeSpec.EnvironmentVariables))
 	copy(envVars, hnp.humioNodeSpec.EnvironmentVariables)
 
@@ -359,9 +345,8 @@
 		{Name: "HUMIO_PORT", Value: strconv.Itoa(HumioPort)},
 		{Name: "ELASTIC_PORT", Value: strconv.Itoa(elasticPort)},
-		{Name: "DIGEST_REPLICATION_FACTOR", Value: strconv.Itoa(hnp.GetTargetReplicationFactor())},
-		{Name: "STORAGE_REPLICATION_FACTOR", Value: strconv.Itoa(hnp.GetTargetReplicationFactor())},
-		{Name: "DEFAULT_PARTITION_COUNT", Value: strconv.Itoa(hnp.GetStoragePartitionsCount())},
+		{Name: "DEFAULT_DIGEST_REPLICATION_FACTOR", Value: strconv.Itoa(hnp.GetTargetReplicationFactor())},
+		{Name:
"DEFAULT_SEGMENT_REPLICATION_FACTOR", Value: strconv.Itoa(hnp.GetTargetReplicationFactor())}, {Name: "INGEST_QUEUE_INITIAL_PARTITIONS", Value: strconv.Itoa(hnp.GetDigestPartitionsCount())}, {Name: "HUMIO_LOG4J_CONFIGURATION", Value: "log4j2-json-stdout.xml"}, { @@ -378,17 +363,6 @@ func (hnp HumioNodePool) GetEnvironmentVariables() []corev1.EnvVar { }, } - humioVersion, _ := HumioVersionFromString(hnp.GetImage()) - if ok, _ := humioVersion.AtLeast(HumioVersionWithoutOldVhostSelection); !ok { - if EnvVarHasValue(hnp.humioNodeSpec.EnvironmentVariables, "USING_EPHEMERAL_DISKS", "true") && - EnvVarHasKey(hnp.humioNodeSpec.EnvironmentVariables, "ZOOKEEPER_URL") { - envDefaults = append(envDefaults, corev1.EnvVar{ - Name: "ZOOKEEPER_URL_FOR_NODE_UUID", - Value: "$(ZOOKEEPER_URL)", - }) - } - } - for _, defaultEnvVar := range envDefaults { envVars = AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVars, defaultEnvVar) } @@ -424,7 +398,7 @@ func (hnp HumioNodePool) GetEnvironmentVariables() []corev1.EnvVar { return envVars } -func (hnp HumioNodePool) GetContainerSecurityContext() *corev1.SecurityContext { +func (hnp *HumioNodePool) GetContainerSecurityContext() *corev1.SecurityContext { if hnp.humioNodeSpec.ContainerSecurityContext == nil { return &corev1.SecurityContext{ AllowPrivilegeEscalation: helpers.BoolPtr(false), @@ -445,13 +419,13 @@ func (hnp HumioNodePool) GetContainerSecurityContext() *corev1.SecurityContext { return hnp.humioNodeSpec.ContainerSecurityContext } -func (hnp HumioNodePool) GetNodePoolLabels() map[string]string { +func (hnp *HumioNodePool) GetNodePoolLabels() map[string]string { labels := hnp.GetCommonClusterLabels() labels[kubernetes.NodePoolLabelName] = hnp.GetNodePoolName() return labels } -func (hnp HumioNodePool) GetPodLabels() map[string]string { +func (hnp *HumioNodePool) GetPodLabels() map[string]string { labels := hnp.GetNodePoolLabels() for k, v := range hnp.humioNodeSpec.PodLabels { if _, ok := labels[k]; !ok { @@ -461,32 +435,32 @@ func (hnp HumioNodePool) GetPodLabels() map[string]string { return labels } -func (hnp HumioNodePool) GetCommonClusterLabels() map[string]string { +func (hnp *HumioNodePool) GetCommonClusterLabels() map[string]string { return kubernetes.LabelsForHumio(hnp.clusterName) } -func (hnp HumioNodePool) GetCASecretName() string { +func (hnp *HumioNodePool) GetCASecretName() string { if hnp.tls != nil && hnp.tls.CASecretName != "" { return hnp.tls.CASecretName } return fmt.Sprintf("%s-ca-keypair", hnp.GetClusterName()) } -func (hnp HumioNodePool) UseExistingCA() bool { +func (hnp *HumioNodePool) UseExistingCA() bool { return hnp.tls != nil && hnp.tls.CASecretName != "" } -func (hnp HumioNodePool) GetLabelsForSecret(secretName string) map[string]string { +func (hnp *HumioNodePool) GetLabelsForSecret(secretName string) map[string]string { labels := hnp.GetCommonClusterLabels() labels[kubernetes.SecretNameLabelName] = secretName return labels } -func (hnp HumioNodePool) GetNodeCount() int { +func (hnp *HumioNodePool) GetNodeCount() int { return hnp.humioNodeSpec.NodeCount } -func (hnp HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplate(pvcName string) corev1.VolumeSource { +func (hnp *HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplate(pvcName string) corev1.VolumeSource { if hnp.PVCsEnabled() { return corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ @@ -497,98 +471,98 @@ func (hnp HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplate(pvcName return corev1.VolumeSource{} } -func (hnp 
HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplateRAW() corev1.PersistentVolumeClaimSpec { +func (hnp *HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplateRAW() corev1.PersistentVolumeClaimSpec { return hnp.humioNodeSpec.DataVolumePersistentVolumeClaimSpecTemplate } -func (hnp HumioNodePool) DataVolumePersistentVolumeClaimSpecTemplateIsSetByUser() bool { +func (hnp *HumioNodePool) DataVolumePersistentVolumeClaimSpecTemplateIsSetByUser() bool { return !reflect.DeepEqual(hnp.humioNodeSpec.DataVolumePersistentVolumeClaimSpecTemplate, corev1.PersistentVolumeClaimSpec{}) } -func (hnp HumioNodePool) GetDataVolumePersistentVolumeClaimPolicy() humiov1alpha1.HumioPersistentVolumeClaimPolicy { +func (hnp *HumioNodePool) GetDataVolumePersistentVolumeClaimPolicy() humiov1alpha1.HumioPersistentVolumeClaimPolicy { if hnp.PVCsEnabled() { return hnp.humioNodeSpec.DataVolumePersistentVolumeClaimPolicy } return humiov1alpha1.HumioPersistentVolumeClaimPolicy{} } -func (hnp HumioNodePool) GetDataVolumeSource() corev1.VolumeSource { +func (hnp *HumioNodePool) GetDataVolumeSource() corev1.VolumeSource { return hnp.humioNodeSpec.DataVolumeSource } -func (hnp HumioNodePool) GetPodAnnotations() map[string]string { +func (hnp *HumioNodePool) GetPodAnnotations() map[string]string { return hnp.humioNodeSpec.PodAnnotations } -func (hnp HumioNodePool) GetAuthServiceAccountSecretName() string { +func (hnp *HumioNodePool) GetAuthServiceAccountSecretName() string { return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authServiceAccountSecretNameIdentifier) } -func (hnp HumioNodePool) GetInitServiceAccountSecretName() string { +func (hnp *HumioNodePool) GetInitServiceAccountSecretName() string { return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), initServiceAccountSecretNameIdentifier) } -func (hnp HumioNodePool) GetInitServiceAccountName() string { +func (hnp *HumioNodePool) GetInitServiceAccountName() string { if hnp.humioNodeSpec.InitServiceAccountName != "" { return hnp.humioNodeSpec.InitServiceAccountName } return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), initServiceAccountNameSuffix) } -func (hnp HumioNodePool) InitServiceAccountIsSetByUser() bool { +func (hnp *HumioNodePool) InitServiceAccountIsSetByUser() bool { return hnp.humioNodeSpec.InitServiceAccountName != "" } -func (hnp HumioNodePool) GetAuthServiceAccountName() string { +func (hnp *HumioNodePool) GetAuthServiceAccountName() string { if hnp.humioNodeSpec.AuthServiceAccountName != "" { return hnp.humioNodeSpec.AuthServiceAccountName } return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authServiceAccountNameSuffix) } -func (hnp HumioNodePool) AuthServiceAccountIsSetByUser() bool { +func (hnp *HumioNodePool) AuthServiceAccountIsSetByUser() bool { return hnp.humioNodeSpec.AuthServiceAccountName != "" } -func (hnp HumioNodePool) GetInitClusterRoleName() string { +func (hnp *HumioNodePool) GetInitClusterRoleName() string { return fmt.Sprintf("%s-%s-%s", hnp.GetNamespace(), hnp.GetNodePoolName(), initClusterRoleSuffix) } -func (hnp HumioNodePool) GetInitClusterRoleBindingName() string { +func (hnp *HumioNodePool) GetInitClusterRoleBindingName() string { return fmt.Sprintf("%s-%s-%s", hnp.GetNamespace(), hnp.GetNodePoolName(), initClusterRoleBindingSuffix) } -func (hnp HumioNodePool) GetAuthRoleName() string { +func (hnp *HumioNodePool) GetAuthRoleName() string { return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authRoleSuffix) } -func (hnp HumioNodePool) GetAuthRoleBindingName() string { +func (hnp *HumioNodePool) GetAuthRoleBindingName() 
string { return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authRoleBindingSuffix) } -func (hnp HumioNodePool) GetShareProcessNamespace() *bool { +func (hnp *HumioNodePool) GetShareProcessNamespace() *bool { if hnp.humioNodeSpec.ShareProcessNamespace == nil { return helpers.BoolPtr(false) } return hnp.humioNodeSpec.ShareProcessNamespace } -func (hnp HumioNodePool) HumioServiceAccountIsSetByUser() bool { +func (hnp *HumioNodePool) HumioServiceAccountIsSetByUser() bool { return hnp.humioNodeSpec.HumioServiceAccountName != "" } -func (hnp HumioNodePool) GetHumioServiceAccountName() string { +func (hnp *HumioNodePool) GetHumioServiceAccountName() string { if hnp.humioNodeSpec.HumioServiceAccountName != "" { return hnp.humioNodeSpec.HumioServiceAccountName } return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), HumioServiceAccountNameSuffix) } -func (hnp HumioNodePool) GetHumioServiceAccountAnnotations() map[string]string { +func (hnp *HumioNodePool) GetHumioServiceAccountAnnotations() map[string]string { return hnp.humioNodeSpec.HumioServiceAccountAnnotations } -func (hnp HumioNodePool) GetContainerReadinessProbe() *corev1.Probe { +func (hnp *HumioNodePool) GetContainerReadinessProbe() *corev1.Probe { if hnp.humioNodeSpec.ContainerReadinessProbe != nil && (*hnp.humioNodeSpec.ContainerReadinessProbe == (corev1.Probe{})) { return nil } @@ -612,7 +586,7 @@ func (hnp HumioNodePool) GetContainerReadinessProbe() *corev1.Probe { return hnp.humioNodeSpec.ContainerReadinessProbe } -func (hnp HumioNodePool) GetContainerLivenessProbe() *corev1.Probe { +func (hnp *HumioNodePool) GetContainerLivenessProbe() *corev1.Probe { if hnp.humioNodeSpec.ContainerLivenessProbe != nil && (*hnp.humioNodeSpec.ContainerLivenessProbe == (corev1.Probe{})) { return nil } @@ -636,7 +610,7 @@ func (hnp HumioNodePool) GetContainerLivenessProbe() *corev1.Probe { return hnp.humioNodeSpec.ContainerLivenessProbe } -func (hnp HumioNodePool) GetContainerStartupProbe() *corev1.Probe { +func (hnp *HumioNodePool) GetContainerStartupProbe() *corev1.Probe { if hnp.humioNodeSpec.ContainerStartupProbe != nil && (*hnp.humioNodeSpec.ContainerStartupProbe == (corev1.Probe{})) { return nil } @@ -659,7 +633,7 @@ func (hnp HumioNodePool) GetContainerStartupProbe() *corev1.Probe { return hnp.humioNodeSpec.ContainerStartupProbe } -func (hnp HumioNodePool) GetPodSecurityContext() *corev1.PodSecurityContext { +func (hnp *HumioNodePool) GetPodSecurityContext() *corev1.PodSecurityContext { if hnp.humioNodeSpec.PodSecurityContext == nil { return &corev1.PodSecurityContext{ RunAsUser: helpers.Int64Ptr(65534), @@ -671,7 +645,7 @@ func (hnp HumioNodePool) GetPodSecurityContext() *corev1.PodSecurityContext { return hnp.humioNodeSpec.PodSecurityContext } -func (hnp HumioNodePool) GetAffinity() *corev1.Affinity { +func (hnp *HumioNodePool) GetAffinity() *corev1.Affinity { if hnp.humioNodeSpec.Affinity == (corev1.Affinity{}) { return &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ @@ -703,47 +677,47 @@ func (hnp HumioNodePool) GetAffinity() *corev1.Affinity { return &hnp.humioNodeSpec.Affinity } -func (hnp HumioNodePool) GetSidecarContainers() []corev1.Container { +func (hnp *HumioNodePool) GetSidecarContainers() []corev1.Container { return hnp.humioNodeSpec.SidecarContainers } -func (hnp HumioNodePool) GetTolerations() []corev1.Toleration { +func (hnp *HumioNodePool) GetTolerations() []corev1.Toleration { return hnp.humioNodeSpec.Tolerations } -func (hnp HumioNodePool) GetTopologySpreadConstraints() []corev1.TopologySpreadConstraint { +func (hnp 
*HumioNodePool) GetTopologySpreadConstraints() []corev1.TopologySpreadConstraint { return hnp.humioNodeSpec.TopologySpreadConstraints } -func (hnp HumioNodePool) GetResources() corev1.ResourceRequirements { +func (hnp *HumioNodePool) GetResources() corev1.ResourceRequirements { return hnp.humioNodeSpec.Resources } -func (hnp HumioNodePool) GetExtraKafkaConfigs() string { +func (hnp *HumioNodePool) GetExtraKafkaConfigs() string { return hnp.humioNodeSpec.ExtraKafkaConfigs } -func (hnp HumioNodePool) GetExtraKafkaConfigsConfigMapName() string { +func (hnp *HumioNodePool) GetExtraKafkaConfigsConfigMapName() string { return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), extraKafkaConfigsConfigMapNameSuffix) } -func (hnp HumioNodePool) GetViewGroupPermissions() string { +func (hnp *HumioNodePool) GetViewGroupPermissions() string { return hnp.viewGroupPermissions } -func (hnp HumioNodePool) GetViewGroupPermissionsConfigMapName() string { +func (hnp *HumioNodePool) GetViewGroupPermissionsConfigMapName() string { return fmt.Sprintf("%s-%s", hnp.GetClusterName(), viewGroupPermissionsConfigMapNameSuffix) } -func (hnp HumioNodePool) GetRolePermissions() string { +func (hnp *HumioNodePool) GetRolePermissions() string { return hnp.rolePermissions } -func (hnp HumioNodePool) GetRolePermissionsConfigMapName() string { +func (hnp *HumioNodePool) GetRolePermissionsConfigMapName() string { return fmt.Sprintf("%s-%s", hnp.GetClusterName(), rolePermissionsConfigMapNameSuffix) } -func (hnp HumioNodePool) GetPath() string { +func (hnp *HumioNodePool) GetPath() string { if hnp.path != "" { if strings.HasPrefix(hnp.path, "/") { return hnp.path @@ -754,83 +728,75 @@ func (hnp HumioNodePool) GetPath() string { return "/" } -// Deprecated: LogScale 1.70.0 deprecated this option, and was later removed in LogScale 1.80.0 -func (hnp HumioNodePool) GetNodeUUIDPrefix() string { - if hnp.humioNodeSpec.NodeUUIDPrefix != "" { - return hnp.humioNodeSpec.NodeUUIDPrefix - } - return nodeUUIDPrefix -} - -func (hnp HumioNodePool) GetHumioServiceLabels() map[string]string { +func (hnp *HumioNodePool) GetHumioServiceLabels() map[string]string { return hnp.humioNodeSpec.HumioServiceLabels } -func (hnp HumioNodePool) GetTerminationGracePeriodSeconds() *int64 { +func (hnp *HumioNodePool) GetTerminationGracePeriodSeconds() *int64 { if hnp.humioNodeSpec.TerminationGracePeriodSeconds == nil { return helpers.Int64Ptr(300) } return hnp.humioNodeSpec.TerminationGracePeriodSeconds } -func (hnp HumioNodePool) GetIDPCertificateSecretName() string { +func (hnp *HumioNodePool) GetIDPCertificateSecretName() string { if hnp.idpCertificateSecretName != "" { return hnp.idpCertificateSecretName } return fmt.Sprintf("%s-%s", hnp.GetClusterName(), idpCertificateSecretNameSuffix) } -func (hnp HumioNodePool) GetExtraHumioVolumeMounts() []corev1.VolumeMount { +func (hnp *HumioNodePool) GetExtraHumioVolumeMounts() []corev1.VolumeMount { return hnp.humioNodeSpec.ExtraHumioVolumeMounts } -func (hnp HumioNodePool) GetExtraVolumes() []corev1.Volume { +func (hnp *HumioNodePool) GetExtraVolumes() []corev1.Volume { return hnp.humioNodeSpec.ExtraVolumes } -func (hnp HumioNodePool) GetHumioServiceAnnotations() map[string]string { +func (hnp *HumioNodePool) GetHumioServiceAnnotations() map[string]string { return hnp.humioNodeSpec.HumioServiceAnnotations } -func (hnp HumioNodePool) GetHumioServicePort() int32 { +func (hnp *HumioNodePool) GetHumioServicePort() int32 { if hnp.humioNodeSpec.HumioServicePort != 0 { return hnp.humioNodeSpec.HumioServicePort } return 
HumioPort } -func (hnp HumioNodePool) GetHumioESServicePort() int32 { +func (hnp *HumioNodePool) GetHumioESServicePort() int32 { if hnp.humioNodeSpec.HumioESServicePort != 0 { return hnp.humioNodeSpec.HumioESServicePort } return elasticPort } -func (hnp HumioNodePool) GetServiceType() corev1.ServiceType { +func (hnp *HumioNodePool) GetServiceType() corev1.ServiceType { if hnp.humioNodeSpec.HumioServiceType != "" { return hnp.humioNodeSpec.HumioServiceType } return corev1.ServiceTypeClusterIP } -func (hnp HumioNodePool) GetServiceName() string { +func (hnp *HumioNodePool) GetServiceName() string { if hnp.nodePoolName == "" { return hnp.clusterName } return fmt.Sprintf("%s-%s", hnp.clusterName, hnp.nodePoolName) } -func (hnp HumioNodePool) InitContainerDisabled() bool { +func (hnp *HumioNodePool) InitContainerDisabled() bool { return hnp.humioNodeSpec.DisableInitContainer } -func (hnp HumioNodePool) PVCsEnabled() bool { +func (hnp *HumioNodePool) PVCsEnabled() bool { emptyPersistentVolumeClaimSpec := corev1.PersistentVolumeClaimSpec{} return !reflect.DeepEqual(hnp.humioNodeSpec.DataVolumePersistentVolumeClaimSpecTemplate, emptyPersistentVolumeClaimSpec) } -func (hnp HumioNodePool) TLSEnabled() bool { +func (hnp *HumioNodePool) TLSEnabled() bool { if hnp.tls == nil { return helpers.UseCertManager() } @@ -841,7 +807,7 @@ func (hnp HumioNodePool) TLSEnabled() bool { return helpers.UseCertManager() && *hnp.tls.Enabled } -func (hnp HumioNodePool) GetProbeScheme() corev1.URIScheme { +func (hnp *HumioNodePool) GetProbeScheme() corev1.URIScheme { if !hnp.TLSEnabled() { return corev1.URISchemeHTTP } @@ -849,7 +815,7 @@ func (hnp HumioNodePool) GetProbeScheme() corev1.URIScheme { return corev1.URISchemeHTTPS } -func (hnp HumioNodePool) GetUpdateStrategy() *humiov1alpha1.HumioUpdateStrategy { +func (hnp *HumioNodePool) GetUpdateStrategy() *humiov1alpha1.HumioUpdateStrategy { if hnp.humioNodeSpec.UpdateStrategy != nil { return hnp.humioNodeSpec.UpdateStrategy } @@ -860,11 +826,11 @@ func (hnp HumioNodePool) GetUpdateStrategy() *humiov1alpha1.HumioUpdateStrategy } } -func (hnp HumioNodePool) GetPriorityClassName() string { +func (hnp *HumioNodePool) GetPriorityClassName() string { return hnp.humioNodeSpec.PriorityClassName } -func (hnp HumioNodePool) OkToDeletePvc() bool { +func (hnp *HumioNodePool) OkToDeletePvc() bool { return hnp.GetDataVolumePersistentVolumeClaimPolicy().ReclaimType == humiov1alpha1.HumioPersistentVolumeReclaimTypeOnNodeDelete } diff --git a/controllers/humiocluster_defaults_test.go b/controllers/humiocluster_defaults_test.go index ccfe0dc6..3e47abe0 100644 --- a/controllers/humiocluster_defaults_test.go +++ b/controllers/humiocluster_defaults_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package controllers import ( - "fmt" "strings" "testing" @@ -195,21 +194,16 @@ func Test_constructContainerArgs(t *testing.T) { fields fields }{ { - "no cpu resource settings, ephemeral disks and init container, using zk", + "no cpu resource settings, ephemeral disks and init container", fields{ &humiov1alpha1.HumioCluster{ Spec: humiov1alpha1.HumioClusterSpec{ HumioNodeSpec: humiov1alpha1.HumioNodeSpec{ - Image: fmt.Sprintf("humio/humio-core:%s", HumioVersionMinimumSupported), EnvironmentVariables: []corev1.EnvVar{ { Name: "USING_EPHEMERAL_DISKS", Value: "true", }, - { - Name: "ZOOKEEPER_URL", - Value: "dummy", - }, }, }, }, @@ -217,37 +211,11 @@ func Test_constructContainerArgs(t *testing.T) { []string{ "export CORES=", "export HUMIO_OPTS=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", "export ZONE=", }, []string{}, }, }, - { - "no cpu resource settings, ephemeral disks and init container, without zk", - fields{ - &humiov1alpha1.HumioCluster{ - Spec: humiov1alpha1.HumioClusterSpec{ - HumioNodeSpec: humiov1alpha1.HumioNodeSpec{ - EnvironmentVariables: []corev1.EnvVar{ - { - Name: "USING_EPHEMERAL_DISKS", - Value: "true", - }, - }, - }, - }, - }, - []string{ - "export CORES=", - "export HUMIO_OPTS=", - "export ZONE=", - }, - []string{ - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", - }, - }, - }, { "cpu resource settings, ephemeral disks and init container", fields{ @@ -259,10 +227,6 @@ func Test_constructContainerArgs(t *testing.T) { Name: "USING_EPHEMERAL_DISKS", Value: "true", }, - { - Name: "ZOOKEEPER_URL", - Value: "dummy", - }, }, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{ @@ -278,7 +242,6 @@ func Test_constructContainerArgs(t *testing.T) { []string{ "export CORES=", "export HUMIO_OPTS=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, @@ -293,10 +256,6 @@ func Test_constructContainerArgs(t *testing.T) { Name: "USING_EPHEMERAL_DISKS", Value: "true", }, - { - Name: "ZOOKEEPER_URL", - Value: "dummy", - }, }, DisableInitContainer: true, }, @@ -308,7 +267,6 @@ func Test_constructContainerArgs(t *testing.T) { }, []string{ "export ZONE=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, @@ -323,10 +281,6 @@ func Test_constructContainerArgs(t *testing.T) { Name: "USING_EPHEMERAL_DISKS", Value: "true", }, - { - Name: "ZOOKEEPER_URL", - Value: "dummy", - }, }, DisableInitContainer: true, Resources: corev1.ResourceRequirements{ @@ -342,7 +296,6 @@ func Test_constructContainerArgs(t *testing.T) { "export CORES=", "export HUMIO_OPTS=", "export ZONE=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, @@ -357,9 +310,7 @@ func Test_constructContainerArgs(t *testing.T) { "export HUMIO_OPTS=", "export ZONE=", }, - []string{ - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", - }, + []string{}, }, }, { @@ -380,7 +331,6 @@ func Test_constructContainerArgs(t *testing.T) { "export ZONE=", }, []string{ - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", "export CORES=", "export HUMIO_OPTS=", }, @@ -401,7 +351,6 @@ func Test_constructContainerArgs(t *testing.T) { "export HUMIO_OPTS=", }, []string{ - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", "export ZONE=", }, }, @@ -426,7 +375,6 @@ func Test_constructContainerArgs(t *testing.T) { "export CORES=", "export HUMIO_OPTS=", "export ZONE=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, @@ -441,10 +389,6 @@ func Test_constructContainerArgs(t *testing.T) { Name: "USING_EPHEMERAL_DISKS", Value: "true", }, - { - Name: "ZOOKEEPER_URL", - Value: "dummy", - }, { Name: "CORES", Value: "1", @@ -459,7 +403,6 @@ func Test_constructContainerArgs(t 
*testing.T) { []string{ "export CORES=", "export HUMIO_OPTS=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, @@ -474,10 +417,6 @@ func Test_constructContainerArgs(t *testing.T) { Name: "USING_EPHEMERAL_DISKS", Value: "true", }, - { - Name: "ZOOKEEPER_URL", - Value: "dummy", - }, { Name: "CORES", Value: "1", @@ -492,7 +431,6 @@ func Test_constructContainerArgs(t *testing.T) { "export CORES=", "export HUMIO_OPTS=", "export ZONE=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, @@ -515,7 +453,6 @@ func Test_constructContainerArgs(t *testing.T) { "export ZONE=", }, []string{ - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", "export CORES=", "export HUMIO_OPTS=", }, @@ -542,7 +479,6 @@ func Test_constructContainerArgs(t *testing.T) { "export CORES=", "export HUMIO_OPTS=", "export ZONE=", - "export ZOOKEEPER_PREFIX_FOR_NODE_UUID=", }, }, }, diff --git a/controllers/humiocluster_pods.go b/controllers/humiocluster_pods.go index e7275bdb..6361740f 100644 --- a/controllers/humiocluster_pods.go +++ b/controllers/humiocluster_pods.go @@ -17,12 +17,10 @@ limitations under the License. package controllers import ( - "bytes" "context" "encoding/json" "errors" "fmt" - "html/template" "reflect" "sort" "strconv" @@ -50,7 +48,6 @@ import ( const ( humioAppPath = "/app/humio" HumioDataPath = "/data/humio-data" - humioDataTmpPath = "/app/humio/humio-data/tmp" sharedPath = "/shared" TmpPath = "/tmp" waitForPodTimeoutSeconds = 10 @@ -63,11 +60,6 @@ type podAttachments struct { envVarSourceData *map[string]string } -// nodeUUIDTemplateVars contains the variables that are allowed to be rendered for the nodeUUID string -type nodeUUIDTemplateVars struct { - Zone string -} - // ConstructContainerArgs returns the container arguments for the Humio pods. We want to grab a UUID from zookeeper // only when using ephemeral disks. If we're using persistent storage, then we rely on Humio to generate the UUID. // Note that relying on PVCs may not be good enough here as it's possible to have persistent storage using hostPath. @@ -75,19 +67,6 @@ type nodeUUIDTemplateVars struct { func ConstructContainerArgs(hnp *HumioNodePool, podEnvVars []corev1.EnvVar) ([]string, error) { var shellCommands []string - humioVersion, _ := HumioVersionFromString(hnp.GetImage()) - if ok, _ := humioVersion.AtLeast(HumioVersionWithoutOldVhostSelection); !ok { - if EnvVarHasValue(podEnvVars, "USING_EPHEMERAL_DISKS", "true") { - if EnvVarHasKey(podEnvVars, "ZOOKEEPER_URL") { - nodeUUIDPrefix, err := constructNodeUUIDPrefix(hnp) - if err != nil { - return []string{""}, fmt.Errorf("unable to construct node UUID: %w", err) - } - shellCommands = append(shellCommands, fmt.Sprintf("export ZOOKEEPER_PREFIX_FOR_NODE_UUID=%s", nodeUUIDPrefix)) - } - } - } - if !hnp.InitContainerDisabled() { shellCommands = append(shellCommands, fmt.Sprintf("export ZONE=$(cat %s/availability-zone)", sharedPath)) } @@ -103,35 +82,6 @@ func ConstructContainerArgs(hnp *HumioNodePool, podEnvVars []corev1.EnvVar) ([]s return []string{"-c", strings.Join(shellCommands, " && ")}, nil } -// constructNodeUUIDPrefix checks the value of the nodeUUID prefix and attempts to render it as a template. If the template -// renders {{.Zone}} as the string set to containsZoneIdentifier, then we can be assured that the desired outcome is -// that the zone in included inside the nodeUUID prefix. 
-// Deprecated: LogScale 1.70.0 deprecated this option, and was later removed in LogScale 1.80.0 -func constructNodeUUIDPrefix(hnp *HumioNodePool) (string, error) { - prefix := hnp.GetNodeUUIDPrefix() - containsZoneIdentifier := "containsZone" - - t := template.Must(template.New("prefix").Parse(prefix)) - data := nodeUUIDTemplateVars{Zone: containsZoneIdentifier} - - var tpl bytes.Buffer - if err := t.Execute(&tpl, data); err != nil { - return "", err - } - - nodeUUIDPrefix := tpl.String() - nodeUUIDPrefix = strings.Replace(nodeUUIDPrefix, containsZoneIdentifier, fmt.Sprintf("$(cat %s/availability-zone)", sharedPath), 1) - - if !strings.HasPrefix(nodeUUIDPrefix, "/") { - nodeUUIDPrefix = fmt.Sprintf("/%s", nodeUUIDPrefix) - } - if !strings.HasSuffix(nodeUUIDPrefix, "_") { - nodeUUIDPrefix = fmt.Sprintf("%s_", nodeUUIDPrefix) - } - - return nodeUUIDPrefix, nil -} - func ConstructPod(hnp *HumioNodePool, humioNodeName string, attachments *podAttachments) (*corev1.Pod, error) { var pod corev1.Pod mode := int32(420) diff --git a/controllers/humiocluster_version.go b/controllers/humiocluster_version.go index 4521c39b..9ccb4572 100644 --- a/controllers/humiocluster_version.go +++ b/controllers/humiocluster_version.go @@ -8,9 +8,7 @@ import ( ) const ( - HumioVersionMinimumSupported = "1.70.0" - HumioVersionWithoutOldVhostSelection = "1.80.0" - HumioVersionWithAutomaticPartitionManagement = "1.89.0" + HumioVersionMinimumSupported = "1.118.0" ) type HumioVersion struct { diff --git a/controllers/suite/clusters/humiocluster_controller_test.go b/controllers/suite/clusters/humiocluster_controller_test.go index 4f2bc314..d1bfcd8a 100644 --- a/controllers/suite/clusters/humiocluster_controller_test.go +++ b/controllers/suite/clusters/humiocluster_controller_test.go @@ -41,15 +41,15 @@ import ( ) const ( - oldSupportedHumioVersion = "humio/humio-core:1.70.0" + oldSupportedHumioVersion = "humio/humio-core:1.118.0" upgradeJumpHumioVersion = "humio/humio-core:1.128.0" oldUnsupportedHumioVersion = "humio/humio-core:1.18.4" - upgradePatchBestEffortOldVersion = "humio/humio-core:1.82.0" - upgradePatchBestEffortNewVersion = "humio/humio-core:1.82.1" + upgradePatchBestEffortOldVersion = "humio/humio-core:1.124.1" + upgradePatchBestEffortNewVersion = "humio/humio-core:1.124.2" - upgradeRollingBestEffortVersionJumpOldVersion = "humio/humio-core:1.70.0" - upgradeRollingBestEffortVersionJumpNewVersion = "humio/humio-core:1.76.2" + upgradeRollingBestEffortVersionJumpOldVersion = "humio/humio-core:1.124.1" + upgradeRollingBestEffortVersionJumpNewVersion = "humio/humio-core:1.131.1" ) var _ = Describe("HumioCluster Controller", func() { @@ -1117,7 +1117,7 @@ var _ = Describe("HumioCluster Controller", func() { Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1")) suite.UsingClusterBy(key.Name, "Updating the cluster image unsuccessfully") - updatedImage := "humio/humio-operator:1.70.7-missing-image" + updatedImage := fmt.Sprintf("%s-missing-image", controllers.Image) Eventually(func() error { updatedHumioCluster = humiov1alpha1.HumioCluster{} err := k8sClient.Get(ctx, key, &updatedHumioCluster) @@ -1302,10 +1302,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "test", Value: "", }, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -1350,10 +1346,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "test", Value: "update", 
}, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -1443,10 +1435,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "HUMIO_OPTS", Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", }, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -1477,10 +1465,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "HUMIO_OPTS", Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", }, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -1549,10 +1533,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "HUMIO_OPTS", Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", }, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -1579,10 +1559,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "HUMIO_OPTS", Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", }, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -1680,10 +1656,6 @@ var _ = Describe("HumioCluster Controller", func() { Name: "HUMIO_OPTS", Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", }, - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -2224,63 +2196,6 @@ var _ = Describe("HumioCluster Controller", func() { }) Context("Humio Cluster Container Arguments", func() { - It("Should correctly configure container arguments and ephemeral disks env var with deprecated zk node uuid", func() { - key := types.NamespacedName{ - Name: "humiocluster-container-args-zk-uuid", - Namespace: testProcessNamespace, - } - toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = oldSupportedHumioVersion - - suite.UsingClusterBy(key.Name, "Creating the cluster successfully without ephemeral disks") - ctx := context.Background() - suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout) - defer suite.CleanupCluster(ctx, k8sClient, toCreate) - - hnp := controllers.NewHumioNodeManagerFromHumioCluster(toCreate) - clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, hnp.GetPodLabels()) - for _, pod := range clusterPods { - humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIdx].Args).To(Equal([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) 
&& exec bash /app/humio/run.sh"})) - Expect(pod.Spec.Containers[humioIdx].Env).ToNot(ContainElement(corev1.EnvVar{ - Name: "ZOOKEEPER_URL_FOR_NODE_UUID", - Value: "$(ZOOKEEPER_URL)", - })) - } - - suite.UsingClusterBy(key.Name, "Updating node uuid prefix which includes ephemeral disks and zone") - var updatedHumioCluster humiov1alpha1.HumioCluster - - Eventually(func() error { - err := k8sClient.Get(ctx, key, &updatedHumioCluster) - if err != nil { - return err - } - updatedHumioCluster.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{Name: "USING_EPHEMERAL_DISKS", Value: "true"}) - updatedHumioCluster.Spec.NodeUUIDPrefix = "humio_{{.Zone}}_" - return k8sClient.Update(ctx, &updatedHumioCluster) - }, testTimeout, suite.TestInterval).Should(Succeed()) - - hnp = controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster) - Eventually(func() []string { - clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, hnp.GetPodLabels()) - if len(clusterPods) > 0 { - humioIdx, _ := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName) - return clusterPods[0].Spec.Containers[humioIdx].Args - } - return []string{} - }, testTimeout, suite.TestInterval).Should(BeEquivalentTo([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) && export ZOOKEEPER_PREFIX_FOR_NODE_UUID=/humio_$(cat /shared/availability-zone)_ && exec bash /app/humio/run.sh"})) - - Eventually(func() []corev1.EnvVar { - clusterPods, err := kubernetes.ListPods(ctx, k8sClient, key.Namespace, hnp.GetPodLabels()) - Expect(err).ToNot(HaveOccurred()) - if len(clusterPods) > 0 { - humioIdx, _ := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName) - return clusterPods[0].Spec.Containers[humioIdx].Env - } - return []corev1.EnvVar{} - }, testTimeout, suite.TestInterval).Should(ContainElement(corev1.EnvVar{Name: "ZOOKEEPER_URL_FOR_NODE_UUID", Value: "$(ZOOKEEPER_URL)"})) - }) It("Should correctly configure container arguments and ephemeral disks env var with default vhost selection method", func() { key := types.NamespacedName{ Name: "humiocluster-container-args", @@ -2298,10 +2213,6 @@ var _ = Describe("HumioCluster Controller", func() { for _, pod := range clusterPods { humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) Expect(pod.Spec.Containers[humioIdx].Args).To(Equal([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) && exec bash /app/humio/run.sh"})) - Expect(pod.Spec.Containers[humioIdx].Env).ToNot(ContainElement(corev1.EnvVar{ - Name: "ZOOKEEPER_URL_FOR_NODE_UUID", - Value: "$(ZOOKEEPER_URL)", - })) } suite.UsingClusterBy(key.Name, "Updating node uuid prefix which includes ephemeral disks and zone") @@ -2313,7 +2224,6 @@ var _ = Describe("HumioCluster Controller", func() { return err } updatedHumioCluster.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{Name: "USING_EPHEMERAL_DISKS", Value: "true"}) - updatedHumioCluster.Spec.NodeUUIDPrefix = "humio_{{.Zone}}_" return k8sClient.Update(ctx, &updatedHumioCluster) }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -2328,14 +2238,6 @@ var _ = Describe("HumioCluster Controller", func() { } return []string{} }, testTimeout, 
suite.TestInterval).Should(BeEquivalentTo([]string{"-c", expectedContainerArgString})) - - clusterPods, err := kubernetes.ListPods(ctx, k8sClient, key.Namespace, hnp.GetPodLabels()) - Expect(err).ToNot(HaveOccurred()) - humioIdx, _ := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName) - Expect(clusterPods[0].Spec.Containers[humioIdx].Env).ToNot(ContainElement(corev1.EnvVar{ - Name: "ZOOKEEPER_URL_FOR_NODE_UUID", - Value: "$(ZOOKEEPER_URL)", - })) }) }) diff --git a/controllers/suite/clusters/suite_test.go b/controllers/suite/clusters/suite_test.go index c83b8f3a..c8411c2c 100644 --- a/controllers/suite/clusters/suite_test.go +++ b/controllers/suite/clusters/suite_test.go @@ -119,15 +119,15 @@ var _ = BeforeSuite(func() { CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, } - humioClientForTestSuite = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioAction = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioAlert = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioCluster = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioExternalCluster = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioIngestToken = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioParser = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioRepository = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) - humioClientForHumioView = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) + humioClientForTestSuite = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioAction = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioAlert = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioCluster = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioExternalCluster = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioIngestToken = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioParser = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioRepository = humio.NewMockClient(humioapi.Cluster{}, nil) + humioClientForHumioView = humio.NewMockClient(humioapi.Cluster{}, nil) } var cfg *rest.Config diff --git a/controllers/suite/common.go b/controllers/suite/common.go index 0ff8e281..d824c92f 100644 --- a/controllers/suite/common.go +++ b/controllers/suite/common.go @@ -248,10 +248,6 @@ func ConstructBasicNodeSpecForHumioCluster(key types.NamespacedName) humiov1alph }, }, EnvironmentVariables: []corev1.EnvVar{ - { - Name: "ZOOKEEPER_URL", - Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181", - }, { Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", @@ -583,11 +579,11 @@ func CreateAndBootstrapCluster(ctx context.Context, k8sClient client.Client, hum Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec.Containers[humioIdx].Env).To(ContainElements([]corev1.EnvVar{ { - Name: "DIGEST_REPLICATION_FACTOR", + Name: "DEFAULT_DIGEST_REPLICATION_FACTOR", Value: strconv.Itoa(cluster.Spec.TargetReplicationFactor), }, { - Name: "STORAGE_REPLICATION_FACTOR", + Name: "DEFAULT_SEGMENT_REPLICATION_FACTOR", Value: strconv.Itoa(cluster.Spec.TargetReplicationFactor), }, })) diff --git a/controllers/suite/resources/suite_test.go b/controllers/suite/resources/suite_test.go index e5ec1f1a..87de6de7 100644 --- 
a/controllers/suite/resources/suite_test.go +++ b/controllers/suite/resources/suite_test.go @@ -111,7 +111,7 @@ var _ = BeforeSuite(func() { CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, } - humioClient = humio.NewMockClient(humioapi.Cluster{}, nil, nil, nil) + humioClient = humio.NewMockClient(humioapi.Cluster{}, nil) } var cfg *rest.Config diff --git a/go.mod b/go.mod index 524df965..3a43ff3c 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/go-logr/logr v1.4.1 github.com/go-logr/zapr v1.3.0 github.com/google/go-cmp v0.6.0 - github.com/humio/cli v0.33.1-0.20240313124410-359de49fb2af + github.com/humio/cli v0.33.1-0.20240425153346-f278dc8465f3 github.com/onsi/ginkgo/v2 v2.17.1 github.com/onsi/gomega v1.32.0 github.com/prometheus/client_golang v1.19.0 diff --git a/go.sum b/go.sum index db0b79b4..08c3507f 100644 --- a/go.sum +++ b/go.sum @@ -52,8 +52,8 @@ github.com/google/pprof v0.0.0-20240415144954-be81aee2d733 h1:nHRIUuWr4qaFmeHwGB github.com/google/pprof v0.0.0-20240415144954-be81aee2d733/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/humio/cli v0.33.1-0.20240313124410-359de49fb2af h1:v9hiih1LUPguOgYCgrmA1CSVfYYdALXmeNC1e6yTXVs= -github.com/humio/cli v0.33.1-0.20240313124410-359de49fb2af/go.mod h1:8wDs9TeN5kRLBKz8uI39Lqz2g6nhfuBeKgQUmXXAV1E= +github.com/humio/cli v0.33.1-0.20240425153346-f278dc8465f3 h1:9UVZdMFGt7FktPvRjJ58RQFHFSYIEfkcbCg4Xq8z9HM= +github.com/humio/cli v0.33.1-0.20240425153346-f278dc8465f3/go.mod h1:GGgOajbd4z5osw50k5+dXYrcSkj9nZssAWS4Lv77yc4= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= diff --git a/images/helper/go.mod b/images/helper/go.mod index 25b6955a..4130b9c6 100644 --- a/images/helper/go.mod +++ b/images/helper/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/cli/shurcooL-graphql v0.0.4 - github.com/humio/cli v0.33.1-0.20240313124410-359de49fb2af + github.com/humio/cli v0.33.1-0.20240425153346-f278dc8465f3 k8s.io/api v0.28.8 k8s.io/apimachinery v0.28.8 k8s.io/client-go v0.28.8 diff --git a/images/helper/go.sum b/images/helper/go.sum index 8c447969..23cca7cf 100644 --- a/images/helper/go.sum +++ b/images/helper/go.sum @@ -33,8 +33,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/humio/cli v0.33.1-0.20240313124410-359de49fb2af h1:v9hiih1LUPguOgYCgrmA1CSVfYYdALXmeNC1e6yTXVs= -github.com/humio/cli v0.33.1-0.20240313124410-359de49fb2af/go.mod h1:8wDs9TeN5kRLBKz8uI39Lqz2g6nhfuBeKgQUmXXAV1E= +github.com/humio/cli v0.33.1-0.20240425153346-f278dc8465f3 h1:9UVZdMFGt7FktPvRjJ58RQFHFSYIEfkcbCg4Xq8z9HM= +github.com/humio/cli v0.33.1-0.20240425153346-f278dc8465f3/go.mod h1:GGgOajbd4z5osw50k5+dXYrcSkj9nZssAWS4Lv77yc4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod 
h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= diff --git a/images/helper/main.go b/images/helper/main.go index ca8629c6..7f992a96 100644 --- a/images/helper/main.go +++ b/images/helper/main.go @@ -339,10 +339,17 @@ func authMode() { go func() { // Run separate go routine for readiness/liveness endpoint http.HandleFunc("/", httpHandler) - err := http.ListenAndServe(":8180", nil) + + server := &http.Server{ + Addr: ":8180", + ReadHeaderTimeout: 3 * time.Second, + } + + err := server.ListenAndServe() if err != nil { - panic("could not bind on :8180") + panic(err) } + }() kubernetesClient := newKubernetesClientset() diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go index dfbb922c..b38beae7 100644 --- a/pkg/helpers/helpers.go +++ b/pkg/helpers/helpers.go @@ -24,13 +24,10 @@ import ( "sort" "strings" - graphql "github.com/cli/shurcooL-graphql" uberzap "go.uber.org/zap" "go.uber.org/zap/zapcore" humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1" - - humioapi "github.com/humio/cli/api" ) // GetTypeName returns the name of the type of object which is obtained by using reflection @@ -62,46 +59,6 @@ func RemoveElement(list []string, s string) []string { return list } -func MapStoragePartition(vs []humioapi.StoragePartition, f func(partition humioapi.StoragePartition) humioapi.StoragePartitionInput) []humioapi.StoragePartitionInput { - vsm := make([]humioapi.StoragePartitionInput, len(vs)) - for i, v := range vs { - vsm[i] = f(v) - } - return vsm -} - -func ToStoragePartitionInput(line humioapi.StoragePartition) humioapi.StoragePartitionInput { - var input humioapi.StoragePartitionInput - nodeIds := make([]graphql.Int, len(line.NodeIds)) - for i, v := range line.NodeIds { - nodeIds[i] = graphql.Int(v) - } - input.ID = graphql.Int(line.Id) - input.NodeIDs = nodeIds - - return input -} - -func MapIngestPartition(vs []humioapi.IngestPartition, f func(partition humioapi.IngestPartition) humioapi.IngestPartitionInput) []humioapi.IngestPartitionInput { - vsm := make([]humioapi.IngestPartitionInput, len(vs)) - for i, v := range vs { - vsm[i] = f(v) - } - return vsm -} - -func ToIngestPartitionInput(line humioapi.IngestPartition) humioapi.IngestPartitionInput { - var input humioapi.IngestPartitionInput - nodeIds := make([]graphql.Int, len(line.NodeIds)) - for i, v := range line.NodeIds { - nodeIds[i] = graphql.Int(v) - } - input.ID = graphql.Int(line.Id) - input.NodeIDs = nodeIds - - return input -} - // UseCertManager returns whether the operator will use cert-manager func UseCertManager() bool { certmanagerEnabled, found := os.LookupEnv("USE_CERTMANAGER") diff --git a/pkg/humio/client.go b/pkg/humio/client.go index 4ac357d0..0425f51a 100644 --- a/pkg/humio/client.go +++ b/pkg/humio/client.go @@ -48,10 +48,6 @@ type Client interface { type ClusterClient interface { GetClusters(*humioapi.Config, reconcile.Request) (humioapi.Cluster, error) - UpdateStoragePartitionScheme(*humioapi.Config, reconcile.Request, []humioapi.StoragePartitionInput) error - UpdateIngestPartitionScheme(*humioapi.Config, reconcile.Request, []humioapi.IngestPartitionInput) error - SuggestedStoragePartitions(*humioapi.Config, reconcile.Request) ([]humioapi.StoragePartitionInput, error) - SuggestedIngestPartitions(*humioapi.Config, reconcile.Request) ([]humioapi.IngestPartitionInput, error) GetHumioClient(*humioapi.Config, reconcile.Request) *humioapi.Client ClearHumioClientConnections() GetBaseURL(*humioapi.Config, reconcile.Request, 
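The images/helper/main.go hunk above replaces the bare http.ListenAndServe call with an explicit http.Server so a ReadHeaderTimeout can be set, which is the kind of issue gosec's Slowloris and timeout checks (G112/G114) flag. A self-contained sketch of the same pattern, with a stand-in handler in place of the helper's httpHandler:

package main

import (
	"net/http"
	"time"
)

func main() {
	// Stand-in for the helper's readiness/liveness handler.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// An explicit http.Server allows timeouts that http.ListenAndServe cannot set.
	server := &http.Server{
		Addr:              ":8180",
		ReadHeaderTimeout: 3 * time.Second, // bound how long a client may take to send request headers
	}

	if err := server.ListenAndServe(); err != nil {
		panic(err)
	}
}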
*humiov1alpha1.HumioCluster) *url.URL @@ -215,38 +211,6 @@ func (h *ClientConfig) GetClusters(config *humioapi.Config, req reconcile.Reques return clusters, err } -// UpdateStoragePartitionScheme updates the storage partition scheme and can be mocked via the Client interface -func (h *ClientConfig) UpdateStoragePartitionScheme(config *humioapi.Config, req reconcile.Request, spi []humioapi.StoragePartitionInput) error { - //lint:ignore SA1019 we can rip out all uses of UpdateStoragePartitionScheme when we no longer support LogScale versions prior to 1.88 - err := h.GetHumioClient(config, req).Clusters().UpdateStoragePartitionScheme(spi) - if err != nil { - h.logger.Error(err, "could not update storage partition scheme cluster information") - } - return err -} - -// UpdateIngestPartitionScheme updates the ingest partition scheme and can be mocked via the Client interface -func (h *ClientConfig) UpdateIngestPartitionScheme(config *humioapi.Config, req reconcile.Request, ipi []humioapi.IngestPartitionInput) error { - //lint:ignore SA1019 we can rip out all uses of UpdateIngestPartitionScheme when we no longer support LogScale versions prior to 1.80 - err := h.GetHumioClient(config, req).Clusters().UpdateIngestPartitionScheme(ipi) - if err != nil { - h.logger.Error(err, "could not update ingest partition scheme cluster information") - } - return err -} - -// SuggestedStoragePartitions gets the suggested storage partition layout -func (h *ClientConfig) SuggestedStoragePartitions(config *humioapi.Config, req reconcile.Request) ([]humioapi.StoragePartitionInput, error) { - //lint:ignore SA1019 we can rip out all uses of SuggestedStoragePartitions when we no longer support LogScale versions prior to 1.88 - return h.GetHumioClient(config, req).Clusters().SuggestedStoragePartitions() -} - -// SuggestedIngestPartitions gets the suggested ingest partition layout -func (h *ClientConfig) SuggestedIngestPartitions(config *humioapi.Config, req reconcile.Request) ([]humioapi.IngestPartitionInput, error) { - //lint:ignore SA1019 we can rip out all uses of SuggestedIngestPartitions when we no longer support LogScale versions prior to 1.80 - return h.GetHumioClient(config, req).Clusters().SuggestedIngestPartitions() -} - // GetBaseURL returns the base URL for given HumioCluster func (h *ClientConfig) GetBaseURL(config *humioapi.Config, req reconcile.Request, hc *humiov1alpha1.HumioCluster) *url.URL { protocol := "https" diff --git a/pkg/humio/client_mock.go b/pkg/humio/client_mock.go index 87a71891..044754f9 100644 --- a/pkg/humio/client_mock.go +++ b/pkg/humio/client_mock.go @@ -31,46 +31,36 @@ import ( ) type ClientMock struct { - Cluster humioapi.Cluster - ClusterError error - UpdateStoragePartitionSchemeError error - UpdateIngestPartitionSchemeError error - IngestToken humioapi.IngestToken - Parser humioapi.Parser - Repository humioapi.Repository - View humioapi.View - OnPremLicense humioapi.OnPremLicense - Action humioapi.Action - Alert humioapi.Alert + Cluster humioapi.Cluster + ClusterError error + IngestToken humioapi.IngestToken + Parser humioapi.Parser + Repository humioapi.Repository + View humioapi.View + OnPremLicense humioapi.OnPremLicense + Action humioapi.Action + Alert humioapi.Alert } type MockClientConfig struct { apiClient *ClientMock } -func NewMockClient(cluster humioapi.Cluster, clusterError error, updateStoragePartitionSchemeError error, updateIngestPartitionSchemeError error) *MockClientConfig { - storagePartition := humioapi.StoragePartition{} - ingestPartition := 
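With the partition-scheme knobs removed, NewMockClient shrinks to two arguments: the Cluster fixture and an optional error which, judging by the ClientMock.ClusterError field, the mock surfaces from GetClusters. A hedged usage sketch (the import paths are the repository's own; the error text is illustrative):

package main

import (
	"fmt"

	humioapi "github.com/humio/cli/api"
	"github.com/humio/humio-operator/pkg/humio"
)

func main() {
	// Happy path: empty cluster fixture, no injected error.
	okClient := humio.NewMockClient(humioapi.Cluster{}, nil)
	_ = okClient

	// Failure path: the injected error should come back from GetClusters.
	failingClient := humio.NewMockClient(humioapi.Cluster{}, fmt.Errorf("cluster unavailable"))
	_ = failingClient
}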
humioapi.IngestPartition{} - +func NewMockClient(cluster humioapi.Cluster, clusterError error) *MockClientConfig { mockClientConfig := &MockClientConfig{ apiClient: &ClientMock{ - Cluster: cluster, - ClusterError: clusterError, - UpdateStoragePartitionSchemeError: updateStoragePartitionSchemeError, - UpdateIngestPartitionSchemeError: updateIngestPartitionSchemeError, - IngestToken: humioapi.IngestToken{}, - Parser: humioapi.Parser{}, - Repository: humioapi.Repository{}, - View: humioapi.View{}, - OnPremLicense: humioapi.OnPremLicense{}, - Action: humioapi.Action{}, - Alert: humioapi.Alert{}, + Cluster: cluster, + ClusterError: clusterError, + IngestToken: humioapi.IngestToken{}, + Parser: humioapi.Parser{}, + Repository: humioapi.Repository{}, + View: humioapi.View{}, + OnPremLicense: humioapi.OnPremLicense{}, + Action: humioapi.Action{}, + Alert: humioapi.Alert{}, }, } - cluster.StoragePartitions = []humioapi.StoragePartition{storagePartition} - cluster.IngestPartitions = []humioapi.IngestPartition{ingestPartition} - return mockClientConfig } @@ -88,50 +78,6 @@ func (h *MockClientConfig) GetClusters(config *humioapi.Config, req reconcile.Re return h.apiClient.Cluster, nil } -func (h *MockClientConfig) UpdateStoragePartitionScheme(config *humioapi.Config, req reconcile.Request, sps []humioapi.StoragePartitionInput) error { - if h.apiClient.UpdateStoragePartitionSchemeError != nil { - return h.apiClient.UpdateStoragePartitionSchemeError - } - - var storagePartitions []humioapi.StoragePartition - for _, storagePartitionInput := range sps { - var nodeIdsList []int - for _, nodeID := range storagePartitionInput.NodeIDs { - nodeIdsList = append(nodeIdsList, int(nodeID)) - } - storagePartitions = append(storagePartitions, humioapi.StoragePartition{Id: int(storagePartitionInput.ID), NodeIds: nodeIdsList}) - } - h.apiClient.Cluster.StoragePartitions = storagePartitions - - return nil -} - -func (h *MockClientConfig) UpdateIngestPartitionScheme(config *humioapi.Config, req reconcile.Request, ips []humioapi.IngestPartitionInput) error { - if h.apiClient.UpdateIngestPartitionSchemeError != nil { - return h.apiClient.UpdateIngestPartitionSchemeError - } - - var ingestPartitions []humioapi.IngestPartition - for _, ingestPartitionInput := range ips { - var nodeIdsList []int - for _, nodeID := range ingestPartitionInput.NodeIDs { - nodeIdsList = append(nodeIdsList, int(nodeID)) - } - ingestPartitions = append(ingestPartitions, humioapi.IngestPartition{Id: int(ingestPartitionInput.ID), NodeIds: nodeIdsList}) - } - h.apiClient.Cluster.IngestPartitions = ingestPartitions - - return nil -} - -func (h *MockClientConfig) SuggestedStoragePartitions(config *humioapi.Config, req reconcile.Request) ([]humioapi.StoragePartitionInput, error) { - return []humioapi.StoragePartitionInput{}, nil -} - -func (h *MockClientConfig) SuggestedIngestPartitions(config *humioapi.Config, req reconcile.Request) ([]humioapi.IngestPartitionInput, error) { - return []humioapi.IngestPartitionInput{}, nil -} - func (h *MockClientConfig) GetBaseURL(config *humioapi.Config, req reconcile.Request, hc *humiov1alpha1.HumioCluster) *url.URL { baseURL, _ := url.Parse(fmt.Sprintf("http://%s-headless.%s:%d/", hc.Name, hc.Namespace, 8080)) return baseURL
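The mock's GetBaseURL builds the headless-service URL directly from the cluster's name and namespace. A tiny runnable sketch of the resulting URL shape, using illustrative name and namespace values rather than a real cluster:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Same format string as the mock: http://<name>-headless.<namespace>:8080/
	name, namespace := "example-humiocluster", "default"
	baseURL, err := url.Parse(fmt.Sprintf("http://%s-headless.%s:%d/", name, namespace, 8080))
	if err != nil {
		panic(err)
	}
	fmt.Println(baseURL) // http://example-humiocluster-headless.default:8080/
}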