diff --git a/.github/workflows/e2e-dummy.yaml b/.github/workflows/e2e-dummy.yaml new file mode 100644 index 000000000..20be69229 --- /dev/null +++ b/.github/workflows/e2e-dummy.yaml @@ -0,0 +1,57 @@ +on: pull_request +name: e2e-dummy +jobs: + e2e-dummy: + name: ${{ matrix.kind-k8s-version }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + kind-k8s-version: + - kindest/node:v1.21.14@sha256:8a4e9bb3f415d2bb81629ce33ef9c76ba514c14d707f9797a01e3216376ba093 # Not officially supported by kind 0.22.0 + - kindest/node:v1.22.17@sha256:f5b2e5698c6c9d6d0adc419c0deae21a425c07d81bbf3b6a6834042f25d4fba2 # Not officially supported by kind 0.22.0 + - kindest/node:v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3 + - kindest/node:v1.24.17@sha256:bad10f9b98d54586cba05a7eaa1b61c6b90bfc4ee174fdc43a7b75ca75c95e51 + - kindest/node:v1.25.16@sha256:e8b50f8e06b44bb65a93678a65a26248fae585b3d3c2a669e5ca6c90c69dc519 + - kindest/node:v1.26.14@sha256:5d548739ddef37b9318c70cb977f57bf3e5015e4552be4e27e57280a8cbb8e4f + - kindest/node:v1.27.11@sha256:681253009e68069b8e01aad36a1e0fa8cf18bb0ab3e5c4069b2e65cafdd70843 + - kindest/node:v1.28.7@sha256:9bc6c451a289cf96ad0bbaf33d416901de6fd632415b076ab05f5fa7e4f65c58 + - kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: '1.22.2' + - name: cleanup kind + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 + chmod +x ./kind + ./kind delete cluster || true + make clean + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get temp bin dir + id: bin_dir + run: echo "BIN_DIR=$(mktemp -d --tmpdir=${{ github.workspace }})" >> $GITHUB_OUTPUT + - name: run e2e tests + env: + BIN_DIR: ${{ steps.bin_dir.outputs.BIN_DIR }} + HUMIO_E2E_LICENSE: ${{ 
secrets.HUMIO_E2E_LICENSE }} + E2E_KIND_K8S_VERSION: ${{ matrix.kind-k8s-version }} + E2E_LOGS_HUMIO_HOSTNAME: ${{ secrets.E2E_LOGS_HUMIO_HOSTNAME }} + E2E_LOGS_HUMIO_INGEST_TOKEN: ${{ secrets.E2E_LOGS_HUMIO_INGEST_TOKEN }} + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + GINKGO_NODES: "12" + run: | + hack/run-e2e-using-kind-dummy.sh + - name: cleanup kind and docker files + if: always() + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 + chmod +x ./kind + ./kind delete cluster || true + make clean diff --git a/.github/workflows/preview.yaml b/.github/workflows/preview.yaml index 566be3e22..6000b796d 100644 --- a/.github/workflows/preview.yaml +++ b/.github/workflows/preview.yaml @@ -47,7 +47,7 @@ jobs: echo "HUMIO_CORE_DEV_TAG=$LATEST_TAG" >> $GITHUB_OUTPUT - name: run e2e tests env: - HUMIO_CORE_DEV_TAG: ${{ steps.docker_tag.outputs.HUMIO_CORE_DEV_TAG }} + HUMIO_OPERATOR_DEFAULT_HUMIO_CORE_IMAGE: ${{ steps.docker_tag.outputs.HUMIO_CORE_DEV_TAG }} BIN_DIR: ${{ steps.bin_dir.outputs.BIN_DIR }} HUMIO_E2E_LICENSE: ${{ secrets.HUMIO_E2E_LICENSE }} E2E_KIND_K8S_VERSION: ${{ matrix.kind-k8s-version }} @@ -58,8 +58,6 @@ jobs: GINKGO_NODES: "6" run: | echo "Running operator tests against humio-core-dev:$HUMIO_CORE_DEV_TAG" - sed -i "s/humio-core:[0-9.]*/humio-core-dev:$HUMIO_CORE_DEV_TAG/g" controllers/humiocluster_defaults.go - hack/run-e2e-using-kind.sh - name: cleanup kind if: always() diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file mode 100644 index 000000000..67f395ac3 --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,19 @@ +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 
+ stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.' + close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.' + close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.' + days-before-issue-stale: 30 + days-before-pr-stale: 45 + days-before-issue-close: 5 + days-before-pr-close: 10 diff --git a/Dockerfile b/Dockerfile index c0c05001e..50fb1cd80 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.22 as builder +FROM golang:1.22 AS builder ARG RELEASE_VERSION=master ARG RELEASE_COMMIT=none diff --git a/Makefile b/Makefile index ac972c493..2d3e8df9e 100644 --- a/Makefile +++ b/Makefile @@ -57,7 +57,7 @@ endif eval \$$($(GOBIN)/setup-envtest use -p env ${TEST_K8S_VERSION}); \ export USE_CERTMANAGER=false; \ export TEST_USE_EXISTING_CLUSTER=false; \ - $(GINKGO) -vv --no-color --procs 3 -output-dir=${PWD} -keep-separate-reports --junit-report=test-results-junit.xml --randomize-suites --randomize-all -timeout 10m ./... -covermode=count -coverprofile cover.out \ + $(GINKGO) --label-filter=envtest -vv --no-color --procs 3 -output-dir=${PWD} -keep-separate-reports --junit-report=test-results-junit.xml --randomize-suites --randomize-all -timeout 10m ./... 
-covermode=count -coverprofile cover.out \ " ##@ Build @@ -131,6 +131,10 @@ docker-build-helper: cp LICENSE images/helper/ docker build --no-cache --pull -t ${IMG} ${IMG_BUILD_ARGS} images/helper +# Build the logscale dummy docker image +docker-build-dummy: + docker build --no-cache --pull -t ${IMG} ${IMG_BUILD_ARGS} images/logscale-dummy + clean: rm controllers_*.xml || true rm -r testbindir || true diff --git a/controllers/humiocluster_defaults.go b/controllers/humiocluster_defaults.go index fa262c7e8..2e35f8a24 100644 --- a/controllers/humiocluster_defaults.go +++ b/controllers/humiocluster_defaults.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" + "github.com/humio/humio-operator/controllers/versions" "github.com/humio/humio-operator/pkg/helpers" humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1" @@ -34,8 +35,6 @@ import ( ) const ( - Image = "humio/humio-core:1.131.1" - HelperImage = "humio/humio-operator-helper:8f5ef6c7e470226e77d985f36cf39be9a100afea" targetReplicationFactor = 2 digestPartitionsCount = 24 HumioPort = 8080 @@ -245,7 +244,7 @@ func (hnp *HumioNodePool) GetImage() string { return os.Getenv("HUMIO_OPERATOR_DEFAULT_HUMIO_CORE_IMAGE") } - return Image + return versions.DefaultHumioImageVersion() } func (hnp *HumioNodePool) GetImageSource() *humiov1alpha1.HumioImageSource { @@ -261,7 +260,7 @@ func (hnp *HumioNodePool) GetHelperImage() string { return os.Getenv("HUMIO_OPERATOR_DEFAULT_HUMIO_HELPER_IMAGE") } - return HelperImage + return versions.DefaultHelperImageVersion() } func (hnp *HumioNodePool) GetImagePullSecrets() []corev1.LocalObjectReference { @@ -587,7 +586,7 @@ func (hnp *HumioNodePool) GetContainerReadinessProbe() *corev1.Probe { } if hnp.humioNodeSpec.ContainerReadinessProbe == nil { - return &corev1.Probe{ + probe := &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/api/v1/is-node-up", @@ -595,12 +594,15 @@ func (hnp *HumioNodePool) GetContainerReadinessProbe() 
*corev1.Probe { Scheme: hnp.GetProbeScheme(), }, }, - InitialDelaySeconds: 30, - PeriodSeconds: 5, - TimeoutSeconds: 5, - SuccessThreshold: 1, - FailureThreshold: 10, + PeriodSeconds: 5, + TimeoutSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 10, + } + if os.Getenv("DUMMY_LOGSCALE_IMAGE") != "true" { + probe.InitialDelaySeconds = 30 } + return probe } return hnp.humioNodeSpec.ContainerReadinessProbe } diff --git a/controllers/humiocluster_pods.go b/controllers/humiocluster_pods.go index c7b837ad8..4e10c9fca 100644 --- a/controllers/humiocluster_pods.go +++ b/controllers/humiocluster_pods.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "os" "reflect" "sort" "strings" @@ -479,6 +480,11 @@ func ConstructPod(hnp *HumioNodePool, humioNodeName string, attachments *podAtta pod.Spec.Volumes = append(pod.Spec.Volumes, volume) } + authIdx, err := kubernetes.GetContainerIndexByName(pod, AuthContainerName) + if err != nil { + return &corev1.Pod{}, err + } + if hnp.TLSEnabled() { pod.Annotations[certHashAnnotation] = GetDesiredCertHash(hnp) pod.Spec.Containers[humioIdx].Env = append(pod.Spec.Containers[humioIdx].Env, corev1.EnvVar{ @@ -528,11 +534,6 @@ func ConstructPod(hnp *HumioNodePool, humioNodeName string, attachments *podAtta MountPath: "/var/lib/humio/tls-certificate-secret", }) - // Configuration specific to auth container - authIdx, err := kubernetes.GetContainerIndexByName(pod, AuthContainerName) - if err != nil { - return &corev1.Pod{}, err - } // We mount in the certificate on top of default system root certs so auth container automatically uses it: // https://golang.org/src/crypto/x509/root_linux.go pod.Spec.Containers[authIdx].VolumeMounts = append(pod.Spec.Containers[authIdx].VolumeMounts, corev1.VolumeMount{ @@ -591,6 +592,10 @@ func ConstructPod(hnp *HumioNodePool, humioNodeName string, attachments *podAtta } pod.Spec.Containers[humioIdx].Args = containerArgs + if os.Getenv("DUMMY_LOGSCALE_IMAGE") == "true" { + 
pod.Spec.Containers[authIdx].Env = append(pod.Spec.Containers[authIdx].Env, corev1.EnvVar{Name: "DUMMY_LOGSCALE_IMAGE", Value: "true"}) + } + return &pod, nil } diff --git a/controllers/suite/clusters/humiocluster_controller_test.go b/controllers/suite/clusters/humiocluster_controller_test.go index b62b8c991..31a50b604 100644 --- a/controllers/suite/clusters/humiocluster_controller_test.go +++ b/controllers/suite/clusters/humiocluster_controller_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" cmapi "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "github.com/humio/humio-operator/controllers/versions" "os" "reflect" "strings" @@ -41,18 +42,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -const ( - oldSupportedHumioVersion = "humio/humio-core:1.118.0" - upgradeJumpHumioVersion = "humio/humio-core:1.128.0" - oldUnsupportedHumioVersion = "humio/humio-core:1.18.4" - - upgradePatchBestEffortOldVersion = "humio/humio-core:1.124.1" - upgradePatchBestEffortNewVersion = "humio/humio-core:1.124.2" - - upgradeRollingBestEffortVersionJumpOldVersion = "humio/humio-core:1.124.1" - upgradeRollingBestEffortVersionJumpNewVersion = "humio/humio-core:1.131.1" -) - var _ = Describe("HumioCluster Controller", func() { BeforeEach(func() { @@ -69,7 +58,7 @@ var _ = Describe("HumioCluster Controller", func() { // your API definition. // Avoid adding tests for vanilla CRUD operations because they would // test Kubernetes API server, which isn't the goal here. 
- Context("Humio Cluster Simple", func() { + Context("Humio Cluster Simple", Label("envtest", "dummy", "real"), func() { It("Should bootstrap cluster correctly", func() { key := types.NamespacedName{ Name: "humiocluster-simple", @@ -85,7 +74,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Multiple Node Pools", func() { + Context("Humio Cluster With Multiple Node Pools", Label("envtest", "dummy", "real"), func() { It("Should bootstrap multi node cluster correctly", func() { key := types.NamespacedName{ Name: "humiocluster-multi-node-pool", @@ -131,7 +120,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Node Pools Only", func() { + Context("Humio Cluster With Node Pools Only", Label("envtest", "dummy", "real"), func() { It("Should bootstrap nodepools only cluster correctly", func() { key := types.NamespacedName{ Name: "humiocluster-node-pool-only", @@ -153,7 +142,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Without Init Container", func() { + Context("Humio Cluster Without Init Container", Label("envtest", "dummy", "real"), func() { It("Should bootstrap cluster correctly", func() { key := types.NamespacedName{ Name: "humiocluster-no-init-container", @@ -169,7 +158,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Multi Organizations", func() { + Context("Humio Cluster Multi Organizations", Label("envtest", "dummy", "real"), func() { It("Should bootstrap cluster correctly", func() { key := types.NamespacedName{ Name: "humiocluster-multi-org", @@ -192,14 +181,14 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Unsupported Version", func() { + Context("Humio Cluster Unsupported Version", Label("envtest", "dummy", "real"), func() { It("Creating cluster with unsupported version", func() { key := types.NamespacedName{ Name: "humiocluster-err-unsupp-vers", 
Namespace: testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = oldUnsupportedHumioVersion + toCreate.Spec.Image = versions.OldUnsupportedHumioVersion() ctx := context.Background() suite.CreateAndBootstrapCluster(ctx, k8sClient, testHumioClient, toCreate, true, humiov1alpha1.HumioClusterStateConfigError, testTimeout) @@ -221,18 +210,18 @@ var _ = Describe("HumioCluster Controller", func() { Expect(err).Should(Succeed()) } return updatedHumioCluster.Status.Message - }, testTimeout, suite.TestInterval).Should(Equal(fmt.Sprintf("Humio version must be at least %s: unsupported Humio version: %s", controllers.HumioVersionMinimumSupported, strings.Split(oldUnsupportedHumioVersion, ":")[1]))) + }, testTimeout, suite.TestInterval).Should(Equal(fmt.Sprintf("Humio version must be at least %s: unsupported Humio version: %s", controllers.HumioVersionMinimumSupported, strings.Split(strings.Split(versions.OldUnsupportedHumioVersion(), ":")[1], "-")[0]))) }) }) - Context("Humio Cluster Update Image", func() { + Context("Humio Cluster Update Image", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use new image", func() { key := types.NamespacedName{ Name: "humiocluster-update-image", Namespace: testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = oldSupportedHumioVersion + toCreate.Spec.Image = versions.OldSupportedHumioVersion() toCreate.Spec.NodeCount = 2 suite.UsingClusterBy(key.Name, "Creating the cluster successfully") @@ -259,7 +248,7 @@ var _ = Describe("HumioCluster Controller", func() { if err != nil { return err } - updatedHumioCluster.Spec.Image = upgradeJumpHumioVersion + updatedHumioCluster.Spec.Image = versions.UpgradeJumpHumioVersion() return k8sClient.Update(ctx, &updatedHumioCluster) }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -286,7 +275,7 @@ var _ = Describe("HumioCluster Controller", func() 
{ Expect(updatedClusterPods).To(HaveLen(toCreate.Spec.NodeCount)) for _, pod := range updatedClusterPods { humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradeJumpHumioVersion)) + Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(versions.UpgradeJumpHumioVersion())) Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2")) } @@ -297,7 +286,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Failed Pods", func() { + Context("Humio Cluster Update Failed Pods", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods that are in a failed state", func() { key := types.NamespacedName{ Name: "humiocluster-update-failed", @@ -407,14 +396,14 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Image Rolling Restart", func() { + Context("Humio Cluster Update Image Rolling Restart", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use new image in a rolling fashion", func() { key := types.NamespacedName{ Name: "humiocluster-update-image-rolling", Namespace: testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = oldSupportedHumioVersion + toCreate.Spec.Image = versions.OldSupportedHumioVersion() toCreate.Spec.NodeCount = 2 toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{ Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdate, @@ -444,7 +433,7 @@ var _ = Describe("HumioCluster Controller", func() { if err != nil { return err } - updatedHumioCluster.Spec.Image = upgradeJumpHumioVersion + updatedHumioCluster.Spec.Image = versions.UpgradeJumpHumioVersion() return k8sClient.Update(ctx, &updatedHumioCluster) }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -472,7 +461,7 @@ var _ = 
Describe("HumioCluster Controller", func() { Expect(updatedClusterPods).To(HaveLen(toCreate.Spec.NodeCount)) for _, pod := range updatedClusterPods { humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradeJumpHumioVersion)) + Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(versions.UpgradeJumpHumioVersion())) Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2")) } @@ -483,14 +472,14 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Image Update Strategy OnDelete", func() { + Context("Humio Cluster Update Image Update Strategy OnDelete", Label("envtest", "dummy", "real"), func() { It("Update should not replace pods on image update when update strategy OnDelete is used", func() { key := types.NamespacedName{ Name: "humiocluster-update-image-on-delete", Namespace: testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = oldSupportedHumioVersion + toCreate.Spec.Image = versions.OldSupportedHumioVersion() toCreate.Spec.NodeCount = 2 toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{ Type: humiov1alpha1.HumioClusterUpdateStrategyOnDelete, @@ -514,7 +503,7 @@ var _ = Describe("HumioCluster Controller", func() { Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1")) suite.UsingClusterBy(key.Name, "Updating the cluster image successfully") - updatedImage := controllers.Image + updatedImage := versions.DefaultHumioImageVersion() Eventually(func() error { updatedHumioCluster = humiov1alpha1.HumioCluster{} err := k8sClient.Get(ctx, key, &updatedHumioCluster) @@ -578,14 +567,14 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Image Rolling Best Effort Patch", func() { + Context("Humio Cluster Update Image Rolling Best Effort Patch", 
Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use new image in a rolling fashion for patch updates", func() { key := types.NamespacedName{ Name: "humiocluster-update-image-rolling-patch", Namespace: testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = upgradePatchBestEffortOldVersion + toCreate.Spec.Image = versions.UpgradePatchBestEffortOldVersion() toCreate.Spec.NodeCount = 2 toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{ Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdateBestEffort, @@ -615,7 +604,7 @@ var _ = Describe("HumioCluster Controller", func() { if err != nil { return err } - updatedHumioCluster.Spec.Image = upgradePatchBestEffortNewVersion + updatedHumioCluster.Spec.Image = versions.UpgradePatchBestEffortNewVersion() return k8sClient.Update(ctx, &updatedHumioCluster) }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -643,7 +632,7 @@ var _ = Describe("HumioCluster Controller", func() { Expect(updatedClusterPods).To(HaveLen(toCreate.Spec.NodeCount)) for _, pod := range updatedClusterPods { humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradePatchBestEffortNewVersion)) + Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(versions.UpgradePatchBestEffortNewVersion())) Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2")) } @@ -654,14 +643,14 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Image Best Effort Version Jump", func() { + Context("Humio Cluster Update Image Best Effort Version Jump", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods in parallel to use new image for version jump updates", func() { key := types.NamespacedName{ Name: "humiocluster-update-image-vj", Namespace: 
testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = upgradeRollingBestEffortVersionJumpOldVersion + toCreate.Spec.Image = versions.UpgradeRollingBestEffortVersionJumpOldVersion() toCreate.Spec.NodeCount = 2 toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{ Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdateBestEffort, @@ -691,7 +680,7 @@ var _ = Describe("HumioCluster Controller", func() { if err != nil { return err } - updatedHumioCluster.Spec.Image = upgradeRollingBestEffortVersionJumpNewVersion + updatedHumioCluster.Spec.Image = versions.UpgradeRollingBestEffortVersionJumpNewVersion() return k8sClient.Update(ctx, &updatedHumioCluster) }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -720,7 +709,7 @@ var _ = Describe("HumioCluster Controller", func() { Expect(updatedClusterPods).To(HaveLen(toCreate.Spec.NodeCount)) for _, pod := range updatedClusterPods { humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradeRollingBestEffortVersionJumpNewVersion)) + Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(versions.UpgradeRollingBestEffortVersionJumpNewVersion())) Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2")) } @@ -731,7 +720,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update EXTERNAL_URL", func() { + Context("Humio Cluster Update EXTERNAL_URL", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use the new EXTERNAL_URL in a non-rolling fashion", func() { if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { key := types.NamespacedName{ @@ -811,13 +800,13 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Image Multi Node Pool", func() { + Context("Humio Cluster Update Image Multi Node Pool", 
Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use new image in multiple node pools", func() { key := types.NamespacedName{ Name: "humiocluster-update-image-np", Namespace: testProcessNamespace, } - originalImage := oldSupportedHumioVersion + originalImage := versions.OldSupportedHumioVersion() toCreate := constructBasicMultiNodePoolHumioCluster(key, true, 1) toCreate.Spec.Image = originalImage toCreate.Spec.NodeCount = 1 @@ -856,7 +845,7 @@ var _ = Describe("HumioCluster Controller", func() { Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1")) suite.UsingClusterBy(key.Name, "Updating the cluster image on the main node pool successfully") - updatedImage := upgradeJumpHumioVersion + updatedImage := versions.UpgradeJumpHumioVersion() Eventually(func() error { updatedHumioCluster = humiov1alpha1.HumioCluster{} err := k8sClient.Get(ctx, key, &updatedHumioCluster) @@ -988,14 +977,14 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Image Source", func() { + Context("Humio Cluster Update Image Source", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use new image", func() { key := types.NamespacedName{ Name: "humiocluster-update-image-source", Namespace: testProcessNamespace, } toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true) - toCreate.Spec.Image = upgradePatchBestEffortOldVersion + toCreate.Spec.Image = versions.UpgradePatchBestEffortOldVersion() toCreate.Spec.NodeCount = 2 suite.UsingClusterBy(key.Name, "Creating the cluster successfully") @@ -1038,7 +1027,7 @@ var _ = Describe("HumioCluster Controller", func() { }, testTimeout, suite.TestInterval).Should(Equal("failed to set imageFromSource: ConfigMap \"image-source-missing\" not found")) suite.UsingClusterBy(key.Name, "Creating the imageSource configmap") - updatedImage := upgradePatchBestEffortNewVersion + updatedImage := 
versions.UpgradePatchBestEffortNewVersion() envVarSourceConfigMap := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "image-source", @@ -1095,7 +1084,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Using Wrong Image", func() { + Context("Humio Cluster Update Using Wrong Image", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods after using wrong image", func() { key := types.NamespacedName{ Name: "humiocluster-update-wrong-image", @@ -1121,7 +1110,7 @@ var _ = Describe("HumioCluster Controller", func() { Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1")) suite.UsingClusterBy(key.Name, "Updating the cluster image unsuccessfully") - updatedImage := fmt.Sprintf("%s-missing-image", controllers.Image) + updatedImage := fmt.Sprintf("%s-missing-image", versions.DefaultHumioImageVersion()) Eventually(func() error { updatedHumioCluster = humiov1alpha1.HumioCluster{} err := k8sClient.Get(ctx, key, &updatedHumioCluster) @@ -1165,7 +1154,7 @@ var _ = Describe("HumioCluster Controller", func() { }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning)) suite.UsingClusterBy(key.Name, "Updating the cluster image successfully") - updatedImage = controllers.Image + updatedImage = versions.DefaultHumioImageVersion() Eventually(func() error { updatedHumioCluster = humiov1alpha1.HumioCluster{} err := k8sClient.Get(ctx, key, &updatedHumioCluster) @@ -1210,7 +1199,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Helper Image", func() { + Context("Humio Cluster Update Helper Image", Label("envtest", "dummy", "real"), func() { It("Update should correctly replace pods to use new image", func() { key := types.NamespacedName{ Name: "humiocluster-update-helper-image", @@ -1238,7 +1227,7 @@ var _ = Describe("HumioCluster Controller", func() { return pod.Spec.InitContainers[initIdx].Image } 
return "" - }, testTimeout, suite.TestInterval).Should(Equal(controllers.HelperImage)) + }, testTimeout, suite.TestInterval).Should(Equal(versions.DefaultHelperImageVersion())) clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels()) @@ -1252,17 +1241,17 @@ var _ = Describe("HumioCluster Controller", func() { return pod.Spec.InitContainers[authIdx].Image } return "" - }, testTimeout, suite.TestInterval).Should(Equal(controllers.HelperImage)) + }, testTimeout, suite.TestInterval).Should(Equal(versions.DefaultHelperImageVersion())) suite.UsingClusterBy(key.Name, "Overriding helper image") var updatedHumioCluster humiov1alpha1.HumioCluster - customHelperImage := "humio/humio-operator-helper:master" + upgradedHelperImage := versions.UpgradeHelperImageVersion() Eventually(func() error { err := k8sClient.Get(ctx, key, &updatedHumioCluster) if err != nil { return err } - updatedHumioCluster.Spec.HelperImage = customHelperImage + updatedHumioCluster.Spec.HelperImage = upgradedHelperImage return k8sClient.Update(ctx, &updatedHumioCluster) }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -1277,7 +1266,7 @@ var _ = Describe("HumioCluster Controller", func() { return pod.Spec.InitContainers[initIdx].Image } return "" - }, testTimeout, suite.TestInterval).Should(Equal(customHelperImage)) + }, testTimeout, suite.TestInterval).Should(Equal(upgradedHelperImage)) suite.UsingClusterBy(key.Name, "Validating pod is recreated using the explicitly defined helper image as auth sidecar container") Eventually(func() string { @@ -1287,7 +1276,7 @@ var _ = Describe("HumioCluster Controller", func() { return pod.Spec.InitContainers[authIdx].Image } return "" - }, testTimeout, suite.TestInterval).Should(Equal(customHelperImage)) + }, testTimeout, suite.TestInterval).Should(Equal(upgradedHelperImage)) updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, 
controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels()) @@ -1298,7 +1287,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Environment Variable", func() { + Context("Humio Cluster Update Environment Variable", Label("envtest", "dummy", "real"), func() { It("Should correctly replace pods to use new environment variable", func() { key := types.NamespacedName{ Name: "humiocluster-update-envvar", @@ -1428,339 +1417,338 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Update Environment Variable Multi Node Pool", func() { - It("Should correctly replace pods to use new environment variable for multi node pool clusters", - Label("envvar"), func() { - key := types.NamespacedName{ - Name: "humiocluster-update-envvar-np", - Namespace: testProcessNamespace, - } - toCreate := constructBasicMultiNodePoolHumioCluster(key, true, 1) - toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{ - Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdate, - } - toCreate.Spec.NodeCount = 1 - toCreate.Spec.NodePools[0].NodeCount = 1 - toCreate.Spec.CommonEnvironmentVariables = []corev1.EnvVar{ - { - Name: "COMMON_ENV_VAR", - Value: "value", - }, - { - Name: "HUMIO_OPTS", - Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", - }, - { - Name: "KAFKA_SERVERS", - Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", - }, - { - Name: "HUMIO_KAFKA_TOPIC_PREFIX", - Value: key.Name, - }, - { - Name: "AUTHENTICATION_METHOD", - Value: "oauth", - }, - { - Name: "ENABLE_IOC_SERVICE", - Value: "false", - }, - { - Name: "test", - Value: "common", - }, - } - toCreate.Spec.EnvironmentVariables = []corev1.EnvVar{ - { - Name: "test", - Value: "", - }, - { - Name: "HUMIO_OPTS", - Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", - }, - { - Name: "KAFKA_SERVERS", - Value: 
"humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", - }, - { - Name: "HUMIO_KAFKA_TOPIC_PREFIX", - Value: key.Name, - }, - { - Name: "AUTHENTICATION_METHOD", - Value: "oauth", - }, - { - Name: "ENABLE_IOC_SERVICE", - Value: "false", - }, - } - toCreate.Spec.NodePools[0].EnvironmentVariables = []corev1.EnvVar{ - { - Name: "test", - Value: "np", - }, - } + Context("Humio Cluster Update Environment Variable Multi Node Pool", Label("envtest", "dummy", "real"), func() { + It("Should correctly replace pods to use new environment variable for multi node pool clusters", func() { + key := types.NamespacedName{ + Name: "humiocluster-update-envvar-np", + Namespace: testProcessNamespace, + } + toCreate := constructBasicMultiNodePoolHumioCluster(key, true, 1) + toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{ + Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdate, + } + toCreate.Spec.NodeCount = 1 + toCreate.Spec.NodePools[0].NodeCount = 1 + toCreate.Spec.CommonEnvironmentVariables = []corev1.EnvVar{ + { + Name: "COMMON_ENV_VAR", + Value: "value", + }, + { + Name: "HUMIO_OPTS", + Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", + }, + { + Name: "KAFKA_SERVERS", + Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", + }, + { + Name: "HUMIO_KAFKA_TOPIC_PREFIX", + Value: key.Name, + }, + { + Name: "AUTHENTICATION_METHOD", + Value: "oauth", + }, + { + Name: "ENABLE_IOC_SERVICE", + Value: "false", + }, + { + Name: "test", + Value: "common", + }, + } + toCreate.Spec.EnvironmentVariables = []corev1.EnvVar{ + { + Name: "test", + Value: "", + }, + { + Name: "HUMIO_OPTS", + Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", + }, + { + Name: "KAFKA_SERVERS", + Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", + }, + { + Name: "HUMIO_KAFKA_TOPIC_PREFIX", + Value: key.Name, + }, + { + Name: "AUTHENTICATION_METHOD", + Value: 
"oauth", + }, + { + Name: "ENABLE_IOC_SERVICE", + Value: "false", + }, + } + toCreate.Spec.NodePools[0].EnvironmentVariables = []corev1.EnvVar{ + { + Name: "test", + Value: "np", + }, + } - suite.UsingClusterBy(key.Name, "Creating the cluster successfully") - ctx := context.Background() - createAndBootstrapMultiNodePoolCluster(ctx, k8sClient, testHumioClient, toCreate, true, humiov1alpha1.HumioClusterStateRunning) - defer suite.CleanupCluster(ctx, k8sClient, toCreate) + suite.UsingClusterBy(key.Name, "Creating the cluster successfully") + ctx := context.Background() + createAndBootstrapMultiNodePoolCluster(ctx, k8sClient, testHumioClient, toCreate, true, humiov1alpha1.HumioClusterStateRunning) + defer suite.CleanupCluster(ctx, k8sClient, toCreate) - mainNodePoolManager := controllers.NewHumioNodeManagerFromHumioCluster(toCreate) - customNodePoolManager := controllers.NewHumioNodeManagerFromHumioNodePool(toCreate, &toCreate.Spec.NodePools[0]) + mainNodePoolManager := controllers.NewHumioNodeManagerFromHumioCluster(toCreate) + customNodePoolManager := controllers.NewHumioNodeManagerFromHumioNodePool(toCreate, &toCreate.Spec.NodePools[0]) - expectedCommonVars := []corev1.EnvVar{ - { - Name: "COMMON_ENV_VAR", - Value: "value", - }, - { - Name: "KAFKA_SERVERS", - Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", - }, - } - clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, mainNodePoolManager.GetPodLabels()) - Expect(clusterPods).To(HaveLen(toCreate.Spec.NodeCount)) - for _, pod := range clusterPods { - humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Env).To(ContainElements(append(expectedCommonVars, corev1.EnvVar{ - Name: "test", Value: ""}))) - } + expectedCommonVars := []corev1.EnvVar{ + { + Name: "COMMON_ENV_VAR", + Value: "value", + }, + { + Name: "KAFKA_SERVERS", + Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", + }, + } + clusterPods, 
_ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, mainNodePoolManager.GetPodLabels()) + Expect(clusterPods).To(HaveLen(toCreate.Spec.NodeCount)) + for _, pod := range clusterPods { + humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) + Expect(pod.Spec.Containers[humioIndex].Env).To(ContainElements(append(expectedCommonVars, corev1.EnvVar{ + Name: "test", Value: ""}))) + } - customClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, customNodePoolManager.GetPodLabels()) - Expect(clusterPods).To(HaveLen(toCreate.Spec.NodeCount)) - for _, pod := range customClusterPods { - humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Env).To(ContainElements(append(expectedCommonVars, corev1.EnvVar{ - Name: "test", Value: "np"}))) - } + customClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, customNodePoolManager.GetPodLabels()) + Expect(clusterPods).To(HaveLen(toCreate.Spec.NodeCount)) + for _, pod := range customClusterPods { + humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) + Expect(pod.Spec.Containers[humioIndex].Env).To(ContainElements(append(expectedCommonVars, corev1.EnvVar{ + Name: "test", Value: "np"}))) + } - suite.UsingClusterBy(key.Name, "Updating the environment variable on main node pool successfully") - updatedCommonEnvironmentVariables := []corev1.EnvVar{ - { - Name: "COMMON_ENV_VAR", - Value: "value", - }, - { - Name: "HUMIO_OPTS", - Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", - }, - { - Name: "KAFKA_SERVERS", - Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", - }, - { - Name: "HUMIO_KAFKA_TOPIC_PREFIX", - Value: key.Name, - }, - { - Name: "AUTHENTICATION_METHOD", - Value: "oauth", - }, - { - Name: "ENABLE_IOC_SERVICE", - Value: "false", - }, - } - updatedEnvironmentVariables := 
[]corev1.EnvVar{ - { - Name: "test", - Value: "update", - }, - { - Name: "HUMIO_OPTS", - Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", - }, - { - Name: "KAFKA_SERVERS", - Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", - }, - { - Name: "HUMIO_KAFKA_TOPIC_PREFIX", - Value: key.Name, - }, - { - Name: "AUTHENTICATION_METHOD", - Value: "oauth", - }, - { - Name: "ENABLE_IOC_SERVICE", - Value: "false", - }, - } + suite.UsingClusterBy(key.Name, "Updating the environment variable on main node pool successfully") + updatedCommonEnvironmentVariables := []corev1.EnvVar{ + { + Name: "COMMON_ENV_VAR", + Value: "value", + }, + { + Name: "HUMIO_OPTS", + Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", + }, + { + Name: "KAFKA_SERVERS", + Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", + }, + { + Name: "HUMIO_KAFKA_TOPIC_PREFIX", + Value: key.Name, + }, + { + Name: "AUTHENTICATION_METHOD", + Value: "oauth", + }, + { + Name: "ENABLE_IOC_SERVICE", + Value: "false", + }, + } + updatedEnvironmentVariables := []corev1.EnvVar{ + { + Name: "test", + Value: "update", + }, + { + Name: "HUMIO_OPTS", + Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", + }, + { + Name: "KAFKA_SERVERS", + Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", + }, + { + Name: "HUMIO_KAFKA_TOPIC_PREFIX", + Value: key.Name, + }, + { + Name: "AUTHENTICATION_METHOD", + Value: "oauth", + }, + { + Name: "ENABLE_IOC_SERVICE", + Value: "false", + }, + } - var updatedHumioCluster humiov1alpha1.HumioCluster - Eventually(func() error { - updatedHumioCluster = humiov1alpha1.HumioCluster{} - err := k8sClient.Get(ctx, key, &updatedHumioCluster) - if err != nil { - return err - } - updatedHumioCluster.Spec.CommonEnvironmentVariables = updatedCommonEnvironmentVariables - updatedHumioCluster.Spec.EnvironmentVariables = 
updatedEnvironmentVariables - return k8sClient.Update(ctx, &updatedHumioCluster) - }, testTimeout, suite.TestInterval).Should(Succeed()) + var updatedHumioCluster humiov1alpha1.HumioCluster + Eventually(func() error { + updatedHumioCluster = humiov1alpha1.HumioCluster{} + err := k8sClient.Get(ctx, key, &updatedHumioCluster) + if err != nil { + return err + } + updatedHumioCluster.Spec.CommonEnvironmentVariables = updatedCommonEnvironmentVariables + updatedHumioCluster.Spec.EnvironmentVariables = updatedEnvironmentVariables + return k8sClient.Update(ctx, &updatedHumioCluster) + }, testTimeout, suite.TestInterval).Should(Succeed()) - Eventually(func() string { - updatedHumioCluster = humiov1alpha1.HumioCluster{} - Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) - return updatedHumioCluster.Status.State - }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting)) + Eventually(func() string { + updatedHumioCluster = humiov1alpha1.HumioCluster{} + Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) + return updatedHumioCluster.Status.State + }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting)) - suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state") - Eventually(func() int { - var poolsInCorrectState int - updatedHumioCluster = humiov1alpha1.HumioCluster{} - Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) - for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus { - if poolStatus.State == humiov1alpha1.HumioClusterStateRestarting { - poolsInCorrectState++ - } + suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state") + Eventually(func() int { + var poolsInCorrectState int + updatedHumioCluster = humiov1alpha1.HumioCluster{} + Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) + for _, poolStatus := range 
updatedHumioCluster.Status.NodePoolStatus { + if poolStatus.State == humiov1alpha1.HumioClusterStateRestarting { + poolsInCorrectState++ } - return poolsInCorrectState - }, testTimeout, suite.TestInterval).Should(Equal(1)) - - suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion") - ensurePodsRollingRestart(ctx, mainNodePoolManager, 2) + } + return poolsInCorrectState + }, testTimeout, suite.TestInterval).Should(Equal(1)) - Eventually(func() string { - updatedHumioCluster = humiov1alpha1.HumioCluster{} - Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) - return updatedHumioCluster.Status.State - }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning)) + suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion") + ensurePodsRollingRestart(ctx, mainNodePoolManager, 2) - Eventually(func() bool { - clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels()) - Expect(len(clusterPods)).To(BeIdenticalTo(toCreate.Spec.NodeCount)) + Eventually(func() string { + updatedHumioCluster = humiov1alpha1.HumioCluster{} + Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) + return updatedHumioCluster.Status.State + }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning)) - for _, pod := range clusterPods { - humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(updatedEnvironmentVariables[0])) - } - return true - }, testTimeout, suite.TestInterval).Should(BeTrue()) + Eventually(func() bool { + clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels()) + Expect(len(clusterPods)).To(BeIdenticalTo(toCreate.Spec.NodeCount)) - updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, 
updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels()) - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed") - Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods))) + for _, pod := range clusterPods { + humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) + Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(updatedEnvironmentVariables[0])) } + return true + }, testTimeout, suite.TestInterval).Should(BeTrue()) - suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other node pool") - additionalNodePoolManager := controllers.NewHumioNodeManagerFromHumioNodePool(&updatedHumioCluster, &updatedHumioCluster.Spec.NodePools[0]) + updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels()) + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed") + Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods))) + } - nonUpdatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels()) - Expect(nonUpdatedClusterPods).To(HaveLen(toCreate.Spec.NodePools[0].NodeCount)) - Expect(updatedHumioCluster.Spec.NodePools[0].EnvironmentVariables).To(Equal(toCreate.Spec.NodePools[0].EnvironmentVariables)) - for _, pod := range nonUpdatedClusterPods { - Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1")) - } + suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other node pool") + additionalNodePoolManager := controllers.NewHumioNodeManagerFromHumioNodePool(&updatedHumioCluster, &updatedHumioCluster.Spec.NodePools[0]) - clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, additionalNodePoolManager.GetPodLabels()) + 
nonUpdatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels()) + Expect(nonUpdatedClusterPods).To(HaveLen(toCreate.Spec.NodePools[0].NodeCount)) + Expect(updatedHumioCluster.Spec.NodePools[0].EnvironmentVariables).To(Equal(toCreate.Spec.NodePools[0].EnvironmentVariables)) + for _, pod := range nonUpdatedClusterPods { + Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1")) + } - suite.UsingClusterBy(key.Name, "Updating the environment variable on additional node pool successfully") - updatedEnvironmentVariables = []corev1.EnvVar{ - { - Name: "test", - Value: "update", - }, - { - Name: "HUMIO_OPTS", - Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false", - }, - { - Name: "KAFKA_SERVERS", - Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", - }, - { - Name: "HUMIO_KAFKA_TOPIC_PREFIX", - Value: key.Name, - }, - { - Name: "AUTHENTICATION_METHOD", - Value: "oauth", - }, - { - Name: "ENABLE_IOC_SERVICE", - Value: "false", - }, - } - npUpdatedEnvironmentVariables := []corev1.EnvVar{ - { - Name: "test", - Value: "np-update", - }, - } + clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, additionalNodePoolManager.GetPodLabels()) - Eventually(func() error { - updatedHumioCluster = humiov1alpha1.HumioCluster{} - err := k8sClient.Get(ctx, key, &updatedHumioCluster) - if err != nil { - return err - } - updatedHumioCluster.Spec.NodePools[0].EnvironmentVariables = npUpdatedEnvironmentVariables - return k8sClient.Update(ctx, &updatedHumioCluster) - }, testTimeout, suite.TestInterval).Should(Succeed()) + suite.UsingClusterBy(key.Name, "Updating the environment variable on additional node pool successfully") + updatedEnvironmentVariables = []corev1.EnvVar{ + { + Name: "test", + Value: "update", + }, + { + Name: "HUMIO_OPTS", + Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true 
-Dzookeeper.client.secure=false", + }, + { + Name: "KAFKA_SERVERS", + Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092", + }, + { + Name: "HUMIO_KAFKA_TOPIC_PREFIX", + Value: key.Name, + }, + { + Name: "AUTHENTICATION_METHOD", + Value: "oauth", + }, + { + Name: "ENABLE_IOC_SERVICE", + Value: "false", + }, + } + npUpdatedEnvironmentVariables := []corev1.EnvVar{ + { + Name: "test", + Value: "np-update", + }, + } - Eventually(func() string { - updatedHumioCluster = humiov1alpha1.HumioCluster{} - Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) - return updatedHumioCluster.Status.State - }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting)) + Eventually(func() error { + updatedHumioCluster = humiov1alpha1.HumioCluster{} + err := k8sClient.Get(ctx, key, &updatedHumioCluster) + if err != nil { + return err + } + updatedHumioCluster.Spec.NodePools[0].EnvironmentVariables = npUpdatedEnvironmentVariables + return k8sClient.Update(ctx, &updatedHumioCluster) + }, testTimeout, suite.TestInterval).Should(Succeed()) - suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state") - Eventually(func() int { - var poolsInCorrectState int - updatedHumioCluster = humiov1alpha1.HumioCluster{} - Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) - for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus { - if poolStatus.State == humiov1alpha1.HumioClusterStateRestarting { - poolsInCorrectState++ - } - } - return poolsInCorrectState - }, testTimeout, suite.TestInterval).Should(Equal(1)) + Eventually(func() string { + updatedHumioCluster = humiov1alpha1.HumioCluster{} + Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) + return updatedHumioCluster.Status.State + }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting)) - suite.UsingClusterBy(key.Name, "Restarting the cluster in a 
rolling fashion") - ensurePodsRollingRestart(ctx, additionalNodePoolManager, 2) + suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state") + Eventually(func() int { + var poolsInCorrectState int + updatedHumioCluster = humiov1alpha1.HumioCluster{} + Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) + for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus { + if poolStatus.State == humiov1alpha1.HumioClusterStateRestarting { + poolsInCorrectState++ + } + } + return poolsInCorrectState + }, testTimeout, suite.TestInterval).Should(Equal(1)) - Eventually(func() string { - updatedHumioCluster = humiov1alpha1.HumioCluster{} - Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) - return updatedHumioCluster.Status.State - }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning)) + suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion") + ensurePodsRollingRestart(ctx, additionalNodePoolManager, 2) - Eventually(func() bool { - clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels()) - Expect(len(clusterPods)).To(BeIdenticalTo(toCreate.Spec.NodeCount)) + Eventually(func() string { + updatedHumioCluster = humiov1alpha1.HumioCluster{} + Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed()) + return updatedHumioCluster.Status.State + }, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning)) - for _, pod := range clusterPods { - humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) - Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElements(npUpdatedEnvironmentVariables)) - } - return true - }, testTimeout, suite.TestInterval).Should(BeTrue()) + Eventually(func() bool { + clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, 
additionalNodePoolManager.GetPodLabels()) + Expect(len(clusterPods)).To(BeIdenticalTo(toCreate.Spec.NodeCount)) - updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels()) - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed") - Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods))) + for _, pod := range clusterPods { + humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName) + Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElements(npUpdatedEnvironmentVariables)) } + return true + }, testTimeout, suite.TestInterval).Should(BeTrue()) - suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other main pool") + updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels()) + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed") + Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods))) + } - nonUpdatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels()) - Expect(nonUpdatedClusterPods).To(HaveLen(toCreate.Spec.NodeCount)) - for _, pod := range nonUpdatedClusterPods { - Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2")) - } - }) + suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other main pool") + + nonUpdatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels()) + Expect(nonUpdatedClusterPods).To(HaveLen(toCreate.Spec.NodeCount)) + for _, pod := range nonUpdatedClusterPods { + Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2")) + } + }) }) - Context("Humio Cluster 
Ingress", func() { + Context("Humio Cluster Ingress", Label("envtest", "dummy", "real"), func() { It("Should correctly update ingresses to use new annotations variable", func() { key := types.NamespacedName{ Name: "humiocluster-ingress", @@ -1924,7 +1912,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Pod Annotations", func() { + Context("Humio Cluster Pod Annotations", Label("envtest", "dummy", "real"), func() { It("Should be correctly annotated", func() { key := types.NamespacedName{ Name: "humiocluster-pods", @@ -1951,7 +1939,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Pod Labels", func() { + Context("Humio Cluster Pod Labels", Label("envtest", "dummy", "real"), func() { It("Should be correctly annotated", func() { key := types.NamespacedName{ Name: "humiocluster-labels", @@ -1979,7 +1967,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Custom Service", func() { + Context("Humio Cluster Custom Service", Label("envtest", "dummy", "real"), func() { It("Should correctly use default service", func() { key := types.NamespacedName{ Name: "humiocluster-custom-svc", @@ -2231,7 +2219,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Container Arguments", func() { + Context("Humio Cluster Container Arguments", Label("envtest", "dummy", "real"), func() { It("Should correctly configure container arguments and ephemeral disks env var with default vhost selection method", func() { key := types.NamespacedName{ Name: "humiocluster-container-args", @@ -2277,7 +2265,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Container Arguments Without Zone", func() { + Context("Humio Cluster Container Arguments Without Zone", Label("envtest", "dummy", "real"), func() { It("Should correctly configure container arguments", func() { key := types.NamespacedName{ Name: 
"humiocluster-container-without-zone-args", @@ -2321,7 +2309,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Service Account Annotations", func() { + Context("Humio Cluster Service Account Annotations", Label("envtest", "dummy", "real"), func() { It("Should correctly handle service account annotations", func() { key := types.NamespacedName{ Name: "humiocluster-sa-annotations", @@ -2376,7 +2364,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Pod Security Context", func() { + Context("Humio Cluster Pod Security Context", Label("envtest", "dummy", "real"), func() { It("Should correctly handle pod security context", func() { key := types.NamespacedName{ Name: "humiocluster-podsecuritycontext", @@ -2450,7 +2438,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Container Security Context", func() { + Context("Humio Cluster Container Security Context", Label("envtest", "dummy", "real"), func() { It("Should correctly handle container security context", func() { key := types.NamespacedName{ Name: "humiocluster-containersecuritycontext", @@ -2548,7 +2536,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Container Probes", func() { + Context("Humio Cluster Container Probes", Label("envtest", "dummy", "real"), func() { It("Should correctly handle container probes", func() { key := types.NamespacedName{ Name: "humiocluster-probes", @@ -2799,7 +2787,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Ekstra Kafka Configs", func() { + Context("Humio Cluster Extra Kafka Configs", Label("envtest", "dummy", "real"), func() { It("Should correctly handle extra kafka configs", func() { key := types.NamespacedName{ Name: "humiocluster-extrakafkaconfigs", @@ -2918,7 +2906,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster View Group Permissions", func() { + 
Context("Humio Cluster View Group Permissions", Label("envtest", "dummy", "real"), func() { It("Should correctly handle view group permissions", func() { key := types.NamespacedName{ Name: "humiocluster-vgp", @@ -3058,7 +3046,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Role Permissions", func() { + Context("Humio Cluster Role Permissions", Label("envtest", "dummy", "real"), func() { It("Should correctly handle role permissions", func() { key := types.NamespacedName{ Name: "humiocluster-rp", @@ -3228,7 +3216,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Persistent Volumes", func() { + Context("Humio Cluster Persistent Volumes", Label("envtest", "dummy", "real"), func() { It("Should correctly handle persistent volumes", func() { key := types.NamespacedName{ Name: "humiocluster-pvc", @@ -3301,7 +3289,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Extra Volumes", func() { + Context("Humio Cluster Extra Volumes", Label("envtest", "dummy", "real"), func() { It("Should correctly handle extra volumes", func() { key := types.NamespacedName{ Name: "humiocluster-extra-volumes", @@ -3383,7 +3371,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Custom Path", func() { + Context("Humio Cluster Custom Path", Label("envtest", "dummy", "real"), func() { It("Should correctly handle custom paths with ingress disabled", func() { key := types.NamespacedName{ Name: "humiocluster-custom-path-ing-disabled", @@ -3455,7 +3443,7 @@ var _ = Describe("HumioCluster Controller", func() { }, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateRunning)) }) - It("Should correctly handle custom paths with ingress enabled", func() { + It("Should correctly handle custom paths with ingress enabled", Label("envtest", "dummy", "real"), func() { key := types.NamespacedName{ Name: 
"humiocluster-custom-path-ing-enabled", Namespace: testProcessNamespace, @@ -3529,7 +3517,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Config Errors", func() { + Context("Humio Cluster Config Errors", Label("envtest", "dummy", "real"), func() { It("Creating cluster with conflicting volume mount name", func() { key := types.NamespacedName{ Name: "humiocluster-err-volmnt-name", @@ -3761,7 +3749,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Without TLS for Ingress", func() { + Context("Humio Cluster Without TLS for Ingress", Label("envtest", "dummy", "real"), func() { It("Creating cluster without TLS for ingress", func() { key := types.NamespacedName{ Name: "humiocluster-without-tls-ingress", @@ -3793,7 +3781,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster with additional hostnames for TLS", func() { + Context("Humio Cluster with additional hostnames for TLS", Label("envtest", "dummy", "real"), func() { It("Creating cluster with additional hostnames for TLS", func() { if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { key := types.NamespacedName{ @@ -3830,7 +3818,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster Ingress", func() { + Context("Humio Cluster Ingress", Label("envtest", "dummy", "real"), func() { It("Should correctly handle ingress when toggling both ESHostname and Hostname on/off", func() { key := types.NamespacedName{ Name: "humiocluster-ingress-hostname", @@ -4066,7 +4054,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster with non-existent custom service accounts", func() { + Context("Humio Cluster with non-existent custom service accounts", Label("envtest", "dummy", "real"), func() { It("Should correctly handle non-existent humio service account by marking cluster as ConfigError", func() { key := types.NamespacedName{ Name: 
"humiocluster-err-humio-service-account", @@ -4135,7 +4123,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Custom Service Accounts", func() { + Context("Humio Cluster With Custom Service Accounts", Label("envtest", "dummy", "real"), func() { It("Creating cluster with custom service accounts", func() { key := types.NamespacedName{ Name: "humiocluster-custom-service-accounts", @@ -4253,7 +4241,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Service Annotations", func() { + Context("Humio Cluster With Service Annotations", Label("envtest", "dummy", "real"), func() { It("Creating cluster with custom service annotations", func() { key := types.NamespacedName{ Name: "humiocluster-custom-svc-annotations", @@ -4293,7 +4281,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Custom Tolerations", func() { + Context("Humio Cluster With Custom Tolerations", Label("envtest", "dummy", "real"), func() { It("Creating cluster with custom tolerations", func() { key := types.NamespacedName{ Name: "humiocluster-custom-tolerations", @@ -4322,7 +4310,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Custom Topology Spread Constraints", func() { + Context("Humio Cluster With Custom Topology Spread Constraints", Label("envtest", "dummy", "real"), func() { It("Creating cluster with custom Topology Spread Constraints", func() { key := types.NamespacedName{ Name: "humiocluster-custom-tsc", @@ -4350,7 +4338,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster With Custom Priority Class Name", func() { + Context("Humio Cluster With Custom Priority Class Name", Label("envtest", "dummy", "real"), func() { It("Creating cluster with custom Priority Class Name", func() { key := types.NamespacedName{ Name: "humiocluster-custom-pcn", @@ -4381,7 +4369,7 @@ var _ = Describe("HumioCluster 
Controller", func() { }) }) - Context("Humio Cluster With Service Labels", func() { + Context("Humio Cluster With Service Labels", Label("envtest", "dummy", "real"), func() { It("Creating cluster with custom service labels", func() { key := types.NamespacedName{ Name: "humiocluster-custom-svc-labels", @@ -4416,7 +4404,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster with shared process namespace and sidecars", func() { + Context("Humio Cluster with shared process namespace and sidecars", Label("envtest", "dummy", "real"), func() { It("Creating cluster without shared process namespace and sidecar", func() { key := types.NamespacedName{ Name: "humiocluster-custom-sidecars", @@ -4451,7 +4439,7 @@ var _ = Describe("HumioCluster Controller", func() { updatedHumioCluster.Spec.SidecarContainers = []corev1.Container{ { Name: "jmap", - Image: controllers.Image, + Image: versions.DefaultHumioImageVersion(), Command: []string{"/bin/sh"}, Args: []string{"-c", "HUMIO_PID=$(ps -e | grep java | awk '{print $1'}); while :; do sleep 30 ; jmap -histo:live $HUMIO_PID | head -n203 ; done"}, VolumeMounts: []corev1.VolumeMount{ @@ -4509,7 +4497,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster pod termination grace period", func() { + Context("Humio Cluster pod termination grace period", Label("envtest", "dummy", "real"), func() { It("Should validate default configuration", func() { key := types.NamespacedName{ Name: "humiocluster-grace-default", @@ -4560,7 +4548,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster install license", func() { + Context("Humio Cluster install license", Label("envtest", "dummy", "real"), func() { It("Should fail when no license is present", func() { key := types.NamespacedName{ Name: "humiocluster-no-license", @@ -4682,7 +4670,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster state adjustment", func() { + 
Context("Humio Cluster state adjustment", Label("envtest", "dummy", "real"), func() { It("Should successfully set proper state", func() { key := types.NamespacedName{ Name: "humiocluster-state", @@ -4722,7 +4710,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster with envSource configmap", func() { + Context("Humio Cluster with envSource configmap", Label("envtest", "dummy", "real"), func() { It("Creating cluster with envSource configmap", func() { key := types.NamespacedName{ Name: "humiocluster-env-source-configmap", @@ -4826,7 +4814,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster with envSource secret", func() { + Context("Humio Cluster with envSource secret", Label("envtest", "dummy", "real"), func() { It("Creating cluster with envSource secret", func() { key := types.NamespacedName{ Name: "humiocluster-env-source-secret", @@ -4930,7 +4918,7 @@ var _ = Describe("HumioCluster Controller", func() { }) }) - Context("Humio Cluster with resources without node pool name label", func() { + Context("Humio Cluster with resources without node pool name label", Label("envtest", "dummy", "real"), func() { It("Creating cluster with all node pool labels set", func() { key := types.NamespacedName{ Name: "humiocluster-nodepool-labels", diff --git a/controllers/suite/clusters/suite_test.go b/controllers/suite/clusters/suite_test.go index 9daecf0e4..ec5e49f01 100644 --- a/controllers/suite/clusters/suite_test.go +++ b/controllers/suite/clusters/suite_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + humioapi "github.com/humio/cli/api" "os" "path/filepath" "sort" @@ -38,7 +39,6 @@ import ( cmapi "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" "github.com/go-logr/logr" "github.com/go-logr/zapr" - humioapi "github.com/humio/cli/api" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -95,7 +95,11 @@ 
var _ = BeforeSuite(func() { testEnv = &envtest.Environment{ UseExistingCluster: &useExistingCluster, } - testHumioClient = humio.NewClient(log, &humioapi.Config{}, "") + if os.Getenv("DUMMY_LOGSCALE_IMAGE") == "true" { + testHumioClient = humio.NewMockClient() + } else { + testHumioClient = humio.NewClient(log, &humioapi.Config{}, "") + } } else { testTimeout = time.Second * 30 testEnv = &envtest.Environment{ diff --git a/controllers/suite/common.go b/controllers/suite/common.go index 8f88b02f5..0064f4899 100644 --- a/controllers/suite/common.go +++ b/controllers/suite/common.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "github.com/humio/humio-operator/controllers/versions" "os" "strconv" "strings" @@ -178,7 +179,7 @@ func ConstructBasicNodeSpecForHumioCluster(key types.NamespacedName) humiov1alph userID := int64(65534) nodeSpec := humiov1alpha1.HumioNodeSpec{ - Image: controllers.Image, + Image: versions.DefaultHumioImageVersion(), ExtraKafkaConfigs: "security.protocol=PLAINTEXT", NodeCount: 1, // Affinity needs to be overridden to exclude default value for kubernetes.io/arch to allow running local tests @@ -202,51 +203,6 @@ func ConstructBasicNodeSpecForHumioCluster(key types.NamespacedName) humiov1alph }, }, }, - SidecarContainers: []corev1.Container{ - { - Name: "wait-for-global-snapshot-on-disk", - Image: sidecarWaitForGlobalImageVersion, - Command: []string{"/bin/sh"}, - Args: []string{ - "-c", - "trap 'exit 0' 15; while true; do sleep 100 & wait $!; done", - }, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{ - "/bin/sh", - "-c", - "ls /mnt/global*.json", - }, - }, - }, - InitialDelaySeconds: 5, - TimeoutSeconds: 5, - PeriodSeconds: 10, - SuccessThreshold: 1, - FailureThreshold: 100, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "humio-data", - MountPath: "/mnt", - ReadOnly: true, - }, - }, - SecurityContext: &corev1.SecurityContext{ - Privileged: 
helpers.BoolPtr(false), - AllowPrivilegeEscalation: helpers.BoolPtr(false), - ReadOnlyRootFilesystem: helpers.BoolPtr(true), - RunAsUser: &userID, - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - }, - }, - }, EnvironmentVariables: []corev1.EnvVar{ { Name: "KAFKA_SERVERS", @@ -294,6 +250,57 @@ func ConstructBasicNodeSpecForHumioCluster(key types.NamespacedName) humiov1alph }, } + if os.Getenv("DUMMY_LOGSCALE_IMAGE") != "true" { + nodeSpec.SidecarContainers = []corev1.Container{ + { + Name: "wait-for-global-snapshot-on-disk", + Image: sidecarWaitForGlobalImageVersion, + Command: []string{"/bin/sh"}, + Args: []string{ + "-c", + "trap 'exit 0' 15; while true; do sleep 100 & wait $!; done", + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{ + "/bin/sh", + "-c", + "ls /mnt/global*.json", + }, + }, + }, + InitialDelaySeconds: 5, + TimeoutSeconds: 5, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 100, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "humio-data", + MountPath: "/mnt", + ReadOnly: true, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Privileged: helpers.BoolPtr(false), + AllowPrivilegeEscalation: helpers.BoolPtr(false), + ReadOnlyRootFilesystem: helpers.BoolPtr(true), + RunAsUser: &userID, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + }, + }, + } + } else { + nodeSpec.Image = "humio/humio-core:dummy" + nodeSpec.HelperImage = "humio/humio-operator-helper:dummy" + } + if useDockerCredentials() { nodeSpec.ImagePullSecrets = []corev1.LocalObjectReference{ {Name: DockerRegistryCredentialsSecretName}, @@ -517,7 +524,7 @@ func CreateAndBootstrapCluster(ctx context.Context, k8sClient client.Client, hum Expect(apiTokenSecret.Annotations).Should(HaveKeyWithValue(apiTokenMethodAnnotationName, apiTokenMethodFromAPI)) } - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + if 
os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" && os.Getenv("DUMMY_LOGSCALE_IMAGE") != "true" { UsingClusterBy(key.Name, "Validating cluster nodes have ZONE configured correctly") if updatedHumioCluster.Spec.DisableInitContainer { Eventually(func() []string { diff --git a/controllers/suite/resources/humioresources_controller_test.go b/controllers/suite/resources/humioresources_controller_test.go index c3d880299..56b213cd9 100644 --- a/controllers/suite/resources/humioresources_controller_test.go +++ b/controllers/suite/resources/humioresources_controller_test.go @@ -55,7 +55,7 @@ var _ = Describe("Humio Resources Controllers", func() { // your API definition. // Avoid adding tests for vanilla CRUD operations because they would // test Kubernetes API server, which isn't the goal here. - Context("Humio Ingest Token", func() { + Context("Humio Ingest Token", Label("envtest", "dummy", "real"), func() { It("should handle ingest token with target secret correctly", func() { ctx := context.Background() key := types.NamespacedName{ @@ -306,7 +306,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Repository and View", func() { + Context("Humio Repository and View", Label("envtest", "dummy", "real"), func() { It("should handle resources correctly", func() { ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioRepository: Should handle repository correctly") @@ -561,7 +561,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Parser", func() { + Context("Humio Parser", Label("envtest", "dummy", "real"), func() { It("HumioParser: Should handle parser correctly", func() { ctx := context.Background() spec := humiov1alpha1.HumioParserSpec{ @@ -653,7 +653,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio External Cluster", func() { + Context("Humio External Cluster", Label("envtest", "dummy", "real"), func() { It("should handle resources correctly", func() { 
ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioExternalCluster: Should handle externalcluster correctly") @@ -702,7 +702,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio resources errors", func() { + Context("Humio resources errors", Label("envtest", "dummy", "real"), func() { It("HumioParser: Creating ingest token pointing to non-existent managed cluster", func() { ctx := context.Background() keyErr := types.NamespacedName{ @@ -920,7 +920,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Action", func() { + Context("Humio Action", Label("envtest", "dummy", "real"), func() { It("should handle email action correctly", func() { ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioAction: Should handle action correctly") @@ -2673,7 +2673,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Alert", func() { + Context("Humio Alert", Label("envtest", "dummy", "real"), func() { It("should handle alert action correctly", func() { ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioAlert: Should handle alert correctly") @@ -2852,7 +2852,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Filter Alert", func() { + Context("Humio Filter Alert", Label("envtest", "dummy", "real"), func() { It("should handle filter alert action correctly", func() { ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioFilterAlert: Should handle filter alert correctly") @@ -3029,7 +3029,7 @@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Aggregate Alert", func() { + Context("Humio Aggregate Alert", Label("envtest", "dummy", "real"), func() { It("should handle aggregate alert action correctly", func() { ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioAggregateAlert: Should handle aggregate alert correctly") @@ -3215,7 +3215,7 
@@ var _ = Describe("Humio Resources Controllers", func() { }) }) - Context("Humio Scheduled Search", func() { + Context("Humio Scheduled Search", Label("envtest", "dummy", "real"), func() { It("should handle scheduled search action correctly", func() { ctx := context.Background() suite.UsingClusterBy(clusterKey.Name, "HumioScheduledSearch: Should handle scheduled search correctly") diff --git a/controllers/suite/resources/suite_test.go b/controllers/suite/resources/suite_test.go index 0c49cb58f..3e6987849 100644 --- a/controllers/suite/resources/suite_test.go +++ b/controllers/suite/resources/suite_test.go @@ -104,7 +104,13 @@ var _ = BeforeSuite(func() { testEnv = &envtest.Environment{ UseExistingCluster: &useExistingCluster, } - humioClient = humio.NewClient(log, &humioapi.Config{}, "") + + if os.Getenv("DUMMY_LOGSCALE_IMAGE") == "true" { + humioClient = humio.NewMockClient() + } else { + humioClient = humio.NewClient(log, &humioapi.Config{}, "") + } + } else { testTimeout = time.Second * 30 testEnv = &envtest.Environment{ @@ -258,7 +264,11 @@ var _ = BeforeSuite(func() { suite.UsingClusterBy(clusterKey.Name, fmt.Sprintf("HumioCluster: Creating shared test cluster in namespace %s", clusterKey.Namespace)) cluster = suite.ConstructBasicSingleNodeHumioCluster(clusterKey, true) - cluster.Spec.HumioNodeSpec.Image = "humio/humio-core:1.150.0" + if os.Getenv("DUMMY_LOGSCALE_IMAGE") != "true" { + cluster.Spec.HumioNodeSpec.Image = "humio/humio-core:1.150.0" + } else { + cluster.Spec.HumioNodeSpec.Image = "humio/humio-core:dummy" + } suite.CreateAndBootstrapCluster(context.TODO(), k8sClient, humioClient, cluster, true, corev1alpha1.HumioClusterStateRunning, testTimeout) sharedCluster, err = helpers.NewCluster(context.TODO(), k8sClient, clusterKey.Name, "", clusterKey.Namespace, helpers.UseCertManager(), true) diff --git a/controllers/versions/versions.go b/controllers/versions/versions.go new file mode 100644 index 000000000..aee7e11f1 --- /dev/null +++ 
const (
	defaultHelperImage       = "humio/humio-operator-helper:8f5ef6c7e470226e77d985f36cf39be9a100afea"
	defaultHumioImageVersion = "humio/humio-core:1.131.1"

	oldSupportedHumioVersion   = "humio/humio-core:1.118.0"
	upgradeJumpHumioVersion    = "humio/humio-core:1.128.0"
	oldUnsupportedHumioVersion = "humio/humio-core:1.18.4"

	upgradeHelperImageVersion = "humio/humio-operator-helper:master"

	upgradePatchBestEffortOldVersion = "humio/humio-core:1.124.1"
	upgradePatchBestEffortNewVersion = "humio/humio-core:1.124.2"

	upgradeRollingBestEffortVersionJumpOldVersion = "humio/humio-core:1.124.1"
	upgradeRollingBestEffortVersionJumpNewVersion = "humio/humio-core:1.131.1"

	// dummyImageSuffix is appended to every image reference when the test
	// suite runs against locally built dummy LogScale images.
	dummyImageSuffix = "-dummy"
)

// suffixIfDummy returns image unchanged, or with the "-dummy" suffix appended
// when the DUMMY_LOGSCALE_IMAGE environment variable is exactly "true".
// Centralizing the check keeps all exported accessors below consistent.
func suffixIfDummy(image string) string {
	if os.Getenv("DUMMY_LOGSCALE_IMAGE") == "true" {
		return image + dummyImageSuffix
	}
	return image
}

// DefaultHelperImageVersion returns the helper image used by default.
func DefaultHelperImageVersion() string {
	return suffixIfDummy(defaultHelperImage)
}

// DefaultHumioImageVersion returns the LogScale image used by default.
func DefaultHumioImageVersion() string {
	return suffixIfDummy(defaultHumioImageVersion)
}

// OldSupportedHumioVersion returns an old but still supported LogScale image.
func OldSupportedHumioVersion() string {
	return suffixIfDummy(oldSupportedHumioVersion)
}

// UpgradeJumpHumioVersion returns the LogScale image used to test version-jump upgrades.
func UpgradeJumpHumioVersion() string {
	return suffixIfDummy(upgradeJumpHumioVersion)
}

// OldUnsupportedHumioVersion returns a LogScale image older than the supported range.
func OldUnsupportedHumioVersion() string {
	return suffixIfDummy(oldUnsupportedHumioVersion)
}

// UpgradeHelperImageVersion returns the helper image used in upgrade tests.
func UpgradeHelperImageVersion() string {
	return suffixIfDummy(upgradeHelperImageVersion)
}

// UpgradePatchBestEffortOldVersion returns the pre-upgrade image for the
// best-effort patch-upgrade test.
func UpgradePatchBestEffortOldVersion() string {
	return suffixIfDummy(upgradePatchBestEffortOldVersion)
}

// UpgradePatchBestEffortNewVersion returns the post-upgrade image for the
// best-effort patch-upgrade test.
func UpgradePatchBestEffortNewVersion() string {
	return suffixIfDummy(upgradePatchBestEffortNewVersion)
}

// UpgradeRollingBestEffortVersionJumpOldVersion returns the pre-upgrade image
// for the best-effort rolling version-jump test.
func UpgradeRollingBestEffortVersionJumpOldVersion() string {
	return suffixIfDummy(upgradeRollingBestEffortVersionJumpOldVersion)
}

// UpgradeRollingBestEffortVersionJumpNewVersion returns the post-upgrade image
// for the best-effort rolling version-jump test.
func UpgradeRollingBestEffortVersionJumpNewVersion() string {
	return suffixIfDummy(upgradeRollingBestEffortVersionJumpNewVersion)
}
"humio/humio-operator-helper-dummy:1.151.0-2024082100" + dataVolumeSource: + emptyDir: {} + nodeCount: 30 + environmentVariables: + - name: "HUMIO_MEMORY_OPTS" + value: "-Xss2m -Xms1g -Xmx2g -XX:MaxDirectMemorySize=1g" + - name: "ZOOKEEPER_URL" + value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181" + - name: "KAFKA_SERVERS" + value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092" + - name: ingest-only + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: kubernetes.io/os + operator: In + values: + - linux + image: "humio/humio-core-dummy:1.151.0-2024082205" + helperImage: "humio/humio-operator-helper-dummy:1.151.0-2024082100" + nodeCount: 30 + dataVolumeSource: + emptyDir: {} + environmentVariables: + - name: "HUMIO_MEMORY_OPTS" + value: "-Xss2m -Xms1g -Xmx2g -XX:MaxDirectMemorySize=1g" + - name: "ZOOKEEPER_URL" + value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181" + - name: "KAFKA_SERVERS" + value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092" + license: + secretKeyRef: + name: example-humiocluster-license + key: data diff --git a/hack/functions.sh b/hack/functions.sh index 7c09e0c58..fb574bea2 100644 --- a/hack/functions.sh +++ b/hack/functions.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash - declare -r kindest_node_image_multiplatform_amd64_arm64=${E2E_KIND_K8S_VERSION:-kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245} declare -r kind_version=0.22.0 declare -r go_version=1.22.2 @@ -120,20 +119,31 @@ wait_for_pod() { } preload_container_images() { - # Extract humio images and tags from go source - DEFAULT_IMAGE=$(grep '^\s*Image\s*=' controllers/humiocluster_defaults.go | cut -d '"' -f 2) - PRE_UPDATE_IMAGES=$(grep -R 'Version\s* = ' controllers/suite | grep -v oldUnsupportedHumioVersion | grep -v 1.x.x | cut -d '"' -f 
2 | sort -u) - - # Preload default image used by tests - $docker pull $DEFAULT_IMAGE - $kind load docker-image --name kind $DEFAULT_IMAGE & - - # Preload image used by e2e update tests - for image in $PRE_UPDATE_IMAGES - do - $docker pull $image - $kind load docker-image --name kind $image & - done + if [[ $dummy_logscale_image == "true" ]]; then + make docker-build-dummy IMG=humio/humio-core:dummy + make docker-build-helper IMG=humio/humio-operator-helper:dummy + $kind load docker-image humio/humio-core:dummy & + $kind load docker-image humio/humio-operator-helper:dummy & + grep --only-matching --extended-regexp "humio/humio-core:[0-9.]+" controllers/versions/versions.go | awk '{print $1"-dummy"}' | xargs -I{} docker tag humio/humio-core:dummy {} + grep --only-matching --extended-regexp "humio/humio-core:[0-9.]+" controllers/versions/versions.go | awk '{print $1"-dummy"}' | xargs -I{} kind load docker-image {} + grep --only-matching --extended-regexp "humio/humio-operator-helper:[^\"]+" controllers/versions/versions.go | awk '{print $1"-dummy"}' | xargs -I{} docker tag humio/humio-operator-helper:dummy {} + grep --only-matching --extended-regexp "humio/humio-operator-helper:[^\"]+" controllers/versions/versions.go | awk '{print $1"-dummy"}' | xargs -I{} kind load docker-image {} + else + # Extract humio images and tags from go source + DEFAULT_IMAGE=$(grep '^\s*Image\s*=' controllers/humiocluster_defaults.go | cut -d '"' -f 2) + PRE_UPDATE_IMAGES=$(grep -R 'Version\s* = ' controllers/suite | grep -v oldUnsupportedHumioVersion | grep -v 1.x.x | cut -d '"' -f 2 | sort -u) + + # Preload default image used by tests + $docker pull $DEFAULT_IMAGE + $kind load docker-image --name kind $DEFAULT_IMAGE & + + # Preload image used by e2e update tests + for image in $PRE_UPDATE_IMAGES + do + $docker pull $image + $kind load docker-image --name kind $image & + done + fi # Preload image we will run e2e tests from within $docker build --no-cache --pull -t testcontainer -f 
test.Dockerfile . diff --git a/hack/run-e2e-using-kind-dummy.sh b/hack/run-e2e-using-kind-dummy.sh new file mode 100755 index 000000000..d2d290356 --- /dev/null +++ b/hack/run-e2e-using-kind-dummy.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +set -euxo pipefail +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.." +cd $PROJECT_ROOT + +source ./hack/functions.sh + +trap "cleanup_kind_cluster" EXIT + +declare -r ginkgo_nodes=${GINKGO_NODES:-12} +declare -r docker=$(which docker) +declare -r humio_e2e_license=${HUMIO_E2E_LICENSE} +declare -r e2e_run_ref=${GITHUB_REF:-outside-github-$(hostname)} +declare -r e2e_run_id=${GITHUB_RUN_ID:-none} +declare -r e2e_run_attempt=${GITHUB_RUN_ATTEMPT:-none} +declare -r humio_hostname=${E2E_LOGS_HUMIO_HOSTNAME:-none} +declare -r humio_ingest_token=${E2E_LOGS_HUMIO_INGEST_TOKEN:-none} +declare -r docker_username=${DOCKER_USERNAME:-none} +declare -r docker_password=${DOCKER_PASSWORD:-none} +declare -r dummy_logscale_image=${DUMMY_LOGSCALE_IMAGE:-true} + +if [ ! -x "${docker}" ] ; then + echo "'docker' is not installed. Install it and rerun the script." 
+ exit 1 +fi +$docker login + +mkdir -p $bin_dir + +install_kind +install_kubectl +install_helm + +start_kind_cluster +preload_container_images +kubectl_create_dockerhub_secret + +helm_install_shippers +helm_install_cert_manager + +wait_for_pod -l app.kubernetes.io/name=cert-manager +wait_for_pod -l app.kubernetes.io/name=cainjector +wait_for_pod -l app.kubernetes.io/name=webhook + +$kubectl create -k config/crd/ +$kubectl run test-pod --env="HUMIO_E2E_LICENSE=$humio_e2e_license" --env="E2E_LOGS_HUMIO_HOSTNAME=$humio_hostname" --env="E2E_LOGS_HUMIO_INGEST_TOKEN=$humio_ingest_token" --env="E2E_RUN_ID=$e2e_run_id" --env="GINKGO_NODES=$ginkgo_nodes" --env="DOCKER_USERNAME=$docker_username" --env="DOCKER_PASSWORD=$docker_password" --restart=Never --image=testcontainer --image-pull-policy=Never -- sleep 86400 +while [[ $($kubectl get pods test-pod -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting for pod" ; $kubectl describe pod test-pod ; sleep 1 ; done +$kubectl exec test-pod -- hack/run-e2e-within-kind-test-pod-dummy.sh diff --git a/hack/run-e2e-using-kind.sh b/hack/run-e2e-using-kind.sh index 478f09d69..685afded5 100755 --- a/hack/run-e2e-using-kind.sh +++ b/hack/run-e2e-using-kind.sh @@ -18,6 +18,7 @@ declare -r humio_hostname=${E2E_LOGS_HUMIO_HOSTNAME:-none} declare -r humio_ingest_token=${E2E_LOGS_HUMIO_INGEST_TOKEN:-none} declare -r docker_username=${DOCKER_USERNAME:-none} declare -r docker_password=${DOCKER_PASSWORD:-none} +declare -r dummy_logscale_image=${DUMMY_LOGSCALE_IMAGE:-false} if [ ! -x "${docker}" ] ; then echo "'docker' is not installed. Install it and rerun the script." 
diff --git a/hack/run-e2e-within-kind-test-pod-dummy.sh b/hack/run-e2e-within-kind-test-pod-dummy.sh new file mode 100755 index 000000000..a547f8cee --- /dev/null +++ b/hack/run-e2e-within-kind-test-pod-dummy.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -x -o pipefail + +source hack/functions.sh + +# We skip the helpers package as those tests assumes the environment variable USE_CERT_MANAGER is not set. +USE_CERTMANAGER=true TEST_USE_EXISTING_CLUSTER=true DUMMY_LOGSCALE_IMAGE=true ginkgo --label-filter=dummy -timeout 120m -nodes=$GINKGO_NODES --no-color --skip-package helpers -race -v ./controllers/suite/... -covermode=count -coverprofile cover.out -progress | tee /proc/1/fd/1 diff --git a/hack/run-e2e-within-kind-test-pod.sh b/hack/run-e2e-within-kind-test-pod.sh index 5ccc01b1d..97666e06d 100755 --- a/hack/run-e2e-within-kind-test-pod.sh +++ b/hack/run-e2e-within-kind-test-pod.sh @@ -5,4 +5,4 @@ set -x -o pipefail source hack/functions.sh # We skip the helpers package as those tests assumes the environment variable USE_CERT_MANAGER is not set. -USE_CERTMANAGER=true TEST_USE_EXISTING_CLUSTER=true ginkgo -timeout 120m -nodes=$GINKGO_NODES --no-color --skip-package helpers -race -v ./controllers/suite/... -covermode=count -coverprofile cover.out -progress | tee /proc/1/fd/1 +USE_CERTMANAGER=true TEST_USE_EXISTING_CLUSTER=true ginkgo --label-filter=real -timeout 120m -nodes=$GINKGO_NODES --no-color --skip-package helpers -race -v ./controllers/suite/... 
-covermode=count -coverprofile cover.out -progress | tee /proc/1/fd/1 diff --git a/images/helper/Dockerfile b/images/helper/Dockerfile index 9bbc29904..4b5a05681 100644 --- a/images/helper/Dockerfile +++ b/images/helper/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22 as builder +FROM golang:1.22 AS builder ARG RELEASE_VERSION=master ARG RELEASE_COMMIT=none diff --git a/images/helper/main.go b/images/helper/main.go index 7f992a96d..f3c1ab6ad 100644 --- a/images/helper/main.go +++ b/images/helper/main.go @@ -354,69 +354,78 @@ func authMode() { kubernetesClient := newKubernetesClientset() - for { - // Check required files exist before we continue - if !fileExists(localAdminTokenFile) { - fmt.Printf("Waiting on the Humio container to create the files %s. Retrying in 5 seconds.\n", localAdminTokenFile) - time.Sleep(5 * time.Second) - continue - } + var apiToken, methodUsed string - // Get local admin token and create humio client with it - localAdminToken := getFileContent(localAdminTokenFile) - if localAdminToken == "" { - fmt.Printf("Local admin token file is empty. This might be due to Humio not being fully started up yet. Retrying in 5 seconds.\n") - time.Sleep(5 * time.Second) - continue - } + for { + if os.Getenv("DUMMY_LOGSCALE_IMAGE") != "true" { + var err error + + // Check required files exist before we continue + if !fileExists(localAdminTokenFile) { + fmt.Printf("Waiting on the Humio container to create the files %s. Retrying in 5 seconds.\n", localAdminTokenFile) + time.Sleep(5 * time.Second) + continue + } - nodeURL, err := url.Parse(humioNodeURL) - if err != nil { - fmt.Printf("Unable to parse URL %s: %s\n", humioNodeURL, err) - time.Sleep(5 * time.Second) - continue - } + // Get local admin token and create humio client with it + localAdminToken := getFileContent(localAdminTokenFile) + if localAdminToken == "" { + fmt.Printf("Local admin token file is empty. This might be due to Humio not being fully started up yet. 
Retrying in 5 seconds.\n") + time.Sleep(5 * time.Second) + continue + } - err = validateAdminSecretContent(ctx, kubernetesClient, namespace, clusterName, adminSecretNameSuffix, nodeURL) - if err == nil { - fmt.Printf("Existing token is still valid, thus no changes required. Will confirm again in 30 seconds.\n") - time.Sleep(30 * time.Second) - continue - } + nodeURL, err := url.Parse(humioNodeURL) + if err != nil { + fmt.Printf("Unable to parse URL %s: %s\n", humioNodeURL, err) + time.Sleep(5 * time.Second) + continue + } - fmt.Printf("Could not validate existing admin secret: %s\n", err) - fmt.Printf("Continuing to create/update token.\n") + err = validateAdminSecretContent(ctx, kubernetesClient, namespace, clusterName, adminSecretNameSuffix, nodeURL) + if err == nil { + fmt.Printf("Existing token is still valid, thus no changes required. Will confirm again in 30 seconds.\n") + time.Sleep(30 * time.Second) + continue + } - clientNotReady := humioClient == nil || - humioClient.Token() != localAdminToken || - humioClient.Address() == nil // Auth container uses pod name for the address, and pod names are immutable. - if clientNotReady { - fmt.Printf("Updating humioClient to use localAdminToken\n") - humioClient = humio.NewClient(humio.Config{ - Address: nodeURL, - UserAgent: fmt.Sprintf("humio-operator-helper/%s (%s on %s)", version, commit, date), - Token: localAdminToken, - }) - } + fmt.Printf("Could not validate existing admin secret: %s\n", err) + fmt.Printf("Continuing to create/update token.\n") + + clientNotReady := humioClient == nil || + humioClient.Token() != localAdminToken || + humioClient.Address() == nil // Auth container uses pod name for the address, and pod names are immutable. 
+ if clientNotReady { + fmt.Printf("Updating humioClient to use localAdminToken\n") + humioClient = humio.NewClient(humio.Config{ + Address: nodeURL, + UserAgent: fmt.Sprintf("humio-operator-helper/%s (%s on %s)", version, commit, date), + Token: localAdminToken, + }) + } - // Get user ID of admin account - userID, err := createAndGetAdminAccountUserID(humioClient, organizationMode) - if err != nil { - fmt.Printf("Got err trying to obtain user ID of admin user: %s\n", err) - time.Sleep(5 * time.Second) - continue - } + // Get user ID of admin account + userID, err := createAndGetAdminAccountUserID(humioClient, organizationMode) + if err != nil { + fmt.Printf("Got err trying to obtain user ID of admin user: %s\n", err) + time.Sleep(5 * time.Second) + continue + } - // Get API token for user ID of admin account - apiToken, methodUsed, err := getApiTokenForUserID(humioClient, userID) - if err != nil { - fmt.Printf("Got err trying to obtain api token of admin user: %s\n", err) - time.Sleep(5 * time.Second) - continue + // Get API token for user ID of admin account + apiToken, methodUsed, err = getApiTokenForUserID(humioClient, userID) + if err != nil { + fmt.Printf("Got err trying to obtain api token of admin user: %s\n", err) + time.Sleep(5 * time.Second) + continue + } + } else { + apiToken = "DUMMY" + methodUsed = apiTokenMethodFromAPI } // Update Kubernetes secret if needed - err = ensureAdminSecretContent(ctx, kubernetesClient, namespace, clusterName, adminSecretNameSuffix, apiToken, methodUsed) + err := ensureAdminSecretContent(ctx, kubernetesClient, namespace, clusterName, adminSecretNameSuffix, apiToken, methodUsed) if err != nil { fmt.Printf("Got error ensuring k8s secret contains apiToken: %s\n", err) time.Sleep(5 * time.Second) diff --git a/images/logscale-dummy/Dockerfile b/images/logscale-dummy/Dockerfile new file mode 100644 index 000000000..78d2f8eb6 --- /dev/null +++ b/images/logscale-dummy/Dockerfile @@ -0,0 +1,7 @@ +FROM golang:1.22 AS builder + 
// main implements a dummy LogScale (Humio) server for e2e tests: it answers
// every HTTP request on the configured Humio port (and, optionally, an
// Elasticsearch ingest port) with an empty body.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "\n")
	})

	humioPort := os.Getenv("HUMIO_PORT")
	esPort := os.Getenv("ELASTIC_PORT")
	// The real LogScale container serves TLS when a keystore is configured,
	// so the presence of TLS_KEYSTORE_LOCATION is used as the TLS toggle.
	_, tlsEnabled := os.LookupEnv("TLS_KEYSTORE_LOCATION")

	// Default the Humio port to 8080 when HUMIO_PORT is unset.
	// BUG FIX: the original condition was inverted (humioPort != ""), which
	// clobbered any explicitly configured port and left the port empty when
	// HUMIO_PORT was not set.
	if humioPort == "" {
		humioPort = "8080"
	}

	if tlsEnabled {
		fmt.Println("HTTPS")
		runHTTPS(humioPort, esPort)
	} else {
		fmt.Println("HTTP")
		runHTTP(humioPort, esPort)
	}
}

// runHTTPS serves TLS on humioPort (and esPort when non-empty) using the
// self-signed cert.pem/key.pem baked into the image at build time.
// It blocks until the listener fails.
func runHTTPS(humioPort, esPort string) {
	if esPort != "" {
		go func() {
			// The ES listener is optional; log its failure instead of exiting.
			if err := http.ListenAndServeTLS(fmt.Sprintf(":%s", esPort), "cert.pem", "key.pem", nil); err != nil {
				fmt.Printf("got err=%v", err)
			}
		}()
	}
	if err := http.ListenAndServeTLS(fmt.Sprintf(":%s", humioPort), "cert.pem", "key.pem", nil); err != nil {
		fmt.Printf("got err=%v", err)
	}
}

// runHTTP serves plain HTTP on humioPort (and esPort when non-empty).
// It blocks until the listener fails.
func runHTTP(humioPort, esPort string) {
	if esPort != "" {
		go func() {
			// The ES listener is optional; log its failure instead of exiting.
			if err := http.ListenAndServe(fmt.Sprintf(":%s", esPort), nil); err != nil {
				fmt.Printf("got err=%v", err)
			}
		}()
	}
	if err := http.ListenAndServe(fmt.Sprintf(":%s", humioPort), nil); err != nil {
		fmt.Printf("got err=%v", err)
	}
}

/*
	TODO: Consider loading in the "real" certificate from the keystore instead of baking in a cert.pem and key.pem during build.

	TODO: Consider adding functionality that writes a file so "wait for global file in test cases" will pass.
*/
+ "ls /mnt/global*.json", +*/ diff --git a/images/logscale-dummy/run.sh b/images/logscale-dummy/run.sh new file mode 100644 index 000000000..14ec27a0b --- /dev/null +++ b/images/logscale-dummy/run.sh @@ -0,0 +1 @@ +exec /app/humio/dummy \ No newline at end of file diff --git a/pkg/humio/client_mock.go b/pkg/humio/client_mock.go index 68f413e4c..4b0fdef9c 100644 --- a/pkg/humio/client_mock.go +++ b/pkg/humio/client_mock.go @@ -94,7 +94,7 @@ func (h *MockClientConfig) Status(config *humioapi.Config, req reconcile.Request } func (h *MockClientConfig) GetClusters(config *humioapi.Config, req reconcile.Request) (humioapi.Cluster, error) { - return humioapi.Cluster{}, fmt.Errorf("not implemented") + return humioapi.Cluster{}, nil } func (h *MockClientConfig) GetBaseURL(config *humioapi.Config, req reconcile.Request, hc *humiov1alpha1.HumioCluster) *url.URL {