Commit
Bump minimum supported version to LogScale 1.118.0 (#792)
SaaldjorMike authored May 6, 2024
1 parent 46aaaf6 commit 6880268
Showing 21 changed files with 160 additions and 580 deletions.
22 changes: 11 additions & 11 deletions .github/workflows/ci.yaml
@@ -39,6 +39,17 @@ jobs:
- uses: actions/setup-go@v4
with:
go-version: '1.22.2'
- name: Run Gosec Security Scanner
run: |
export PATH=$PATH:$(go env GOPATH)/bin
go get github.com/securego/gosec/cmd/gosec
go install github.com/securego/gosec/cmd/gosec
gosec ./...
- name: Run Staticcheck
uses: dominikh/[email protected]
with:
version: "2023.1.7"
install-go: false
- name: operator image
run: make docker-build-operator IMG=humio/humio-operator:${{ github.sha }}
- name: helper image
@@ -67,14 +78,3 @@ jobs:
container_tag: ${{ github.sha }}
env:
FALCON_CLIENT_SECRET: "${{ secrets.FALCON_CLIENT_SECRET }}"
- name: Run Gosec Security Scanner
run: |
export PATH=$PATH:$(go env GOPATH)/bin
go get github.com/securego/gosec/cmd/gosec
go install github.com/securego/gosec/cmd/gosec
gosec ./...
- name: Run Staticcheck
uses: dominikh/[email protected]
with:
version: "2023.1.7"
install-go: false
2 changes: 2 additions & 0 deletions api/v1alpha1/humiocluster_types.go
@@ -51,10 +51,12 @@ const (
type HumioClusterSpec struct {
// AutoRebalancePartitions will enable auto-rebalancing of both digest and storage partitions assigned to humio cluster nodes.
// If all Kubernetes worker nodes are located in the same availability zone, you must set DisableInitContainer to true to use auto rebalancing of partitions.
// Deprecated: No longer needed as of 1.89.0 as partitions and segment distribution is now automatically managed by LogScale itself.
AutoRebalancePartitions bool `json:"autoRebalancePartitions,omitempty"`
// TargetReplicationFactor is the desired number of replicas of both storage and ingest partitions
TargetReplicationFactor int `json:"targetReplicationFactor,omitempty"`
// StoragePartitionsCount is the desired number of storage partitions
// Deprecated: No longer needed as LogScale now automatically redistributes segments
StoragePartitionsCount int `json:"storagePartitionsCount,omitempty"`
// DigestPartitionsCount is the desired number of digest partitions
DigestPartitionsCount int `json:"digestPartitionsCount,omitempty"`
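The deprecation notes added above are documentation-only; both fields stay in the struct so existing manifests keep validating. A minimal sketch, assuming the operator's module path github.com/humio/humio-operator and using purely illustrative values, of a spec that leaves the deprecated fields at their zero values and lets LogScale handle segment distribution:

package main

import (
	"fmt"

	humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
)

func main() {
	// AutoRebalancePartitions and StoragePartitionsCount are deprecated, so
	// they are simply left at their zero values; per the deprecation notes,
	// LogScale 1.89.0+ redistributes segments on its own. The numbers below
	// are illustrative only.
	spec := humiov1alpha1.HumioClusterSpec{
		TargetReplicationFactor: 2,
		DigestPartitionsCount:   24,
	}
	fmt.Printf("%+v\n", spec)
}
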
6 changes: 4 additions & 2 deletions charts/humio-operator/crds/core.humio.com_humioclusters.yaml
@@ -830,6 +830,7 @@ spec:
description: |-
AutoRebalancePartitions will enable auto-rebalancing of both digest and storage partitions assigned to humio cluster nodes.
If all Kubernetes worker nodes are located in the same availability zone, you must set DisableInitContainer to true to use auto rebalancing of partitions.
Deprecated: No longer needed as of 1.89.0 as partitions and segment distribution is now automatically managed by LogScale itself.
type: boolean
commonEnvironmentVariables:
description: |-
@@ -14102,8 +14103,9 @@ spec:
type: object
type: array
storagePartitionsCount:
description: StoragePartitionsCount is the desired number of storage
partitions
description: |-
StoragePartitionsCount is the desired number of storage partitions
Deprecated: No longer needed as LogScale now automatically redistributes segments
type: integer
targetReplicationFactor:
description: TargetReplicationFactor is the desired number of replicas
6 changes: 4 additions & 2 deletions config/crd/bases/core.humio.com_humioclusters.yaml
@@ -830,6 +830,7 @@ spec:
description: |-
AutoRebalancePartitions will enable auto-rebalancing of both digest and storage partitions assigned to humio cluster nodes.
If all Kubernetes worker nodes are located in the same availability zone, you must set DisableInitContainer to true to use auto rebalancing of partitions.
Deprecated: No longer needed as of 1.89.0 as partitions and segment distribution is now automatically managed by LogScale itself.
type: boolean
commonEnvironmentVariables:
description: |-
@@ -14102,8 +14103,9 @@ spec:
type: object
type: array
storagePartitionsCount:
description: StoragePartitionsCount is the desired number of storage
partitions
description: |-
StoragePartitionsCount is the desired number of storage partitions
Deprecated: No longer needed as LogScale now automatically redistributes segments
type: integer
targetReplicationFactor:
description: TargetReplicationFactor is the desired number of replicas
60 changes: 6 additions & 54 deletions controllers/humiocluster_controller.go
@@ -343,11 +343,6 @@ func (r *HumioClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
}
}

if err = r.ensurePartitionsAreBalanced(hc, cluster.Config(), req); err != nil {
return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
withMessage(err.Error()))
}

for _, fun := range []ctxHumioClusterFunc{
r.cleanupUnusedTLSCertificates,
r.cleanupUnusedTLSSecrets,
Expand Down Expand Up @@ -1429,19 +1424,19 @@ func (r *HumioClusterReconciler) ensureOrphanedPvcsAreDeleted(ctx context.Contex
if err != nil {
return r.logErrorAndReturn(err, "failed to list pvcs")
}
for _, pvc := range pvcList {
pvcOrphaned, err := r.isPvcOrphaned(ctx, hnp, hc, pvc)
for idx := range pvcList {
pvcOrphaned, err := r.isPvcOrphaned(ctx, hnp, hc, pvcList[idx])
if err != nil {
return r.logErrorAndReturn(err, "could not check if pvc is orphaned")
}
if pvcOrphaned {
if pvc.DeletionTimestamp == nil {
if pvcList[idx].DeletionTimestamp == nil {
r.Log.Info(fmt.Sprintf("node cannot be found for pvc. deleting pvc %s as "+
"dataVolumePersistentVolumeClaimPolicy is set to %s", pvc.Name,
"dataVolumePersistentVolumeClaimPolicy is set to %s", pvcList[idx].Name,
humiov1alpha1.HumioPersistentVolumeReclaimTypeOnNodeDelete))
err = r.Client.Delete(ctx, &pvc)
err = r.Client.Delete(ctx, &pvcList[idx])
if err != nil {
return r.logErrorAndReturn(err, fmt.Sprintf("cloud not delete pvc %s", pvc.Name))
return r.logErrorAndReturn(err, fmt.Sprintf("cloud not delete pvc %s", pvcList[idx].Name))
}
}
}
Expand Down Expand Up @@ -1555,49 +1550,6 @@ func (r *HumioClusterReconciler) ensureLicense(ctx context.Context, hc *humiov1a
return reconcile.Result{}, nil
}

func (r *HumioClusterReconciler) ensurePartitionsAreBalanced(hc *humiov1alpha1.HumioCluster, config *humioapi.Config, req reconcile.Request) error {
humioVersion, _ := HumioVersionFromString(NewHumioNodeManagerFromHumioCluster(hc).GetImage())
if ok, _ := humioVersion.AtLeast(HumioVersionWithAutomaticPartitionManagement); ok {
return nil
}

if !hc.Spec.AutoRebalancePartitions {
r.Log.Info("partition auto-rebalancing not enabled, skipping")
return nil
}

currentClusterInfo, err := r.HumioClient.GetClusters(config, req)
if err != nil {
return r.logErrorAndReturn(err, "could not get cluster info")
}

suggestedStorageLayout, err := r.HumioClient.SuggestedStoragePartitions(config, req)
if err != nil {
return r.logErrorAndReturn(err, "could not get suggested storage layout")
}
currentStorageLayoutInput := helpers.MapStoragePartition(currentClusterInfo.StoragePartitions, helpers.ToStoragePartitionInput)
if !reflect.DeepEqual(currentStorageLayoutInput, suggestedStorageLayout) {
r.Log.Info(fmt.Sprintf("triggering update of storage partitions to use suggested layout, current: %#+v, suggested: %#+v", currentClusterInfo.StoragePartitions, suggestedStorageLayout))
if err = r.HumioClient.UpdateStoragePartitionScheme(config, req, suggestedStorageLayout); err != nil {
return r.logErrorAndReturn(err, "could not update storage partition scheme")
}
}

suggestedIngestLayout, err := r.HumioClient.SuggestedIngestPartitions(config, req)
if err != nil {
return r.logErrorAndReturn(err, "could not get suggested ingest layout")
}
currentIngestLayoutInput := helpers.MapIngestPartition(currentClusterInfo.IngestPartitions, helpers.ToIngestPartitionInput)
if !reflect.DeepEqual(currentIngestLayoutInput, suggestedIngestLayout) {
r.Log.Info(fmt.Sprintf("triggering update of ingest partitions to use suggested layout, current: %#+v, suggested: %#+v", currentClusterInfo.IngestPartitions, suggestedIngestLayout))
if err = r.HumioClient.UpdateIngestPartitionScheme(config, req, suggestedIngestLayout); err != nil {
return r.logErrorAndReturn(err, "could not update ingest partition scheme")
}
}

return nil
}

func (r *HumioClusterReconciler) ensureService(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error {
r.Log.Info("ensuring service")
existingService, err := kubernetes.GetService(ctx, r, hnp.GetNodePoolName(), hnp.GetNamespace())
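Two details in this file stand out. The partition-balancing pass is removed entirely, which is consistent with the new 1.118.0 minimum being well past the release where LogScale took over partition management (1.89.0 per the deprecation notes above). And the orphaned-PVC loop now indexes into the slice (&pvcList[idx]) rather than taking the address of the range variable, which sidesteps the implicit-memory-aliasing pattern the gosec step in CI reports as G601. A small self-contained sketch of that difference, using illustrative names rather than the operator's types:

package main

import "fmt"

type pvc struct{ Name string }

func main() {
	pvcList := []pvc{{Name: "pvc-a"}, {Name: "pvc-b"}}

	// Taking the address of the range variable is what gosec reports as G601.
	// Before Go 1.22 the variable was reused across iterations, so without
	// the explicit per-iteration copy every stored pointer would alias the
	// same memory.
	var fromRangeVar []*pvc
	for _, p := range pvcList {
		p := p // per-iteration copy; on Go < 1.22 both pointers would otherwise be identical
		fromRangeVar = append(fromRangeVar, &p)
	}

	// Indexing into the slice, as the diff does with &pvcList[idx], always
	// yields a pointer to the slice element itself and needs no copy.
	var fromIndex []*pvc
	for idx := range pvcList {
		fromIndex = append(fromIndex, &pvcList[idx])
	}

	fmt.Println(fromRangeVar[1].Name, fromIndex[1].Name) // pvc-b pvc-b
}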