Merge pull request #221 from gianlucam76/main

Merge dev to main

gianlucam76 authored Aug 18, 2024
2 parents c8a21f1 + 744b8f7 commit d81667c
Showing 11 changed files with 456 additions and 215 deletions.
18 changes: 9 additions & 9 deletions .github/workflows/main.yaml
@@ -14,11 +14,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: checkout
-      uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # tag=v4.1.5
+      uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7
     - name: Set up Go
-      uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1
+      uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # tag=v5.0.2
       with:
-        go-version: 1.22.0
+        go-version: 1.22.5
     - name: Build
       run: make build
     - name: FMT
@@ -33,11 +33,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: checkout
-      uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # tag=v4.1.5
+      uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7
     - name: Set up Go
-      uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1
+      uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # tag=v5.0.2
       with:
-        go-version: 1.22.0
+        go-version: 1.22.5
     - name: ut
       run: make test
       env:
@@ -46,11 +46,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: checkout
-      uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # tag=v4.1.5
+      uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7
     - name: Set up Go
-      uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1
+      uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # tag=v5.0.2
       with:
-        go-version: 1.22.0
+        go-version: 1.22.5
     - name: fv
       run: make create-cluster fv
       env:
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.22 AS builder
+FROM golang:1.22.5 AS builder

 ARG BUILDOS
 ARG TARGETARCH
10 changes: 5 additions & 5 deletions Makefile
@@ -2,7 +2,7 @@
 # Image URL to use all building/pushing image targets
 IMG ?= controller:latest
 # KUBEBUILDER_ENVTEST_KUBERNETES_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-KUBEBUILDER_ENVTEST_KUBERNETES_VERSION = 1.30.0
+KUBEBUILDER_ENVTEST_KUBERNETES_VERSION = 1.31.0

 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
@@ -68,8 +68,8 @@ CLUSTERCTL := $(TOOLS_BIN_DIR)/clusterctl
 KIND := $(TOOLS_BIN_DIR)/kind
 KUBECTL := $(TOOLS_BIN_DIR)/kubectl

-GOLANGCI_LINT_VERSION := "v1.57.2"
-CLUSTERCTL_VERSION := "v1.7.4"
+GOLANGCI_LINT_VERSION := "v1.59.0"
+CLUSTERCTL_VERSION := "v1.8.1"

 KUSTOMIZE_VER := v5.3.0
 KUSTOMIZE_BIN := kustomize
@@ -78,7 +78,7 @@ KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v5
 $(KUSTOMIZE): # Build kustomize from tools folder.
 	CGO_ENABLED=0 GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)

-SETUP_ENVTEST_VER := v0.0.0-20240215143116-d0396a3d6f9f
+SETUP_ENVTEST_VER := v0.0.0-20240522175850-2e9781e9fc60
 SETUP_ENVTEST_BIN := setup-envtest
 SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
 SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
@@ -166,7 +166,7 @@ endif
 # K8S_VERSION for the Kind cluster can be set as environment variable. If not defined,
 # this default value is used
 ifndef K8S_VERSION
-K8S_VERSION := v1.30.0
+K8S_VERSION := v1.31.0
 endif

 CONTROL_CLUSTER_NAME ?= sveltos-management
4 changes: 4 additions & 0 deletions controllers/export_test.go
@@ -19,3 +19,7 @@ package controllers
 var (
 	ShouldRenewTokenRequest = (*SveltosClusterReconciler).shouldRenewTokenRequest
 )
+
+var (
+	HandleAutomaticPauseUnPause = handleAutomaticPauseUnPause
+)
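
For readers unfamiliar with the pattern, export_test.go is the standard Go idiom for exposing unexported identifiers to tests: because _test.go files are compiled only during go test, the alias is visible to the external test package without widening the production API. A generic sketch, with hypothetical package and names:

// file: mypkg/mypkg.go
package mypkg

func helper(x int) int { return 2 * x }

// file: mypkg/export_test.go
// Compiled only for tests; re-exports the unexported helper.
package mypkg

var Helper = helper

// file: mypkg/mypkg_test.go
package mypkg_test

import (
	"testing"

	"example.com/mypkg"
)

func TestHelper(t *testing.T) {
	// The external test package reaches the unexported helper through the alias.
	if got := mypkg.Helper(2); got != 4 {
		t.Fatalf("Helper(2) = %d, want 4", got)
	}
}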
103 changes: 103 additions & 0 deletions controllers/sveltoscluster_controller.go
@@ -25,6 +25,7 @@ import (
"github.com/Masterminds/semver"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"github.com/robfig/cron/v3"
authenticationv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -140,11 +141,14 @@ func (r *SveltosClusterReconciler) reconcileNormal(
logger := sveltosClusterScope.Logger
logger.V(logs.LogInfo).Info("Reconciling SveltosCluster")

defer handleAutomaticPauseUnPause(sveltosClusterScope.SveltosCluster, time.Now(), logger)

s := runtime.NewScheme()
if err := clientgoscheme.AddToScheme(s); err != nil {
errorMessage := err.Error()
logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get scheme: %v", err))
sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
updateConnectionStatus(sveltosClusterScope, logger)
return
}

@@ -155,6 +159,7 @@ func (r *SveltosClusterReconciler) reconcileNormal(
errorMessage := err.Error()
logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get client: %v", err))
sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
updateConnectionStatus(sveltosClusterScope, logger)
return
}

@@ -165,6 +170,7 @@ func (r *SveltosClusterReconciler) reconcileNormal(
errorMessage := err.Error()
logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get client: %v", err))
sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
updateConnectionStatus(sveltosClusterScope, logger)
return
}

@@ -203,6 +209,22 @@ func (r *SveltosClusterReconciler) reconcileNormal(
}
}
}

updateConnectionStatus(sveltosClusterScope, logger)
}

func updateConnectionStatus(sveltosClusterScope *scope.SveltosClusterScope, logger logr.Logger) {
if sveltosClusterScope.SveltosCluster.Status.FailureMessage != nil {
logger.V(logs.LogDebug).Info("increasing connectionFailures")
sveltosClusterScope.SveltosCluster.Status.ConnectionFailures++
if sveltosClusterScope.SveltosCluster.Status.ConnectionFailures >= sveltosClusterScope.SveltosCluster.Spec.ConsecutiveFailureThreshold {
logger.V(logs.LogDebug).Info("connectionFailures is higher than consecutiveFailureThreshold. Set connectionStatus to down")
sveltosClusterScope.SveltosCluster.Status.ConnectionStatus = libsveltosv1beta1.ConnectionDown
}
} else {
sveltosClusterScope.SveltosCluster.Status.ConnectionStatus = libsveltosv1beta1.ConnectionHealthy
sveltosClusterScope.SveltosCluster.Status.ConnectionFailures = 0
}
}

// SetupWithManager sets up the controller with the Manager.
@@ -388,3 +410,84 @@ current-context: sveltos-context`

return data
}

func handleAutomaticPauseUnPause(sveltosCluster *libsveltosv1beta1.SveltosCluster,
currentTime time.Time, logger logr.Logger) {

if sveltosCluster.Spec.ActiveWindow == nil {
return
}

if sveltosCluster.Status.NextPause != nil && sveltosCluster.Status.NextUnpause != nil {
if currentTime.After(sveltosCluster.Status.NextUnpause.Time) &&
currentTime.Before(sveltosCluster.Status.NextPause.Time) {

sveltosCluster.Spec.Paused = false
return
} else if currentTime.Before(sveltosCluster.Status.NextUnpause.Time) {
sveltosCluster.Spec.Paused = true
return
} else if currentTime.After(sveltosCluster.Status.NextPause.Time) {
sveltosCluster.Spec.Paused = true
} else if currentTime.Before(sveltosCluster.Status.NextPause.Time) {
// Update NextUnpause and NextPause only once current time is past NextPause
return
}
} else {
sveltosCluster.Spec.Paused = true
}

if sveltosCluster.Status.NextUnpause == nil || currentTime.After(sveltosCluster.Status.NextUnpause.Time) {
lastRunTime := sveltosCluster.CreationTimestamp
if sveltosCluster.Status.NextUnpause != nil {
lastRunTime = *sveltosCluster.Status.NextUnpause
}

nextFromTime, err := getNextScheduleTime(sveltosCluster.Spec.ActiveWindow.From, &lastRunTime, currentTime)
if err != nil {
logger.V(logs.LogInfo).Error(err, "failed to get next from time")
return
}
sveltosCluster.Status.NextUnpause = &metav1.Time{Time: *nextFromTime}
}

if sveltosCluster.Status.NextPause == nil || currentTime.After(sveltosCluster.Status.NextPause.Time) {
lastRunTime := sveltosCluster.CreationTimestamp
if sveltosCluster.Status.NextPause != nil {
lastRunTime = *sveltosCluster.Status.NextPause
}

nextToTime, err := getNextScheduleTime(sveltosCluster.Spec.ActiveWindow.To, &lastRunTime, currentTime)
if err != nil {
logger.V(logs.LogInfo).Error(err, "failed to get next to time")
return
}

sveltosCluster.Status.NextPause = &metav1.Time{Time: *nextToTime}
}
}

// getNextScheduleTime gets the time of next schedule after last scheduled and before now
func getNextScheduleTime(schedule string, lastRunTime *metav1.Time, now time.Time) (*time.Time, error) {
sched, err := cron.ParseStandard(schedule)
if err != nil {
return nil, fmt.Errorf("unparseable schedule %q: %w", schedule, err)
}

if lastRunTime == nil {
return nil, fmt.Errorf("last run time must be specified")
}

starts := 0
for t := sched.Next(lastRunTime.Time); t.Before(now); t = sched.Next(t) {
const maxNumberOfFailures = 100
starts++
if starts > maxNumberOfFailures {
return nil,
fmt.Errorf("too many missed start times (> %d). Set or check clock skew",
maxNumberOfFailures)
}
}

next := sched.Next(now)
return &next, nil
}
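
As an aside, here is a minimal standalone Go sketch (not part of this commit) of how the two ActiveWindow cron expressions resolve to concrete pause/unpause instants, using the same robfig/cron/v3 package the controller imports above; the window and dates mirror the test below:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// Active window from the test below: unpause Friday 8PM, pause Monday 7AM.
	unpause, err := cron.ParseStandard("0 20 * * 5")
	if err != nil {
		panic(err)
	}
	pause, err := cron.ParseStandard("0 7 * * 1")
	if err != nil {
		panic(err)
	}

	// Tuesday, July 30 2024 is outside the window, so the cluster stays paused.
	now := time.Date(2024, time.July, 30, 9, 0, 0, 0, time.UTC)
	fmt.Println("next unpause:", unpause.Next(now)) // Friday, Aug 2, 20:00
	fmt.Println("next pause:  ", pause.Next(now))   // Monday, Aug 5, 07:00
}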
131 changes: 131 additions & 0 deletions controllers/sveltoscluster_controller_test.go
@@ -89,6 +89,87 @@ var _ = Describe("SveltosCluster: Reconciler", func() {
return err == nil &&
currentSveltosCluster.Status.Ready
}, timeout, pollingInterval).Should(BeTrue())

currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
err = testEnv.Get(context.TODO(), sveltosClusterName, currentSveltosCluster)
Expect(err).To(BeNil())
Expect(currentSveltosCluster.Status.ConnectionFailures).To(Equal(0))
Expect(currentSveltosCluster.Status.ConnectionStatus).To(Equal(libsveltosv1beta1.ConnectionHealthy))
})

It("reconcile set connection down after enough consecutive failed connection", func() {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: sveltosCluster.Namespace,
},
}

Expect(testEnv.Create(context.TODO(), ns)).To(Succeed())
Expect(waitForObject(context.TODO(), testEnv.Client, ns)).To(Succeed())

// Create Secret containing Kubeconfig to access SveltosCluster

sveltosSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: sveltosCluster.Namespace,
Name: sveltosCluster.Name + "-sveltos-kubeconfig",
},
Data: map[string][]byte{
"data": []byte("not a valid kubeconfig"),
},
}

Expect(testEnv.Create(context.TODO(), &sveltosSecret)).To(Succeed())
Expect(waitForObject(context.TODO(), testEnv.Client, &sveltosSecret)).To(Succeed())

Expect(testEnv.Create(context.TODO(), sveltosCluster)).To(Succeed())
Expect(waitForObject(context.TODO(), testEnv.Client, sveltosCluster)).To(Succeed())

reconciler := getClusterProfileReconciler(testEnv.Client)

sveltosClusterName := client.ObjectKey{
Name: sveltosCluster.Name,
Namespace: sveltosCluster.Namespace,
}
_, err := reconciler.Reconcile(context.TODO(), ctrl.Request{
NamespacedName: sveltosClusterName,
})
Expect(err).ToNot(HaveOccurred())

Eventually(func() bool {
currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
err := testEnv.Get(context.TODO(), sveltosClusterName, currentSveltosCluster)
return err == nil &&
!currentSveltosCluster.Status.Ready &&
currentSveltosCluster.Status.ConnectionFailures == 1
}, timeout, pollingInterval).Should(BeTrue())

_, err = reconciler.Reconcile(context.TODO(), ctrl.Request{
NamespacedName: sveltosClusterName,
})
Expect(err).ToNot(HaveOccurred())

Eventually(func() bool {
currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
err := testEnv.Get(context.TODO(), sveltosClusterName, currentSveltosCluster)
return err == nil &&
!currentSveltosCluster.Status.Ready &&
currentSveltosCluster.Status.ConnectionFailures == 2
}, timeout, pollingInterval).Should(BeTrue())

_, err = reconciler.Reconcile(context.TODO(), ctrl.Request{
NamespacedName: sveltosClusterName,
})
Expect(err).ToNot(HaveOccurred())

Eventually(func() bool {
currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
err := testEnv.Get(context.TODO(), sveltosClusterName, currentSveltosCluster)
return err == nil &&
!currentSveltosCluster.Status.Ready &&
currentSveltosCluster.Status.ConnectionFailures == 3 &&
currentSveltosCluster.Status.ConnectionStatus == libsveltosv1beta1.ConnectionDown
}, timeout, pollingInterval).Should(BeTrue())
})

It("shouldRenewTokenRequest returns true when enough time has passed since last TokenRequest renewal", func() {
@@ -141,6 +222,56 @@ var _ = Describe("SveltosCluster: Reconciler", func() {
Expect(controllers.ShouldRenewTokenRequest(reconciler, sveltosClusterScope, logger)).To(BeFalse())
})

It("handleAutomaticPauseUnPause updates Spec.Paused based on Spec.Schedule", func() {
sveltosCluster.Spec.ActiveWindow = &libsveltosv1beta1.ActiveWindow{
From: "0 20 * * 5", // every friday 8PM
To: "0 7 * * 1", // every monday 7AM
}
loc, err := time.LoadLocation("Europe/Rome") // a fixed time zone keeps the expected times deterministic
Expect(err).To(BeNil())
tuesday8AM := time.Date(2024, time.July, 30, 8, 0, 0, 0, loc)
sveltosCluster.CreationTimestamp = metav1.Time{Time: tuesday8AM}

controllers.HandleAutomaticPauseUnPause(sveltosCluster, tuesday8AM.Add(time.Hour), logger)

// Next unpause coming friday at 8PM
Expect(sveltosCluster.Status.NextUnpause).ToNot(BeNil())
expectedUnpause := time.Date(2024, time.August, 2, 20, 0, 0, 0, loc)
Expect(sveltosCluster.Status.NextUnpause.Time).To(Equal(expectedUnpause))

// Next pause coming monday at 7AM
Expect(sveltosCluster.Status.NextPause).ToNot(BeNil())
expectedPause := time.Date(2024, time.August, 5, 7, 0, 0, 0, loc)
Expect(sveltosCluster.Status.NextPause.Time).To(Equal(expectedPause))

Expect(sveltosCluster.Spec.Paused).To(BeTrue())

// when time is before next unpause, Paused remains set to true and
// NextPause/NextUnpause are not updated
thursday8AM := time.Date(2024, time.August, 1, 8, 0, 0, 0, loc)
controllers.HandleAutomaticPauseUnPause(sveltosCluster, thursday8AM, logger)
Expect(sveltosCluster.Spec.Paused).To(BeTrue())
Expect(sveltosCluster.Status.NextPause.Time).To(Equal(expectedPause))
Expect(sveltosCluster.Status.NextUnpause.Time).To(Equal(expectedUnpause))

// when time is past next unpause but before next pause, Paused is set to false
// and NextPause/NextUnpause are not updated
saturday8AM := time.Date(2024, time.August, 3, 8, 0, 0, 0, loc)
controllers.HandleAutomaticPauseUnPause(sveltosCluster, saturday8AM, logger)
Expect(sveltosCluster.Spec.Paused).To(BeFalse())
Expect(sveltosCluster.Status.NextPause.Time).To(Equal(expectedPause))
Expect(sveltosCluster.Status.NextUnpause.Time).To(Equal(expectedUnpause))

// when time is past next pause, Paused is set to true and
// NextPause/NextUnpause are updated
monday8AM := time.Date(2024, time.August, 5, 8, 0, 0, 0, loc)
controllers.HandleAutomaticPauseUnPause(sveltosCluster, monday8AM, logger)
Expect(sveltosCluster.Spec.Paused).To(BeTrue())
expectedUnpause = time.Date(2024, time.August, 9, 20, 0, 0, 0, loc)
expectedPause = time.Date(2024, time.August, 12, 7, 0, 0, 0, loc)
Expect(sveltosCluster.Status.NextPause.Time).To(Equal(expectedPause))
Expect(sveltosCluster.Status.NextUnpause.Time).To(Equal(expectedUnpause))
})
})

func getSveltosClusterInstance(namespace, name string) *libsveltosv1beta1.SveltosCluster {
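
To recap what these tests pin down, here is a hedged sketch (a hypothetical type, not the controller's API) of the connection-tracking rule added in updateConnectionStatus: a reconcile that leaves FailureMessage set increments a counter, the status flips to down once the counter reaches Spec.ConsecutiveFailureThreshold, and any successful connection resets both:

package main

import "fmt"

type connState struct {
	failures  int
	threshold int
	status    string
}

// observe applies one reconcile outcome, mirroring updateConnectionStatus.
func (c *connState) observe(failed bool) {
	if failed {
		c.failures++
		if c.failures >= c.threshold {
			c.status = "Down"
		}
		return
	}
	// Any successful connection resets the counter and marks the cluster healthy.
	c.failures = 0
	c.status = "Healthy"
}

func main() {
	c := connState{threshold: 3, status: "Healthy"}
	for i := 0; i < 3; i++ {
		c.observe(true)
		fmt.Printf("after failure %d: failures=%d status=%s\n", i+1, c.failures, c.status)
	}
	c.observe(false)
	fmt.Println("after success:", c.status) // Healthy, counter reset
}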