fixups
sbueringer committed Oct 1, 2024
1 parent 508d9ea commit 85a6f24
Showing 11 changed files with 76 additions and 20 deletions.
2 changes: 1 addition & 1 deletion controllers/clustercache/cluster_accessor.go
@@ -66,7 +66,7 @@ type clusterAccessorConfig struct {
// with the following name format: "<cluster-name>-kubeconfig".
// Ideally this is a client that caches only kubeconfig secrets, it is highly recommended to avoid caching all secrets.
// An example on how to create an ideal secret caching client can be found in the core Cluster API controller main.go file.
- SecretClient client.Client
+ SecretClient client.Reader

// ControllerPodMetadata is the Pod metadata of the controller using this ClusterCache.
// This is only set when the POD_NAMESPACE, POD_NAME and POD_UID environment variables are set.
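The SecretClient doc comment above points to the core Cluster API controller's main.go for an example of a secret caching client. As a rough sketch only (newSecretCachingClient and its wiring are illustrative, not the upstream code), such a client can be assembled with controller-runtime by backing reads with a dedicated cache:

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newSecretCachingClient returns a client whose reads are served from a dedicated cache.
// controller-runtime creates informers lazily per type, so a client that is only ever used
// to read kubeconfig Secrets ends up caching Secrets only; cache.Options.ByObject could
// additionally restrict the cached Secrets with a label selector.
func newSecretCachingClient(mgr ctrl.Manager) (client.Client, error) {
	secretCache, err := cache.New(mgr.GetConfig(), cache.Options{
		HTTPClient: mgr.GetHTTPClient(),
		Scheme:     mgr.GetScheme(),
		Mapper:     mgr.GetRESTMapper(),
	})
	if err != nil {
		return nil, err
	}
	// The cache is a Runnable; the manager starts and stops it.
	if err := mgr.Add(secretCache); err != nil {
		return nil, err
	}
	// Delegating client: reads are served from secretCache, writes go directly to the API server.
	return client.New(mgr.GetConfig(), client.Options{
		HTTPClient: mgr.GetHTTPClient(),
		Scheme:     mgr.GetScheme(),
		Mapper:     mgr.GetRESTMapper(),
		Cache: &client.CacheOptions{
			Reader: secretCache,
		},
	})
}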
4 changes: 2 additions & 2 deletions controllers/clustercache/cluster_cache.go
@@ -52,7 +52,7 @@ type Options struct {
// with the following name format: "<cluster-name>-kubeconfig".
// Ideally this is a client that caches only kubeconfig secrets, it is highly recommended to avoid caching all secrets.
// An example on how to create an ideal secret caching client can be found in the core Cluster API controller main.go file.
- SecretClient client.Client
+ SecretClient client.Reader

// WatchFilterValue is the label value used to filter events prior to reconciliation.
WatchFilterValue string
@@ -276,7 +276,7 @@ func SetupWithManager(ctx context.Context, mgr manager.Manager, options Options,
}

type clusterCache struct {
- client client.Client
+ client client.Reader

// clusterAccessorConfig is the config for clusterAccessors.
clusterAccessorConfig *clusterAccessorConfig
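Pulling the pieces above together, setup looks roughly like the sketch below. The Options fields (SecretClient, WatchFilterValue, Client.UserAgent) and the SetupWithManager/controller.Options shape are taken from the hunks in this commit; the ClusterCache return type, the user agent string, and the surrounding variables are assumptions for illustration:

package main

import (
	"context"

	"sigs.k8s.io/cluster-api/controllers/clustercache"
	"sigs.k8s.io/cluster-api/controllers/remote"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// setupClusterCache wires up the ClusterCache. secretCachingClient is assumed to be a
// Secret-only caching reader (see the previous sketch); it satisfies the new
// client.Reader type of the SecretClient field.
func setupClusterCache(ctx context.Context, mgr manager.Manager, secretCachingClient client.Reader) (clustercache.ClusterCache, error) {
	return clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
		// Reads "<cluster-name>-kubeconfig" Secrets from the management cluster.
		SecretClient: secretCachingClient,
		// Optional label value used to filter events prior to reconciliation.
		WatchFilterValue: "",
		Client: clustercache.ClientOptions{
			UserAgent: remote.DefaultClusterAPIUserAgent("cluster-api-controller-manager"),
		},
	}, controller.Options{MaxConcurrentReconciles: 10})
}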
5 changes: 3 additions & 2 deletions internal/controllers/cluster/suite_test.go
@@ -76,8 +76,9 @@ func TestMain(m *testing.M) {
}

if err := (&Reconciler{
- Client: mgr.GetClient(),
- APIReader: mgr.GetClient(),
+ Client: mgr.GetClient(),
+ APIReader: mgr.GetClient(),
+ ClusterCache: clusterCache,
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err))
}
34 changes: 23 additions & 11 deletions internal/controllers/machine/machine_controller_noderef_test.go
@@ -58,6 +58,11 @@ func TestGetNode(t *testing.T) {
}

g.Expect(env.Create(ctx, testCluster)).To(Succeed())
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patch := client.MergeFrom(testCluster.DeepCopy())
+ testCluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())
+
g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
defer func(do ...client.Object) {
g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
@@ -137,7 +142,7 @@ func TestGetNode(t *testing.T) {
Client: clustercache.ClientOptions{
UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"),
},
- }, controller.Options{MaxConcurrentReconciles: 10})
+ }, controller.Options{MaxConcurrentReconciles: 10, SkipNameValidation: ptr.To(true)})
if err != nil {
panic(fmt.Sprintf("Failed to create new cluster cache tracker: %v", err))
}
@@ -150,15 +155,18 @@ func TestGetNode(t *testing.T) {
w, err := ctrl.NewControllerManagedBy(env.Manager).For(&corev1.Node{}).Build(r)
g.Expect(err).ToNot(HaveOccurred())

- g.Expect(clusterCache.Watch(ctx, clustercache.WatchInput{
- Name: "TestGetNode",
- Cluster: util.ObjectKey(testCluster),
- Watcher: w,
- Kind: &corev1.Node{},
- EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request {
- return nil
- }),
- })).To(Succeed())
+ // Retry because the ClusterCache might not have immediately created the clusterAccessor.
+ g.Eventually(func(g Gomega) {
+ g.Expect(clusterCache.Watch(ctx, clustercache.WatchInput{
+ Name: "TestGetNode",
+ Cluster: util.ObjectKey(testCluster),
+ Watcher: w,
+ Kind: &corev1.Node{},
+ EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request {
+ return nil
+ }),
+ })).To(Succeed())
+ }, 1*time.Minute, 5*time.Second).Should(Succeed())

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
@@ -339,7 +347,11 @@ func TestNodeLabelSync(t *testing.T) {

g.Expect(env.Create(ctx, cluster)).To(Succeed())
defaultKubeconfigSecret := kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
- g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+ g.Expect(env.CreateAndWait(ctx, defaultKubeconfigSecret)).To(Succeed())
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patch := client.MergeFrom(cluster.DeepCopy())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())

g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
// Set InfrastructureMachine .status.interruptible and .status.ready to true.
12 changes: 12 additions & 0 deletions internal/controllers/machine/machine_controller_phases_test.go
@@ -278,6 +278,10 @@ func TestReconcileMachinePhases(t *testing.T) {
g.Expect(env.Create(ctx, cluster)).To(Succeed())
defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patch := client.MergeFrom(cluster.DeepCopy())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())

g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
@@ -363,6 +367,10 @@ func TestReconcileMachinePhases(t *testing.T) {
g.Expect(env.Create(ctx, cluster)).To(Succeed())
defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patch := client.MergeFrom(cluster.DeepCopy())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())

g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
@@ -437,6 +445,10 @@ func TestReconcileMachinePhases(t *testing.T) {
g.Expect(env.Create(ctx, cluster)).To(Succeed())
defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patch := client.MergeFrom(cluster.DeepCopy())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())

g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
5 changes: 5 additions & 0 deletions internal/controllers/machine/machine_controller_test.go
@@ -108,6 +108,11 @@ func TestWatches(t *testing.T) {

g.Expect(env.Create(ctx, testCluster)).To(Succeed())
g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ testClusterOriginal := client.MergeFrom(testCluster.DeepCopy())
+ testCluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, testCluster, testClusterOriginal)).To(Succeed())
+
g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed())
g.Expect(env.Create(ctx, node)).To(Succeed())
g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
@@ -2377,6 +2377,12 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster {

g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())

+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patchHelper, err = patch.NewHelper(cluster, env.Client)
+ g.Expect(err).ToNot(HaveOccurred())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
+
return cluster
}

5 changes: 3 additions & 2 deletions internal/controllers/machinehealthcheck/suite_test.go
@@ -79,8 +79,9 @@ func TestMain(m *testing.M) {
}

if err := (&clustercontroller.Reconciler{
- Client: mgr.GetClient(),
- APIReader: mgr.GetClient(),
+ Client: mgr.GetClient(),
+ APIReader: mgr.GetClient(),
+ ClusterCache: clusterCache,
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err))
}
5 changes: 5 additions & 0 deletions internal/controllers/machineset/machineset_controller_test.go
@@ -61,6 +61,11 @@ func TestMachineSetReconciler(t *testing.T) {
t.Log("Creating the Cluster Kubeconfig Secret")
g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())

+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+ patch := client.MergeFrom(cluster.DeepCopy())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
+
return ns, cluster
}

14 changes: 13 additions & 1 deletion internal/controllers/topology/cluster/cluster_controller_test.go
@@ -894,6 +894,18 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
return cleanup, err
}
}
+ // Set InfrastructureReady to true so ClusterCache creates the clusterAccessors.
+ patch := client.MergeFrom(cluster1.DeepCopy())
+ cluster1.Status.InfrastructureReady = true
+ if err := env.Status().Patch(ctx, cluster1, patch); err != nil {
+ return nil, err
+ }
+ patch = client.MergeFrom(cluster2.DeepCopy())
+ cluster2.Status.InfrastructureReady = true
+ if err := env.Status().Patch(ctx, cluster2, patch); err != nil {
+ return nil, err
+ }
+
return cleanup, nil
}

@@ -1041,7 +1053,7 @@ func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error {

// Check replicas and version for the MachineDeployment.
if *md.Spec.Replicas != *topologyMD.Replicas {
return fmt.Errorf("replicas %v does not match expected %v", md.Spec.Replicas, topologyMD.Replicas)
return fmt.Errorf("replicas %v does not match expected %v", *md.Spec.Replicas, *topologyMD.Replicas)
}
if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version {
return fmt.Errorf("version %v does not match expected %v", *md.Spec.Template.Spec.Version, cluster.Spec.Topology.Version)
4 changes: 3 additions & 1 deletion internal/test/envtest/environment.go
@@ -77,7 +77,9 @@ func init() {
// Otherwise it would fall back and log to os.Stderr.
// This would lead to race conditions because input.M.Run() writes os.Stderr
// while some go routines in controller-runtime use os.Stderr to write logs.
- if err := logsv1.ValidateAndApply(logs.NewOptions(), nil); err != nil {
+ logOptions := logs.NewOptions()
+ logOptions.Verbosity = logsv1.VerbosityLevel(6) // FIXME: change to 2 before merge
+ if err := logsv1.ValidateAndApply(logOptions, nil); err != nil {
klog.ErrorS(err, "Unable to validate and apply log options")
os.Exit(1)
}
