From 67ca656fc2d766d7a744e5b45f88ee151d69abbe Mon Sep 17 00:00:00 2001 From: Chris Randles Date: Sat, 20 Nov 2021 19:10:32 -0500 Subject: [PATCH] add condition helper for checking deployment status and methods for waiting on object lists Signed-off-by: Chris Randles --- examples/wait_for_resources/README.md | 86 +++++++++++++++ examples/wait_for_resources/main_test.go | 43 ++++++++ examples/wait_for_resources/wait_test.go | 128 +++++++++++++++++++++ klient/internal/testutil/setup.go | 76 ++++++++++++- klient/wait/conditions/conditions.go | 135 +++++++++++++++++++++++ klient/wait/wait_test.go | 121 ++++++++++++++++++++ pkg/envfuncs/kind_funcs.go | 63 +++++++++-- 7 files changed, 639 insertions(+), 13 deletions(-) create mode 100644 examples/wait_for_resources/README.md create mode 100644 examples/wait_for_resources/main_test.go create mode 100644 examples/wait_for_resources/wait_test.go diff --git a/examples/wait_for_resources/README.md b/examples/wait_for_resources/README.md new file mode 100644 index 00000000..93179bc1 --- /dev/null +++ b/examples/wait_for_resources/README.md @@ -0,0 +1,86 @@ +# Waiting for Resource Changes + +The test harness supports several methods for querying Kubernetes object types and waiting for conditions to be met. This example shows how to create various wait conditions to drive your tests. + +## Waiting for a single object + +The wait package has built-in utilities for waiting on Pods, Jobs, and Deployments: + +```go +func TestPodRunning(t *testing.T) { + var err error + pod := v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}} + err = wait.For(conditions.New(client.Resources()).PodRunning(pod), WithImmediate()) + if err != nil { + t.Error(err) + } +} +``` + +Additionally, it is easy to wait for changes to any resource type with the `ResourceMatch` method: + +```go +func TestResourceMatch(t *testing.T) { + ... 
+ deployment := appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "deploy-name"}} + err = wait.For(conditions.New(client.Resources()).ResourceMatch(deployment, func(object k8s.Object) bool { + d := object.(*appsv1.Deployment) + return d.Status.AvailableReplicas == 2 && d.Status.ReadyReplicas == 2 + })) + if err != nil { + t.Error(err) + } + ... +} +``` + +## Waiting for a list of objects + +It is common to need to check for the existence of a set of objects by name: + +```go +func TestResourcesFound(t *testing.T) { + ... + pods := &v1.PodList{ + Items: []v1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "p9", Namespace: namespace}}, + {ObjectMeta: metav1.ObjectMeta{Name: "p10", Namespace: namespace}}, + {ObjectMeta: metav1.ObjectMeta{Name: "p11", Namespace: namespace}}, + }, + } + // wait for the set of pods to exist + err = wait.For(conditions.New(client.Resources()).ResourcesFound(pods)) + if err != nil { + t.Error(err) + } + ... +} +``` + +Or to check for their absence: + +```go +func TestResourcesDeleted(t *testing.T) { + ... + pods := &v1.PodList{} + // wait for 1 pod with the label `"app": "d5"` + err = wait.For(conditions.New(client.Resources()).ResourceListN( + pods, + 1, + resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "d5"}))), + ) + if err != nil { + t.Error(err) + } + err = client.Resources().Delete(context.Background(), deployment) + if err != nil { + t.Error(err) + } + // wait for the set of pods to finish deleting + err = wait.For(conditions.New(client.Resources()).ResourcesDeleted(pods)) + if err != nil { + t.Error(err) + } + ... +} +``` diff --git a/examples/wait_for_resources/main_test.go b/examples/wait_for_resources/main_test.go new file mode 100644 index 00000000..fe10358c --- /dev/null +++ b/examples/wait_for_resources/main_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait_for_resources + +import ( + "os" + "testing" + + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" +) + +var testenv env.Environment + +func TestMain(m *testing.M) { + testenv = env.New() + kindClusterName := envconf.RandomName("wait-for-resources", 16) + namespace := envconf.RandomName("kind-ns", 16) + testenv.Setup( + envfuncs.CreateKindCluster(kindClusterName), + envfuncs.CreateNamespace(namespace), + ) + testenv.Finish( + envfuncs.DeleteNamespace(namespace), + envfuncs.DestroyKindCluster(kindClusterName), + ) + os.Exit(testenv.Run(m)) +} diff --git a/examples/wait_for_resources/wait_test.go b/examples/wait_for_resources/wait_test.go new file mode 100644 index 00000000..ef244c4b --- /dev/null +++ b/examples/wait_for_resources/wait_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wait_for_resources + +import ( + "context" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +func TestWaitForResources(t *testing.T) { + depFeature := features.New("appsv1/deployment").WithLabel("env", "dev"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + // create a deployment + deployment := newDeployment(cfg.Namespace(), "test-deployment", 10) + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + if err := client.Resources().Create(ctx, deployment); err != nil { + t.Fatal(err) + } + return ctx + }). + Assess("deployment >=50% available", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + dep := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment", Namespace: cfg.Namespace()}, + } + // wait for the deployment to become at least 50% + err = wait.For(conditions.New(client.Resources()).ResourceMatch(&dep, func(object k8s.Object) bool { + d := object.(*appsv1.Deployment) + return float64(d.Status.ReadyReplicas)/float64(*d.Spec.Replicas) >= 0.50 + }), wait.WithTimeout(time.Minute*1)) + if err != nil { + t.Fatal(err) + } + t.Logf("deployment availability: %.2f%%", float64(dep.Status.ReadyReplicas)/float64(*dep.Spec.Replicas)*100) + return ctx + }). 
+ Assess("deployment available", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + dep := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment", Namespace: cfg.Namespace()}, + } + // wait for the deployment to finish becoming available + err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&dep, appsv1.DeploymentAvailable, v1.ConditionTrue), wait.WithTimeout(time.Minute*1)) + if err != nil { + t.Fatal(err) + } + return ctx + }). + Assess("deployment pod garbage collection", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + // get list of pods + var pods v1.PodList + err = client.Resources(cfg.Namespace()).List(context.TODO(), &pods, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "wait-for-resources"}))) + if err != nil { + t.Fatal(err) + } + // delete the deployment + dep := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "test-deployment", Namespace: cfg.Namespace()}, + } + err = client.Resources(cfg.Namespace()).Delete(context.TODO(), &dep) + if err != nil { + t.Fatal(err) + } + // wait for the deployment pods to be deleted + err = wait.For(conditions.New(client.Resources()).ResourcesDeleted(&pods), wait.WithTimeout(time.Minute*1)) + if err != nil { + t.Fatal(err) + } + return ctx + }).Feature() + + testenv.Test(t, depFeature) +} + +func newDeployment(namespace string, name string, replicas int32) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: map[string]string{"app": "wait-for-resources"}}, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "wait-for-resources"}, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: 
metav1.ObjectMeta{Labels: map[string]string{"app": "wait-for-resources"}}, + Spec: v1.PodSpec{Containers: []v1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } +} diff --git a/klient/internal/testutil/setup.go b/klient/internal/testutil/setup.go index 5bea92dd..e3be1a04 100644 --- a/klient/internal/testutil/setup.go +++ b/klient/internal/testutil/setup.go @@ -17,12 +17,15 @@ limitations under the License. package testutil import ( + "context" "time" - log "k8s.io/klog/v2" - + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + log "k8s.io/klog/v2" "sigs.k8s.io/e2e-framework/klient/conf" "sigs.k8s.io/e2e-framework/support/kind" ) @@ -57,6 +60,9 @@ func SetupTestCluster(path string) *TestCluster { log.Fatalln("failed to create new Client set for kind cluster", err) } tc.Clientset = clientSet + if err := waitForControlPlane(clientSet); err != nil { + log.Fatalln("failed to wait for Kind Cluster control-plane components", err) + } return tc } @@ -73,9 +79,67 @@ func setupKind() (kc *kind.Cluster, err error) { if _, err = kc.Create(); err != nil { return } - - waitPeriod := 10 * time.Second - log.Info("Waiting for kind pods to be initialized...") - time.Sleep(waitPeriod) return } + +func waitForControlPlane(c kubernetes.Interface) error { + selector, err := metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "component", Operator: metav1.LabelSelectorOpIn, Values: []string{"etcd", "kube-apiserver", "kube-controller-manager", "kube-scheduler"}}, + }, + }, + ) + if err != nil { + return err + } + options := metav1.ListOptions{LabelSelector: selector.String()} + log.Info("Waiting for kind control-plane pods to be initialized...") + err = wait.Poll(5*time.Second, time.Minute*2, + func() (bool, error) { + pods, err := c.CoreV1().Pods("kube-system").List(context.TODO(), options) + if err 
!= nil { + return false, err + } + running := 0 + for i := range pods.Items { + if pods.Items[i].Status.Phase == v1.PodRunning { + running++ + } + } + // a kind cluster with one control-plane node will have 4 pods running the core apiserver components + return running >= 4, nil + }) + if err != nil { + return err + } + + selector, err = metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"kindnet", "kube-dns", "kube-proxy"}}, + }, + }, + ) + if err != nil { + return err + } + options = metav1.ListOptions{LabelSelector: selector.String()} + log.Info("Waiting for kind networking pods to be initialized...") + err = wait.Poll(5*time.Second, time.Minute*2, + func() (bool, error) { + pods, err := c.CoreV1().Pods("kube-system").List(context.TODO(), options) + if err != nil { + return false, err + } + running := 0 + for i := range pods.Items { + if pods.Items[i].Status.Phase == v1.PodRunning { + running++ + } + } + // a kind cluster with one control-plane node will have 4 k8s-app pods running networking components + return running >= 4, nil + }) + return err +} diff --git a/klient/wait/conditions/conditions.go b/klient/wait/conditions/conditions.go index 61ee5401..bcab5d53 100644 --- a/klient/wait/conditions/conditions.go +++ b/klient/wait/conditions/conditions.go @@ -22,9 +22,11 @@ import ( log "k8s.io/klog/v2" + appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" apimachinerywait "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/e2e-framework/klient/k8s" @@ -58,6 +60,124 @@ func (c *Condition) ResourceScaled(obj k8s.Object, scaleFetcher func(object k8s. } } +// ResourceMatch is a helper function used to check if the resource under question has met a pre-defined state. 
This can +// be leveraged for checking fields on a resource that may not be immediately present upon creation. +func (c *Condition) ResourceMatch(obj k8s.Object, matchFetcher func(object k8s.Object) bool) apimachinerywait.ConditionFunc { + return func() (done bool, err error) { + if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); err != nil { + return false, nil + } + return matchFetcher(obj), nil + } +} + +// ResourceListN is a helper function that can be used to check for a minimum number of returned objects in a list. This function +// accepts list options that can be used to adjust the set of objects queried for in the List resource operation. +func (c *Condition) ResourceListN(list k8s.ObjectList, n int, listOptions ...resources.ListOption) apimachinerywait.ConditionFunc { + return c.ResourceListMatchN(list, n, func(object k8s.Object) bool { return true }, listOptions...) +} + +// ResourceListMatchN is a helper function that can be used to check for a minimum number of returned objects in a list. This function +// accepts list options and a match function that can be used to adjust the set of objects queried for in the List resource operation. +func (c *Condition) ResourceListMatchN(list k8s.ObjectList, n int, matchFetcher func(object k8s.Object) bool, listOptions ...resources.ListOption) apimachinerywait.ConditionFunc { + return func() (done bool, err error) { + if err := c.resources.List(context.TODO(), list, listOptions...); err != nil { + return false, nil + } + var found int + metaList, err := meta.ExtractList(list) + if err != nil { + return false, err + } + for _, obj := range metaList { + if o, ok := obj.(k8s.Object); ok && matchFetcher(o) { + found++ + } else if !ok { + return false, fmt.Errorf("condition: unexpected type %T in list, does not satisfy k8s.Object", obj) + } + } + return found >= n, nil + } +} + +// ResourcesFound is a helper function that can be used to check for a set of objects. 
This function accepts a list +// of named objects and will wait until it is able to retrieve each. +func (c *Condition) ResourcesFound(list k8s.ObjectList) apimachinerywait.ConditionFunc { + return c.ResourcesMatch(list, func(object k8s.Object) bool { return true }) +} + +// ResourcesMatch is a helper function that can be used to check for a set of objects. This function accepts a list +// of named objects and a match function, and will wait until it is able to retrieve each while passing the match validation. +func (c *Condition) ResourcesMatch(list k8s.ObjectList, matchFetcher func(object k8s.Object) bool) apimachinerywait.ConditionFunc { + metaList, err := meta.ExtractList(list) + if err != nil { + return func() (done bool, err error) { return false, err } + } + objects := make(map[k8s.Object]bool) + for _, o := range metaList { + obj, ok := o.(k8s.Object) + if !ok { + return func() (done bool, err error) { + return false, fmt.Errorf("condition: unexpected type %T in list, does not satisfy k8s.Object", obj) + } + } + if obj.GetName() != "" { + objects[obj] = false + } + } + return func() (done bool, err error) { + found := 0 + for obj, created := range objects { + if !created { + if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); errors.IsNotFound(err) { + continue + } else if err != nil { + return false, err + } + if !matchFetcher(obj) { + continue + } + } + objects[obj] = true + found++ + } + return len(objects) == found, nil + } +} + +// ResourcesDeleted is a helper function that can be used to check for if a set of objects has been deleted. This function +// accepts a list of named objects and will wait until it is not able to find each. 
+func (c *Condition) ResourcesDeleted(list k8s.ObjectList) apimachinerywait.ConditionFunc { + metaList, err := meta.ExtractList(list) + if err != nil { + return func() (done bool, err error) { return false, err } + } + objects := make(map[k8s.Object]bool) + for _, o := range metaList { + obj, ok := o.(k8s.Object) + if !ok { + return func() (done bool, err error) { + return false, fmt.Errorf("condition: unexpected type %T in list, does not satisfy k8s.Object", obj) + } + } + if obj.GetName() != "" { + objects[obj] = true + } + } + return func() (done bool, err error) { + for obj, created := range objects { + if created { + if err := c.resources.Get(context.TODO(), obj.GetName(), obj.GetNamespace(), obj); errors.IsNotFound(err) { + delete(objects, obj) + } else if err != nil { + return false, err + } + } + } + return len(objects) == 0, nil + } +} + // ResourceDeleted is a helper function used to check if a resource under question has been deleted. This will enable // testing cases where the resource have a finalizer and the DELETE operation of such resource have been triggered and // you want to wait until the resource has been deleted. @@ -97,6 +217,21 @@ func (c *Condition) JobConditionMatch(job k8s.Object, conditionType batchv1.JobC } } +// DeploymentConditionMatch is a helper function that can be used to check a specific condition match for the Deployment in question. 
+func (c *Condition) DeploymentConditionMatch(deployment k8s.Object, conditionType appsv1.DeploymentConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionFunc { + return func() (done bool, err error) { + if err := c.resources.Get(context.TODO(), deployment.GetName(), deployment.GetNamespace(), deployment); err != nil { + return false, err + } + for _, cond := range deployment.(*appsv1.Deployment).Status.Conditions { + if cond.Type == conditionType && cond.Status == conditionState { + done = true + } + } + return + } +} + // PodConditionMatch is a helper function that can be used to check a specific condition match for the Pod in question. // This is extended into a few simplified match helpers such as PodReady and ContainersReady as well. func (c *Condition) PodConditionMatch(pod k8s.Object, conditionType v1.PodConditionType, conditionState v1.ConditionStatus) apimachinerywait.ConditionFunc { diff --git a/klient/wait/wait_test.go b/klient/wait/wait_test.go index 4ea6f562..76bb8cdb 100644 --- a/klient/wait/wait_test.go +++ b/klient/wait/wait_test.go @@ -25,8 +25,11 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" "sigs.k8s.io/e2e-framework/klient/wait/conditions" ) @@ -115,3 +118,121 @@ func TestResourceScaled(t *testing.T) { } log.Info("Done") } + +func TestDeploymentConditionMatch(t *testing.T) { + var err error + deployment := createDeployment("d2", 3, t) + err = For(conditions.New(getResourceManager()).DeploymentConditionMatch(deployment, appsv1.DeploymentAvailable, v1.ConditionTrue)) + if err != nil { + t.Error("failed waiting for deployment to become available", err) + } + log.Info("Done") +} + +func TestResourceListN(t *testing.T) { + var err error + createDeployment("d3", 4, t) + pods := &v1.PodList{} + err = 
For(conditions.New(getResourceManager()).ResourceListN(pods, 4, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "d3"})))) + if err != nil { + t.Error("failed waiting for deployment pods to be created", err) + } + log.Info("Done") +} + +func TestResourceListMatchN(t *testing.T) { + var err error + createDeployment("d4", 5, t) + pods := &v1.PodList{} + err = For(conditions.New(getResourceManager()).ResourceListMatchN(pods, 5, func(object k8s.Object) bool { + for _, c := range object.(*v1.Pod).Spec.Containers { + if c.Image == "nginx" { + return true + } + } + return false + }, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "d4"})))) + if err != nil { + t.Error("failed waiting for deployment pods with nginx containers to be created", err) + } + log.Info("Done") +} + +func TestResourcesMatch(t *testing.T) { + var err error + go func() { + createPod("p6", t) + createPod("p7", t) + createPod("p8", t) + }() + pods := &v1.PodList{ + Items: []v1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "p6", Namespace: namespace}}, + {ObjectMeta: metav1.ObjectMeta{Name: "p7", Namespace: namespace}}, + {ObjectMeta: metav1.ObjectMeta{Name: "p8", Namespace: namespace}}, + }, + } + err = For(conditions.New(getResourceManager()).ResourcesMatch(pods, func(object k8s.Object) bool { + return object.(*v1.Pod).Status.Phase == v1.PodRunning + })) + if err != nil { + t.Error("failed waiting for deployment pods to start running", err) + } + log.Info("Done") +} + +func TestResourcesFound(t *testing.T) { + var err error + go func() { + createPod("p9", t) + createPod("p10", t) + createPod("p11", t) + }() + pods := &v1.PodList{ + Items: []v1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "p9", Namespace: namespace}}, + {ObjectMeta: metav1.ObjectMeta{Name: "p10", Namespace: namespace}}, + {ObjectMeta: metav1.ObjectMeta{Name: "p11", Namespace: namespace}}, + }, + } + err = For(conditions.New(getResourceManager()).ResourcesFound(pods)) + if err != nil { + 
t.Error("failed waiting for deployment pods to be created", err) + } + log.Info("Done") +} + +func TestResourcesDeleted(t *testing.T) { + var err error + deployment := createDeployment("d5", 1, t) + pods := &v1.PodList{} + err = For(conditions.New(getResourceManager()).ResourceListN(pods, 1, resources.WithLabelSelector(labels.FormatLabels(map[string]string{"app": "d5"})))) + if err != nil { + t.Error("failed waiting for deployment pods to be created", err) + } + err = getResourceManager().Delete(context.Background(), deployment) + if err != nil { + t.Error("failed to delete deployment due to an error", err) + } + err = For(conditions.New(getResourceManager()).ResourcesDeleted(pods)) + if err != nil { + t.Error("failed waiting for pods to be deleted", err) + } + log.Info("Done") +} + +func TestResourceMatch(t *testing.T) { + var err error + deployment := createDeployment("d6", 2, t) + err = For(conditions.New(getResourceManager()).ResourceMatch(deployment, func(object k8s.Object) bool { + d, ok := object.(*appsv1.Deployment) + if !ok { + t.Fatalf("unexpected type %T in list, does not satisfy *appsv1.Deployment", object) + } + return d.Status.AvailableReplicas == 2 && d.Status.ReadyReplicas == 2 + })) + if err != nil { + t.Error("failed waiting for deployment replicas", err) + } + log.Info("Done") +} diff --git a/pkg/envfuncs/kind_funcs.go b/pkg/envfuncs/kind_funcs.go index c227eae5..a70e563a 100644 --- a/pkg/envfuncs/kind_funcs.go +++ b/pkg/envfuncs/kind_funcs.go @@ -19,8 +19,13 @@ package envfuncs import ( "context" "fmt" - "time" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" "sigs.k8s.io/e2e-framework/pkg/env" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/support/kind" @@ -43,11 +48,14 @@ func CreateKindCluster(clusterName string) 
env.Func { return ctx, err } - // stall, wait for pods initializations - time.Sleep(7 * time.Second) - // update envconfig with kubeconfig cfg.WithKubeconfigFile(kubecfg) + + // stall, wait for pods initializations + if err := waitForControlPlane(cfg.Client()); err != nil { + return ctx, err + } + // store entire cluster value in ctx for future access using the cluster name return context.WithValue(ctx, kindContextKey(clusterName), k), nil } @@ -68,16 +76,57 @@ func CreateKindClusterWithConfig(clusterName, image, configFilePath string) env. return ctx, err } - // stall, wait for pods initializations - time.Sleep(7 * time.Second) - // update envconfig with kubeconfig cfg.WithKubeconfigFile(kubecfg) + + // stall, wait for pods initializations + if err := waitForControlPlane(cfg.Client()); err != nil { + return ctx, err + } + // store entire cluster value in ctx for future access using the cluster name return context.WithValue(ctx, kindContextKey(clusterName), k), nil } } +func waitForControlPlane(client klient.Client) error { + r, err := resources.New(client.RESTConfig()) + if err != nil { + return err + } + selector, err := metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "component", Operator: metav1.LabelSelectorOpIn, Values: []string{"etcd", "kube-apiserver", "kube-controller-manager", "kube-scheduler"}}, + }, + }, + ) + if err != nil { + return err + } + // a kind cluster with one control-plane node will have 4 pods running the core apiserver components + err = wait.For(conditions.New(r).ResourceListN(&v1.PodList{}, 4, resources.WithLabelSelector(selector.String()))) + if err != nil { + return err + } + selector, err = metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"kindnet", "kube-dns", "kube-proxy"}}, + }, + }, + ) + if err != nil { + return err + } + // a 
kind cluster with one control-plane node will have 4 k8s-app pods running networking components + err = wait.For(conditions.New(r).ResourceListN(&v1.PodList{}, 4, resources.WithLabelSelector(selector.String()))) + if err != nil { + return err + } + return nil +} + // DestroyKindCluster returns an EnvFunc that // retrieves a previously saved kind Cluster in the context (using the name), then deletes it. //