From 6a755b5f6e9008f2f78707ea2c4bec801e5ee088 Mon Sep 17 00:00:00 2001 From: Harsha Narayana Date: Sat, 13 Jul 2024 20:03:34 +0530 Subject: [PATCH] provider: added k3d provider and lifecycle handlers This change includes the following changes and features. 1. Added a new Interface type `E2EClusterProviderWithImageLoaderAndNodeLifecycle` which can be used to setup providers that extend the cluster lifecycle function around the nodes. 2. Enabled a `k3d` based provider with support for Node lifecycle management. 3. Existing Image loader related function and interfaces were augmented with `args ...string` to be able to provide additional arguments in case if the image load handlers need some of the additional config. --- examples/k3d/k3d_test.go | 95 +++++++ examples/k3d/main_test.go | 51 ++++ pkg/envfuncs/provider_funcs.go | 76 ++++- pkg/stepfuncs/nodelifecycle_funcs.go | 67 +++++ support/k3d/k3d.go | 399 +++++++++++++++++++++++++++ support/kind/kind.go | 4 +- support/types.go | 60 +++- support/utils/command.go | 68 +++-- 8 files changed, 786 insertions(+), 34 deletions(-) create mode 100644 examples/k3d/k3d_test.go create mode 100644 examples/k3d/main_test.go create mode 100644 pkg/stepfuncs/nodelifecycle_funcs.go create mode 100644 support/k3d/k3d.go diff --git a/examples/k3d/k3d_test.go b/examples/k3d/k3d_test.go new file mode 100644 index 00000000..4d8a8989 --- /dev/null +++ b/examples/k3d/k3d_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k3d + +import ( + "context" + "fmt" + "testing" + "time" + + "sigs.k8s.io/e2e-framework/pkg/stepfuncs" + "sigs.k8s.io/e2e-framework/support" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +func newDeployment(namespace string, name string, replicaCount int32) *appsv1.Deployment { + podSpec := corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "my-container", + Image: "nginx", + }, + }, + } + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: map[string]string{"app": "test-app"}}, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test-app"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "test-app"}}, + Spec: podSpec, + }, + }, + } +} + +func TestK3DCluster(t *testing.T) { + deploymentFeature := features.New("Should be able to create a new deployment in the k3d cluster"). 
+ Assess("Create a new deployment", func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + deployment := newDeployment(c.Namespace(), "test-deployment", 1) + if err := c.Client().Resources().Create(ctx, deployment); err != nil { + t.Fatal(err) + } + var dep appsv1.Deployment + if err := c.Client().Resources().Get(ctx, "test-deployment", c.Namespace(), &dep); err != nil { + t.Fatal(err) + } + err := wait.For(conditions.New(c.Client().Resources()).DeploymentConditionMatch(&dep, appsv1.DeploymentAvailable, corev1.ConditionTrue), wait.WithTimeout(time.Minute*3)) + if err != nil { + t.Fatal(err) + } + return context.WithValue(ctx, "test-deployment", &dep) + }). + Feature() + + nodeAddFeature := features.New("Should be able to add a new node to the k3d cluster"). + Setup(stepfuncs.PerformNodeOperation(clusterName, support.AddNode, &support.Node{ + Name: fmt.Sprintf("%s-agent", clusterName), + Cluster: clusterName, + Role: "agent", + })). + Assess("Check if the node is added to the cluster", func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + var node corev1.Node + if err := c.Client().Resources().Get(ctx, fmt.Sprintf("k3d-%s-agent-0", clusterName), c.Namespace(), &node); err != nil { + t.Fatal(err) + } + return ctx + }).Feature() + + testEnv.Test(t, deploymentFeature, nodeAddFeature) +} diff --git a/examples/k3d/main_test.go b/examples/k3d/main_test.go new file mode 100644 index 00000000..7c585310 --- /dev/null +++ b/examples/k3d/main_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k3d + +import ( + "os" + "testing" + + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/k3d" +) + +var ( + testEnv env.Environment + clusterName string +) + +func TestMain(m *testing.M) { + testEnv = env.New() + clusterName = envconf.RandomName("test", 16) + namespace := envconf.RandomName("k3d-ns", 16) + + testEnv.Setup( + envfuncs.CreateClusterWithOpts(k3d.NewProvider(), clusterName, k3d.WithImage("rancher/k3s:v1.29.6-k3s1")), + envfuncs.CreateNamespace(namespace), + envfuncs.LoadImageToCluster(clusterName, "rancher/k3s:v1.29.6-k3s1", "--verbose", "--mode", "direct"), + ) + + testEnv.Finish( + envfuncs.DeleteNamespace(namespace), + envfuncs.DestroyCluster(clusterName), + ) + + os.Exit(testEnv.Run(m)) +} diff --git a/pkg/envfuncs/provider_funcs.go b/pkg/envfuncs/provider_funcs.go index 4d2b6ac6..9eb81b14 100644 --- a/pkg/envfuncs/provider_funcs.go +++ b/pkg/envfuncs/provider_funcs.go @@ -25,14 +25,12 @@ import ( "sigs.k8s.io/e2e-framework/support" ) -type clusterNameContextKey string - var LoadDockerImageToCluster = LoadImageToCluster // GetClusterFromContext helps extract the E2EClusterProvider object from the context. // This can be used to setup and run tests of multi cluster e2e Prioviders. 
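+//
+// A minimal usage sketch (clusterName is assumed to be the same name the cluster was created with):
+//
+//	if cluster, ok := envfuncs.GetClusterFromContext(ctx, clusterName); ok {
+//		kubeConfig := cluster.GetKubeconfig()
+//		...
+//	}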
func GetClusterFromContext(ctx context.Context, clusterName string) (support.E2EClusterProvider, bool) { - c := ctx.Value(clusterNameContextKey(clusterName)) + c := ctx.Value(support.ClusterNameContextKey(clusterName)) if c == nil { return nil, false } @@ -47,8 +45,19 @@ func GetClusterFromContext(ctx context.Context, clusterName string) (support.E2E // NOTE: the returned function will update its env config with the // kubeconfig file for the config client. func CreateCluster(p support.E2EClusterProvider, clusterName string) env.Func { + return CreateClusterWithOpts(p, clusterName) +} + +// CreateClusterWithOpts returns an env.Func that is used to +// create an E2E provider cluster that is then injected in the context +// using the name as a key. This can be provided with additional opts to extend the create +// workflow of the cluster. +// +// NOTE: the returned function will update its env config with the +// kubeconfig file for the config client. +func CreateClusterWithOpts(p support.E2EClusterProvider, clusterName string, opts ...support.ClusterOpts) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - k := p.SetDefaults().WithName(clusterName) + k := p.SetDefaults().WithName(clusterName).WithOpts(opts...) kubecfg, err := k.Create(ctx) if err != nil { return ctx, err @@ -63,7 +72,7 @@ func CreateCluster(p support.E2EClusterProvider, clusterName string) env.Func { } // store entire cluster value in ctx for future access using the cluster name - return context.WithValue(ctx, clusterNameContextKey(clusterName), k), nil + return context.WithValue(ctx, support.ClusterNameContextKey(clusterName), k), nil } } @@ -90,7 +99,7 @@ func CreateClusterWithConfig(p support.E2EClusterProvider, clusterName, configFi } // store entire cluster value in ctx for future access using the cluster name - return context.WithValue(ctx, clusterNameContextKey(clusterName), k), nil + return context.WithValue(ctx, support.ClusterNameContextKey(clusterName), k), nil } } @@ -100,7 +109,7 @@ func CreateClusterWithConfig(p support.E2EClusterProvider, clusterName, configFi // NOTE: this should be used in a Environment.Finish step. func DestroyCluster(name string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(clusterNameContextKey(name)) + clusterVal := ctx.Value(support.ClusterNameContextKey(name)) if clusterVal == nil { return ctx, fmt.Errorf("destroy e2e provider cluster func: context cluster is nil") } @@ -121,9 +130,9 @@ func DestroyCluster(name string) env.Func { // LoadImageToCluster returns an EnvFunc that // retrieves a previously saved e2e provider Cluster in the context (using the name), and then loads a container image // from the host into the cluster. 
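+//
+// Any additional args are passed through to the provider's image load helper. For example, with the
+// k3d provider (the flags shown are the ones used in the k3d example test; other providers may ignore them):
+//
+//	envfuncs.LoadImageToCluster(clusterName, "rancher/k3s:v1.29.6-k3s1", "--verbose", "--mode", "direct")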
-func LoadImageToCluster(name, image string) env.Func { +func LoadImageToCluster(name, image string, args ...string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(clusterNameContextKey(name)) + clusterVal := ctx.Value(support.ClusterNameContextKey(name)) if clusterVal == nil { return ctx, fmt.Errorf("load image func: context cluster is nil") } @@ -133,7 +142,7 @@ func LoadImageToCluster(name, image string) env.Func { return ctx, fmt.Errorf("load image archive func: cluster provider does not support LoadImage helper") } - if err := cluster.LoadImage(ctx, image); err != nil { + if err := cluster.LoadImage(ctx, image, args...); err != nil { return ctx, fmt.Errorf("load image: %w", err) } @@ -144,9 +153,9 @@ func LoadImageToCluster(name, image string) env.Func { // LoadImageArchiveToCluster returns an EnvFunc that // retrieves a previously saved e2e provider Cluster in the context (using the name), and then loads a container image TAR archive // from the host into the cluster. -func LoadImageArchiveToCluster(name, imageArchive string) env.Func { +func LoadImageArchiveToCluster(name, imageArchive string, args ...string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(clusterNameContextKey(name)) + clusterVal := ctx.Value(support.ClusterNameContextKey(name)) if clusterVal == nil { return ctx, fmt.Errorf("load image archive func: context cluster is nil") } @@ -156,7 +165,7 @@ func LoadImageArchiveToCluster(name, imageArchive string) env.Func { return ctx, fmt.Errorf("load image archive func: cluster provider does not support LoadImageArchive helper") } - if err := cluster.LoadImageArchive(ctx, imageArchive); err != nil { + if err := cluster.LoadImageArchive(ctx, imageArchive, args...); err != nil { return ctx, fmt.Errorf("load image archive: %w", err) } @@ -169,7 +178,7 @@ func LoadImageArchiveToCluster(name, imageArchive string) env.Func { // in the provided destination. func ExportClusterLogs(name, dest string) env.Func { return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { - clusterVal := ctx.Value(clusterNameContextKey(name)) + clusterVal := ctx.Value(support.ClusterNameContextKey(name)) if clusterVal == nil { return ctx, fmt.Errorf("export e2e provider cluster logs: context cluster is nil") } @@ -186,3 +195,42 @@ func ExportClusterLogs(name, dest string) env.Func { return ctx, nil } } + +// PerformNodeOperation returns an EnvFunc that can be used to perform some node lifecycle operations. +// This can be used to add/remove/start/stop nodes in the cluster. 
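+//
+// The cluster stored under clusterName must implement E2EClusterProviderWithImageLoaderAndNodeLifecycle (e.g. k3d).
+// A minimal sketch of adding an agent node during environment setup (values mirror the k3d example test):
+//
+//	testEnv.Setup(
+//		envfuncs.PerformNodeOperation(clusterName, support.AddNode, &support.Node{
+//			Name:    fmt.Sprintf("%s-agent", clusterName),
+//			Cluster: clusterName,
+//			Role:    "agent",
+//		}),
+//	)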
+func PerformNodeOperation(clusterName string, action support.NodeOperation, node *support.Node, args ...string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + clusterVal := ctx.Value(support.ClusterNameContextKey(clusterName)) + if clusterVal == nil { + return ctx, fmt.Errorf("%s node to cluster: context cluster is nil", action) + } + + cluster, ok := clusterVal.(support.E2EClusterProviderWithImageLoaderAndNodeLifecycle) + if !ok { + return ctx, fmt.Errorf("%s node to cluster: unexpected type for cluster value", action) + } + + switch action { + case support.AddNode: + if err := cluster.AddNode(ctx, node, args...); err != nil { + return ctx, fmt.Errorf("add node to cluster: %w", err) + } + case support.RemoveNode: + if err := cluster.RemoveNode(ctx, node, args...); err != nil { + return ctx, fmt.Errorf("remove node from cluster: %w", err) + } + case support.StartNode: + if err := cluster.StartNode(ctx, node, args...); err != nil { + return ctx, fmt.Errorf("start node in cluster: %w", err) + } + case support.StopNode: + if err := cluster.StopNode(ctx, node, args...); err != nil { + return ctx, fmt.Errorf("stop node in cluster: %w", err) + } + default: + return ctx, fmt.Errorf("unknown node operation: %s", action) + } + + return ctx, nil + } +} diff --git a/pkg/stepfuncs/nodelifecycle_funcs.go b/pkg/stepfuncs/nodelifecycle_funcs.go new file mode 100644 index 00000000..4dd65305 --- /dev/null +++ b/pkg/stepfuncs/nodelifecycle_funcs.go @@ -0,0 +1,67 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stepfuncs + +import ( + "context" + "testing" + + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/types" + "sigs.k8s.io/e2e-framework/support" +) + +// PerformNodeOperation returns a step function that performs a node operation on a cluster. +// This can be integrated as a setup function for a feature in question before the feature +// is tested. 
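+//
+// For example, as used in the k3d example test to add an agent node before a feature runs:
+//
+//	features.New("Should be able to add a new node to the k3d cluster").
+//		Setup(stepfuncs.PerformNodeOperation(clusterName, support.AddNode, &support.Node{
+//			Name:    fmt.Sprintf("%s-agent", clusterName),
+//			Cluster: clusterName,
+//			Role:    "agent",
+//		})).
+//		Feature()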
+func PerformNodeOperation(clusterName string, action support.NodeOperation, node *support.Node, args ...string) types.StepFunc { + return func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { + t.Helper() + + clusterVal := ctx.Value(support.ClusterNameContextKey(clusterName)) + if clusterVal == nil { + t.Fatalf("%s node to cluster: context cluster is nil", action) + } + + cluster, ok := clusterVal.(support.E2EClusterProviderWithImageLoaderAndNodeLifecycle) + if !ok { + t.Fatalf("%s node to cluster: unexpected type for cluster value", action) + } + + switch action { + case support.AddNode: + if err := cluster.AddNode(ctx, node, args...); err != nil { + t.Fatalf("add node to cluster: %s", err) + } + case support.RemoveNode: + if err := cluster.RemoveNode(ctx, node, args...); err != nil { + t.Fatalf("remove node from cluster: %s", err) + } + case support.StartNode: + if err := cluster.StartNode(ctx, node, args...); err != nil { + t.Fatalf("start node in cluster: %s", err) + } + case support.StopNode: + if err := cluster.StopNode(ctx, node, args...); err != nil { + t.Fatalf("stop node in cluster: %s", err) + } + default: + t.Fatalf("unknown node operation: %s", action) + } + return ctx + } +} diff --git a/support/k3d/k3d.go b/support/k3d/k3d.go new file mode 100644 index 00000000..5d5fb494 --- /dev/null +++ b/support/k3d/k3d.go @@ -0,0 +1,399 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k3d + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "os" + "strings" + + "k8s.io/apimachinery/pkg/util/json" + + "k8s.io/client-go/rest" + + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/conf" + "sigs.k8s.io/e2e-framework/support" + "sigs.k8s.io/e2e-framework/support/utils" + + log "k8s.io/klog/v2" +) + +var k3dVersion = "v5.7.2" + +type Cluster struct { + path string + name string + kubeConfigFile string + version string + image string + rc *rest.Config + args []string +} + +// k3dNode is a struct containing a subset of values that are part of the k3d node list -o json +// data. This currently contains only those fields that are of interest to generate the +// support.Node struct in return for performing node operations. +type k3dNode struct { + Name string `json:"name"` + Role string `json:"role"` + IP struct { + IP string `json:"IP"` + } `json:"IP"` + State struct { + Running bool `json:"Running"` + Status string `json:"Status"` + } `json:"State"` +} + +var ( + _ support.E2EClusterProviderWithImageLoader = &Cluster{} + _ support.E2EClusterProviderWithImageLoaderAndNodeLifecycle = &Cluster{} +) + +func WithArgs(args ...string) support.ClusterOpts { + return func(c support.E2EClusterProvider) { + k, ok := c.(*Cluster) + if ok { + k.args = append(k.args, args...) 
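+			// args collected here are appended to the `k3d cluster create` command when Create() runs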
+ } + } +} + +func WithImage(image string) support.ClusterOpts { + return func(c support.E2EClusterProvider) { + k, ok := c.(*Cluster) + if ok { + k.image = image + } + } +} + +func NewCluster(name string) *Cluster { + return &Cluster{name: name} +} + +func NewProvider() support.E2EClusterProvider { + return &Cluster{} +} + +func (c *Cluster) findOrInstallK3D() error { + if c.version != "" { + k3dVersion = c.version + } + path, err := utils.FindOrInstallGoBasedProvider(c.path, "k3d", "github.com/k3d-io/k3d/v5", k3dVersion) + if path != "" { + c.path = path + } + return err +} + +func (c *Cluster) getKubeConfig() (string, error) { + kubeCfg := fmt.Sprintf("%s-kubecfg", c.name) + + var stdout, stderr bytes.Buffer + err := utils.RunCommandWithSeperatedOutput(fmt.Sprintf("%s kubeconfig get %s", c.path, c.name), &stdout, &stderr) + if err != nil { + return "", fmt.Errorf("failed to get kubeconfig: %s", stderr.String()) + } + log.V(4).Info("k3d kubeconfig get stderr \n", stderr.String()) + + file, err := os.CreateTemp("", fmt.Sprintf("k3d-cluster-%s", kubeCfg)) + if err != nil { + return "", fmt.Errorf("k3d kubeconfig file: %w", err) + } + defer file.Close() + + c.kubeConfigFile = file.Name() + + if n, err := io.WriteString(file, stdout.String()); n == 0 || err != nil { + return "", fmt.Errorf("k3d kubeconfig file: bytes copied: %d: %w]", n, err) + } + + return file.Name(), nil +} + +func (c *Cluster) clusterExists(name string) (string, bool) { + clusters := utils.FetchCommandOutput(fmt.Sprintf("%s cluster get --no-headers", c.path)) + for _, c := range strings.Split(clusters, "\n") { + if strings.HasPrefix(c, name) { + return clusters, true + } + } + return clusters, false +} + +func (c *Cluster) startCluster(name string) error { + cmd := fmt.Sprintf("%s cluster start %s", c.path, name) + log.V(4).InfoS("Starting k3d cluster", "command", cmd) + p := utils.RunCommand(cmd) + if p.Err() != nil { + return fmt.Errorf("k3d: failed to start cluster %q: %s: %s", name, p.Err(), p.Result()) + } + return nil +} + +func (c *Cluster) initKubernetesAccessClients() error { + cfg, err := conf.New(c.kubeConfigFile) + if err != nil { + return err + } + c.rc = cfg + return nil +} + +func (c *Cluster) WithName(name string) support.E2EClusterProvider { + c.name = name + return c +} + +func (c *Cluster) WithVersion(version string) support.E2EClusterProvider { + c.version = version + return c +} + +func (c *Cluster) WithPath(path string) support.E2EClusterProvider { + c.path = path + return c +} + +func (c *Cluster) WithOpts(opts ...support.ClusterOpts) support.E2EClusterProvider { + for _, o := range opts { + o(c) + } + return c +} + +func (c *Cluster) Create(ctx context.Context, args ...string) (string, error) { + log.V(4).InfoS("Creating k3d cluster", "name", c.name) + if err := c.findOrInstallK3D(); err != nil { + return "", fmt.Errorf("failed to find or install k3d: %w", err) + } + + if _, ok := c.clusterExists(c.name); ok { + // This is being done as an extra step to ensure that in case you have the cluster by the same name, but it is not up. + // Starting an already started cluster won't cause any harm. So, we will just start it once before continuing + // further down the line and process rest of the workflows + if err := c.startCluster(c.name); err != nil { + return "", err + } + log.V(4).InfoS("Skipping k3d cluster creation. 
Cluster already exists", "name", c.name) + kConfig, err := c.getKubeConfig() + if err != nil { + return "", err + } + return kConfig, c.initKubernetesAccessClients() + } + + if c.image != "" { + args = append(args, "--image", c.image) + } + + args = append(args, c.args...) + cmd := fmt.Sprintf("%s cluster create %s", c.path, c.name) + + if len(args) > 0 { + cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " ")) + } + log.V(4).InfoS("Launching k3d cluster", "command", cmd) + + var stdout, stderr bytes.Buffer + p := utils.RunCommandWithCustomWriter(cmd, &stdout, &stderr) + if p.Err() != nil { + return "", fmt.Errorf("k3d: failed to create cluster %q: %s: %s: %s %s", c.name, p.Err(), p.Result(), stdout.String(), stderr.String()) + } + clusters, ok := c.clusterExists(c.name) + if !ok { + return "", fmt.Errorf("k3d cluster create: cluster %v still not in 'cluster list' after creation: %v", c.name, clusters) + } + log.V(4).Info("k3d clusters available: ", clusters) + + kConfig, err := c.getKubeConfig() + if err != nil { + return "", err + } + return kConfig, c.initKubernetesAccessClients() +} + +func (c *Cluster) CreateWithConfig(ctx context.Context, configFile string) (string, error) { + var args []string + if configFile != "" { + args = append(args, "--config", configFile) + } + return c.Create(ctx, args...) +} + +func (c *Cluster) GetKubeconfig() string { + return c.kubeConfigFile +} + +func (c *Cluster) GetKubectlContext() string { + return fmt.Sprintf("k3d-%s", c.name) +} + +func (c *Cluster) ExportLogs(ctx context.Context, dest string) error { + log.Warning("ExportLogs not implemented for k3d. Please use regular kubectl like commands to extract the logs from the cluster") + return nil +} + +func (c *Cluster) Destroy(ctx context.Context) error { + log.V(4).InfoS("Destroying k3d cluster", "name", c.name) + if err := c.findOrInstallK3D(); err != nil { + return fmt.Errorf("failed to find or install k3d: %w", err) + } + + if _, ok := c.clusterExists(c.name); !ok { + log.V(4).InfoS("Skipping k3d cluster destruction. Cluster does not exist", "name", c.name) + return nil + } + + cmd := fmt.Sprintf("%s cluster delete %s", c.path, c.name) + log.V(4).InfoS("Destroying k3d cluster", "command", cmd) + p := utils.RunCommand(cmd) + if p.Err() != nil { + outBytes, err := io.ReadAll(p.Out()) + if err != nil { + log.ErrorS(err, "failed to read data from the k3d cluster delete process output due to an error") + } + return fmt.Errorf("k3d: failed to delete cluster %q: %s: %s: %s", c.name, p.Err(), p.Result(), string(outBytes)) + } + + log.V(4).InfoS("Removing kubeconfig file", "configFile", c.kubeConfigFile) + if err := os.RemoveAll(c.kubeConfigFile); err != nil { + return fmt.Errorf("k3d: failed to remove kubeconfig file %q: %w", c.kubeConfigFile, err) + } + return nil +} + +func (c *Cluster) SetDefaults() support.E2EClusterProvider { + if c.path == "" { + c.path = "k3d" + } + return c +} + +func (c *Cluster) WaitForControlPlane(ctx context.Context, client klient.Client) error { + log.V(4).Info("k3d doesn't implement a WaitForControlPlane handler. 
The --wait flag passed to `k3d cluster create` should take care of this already")
+	return nil
+}
+
+func (c *Cluster) KubernetesRestConfig() *rest.Config {
+	return c.rc
+}
+
+func (c *Cluster) LoadImage(ctx context.Context, image string, args ...string) error {
+	log.V(4).InfoS("Performing Image load operation", "cluster", c.name, "image", image, "args", args)
+	p := utils.RunCommand(fmt.Sprintf("%s image import --cluster %s %s %s", c.path, c.name, strings.Join(args, " "), image))
+	if p.Err() != nil {
+		return fmt.Errorf("k3d: load docker-image %v failed: %s: %s", image, p.Err(), p.Result())
+	}
+	return nil
+}
+
+func (c *Cluster) LoadImageArchive(ctx context.Context, imageArchive string, args ...string) error {
+	return c.LoadImage(ctx, imageArchive, args...)
+}
+
+func (c *Cluster) AddNode(ctx context.Context, node *support.Node, args ...string) error {
+	cmd := fmt.Sprintf("%s node create %s --cluster %s", c.path, node.Name, c.name)
+
+	if len(args) > 0 {
+		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
+	}
+	if node.Role != "" {
+		cmd = fmt.Sprintf("%s --role %s", cmd, node.Role)
+	}
+
+	log.V(4).InfoS("Adding node to k3d cluster", "command", cmd)
+	p, stdout, stderr := utils.FetchSeperatedCommandOutput(cmd)
+	if p.Err() != nil || (p.Exited() && p.ExitCode() != 0) {
+		log.ErrorS(p.Err(), "failed to add node to k3d cluster", "stdout", stdout.String(), "stderr", stderr.String())
+		return fmt.Errorf("k3d: failed to add node %q to cluster %q: %s: %s", node.Name, c.name, p.Err(), p.Result())
+	}
+	return nil
+}
+
+func (c *Cluster) RemoveNode(ctx context.Context, node *support.Node, args ...string) error {
+	cmd := fmt.Sprintf("%s node delete %s", c.path, node.Name)
+
+	if len(args) > 0 {
+		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
+	}
+
+	log.V(4).InfoS("Removing node from k3d cluster", "command", cmd)
+	p, stdout, stderr := utils.FetchSeperatedCommandOutput(cmd)
+	if p.Err() != nil || (p.Exited() && p.ExitCode() != 0) {
+		log.ErrorS(p.Err(), "failed to remove node from k3d cluster", "stdout", stdout.String(), "stderr", stderr.String())
+		return fmt.Errorf("k3d: failed to remove node %q from cluster %q: %s: %s", node.Name, c.name, p.Err(), p.Result())
+	}
+	return nil
+}
+
+func (c *Cluster) StartNode(ctx context.Context, node *support.Node, args ...string) error {
+	cmd := fmt.Sprintf("%s node start %s", c.path, node.Name)
+	if len(args) > 0 {
+		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
+	}
+	log.V(4).InfoS("Starting node in k3d cluster", "command", cmd)
+	p, stdout, stderr := utils.FetchSeperatedCommandOutput(cmd)
+	if p.Err() != nil || (p.Exited() && p.ExitCode() != 0) {
+		log.ErrorS(p.Err(), "failed to start node in k3d cluster", "stdout", stdout.String(), "stderr", stderr.String())
+		return fmt.Errorf("k3d: failed to start node %q in cluster %q: %s: %s", node.Name, c.name, p.Err(), p.Result())
+	}
+	return nil
+}
+
+func (c *Cluster) StopNode(ctx context.Context, node *support.Node, args ...string) error {
+	cmd := fmt.Sprintf("%s node stop %s", c.path, node.Name)
+	if len(args) > 0 {
+		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
+	}
+	log.V(4).InfoS("Stopping node in k3d cluster", "command", cmd)
+	p, stdout, stderr := utils.FetchSeperatedCommandOutput(cmd)
+	if p.Err() != nil || (p.Exited() && p.ExitCode() != 0) {
+		log.ErrorS(p.Err(), "failed to stop node in k3d cluster", "stdout", stdout.String(), "stderr", stderr.String())
+		return fmt.Errorf("k3d: failed to stop node %q in cluster %q: %s: %s", node.Name, c.name, p.Err(), 
p.Result())
+	}
+	return nil
+}
+
+func (c *Cluster) ListNode(ctx context.Context, args ...string) ([]support.Node, error) {
+	cmd := fmt.Sprintf("%s node list -o json", c.path)
+	p := utils.RunCommand(cmd)
+	if p.Err() != nil || (p.Exited() && p.ExitCode() != 0) {
+		return nil, fmt.Errorf("k3d: failed to list nodes: %s: %s", p.Err(), p.Result())
+	}
+	var nodeInfo []k3dNode
+	if err := json.Unmarshal([]byte(p.Result()), &nodeInfo); err != nil {
+		return nil, fmt.Errorf("k3d: failed to unmarshal node list: %s", err)
+	}
+	nodes := make([]support.Node, 0, len(nodeInfo))
+	for _, n := range nodeInfo {
+		nodes = append(nodes, support.Node{
+			Name:    n.Name,
+			Role:    n.Role,
+			IP:      net.ParseIP(n.IP.IP),
+			State:   n.State.Status,
+			Cluster: c.name,
+		})
+	}
+	return nodes, nil
+}
diff --git a/support/kind/kind.go b/support/kind/kind.go
index 4ca3d2a1..a691fdb4 100644
--- a/support/kind/kind.go
+++ b/support/kind/kind.go
@@ -261,7 +261,7 @@ func (k *Cluster) findOrInstallKind() error {
 	return err
 }
 
-func (k *Cluster) LoadImage(ctx context.Context, image string) error {
+func (k *Cluster) LoadImage(ctx context.Context, image string, args ...string) error {
 	p := utils.RunCommand(fmt.Sprintf(`%s load docker-image --name %s %s`, k.path, k.name, image))
 	if p.Err() != nil {
 		return fmt.Errorf("kind: load docker-image %v failed: %s: %s", image, p.Err(), p.Result())
@@ -269,7 +269,7 @@ func (k *Cluster) LoadImage(ctx context.Context, image string) error {
 	return nil
 }
 
-func (k *Cluster) LoadImageArchive(ctx context.Context, imageArchive string) error {
+func (k *Cluster) LoadImageArchive(ctx context.Context, imageArchive string, args ...string) error {
 	p := utils.RunCommand(fmt.Sprintf(`%s load image-archive --name %s %s`, k.path, k.name, imageArchive))
 	if p.Err() != nil {
 		return fmt.Errorf("kind: load image-archive %v failed: %s: %s", imageArchive, p.Err(), p.Result())
diff --git a/support/types.go b/support/types.go
index 25fcae26..aa03e503 100644
--- a/support/types.go
+++ b/support/types.go
@@ -18,6 +18,7 @@ package support
 
 import (
 	"context"
+	"net"
 
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/e2e-framework/klient"
@@ -25,6 +26,25 @@ import (
 
 type ClusterOpts func(c E2EClusterProvider)
 
+type Node struct {
+	Name    string
+	Role    string
+	Cluster string
+	State   string
+	IP      net.IP
+}
+
+type NodeOperation string
+
+const (
+	AddNode    NodeOperation = "add"
+	RemoveNode NodeOperation = "remove"
+	StartNode  NodeOperation = "start"
+	StopNode   NodeOperation = "stop"
+)
+
+type ClusterNameContextKey string
+
 type E2EClusterProvider interface {
 	// WithName is used to configure the cluster Name that should be used while setting up the cluster. Might
 	// Not apply for all providers.
@@ -94,12 +114,48 @@ type E2EClusterProviderWithImageLoader interface {
 	// LoadImage is used to load a set of Docker images to the cluster via the cluster provider native workflow
 	// Not every provider will have a mechanism like this/need to do this. So, providers that do not have this support
 	// can just provide a no-op implementation to be compliant with the interface
-	LoadImage(ctx context.Context, image string) error
+	LoadImage(ctx context.Context, image string, args ...string) error
 
 	// LoadImageArchive is used to provide a mechanism where a tar.gz archive containing the docker images used
 	// by the services running on the cluster can be imported and loaded into the cluster prior to the execution of
 	// test if required.
 	// Not every provider will have a mechanism like this/need to do this. 
 	// So, providers that do not have this support
 	// can just provide a no-op implementation to be compliant with the interface
-	LoadImageArchive(ctx context.Context, archivePath string) error
+	LoadImageArchive(ctx context.Context, archivePath string, args ...string) error
+}
+
+// E2EClusterProviderWithImageLoaderAndNodeLifecycle is an interface that extends the E2EClusterProviderWithImageLoader
+// interface to provide a mechanism to add/remove nodes from the cluster as part of the E2E test workflow.
+//
+// This can be useful when performing e2e tests that revolve around node lifecycle events.
+// e.g. you have a Kubernetes controller that acts upon the v1.Node resource and you want to
+// test out how the Remove operation impacts your workflow.
+// Or you want to simulate a case where one or more nodes of your cluster are down and you want to see how
+// your application reacts to such failure events.
+type E2EClusterProviderWithImageLoaderAndNodeLifecycle interface {
+	E2EClusterProviderWithImageLoader
+
+	// AddNode is used to add a new node to the existing cluster as part of the E2E test workflow.
+	// Not every provider will have a mechanism to support this (e.g. Kind doesn't), but k3d does.
+	// This will be implemented as an optional interface depending on the provider in question.
+	AddNode(ctx context.Context, node *Node, args ...string) error
+
+	// RemoveNode can be used to remove a node from an existing cluster as part of the E2E test workflow.
+	// Not every provider will have a mechanism to support this (e.g. Kind doesn't), but k3d does.
+	// This will be implemented as an optional interface depending on the provider in question.
+	RemoveNode(ctx context.Context, node *Node, args ...string) error
+
+	// StartNode is used to start a node that was shut down/powered down as part of the E2E test workflow.
+	// Not every provider will have a mechanism to support this (e.g. Kind doesn't), but k3d does.
+	// This will be implemented as an optional interface depending on the provider in question.
+	StartNode(ctx context.Context, node *Node, args ...string) error
+
+	// StopNode can be used to stop a running node of the cluster as part of the E2E test workflow.
+	// Not every provider will have a mechanism to support this (e.g. Kind doesn't), but k3d does.
+	// This will be implemented as an optional interface depending on the provider in question.
+	StopNode(ctx context.Context, node *Node, args ...string) error
+
+	// ListNode can be used to fetch the list of nodes in the cluster. This can be used to extract the
+	// list of existing nodes on the cluster and their state before they can be operated on.
+	ListNode(ctx context.Context, args ...string) ([]Node, error)
 }
diff --git a/support/utils/command.go b/support/utils/command.go
index f23aabeb..0d9724b3 100644
--- a/support/utils/command.go
+++ b/support/utils/command.go
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 
@@ -34,41 +35,57 @@ var commandRunner = gexe.New()
 // be set in the invoker to make sure the right path is used for the binaries while invoking
 // rest of the workflow after this helper is triggered. 
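+//
+// For example, the k3d provider added in this change resolves its binary via (as in support/k3d/k3d.go):
+//
+//	path, err := utils.FindOrInstallGoBasedProvider(c.path, "k3d", "github.com/k3d-io/k3d/v5", k3dVersion)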
 func FindOrInstallGoBasedProvider(pPath, provider, module, version string) (string, error) {
-	if commandRunner.Prog().Avail(pPath) != "" {
+	if gexe.ProgAvail(pPath) != "" {
 		log.V(4).InfoS("Found Provider tooling already installed on the machine", "command", pPath)
 		return pPath, nil
 	}
+	var stdout, stderr bytes.Buffer
 	installCommand := fmt.Sprintf("go install %s@%s", module, version)
 	log.V(4).InfoS("Installing provider tooling using go install", "command", installCommand)
-	p := commandRunner.RunProc(installCommand)
-	if p.Err() != nil {
-		return "", fmt.Errorf("failed to install %s: %s", pPath, p.Err())
+	p := commandRunner.NewProc(installCommand)
+	p.SetStdout(&stdout)
+	p.SetStderr(&stderr)
+	result := p.Run()
+	if result.Err() != nil {
+		return "", fmt.Errorf("failed to install %s: %s: \n %s", pPath, result.Result(), stderr.String())
 	}
-	if !p.IsSuccess() || p.ExitCode() != 0 {
-		return "", fmt.Errorf("failed to install %s: %s", pPath, p.Result())
+	if !result.IsSuccess() || result.ExitCode() != 0 {
+		return "", fmt.Errorf("failed to install %s: %s \n %s", pPath, result.Result(), stderr.String())
 	}
-	if providerPath := commandRunner.Prog().Avail(provider); providerPath != "" {
+	log.V(4).InfoS("Installed provider tooling using go install", "command", installCommand, "output", stdout.String())
+
+	if providerPath := gexe.ProgAvail(provider); providerPath != "" {
 		log.V(4).Infof("Installed %s at %s", pPath, providerPath)
 		return provider, nil
 	}
-	p = commandRunner.RunProc("ls $GOPATH/bin")
-	if p.Err() != nil {
-		return "", fmt.Errorf("failed to install %s: %s", pPath, p.Err())
+	p = commandRunner.NewProc("ls $GOPATH/bin")
+	stdout.Reset()
+	stderr.Reset()
+	p.SetStdout(&stdout)
+	p.SetStderr(&stderr)
+	result = p.Run()
+	if result.Err() != nil {
+		return "", fmt.Errorf("failed to install %s: %s \n %s", pPath, result.Result(), stderr.String())
 	}
-	p = commandRunner.RunProc("echo $PATH:$GOPATH/bin")
-	if p.Err() != nil {
-		return "", fmt.Errorf("failed to install %s: %s", pPath, p.Err())
+	p = commandRunner.NewProc("echo $PATH:$GOPATH/bin")
+	stdout.Reset()
+	stderr.Reset()
+	p.SetStdout(&stdout)
+	p.SetStderr(&stderr)
+	result = p.Run()
+	if result.Err() != nil {
+		return "", fmt.Errorf("failed to install %s: %s \n %s", pPath, result.Result(), stderr.String())
 	}
-	log.V(4).Info(`Setting path to include $GOPATH/bin:`, p.Result())
-	commandRunner.SetEnv("PATH", p.Result())
+	log.V(4).Info(`Setting path to include $GOPATH/bin:`, result.Result())
+	commandRunner.SetEnv("PATH", result.Result())
 
-	if providerPath := commandRunner.Prog().Avail(provider); providerPath != "" {
+	if providerPath := gexe.ProgAvail(provider); providerPath != "" {
 		log.V(4).Infof("Installed %s at %s", pPath, providerPath)
 		return provider, nil
 	}
@@ -92,7 +109,26 @@ func RunCommandWithSeperatedOutput(command string, stdout, stderr io.Writer) err
 	return result.Err()
 }
 
+// RunCommandWithCustomWriter runs a command and returns an *exec.Proc with information about the executed process.
+// This helps map STDOUT/STDERR to custom writers so that data can be extracted from the output.
+func RunCommandWithCustomWriter(command string, stdout, stderr io.Writer) *exec.Proc {
+	p := commandRunner.NewProc(command)
+	p.SetStdout(stdout)
+	p.SetStderr(stderr)
+	return p.Run()
+}
+
 // FetchCommandOutput run command and returns the combined stderr and stdout output. 
 func FetchCommandOutput(command string) string {
 	return commandRunner.Run(command)
 }
+
+// FetchSeperatedCommandOutput runs the given command and returns the resulting process along with
+// separate stdout and stderr buffers that can be read from to extract the data written to each of
+// the two streams.
+func FetchSeperatedCommandOutput(command string) (p *exec.Proc, stdout, stderr bytes.Buffer) {
+	p = commandRunner.NewProc(command)
+	p.SetStdout(&stdout)
+	p.SetStderr(&stderr)
+	return p.Run(), stdout, stderr
+}