diff --git a/contrib/cmd/runkperf/commands/bench/node100_dp5_pod10k.go b/contrib/cmd/runkperf/commands/bench/node100_dp5_pod10k.go
deleted file mode 100644
index 60b8e50..0000000
--- a/contrib/cmd/runkperf/commands/bench/node100_dp5_pod10k.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package bench
-
-import (
-	"context"
-	"fmt"
-	"sync"
-	"time"
-
-	internaltypes "github.com/Azure/kperf/contrib/internal/types"
-	"github.com/Azure/kperf/contrib/internal/utils"
-
-	"github.com/urfave/cli"
-)
-
-var benchNode100Deployment5Pod10KCase = cli.Command{
-	Name: "node100_dp5_pod10k",
-	Usage: `
-
-The test suite is to setup 100 virtual nodes and deploy 5 deployments for 10k
-pods on that nodes. It repeats to rolling-update deployments one by one during
-benchmark.
-	`,
-	Flags: []cli.Flag{
-		cli.IntFlag{
-			Name:  "total",
-			Usage: "Total requests per runner (There are 10 runners totally and runner's rate is 10)",
-			Value: 36000,
-		},
-		cli.IntFlag{
-			Name:  "podsize",
-			Usage: "Add in pod's annotation to increase pod size. The value is close to pod's size",
-			Value: 0,
-		},
-	},
-	Action: func(cliCtx *cli.Context) error {
-		_, err := renderBenchmarkReportInterceptor(
-			addAPIServerCoresInfoInterceptor(benchNode100Deployment5Pod10KRun),
-		)(cliCtx)
-		return err
-	},
-}
-
-// benchNode100Deployment5Pod10KCase is for subcommand benchNode100Deployment5Pod10KCase.
-func benchNode100Deployment5Pod10KRun(cliCtx *cli.Context) (*internaltypes.BenchmarkReport, error) {
-	ctx := context.Background()
-	kubeCfgPath := cliCtx.GlobalString("kubeconfig")
-
-	rgCfgFile, rgSpec, rgCfgFileDone, err := newLoadProfileFromEmbed(cliCtx,
-		"loadprofile/node100_dp5_pod10k.yaml")
-	if err != nil {
-		return nil, err
-	}
-	defer func() { _ = rgCfgFileDone() }()
-
-	vcDone, err := deployVirtualNodepool(ctx, cliCtx, "node100dp5pod10k", 100, 150)
-	if err != nil {
-		return nil, fmt.Errorf("failed to deploy virtual node: %w", err)
-	}
-	defer func() { _ = vcDone() }()
-
-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	restartInterval := 10 * time.Second
-	dpCtx, dpCancel := context.WithCancel(ctx)
-
-	podSize := cliCtx.Int("podsize")
-	rollingUpdateFn, err := utils.RepeatRollingUpdate10KPod(dpCtx, kubeCfgPath, "dp5pod10k", podSize, restartInterval)
-	if err != nil {
-		dpCancel()
-		return nil, fmt.Errorf("failed to setup workload: %w", err)
-	}
-
-	go func() {
-		defer wg.Done()
-
-		// FIXME(weifu):
-		//
-		// DeployRunnerGroup should return ready notification.
-		// The rolling update should run after runners.
-		rollingUpdateFn()
-	}()
-
-	rgResult, derr := utils.DeployRunnerGroup(ctx,
-		cliCtx.GlobalString("kubeconfig"),
-		cliCtx.GlobalString("runner-image"),
-		rgCfgFile,
-		cliCtx.GlobalString("runner-flowcontrol"),
-		cliCtx.GlobalString("rg-affinity"),
-	)
-	dpCancel()
-	wg.Wait()
-
-	if derr != nil {
-		return nil, derr
-	}
-
-	return &internaltypes.BenchmarkReport{
-		Description: fmt.Sprintf(`
-Environment: 100 virtual nodes managed by kwok-controller,
-Workload: Deploy 5 deployments with 10,000 pods. Rolling-update deployments one by one and the interval is %v`, restartInterval),
-		LoadSpec: *rgSpec,
-		Result:   *rgResult,
-		Info: map[string]interface{}{
-			"podSizeInBytes": podSize,
-		},
-	}, nil
-}
diff --git a/contrib/cmd/runkperf/commands/bench/node100_job1_pod3k.go b/contrib/cmd/runkperf/commands/bench/node100_job1_pod3k.go
index 40ed5eb..0487874 100644
--- a/contrib/cmd/runkperf/commands/bench/node100_job1_pod3k.go
+++ b/contrib/cmd/runkperf/commands/bench/node100_job1_pod3k.go
@@ -46,7 +46,7 @@ func benchNode100Job1Pod3KCaseRun(cliCtx *cli.Context) (*internaltypes.Benchmark
 	}
 	defer func() { _ = rgCfgFileDone() }()
 
-	vcDone, err := deployVirtualNodepool(ctx, cliCtx, "node100job1pod3k", 100, 110)
+	vcDone, err := deployVirtualNodepool(ctx, cliCtx, "node100job1pod3k", 100, 32, 96, 110)
 	if err != nil {
 		return nil, fmt.Errorf("failed to deploy virtual node: %w", err)
 	}
diff --git a/contrib/cmd/runkperf/commands/bench/node100_pod10k.go b/contrib/cmd/runkperf/commands/bench/node100_pod10k.go
new file mode 100644
index 0000000..f3c5132
--- /dev/null
+++ b/contrib/cmd/runkperf/commands/bench/node100_pod10k.go
@@ -0,0 +1,206 @@
+package bench
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	internaltypes "github.com/Azure/kperf/contrib/internal/types"
+	"github.com/Azure/kperf/contrib/internal/utils"
+
+	"github.com/urfave/cli"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+)
+
+var benchNode100DeploymentNPod10KCase = cli.Command{
+	Name: "node100_pod10k",
+	Usage: `
+
+The test suite sets up 100 virtual nodes and deploys N deployments for 10k
+pods on those nodes. It repeatedly rolling-updates the deployments one by one
+during the benchmark.
+	`,
+	Flags: []cli.Flag{
+		cli.IntFlag{
+			Name:  "deployments",
+			Usage: "The total number of deployments for 10k pods",
+			Value: 20,
+		},
+		cli.IntFlag{
+			Name:  "total",
+			Usage: "Total requests per runner (there are 10 runners in total and each runner's rate is 10)",
+			Value: 36000,
+		},
+		cli.IntFlag{
+			Name:  "padding-bytes",
+			Usage: "Padding bytes added to each pod's annotation to increase pod size",
+			Value: 0,
+		},
+		cli.DurationFlag{
+			Name:  "interval",
+			Usage: "Interval to restart deployments",
+			Value: time.Second * 10,
+		},
+	},
+	Action: func(cliCtx *cli.Context) error {
+		_, err := renderBenchmarkReportInterceptor(
+			addAPIServerCoresInfoInterceptor(benchNode100DeploymentNPod10KRun),
+		)(cliCtx)
+		return err
+	},
+}
+
+// benchNode100DeploymentNPod10KRun is the action for subcommand benchNode100DeploymentNPod10KCase.
+func benchNode100DeploymentNPod10KRun(cliCtx *cli.Context) (*internaltypes.BenchmarkReport, error) {
+	ctx := context.Background()
+	kubeCfgPath := cliCtx.GlobalString("kubeconfig")
+
+	rgCfgFile, rgSpec, rgCfgFileDone, err := newLoadProfileFromEmbed(cliCtx,
+		"loadprofile/node100_pod10k.yaml")
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rgCfgFileDone() }()
+
+	// NOTE: The nodepool name should be aligned with ../../../../internal/manifests/loadprofile/node100_pod10k.yaml.
+	vcDone, err := deployVirtualNodepool(ctx, cliCtx, "node100pod10k", 100, 16, 64, 110)
+	if err != nil {
+		return nil, fmt.Errorf("failed to deploy virtual node: %w", err)
+	}
+	defer func() { _ = vcDone() }()
+
+	dpCtx, dpCancel := context.WithCancel(ctx)
+	defer dpCancel()
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	restartInterval := cliCtx.Duration("interval")
+	klog.V(0).Infof("The interval is %v for restarting deployments", restartInterval)
+
+	paddingBytes := cliCtx.Int("padding-bytes")
+	total := cliCtx.Int("deployments")
+	replica := 10000 / total
+
+	// NOTE: The name pattern should be aligned with ../../../../internal/manifests/loadprofile/node100_pod10k.yaml.
+	deploymentNamePattern := "benchmark"
+
+	rollingUpdateFn, ruCleanupFn, err := utils.DeployAndRepeatRollingUpdateDeployments(dpCtx,
+		kubeCfgPath, deploymentNamePattern, total, replica, paddingBytes, restartInterval)
+	if err != nil {
+		dpCancel()
+		return nil, fmt.Errorf("failed to setup workload: %w", err)
+	}
+	defer ruCleanupFn()
+
+	err = dumpDeploymentReplicas(ctx, kubeCfgPath, deploymentNamePattern, total)
+	if err != nil {
+		return nil, err
+	}
+
+	podSize, err := getDeploymentPodSize(ctx, kubeCfgPath, deploymentNamePattern)
+	if err != nil {
+		return nil, err
+	}
+
+	podSize = (podSize / 1024) * 1024
+
+	go func() {
+		defer wg.Done()
+
+		// FIXME(weifu):
+		//
+		// DeployRunnerGroup should return ready notification.
+		// The rolling update should run after runners.
+		rollingUpdateFn()
+	}()
+
+	rgResult, derr := utils.DeployRunnerGroup(ctx,
+		cliCtx.GlobalString("kubeconfig"),
+		cliCtx.GlobalString("runner-image"),
+		rgCfgFile,
+		cliCtx.GlobalString("runner-flowcontrol"),
+		cliCtx.GlobalString("rg-affinity"),
+	)
+	dpCancel()
+	wg.Wait()
+
+	if derr != nil {
+		return nil, derr
+	}
+
+	return &internaltypes.BenchmarkReport{
+		Description: fmt.Sprintf(`
+Environment: 100 virtual nodes managed by kwok-controller,
+Workload: Deploy %d deployments with %d pods. Rolling-update deployments one by one and the interval is %v`,
+			total, total*replica, restartInterval),
+
+		LoadSpec: *rgSpec,
+		Result:   *rgResult,
+		Info: map[string]interface{}{
+			"podSizeInBytes": podSize,
+			"interval":       restartInterval.String(),
+		},
+	}, nil
+}
+
+// dumpDeploymentReplicas dumps each deployment's replica counts.
+func dumpDeploymentReplicas(ctx context.Context, kubeCfgPath string, namePattern string, total int) error {
+	klog.V(0).Info("Dump deployment's replica information")
+
+	cli, err := utils.BuildClientset(kubeCfgPath)
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < total; i++ {
+		name := fmt.Sprintf("%s-%d", namePattern, i)
+		ns := name
+
+		dp, err := cli.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to get deployment %s in namespace %s: %w",
+				name, ns, err)
+		}
+
+		klog.V(0).InfoS("Deployment", "name", name, "ns", ns,
+			"replica", *dp.Spec.Replicas, "readyReplicas", dp.Status.ReadyReplicas)
+	}
+	return nil
+}
+
+// getDeploymentPodSize gets the size of a pod created by the deployments.
+func getDeploymentPodSize(ctx context.Context, kubeCfgPath string, namePattern string) (int, error) {
+	ns := fmt.Sprintf("%s-0", namePattern)
+	labelSelector := fmt.Sprintf("app=%s", namePattern)
+
+	klog.V(0).InfoS("Get the size of pod", "labelSelector", labelSelector, "namespace", ns)
+
+	cli, err := utils.BuildClientset(kubeCfgPath)
+	if err != nil {
+		return 0, err
+	}
+
+	resp, err := cli.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+		Limit:         1,
+	})
+	if err != nil {
+		return 0, fmt.Errorf("failed to list pods with labelSelector %s: %w",
+			labelSelector, err)
+	}
+	if len(resp.Items) == 0 {
+		return 0, fmt.Errorf("no pod with labelSelector %s in namespace %s",
+			labelSelector, ns)
+	}
+
+	pod := resp.Items[0]
+	data, err := json.Marshal(pod)
+	if err != nil {
+		return 0, fmt.Errorf("failed to json.Marshal pod: %w", err)
+	}
+	return len(data), nil
+}
diff --git a/contrib/cmd/runkperf/commands/bench/root.go b/contrib/cmd/runkperf/commands/bench/root.go
index 945d7e0..9be6dc6 100644
--- a/contrib/cmd/runkperf/commands/bench/root.go
+++ b/contrib/cmd/runkperf/commands/bench/root.go
@@ -53,6 +53,6 @@ var Command = cli.Command{
 	},
 	Subcommands: []cli.Command{
 		benchNode100Job1Pod3KCase,
-		benchNode100Deployment5Pod10KCase,
+		benchNode100DeploymentNPod10KCase,
 	},
 }
diff --git a/contrib/cmd/runkperf/commands/bench/utils.go b/contrib/cmd/runkperf/commands/bench/utils.go
index 0674d06..adb6e3a 100644
--- a/contrib/cmd/runkperf/commands/bench/utils.go
+++ b/contrib/cmd/runkperf/commands/bench/utils.go
@@ -90,7 +90,7 @@ func renderBenchmarkReportInterceptor(handler subcmdActionFunc) subcmdActionFunc
 }
 
 // deployVirtualNodepool deploys virtual nodepool.
-func deployVirtualNodepool(ctx context.Context, cliCtx *cli.Context, target string, nodes, maxPods int) (func() error, error) {
+func deployVirtualNodepool(ctx context.Context, cliCtx *cli.Context, target string, nodes, cpu, memory, maxPods int) (func() error, error) {
 	klog.V(0).InfoS("Deploying virtual nodepool", "name", target)
 
 	kubeCfgPath := cliCtx.GlobalString("kubeconfig")
@@ -114,7 +114,7 @@ func deployVirtualNodepool(ctx context.Context, cliCtx *cli.Context, target stri
 		klog.V(0).ErrorS(err, "failed to delete nodepool", "name", target)
 	}
 
-	err = kr.NewNodepool(ctx, 0, target, nodes, maxPods, virtualNodeAffinity, sharedProviderID)
+	err = kr.NewNodepool(ctx, 0, target, nodes, cpu, memory, maxPods, virtualNodeAffinity, sharedProviderID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create nodepool %s: %w", target, err)
 	}
diff --git a/contrib/cmd/runkperf/commands/ekswarmup/command.go b/contrib/cmd/runkperf/commands/ekswarmup/command.go
index ee87d12..4382ae1 100644
--- a/contrib/cmd/runkperf/commands/ekswarmup/command.go
+++ b/contrib/cmd/runkperf/commands/ekswarmup/command.go
@@ -168,7 +168,7 @@ func deployWarmupVirtualNodepool(ctx context.Context, kubeCfgPath string) (func(
 		klog.V(0).ErrorS(err, "failed to delete", "nodepool", target)
 	}
 
-	err = kr.NewNodepool(ctx, 0, target, 100, 110,
+	err = kr.NewNodepool(ctx, 0, target, 100, 32, 96, 110,
 		"node.kubernetes.io/instance-type=m4.2xlarge", sharedProviderID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create nodepool %s: %w", target, err)
diff --git a/contrib/internal/manifests/loadprofile/node100_dp5_pod10k.yaml b/contrib/internal/manifests/loadprofile/node100_pod10k.yaml
similarity index 56%
rename from contrib/internal/manifests/loadprofile/node100_dp5_pod10k.yaml
rename to contrib/internal/manifests/loadprofile/node100_pod10k.yaml
index 4b75cd5..71a92f4 100644
--- a/contrib/internal/manifests/loadprofile/node100_dp5_pod10k.yaml
+++ b/contrib/internal/manifests/loadprofile/node100_pod10k.yaml
@@ -1,7 +1,7 @@
 count: 10
 loadProfile:
   version: 1
-  description: "node100-deployment5-pod10k"
+  description: "node100-pod10k"
   spec:
     rate: 10
     total: 36000
@@ -14,12 +14,7 @@ loadProfile:
     - staleList:
         version: v1
        resource: pods
-        # NOTE: Please align with ../../utils/utils.go#RepeatRollingUpdate10KPod
-        seletor: "app=benchmark"
-        # NOTE: Please align with ../../../cmd/runkperf/commands/bench/node100_dp5_pod10k.go.
-        # And there are only 100 nodes and each node can run 150 pods. It should
-        # have items in the response.
-        fieldSelector: "spec.nodeName=node100dp5pod10k-49"
+        fieldSelector: "spec.nodeName=node100pod10k-49"
       shares: 1000 # 1000 / (1000 + 100 + 200) * 10 = 7.7 req/s
     - staleList:
         version: v1
@@ -33,8 +28,5 @@ loadProfile:
         # including kubelet, when they want to get pods from ETCD. The limit
         # is 100 because it's close to MaxPods value.
         limit: 100
-        # NOTE: Please align with ../../../cmd/runkperf/commands/bench/node100_dp5_pod10k.go.
-        fieldSelector: "spec.nodeName=node100dp5pod10k-49"
-        # And there are only 100 nodes and each node can run 150 pods. It should
-        # have items in the response.
+        seletor: "app=benchmark"
       shares: 200 # 200 / (1000 + 100 + 200) * 10 = 1.5 req/s
diff --git a/contrib/internal/manifests/workload/2kpodper1deployment/Chart.yaml b/contrib/internal/manifests/workload/2kpodper1deployment/Chart.yaml
deleted file mode 100644
index 2979c76..0000000
--- a/contrib/internal/manifests/workload/2kpodper1deployment/Chart.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-apiVersion: v1
-name: "2k-pods-per-1-deployment"
-version: "0.0.1"
diff --git a/contrib/internal/manifests/workload/2kpodper1deployment/values.yaml b/contrib/internal/manifests/workload/2kpodper1deployment/values.yaml
deleted file mode 100644
index 84d38e4..0000000
--- a/contrib/internal/manifests/workload/2kpodper1deployment/values.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-pattern: "benchmark"
-total: 5
-podSizeInBytes: 2048
diff --git a/contrib/internal/manifests/workload/deployments/Chart.yaml b/contrib/internal/manifests/workload/deployments/Chart.yaml
new file mode 100644
index 0000000..e56f985
--- /dev/null
+++ b/contrib/internal/manifests/workload/deployments/Chart.yaml
@@ -0,0 +1,3 @@
+apiVersion: v1
+name: "deployment"
+version: "0.0.1"
diff --git a/contrib/internal/manifests/workload/2kpodper1deployment/templates/deployments.tpl b/contrib/internal/manifests/workload/deployments/templates/deployments.tpl
similarity index 83%
rename from contrib/internal/manifests/workload/2kpodper1deployment/templates/deployments.tpl
rename to contrib/internal/manifests/workload/deployments/templates/deployments.tpl
index 92c2f3a..38df56c 100644
--- a/contrib/internal/manifests/workload/2kpodper1deployment/templates/deployments.tpl
+++ b/contrib/internal/manifests/workload/deployments/templates/deployments.tpl
@@ -1,5 +1,6 @@
-{{- $pattern := .Values.pattern }}
-{{- $podSizeInBytes := int .Values.podSizeInBytes }}
+{{- $pattern := .Values.namePattern }}
+{{- $replica := int .Values.replica }}
+{{- $paddingBytes := int .Values.paddingBytes }}
 {{- range $index := (untilStep 0 (int .Values.total) 1) }}
 apiVersion: v1
 kind: Namespace
@@ -16,7 +17,7 @@ metadata:
   labels:
     app: {{ $pattern }}
 spec:
-  replicas: 2000
+  replicas: {{ $replica }}
   strategy:
     rollingUpdate:
       maxSurge: 100
@@ -31,7 +32,7 @@ spec:
         app: {{ $pattern }}
         index: "{{ $index }}"
       annotations:
-        data: "{{ randAlphaNum $podSizeInBytes | nospace }}"
+        data: "{{ randAlphaNum $paddingBytes | nospace }}"
     spec:
       affinity:
         nodeAffinity:
diff --git a/contrib/internal/manifests/workload/deployments/values.yaml b/contrib/internal/manifests/workload/deployments/values.yaml
new file mode 100644
index 0000000..bab7505
--- /dev/null
+++ b/contrib/internal/manifests/workload/deployments/values.yaml
@@ -0,0 +1,4 @@
+namePattern: "benchmark"
+total: 5
+replica: 2000
+paddingBytes: 0
diff --git a/contrib/internal/utils/kperf_cmd.go b/contrib/internal/utils/kperf_cmd.go
index 2330d94..39d56d8 100644
--- a/contrib/internal/utils/kperf_cmd.go
+++ b/contrib/internal/utils/kperf_cmd.go
@@ -23,7 +23,7 @@ func NewKperfRunner(kubeCfgPath string, runnerImage string) *KperfRunner {
 func (kr *KperfRunner) NewNodepool(
 	ctx context.Context,
 	timeout time.Duration,
-	name string, nodes int, maxPods int,
+	name string, nodes int, cpu, memory, maxPods int,
 	affinity string,
 	sharedProviderID string,
 ) error {
@@ -33,8 +33,8 @@ func (kr *KperfRunner) NewNodepool(
 	}
 	args = append(args, "add", name,
 		fmt.Sprintf("--nodes=%v", nodes),
-		fmt.Sprintf("--cpu=%v", 32),
-		fmt.Sprintf("--memory=%v", 96),
+		fmt.Sprintf("--cpu=%v", cpu),
+		fmt.Sprintf("--memory=%v", memory),
 		fmt.Sprintf("--max-pods=%v", maxPods),
 	)
 	if affinity != "" {
diff --git a/contrib/internal/utils/utils.go b/contrib/internal/utils/utils.go
index 9d89787..1ae7d43 100644
--- a/contrib/internal/utils/utils.go
+++ b/contrib/internal/utils/utils.go
@@ -108,18 +108,22 @@ func RepeatJobWith3KPod(ctx context.Context, kubeCfgPath string, namespace strin
 	}
 }
 
-// RepeatRollingUpdate10KPod repeats to rolling-update 10k pods.
-//
-// NOTE: please align with ../manifests/loadprofile/node100_dp5_pod10k.yaml.
-func RepeatRollingUpdate10KPod(ctx context.Context, kubeCfgPath string, releaseName string, podSizeInBytes int, internal time.Duration) (_rollingUpdateFn func(), retErr error) {
-	target := "workload/2kpodper1deployment"
+// DeployAndRepeatRollingUpdateDeployments deploys the deployments and repeatedly rolling-updates them.
+func DeployAndRepeatRollingUpdateDeployments(
+	ctx context.Context,
+	kubeCfgPath string,
+	releaseName string,
+	total, replica, paddingBytes int,
+	internal time.Duration,
+) (rollingUpdateFn, cleanupFn func(), retErr error) {
+
+	target := "workload/deployments"
 	ch, err := manifests.LoadChart(target)
 	if err != nil {
-		return nil, fmt.Errorf("failed to load virtual node chart: %w", err)
+		return nil, nil, fmt.Errorf("failed to load %s chart: %w", target, err)
 	}
 
-	namePattern := "benchmark"
-	total := 5
+	namePattern := releaseName
 
 	releaseCli, err := helmcli.NewReleaseCli(
 		kubeCfgPath,
@@ -130,35 +134,40 @@ func RepeatRollingUpdate10KPod(ctx context.Context, kubeCfgPath string, releaseN
 		ch,
 		nil,
 		helmcli.StringPathValuesApplier(
-			fmt.Sprintf("pattern=%s", namePattern),
+			fmt.Sprintf("namePattern=%s", namePattern),
 			fmt.Sprintf("total=%d", total),
-			fmt.Sprintf("podSizeInBytes=%d", podSizeInBytes),
+			fmt.Sprintf("replica=%d", replica),
+			fmt.Sprintf("paddingBytes=%d", paddingBytes),
 		),
 	)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create a new helm release cli: %w", err)
+		return nil, nil, fmt.Errorf("failed to create a new helm release cli: %w", err)
 	}
 
-	klog.V(0).InfoS("Deploying deployments", "deployments", total, "podSizeInBytes", podSizeInBytes)
+	klog.V(0).InfoS("Deploying deployments",
+		"total", total,
+		"replica", replica,
+		"paddingBytes", paddingBytes,
+	)
 	err = releaseCli.Deploy(ctx, 10*time.Minute)
 	if err != nil {
 		if errors.Is(err, context.Canceled) {
 			klog.V(0).Info("Deploy is canceled")
-			return func() {}, nil
+			return func() {}, func() {}, nil
 		}
-		return nil, fmt.Errorf("failed to deploy 10k pods by helm chart %s: %w", target, err)
+		return nil, nil, fmt.Errorf("failed to deploy helm chart %s: %w", target, err)
 	}
-	klog.V(0).InfoS("Deployed deployments", "deployments", total)
+	klog.V(0).InfoS("Deployed deployments")
 
-	return func() {
-		defer func() {
-			klog.V(0).Infof("Cleanup helm chart %s", target)
-			err := releaseCli.Uninstall()
-			if err != nil {
-				klog.V(0).ErrorS(err, "failed to cleanup", "chart", target)
-			}
-		}()
-
+	cleanupFn = func() {
+		klog.V(0).Infof("Cleanup helm chart %s", target)
+		err := releaseCli.Uninstall()
+		if err != nil {
+			klog.V(0).ErrorS(err, "failed to cleanup", "chart", target)
+		}
+	}
+
+	rollingUpdateFn = func() {
 		for {
 			select {
 			case <-ctx.Done():
@@ -193,7 +202,8 @@ func RepeatRollingUpdate10KPod(ctx context.Context, kubeCfgPath string, releaseN
 				}
 			}
 		}
-	}, nil
+	}
+	return rollingUpdateFn, cleanupFn, nil
 }
 
 // NewLoadProfileFromEmbed reads load profile from embed memory.
@@ -326,7 +336,7 @@ func FetchAPIServerCores(ctx context.Context, kubeCfgPath string) (map[string]in
 // FetchNodeProviderIDByType is used to get one node's provider id with a given
 // instance type.
 func FetchNodeProviderIDByType(ctx context.Context, kubeCfgPath string, instanceType string) (string, error) {
-	clientset, err := buildClientset(kubeCfgPath)
+	clientset, err := BuildClientset(kubeCfgPath)
 	if err != nil {
 		return "", err
 	}
@@ -345,8 +355,8 @@ func FetchNodeProviderIDByType(ctx context.Context, kubeCfgPath string, instance
 	return listResp.Items[0].Spec.ProviderID, nil
 }
 
-// buildClientset returns kubernetes clientset.
-func buildClientset(kubeCfgPath string) (*kubernetes.Clientset, error) {
+// BuildClientset returns kubernetes clientset.
+func BuildClientset(kubeCfgPath string) (*kubernetes.Clientset, error) {
 	config, err := clientcmd.BuildConfigFromFlags("", kubeCfgPath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build client-go config: %w", err)