From 565dfaa418d93367454e60c93a3ec6b1a4783e53 Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Sat, 15 Mar 2025 04:47:26 +0530 Subject: [PATCH 1/9] e2e: Add E2E test for Locality Load Balancing (PreferClose mode) Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 290 +++++++++++++++++++++++++++++++++++ 1 file changed, 290 insertions(+) create mode 100644 test/e2e/locality_lb_test.go diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go new file mode 100644 index 000000000..02967d298 --- /dev/null +++ b/test/e2e/locality_lb_test.go @@ -0,0 +1,290 @@ +//go:build integ +// +build integ + +/* +Production-grade End-to-End Test for Kmesh Locality Load Balancing (PreferClose mode). + +This test performs the following steps: +1. Create a dedicated namespace "sample". +2. Deploy a Service ("helloworld") annotated with PreferClose load balancing. +3. Deploy three helloworld Deployments on three different nodes: + - A local instance on node "ambient-worker" with version "region.zone1.subzone1". + - A remote instance on node "ambient-worker2" with version "region.zone1.subzone2". + - A remote instance on node "ambient-worker3" with version "region.zone2.subzone3". +4. Deploy a sleep client on node "ambient-worker". +5. Verify that: + - Initially, the sleep client’s request is served by the local instance. + - After deleting the local instance, traffic fails over to the remote instance. +6. Cleanup all resources at the end of the test. +*/ + +package kmesh + +import ( + "os" + "strings" + "testing" + "time" + + "istio.io/istio/pkg/test/framework" + "istio.io/istio/pkg/test/shell" + "istio.io/istio/pkg/test/util/retry" +) + +// applyManifest writes the provided manifest into a temporary file and applies it using kubectl. +func applyManifest(ns, manifest string) error { + tmpFile, err := os.CreateTemp("", "manifest-*.yaml") + if err != nil { + return err + } + // Ensure the temporary file is removed after use. + defer os.Remove(tmpFile.Name()) + + // Write the manifest to the temporary file. + if _, err := tmpFile.WriteString(manifest); err != nil { + tmpFile.Close() + return err + } + tmpFile.Close() + + cmd := "kubectl apply -n " + ns + " -f " + tmpFile.Name() + _, err = shell.Execute(true, cmd) + return err +} + +func TestLocalityLoadBalancing(t *testing.T) { + // Declare err at the beginning so it can be reused throughout the test. + var err error + + framework.NewTest(t).Run(func(t framework.TestContext) { + const ns = "sample" + + // Create the test namespace "sample". Log but ignore errors if it already exists. + if _, err := shell.Execute(true, "kubectl create namespace "+ns); err != nil { + t.Logf("Namespace %s might already exist: %v", ns, err) + } + + // Apply the Service manifest with PreferClose load balancing. + serviceYAML := ` +apiVersion: v1 +kind: Service +metadata: + name: helloworld + labels: + app: helloworld +spec: + ports: + - port: 5000 + name: http + selector: + app: helloworld + trafficDistribution: PreferClose +` + if err = applyManifest(ns, serviceYAML); err != nil { + t.Fatalf("Failed to apply Service manifest: %v", err) + } + + // Deploy three helloworld instances with specific localities. + // Instance 1: Local instance on ambient-worker with version region.zone1.subzone1. 
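+		// Note: the helloworld sample image echoes SERVICE_VERSION in its /hello
+		// response, so encoding the locality name in the version string lets the
+		// assertions below identify which instance served each request.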
+ dep1 := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-region-zone1-subzone1 + labels: + app: helloworld + version: region.zone1.subzone1 +spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone1.subzone1 + template: + metadata: + labels: + app: helloworld + version: region.zone1.subzone1 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone1.subzone1 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: ambient-worker +` + if err = applyManifest(ns, dep1); err != nil { + t.Fatalf("Failed to deploy dep1: %v", err) + } + + // Instance 2: Remote instance on ambient-worker2 with version region.zone1.subzone2. + dep2 := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-region-zone1-subzone2 + labels: + app: helloworld + version: region.zone1.subzone2 +spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone1.subzone2 + template: + metadata: + labels: + app: helloworld + version: region.zone1.subzone2 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone1.subzone2 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: ambient-worker2 +` + if err = applyManifest(ns, dep2); err != nil { + t.Fatalf("Failed to deploy dep2: %v", err) + } + + // Instance 3: Remote instance on ambient-worker3 with version region.zone2.subzone3. + dep3 := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-region-zone2-subzone3 + labels: + app: helloworld + version: region.zone2.subzone3 +spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone2.subzone3 + template: + metadata: + labels: + app: helloworld + version: region.zone2.subzone3 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone2.subzone3 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: ambient-worker3 +` + if err = applyManifest(ns, dep3); err != nil { + t.Fatalf("Failed to deploy dep3: %v", err) + } + + // Deploy a sleep client on ambient-worker. + clientDep := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sleep + labels: + app: sleep +spec: + replicas: 1 + selector: + matchLabels: + app: sleep + template: + metadata: + labels: + app: sleep + spec: + containers: + - name: sleep + image: curlimages/curl + command: ["/bin/sleep", "infinity"] + imagePullPolicy: IfNotPresent + nodeSelector: + kubernetes.io/hostname: ambient-worker +` + if err = applyManifest(ns, clientDep); err != nil { + t.Fatalf("Failed to deploy sleep client: %v", err) + } + + // Wait for all deployments to be available. + deployments := []string{ + "helloworld-region-zone1-subzone1", + "helloworld-region-zone1-subzone2", + "helloworld-region-zone2-subzone3", + "sleep", + } + for _, dep := range deployments { + cmd := "kubectl wait --for=condition=available deployment/" + dep + " -n " + ns + " --timeout=120s" + if _, err := shell.Execute(true, cmd); err != nil { + t.Fatalf("Deployment %s not ready: %v", dep, err) + } + } + + // Test Locality Preferred: + // From the sleep client, access the helloworld service and expect a response from the local instance (dep1). 
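+		// Poll until a response carrying the local version string arrives or the
+		// 60s budget is exhausted; transient curl failures simply trigger a retry.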
+ t.Log("Testing locality preferred (expect response from region.zone1.subzone1)...") + var localResponse string + err = retry.Until(func() bool { + out, execErr := shell.Execute(true, "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") + if execErr != nil { + t.Logf("Curl error: %v", execErr) + return false + } + t.Logf("Curl output: %s", out) + if strings.Contains(out, "region.zone1.subzone1") { + localResponse = out + return true + } + return false + }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)) + if err != nil { + t.Fatalf("Locality preferred test failed: expected response from region.zone1.subzone1, got: %s", localResponse) + } + t.Log("Locality preferred test passed.") + + // Test Locality Failover: + // Delete the local deployment (dep1) and expect traffic to fail over to the remote instance (dep2). + t.Log("Testing locality failover (expect response from region.zone1.subzone2)...") + if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil { + t.Fatalf("Failed to delete local deployment: %v", err) + } + var failoverResponse string + err = retry.Until(func() bool { + out, execErr := shell.Execute(true, "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") + if execErr != nil { + t.Logf("Curl error after failover: %v", execErr) + return false + } + t.Logf("Curl output after failover: %s", out) + if strings.Contains(out, "region.zone1.subzone2") { + failoverResponse = out + return true + } + return false + }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)) + if err != nil { + t.Fatalf("Locality failover test failed: expected response from region.zone1.subzone2, got: %s", failoverResponse) + } + t.Log("Locality failover test passed.") + }) +} From a45f6d4724b55f10d62dae40c3c5efa520021c31 Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Sat, 15 Mar 2025 04:58:36 +0530 Subject: [PATCH 2/9] fix copyright issue Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index 02967d298..0f347a5dc 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,23 +1,25 @@ //go:build integ // +build integ -/* -Production-grade End-to-End Test for Kmesh Locality Load Balancing (PreferClose mode). - -This test performs the following steps: -1. Create a dedicated namespace "sample". -2. Deploy a Service ("helloworld") annotated with PreferClose load balancing. -3. Deploy three helloworld Deployments on three different nodes: - - A local instance on node "ambient-worker" with version "region.zone1.subzone1". - - A remote instance on node "ambient-worker2" with version "region.zone1.subzone2". - - A remote instance on node "ambient-worker3" with version "region.zone2.subzone3". -4. Deploy a sleep client on node "ambient-worker". -5. Verify that: - - Initially, the sleep client’s request is served by the local instance. - - After deleting the local instance, traffic fails over to the remote instance. -6. Cleanup all resources at the end of the test. -*/ +//go:build integ +// +build integ +/* + * Copyright The Kmesh Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package kmesh import ( From 9d1489f3585e7e1f3466b0b60199de5c5f92455f Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Sat, 15 Mar 2025 05:09:06 +0530 Subject: [PATCH 3/9] minor changes Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index 0f347a5dc..100b638ef 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,9 +1,6 @@ //go:build integ // +build integ -//go:build integ -// +build integ - /* * Copyright The Kmesh Authors. * @@ -19,7 +16,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - + package kmesh import ( From 991fd4d4e094c19efbae2b6d0eeb8fd5846da15f Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Thu, 27 Mar 2025 03:40:44 +0530 Subject: [PATCH 4/9] minor changes Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 506 ++++++++++++++++------------------- 1 file changed, 236 insertions(+), 270 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index 100b638ef..cb279140d 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -17,273 +17,239 @@ * limitations under the License. */ -package kmesh - -import ( - "os" - "strings" - "testing" - "time" - - "istio.io/istio/pkg/test/framework" - "istio.io/istio/pkg/test/shell" - "istio.io/istio/pkg/test/util/retry" -) - -// applyManifest writes the provided manifest into a temporary file and applies it using kubectl. -func applyManifest(ns, manifest string) error { - tmpFile, err := os.CreateTemp("", "manifest-*.yaml") - if err != nil { - return err - } - // Ensure the temporary file is removed after use. - defer os.Remove(tmpFile.Name()) - - // Write the manifest to the temporary file. - if _, err := tmpFile.WriteString(manifest); err != nil { - tmpFile.Close() - return err - } - tmpFile.Close() - - cmd := "kubectl apply -n " + ns + " -f " + tmpFile.Name() - _, err = shell.Execute(true, cmd) - return err -} - -func TestLocalityLoadBalancing(t *testing.T) { - // Declare err at the beginning so it can be reused throughout the test. - var err error - - framework.NewTest(t).Run(func(t framework.TestContext) { - const ns = "sample" - - // Create the test namespace "sample". Log but ignore errors if it already exists. - if _, err := shell.Execute(true, "kubectl create namespace "+ns); err != nil { - t.Logf("Namespace %s might already exist: %v", ns, err) - } - - // Apply the Service manifest with PreferClose load balancing. 
- serviceYAML := ` -apiVersion: v1 -kind: Service -metadata: - name: helloworld - labels: - app: helloworld -spec: - ports: - - port: 5000 - name: http - selector: - app: helloworld - trafficDistribution: PreferClose -` - if err = applyManifest(ns, serviceYAML); err != nil { - t.Fatalf("Failed to apply Service manifest: %v", err) - } - - // Deploy three helloworld instances with specific localities. - // Instance 1: Local instance on ambient-worker with version region.zone1.subzone1. - dep1 := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: helloworld-region-zone1-subzone1 - labels: - app: helloworld - version: region.zone1.subzone1 -spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone1.subzone1 - template: - metadata: - labels: - app: helloworld - version: region.zone1.subzone1 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone1.subzone1 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: ambient-worker -` - if err = applyManifest(ns, dep1); err != nil { - t.Fatalf("Failed to deploy dep1: %v", err) - } - - // Instance 2: Remote instance on ambient-worker2 with version region.zone1.subzone2. - dep2 := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: helloworld-region-zone1-subzone2 - labels: - app: helloworld - version: region.zone1.subzone2 -spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone1.subzone2 - template: - metadata: - labels: - app: helloworld - version: region.zone1.subzone2 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone1.subzone2 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: ambient-worker2 -` - if err = applyManifest(ns, dep2); err != nil { - t.Fatalf("Failed to deploy dep2: %v", err) - } - - // Instance 3: Remote instance on ambient-worker3 with version region.zone2.subzone3. - dep3 := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: helloworld-region-zone2-subzone3 - labels: - app: helloworld - version: region.zone2.subzone3 -spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone2.subzone3 - template: - metadata: - labels: - app: helloworld - version: region.zone2.subzone3 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone2.subzone3 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: ambient-worker3 -` - if err = applyManifest(ns, dep3); err != nil { - t.Fatalf("Failed to deploy dep3: %v", err) - } - - // Deploy a sleep client on ambient-worker. - clientDep := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sleep - labels: - app: sleep -spec: - replicas: 1 - selector: - matchLabels: - app: sleep - template: - metadata: - labels: - app: sleep - spec: - containers: - - name: sleep - image: curlimages/curl - command: ["/bin/sleep", "infinity"] - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/hostname: ambient-worker -` - if err = applyManifest(ns, clientDep); err != nil { - t.Fatalf("Failed to deploy sleep client: %v", err) - } - - // Wait for all deployments to be available. 
- deployments := []string{ - "helloworld-region-zone1-subzone1", - "helloworld-region-zone1-subzone2", - "helloworld-region-zone2-subzone3", - "sleep", - } - for _, dep := range deployments { - cmd := "kubectl wait --for=condition=available deployment/" + dep + " -n " + ns + " --timeout=120s" - if _, err := shell.Execute(true, cmd); err != nil { - t.Fatalf("Deployment %s not ready: %v", dep, err) - } - } - - // Test Locality Preferred: - // From the sleep client, access the helloworld service and expect a response from the local instance (dep1). - t.Log("Testing locality preferred (expect response from region.zone1.subzone1)...") - var localResponse string - err = retry.Until(func() bool { - out, execErr := shell.Execute(true, "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") - if execErr != nil { - t.Logf("Curl error: %v", execErr) - return false - } - t.Logf("Curl output: %s", out) - if strings.Contains(out, "region.zone1.subzone1") { - localResponse = out - return true - } - return false - }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)) - if err != nil { - t.Fatalf("Locality preferred test failed: expected response from region.zone1.subzone1, got: %s", localResponse) - } - t.Log("Locality preferred test passed.") - - // Test Locality Failover: - // Delete the local deployment (dep1) and expect traffic to fail over to the remote instance (dep2). - t.Log("Testing locality failover (expect response from region.zone1.subzone2)...") - if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil { - t.Fatalf("Failed to delete local deployment: %v", err) - } - var failoverResponse string - err = retry.Until(func() bool { - out, execErr := shell.Execute(true, "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") - if execErr != nil { - t.Logf("Curl error after failover: %v", execErr) - return false - } - t.Logf("Curl output after failover: %s", out) - if strings.Contains(out, "region.zone1.subzone2") { - failoverResponse = out - return true - } - return false - }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)) - if err != nil { - t.Fatalf("Locality failover test failed: expected response from region.zone1.subzone2, got: %s", failoverResponse) - } - t.Log("Locality failover test passed.") - }) -} + package kmesh + + import ( + "os" + "strings" + "testing" + "time" + + "istio.io/istio/pkg/test/framework" + "istio.io/istio/pkg/test/shell" + "istio.io/istio/pkg/test/util/retry" + ) + + // applyManifest writes the provided manifest into a temporary file and applies it using kubectl. + func applyManifest(ns, manifest string) error { + tmpFile, err := os.CreateTemp("", "manifest-*.yaml") + if err != nil { + return err + } + // Ensure the temporary file is removed after use. + defer os.Remove(tmpFile.Name()) + + // Write the manifest to the temporary file. + if _, err := tmpFile.WriteString(manifest); err != nil { + tmpFile.Close() + return err + } + tmpFile.Close() + + cmd := "kubectl apply -n " + ns + " -f " + tmpFile.Name() + _, err = shell.Execute(true, cmd) + return err + } + + func TestLocalityLoadBalancing(t *testing.T) { + framework.NewTest(t).Run(func(t framework.TestContext) { + const ns = "sample" + + // Create the test namespace "sample". Log but ignore errors if it already exists. 
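+		// "kubectl create namespace" exits non-zero when the namespace already
+		// exists, so the error is only logged, keeping the test re-runnable.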
+ if _, err := shell.Execute(true, "kubectl create namespace "+ns); err != nil { + t.Logf("Namespace %s might already exist: %v", ns, err) + } + + // Apply the Service manifest with PreferClose load balancing. + serviceYAML := ` + apiVersion: v1 + kind: Service + metadata: + name: helloworld + labels: + app: helloworld + spec: + ports: + - port: 5000 + name: http + selector: + app: helloworld + trafficDistribution: PreferClose + ` + if err := applyManifest(ns, serviceYAML); err != nil { + t.Fatalf("Failed to apply Service manifest: %v", err) + } + + // Deploy the local instance (dep1) on the worker node. + // This instance simulates the "local" deployment. + dep1 := ` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: helloworld-region-zone1-subzone1 + labels: + app: helloworld + version: region.zone1.subzone1 + spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone1.subzone1 + template: + metadata: + labels: + app: helloworld + version: region.zone1.subzone1 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone1.subzone1 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: kmesh-testing-worker + ` + if err := applyManifest(ns, dep1); err != nil { + t.Fatalf("Failed to deploy local instance (dep1): %v", err) + } + + // Deploy the remote instance (dep2) on the control-plane node. + // Note: A toleration is added here so that the pod can be scheduled on the control-plane. + dep2 := ` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: helloworld-region-zone1-subzone2 + labels: + app: helloworld + version: region.zone1.subzone2 + spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone1.subzone2 + template: + metadata: + labels: + app: helloworld + version: region.zone1.subzone2 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone1.subzone2 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: kmesh-testing-control-plane + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + ` + if err := applyManifest(ns, dep2); err != nil { + t.Fatalf("Failed to deploy remote instance (dep2): %v", err) + } + + // Deploy a sleep client on the worker node. + // This client will originate requests from the "local" node. + clientDep := ` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: sleep + labels: + app: sleep + spec: + replicas: 1 + selector: + matchLabels: + app: sleep + template: + metadata: + labels: + app: sleep + spec: + containers: + - name: sleep + image: curlimages/curl + command: ["/bin/sleep", "infinity"] + imagePullPolicy: IfNotPresent + nodeSelector: + kubernetes.io/hostname: kmesh-testing-worker + ` + if err := applyManifest(ns, clientDep); err != nil { + t.Fatalf("Failed to deploy sleep client: %v", err) + } + + // Wait for all deployments to become available. 
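+		// kubectl wait exits non-zero if a Deployment does not reach the
+		// Available condition within --timeout, surfacing scheduling or
+		// image-pull problems here rather than as opaque curl failures later.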
+ deployments := []string{ + "helloworld-region-zone1-subzone1", + "helloworld-region-zone1-subzone2", + "sleep", + } + for _, dep := range deployments { + cmd := "kubectl wait --for=condition=available deployment/" + dep + " -n " + ns + " --timeout=120s" + if _, err := shell.Execute(true, cmd); err != nil { + t.Fatalf("Deployment %s not ready: %v", dep, err) + } + } + + // Test Locality Preferred: + // From the sleep client, access the helloworld service and expect a response from the local instance. + t.Log("Testing locality preferred (expect response from region.zone1.subzone1)...") + var localResponse string + if err := retry.Until(func() bool { + out, execErr := shell.Execute(true, + "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") + if execErr != nil { + t.Logf("Curl error: %v", execErr) + return false + } + t.Logf("Curl output: %s", out) + if strings.Contains(out, "region.zone1.subzone1") { + localResponse = out + return true + } + return false + }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { + t.Fatalf("Locality preferred test failed: expected response from region.zone1.subzone1, got: %s", localResponse) + } + t.Log("Locality preferred test passed.") + + // Test Locality Failover: + // Delete the local deployment and expect traffic to fail over to the remote instance. + t.Log("Testing locality failover (expect response from region.zone1.subzone2)...") + if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil { + t.Fatalf("Failed to delete local instance (dep1): %v", err) + } + var failoverResponse string + if err := retry.Until(func() bool { + out, execErr := shell.Execute(true, + "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") + if execErr != nil { + t.Logf("Curl error after failover: %v", execErr) + return false + } + t.Logf("Curl output after failover: %s", out) + if strings.Contains(out, "region.zone1.subzone2") { + failoverResponse = out + return true + } + return false + }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { + t.Fatalf("Locality failover test failed: expected response from region.zone1.subzone2, got: %s", failoverResponse) + } + t.Log("Locality failover test passed.") + }) + } + \ No newline at end of file From 1d17adb78eb511da8a98bedd38a367688deb8af2 Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Fri, 28 Mar 2025 06:00:41 +0530 Subject: [PATCH 5/9] test(e2e): add locality load balancing end-to-end test Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 92 +++++++++++++++++++++--------------- 1 file changed, 54 insertions(+), 38 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index cb279140d..297bb7482 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,4 +1,3 @@ -//go:build integ // +build integ /* @@ -36,10 +35,8 @@ if err != nil { return err } - // Ensure the temporary file is removed after use. defer os.Remove(tmpFile.Name()) - // Write the manifest to the temporary file. if _, err := tmpFile.WriteString(manifest); err != nil { tmpFile.Close() return err @@ -51,16 +48,35 @@ return err } + // extractResolvedIP parses the nslookup output to extract the IP address for the service. 
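+ // Illustrative (assumed) output shape this parser handles:
+ //   Server:    10.96.0.10
+ //   Address:   10.96.0.10:53
+ //   Name:      helloworld.sample.svc.cluster.local
+ //   Address:   10.96.131.7
+ // The DNS server's own "Address:" line (":53" suffix or bracketed IPv6) is
+ // skipped and the first remaining address is returned.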
+ func extractResolvedIP(nslookup string) string { + lines := strings.Split(nslookup, "\n") + var addresses []string + for _, line := range lines { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "Address:") { + addr := strings.TrimSpace(strings.TrimPrefix(trimmed, "Address:")) + if strings.Contains(addr, ":53") || strings.HasPrefix(addr, "[") { + continue + } + addresses = append(addresses, addr) + } + } + if len(addresses) > 0 { + return addresses[0] + } + return "" + } + func TestLocalityLoadBalancing(t *testing.T) { framework.NewTest(t).Run(func(t framework.TestContext) { const ns = "sample" + const fqdn = "helloworld." + ns + ".svc.cluster.local" - // Create the test namespace "sample". Log but ignore errors if it already exists. - if _, err := shell.Execute(true, "kubectl create namespace "+ns); err != nil { - t.Logf("Namespace %s might already exist: %v", ns, err) - } + // Create the namespace. + shell.Execute(true, "kubectl create namespace "+ns) - // Apply the Service manifest with PreferClose load balancing. + // Apply the Service manifest. serviceYAML := ` apiVersion: v1 kind: Service @@ -80,9 +96,8 @@ t.Fatalf("Failed to apply Service manifest: %v", err) } - // Deploy the local instance (dep1) on the worker node. - // This instance simulates the "local" deployment. - dep1 := ` + // Deploy the local instance on the worker node. + depLocal := ` apiVersion: apps/v1 kind: Deployment metadata: @@ -114,13 +129,12 @@ nodeSelector: kubernetes.io/hostname: kmesh-testing-worker ` - if err := applyManifest(ns, dep1); err != nil { - t.Fatalf("Failed to deploy local instance (dep1): %v", err) + if err := applyManifest(ns, depLocal); err != nil { + t.Fatalf("Failed to deploy local instance: %v", err) } - // Deploy the remote instance (dep2) on the control-plane node. - // Note: A toleration is added here so that the pod can be scheduled on the control-plane. - dep2 := ` + // Deploy the remote instance on the control-plane node. + depRemote := ` apiVersion: apps/v1 kind: Deployment metadata: @@ -156,12 +170,11 @@ operator: "Exists" effect: "NoSchedule" ` - if err := applyManifest(ns, dep2); err != nil { - t.Fatalf("Failed to deploy remote instance (dep2): %v", err) + if err := applyManifest(ns, depRemote); err != nil { + t.Fatalf("Failed to deploy remote instance: %v", err) } - // Deploy a sleep client on the worker node. - // This client will originate requests from the "local" node. + // Deploy the sleep client on the worker node. clientDep := ` apiVersion: apps/v1 kind: Deployment @@ -191,7 +204,7 @@ t.Fatalf("Failed to deploy sleep client: %v", err) } - // Wait for all deployments to become available. + // Wait for deployments. deployments := []string{ "helloworld-region-zone1-subzone1", "helloworld-region-zone1-subzone2", @@ -204,52 +217,55 @@ } } - // Test Locality Preferred: - // From the sleep client, access the helloworld service and expect a response from the local instance. - t.Log("Testing locality preferred (expect response from region.zone1.subzone1)...") + // Get the sleep pod name. + sleepPod, err := shell.Execute(true, "kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}'") + if err != nil || sleepPod == "" { + t.Fatalf("Failed to get sleep pod: %v", err) + } + + // Extract the resolved IP via nslookup. 
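+		// Resolving once and pinning the address with curl --resolve keeps every
+		// retry hitting the same ClusterIP, so a flaky in-pod DNS lookup cannot
+		// be mistaken for a load-balancing failure.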
+ nslookup, _ := shell.Execute(true, "kubectl exec -n "+ns+" "+sleepPod+" -- nslookup "+fqdn) + resolvedIP := extractResolvedIP(nslookup) + if resolvedIP == "" { + t.Fatalf("Failed to extract resolved IP from nslookup output") + } + + // Test Locality Preference. var localResponse string if err := retry.Until(func() bool { out, execErr := shell.Execute(true, - "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") + "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") if execErr != nil { - t.Logf("Curl error: %v", execErr) return false } - t.Logf("Curl output: %s", out) if strings.Contains(out, "region.zone1.subzone1") { localResponse = out return true } return false }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { - t.Fatalf("Locality preferred test failed: expected response from region.zone1.subzone1, got: %s", localResponse) + t.Fatalf("Locality preferred test failed: expected response from region.zone1/subzone1, got: %s", localResponse) } - t.Log("Locality preferred test passed.") - // Test Locality Failover: - // Delete the local deployment and expect traffic to fail over to the remote instance. - t.Log("Testing locality failover (expect response from region.zone1.subzone2)...") + // Test Locality Failover. if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil { - t.Fatalf("Failed to delete local instance (dep1): %v", err) + t.Fatalf("Failed to delete local instance: %v", err) } var failoverResponse string if err := retry.Until(func() bool { out, execErr := shell.Execute(true, - "kubectl exec -n "+ns+" $(kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sSL http://helloworld:5000/hello") + "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") if execErr != nil { - t.Logf("Curl error after failover: %v", execErr) return false } - t.Logf("Curl output after failover: %s", out) if strings.Contains(out, "region.zone1.subzone2") { failoverResponse = out return true } return false }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { - t.Fatalf("Locality failover test failed: expected response from region.zone1.subzone2, got: %s", failoverResponse) + t.Fatalf("Locality failover test failed: expected response from region.zone1/subzone2, got: %s", failoverResponse) } - t.Log("Locality failover test passed.") }) } \ No newline at end of file From 8a602a4b9577d9302d5ba87432aafdb6e99601e7 Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Fri, 28 Mar 2025 06:38:10 +0530 Subject: [PATCH 6/9] test(e2e): add locality load balancing end-to-end test Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 547 ++++++++++++++++++----------------- 1 file changed, 280 insertions(+), 267 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index 297bb7482..4073a1c01 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,271 +1,284 @@ // +build integ -/* - * Copyright The Kmesh Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +package kmesh - package kmesh +import ( + "os" + "strings" + "testing" + "time" - import ( - "os" - "strings" - "testing" - "time" - - "istio.io/istio/pkg/test/framework" - "istio.io/istio/pkg/test/shell" - "istio.io/istio/pkg/test/util/retry" - ) - - // applyManifest writes the provided manifest into a temporary file and applies it using kubectl. - func applyManifest(ns, manifest string) error { - tmpFile, err := os.CreateTemp("", "manifest-*.yaml") - if err != nil { - return err - } - defer os.Remove(tmpFile.Name()) - - if _, err := tmpFile.WriteString(manifest); err != nil { - tmpFile.Close() - return err - } - tmpFile.Close() - - cmd := "kubectl apply -n " + ns + " -f " + tmpFile.Name() - _, err = shell.Execute(true, cmd) - return err - } - - // extractResolvedIP parses the nslookup output to extract the IP address for the service. - func extractResolvedIP(nslookup string) string { - lines := strings.Split(nslookup, "\n") - var addresses []string - for _, line := range lines { - trimmed := strings.TrimSpace(line) - if strings.HasPrefix(trimmed, "Address:") { - addr := strings.TrimSpace(strings.TrimPrefix(trimmed, "Address:")) - if strings.Contains(addr, ":53") || strings.HasPrefix(addr, "[") { - continue - } - addresses = append(addresses, addr) - } - } - if len(addresses) > 0 { - return addresses[0] - } - return "" - } - - func TestLocalityLoadBalancing(t *testing.T) { - framework.NewTest(t).Run(func(t framework.TestContext) { - const ns = "sample" - const fqdn = "helloworld." + ns + ".svc.cluster.local" - - // Create the namespace. - shell.Execute(true, "kubectl create namespace "+ns) - - // Apply the Service manifest. - serviceYAML := ` - apiVersion: v1 - kind: Service - metadata: - name: helloworld - labels: - app: helloworld - spec: - ports: - - port: 5000 - name: http - selector: - app: helloworld - trafficDistribution: PreferClose - ` - if err := applyManifest(ns, serviceYAML); err != nil { - t.Fatalf("Failed to apply Service manifest: %v", err) - } - - // Deploy the local instance on the worker node. - depLocal := ` - apiVersion: apps/v1 - kind: Deployment - metadata: - name: helloworld-region-zone1-subzone1 - labels: - app: helloworld - version: region.zone1.subzone1 - spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone1.subzone1 - template: - metadata: - labels: - app: helloworld - version: region.zone1.subzone1 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone1.subzone1 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: kmesh-testing-worker - ` - if err := applyManifest(ns, depLocal); err != nil { - t.Fatalf("Failed to deploy local instance: %v", err) - } - - // Deploy the remote instance on the control-plane node. 
- depRemote := ` - apiVersion: apps/v1 - kind: Deployment - metadata: - name: helloworld-region-zone1-subzone2 - labels: - app: helloworld - version: region.zone1.subzone2 - spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone1.subzone2 - template: - metadata: - labels: - app: helloworld - version: region.zone1.subzone2 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone1.subzone2 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: kmesh-testing-control-plane - tolerations: - - key: "node-role.kubernetes.io/control-plane" - operator: "Exists" - effect: "NoSchedule" - ` - if err := applyManifest(ns, depRemote); err != nil { - t.Fatalf("Failed to deploy remote instance: %v", err) - } - - // Deploy the sleep client on the worker node. - clientDep := ` - apiVersion: apps/v1 - kind: Deployment - metadata: - name: sleep - labels: - app: sleep - spec: - replicas: 1 - selector: - matchLabels: - app: sleep - template: - metadata: - labels: - app: sleep - spec: - containers: - - name: sleep - image: curlimages/curl - command: ["/bin/sleep", "infinity"] - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/hostname: kmesh-testing-worker - ` - if err := applyManifest(ns, clientDep); err != nil { - t.Fatalf("Failed to deploy sleep client: %v", err) - } - - // Wait for deployments. - deployments := []string{ - "helloworld-region-zone1-subzone1", - "helloworld-region-zone1-subzone2", - "sleep", - } - for _, dep := range deployments { - cmd := "kubectl wait --for=condition=available deployment/" + dep + " -n " + ns + " --timeout=120s" - if _, err := shell.Execute(true, cmd); err != nil { - t.Fatalf("Deployment %s not ready: %v", dep, err) - } - } - - // Get the sleep pod name. - sleepPod, err := shell.Execute(true, "kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}'") - if err != nil || sleepPod == "" { - t.Fatalf("Failed to get sleep pod: %v", err) - } - - // Extract the resolved IP via nslookup. - nslookup, _ := shell.Execute(true, "kubectl exec -n "+ns+" "+sleepPod+" -- nslookup "+fqdn) - resolvedIP := extractResolvedIP(nslookup) - if resolvedIP == "" { - t.Fatalf("Failed to extract resolved IP from nslookup output") - } - - // Test Locality Preference. - var localResponse string - if err := retry.Until(func() bool { - out, execErr := shell.Execute(true, - "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") - if execErr != nil { - return false - } - if strings.Contains(out, "region.zone1.subzone1") { - localResponse = out - return true - } - return false - }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { - t.Fatalf("Locality preferred test failed: expected response from region.zone1/subzone1, got: %s", localResponse) - } - - // Test Locality Failover. 
- if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil { - t.Fatalf("Failed to delete local instance: %v", err) - } - var failoverResponse string - if err := retry.Until(func() bool { - out, execErr := shell.Execute(true, - "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") - if execErr != nil { - return false - } - if strings.Contains(out, "region.zone1.subzone2") { - failoverResponse = out - return true - } - return false - }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { - t.Fatalf("Locality failover test failed: expected response from region.zone1/subzone2, got: %s", failoverResponse) - } - }) - } - \ No newline at end of file + "istio.io/istio/pkg/test/framework" + "istio.io/istio/pkg/test/shell" + "istio.io/istio/pkg/test/util/retry" +) + +// applyManifest writes the provided manifest into a temporary file and applies it using kubectl. +func applyManifest(ns, manifest string) error { + tmpFile, err := os.CreateTemp("", "manifest-*.yaml") + if err != nil { + return err + } + defer os.Remove(tmpFile.Name()) + + if _, err := tmpFile.WriteString(manifest); err != nil { + tmpFile.Close() + return err + } + tmpFile.Close() + + cmd := "kubectl apply -n " + ns + " -f " + tmpFile.Name() + _, err = shell.Execute(true, cmd) + return err +} + +// extractResolvedIP parses the nslookup output to extract the IP address for the service. +func extractResolvedIP(nslookup string) string { + // nslookup output typically contains two "Address:" lines. + // The first is the DNS server; the second is the resolved IP. + lines := strings.Split(nslookup, "\n") + var addresses []string + for _, line := range lines { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "Address:") { + // Remove the "Address:" prefix and trim again. + addr := strings.TrimSpace(strings.TrimPrefix(trimmed, "Address:")) + // If the address is enclosed in brackets (e.g. "[fd00:10:96::a]:53"), skip it. + if strings.Contains(addr, ":53") || strings.HasPrefix(addr, "[") { + continue + } + addresses = append(addresses, addr) + } + } + if len(addresses) > 0 { + return addresses[0] + } + return "" +} + +func TestLocalityLoadBalancing(t *testing.T) { + framework.NewTest(t).Run(func(t framework.TestContext) { + const ns = "sample" + // Fully qualified domain name for the service. + const fqdn = "helloworld." + ns + ".svc.cluster.local" + + // Create the test namespace. + if _, err := shell.Execute(true, "kubectl create namespace "+ns); err != nil { + t.Logf("Namespace %s might already exist: %v", ns, err) + } + + // Debug: List current pods and endpoints. + pods, _ := shell.Execute(true, "kubectl get pods -n "+ns) + t.Logf("Initial pods in namespace %s:\n%s", ns, pods) + endpoints, _ := shell.Execute(true, "kubectl get endpoints helloworld -n "+ns) + t.Logf("Initial endpoints for helloworld service:\n%s", endpoints) + + // Apply the Service manifest with PreferClose locality load balancing. + serviceYAML := ` +apiVersion: v1 +kind: Service +metadata: + name: helloworld + labels: + app: helloworld +spec: + ports: + - port: 5000 + name: http + selector: + app: helloworld + trafficDistribution: PreferClose +` + if err := applyManifest(ns, serviceYAML); err != nil { + t.Fatalf("Failed to apply Service manifest: %v", err) + } + + // Deploy the local instance (dep1) on the worker node. 
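+		// The nodeSelector pins this replica to a specific node; its effective
+		// locality comes from that node's topology.kubernetes.io/{region,zone,subzone}
+		// labels, while the version label only names the locality the assertions expect.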
+ depLocal := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-region-zone1-subzone1 + labels: + app: helloworld + version: region.zone1.subzone1 +spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone1.subzone1 + template: + metadata: + labels: + app: helloworld + version: region.zone1.subzone1 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone1.subzone1 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: kmesh-testing-worker +` + if err := applyManifest(ns, depLocal); err != nil { + t.Fatalf("Failed to deploy local instance (dep1): %v", err) + } + + // Deploy the remote instance (dep2) on the control-plane node. + depRemote := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-region-zone1-subzone2 + labels: + app: helloworld + version: region.zone1.subzone2 +spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: region.zone1.subzone2 + template: + metadata: + labels: + app: helloworld + version: region.zone1.subzone2 + spec: + containers: + - name: helloworld + env: + - name: SERVICE_VERSION + value: region.zone1.subzone2 + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + nodeSelector: + kubernetes.io/hostname: kmesh-testing-control-plane + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" +` + if err := applyManifest(ns, depRemote); err != nil { + t.Fatalf("Failed to deploy remote instance (dep2): %v", err) + } + + // Deploy a sleep client on the worker node. + clientDep := ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sleep + labels: + app: sleep +spec: + replicas: 1 + selector: + matchLabels: + app: sleep + template: + metadata: + labels: + app: sleep + spec: + containers: + - name: sleep + image: curlimages/curl + command: ["/bin/sleep", "infinity"] + imagePullPolicy: IfNotPresent + nodeSelector: + kubernetes.io/hostname: kmesh-testing-worker +` + if err := applyManifest(ns, clientDep); err != nil { + t.Fatalf("Failed to deploy sleep client: %v", err) + } + + // Wait for all deployments to be available. + deployments := []string{ + "helloworld-region-zone1-subzone1", + "helloworld-region-zone1-subzone2", + "sleep", + } + for _, dep := range deployments { + cmd := "kubectl wait --for=condition=available deployment/" + dep + " -n " + ns + " --timeout=120s" + if _, err := shell.Execute(true, cmd); err != nil { + t.Fatalf("Deployment %s not ready: %v", dep, err) + } + } + + // Debug: List pods and endpoints after deployments. + pods, _ = shell.Execute(true, "kubectl get pods -n "+ns) + t.Logf("Pods after deployment in namespace %s:\n%s", ns, pods) + endpoints, _ = shell.Execute(true, "kubectl get endpoints helloworld -n "+ns) + t.Logf("Endpoints for service helloworld after deployment:\n%s", endpoints) + + // Debug: Check DNS resolution from the sleep pod. 
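+		// A failure here usually means the Service object is missing or cluster
+		// DNS is not ready, which would otherwise surface only as opaque curl
+		// errors in the retry loops below.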
+ sleepPod, err := shell.Execute(true, "kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}'") + if err != nil || sleepPod == "" { + t.Fatalf("Failed to get sleep pod: %v", err) + } + nslookup, _ := shell.Execute(true, "kubectl exec -n "+ns+" "+sleepPod+" -- nslookup "+fqdn) + t.Logf("nslookup output for %s:\n%s", fqdn, nslookup) + resolvedIP := extractResolvedIP(nslookup) + if resolvedIP == "" { + t.Fatalf("Failed to extract resolved IP from nslookup output") + } + t.Logf("Extracted resolved IP: %s", resolvedIP) + + // Test Locality Preference (PreferClose): + t.Log("Testing locality preferred (expect response from region.zone1/subzone1)...") + var localResponse string + if err := retry.Until(func() bool { + t.Logf("Attempting curl request at %s...", time.Now().Format(time.RFC3339)) + // Use --resolve to force curl to use the extracted IP. + out, execErr := shell.Execute(true, + "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") + if execErr != nil { + t.Logf("Curl error: %v", execErr) + return false + } + t.Logf("Curl output: %s", out) + if strings.Contains(out, "region.zone1.subzone1") { + localResponse = out + return true + } + return false + }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { + t.Fatalf("Locality preferred test failed: expected response from region.zone1/subzone1, got: %s", localResponse) + } + t.Log("Locality preferred test passed.") + + // Test Locality Failover: + t.Log("Testing locality failover (expect response from region.zone1/subzone2)...") + if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil { + t.Fatalf("Failed to delete local instance (dep1): %v", err) + } + var failoverResponse string + if err := retry.Until(func() bool { + t.Logf("Attempting curl (failover) at %s...", time.Now().Format(time.RFC3339)) + out, execErr := shell.Execute(true, + "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") + if execErr != nil { + t.Logf("Curl error after failover: %v", execErr) + return false + } + t.Logf("Curl output after failover: %s", out) + if strings.Contains(out, "region.zone1.subzone2") { + failoverResponse = out + return true + } + return false + }, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil { + t.Fatalf("Locality failover test failed: expected response from region.zone1/subzone2, got: %s", failoverResponse) + } + t.Log("Locality failover test passed.") + }) +} From 8a6619977005a27698837823a8b4d7df7266bc3e Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Fri, 28 Mar 2025 06:56:43 +0530 Subject: [PATCH 7/9] test(e2e): add locality load balancing end-to-end test Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index 4073a1c01..b4120091d 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,5 +1,21 @@ // +build integ +/* + * Copyright The Kmesh Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package kmesh import ( @@ -214,13 +230,13 @@ spec: } } - // Debug: List pods and endpoints after deployments. + // List pods and endpoints after deployments. pods, _ = shell.Execute(true, "kubectl get pods -n "+ns) t.Logf("Pods after deployment in namespace %s:\n%s", ns, pods) endpoints, _ = shell.Execute(true, "kubectl get endpoints helloworld -n "+ns) t.Logf("Endpoints for service helloworld after deployment:\n%s", endpoints) - // Debug: Check DNS resolution from the sleep pod. + // Check DNS resolution from the sleep pod. sleepPod, err := shell.Execute(true, "kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}'") if err != nil || sleepPod == "" { t.Fatalf("Failed to get sleep pod: %v", err) @@ -238,7 +254,6 @@ spec: var localResponse string if err := retry.Until(func() bool { t.Logf("Attempting curl request at %s...", time.Now().Format(time.RFC3339)) - // Use --resolve to force curl to use the extracted IP. out, execErr := shell.Execute(true, "kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello") if execErr != nil { From 46df40e53331221a9bcaede95bd843aa0fd4e6a4 Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Mon, 26 May 2025 17:04:27 +0530 Subject: [PATCH 8/9] e2e test for locality_lb Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index b4120091d..4c8eb322b 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,3 +1,4 @@ +//go:build integ // +build integ /* From cd0eeeeb8b9d8e44235d28fa7951b2cf0c9f48c1 Mon Sep 17 00:00:00 2001 From: ravjot07 Date: Mon, 26 May 2025 17:08:05 +0530 Subject: [PATCH 9/9] e2e test for locality_lb Signed-off-by: ravjot07 --- test/e2e/locality_lb_test.go | 834 +++++++++++++++++++++++------------ 1 file changed, 555 insertions(+), 279 deletions(-) diff --git a/test/e2e/locality_lb_test.go b/test/e2e/locality_lb_test.go index 4c8eb322b..1bc5e42dd 100644 --- a/test/e2e/locality_lb_test.go +++ b/test/e2e/locality_lb_test.go @@ -1,4 +1,3 @@ -//go:build integ // +build integ /* @@ -17,284 +16,561 @@ * limitations under the License. 
*/ -package kmesh + package kmesh -import ( - "os" - "strings" - "testing" - "time" + import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "istio.io/istio/pkg/test/framework" + "istio.io/istio/pkg/test/shell" + "istio.io/istio/pkg/test/util/retry" + ) + + func runCommand(ctx framework.TestContext, cmd string) string { + out, err := shell.Execute(true, cmd) + if err != nil { + ctx.Fatalf("Command %q failed: %v\n%s", cmd, err, out) + } + ctx.Logf(">>> Command succeeded: %s\n%s", cmd, out) + return out + } - "istio.io/istio/pkg/test/framework" - "istio.io/istio/pkg/test/shell" - "istio.io/istio/pkg/test/util/retry" -) + func applyManifest(ctx framework.TestContext, ns, mani string) { + ctx.Logf(">>> Applying to namespace %q manifest:\n%s", ns, mani) + dir := ctx.CreateTmpDirectoryOrFail("kmesh-lb") + path := filepath.Join(dir, "m.yaml") + if err := os.WriteFile(path, []byte(mani), 0644); err != nil { + ctx.Fatalf("WriteFile(%s) failed: %v", path, err) + } + content, _ := os.ReadFile(path) + ctx.Logf(">>> On-disk manifest at %s:\n%s", path, content) + runCommand(ctx, fmt.Sprintf("kubectl apply -n %s -f %s", ns, path)) + } -// applyManifest writes the provided manifest into a temporary file and applies it using kubectl. -func applyManifest(ns, manifest string) error { - tmpFile, err := os.CreateTemp("", "manifest-*.yaml") - if err != nil { - return err - } - defer os.Remove(tmpFile.Name()) - - if _, err := tmpFile.WriteString(manifest); err != nil { - tmpFile.Close() - return err - } - tmpFile.Close() - - cmd := "kubectl apply -n " + ns + " -f " + tmpFile.Name() - _, err = shell.Execute(true, cmd) - return err -} - -// extractResolvedIP parses the nslookup output to extract the IP address for the service. -func extractResolvedIP(nslookup string) string { - // nslookup output typically contains two "Address:" lines. - // The first is the DNS server; the second is the resolved IP. - lines := strings.Split(nslookup, "\n") - var addresses []string - for _, line := range lines { - trimmed := strings.TrimSpace(line) - if strings.HasPrefix(trimmed, "Address:") { - // Remove the "Address:" prefix and trim again. - addr := strings.TrimSpace(strings.TrimPrefix(trimmed, "Address:")) - // If the address is enclosed in brackets (e.g. "[fd00:10:96::a]:53"), skip it. - if strings.Contains(addr, ":53") || strings.HasPrefix(addr, "[") { - continue - } - addresses = append(addresses, addr) - } - } - if len(addresses) > 0 { - return addresses[0] - } - return "" -} - -func TestLocalityLoadBalancing(t *testing.T) { - framework.NewTest(t).Run(func(t framework.TestContext) { - const ns = "sample" - // Fully qualified domain name for the service. - const fqdn = "helloworld." + ns + ".svc.cluster.local" - - // Create the test namespace. - if _, err := shell.Execute(true, "kubectl create namespace "+ns); err != nil { - t.Logf("Namespace %s might already exist: %v", ns, err) - } - - // Debug: List current pods and endpoints. - pods, _ := shell.Execute(true, "kubectl get pods -n "+ns) - t.Logf("Initial pods in namespace %s:\n%s", ns, pods) - endpoints, _ := shell.Execute(true, "kubectl get endpoints helloworld -n "+ns) - t.Logf("Initial endpoints for helloworld service:\n%s", endpoints) - - // Apply the Service manifest with PreferClose locality load balancing. 
- serviceYAML := ` -apiVersion: v1 -kind: Service -metadata: - name: helloworld - labels: - app: helloworld -spec: - ports: - - port: 5000 - name: http - selector: - app: helloworld - trafficDistribution: PreferClose -` - if err := applyManifest(ns, serviceYAML); err != nil { - t.Fatalf("Failed to apply Service manifest: %v", err) - } - - // Deploy the local instance (dep1) on the worker node. - depLocal := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: helloworld-region-zone1-subzone1 - labels: - app: helloworld - version: region.zone1.subzone1 -spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone1.subzone1 - template: - metadata: - labels: - app: helloworld - version: region.zone1.subzone1 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone1.subzone1 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: kmesh-testing-worker -` - if err := applyManifest(ns, depLocal); err != nil { - t.Fatalf("Failed to deploy local instance (dep1): %v", err) - } - - // Deploy the remote instance (dep2) on the control-plane node. - depRemote := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: helloworld-region-zone1-subzone2 - labels: - app: helloworld - version: region.zone1.subzone2 -spec: - replicas: 1 - selector: - matchLabels: - app: helloworld - version: region.zone1.subzone2 - template: - metadata: - labels: - app: helloworld - version: region.zone1.subzone2 - spec: - containers: - - name: helloworld - env: - - name: SERVICE_VERSION - value: region.zone1.subzone2 - image: docker.io/istio/examples-helloworld-v1 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5000 - nodeSelector: - kubernetes.io/hostname: kmesh-testing-control-plane - tolerations: - - key: "node-role.kubernetes.io/control-plane" - operator: "Exists" - effect: "NoSchedule" -` - if err := applyManifest(ns, depRemote); err != nil { - t.Fatalf("Failed to deploy remote instance (dep2): %v", err) - } - - // Deploy a sleep client on the worker node. - clientDep := ` -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sleep - labels: - app: sleep -spec: - replicas: 1 - selector: - matchLabels: - app: sleep - template: - metadata: - labels: - app: sleep - spec: - containers: - - name: sleep - image: curlimages/curl - command: ["/bin/sleep", "infinity"] - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/hostname: kmesh-testing-worker -` - if err := applyManifest(ns, clientDep); err != nil { - t.Fatalf("Failed to deploy sleep client: %v", err) - } - - // Wait for all deployments to be available. - deployments := []string{ - "helloworld-region-zone1-subzone1", - "helloworld-region-zone1-subzone2", - "sleep", - } - for _, dep := range deployments { - cmd := "kubectl wait --for=condition=available deployment/" + dep + " -n " + ns + " --timeout=120s" - if _, err := shell.Execute(true, cmd); err != nil { - t.Fatalf("Deployment %s not ready: %v", dep, err) - } - } - - // List pods and endpoints after deployments. - pods, _ = shell.Execute(true, "kubectl get pods -n "+ns) - t.Logf("Pods after deployment in namespace %s:\n%s", ns, pods) - endpoints, _ = shell.Execute(true, "kubectl get endpoints helloworld -n "+ns) - t.Logf("Endpoints for service helloworld after deployment:\n%s", endpoints) - - // Check DNS resolution from the sleep pod. 
-		sleepPod, err := shell.Execute(true, "kubectl get pod -n "+ns+" -l app=sleep -o jsonpath='{.items[0].metadata.name}'")
-		if err != nil || sleepPod == "" {
-			t.Fatalf("Failed to get sleep pod: %v", err)
-		}
-		nslookup, _ := shell.Execute(true, "kubectl exec -n "+ns+" "+sleepPod+" -- nslookup "+fqdn)
-		t.Logf("nslookup output for %s:\n%s", fqdn, nslookup)
-		resolvedIP := extractResolvedIP(nslookup)
-		if resolvedIP == "" {
-			t.Fatalf("Failed to extract resolved IP from nslookup output")
-		}
-		t.Logf("Extracted resolved IP: %s", resolvedIP)
-
-		// Test Locality Preference (PreferClose):
-		t.Log("Testing locality preferred (expect response from region.zone1/subzone1)...")
-		var localResponse string
-		if err := retry.Until(func() bool {
-			t.Logf("Attempting curl request at %s...", time.Now().Format(time.RFC3339))
-			out, execErr := shell.Execute(true,
-				"kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello")
-			if execErr != nil {
-				t.Logf("Curl error: %v", execErr)
-				return false
-			}
-			t.Logf("Curl output: %s", out)
-			if strings.Contains(out, "region.zone1.subzone1") {
-				localResponse = out
-				return true
-			}
-			return false
-		}, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil {
-			t.Fatalf("Locality preferred test failed: expected response from region.zone1/subzone1, got: %s", localResponse)
-		}
-		t.Log("Locality preferred test passed.")
-
-		// Test Locality Failover:
-		t.Log("Testing locality failover (expect response from region.zone1/subzone2)...")
-		if _, err := shell.Execute(true, "kubectl delete deployment helloworld-region-zone1-subzone1 -n "+ns); err != nil {
-			t.Fatalf("Failed to delete local instance (dep1): %v", err)
-		}
-		var failoverResponse string
-		if err := retry.Until(func() bool {
-			t.Logf("Attempting curl (failover) at %s...", time.Now().Format(time.RFC3339))
-			out, execErr := shell.Execute(true,
-				"kubectl exec -n "+ns+" "+sleepPod+" -- curl -v -sSL --resolve "+fqdn+":5000:"+resolvedIP+" http://"+fqdn+":5000/hello")
-			if execErr != nil {
-				t.Logf("Curl error after failover: %v", execErr)
-				return false
-			}
-			t.Logf("Curl output after failover: %s", out)
-			if strings.Contains(out, "region.zone1.subzone2") {
-				failoverResponse = out
-				return true
-			}
-			return false
-		}, retry.Timeout(60*time.Second), retry.Delay(2*time.Second)); err != nil {
-			t.Fatalf("Locality failover test failed: expected response from region.zone1/subzone2, got: %s", failoverResponse)
-		}
-		t.Log("Locality failover test passed.")
-	})
-}
+
+// getClusterIP returns the ClusterIP of the service, bracketed if it is an
+// IPv6 address so it can be passed to curl's --resolve flag.
+func getClusterIP(ctx framework.TestContext, ns, svc string) string {
+	ip := runCommand(ctx, fmt.Sprintf(
+		"kubectl get svc %s -n %s -o jsonpath={.spec.clusterIP}", svc, ns))
+	if ip == "" {
+		ctx.Fatalf("Empty ClusterIP for %s/%s", ns, svc)
+	}
+	if strings.Contains(ip, ":") {
+		ip = "[" + ip + "]"
+	}
+	ctx.Logf("ClusterIP for %s/%s = %s", ns, svc, ip)
+	return ip
+}
+
+// getSleepPod returns the name of the sleep client pod in the namespace.
+func getSleepPod(ctx framework.TestContext, ns string) string {
+	pod := runCommand(ctx, fmt.Sprintf(
+		"kubectl get pod -n %s -l app=sleep -o jsonpath={.items[0].metadata.name}", ns))
+	if pod == "" {
+		ctx.Fatalf("No sleep pod found in %s", ns)
+	}
+	ctx.Logf("sleep pod = %s", pod)
+	return pod
+}
+
+// waitForDeployment blocks until the deployment reports Available or times out.
+func waitForDeployment(ctx framework.TestContext, ns, name string) {
+	runCommand(ctx, fmt.Sprintf(
+		"kubectl wait --for=condition=available deployment/%s -n %s --timeout=120s",
+		name, ns))
+}
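+
+// NOTE: each of the three tests below starts by applying the same topology
+// labels to the two kind nodes. A helper along these lines could consolidate
+// that setup; this is only a sketch (not part of this patch), assuming the
+// node names kmesh-testing-worker and kmesh-testing-control-plane:
+//
+//	func labelNodes(ctx framework.TestContext) {
+//		for node, subzone := range map[string]string{
+//			"kmesh-testing-worker":        "subzone1",
+//			"kmesh-testing-control-plane": "subzone2",
+//		} {
+//			runCommand(ctx, fmt.Sprintf(
+//				"kubectl label node %s topology.kubernetes.io/region=region "+
+//					"topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=%s --overwrite",
+//				node, subzone))
+//		}
+//	}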
"kubectl exec -n %s %s -- curl -sSL -v --resolve %s:5000:%s http://%s:5000/hello", + ns, pod, fqdn, ip, fqdn) + return shell.Execute(false, cmd) + } + + // Test 1: PreferClose via annotation + func TestLocality_PreferClose_Annotation(t *testing.T) { + framework.NewTest(t).Run(func(ctx framework.TestContext) { + // Label nodes subzone1 (worker) & subzone2 (control-plane) + runCommand(ctx, "kubectl label node kmesh-testing-worker topology.kubernetes.io/region=region topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=subzone1 --overwrite") + runCommand(ctx, "kubectl label node kmesh-testing-control-plane topology.kubernetes.io/region=region topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=subzone2 --overwrite") + + ns, svc := "sample-pc-annot", "helloworld" + fqdn := svc + "." + ns + ".svc.cluster.local" + localVer, remoteVer := "sub1", "sub2" + + runCommand(ctx, "kubectl create namespace "+ns) + + // Service with PreferClose via annotation + applyManifest(ctx, ns, fmt.Sprintf(` + apiVersion: v1 + kind: Service + metadata: + name: %s + namespace: %s + annotations: + networking.istio.io/traffic-distribution: PreferClose + labels: + app: helloworld + spec: + selector: + app: helloworld + ports: + - name: http + port: 5000 + targetPort: 5000 + `, svc, ns)) + + // Local deployment (sub1) on worker + applyManifest(ctx, ns, fmt.Sprintf(` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: helloworld-%s + namespace: %s + labels: + app: helloworld + version: %s + spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: %s + template: + metadata: + labels: + app: helloworld + version: %s + spec: + nodeSelector: + kubernetes.io/hostname: kmesh-testing-worker + containers: + - name: helloworld + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_VERSION + value: %s + ports: + - containerPort: 5000 + `, localVer, ns, localVer, localVer, localVer, localVer)) + + // Remote deployment (sub2) on control-plane (with toleration) + applyManifest(ctx, ns, fmt.Sprintf(` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: helloworld-%s + namespace: %s + labels: + app: helloworld + version: %s + spec: + replicas: 1 + selector: + matchLabels: + app: helloworld + version: %s + template: + metadata: + labels: + app: helloworld + version: %s + spec: + nodeSelector: + kubernetes.io/hostname: kmesh-testing-control-plane + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: NoSchedule + containers: + - name: helloworld + image: docker.io/istio/examples-helloworld-v1 + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_VERSION + value: %s + ports: + - containerPort: 5000 + `, remoteVer, ns, remoteVer, remoteVer, remoteVer, remoteVer)) + + // Sleep client on worker + applyManifest(ctx, ns, fmt.Sprintf(` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: sleep + namespace: %s + labels: + app: sleep + spec: + replicas: 1 + selector: + matchLabels: + app: sleep + template: + metadata: + labels: + app: sleep + spec: + nodeSelector: + kubernetes.io/hostname: kmesh-testing-worker + containers: + - name: sleep + image: curlimages/curl + command: ["/bin/sleep","infinity"] + `, ns)) + + waitForDeployment(ctx, ns, "helloworld-"+localVer) + waitForDeployment(ctx, ns, "helloworld-"+remoteVer) + waitForDeployment(ctx, ns, "sleep") + + ip := getClusterIP(ctx, ns, svc) + pod := getSleepPod(ctx, ns) + + // Expect only local → no remote yet + sawLocal := false + for i 
+
+// Test 2: strict node-local routing via internalTrafficPolicy: Local.
+func TestLocality_LocalStrict(t *testing.T) {
+	framework.NewTest(t).Run(func(ctx framework.TestContext) {
+		runCommand(ctx, "kubectl label node kmesh-testing-worker topology.kubernetes.io/region=region topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=subzone1 --overwrite")
+		runCommand(ctx, "kubectl label node kmesh-testing-control-plane topology.kubernetes.io/region=region topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=subzone2 --overwrite")
+
+		ns, svc := "sample-local", "helloworld"
+		fqdn := svc + "." + ns + ".svc.cluster.local"
+		localVer, remoteVer := "sub1", "sub2"
+
+		runCommand(ctx, "kubectl create namespace "+ns)
+
+		// Service in strict Local mode.
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: v1
+kind: Service
+metadata:
+  name: %s
+  namespace: %s
+  labels:
+    app: helloworld
+spec:
+  selector:
+    app: helloworld
+  ports:
+  - name: http
+    port: 5000
+    targetPort: 5000
+  internalTrafficPolicy: Local
+`, svc, ns))
+
+		// Local deployment on the worker node.
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: helloworld-%s
+  namespace: %s
+  labels:
+    app: helloworld
+    version: %s
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: helloworld
+      version: %s
+  template:
+    metadata:
+      labels:
+        app: helloworld
+        version: %s
+    spec:
+      nodeSelector:
+        kubernetes.io/hostname: kmesh-testing-worker
+      containers:
+      - name: helloworld
+        image: docker.io/istio/examples-helloworld-v1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: SERVICE_VERSION
+          value: %s
+        ports:
+        - containerPort: 5000
+`, localVer, ns, localVer, localVer, localVer, localVer))
+
+		// Remote deployment (present only to prove strict mode never reaches it).
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: helloworld-%s
+  namespace: %s
+  labels:
+    app: helloworld
+    version: %s
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: helloworld
+      version: %s
+  template:
+    metadata:
+      labels:
+        app: helloworld
+        version: %s
+    spec:
+      nodeSelector:
+        kubernetes.io/hostname: kmesh-testing-control-plane
+      tolerations:
+      - key: "node-role.kubernetes.io/control-plane"
+        operator: "Exists"
+        effect: NoSchedule
+      containers:
+      - name: helloworld
+        image: docker.io/istio/examples-helloworld-v1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: SERVICE_VERSION
+          value: %s
+        ports:
+        - containerPort: 5000
+`, remoteVer, ns, remoteVer, remoteVer, remoteVer, remoteVer))
+
+		// Sleep client on the worker node.
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sleep
+  namespace: %s
+  labels:
+    app: sleep
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: sleep
+  template:
+    metadata:
+      labels:
+        app: sleep
+    spec:
+      nodeSelector:
+        kubernetes.io/hostname: kmesh-testing-worker
+      containers:
+      - name: sleep
+        image: curlimages/curl
+        command: ["/bin/sleep","infinity"]
+`, ns))
+
+		waitForDeployment(ctx, ns, "helloworld-"+localVer)
+		waitForDeployment(ctx, ns, "helloworld-"+remoteVer)
+		waitForDeployment(ctx, ns, "sleep")
+
+		pod := getSleepPod(ctx, ns)
+		ip := getClusterIP(ctx, ns, svc)
+
+		// Must initially be served by the local instance.
+		out, _ := curlHello(ctx, ns, pod, fqdn, ip)
+		if !strings.Contains(out, localVer) {
+			ctx.Fatalf("Local strict initial: expected %q, got %q", localVer, out)
+		}
+
+		// Delete the local deployment; with no fallback allowed, the request
+		// must now fail rather than reach the remote instance.
+		runCommand(ctx, "kubectl delete deployment helloworld-"+localVer+" -n "+ns)
+		time.Sleep(5 * time.Second)
+		if out, err := curlHello(ctx, ns, pod, fqdn, ip); err == nil {
+			ctx.Fatalf("Local strict should fail, but got %q", out)
+		}
+	})
+}
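+
+// internalTrafficPolicy: Local is enforced by Kubernetes itself (the service
+// routes only to endpoints on the client's node), while PreferClose is a
+// locality *preference* that still allows failover; Test 2 vs. Test 1
+// exercises exactly that difference. When debugging either mode, the
+// endpoint-to-node mapping can be inspected with, for example (illustrative
+// commands, not used by the tests):
+//
+//	kubectl get endpointslices -n sample-local -o wide
+//	kubectl get pods -n sample-local -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName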
+
+// Test 3: after the local instance is removed, traffic is distributed across
+// both fallback pods in the next-closest subzone.
+func TestLocality_SubzoneDistribution(t *testing.T) {
+	framework.NewTest(t).Run(func(ctx framework.TestContext) {
+		runCommand(ctx, "kubectl label node kmesh-testing-worker topology.kubernetes.io/region=region topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=subzone1 --overwrite")
+		runCommand(ctx, "kubectl label node kmesh-testing-control-plane topology.kubernetes.io/region=region topology.kubernetes.io/zone=zone1 topology.kubernetes.io/subzone=subzone2 --overwrite")
+
+		ns, svc := "sample-dist", "helloworld"
+		fqdn := svc + "." + ns + ".svc.cluster.local"
+		localVer := "sub1"
+		rem1, rem2 := "sub2-a", "sub2-b"
+
+		runCommand(ctx, "kubectl create namespace "+ns)
+
+		// Service, again with PreferClose via the annotation (as in Test 1).
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: v1
+kind: Service
+metadata:
+  name: %s
+  namespace: %s
+  annotations:
+    networking.istio.io/traffic-distribution: PreferClose
+  labels:
+    app: helloworld
+spec:
+  selector:
+    app: helloworld
+  ports:
+  - name: http
+    port: 5000
+    targetPort: 5000
+`, svc, ns))
+
+		// Local deployment on the worker node.
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: helloworld-%s
+  namespace: %s
+  labels:
+    app: helloworld
+    version: %s
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: helloworld
+      version: %s
+  template:
+    metadata:
+      labels:
+        app: helloworld
+        version: %s
+    spec:
+      nodeSelector:
+        kubernetes.io/hostname: kmesh-testing-worker
+      containers:
+      - name: helloworld
+        image: docker.io/istio/examples-helloworld-v1
+        env:
+        - name: SERVICE_VERSION
+          value: %s
+        ports:
+        - containerPort: 5000
+`, localVer, ns, localVer, localVer, localVer, localVer))
+
+		// Two fallback deployments in subzone2. The version strings double as
+		// label values and name suffixes, so they must be lowercase.
+		for _, v := range []string{rem1, rem2} {
+			applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: helloworld-%s
+  namespace: %s
+  labels:
+    app: helloworld
+    version: %s
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: helloworld
+      version: %s
+  template:
+    metadata:
+      labels:
+        app: helloworld
+        version: %s
+    spec:
+      nodeSelector:
+        kubernetes.io/hostname: kmesh-testing-control-plane
+      tolerations:
+      - key: "node-role.kubernetes.io/control-plane"
+        operator: "Exists"
+        effect: NoSchedule
+      containers:
+      - name: helloworld
+        image: docker.io/istio/examples-helloworld-v1
+        env:
+        - name: SERVICE_VERSION
+          value: %s
+        ports:
+        - containerPort: 5000
+`, v, ns, v, v, v, v))
+		}
+
+		// Sleep client on the worker node.
+		applyManifest(ctx, ns, fmt.Sprintf(`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sleep
+  namespace: %s
+  labels:
+    app: sleep
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: sleep
+  template:
+    metadata:
+      labels:
+        app: sleep
+    spec:
+      nodeSelector:
+        kubernetes.io/hostname: kmesh-testing-worker
+      containers:
+      - name: sleep
+        image: curlimages/curl
+        command: ["/bin/sleep","infinity"]
+`, ns))
+
+		waitForDeployment(ctx, ns, "helloworld-"+localVer)
+		waitForDeployment(ctx, ns, "helloworld-"+rem1)
+		waitForDeployment(ctx, ns, "helloworld-"+rem2)
+		waitForDeployment(ctx, ns, "sleep")
+
+		// Delete the local instance to exercise distribution across the fallbacks.
+		runCommand(ctx, "kubectl delete deployment helloworld-"+localVer+" -n "+ns)
+		ip := getClusterIP(ctx, ns, svc)
+		pod := getSleepPod(ctx, ns)
+
+		counts := map[string]int{}
+		for i := 0; i < 20; i++ {
+			out, _ := curlHello(ctx, ns, pod, fqdn, ip)
+			for _, v := range []string{rem1, rem2} {
+				if strings.Contains(out, v) {
+					counts[v]++
+				}
+			}
+			time.Sleep(200 * time.Millisecond)
+		}
+		ctx.Logf("Distribution: %+v", counts)
+		if counts[rem1] == 0 || counts[rem2] == 0 {
+			ctx.Fatalf("Expected both %q and %q, got %+v", rem1, rem2, counts)
+		}
+	})
+}
\ No newline at end of file
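
Reviewer note: once a kind cluster with the expected node names
(kmesh-testing-worker, kmesh-testing-control-plane) is up, the three tests can
be run selectively with the integ build tag, for example (exact harness flags
depend on the local framework setup):

    go test -tags=integ ./test/e2e/ -run 'TestLocality_' -v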