diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 925af7022a7f..0314ad234288 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "name": "istio build-tools", - "image": "gcr.io/istio-testing/build-tools:release-1.19-997ea5ffb748b3d6499f3d91f236e15f0dcaaab6", + "image": "gcr.io/istio-testing/build-tools:release-1.19-ae4f4b509789426724c404fba8b228a1b487fd0a", "privileged": true, "remoteEnv": { "USE_GKE_GCLOUD_AUTH_PLUGIN": "True", diff --git a/Makefile.core.mk b/Makefile.core.mk index b437bd580d13..8eddfa639477 100644 --- a/Makefile.core.mk +++ b/Makefile.core.mk @@ -49,7 +49,7 @@ endif export VERSION # Base version of Istio image to use -BASE_VERSION ?= master-2023-07-20T20-50-43 +BASE_VERSION ?= 1.19-2023-11-06T19-02-47 ISTIO_BASE_REGISTRY ?= gcr.io/istio-release export GO111MODULE ?= on diff --git a/cni/cmd/istio-cni/main.go b/cni/cmd/istio-cni/main.go index 5af7a72d43f3..17deef2a468a 100644 --- a/cni/cmd/istio-cni/main.go +++ b/cni/cmd/istio-cni/main.go @@ -26,8 +26,6 @@ import ( "istio.io/istio/cni/pkg/plugin" "istio.io/istio/pkg/log" istioversion "istio.io/istio/pkg/version" - "istio.io/istio/tools/istio-iptables/pkg/cmd" - "istio.io/istio/tools/istio-iptables/pkg/constants" ) func main() { @@ -42,24 +40,6 @@ func main() { _ = log.Sync() }() - // configure-routes allows setting up the iproute2 configuration. - // This is an old workaround and kept in place to preserve old behavior. - // It is called standalone when HostNSEnterExec=true. - // Default behavior is to use go netns, which is not in need for this: - - // Older versions of Go < 1.10 cannot change network namespaces safely within a go program - // (see https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix). - // As a result, the flow is: - // * CNI plugin is called with no args, skipping this section. - // * CNI code invokes iptables code with CNIMode=true. This in turn runs 'nsenter -- istio-cni configure-routes' - if len(os.Args) > 1 && os.Args[1] == constants.CommandConfigureRoutes { - if err := cmd.GetRouteCommand().Execute(); err != nil { - log.Errorf("failed to configure routes: %v", err) - os.Exit(1) - } - return - } - // TODO: implement plugin version skel.PluginMain(plugin.CmdAdd, plugin.CmdCheck, plugin.CmdDelete, version.All, fmt.Sprintf("CNI plugin istio-cni %v", istioversion.Info.Version)) diff --git a/cni/pkg/config/config.go b/cni/pkg/config/config.go index 994153791e8c..5e0322d77f28 100644 --- a/cni/pkg/config/config.go +++ b/cni/pkg/config/config.go @@ -76,9 +76,6 @@ type InstallConfig struct { // Whether ebpf is enabled EbpfEnabled bool - - // Use the external nsenter command for network namespace switching - HostNSEnterExec bool } // RepairConfig struct defines the Istio CNI race repair configuration @@ -132,7 +129,6 @@ func (c InstallConfig) String() string { b.WriteString("K8sNodeName: " + c.K8sNodeName + "\n") b.WriteString("MonitoringPort: " + fmt.Sprint(c.MonitoringPort) + "\n") b.WriteString("LogUDSAddress: " + fmt.Sprint(c.LogUDSAddress) + "\n") - b.WriteString("HostNSEnterExec: " + fmt.Sprint(c.HostNSEnterExec) + "\n") b.WriteString("AmbientEnabled: " + fmt.Sprint(c.AmbientEnabled) + "\n") diff --git a/cni/pkg/plugin/iptables_linux.go b/cni/pkg/plugin/iptables_linux.go index 3407942a8701..23cd1c632927 100644 --- a/cni/pkg/plugin/iptables_linux.go +++ b/cni/pkg/plugin/iptables_linux.go @@ -35,7 +35,6 @@ var getNs = ns.GetNS // provided in Redirect. 
func (ipt *iptables) Program(podName, netns string, rdrct *Redirect) error { viper.Set(constants.CNIMode, true) - viper.Set(constants.HostNSEnterExec, rdrct.hostNSEnterExec) viper.Set(constants.NetworkNamespace, netns) viper.Set(constants.EnvoyPort, rdrct.targetPort) viper.Set(constants.ProxyUID, rdrct.noRedirectUID) diff --git a/cni/pkg/plugin/plugin.go b/cni/pkg/plugin/plugin.go index bf927e8c6c00..82f246cce0c0 100644 --- a/cni/pkg/plugin/plugin.go +++ b/cni/pkg/plugin/plugin.go @@ -79,11 +79,10 @@ type Config struct { PrevResult *cniv1.Result `json:"-"` // Add plugin-specific flags here - LogLevel string `json:"log_level"` - LogUDSAddress string `json:"log_uds_address"` - AmbientEnabled bool `json:"ambient_enabled"` - Kubernetes Kubernetes `json:"kubernetes"` - HostNSEnterExec bool `json:"hostNSEnterExec"` + LogLevel string `json:"log_level"` + LogUDSAddress string `json:"log_uds_address"` + AmbientEnabled bool `json:"ambient_enabled"` + Kubernetes Kubernetes `json:"kubernetes"` } // K8sArgs is the valid CNI_ARGS used for Kubernetes @@ -323,7 +322,6 @@ func doRun(args *skel.CmdArgs, conf *Config) error { return fmt.Errorf("redirect failed to find InterceptRuleMgr") } - redirect.hostNSEnterExec = conf.HostNSEnterExec rulesMgr := interceptMgrCtor() if err := rulesMgr.Program(podName, args.Netns, redirect); err != nil { return err diff --git a/cni/pkg/plugin/redirect.go b/cni/pkg/plugin/redirect.go index ecd381ee7926..c832ed06af15 100644 --- a/cni/pkg/plugin/redirect.go +++ b/cni/pkg/plugin/redirect.go @@ -90,7 +90,6 @@ type Redirect struct { dnsRedirect bool dualStack bool invalidDrop bool - hostNSEnterExec bool } type annotationValidationFunc func(value string) error diff --git a/common/.commonfiles.sha b/common/.commonfiles.sha index 3a76cffe28b6..a2967d90e2c0 100644 --- a/common/.commonfiles.sha +++ b/common/.commonfiles.sha @@ -1 +1 @@ -b8589936d6d3cd77e3675d62b4288e053a68242e +48593220d84a5f57cfb381e026f8fded7b559348 diff --git a/common/scripts/setup_env.sh b/common/scripts/setup_env.sh index 8ed23f0718c4..a9091490c79c 100755 --- a/common/scripts/setup_env.sh +++ b/common/scripts/setup_env.sh @@ -75,7 +75,7 @@ fi TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io} PROJECT_ID=${PROJECT_ID:-istio-testing} if [[ "${IMAGE_VERSION:-}" == "" ]]; then - IMAGE_VERSION=release-1.19-997ea5ffb748b3d6499f3d91f236e15f0dcaaab6 + IMAGE_VERSION=release-1.19-ae4f4b509789426724c404fba8b228a1b487fd0a fi if [[ "${IMAGE_NAME:-}" == "" ]]; then IMAGE_NAME=build-tools diff --git a/docker/Dockerfile.distroless b/docker/Dockerfile.distroless index 52b9e50ba50c..ad1d77926a78 100644 --- a/docker/Dockerfile.distroless +++ b/docker/Dockerfile.distroless @@ -1,5 +1,5 @@ # prepare a distroless source context to copy files from -FROM gcr.io/distroless/static-debian11@sha256:e7e79fb2947f38ce0fab6061733f7e1959c12b843079042fe13f56ca7b9d178c as distroless_source +FROM gcr.io/distroless/static-debian11@sha256:6706c73aae2afaa8201d63cc3dda48753c09bcd6c300762251065c0f7e602b25 as distroless_source # prepare a base dev to modify file contents FROM ubuntu:focal as ubuntu_source diff --git a/go.mod b/go.mod index 8a920ae917ce..961f4aaa0d14 100644 --- a/go.mod +++ b/go.mod @@ -108,8 +108,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.12.2 - istio.io/api v1.19.2-0.20231011000955-f3015ebb5bd4 - istio.io/client-go v1.19.2-0.20231011002333-b819e2de19ef + istio.io/api v1.19.4-0.20231108024616-33a6ca44a2ff + istio.io/client-go v1.19.4-0.20231108025610-1df49f5c7eba k8s.io/api v0.28.1 
k8s.io/apiextensions-apiserver v0.28.1 k8s.io/apimachinery v0.28.1 diff --git a/go.sum b/go.sum index 48e6a19f3bdd..6d3642e56447 100644 --- a/go.sum +++ b/go.sum @@ -1388,10 +1388,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -istio.io/api v1.19.2-0.20231011000955-f3015ebb5bd4 h1:NoiArVONh9DPs/DovhCCl771BUeEkKp+/GhsRB1YbOk= -istio.io/api v1.19.2-0.20231011000955-f3015ebb5bd4/go.mod h1:KstZe4bKbXouALUJ5PqpjNEhu5nj90HrDFitZfpNhlU= -istio.io/client-go v1.19.2-0.20231011002333-b819e2de19ef h1:TjsXNObHEPEGfoMwlzXS9dXNDmuesBSk25CpNa/gKCY= -istio.io/client-go v1.19.2-0.20231011002333-b819e2de19ef/go.mod h1:ra3fVlXcquh7EuQnNssuLxfp6lFv/nx5314PvNEzOUs= +istio.io/api v1.19.4-0.20231108024616-33a6ca44a2ff h1:2LcDGnXumTFMJSErooHOAvN7eo0eB3nHia7xfhVY3Q0= +istio.io/api v1.19.4-0.20231108024616-33a6ca44a2ff/go.mod h1:KstZe4bKbXouALUJ5PqpjNEhu5nj90HrDFitZfpNhlU= +istio.io/client-go v1.19.4-0.20231108025610-1df49f5c7eba h1:+6W4x4PGf8DL0Oii1atAJqC1652RdgCM1aeYy9DU6D4= +istio.io/client-go v1.19.4-0.20231108025610-1df49f5c7eba/go.mod h1:PQVSJB3q5BX8A6HC5sOQ3oA5CA7CB1c22EZ+8f+IC8M= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4= k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= diff --git a/istio.deps b/istio.deps index e22a83f9a453..a767a055d722 100644 --- a/istio.deps +++ b/istio.deps @@ -4,13 +4,13 @@ "name": "PROXY_REPO_SHA", "repoName": "proxy", "file": "", - "lastStableSHA": "a1ff538a63890e27dd2add4b2680ba8dc49293ca" + "lastStableSHA": "f9707e29aa0a36b1430b373d95e6c9abb5deca75" }, { "_comment": "", "name": "ZTUNNEL_REPO_SHA", "repoName": "ztunnel", "file": "", - "lastStableSHA": "3ca9690f7a66cc551f5c2638c5e761873f64948e" + "lastStableSHA": "723c07f433d2e54ff973302c3ab655ecef0642f1" } ] diff --git a/istioctl/pkg/tag/tag.go b/istioctl/pkg/tag/tag.go index 97881d808b07..bee3085c01bf 100644 --- a/istioctl/pkg/tag/tag.go +++ b/istioctl/pkg/tag/tag.go @@ -227,6 +227,8 @@ func tagListCommand(ctx cli.Context) *cobra.Command { }, } + cmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", util.TableFormat, "Output format for tag description "+ + "(available formats: table,json)") return cmd } diff --git a/istioctl/pkg/util/configdump/endpoint.go b/istioctl/pkg/util/configdump/endpoint.go index aaf7788f6848..a5c2fc97bca4 100644 --- a/istioctl/pkg/util/configdump/endpoint.go +++ b/istioctl/pkg/util/configdump/endpoint.go @@ -15,8 +15,6 @@ package configdump import ( - "fmt" - admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3" ) @@ -24,7 +22,7 @@ import ( func (w *Wrapper) GetEndpointsConfigDump() (*admin.EndpointsConfigDump, error) { endpointsDumpAny, err := w.getSection(endpoints) if err != nil { - return nil, fmt.Errorf("endpoints not found (was include_eds=true used?): %v", err) + return nil, nil } endpointsDump := &admin.EndpointsConfigDump{} err = endpointsDumpAny.UnmarshalTo(endpointsDump) diff --git a/istioctl/pkg/writer/envoy/configdump/endpoint.go b/istioctl/pkg/writer/envoy/configdump/endpoint.go index 30b5cff2230a..bdf7fcf2050f 100644 --- a/istioctl/pkg/writer/envoy/configdump/endpoint.go +++ b/istioctl/pkg/writer/envoy/configdump/endpoint.go @@ -145,6 +145,9 @@ func 
(c *ConfigWriter) retrieveSortedEndpointsSlice(filter EndpointFilter) ([]*e if err != nil { return nil, err } + if dump == nil { + return nil, nil + } endpoints := make([]*endpoint.ClusterLoadAssignment, 0, len(dump.DynamicEndpointConfigs)) for _, e := range dump.GetDynamicEndpointConfigs() { cla, epCount := retrieveEndpoint(e.EndpointConfig, filter) diff --git a/manifests/charts/gateway/templates/deployment.yaml b/manifests/charts/gateway/templates/deployment.yaml index 4baa37d17d43..a864535ef089 100644 --- a/manifests/charts/gateway/templates/deployment.yaml +++ b/manifests/charts/gateway/templates/deployment.yaml @@ -108,3 +108,6 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} terminationGracePeriodSeconds: {{ $.Values.terminationGracePeriodSeconds }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} diff --git a/manifests/charts/gateway/values.schema.json b/manifests/charts/gateway/values.schema.json index a6e880bf289d..e29bfba18ee3 100644 --- a/manifests/charts/gateway/values.schema.json +++ b/manifests/charts/gateway/values.schema.json @@ -227,6 +227,9 @@ }, "terminationGracePeriodSeconds": { "type": "number" + }, + "priorityClassName": { + "type": "string" } } } diff --git a/manifests/charts/gateway/values.yaml b/manifests/charts/gateway/values.yaml index 2245789d488b..e785dcb52fb5 100644 --- a/manifests/charts/gateway/values.yaml +++ b/manifests/charts/gateway/values.yaml @@ -131,3 +131,9 @@ imagePullSecrets: [] podDisruptionBudget: {} terminationGracePeriodSeconds: 30 + +# Configure this to a higher priority class in order to make sure your Istio gateway pods +# will not be killed because of low priority class. +# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +# for more detail. +priorityClassName: "" diff --git a/pilot/docker/Dockerfile.proxyv2 b/pilot/docker/Dockerfile.proxyv2 index 7e4321fac6b2..33347a31befe 100644 --- a/pilot/docker/Dockerfile.proxyv2 +++ b/pilot/docker/Dockerfile.proxyv2 @@ -12,8 +12,8 @@ FROM ${ISTIO_BASE_REGISTRY}/base:${BASE_VERSION} as debug # This image is a custom built debian11 distroless image with multiarchitecture support. # It is built on the base distroless image, with iptables binary and libraries added # The source can be found at https://github.com/istio/distroless/tree/iptables -# This version is from commit 5bf244c0cf5dd11e4113d564272ed657d4784f95. -FROM ${ISTIO_BASE_REGISTRY}/iptables@sha256:02ee2ed907aae3c08aabf9a95d10e2dbc1b6514954c5df6a00d3c9d3ebc5caeb as distroless +# This version is from commit 973bd8e0048479c770ab4332063921e22d222c9b. +FROM ${ISTIO_BASE_REGISTRY}/iptables@sha256:1696a2570c01a00f8910b3d9d68039ff22ae8ddbe7903fa78005740251bddd9f as distroless # This will build the final image based on either debug or distroless from above # hadolint ignore=DL3006 diff --git a/pilot/docker/Dockerfile.ztunnel b/pilot/docker/Dockerfile.ztunnel index 7f9d52184e0b..a49a81b41e44 100644 --- a/pilot/docker/Dockerfile.ztunnel +++ b/pilot/docker/Dockerfile.ztunnel @@ -12,8 +12,8 @@ FROM ${ISTIO_BASE_REGISTRY}/base:${BASE_VERSION} as debug # This image is a custom built debian11 distroless image with multiarchitecture support. # It is built on the base distroless image, with iptables binary and libraries added # The source can be found at https://github.com/istio/distroless/tree/iptables -# This version is from commit 5bf244c0cf5dd11e4113d564272ed657d4784f95. 
-FROM ${ISTIO_BASE_REGISTRY}/iptables@sha256:02ee2ed907aae3c08aabf9a95d10e2dbc1b6514954c5df6a00d3c9d3ebc5caeb as distroless +# This version is from commit 973bd8e0048479c770ab4332063921e22d222c9b. +FROM ${ISTIO_BASE_REGISTRY}/iptables@sha256:1696a2570c01a00f8910b3d9d68039ff22ae8ddbe7903fa78005740251bddd9f as distroless # This will build the final image based on either debug or distroless from above # hadolint ignore=DL3006 diff --git a/pilot/pkg/bootstrap/configcontroller.go b/pilot/pkg/bootstrap/configcontroller.go index 0e87110f05a5..78267f3195de 100644 --- a/pilot/pkg/bootstrap/configcontroller.go +++ b/pilot/pkg/bootstrap/configcontroller.go @@ -291,13 +291,13 @@ func (s *Server) initInprocessAnalysisController(args *PilotArgs) error { s.addStartFunc("analysis controller", func(stop <-chan struct{}) error { go leaderelection. NewLeaderElection(args.Namespace, args.PodName, leaderelection.AnalyzeController, args.Revision, s.kubeClient). - AddRunFunction(func(stop <-chan struct{}) { - cont, err := incluster.NewController(stop, s.RWConfigStore, + AddRunFunction(func(leaderStop <-chan struct{}) { + cont, err := incluster.NewController(leaderStop, s.RWConfigStore, s.kubeClient, args.Revision, args.Namespace, s.statusManager, args.RegistryOptions.KubeOptions.DomainSuffix) if err != nil { return } - cont.Run(stop) + cont.Run(leaderStop) }).Run(stop) return nil }) diff --git a/pilot/pkg/config/kube/crdclient/client.go b/pilot/pkg/config/kube/crdclient/client.go index a84e0d1b3677..e10732d2877a 100644 --- a/pilot/pkg/config/kube/crdclient/client.go +++ b/pilot/pkg/config/kube/crdclient/client.go @@ -28,6 +28,7 @@ import ( "time" jsonmerge "github.com/evanphx/json-patch/v5" + "go.uber.org/atomic" "golang.org/x/exp/maps" "gomodules.xyz/jsonpatch/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -71,6 +72,8 @@ type Client struct { kinds map[config.GroupVersionKind]kclient.Untyped kindsMu sync.RWMutex queue queue.Instance + // a flag indicates whether this client has been run, it is to prevent run queue twice + started *atomic.Bool // handlers defines a list of event handlers per-type handlers map[config.GroupVersionKind][]model.EventHandler @@ -115,6 +118,7 @@ func NewForSchemas(client kube.Client, opts Option, schemas collection.Schemas) schemasByCRDName: schemasByCRDName, revision: opts.Revision, queue: queue.NewQueue(1 * time.Second), + started: atomic.NewBool(false), kinds: map[config.GroupVersionKind]kclient.Untyped{}, handlers: map[config.GroupVersionKind][]model.EventHandler{}, client: client, @@ -138,6 +142,11 @@ func (cl *Client) RegisterEventHandler(kind config.GroupVersionKind, handler mod // Run the queue and all informers. Callers should wait for HasSynced() before depending on results. func (cl *Client) Run(stop <-chan struct{}) { + if cl.started.Swap(true) { + // was already started by other thread + return + } + t0 := time.Now() cl.logger.Infof("Starting Pilot K8S CRD controller") @@ -146,7 +155,6 @@ func (cl *Client) Run(stop <-chan struct{}) { return } cl.logger.Infof("Pilot K8S CRD controller synced in %v", time.Since(t0)) - cl.queue.Run(stop) cl.logger.Infof("controller terminated") } diff --git a/pilot/pkg/features/pilot.go b/pilot/pkg/features/pilot.go index a30a9a4b727d..23dac6c95de9 100644 --- a/pilot/pkg/features/pilot.go +++ b/pilot/pkg/features/pilot.go @@ -679,6 +679,11 @@ var ( // User should not rely on builtin resource labels, this flag will be removed in future releases(1.20). 
EnableOTELBuiltinResourceLables = env.Register("ENABLE_OTEL_BUILTIN_RESOURCE_LABELS", false, "If enabled, envoy will send builtin lables(e.g. node_name) via OTel sink.").Get() + + // Useful for IPv6-only EKS clusters. See https://aws.github.io/aws-eks-best-practices/networking/ipv6/ why it assigns an additional IPv4 NAT address. + // Also see https://github.com/istio/istio/issues/46719 why this flag is required + EnableAdditionalIpv4OutboundListenerForIpv6Only = env.RegisterBoolVar("ISTIO_ENABLE_IPV4_OUTBOUND_LISTENER_FOR_IPV6_CLUSTERS", false, + "If true, pilot will configure an additional IPv4 listener for outbound traffic in IPv6 only clusters, e.g. AWS EKS IPv6 only clusters.").Get() ) // UnsafeFeaturesEnabled returns true if any unsafe features are enabled. diff --git a/pilot/pkg/model/gateway.go b/pilot/pkg/model/gateway.go index 61a5e6b91362..2e586b8a8769 100644 --- a/pilot/pkg/model/gateway.go +++ b/pilot/pkg/model/gateway.go @@ -267,19 +267,20 @@ func MergeGateways(gateways []gatewayWithInstances, proxy *Proxy, ps *PushContex serversByRouteName[routeName] = []*networking.Server{s} } // build the port bind map for none plain text protocol, thus can avoid protocol conflict if it's different bind - if bindsPortMap, ok := nonPlainTextGatewayPortsBindMap[resolvedPort]; ok && !bindsPortMap.Contains(serverPort.Bind) { - bindsPortMap.Insert(serverPort.Bind) + var newBind bool + if bindsPortMap, ok := nonPlainTextGatewayPortsBindMap[resolvedPort]; ok { + newBind = !bindsPortMap.InsertContains(serverPort.Bind) } else { - bindsPortMap := sets.String{} - bindsPortMap.Insert(serverPort.Bind) - nonPlainTextGatewayPortsBindMap[resolvedPort] = bindsPortMap + nonPlainTextGatewayPortsBindMap[resolvedPort] = sets.New(serverPort.Bind) + newBind = true } // If the bind/port combination is not being used as non-plaintext, they are different // listeners and won't get conflicted even with same port different protocol // i.e 0.0.0.0:443:GRPC/1.0.0.1:443:GRPC/1.0.0.2:443:HTTPS they are not conflicted, otherwise // We have another TLS server on the same port. 
Can differentiate servers using SNI - if s.Tls == nil && !nonPlainTextGatewayPortsBindMap[resolvedPort].Contains(serverPort.Bind) { + if s.Tls == nil && !newBind { log.Warnf("TLS server without TLS options %s %s", gatewayName, s.String()) + RecordRejectedConfig(gatewayName) continue } if mergedServers[serverPort] == nil { diff --git a/pilot/pkg/model/virtualservice.go b/pilot/pkg/model/virtualservice.go index 4d08237184a4..505321cd83cd 100644 --- a/pilot/pkg/model/virtualservice.go +++ b/pilot/pkg/model/virtualservice.go @@ -365,7 +365,8 @@ func mergeHTTPMatchRequests(root, delegate []*networking.HTTPMatchRequest) (out } func mergeHTTPMatchRequest(root, delegate *networking.HTTPMatchRequest) *networking.HTTPMatchRequest { - out := delegate + // nolint: govet + out := *delegate if out.Name == "" { out.Name = root.Name } else if root.Name != "" { @@ -410,7 +411,7 @@ func mergeHTTPMatchRequest(root, delegate *networking.HTTPMatchRequest) *network if len(out.StatPrefix) == 0 { out.StatPrefix = root.StatPrefix } - return out + return &out } func hasConflict(root, leaf *networking.HTTPMatchRequest) bool { diff --git a/pilot/pkg/model/virtualservice_test.go b/pilot/pkg/model/virtualservice_test.go index 942ed0b22af5..4cb6cccb97bc 100644 --- a/pilot/pkg/model/virtualservice_test.go +++ b/pilot/pkg/model/virtualservice_test.go @@ -1066,6 +1066,82 @@ func TestMergeHttpRoutes(t *testing.T) { }, }, }, + { + name: "multiple header merge", + root: &networking.HTTPRoute{ + Match: []*networking.HTTPMatchRequest{ + { + Headers: map[string]*networking.StringMatch{ + "header1": { + MatchType: &networking.StringMatch_Regex{ + Regex: "regex", + }, + }, + }, + }, + { + Headers: map[string]*networking.StringMatch{ + "header2": { + MatchType: &networking.StringMatch_Exact{ + Exact: "exact", + }, + }, + }, + }, + }, + Delegate: &networking.Delegate{ + Name: "delegate", + Namespace: "default", + }, + }, + delegate: []*networking.HTTPRoute{ + { + Match: []*networking.HTTPMatchRequest{ + { + Uri: &networking.StringMatch{ + MatchType: &networking.StringMatch_Prefix{ + Prefix: "/", + }, + }, + }, + }, + }, + }, + expected: []*networking.HTTPRoute{ + { + Match: []*networking.HTTPMatchRequest{ + { + Uri: &networking.StringMatch{ + MatchType: &networking.StringMatch_Prefix{ + Prefix: "/", + }, + }, + Headers: map[string]*networking.StringMatch{ + "header1": { + MatchType: &networking.StringMatch_Regex{ + Regex: "regex", + }, + }, + }, + }, + { + Uri: &networking.StringMatch{ + MatchType: &networking.StringMatch_Prefix{ + Prefix: "/", + }, + }, + Headers: map[string]*networking.StringMatch{ + "header2": { + MatchType: &networking.StringMatch_Exact{ + Exact: "exact", + }, + }, + }, + }, + }, + }, + }, + }, } for _, tc := range cases { diff --git a/pilot/pkg/networking/core/v1alpha3/cluster.go b/pilot/pkg/networking/core/v1alpha3/cluster.go index cc463c0464f9..db006899b9d6 100644 --- a/pilot/pkg/networking/core/v1alpha3/cluster.go +++ b/pilot/pkg/networking/core/v1alpha3/cluster.go @@ -623,8 +623,36 @@ func (configgen *ConfigGeneratorImpl) buildInboundClusters(cb *ClusterBuilder, p if endpointAddress == "" { endpointAddress = model.LocalhostIPv6AddressPrefix } - } else if hostIP == model.LocalhostAddressPrefix || hostIP == model.LocalhostIPv6AddressPrefix { - endpointAddress = actualLocalHosts[0] + } else if hostIP == model.LocalhostAddressPrefix { + // prefer 127.0.0.1 to ::1, but if given no option choose ::1 + ipV6EndpointAddress := "" + for _, host := range actualLocalHosts { + if netutil.IsIPv4Address(host) { + 
endpointAddress = host + break + } + if netutil.IsIPv6Address(host) { + ipV6EndpointAddress = host + } + } + if endpointAddress == "" { + endpointAddress = ipV6EndpointAddress + } + } else if hostIP == model.LocalhostIPv6AddressPrefix { + // prefer ::1 to 127.0.0.1, but if given no option choose 127.0.0.1 + ipV4EndpointAddress := "" + for _, host := range actualLocalHosts { + if netutil.IsIPv6Address(host) { + endpointAddress = host + break + } + if netutil.IsIPv4Address(host) { + ipV4EndpointAddress = host + } + } + if endpointAddress == "" { + endpointAddress = ipV4EndpointAddress + } } } // Find the service instance that corresponds to this ingress listener by looking diff --git a/pilot/pkg/networking/core/v1alpha3/cluster_test.go b/pilot/pkg/networking/core/v1alpha3/cluster_test.go index f3c5f5344e04..ab493ad62238 100644 --- a/pilot/pkg/networking/core/v1alpha3/cluster_test.go +++ b/pilot/pkg/networking/core/v1alpha3/cluster_test.go @@ -1696,6 +1696,114 @@ func TestBuildInboundClustersPortLevelCircuitBreakerThresholds(t *testing.T) { } } +func TestInboundClustersLocalhostDefaultEndpoint(t *testing.T) { + ipv4Proxy := getProxy() + ipv6Proxy := getIPv6Proxy() + dsProxy := model.Proxy{ + Type: model.SidecarProxy, + IPAddresses: []string{"1.1.1.1", "1111:2222::1"}, + ID: "v0.default", + DNSDomain: "default.example.org", + Metadata: &model.NodeMetadata{ + Namespace: "not-default", + }, + ConfigNamespace: "not-default", + } + + inboundFilter := func(c *cluster.Cluster) bool { + return strings.HasPrefix(c.Name, "inbound|") + } + + cases := []struct { + name string + proxy *model.Proxy + defaultEndpoint string + expectedAddr string + expectedPort uint32 + }{ + // ipv4 use cases + { + name: "ipv4 host: defaultEndpoint set to 127.0.0.1:7073", + proxy: ipv4Proxy, + defaultEndpoint: "127.0.0.1:7073", + expectedAddr: "127.0.0.1", + expectedPort: 7073, + }, + { + name: "ipv4 host: defaultEndpoint set to [::1]:7073", + proxy: ipv4Proxy, + defaultEndpoint: "[::1]:7073", + expectedAddr: "127.0.0.1", + expectedPort: 7073, + }, + // ipv6 use cases + { + name: "ipv6 host: defaultEndpoint set to 127.0.0.1:7073", + proxy: ipv6Proxy, + defaultEndpoint: "127.0.0.1:7073", + expectedAddr: "::1", + expectedPort: 7073, + }, + { + name: "ipv6 host: defaultEndpoint set to [::1]:7073", + proxy: ipv6Proxy, + defaultEndpoint: "[::1]:7073", + expectedAddr: "::1", + expectedPort: 7073, + }, // dual-stack use cases + { + name: "dual-stack host: defaultEndpoint set to 127.0.0.1:7073", + proxy: &dsProxy, + defaultEndpoint: "127.0.0.1:7073", + expectedAddr: "127.0.0.1", + expectedPort: 7073, + }, + { + name: "dual-stack host: defaultEndpoint set to [::1]:7073", + proxy: &dsProxy, + defaultEndpoint: "[::1]:7073", + expectedAddr: "::1", + expectedPort: 7073, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + g := NewWithT(t) + cg := NewConfigGenTest(t, TestOptions{}) + proxy := cg.SetupProxy(c.proxy) + proxy.Metadata.InterceptionMode = model.InterceptionNone + proxy.SidecarScope = &model.SidecarScope{ + Sidecar: &networking.Sidecar{ + Ingress: []*networking.IstioIngressListener{ + { + CaptureMode: networking.CaptureMode_NONE, + DefaultEndpoint: c.defaultEndpoint, + Port: &networking.Port{ + Number: 7443, + Name: "http", + Protocol: "HTTP", + }, + }, + }, + }, + } + + clusters := cg.Clusters(proxy) + xdstest.ValidateClusters(t, clusters) + + clusters = xdstest.FilterClusters(clusters, inboundFilter) + g.Expect(len(clusters)).ShouldNot(Equal(0)) + + for _, cluster := range clusters { + socket := 
cluster.GetLoadAssignment().GetEndpoints()[0].LbEndpoints[0].GetEndpoint().GetAddress().GetSocketAddress() + g.Expect(socket.GetAddress()).To(Equal(c.expectedAddr)) + g.Expect(socket.GetPortValue()).To(Equal(c.expectedPort)) + } + }) + } +} + func TestRedisProtocolWithPassThroughResolutionAtGateway(t *testing.T) { servicePort := &model.Port{ Name: "redis-port", diff --git a/pilot/pkg/networking/core/v1alpha3/gateway_test.go b/pilot/pkg/networking/core/v1alpha3/gateway_test.go index 51ac2ac86516..c482f182eafb 100644 --- a/pilot/pkg/networking/core/v1alpha3/gateway_test.go +++ b/pilot/pkg/networking/core/v1alpha3/gateway_test.go @@ -2895,6 +2895,67 @@ func TestBuildGatewayListeners(t *testing.T) { }, []string{"10.0.0.1_443", "10.0.0.2_443", "0.0.0.0_443"}, }, + { + "gateway with HTTPS/TCP invalid configuration", + &pilot_model.Proxy{}, + []config.Config{ + { + Meta: config.Meta{Name: "gateway1", Namespace: "testns", GroupVersionKind: gvk.Gateway}, + Spec: &networking.Gateway{ + Servers: []*networking.Server{ + { + Port: &networking.Port{Name: "https", Number: 443, Protocol: "HTTPS"}, + Hosts: []string{"*.1.example.com"}, + Tls: &networking.ServerTLSSettings{CredentialName: "test", Mode: networking.ServerTLSSettings_SIMPLE}, + }, + { + Port: &networking.Port{Name: "tcp", Number: 443, Protocol: "TCP"}, + Hosts: []string{"*.1.example.com"}, + }, + }, + }, + }, + { + Meta: config.Meta{Name: "gateway2", Namespace: "testns", GroupVersionKind: gvk.Gateway}, + Spec: &networking.Gateway{ + Servers: []*networking.Server{ + { + Port: &networking.Port{Name: "https", Number: 443, Protocol: "HTTPS"}, + Hosts: []string{"*.2.example.com"}, + Tls: &networking.ServerTLSSettings{CredentialName: "test", Mode: networking.ServerTLSSettings_SIMPLE}, + }, + { + Port: &networking.Port{Name: "tcp", Number: 443, Protocol: "TCP"}, + Hosts: []string{"*.2.example.com"}, + }, + }, + }, + }, + }, + []config.Config{ + { + Meta: config.Meta{Name: uuid.NewString(), Namespace: uuid.NewString(), GroupVersionKind: gvk.VirtualService}, + Spec: &networking.VirtualService{ + Gateways: []string{"testns/gateway1"}, + Hosts: []string{"*"}, + Tcp: []*networking.TCPRoute{{ + Route: []*networking.RouteDestination{{Destination: &networking.Destination{Host: "example.com"}}}, + }}, + }, + }, + { + Meta: config.Meta{Name: uuid.NewString(), Namespace: uuid.NewString(), GroupVersionKind: gvk.VirtualService}, + Spec: &networking.VirtualService{ + Gateways: []string{"testns/gateway2"}, + Hosts: []string{"*"}, + Tcp: []*networking.TCPRoute{{ + Route: []*networking.RouteDestination{{Destination: &networking.Destination{Host: "example.com"}}}, + }}, + }, + }, + }, + []string{"0.0.0.0_443"}, + }, { "gateway with multiple HTTPS servers with bind and same host", &pilot_model.Proxy{}, diff --git a/pilot/pkg/networking/core/v1alpha3/listener_builder.go b/pilot/pkg/networking/core/v1alpha3/listener_builder.go index 7dadabcea238..f0396cc4da9f 100644 --- a/pilot/pkg/networking/core/v1alpha3/listener_builder.go +++ b/pilot/pkg/networking/core/v1alpha3/listener_builder.go @@ -137,6 +137,10 @@ func (lb *ListenerBuilder) buildVirtualOutboundListener() *ListenerBuilder { // add extra addresses for the listener if features.EnableDualStack && len(actualWildcards) > 1 { ipTablesListener.AdditionalAddresses = util.BuildAdditionalAddresses(actualWildcards[1:], uint32(lb.push.Mesh.ProxyListenPort)) + } else if features.EnableAdditionalIpv4OutboundListenerForIpv6Only && (lb.node.GetIPMode() == model.IPv6) { + // add an additional IPv4 outbound listener for IPv6 only 
clusters + ipv4Wildcards, _ := getWildcardsAndLocalHost(model.IPv4) // get the IPv4 based wildcards + ipTablesListener.AdditionalAddresses = util.BuildAdditionalAddresses(ipv4Wildcards[0:], uint32(lb.push.Mesh.ProxyListenPort)) } class := model.OutboundListenerClass(lb.node.Type) diff --git a/pilot/pkg/networking/core/v1alpha3/listener_builder_test.go b/pilot/pkg/networking/core/v1alpha3/listener_builder_test.go index 476deb997176..64cf0144acc1 100644 --- a/pilot/pkg/networking/core/v1alpha3/listener_builder_test.go +++ b/pilot/pkg/networking/core/v1alpha3/listener_builder_test.go @@ -886,3 +886,18 @@ func TestHCMInternalAddressConfig(t *testing.T) { }) } } + +func TestAdditionalAddressesForIPv6(t *testing.T) { + test.SetForTest(t, &features.EnableAdditionalIpv4OutboundListenerForIpv6Only, true) + cg := NewConfigGenTest(t, TestOptions{Services: testServices}) + proxy := cg.SetupProxy(&model.Proxy{IPAddresses: []string{"1111:2222::1"}}) + + listeners := buildListeners(t, TestOptions{Services: testServices}, proxy) + vo := xdstest.ExtractListener(model.VirtualOutboundListenerName, listeners) + if vo == nil { + t.Fatalf("didn't find virtual outbound listener") + } + if vo.AdditionalAddresses == nil || len(vo.AdditionalAddresses) != 1 { + t.Fatalf("expected additional ipv4 bind addresse") + } +} diff --git a/pilot/pkg/networking/core/v1alpha3/route/route.go b/pilot/pkg/networking/core/v1alpha3/route/route.go index 539050154025..016ebd3ceb22 100644 --- a/pilot/pkg/networking/core/v1alpha3/route/route.go +++ b/pilot/pkg/networking/core/v1alpha3/route/route.go @@ -977,7 +977,7 @@ func translateQueryParamMatch(name string, in *networking.StringMatch) *route.Qu // isCatchAllStringMatch determines if the given matcher is matched with all strings or not. // Currently, if the regex has "*" value, it returns true func isCatchAllStringMatch(in *networking.StringMatch) bool { - if in == nil { + if in == nil || in.MatchType == nil { return true } diff --git a/pilot/pkg/networking/core/v1alpha3/route/route_test.go b/pilot/pkg/networking/core/v1alpha3/route/route_test.go index cc99ccfc6d0a..a25e700ed45f 100644 --- a/pilot/pkg/networking/core/v1alpha3/route/route_test.go +++ b/pilot/pkg/networking/core/v1alpha3/route/route_test.go @@ -312,6 +312,15 @@ func TestBuildHTTPRoutes(t *testing.T) { g.Expect(routes[0].GetMatch().GetHeaders()[0].GetName()).To(gomega.Equal("FOO-HEADER")) g.Expect(routes[0].GetMatch().GetHeaders()[0].GetPresentMatch()).To(gomega.Equal(true)) g.Expect(routes[0].GetMatch().GetHeaders()[0].GetInvertMatch()).To(gomega.Equal(false)) + + routes, err = route.BuildHTTPRoutesForVirtualService(node(cg), virtualServiceWithPresentMatchingOnHeader2, + serviceRegistry, nil, 8080, gatewayNames, route.RouteOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + xdstest.ValidateRoutes(t, routes) + g.Expect(len(routes)).To(gomega.Equal(1)) + g.Expect(routes[0].GetMatch().GetHeaders()[0].GetName()).To(gomega.Equal("FOO-HEADER")) + g.Expect(routes[0].GetMatch().GetHeaders()[0].GetPresentMatch()).To(gomega.Equal(true)) + g.Expect(routes[0].GetMatch().GetHeaders()[0].GetInvertMatch()).To(gomega.Equal(false)) }) t.Run("for virtual service with presence matching on header and without_header", func(t *testing.T) { @@ -2160,6 +2169,34 @@ var virtualServiceWithPresentMatchingOnHeader = config.Config{ }, } +var virtualServiceWithPresentMatchingOnHeader2 = config.Config{ + Meta: config.Meta{ + GroupVersionKind: gvk.VirtualService, + Name: "acme", + }, + Spec: &networking.VirtualService{ + Hosts: []string{}, + 
Gateways: []string{"some-gateway"}, + Http: []*networking.HTTPRoute{ + { + Match: []*networking.HTTPMatchRequest{ + { + Name: "presence", + Headers: map[string]*networking.StringMatch{ + "FOO-HEADER": {}, + }, + }, + }, + Redirect: &networking.HTTPRedirect{ + Uri: "example.org", + Authority: "some-authority.default.svc.cluster.local", + RedirectCode: 308, + }, + }, + }, + }, +} + var virtualServiceWithPresentMatchingOnWithoutHeader = config.Config{ Meta: config.Meta{ GroupVersionKind: gvk.VirtualService, diff --git a/pilot/pkg/security/trustdomain/bundle.go b/pilot/pkg/security/trustdomain/bundle.go index ed5664196877..7f9b149e7ec1 100644 --- a/pilot/pkg/security/trustdomain/bundle.go +++ b/pilot/pkg/security/trustdomain/bundle.go @@ -18,6 +18,7 @@ import ( "fmt" "strings" + "istio.io/istio/pilot/pkg/features" "istio.io/istio/pkg/config/constants" istiolog "istio.io/istio/pkg/log" ) @@ -71,8 +72,15 @@ func (t Bundle) ReplaceTrustDomainAliases(principals []string) []string { // Generate configuration for trust domain and trust domain aliases. principalsIncludingAliases = append(principalsIncludingAliases, t.replaceTrustDomains(principal, trustDomainFromPrincipal)...) } else { - authzLog.Warnf("Trust domain %s from principal %s does not match the current trust "+ + msg := fmt.Sprintf("Trust domain %s from principal %s does not match the current trust "+ "domain or its aliases", trustDomainFromPrincipal, principal) + // when SkipValidateTrustDomain is being used the message isn't very meaningful so we'll log it at a lower level + // logging it at this level may help users who are looking to disable skipping validation understand if it's safe + if !features.SkipValidateTrustDomain { + authzLog.Warn(msg) + } else { + authzLog.Debug(msg) + } // If the trust domain from the existing doesn't match with the new trust domain aliases or "cluster.local", // keep the policy as it is. principalsIncludingAliases = append(principalsIncludingAliases, principal) diff --git a/pilot/pkg/serviceregistry/kube/controller/endpointslice.go b/pilot/pkg/serviceregistry/kube/controller/endpointslice.go index 24262530ddbc..4a823503b625 100644 --- a/pilot/pkg/serviceregistry/kube/controller/endpointslice.go +++ b/pilot/pkg/serviceregistry/kube/controller/endpointslice.go @@ -187,13 +187,6 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na // Draining tracking is only enabled if persistent sessions is enabled. // If we start using them for other features, this can be adjusted. healthStatus := endpointHealthStatus(svc, e) - if !features.SendUnhealthyEndpoints.Load() { - if healthStatus == model.UnHealthy { - // Ignore not ready endpoints. Draining endpoints are tracked, but not returned - // except for persistent-session clusters. 
- continue - } - } for _, a := range e.Addresses { pod, expectedPod := getPod(esc.c, a, &metav1.ObjectMeta{Name: slice.Name, Namespace: slice.Namespace}, e.TargetRef, hostName) if pod == nil && expectedPod { diff --git a/pilot/pkg/xds/eds.go b/pilot/pkg/xds/eds.go index 2eace61e8ab7..7fc8151d2bce 100644 --- a/pilot/pkg/xds/eds.go +++ b/pilot/pkg/xds/eds.go @@ -121,10 +121,13 @@ func (s *DiscoveryServer) edsCacheUpdate(shard model.ShardKey, hostname string, ep.Lock() defer ep.Unlock() newIstioEndpoints := istioEndpoints - if features.SendUnhealthyEndpoints.Load() { - oldIstioEndpoints := ep.Shards[shard] + oldIstioEndpoints := ep.Shards[shard] + needPush := false + if oldIstioEndpoints == nil { + // If there are no old endpoints, we should push with incoming endpoints as there is nothing to compare. + needPush = true + } else { newIstioEndpoints = make([]*model.IstioEndpoint, 0, len(istioEndpoints)) - // Check if new Endpoints are ready to be pushed. This check // will ensure that if a new pod comes with a non ready endpoint, // we do not unnecessarily push that config to Envoy. @@ -140,7 +143,6 @@ func (s *DiscoveryServer) edsCacheUpdate(shard model.ShardKey, hostname string, for _, nie := range istioEndpoints { nmap[nie.Address] = nie } - needPush := false for _, nie := range istioEndpoints { if oie, exists := emap[nie.Address]; exists { // If endpoint exists already, we should push if it's health status changes. @@ -148,11 +150,14 @@ func (s *DiscoveryServer) edsCacheUpdate(shard model.ShardKey, hostname string, needPush = true } newIstioEndpoints = append(newIstioEndpoints, nie) - } else if nie.HealthStatus == model.Healthy { + } else { // If the endpoint does not exist in shards that means it is a - // new endpoint. Only send if it is healthy to avoid pushing endpoints - // that are not ready to start with. - needPush = true + // new endpoint. Always send new endpoints even if they are not healthy. + // This is OK since we disable panic threshold when SendUnhealthyEndpoints is enabled. + // Without SendUnhealthyEndpoints we do not need this; headless services will trigger the push in the Kubernetes controller. 
+ if features.SendUnhealthyEndpoints.Load() { + needPush = true + } newIstioEndpoints = append(newIstioEndpoints, nie) } } @@ -163,12 +168,11 @@ func (s *DiscoveryServer) edsCacheUpdate(shard model.ShardKey, hostname string, needPush = true } } + } - if pushType != FullPush && !needPush { - log.Debugf("No push, either old endpoint health status did not change or new endpoint came with unhealthy status, %v", hostname) - pushType = NoPush - } - + if pushType != FullPush && !needPush { + log.Debugf("No push, either old endpoint health status did not change or new endpoint came with unhealthy status, %v", hostname) + pushType = NoPush } ep.Shards[shard] = newIstioEndpoints diff --git a/pilot/pkg/xds/eds_test.go b/pilot/pkg/xds/eds_test.go index 633c8f94443c..900efb1dd514 100644 --- a/pilot/pkg/xds/eds_test.go +++ b/pilot/pkg/xds/eds_test.go @@ -350,170 +350,186 @@ func TestEDSOverlapping(t *testing.T) { } func TestEDSUnhealthyEndpoints(t *testing.T) { - test.SetAtomicBoolForTest(t, features.SendUnhealthyEndpoints, true) - s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{}) - addUnhealthyCluster(s) - adscon := s.Connect(nil, nil, watchEds) - _, err := adscon.Wait(5 * time.Second) - if err != nil { - t.Fatalf("Error in push %v", err) - } + for _, sendUnhealthy := range []bool{true, false} { + t.Run(fmt.Sprint(sendUnhealthy), func(t *testing.T) { + test.SetAtomicBoolForTest(t, features.SendUnhealthyEndpoints, sendUnhealthy) + s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{}) + addUnhealthyCluster(s) + adscon := s.Connect(nil, nil, watchEds) + _, err := adscon.Wait(5 * time.Second) + if err != nil { + t.Fatalf("Error in push %v", err) + } - validateEndpoints := func(expectPush bool, healthy []string, unhealthy []string) { - t.Helper() - // Normalize lists to make comparison easier - if healthy == nil { - healthy = []string{} - } - if unhealthy == nil { - unhealthy = []string{} - } - sort.Strings(healthy) - sort.Strings(unhealthy) - if expectPush { - upd, _ := adscon.Wait(5*time.Second, v3.EndpointType) + validateEndpoints := func(expectPush bool, healthy []string, unhealthy []string) { + t.Helper() + // Normalize lists to make comparison easier + if healthy == nil { + healthy = []string{} + } + if unhealthy == nil { + unhealthy = []string{} + } + sort.Strings(healthy) + sort.Strings(unhealthy) + if expectPush { + upd, _ := adscon.Wait(5*time.Second, v3.EndpointType) - if len(upd) > 0 && !slices.Contains(upd, v3.EndpointType) { - t.Fatalf("Expecting EDS push as endpoint health is changed. But received %v", upd) + if len(upd) > 0 && !slices.Contains(upd, v3.EndpointType) { + t.Fatalf("Expecting EDS push as endpoint health is changed. But received %v", upd) + } + } else { + upd, _ := adscon.Wait(50*time.Millisecond, v3.EndpointType) + if slices.Contains(upd, v3.EndpointType) { + t.Fatalf("Expected no EDS push, got %v", upd) + } + } + + // Validate that endpoints are pushed. 
+ lbe := adscon.GetEndpoints()["outbound|53||unhealthy.svc.cluster.local"] + eh, euh := xdstest.ExtractHealthEndpoints(lbe) + gotHealthy := sets.SortedList(sets.New(eh...)) + gotUnhealthy := sets.SortedList(sets.New(euh...)) + if !reflect.DeepEqual(gotHealthy, healthy) { + t.Fatalf("did not get expected endpoints: got %v, want %v", gotHealthy, healthy) + } + if !reflect.DeepEqual(gotUnhealthy, unhealthy) { + t.Fatalf("did not get expected unhealthy endpoints: got %v, want %v", gotUnhealthy, unhealthy) + } } - } else { - upd, _ := adscon.Wait(50*time.Millisecond, v3.EndpointType) - if slices.Contains(upd, v3.EndpointType) { - t.Fatalf("Expected no EDS push, got %v", upd) + + // Validate that we do not send initial unhealthy endpoints. + if sendUnhealthy { + validateEndpoints(false, nil, []string{"10.0.0.53:53"}) + } else { + validateEndpoints(true, nil, nil) } - } + adscon.WaitClear() - // Validate that endpoints are pushed. - lbe := adscon.GetEndpoints()["outbound|53||unhealthy.svc.cluster.local"] - eh, euh := xdstest.ExtractHealthEndpoints(lbe) - gotHealthy := sets.SortedList(sets.New(eh...)) - gotUnhealthy := sets.SortedList(sets.New(euh...)) - if !reflect.DeepEqual(gotHealthy, healthy) { - t.Fatalf("did not get expected endpoints: got %v, want %v", gotHealthy, healthy) - } - if !reflect.DeepEqual(gotUnhealthy, unhealthy) { - t.Fatalf("did not get expected unhealthy endpoints: got %v, want %v", gotUnhealthy, unhealthy) - } - } + // Set additional unhealthy endpoint and validate Eds update is not triggered. + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", + []*model.IstioEndpoint{ + { + Address: "10.0.0.53", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.UnHealthy, + }, + { + Address: "10.0.0.54", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.UnHealthy, + }, + }) - // Validate that we do not send initial unhealthy endpoints. - validateEndpoints(false, nil, nil) - adscon.WaitClear() + // Validate that endpoint is not pushed. + if sendUnhealthy { + validateEndpoints(true, nil, []string{"10.0.0.53:53", "10.0.0.54:53"}) + } else { + validateEndpoints(false, nil, nil) + } - // Set additional unhealthy endpoint and validate Eds update is not triggered. - s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", - []*model.IstioEndpoint{ - { - Address: "10.0.0.53", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.UnHealthy, - }, - { - Address: "10.0.0.54", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.UnHealthy, - }, - }) + // Change the status of endpoint to Healthy and validate Eds is pushed. + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", + []*model.IstioEndpoint{ + { + Address: "10.0.0.53", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + { + Address: "10.0.0.54", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + }) - // Validate that endpoint is not pushed. - validateEndpoints(false, nil, nil) + // Validate that endpoints are pushed. + validateEndpoints(true, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil) - // Change the status of endpoint to Healthy and validate Eds is pushed. 
- s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", - []*model.IstioEndpoint{ - { - Address: "10.0.0.53", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - { - Address: "10.0.0.54", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - }) + // Set to exact same endpoints + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", + []*model.IstioEndpoint{ + { + Address: "10.0.0.53", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + { + Address: "10.0.0.54", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + }) + // Validate that endpoint is not pushed. + validateEndpoints(false, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil) - // Validate that endpoints are pushed. - validateEndpoints(true, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil) + // Now change the status of endpoint to UnHealthy and validate Eds is pushed. + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", + []*model.IstioEndpoint{ + { + Address: "10.0.0.53", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.UnHealthy, + }, + { + Address: "10.0.0.54", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + }) - // Set to exact same endpoints - s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", - []*model.IstioEndpoint{ - { - Address: "10.0.0.53", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - { - Address: "10.0.0.54", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - }) - // Validate that endpoint is not pushed. - validateEndpoints(false, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil) + // Validate that endpoints are pushed. + if sendUnhealthy { + validateEndpoints(true, []string{"10.0.0.54:53"}, []string{"10.0.0.53:53"}) + } else { + validateEndpoints(true, []string{"10.0.0.54:53"}, nil) + } - // Now change the status of endpoint to UnHealthy and validate Eds is pushed. - s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", - []*model.IstioEndpoint{ - { - Address: "10.0.0.53", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.UnHealthy, - }, - { - Address: "10.0.0.54", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - }) + // Change the status of endpoint to Healthy and validate Eds is pushed. + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", + []*model.IstioEndpoint{ + { + Address: "10.0.0.53", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + { + Address: "10.0.0.54", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + }) - // Validate that endpoints are pushed. - validateEndpoints(true, []string{"10.0.0.54:53"}, []string{"10.0.0.53:53"}) + validateEndpoints(true, []string{"10.0.0.54:53", "10.0.0.53:53"}, nil) - // Change the status of endpoint to Healthy and validate Eds is pushed. 
- s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", - []*model.IstioEndpoint{ - { - Address: "10.0.0.53", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - { - Address: "10.0.0.54", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, - }) + // Remove a healthy endpoint + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", + []*model.IstioEndpoint{ + { + Address: "10.0.0.53", + EndpointPort: 53, + ServicePortName: "tcp-dns", + HealthStatus: model.Healthy, + }, + }) - validateEndpoints(true, []string{"10.0.0.54:53", "10.0.0.53:53"}, nil) + validateEndpoints(true, []string{"10.0.0.53:53"}, nil) - // Remove a healthy endpoint - s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", - []*model.IstioEndpoint{ - { - Address: "10.0.0.53", - EndpointPort: 53, - ServicePortName: "tcp-dns", - HealthStatus: model.Healthy, - }, + // Remove last healthy endpoint + s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", []*model.IstioEndpoint{}) + validateEndpoints(true, nil, nil) }) - - validateEndpoints(true, []string{"10.0.0.53:53"}, nil) - - // Remove last healthy endpoint - s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", []*model.IstioEndpoint{}) - validateEndpoints(true, nil, nil) + } } // Validates the behavior when Service resolution type is updated after initial EDS push. diff --git a/pilot/pkg/xds/endpoint_builder.go b/pilot/pkg/xds/endpoint_builder.go index 9179426881bf..6e93c9949107 100644 --- a/pilot/pkg/xds/endpoint_builder.go +++ b/pilot/pkg/xds/endpoint_builder.go @@ -291,6 +291,10 @@ func (b *EndpointBuilder) buildLocalityLbEndpointsFromShards( if ep.Address == "" && ep.Network == b.network { continue } + // Filter out unhealthy endpoints + if !features.SendUnhealthyEndpoints.Load() && ep.HealthStatus == model.UnHealthy { + continue + } // Draining endpoints are only sent to 'persistent session' clusters. draining := ep.HealthStatus == model.Draining || features.DrainingLabel != "" && ep.Labels[features.DrainingLabel] != "" diff --git a/pkg/dns/client/dns.go b/pkg/dns/client/dns.go index aaa562e2d179..449d1d98be89 100644 --- a/pkg/dns/client/dns.go +++ b/pkg/dns/client/dns.go @@ -507,7 +507,7 @@ func generateAltHosts(hostname string, nameinfo *dnsProto.NameTable_NameInfo, pr // If it is not part of the registry, return nil so that caller queries upstream. If it is part // of registry, we will look it up in one of our tables, failing which we will return NXDOMAIN. func (table *LookupTable) lookupHost(qtype uint16, hostname string) ([]dns.RR, bool) { - question := host.Name(hostname) + question := string(host.Name(hostname)) wildcard := false // First check if host exists in all hosts. 
hostFound := table.allHosts.Contains(hostname) @@ -531,16 +531,21 @@ func (table *LookupTable) lookupHost(qtype uint16, hostname string) ([]dns.RR, b } var out []dns.RR + var ipAnswers []dns.RR + var wcAnswers []dns.RR + var cnAnswers []dns.RR + // Odds are, the first query will always be an expanded hostname // (productpage.ns1.svc.cluster.local.ns1.svc.cluster.local) // So lookup the cname table first - cn := table.cname[hostname] - if len(cn) > 0 { + for _, cn := range table.cname[hostname] { // this was a cname match - hostname = cn[0].(*dns.CNAME).Target + copied := dns.Copy(cn).(*dns.CNAME) + copied.Header().Name = question + cnAnswers = append(cnAnswers, copied) + hostname = copied.Target } - var ipAnswers []dns.RR - var wcAnswers []dns.RR + switch qtype { case dns.TypeA: ipAnswers = table.name4[hostname] @@ -556,16 +561,23 @@ func (table *LookupTable) lookupHost(qtype uint16, hostname string) ([]dns.RR, b if wildcard { for _, answer := range ipAnswers { copied := dns.Copy(answer) - copied.Header().Name = string(question) + /// If there is a CNAME record for the wildcard host, we will send a chained response of CNAME + A/AAAA pointer + /// Otherwise we expand the wildcard to the original question domain + if len(cnAnswers) > 0 { + copied.Header().Name = hostname + } else { + copied.Header().Name = question + } wcAnswers = append(wcAnswers, copied) } } + // We will return a chained response. In a chained response, the first entry is the cname record, // and the second one is the A/AAAA record itself. Some clients do not follow cname redirects // with additional DNS queries. Instead, they expect all the resolved records to be in the same // big DNS response (presumably assuming that a recursive DNS query should do the deed, resolve // cname et al and return the composite response). - out = append(out, cnAnswers...) + out = append(out, cnAnswers...) if wildcard { out = append(out, wcAnswers...)
} else { diff --git a/pkg/dns/client/dns_test.go b/pkg/dns/client/dns_test.go index 208e0aa60db9..cd3ac61408aa 100644 --- a/pkg/dns/client/dns_test.go +++ b/pkg/dns/client/dns_test.go @@ -181,6 +181,12 @@ func testDNS(t *testing.T, d *LocalDNSServer) { host: "a.b.wildcard.", expected: a("a.b.wildcard.", []netip.Addr{netip.MustParseAddr("11.11.11.11")}), }, + { + name: "success: wild card with with search namespace chained pointer correctly", + host: "foo.wildcard.ns1.svc.cluster.local.", + expected: append(cname("foo.wildcard.ns1.svc.cluster.local.", "*.wildcard."), + a("*.wildcard.", []netip.Addr{netip.MustParseAddr("10.10.10.10")})...), + }, { name: "success: wild card with domain returns A record correctly", host: "foo.svc.mesh.company.net.", @@ -194,8 +200,8 @@ func testDNS(t *testing.T, d *LocalDNSServer) { { name: "success: wild card with search domain returns A record correctly", host: "foo.svc.mesh.company.net.ns1.svc.cluster.local.", - expected: append(cname("*.svc.mesh.company.net.ns1.svc.cluster.local.", "*.svc.mesh.company.net."), - a("foo.svc.mesh.company.net.ns1.svc.cluster.local.", []netip.Addr{netip.MustParseAddr("10.1.2.3")})...), + expected: append(cname("foo.svc.mesh.company.net.ns1.svc.cluster.local.", "*.svc.mesh.company.net."), + a("*.svc.mesh.company.net.", []netip.Addr{netip.MustParseAddr("10.1.2.3")})...), }, { name: "success: TypeAAAA query returns AAAA records only", diff --git a/pkg/kube/multicluster/secretcontroller.go b/pkg/kube/multicluster/secretcontroller.go index da1364337e7b..c226885adc33 100644 --- a/pkg/kube/multicluster/secretcontroller.go +++ b/pkg/kube/multicluster/secretcontroller.go @@ -111,6 +111,7 @@ func NewController(kubeclientset kube.Client, namespace string, clusterID cluste } secrets := kclient.NewFiltered[*corev1.Secret](informerClient, kclient.Filter{ + Namespace: namespace, LabelSelector: MultiClusterSecretLabel + "=true", }) diff --git a/pkg/kube/multicluster/secretcontroller_test.go b/pkg/kube/multicluster/secretcontroller_test.go index 4003d4903299..338c55494549 100644 --- a/pkg/kube/multicluster/secretcontroller_test.go +++ b/pkg/kube/multicluster/secretcontroller_test.go @@ -41,11 +41,11 @@ type clusterCredential struct { kubeconfig []byte } -func makeSecret(secret string, clusterConfigs ...clusterCredential) *v1.Secret { +func makeSecret(namespace string, secret string, clusterConfigs ...clusterCredential) *v1.Secret { s := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secret, - Namespace: secretNamespace, + Namespace: namespace, Labels: map[string]string{ MultiClusterSecretLabel: "true", }, @@ -102,15 +102,24 @@ func Test_SecretController(t *testing.T) { clientset := kube.NewFakeClient() var ( - secret0 = makeSecret("s0", clusterCredential{"c0", []byte("kubeconfig0-0")}) - secret0UpdateKubeconfigChanged = makeSecret("s0", clusterCredential{"c0", []byte("kubeconfig0-1")}) - secret0UpdateKubeconfigSame = makeSecret("s0", clusterCredential{"c0", []byte("kubeconfig0-1")}) - secret0AddCluster = makeSecret("s0", clusterCredential{"c0", []byte("kubeconfig0-1")}, clusterCredential{"c0-1", []byte("kubeconfig0-2")}) - secret0DeleteCluster = secret0UpdateKubeconfigChanged // "c0-1" cluster deleted - secret0ReAddCluster = makeSecret("s0", clusterCredential{"c0", []byte("kubeconfig0-1")}, clusterCredential{"c0-1", []byte("kubeconfig0-2")}) - secret0ReDeleteCluster = secret0UpdateKubeconfigChanged // "c0-1" cluster re-deleted - secret1 = makeSecret("s1", clusterCredential{"c1", []byte("kubeconfig1-0")}) + secret0 = 
makeSecret(secretNamespace, "s0", + clusterCredential{"c0", []byte("kubeconfig0-0")}) + secret0UpdateKubeconfigChanged = makeSecret(secretNamespace, "s0", + clusterCredential{"c0", []byte("kubeconfig0-1")}) + secret0UpdateKubeconfigSame = makeSecret(secretNamespace, "s0", + clusterCredential{"c0", []byte("kubeconfig0-1")}) + secret0AddCluster = makeSecret(secretNamespace, "s0", + clusterCredential{"c0", []byte("kubeconfig0-1")}, clusterCredential{"c0-1", []byte("kubeconfig0-2")}) + secret0DeleteCluster = secret0UpdateKubeconfigChanged // "c0-1" cluster deleted + secret0ReAddCluster = makeSecret(secretNamespace, "s0", + clusterCredential{"c0", []byte("kubeconfig0-1")}, clusterCredential{"c0-1", []byte("kubeconfig0-2")}) + secret0ReDeleteCluster = secret0UpdateKubeconfigChanged // "c0-1" cluster re-deleted + secret1 = makeSecret(secretNamespace, "s1", + clusterCredential{"c1", []byte("kubeconfig1-0")}) + otherNSSecret = makeSecret("some-other-namespace", "s2", + clusterCredential{"c1", []byte("kubeconfig1-0")}) ) + secret0UpdateKubeconfigSame.Annotations = map[string]string{"foo": "bar"} steps := []struct { @@ -178,6 +187,10 @@ func Test_SecretController(t *testing.T) { delete: secret1, wantDeleted: "c1", }, + { + name: "Add secret from another namespace", + add: otherNSSecret, + }, } // Start the secret controller and sleep to allow secret process to start. @@ -200,13 +213,13 @@ func Test_SecretController(t *testing.T) { switch { case step.add != nil: - _, err := clientset.Kube().CoreV1().Secrets(secretNamespace).Create(context.TODO(), step.add, metav1.CreateOptions{}) + _, err := clientset.Kube().CoreV1().Secrets(step.add.Namespace).Create(context.TODO(), step.add, metav1.CreateOptions{}) g.Expect(err).Should(BeNil()) case step.update != nil: - _, err := clientset.Kube().CoreV1().Secrets(secretNamespace).Update(context.TODO(), step.update, metav1.UpdateOptions{}) + _, err := clientset.Kube().CoreV1().Secrets(step.update.Namespace).Update(context.TODO(), step.update, metav1.UpdateOptions{}) g.Expect(err).Should(BeNil()) case step.delete != nil: - g.Expect(clientset.Kube().CoreV1().Secrets(secretNamespace).Delete(context.TODO(), step.delete.Name, metav1.DeleteOptions{})). + g.Expect(clientset.Kube().CoreV1().Secrets(step.delete.Namespace).Delete(context.TODO(), step.delete.Name, metav1.DeleteOptions{})). Should(Succeed()) } diff --git a/prow/release-commit.sh b/prow/release-commit.sh index 5296bdfbc81c..8ae827c1a64f 100755 --- a/prow/release-commit.sh +++ b/prow/release-commit.sh @@ -36,7 +36,7 @@ docker run --rm --privileged "${DOCKER_HUB}/qemu-user-static" --reset -p yes export ISTIO_DOCKER_QEMU=true # Use a pinned version in case breaking changes are needed -BUILDER_SHA=a458471e80de1fd22c0b270a8b2a8b91f37b3eab +BUILDER_SHA=8ba9ecb578e3a9b0f8b84aff03b6dab3c4889b64 # Reference to the next minor version of Istio # This will create a version like 1.4-alpha.sha diff --git a/releasenotes/notes/46719.yaml b/releasenotes/notes/46719.yaml new file mode 100644 index 000000000000..fc437ffd4331 --- /dev/null +++ b/releasenotes/notes/46719.yaml @@ -0,0 +1,9 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +issue: + - 46719 +releaseNotes: + - | + **Added** gated flag `ISTIO_ENABLE_IPV4_OUTBOUND_LISTENER_FOR_IPV6_CLUSTERS` to manage an additional outbound listener for IPv6-only clusters to deal with IPv4 NAT outbound traffic. + This is useful for IPv6-only cluster environments such as EKS which manages both egress-only IPv4 as well as IPv6 IPs. 
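A minimal illustration of how the gated flag described in the release note above might be enabled, assuming istiod is installed through the IstioOperator API; the overlay below is a hypothetical sketch added for context and is not part of this change.

```yaml
# Hypothetical sketch (not from this patch): enabling the gated flag on istiod,
# assuming istiod is managed via the IstioOperator API (e.g. istioctl install -f overlay.yaml).
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  components:
    pilot:
      k8s:
        env:
          - name: ISTIO_ENABLE_IPV4_OUTBOUND_LISTENER_FOR_IPV6_CLUSTERS
            value: "true"
```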
diff --git a/releasenotes/notes/47148.yaml b/releasenotes/notes/47148.yaml new file mode 100644 index 000000000000..37ce08ffb2bf --- /dev/null +++ b/releasenotes/notes/47148.yaml @@ -0,0 +1,8 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +issue: + - 47148 +releaseNotes: + - | + **Fixed** an issue where multiple header matches in a root virtual service generated incorrect routes. \ No newline at end of file diff --git a/releasenotes/notes/47290.yaml b/releasenotes/notes/47290.yaml new file mode 100644 index 000000000000..762e7c04335f --- /dev/null +++ b/releasenotes/notes/47290.yaml @@ -0,0 +1,13 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +issue: + - 47290 + - 47264 + - 31250 + - 33360 + - 30531 + - 38484 +releaseNotes: +- | + **Fixed** DNS Proxy resolution for wildcard ServiceEntry with the search domain suffix for glibc-based containers. diff --git a/releasenotes/notes/47412.yaml b/releasenotes/notes/47412.yaml new file mode 100644 index 000000000000..f4ba6511e22c --- /dev/null +++ b/releasenotes/notes/47412.yaml @@ -0,0 +1,9 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +issue: + - 47412 +releaseNotes: + - | + **Fixed** an issue where a Sidecar resource using IstioIngressListener.defaultEndpoint could not use [::1]:PORT if the default IP addressing is not IPv6. + diff --git a/releasenotes/notes/47515.yaml b/releasenotes/notes/47515.yaml new file mode 100644 index 000000000000..c711dc1e4975 --- /dev/null +++ b/releasenotes/notes/47515.yaml @@ -0,0 +1,8 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: istioctl +issue: +- 47505 +releaseNotes: +- | + **Fixed** an issue where `istioctl proxy-config` failed to process a config dump from a file if EDS endpoints were not provided. diff --git a/releasenotes/notes/47705.yaml b/releasenotes/notes/47705.yaml new file mode 100644 index 000000000000..2547db1f677a --- /dev/null +++ b/releasenotes/notes/47705.yaml @@ -0,0 +1,8 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: istioctl +issue: +- 47696 +releaseNotes: +- | + **Fixed** an issue where the `istioctl tag list` command did not accept the `--output` flag. diff --git a/releasenotes/notes/fix-multicluster-secret-filtering.yaml b/releasenotes/notes/fix-multicluster-secret-filtering.yaml new file mode 100644 index 000000000000..044001e309bf --- /dev/null +++ b/releasenotes/notes/fix-multicluster-secret-filtering.yaml @@ -0,0 +1,10 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management + +issue: + - https://github.com/istio/istio/issues/47433 + +releaseNotes: +- | + **Fixed** multicluster secret filtering causing Istio to pick up secrets from every namespace. diff --git a/releasenotes/notes/header-present.yaml b/releasenotes/notes/header-present.yaml new file mode 100644 index 000000000000..b695410194bc --- /dev/null +++ b/releasenotes/notes/header-present.yaml @@ -0,0 +1,8 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +issue: + - 47341 +releaseNotes: +- | + **Fixed** VirtualService HTTP header present match not working with `header-name: {}` set. diff --git a/releasenotes/notes/iptables-lock.yaml b/releasenotes/notes/iptables-lock.yaml new file mode 100644 index 000000000000..7faf986c7ff0 --- /dev/null +++ b/releasenotes/notes/iptables-lock.yaml @@ -0,0 +1,6 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +releaseNotes: + - | + **Improved** `iptables` locking.
The new implementation uses `iptables` built-in lock waiting when needed, and disables locking entirely when not needed. diff --git a/releasenotes/notes/terminating-headless.yaml b/releasenotes/notes/terminating-headless.yaml new file mode 100644 index 000000000000..e7bee2b5d28c --- /dev/null +++ b/releasenotes/notes/terminating-headless.yaml @@ -0,0 +1,8 @@ +apiVersion: release-notes/v2 +kind: bug-fix +area: traffic-management +issue: + - 47348 +releaseNotes: + - | + **Fixed** an issue where traffic to terminating headless service instances did not function correctly. diff --git a/samples/open-telemetry/tracing/README.md b/samples/open-telemetry/tracing/README.md index 66b37ca3e041..fe82bb44b716 100644 --- a/samples/open-telemetry/tracing/README.md +++ b/samples/open-telemetry/tracing/README.md @@ -12,7 +12,8 @@ kubectl -n <namespace> apply -f ../otel.yaml In this example, we use `otel-collector` as the namespace to deploy the `otel-collector` backend: -```ba +```bash +kubectl create namespace otel-collector kubectl -n otel-collector apply -f ../otel.yaml ``` @@ -66,7 +67,7 @@ You may also choose any existing tracing system if you have, and you should chan You may also choose to use your own otel collector if you have, and the key part is to have the `otlp` grpc protocol receiver to receive the traces. One important thing is to make sure your otel collector service's grpc port starts with `grpc-` prefix, which is like: -```ya +```yaml spec: ports: - name: grpc-otlp @@ -102,8 +103,15 @@ Make sure the service name matches the one you deployed if you select a differen Next, add a Telemetry resource that tells Istio to send trace records to the OpenTelemetry collector. -```yaml -kubectl -n otel-collector apply -f ./telemetry.yaml +```bash +kubectl -n <namespace> apply -f ./telemetry.yaml +``` + +In this example, we deploy it to the default namespace, which is where the sample apps +from the [getting started](https://istio.io/latest/docs/setup/getting-started) are also deployed. + +```bash +kubectl apply -f ./telemetry.yaml +``` The core config is: diff --git a/tools/istio-clean-iptables/pkg/cmd/root.go b/tools/istio-clean-iptables/pkg/cmd/root.go index ade4f20693ab..e83bc011063b 100755 --- a/tools/istio-clean-iptables/pkg/cmd/root.go +++ b/tools/istio-clean-iptables/pkg/cmd/root.go @@ -105,46 +105,27 @@ func bindFlags(cmd *cobra.Command, args []string) { viper.AutomaticEnv() // Replace - with _; so that environment variables are looked up correctly.
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - - if err := viper.BindPFlag(constants.DryRun, cmd.Flags().Lookup(constants.DryRun)); err != nil { - handleError(err) - } - viper.SetDefault(constants.DryRun, false) - - if err := viper.BindPFlag(constants.ProxyUID, cmd.Flags().Lookup(constants.ProxyUID)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ProxyUID, "") - - if err := viper.BindPFlag(constants.ProxyGID, cmd.Flags().Lookup(constants.ProxyGID)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ProxyGID, "") - - if err := viper.BindPFlag(constants.RedirectDNS, cmd.Flags().Lookup(constants.RedirectDNS)); err != nil { - handleError(err) - } - viper.SetDefault(constants.RedirectDNS, dnsCaptureByAgent) - - if err := viper.BindEnv(constants.OwnerGroupsInclude.Name); err != nil { - handleError(err) - } - viper.SetDefault(constants.OwnerGroupsInclude.Name, constants.OwnerGroupsInclude.DefaultValue) - - if err := viper.BindEnv(constants.OwnerGroupsExclude.Name); err != nil { - handleError(err) + bind := func(name string, def any) { + if err := viper.BindPFlag(name, cmd.Flags().Lookup(name)); err != nil { + handleError(err) + } + viper.SetDefault(name, def) } - viper.SetDefault(constants.OwnerGroupsExclude.Name, constants.OwnerGroupsExclude.DefaultValue) - - if err := viper.BindEnv(constants.IstioInboundInterceptionMode.Name); err != nil { - handleError(err) + bindEnv := func(name string, def any) { + if err := viper.BindEnv(name); err != nil { + handleError(err) + } + viper.SetDefault(name, def) } - viper.SetDefault(constants.IstioInboundInterceptionMode.Name, constants.IstioInboundInterceptionMode.DefaultValue) - if err := viper.BindEnv(constants.IstioInboundTproxyMark.Name); err != nil { - handleError(err) - } - viper.SetDefault(constants.IstioInboundTproxyMark.Name, constants.IstioInboundTproxyMark.DefaultValue) + bind(constants.DryRun, false) + bind(constants.ProxyUID, "") + bind(constants.ProxyGID, "") + bind(constants.RedirectDNS, dnsCaptureByAgent) + bindEnv(constants.OwnerGroupsInclude.Name, constants.OwnerGroupsInclude.DefaultValue) + bindEnv(constants.OwnerGroupsExclude.Name, constants.OwnerGroupsExclude.DefaultValue) + bindEnv(constants.IstioInboundInterceptionMode.Name, constants.IstioInboundInterceptionMode.DefaultValue) + bindEnv(constants.IstioInboundTproxyMark.Name, constants.IstioInboundTproxyMark.DefaultValue) } // https://github.com/spf13/viper/issues/233. 
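The `bind` and `bindEnv` closures above collapse the long runs of `viper.BindPFlag`/`viper.BindEnv` plus `viper.SetDefault` that previously filled `bindFlags` in both `istio-clean-iptables` and `istio-iptables`. A self-contained sketch of the pattern, with illustrative flag names and `panic` standing in for the real `handleError` helper:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{Use: "demo"}
	cmd.Flags().Bool("dry-run", false, "print rules instead of applying them")
	cmd.Flags().String("proxy-uid", "", "UID of the proxy user")

	viper.AutomaticEnv()
	viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))

	// bind ties one cobra flag to viper and records its default in a single place,
	// replacing a repeated BindPFlag + SetDefault block per flag.
	bind := func(name string, def any) {
		if err := viper.BindPFlag(name, cmd.Flags().Lookup(name)); err != nil {
			panic(err) // the real code calls handleError(err)
		}
		viper.SetDefault(name, def)
	}

	bind("dry-run", false)
	bind("proxy-uid", "")

	fmt.Println(viper.GetBool("dry-run"), viper.GetString("proxy-uid"))
}
```

Viper's usual precedence still applies after the refactor: an explicitly set flag wins over an environment variable, which wins over the default registered by `bind`.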
diff --git a/tools/istio-iptables/pkg/capture/run_linux.go b/tools/istio-iptables/pkg/capture/run_linux.go index a1f25bb6371d..384029bc7bec 100644 --- a/tools/istio-iptables/pkg/capture/run_linux.go +++ b/tools/istio-iptables/pkg/capture/run_linux.go @@ -16,7 +16,6 @@ package capture import ( "fmt" "net" - "os" "strconv" "github.com/containernetworking/plugins/pkg/ns" @@ -95,11 +94,6 @@ func ConfigureRoutes(cfg *config.Config, ext dep.Dependencies) error { return nil } if ext != nil && cfg.CNIMode { - if cfg.HostNSEnterExec { - command := os.Args[0] - return ext.Run(command, nil, constants.CommandConfigureRoutes) - } - nsContainer, err := ns.GetNS(cfg.NetworkNamespace) if err != nil { return err diff --git a/tools/istio-iptables/pkg/cmd/root.go b/tools/istio-iptables/pkg/cmd/root.go index ab3fbe2b6f49..1cfd2eb56bc0 100644 --- a/tools/istio-iptables/pkg/cmd/root.go +++ b/tools/istio-iptables/pkg/cmd/root.go @@ -67,10 +67,14 @@ var rootCmd = &cobra.Command{ if cfg.DryRun { ext = &dep.StdoutStubDependencies{} } else { + ipv, err := dep.DetectIptablesVersion(cfg.IPTablesVersion) + if err != nil { + handleErrorWithCode(err, 1) + } ext = &dep.RealDependencies{ CNIMode: cfg.CNIMode, - HostNSEnterExec: cfg.HostNSEnterExec, NetworkNamespace: cfg.NetworkNamespace, + IptablesVersion: ipv, } } @@ -105,23 +109,6 @@ Otherwise, this pod will need to be manually removed so that it is scheduled on }, } -var configureRoutesCommand = &cobra.Command{ - Use: "configure-routes", - Short: "Configures iproute2 rules for the Istio sidecar", - PreRun: bindFlags, - Run: func(cmd *cobra.Command, args []string) { - cfg := constructConfig() - if err := cfg.Validate(); err != nil { - handleErrorWithCode(err, 1) - } - if !cfg.SkipRuleApply { - if err := capture.ConfigureRoutes(cfg, nil); err != nil { - handleErrorWithCode(err, 1) - } - } - }, -} - func constructConfig() *config.Config { cfg := &config.Config{ DryRun: viper.GetBool(constants.DryRun), @@ -155,7 +142,6 @@ func constructConfig() *config.Config { NetworkNamespace: viper.GetString(constants.NetworkNamespace), CNIMode: viper.GetBool(constants.CNIMode), DualStack: viper.GetBool(constants.DualStack), - HostNSEnterExec: viper.GetBool(constants.HostNSEnterExec), } // TODO: Make this more configurable, maybe with an allowlist of users to be captured for output instead of a denylist. @@ -252,169 +238,51 @@ func bindFlags(cmd *cobra.Command, args []string) { // Replace - with _; so that environment variables are looked up correctly. 
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - envoyPort := "15001" - inboundPort := "15006" - inboundTunnelPort := "15008" - - if err := viper.BindPFlag(constants.EnvoyPort, cmd.Flags().Lookup(constants.EnvoyPort)); err != nil { - handleError(err) - } - viper.SetDefault(constants.EnvoyPort, envoyPort) - - if err := viper.BindPFlag(constants.InboundCapturePort, cmd.Flags().Lookup(constants.InboundCapturePort)); err != nil { - handleError(err) - } - viper.SetDefault(constants.InboundCapturePort, inboundPort) - - if err := viper.BindPFlag(constants.InboundTunnelPort, cmd.Flags().Lookup(constants.InboundTunnelPort)); err != nil { - handleError(err) - } - viper.SetDefault(constants.InboundTunnelPort, inboundTunnelPort) - - if err := viper.BindPFlag(constants.ProxyUID, cmd.Flags().Lookup(constants.ProxyUID)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ProxyUID, "") - - if err := viper.BindPFlag(constants.ProxyGID, cmd.Flags().Lookup(constants.ProxyGID)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ProxyGID, "") - - if err := viper.BindPFlag(constants.InboundInterceptionMode, cmd.Flags().Lookup(constants.InboundInterceptionMode)); err != nil { - handleError(err) - } - viper.SetDefault(constants.InboundInterceptionMode, "") - - if err := viper.BindPFlag(constants.InboundPorts, cmd.Flags().Lookup(constants.InboundPorts)); err != nil { - handleError(err) - } - viper.SetDefault(constants.InboundPorts, "") - - if err := viper.BindPFlag(constants.LocalExcludePorts, cmd.Flags().Lookup(constants.LocalExcludePorts)); err != nil { - handleError(err) - } - viper.SetDefault(constants.LocalExcludePorts, "") - - if err := viper.BindPFlag(constants.ExcludeInterfaces, cmd.Flags().Lookup(constants.ExcludeInterfaces)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ExcludeInterfaces, "") - - if err := viper.BindPFlag(constants.ServiceCidr, cmd.Flags().Lookup(constants.ServiceCidr)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ServiceCidr, "") - - if err := viper.BindPFlag(constants.ServiceExcludeCidr, cmd.Flags().Lookup(constants.ServiceExcludeCidr)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ServiceExcludeCidr, "") - - if err := viper.BindEnv(constants.OwnerGroupsInclude.Name); err != nil { - handleError(err) - } - viper.SetDefault(constants.OwnerGroupsInclude.Name, constants.OwnerGroupsInclude.DefaultValue) - - if err := viper.BindEnv(constants.OwnerGroupsExclude.Name); err != nil { - handleError(err) - } - viper.SetDefault(constants.OwnerGroupsExclude.Name, constants.OwnerGroupsExclude.DefaultValue) - - if err := viper.BindPFlag(constants.OutboundPorts, cmd.Flags().Lookup(constants.OutboundPorts)); err != nil { - handleError(err) - } - viper.SetDefault(constants.OutboundPorts, "") - - if err := viper.BindPFlag(constants.LocalOutboundPortsExclude, cmd.Flags().Lookup(constants.LocalOutboundPortsExclude)); err != nil { - handleError(err) - } - viper.SetDefault(constants.LocalOutboundPortsExclude, "") - - if err := viper.BindPFlag(constants.KubeVirtInterfaces, cmd.Flags().Lookup(constants.KubeVirtInterfaces)); err != nil { - handleError(err) - } - viper.SetDefault(constants.KubeVirtInterfaces, "") - - if err := viper.BindPFlag(constants.InboundTProxyMark, cmd.Flags().Lookup(constants.InboundTProxyMark)); err != nil { - handleError(err) - } - viper.SetDefault(constants.InboundTProxyMark, "1337") - - if err := viper.BindPFlag(constants.InboundTProxyRouteTable, 
cmd.Flags().Lookup(constants.InboundTProxyRouteTable)); err != nil { - handleError(err) - } - viper.SetDefault(constants.InboundTProxyRouteTable, "133") - - if err := viper.BindPFlag(constants.DryRun, cmd.Flags().Lookup(constants.DryRun)); err != nil { - handleError(err) - } - viper.SetDefault(constants.DryRun, false) - - if err := viper.BindPFlag(constants.TraceLogging, cmd.Flags().Lookup(constants.TraceLogging)); err != nil { - handleError(err) - } - viper.SetDefault(constants.TraceLogging, false) - - if err := viper.BindPFlag(constants.RestoreFormat, cmd.Flags().Lookup(constants.RestoreFormat)); err != nil { - handleError(err) - } - viper.SetDefault(constants.RestoreFormat, true) - - if err := viper.BindPFlag(constants.IptablesProbePort, cmd.Flags().Lookup(constants.IptablesProbePort)); err != nil { - handleError(err) - } - viper.SetDefault(constants.IptablesProbePort, constants.DefaultIptablesProbePort) - - if err := viper.BindPFlag(constants.ProbeTimeout, cmd.Flags().Lookup(constants.ProbeTimeout)); err != nil { - handleError(err) - } - viper.SetDefault(constants.ProbeTimeout, constants.DefaultProbeTimeout) - - if err := viper.BindPFlag(constants.SkipRuleApply, cmd.Flags().Lookup(constants.SkipRuleApply)); err != nil { - handleError(err) - } - viper.SetDefault(constants.SkipRuleApply, false) - - if err := viper.BindPFlag(constants.RunValidation, cmd.Flags().Lookup(constants.RunValidation)); err != nil { - handleError(err) - } - viper.SetDefault(constants.RunValidation, false) - - if err := viper.BindPFlag(constants.RedirectDNS, cmd.Flags().Lookup(constants.RedirectDNS)); err != nil { - handleError(err) - } - viper.SetDefault(constants.RedirectDNS, dnsCaptureByAgent) - - if err := viper.BindPFlag(constants.DropInvalid, cmd.Flags().Lookup(constants.DropInvalid)); err != nil { - handleError(err) - } - viper.SetDefault(constants.DropInvalid, InvalidDropByIptables) - - if err := viper.BindPFlag(constants.CaptureAllDNS, cmd.Flags().Lookup(constants.CaptureAllDNS)); err != nil { - handleError(err) - } - viper.SetDefault(constants.CaptureAllDNS, false) - - if err := viper.BindPFlag(constants.NetworkNamespace, cmd.Flags().Lookup(constants.NetworkNamespace)); err != nil { - handleError(err) - } - viper.SetDefault(constants.NetworkNamespace, "") - - if err := viper.BindPFlag(constants.CNIMode, cmd.Flags().Lookup(constants.CNIMode)); err != nil { - handleError(err) - } - viper.SetDefault(constants.CNIMode, false) - - if err := viper.BindPFlag(constants.HostNSEnterExec, cmd.Flags().Lookup(constants.HostNSEnterExec)); err != nil { - handleError(err) - } - viper.SetDefault(constants.HostNSEnterExec, false) - - if err := viper.BindPFlag(constants.DualStack, cmd.Flags().Lookup(constants.DualStack)); err != nil { - handleError(err) + bind := func(name string, def any) { + if err := viper.BindPFlag(name, cmd.Flags().Lookup(name)); err != nil { + handleError(err) + } + viper.SetDefault(name, def) } - viper.SetDefault(constants.DualStack, DualStack) + bindEnv := func(name string, def any) { + if err := viper.BindEnv(name); err != nil { + handleError(err) + } + viper.SetDefault(name, def) + } + + bind(constants.EnvoyPort, "15001") + bind(constants.InboundCapturePort, "15006") + bind(constants.InboundTunnelPort, "15008") + bind(constants.ProxyUID, "") + bind(constants.ProxyGID, "") + bind(constants.InboundInterceptionMode, "") + bind(constants.InboundPorts, "") + bind(constants.LocalExcludePorts, "") + bind(constants.ExcludeInterfaces, "") + bind(constants.ServiceCidr, "") + 
bind(constants.ServiceExcludeCidr, "") + bindEnv(constants.OwnerGroupsInclude.Name, constants.OwnerGroupsInclude.DefaultValue) + bindEnv(constants.OwnerGroupsExclude.Name, constants.OwnerGroupsExclude.DefaultValue) + bind(constants.OutboundPorts, "") + bind(constants.LocalOutboundPortsExclude, "") + bind(constants.KubeVirtInterfaces, "") + bind(constants.InboundTProxyMark, "1337") + bind(constants.InboundTProxyRouteTable, "133") + bind(constants.DryRun, false) + bind(constants.TraceLogging, false) + bind(constants.RestoreFormat, true) + bind(constants.IptablesProbePort, constants.DefaultIptablesProbePort) + bind(constants.ProbeTimeout, constants.DefaultProbeTimeout) + bind(constants.SkipRuleApply, false) + bind(constants.RunValidation, false) + bind(constants.RedirectDNS, dnsCaptureByAgent) + bind(constants.DropInvalid, InvalidDropByIptables) + bind(constants.CaptureAllDNS, false) + bind(constants.NetworkNamespace, "") + bind(constants.CNIMode, false) + bind(constants.IptablesVersion, "") + bind(constants.DualStack, DualStack) } // https://github.com/spf13/viper/issues/233. @@ -422,7 +290,6 @@ func bindFlags(cmd *cobra.Command, args []string) { // Otherwise, the flag with the same name shared across subcommands will be overwritten by the last. func init() { bindCmdlineFlags(rootCmd) - bindCmdlineFlags(configureRoutesCommand) } func bindCmdlineFlags(rootCmd *cobra.Command) { @@ -502,19 +369,9 @@ func bindCmdlineFlags(rootCmd *cobra.Command) { rootCmd.Flags().Bool(constants.CNIMode, false, "Whether to run as CNI plugin.") - rootCmd.Flags().Bool(constants.HostNSEnterExec, false, "Instead of using the internal go netns, use the nsenter command for switching network namespaces.") + rootCmd.Flags().String(constants.IptablesVersion, "", "version of iptables command. 
If not set, this is automatically detected.") } func GetCommand() *cobra.Command { return rootCmd } - -func GetRouteCommand() *cobra.Command { - return configureRoutesCommand -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - handleError(err) - } -} diff --git a/tools/istio-iptables/pkg/config/config.go b/tools/istio-iptables/pkg/config/config.go index 190d9990015e..8e1f057d408f 100644 --- a/tools/istio-iptables/pkg/config/config.go +++ b/tools/istio-iptables/pkg/config/config.go @@ -59,7 +59,7 @@ type Config struct { DNSServersV6 []string `json:"DNS_SERVERS_V6"` NetworkNamespace string `json:"NETWORK_NAMESPACE"` CNIMode bool `json:"CNI_MODE"` - HostNSEnterExec bool `json:"HOST_NSENTER_EXEC"` + IPTablesVersion string `json:"IPTABLES_VERSION"` TraceLogging bool `json:"IPTABLES_TRACE_LOGGING"` DualStack bool `json:"DUAL_STACK"` } @@ -74,6 +74,7 @@ func (c *Config) String() string { func (c *Config) Print() { var b strings.Builder + b.WriteString(fmt.Sprintf("IPTABLES_VERSION=%s\n", c.IPTablesVersion)) b.WriteString(fmt.Sprintf("PROXY_PORT=%s\n", c.ProxyPort)) b.WriteString(fmt.Sprintf("PROXY_INBOUND_CAPTURE_PORT=%s\n", c.InboundCapturePort)) b.WriteString(fmt.Sprintf("PROXY_TUNNEL_PORT=%s\n", c.InboundTunnelPort)) @@ -99,7 +100,6 @@ func (c *Config) Print() { b.WriteString(fmt.Sprintf("DNS_SERVERS=%s,%s\n", c.DNSServersV4, c.DNSServersV6)) b.WriteString(fmt.Sprintf("NETWORK_NAMESPACE=%s\n", c.NetworkNamespace)) b.WriteString(fmt.Sprintf("CNI_MODE=%s\n", strconv.FormatBool(c.CNIMode))) - b.WriteString(fmt.Sprintf("HOST_NSENTER_EXEC=%s\n", strconv.FormatBool(c.HostNSEnterExec))) b.WriteString(fmt.Sprintf("EXCLUDE_INTERFACES=%s\n", c.ExcludeInterfaces)) log.Infof("Istio iptables variables:\n%s", b.String()) } diff --git a/tools/istio-iptables/pkg/constants/constants.go b/tools/istio-iptables/pkg/constants/constants.go index c1394cfc6faf..848536f016a5 100644 --- a/tools/istio-iptables/pkg/constants/constants.go +++ b/tools/istio-iptables/pkg/constants/constants.go @@ -96,7 +96,6 @@ const ( KubeVirtInterfaces = "kube-virt-interfaces" DryRun = "dry-run" TraceLogging = "iptables-trace-logging" - Clean = "clean" RestoreFormat = "restore-format" SkipRuleApply = "skip-rule-apply" RunValidation = "run-validation" @@ -108,7 +107,7 @@ const ( CaptureAllDNS = "capture-all-dns" NetworkNamespace = "network-namespace" CNIMode = "cni-mode" - HostNSEnterExec = "host-nsenter-exec" + IptablesVersion = "iptables-version" ) // Environment variables that deliberately have no equivalent command-line flags. 
@@ -140,8 +139,7 @@ const ( // Constants used in environment variables const ( - DisableRedirectionOnLocalLoopback = "DISABLE_REDIRECTION_ON_LOCAL_LOOPBACK" - EnvoyUser = "ENVOY_USER" + EnvoyUser = "ENVOY_USER" ) // Constants for iptables commands @@ -152,7 +150,6 @@ const ( IP6TABLES = "ip6tables" IP6TABLESRESTORE = "ip6tables-restore" IP6TABLESSAVE = "ip6tables-save" - NSENTER = "nsenter" ) // Constants for syscall @@ -175,7 +172,3 @@ const ( const ( IstioAgentDNSListenerPort = "15053" ) - -const ( - CommandConfigureRoutes = "configure-routes" -) diff --git a/tools/istio-iptables/pkg/dependencies/implementation.go b/tools/istio-iptables/pkg/dependencies/implementation.go index 28b0ec82f0c8..5b496a4cd951 100644 --- a/tools/istio-iptables/pkg/dependencies/implementation.go +++ b/tools/istio-iptables/pkg/dependencies/implementation.go @@ -15,13 +15,15 @@ package dependencies import ( - "bytes" "fmt" "io" "os" "os/exec" + "regexp" "strings" + utilversion "k8s.io/apimachinery/pkg/util/version" + "istio.io/istio/pkg/log" "istio.io/istio/pkg/util/sets" "istio.io/istio/tools/istio-iptables/pkg/constants" @@ -61,13 +63,60 @@ var XTablesCmds = sets.New( constants.IP6TABLESSAVE, ) +// XTablesWriteCmds contains all xtables commands that do write actions (and thus need a lock) +var XTablesWriteCmds = sets.New( + constants.IPTABLES, + constants.IP6TABLES, + constants.IPTABLESRESTORE, + constants.IP6TABLESRESTORE, +) + // RealDependencies implementation of interface Dependencies, which is used in production type RealDependencies struct { + IptablesVersion IptablesVersion NetworkNamespace string - HostNSEnterExec bool CNIMode bool } +const iptablesVersionPattern = `v([0-9]+(\.[0-9]+)+)` + +type IptablesVersion struct { + // the actual version + version *utilversion.Version + // true if legacy mode, false if nf_tables + legacy bool +} + +// NoLocks returns true if this version does not use or support locks +func (v IptablesVersion) NoLocks() bool { + // nf_tables does not use locks + // legacy added locks in 1.6.2 + return !v.legacy || v.version.LessThan(IptablesRestoreLocking) +} + +func DetectIptablesVersion(ver string) (IptablesVersion, error) { + if ver == "" { + var err error + verb, err := exec.Command("iptables", "--version").CombinedOutput() + if err != nil { + return IptablesVersion{}, err + } + ver = string(verb) + } + // Legacy will have no marking or 'legacy', so just look for nf_tables + nft := strings.Contains(ver, "nf_tables") + versionMatcher := regexp.MustCompile(iptablesVersionPattern) + match := versionMatcher.FindStringSubmatch(ver) + if match == nil { + return IptablesVersion{}, fmt.Errorf("no iptables version found: %q", ver) + } + version, err := utilversion.ParseGeneric(match[1]) + if err != nil { + return IptablesVersion{}, fmt.Errorf("iptables version %q is not a valid version string: %v", match[1], err) + } + return IptablesVersion{version: version, legacy: !nft}, nil +} + // transformToXTablesErrorMessage returns an updated error message with explicit xtables error hints, if applicable. func transformToXTablesErrorMessage(stderr string, err error) string { ee, ok := err.(*exec.ExitError) @@ -96,37 +145,6 @@ func transformToXTablesErrorMessage(stderr string, err error) string { return stderr } -func isXTablesLockError(stderr *bytes.Buffer, exitcode int) bool { - // xtables lock acquire failure maps to resource problem exit code. 
- // https://git.netfilter.org/iptables/tree/iptables/iptables.c?h=v1.6.0#n1769 - if exitcode != int(XTablesResourceProblem) { - return false - } - - // check stderr output and see if there is `xtables lock` in it. - // https://git.netfilter.org/iptables/tree/iptables/iptables.c?h=v1.6.0#n1763 - if strings.Contains(stderr.String(), "xtables lock") { - return true - } - - return false -} - -func exitCode(err error) (int, bool) { - if err == nil { - return 0, false - } - - ee, ok := err.(*exec.ExitError) - if !ok { - // Not common, but can happen if file not found error, etc - return 0, false - } - - exitcode := ee.ExitCode() - return exitcode, true -} - // RunOrFail runs a command and exits with an error message, if it fails func (r *RealDependencies) RunOrFail(cmd string, stdin io.ReadSeeker, args ...string) { var err error diff --git a/tools/istio-iptables/pkg/dependencies/implementation_linux.go b/tools/istio-iptables/pkg/dependencies/implementation_linux.go index 7967c52b1e1e..c224cd689a29 100644 --- a/tools/istio-iptables/pkg/dependencies/implementation_linux.go +++ b/tools/istio-iptables/pkg/dependencies/implementation_linux.go @@ -16,27 +16,21 @@ package dependencies import ( "bytes" - "context" "fmt" "io" "os/exec" "strings" - "time" + "syscall" "github.com/containernetworking/plugins/pkg/ns" "github.com/spf13/viper" + "golang.org/x/sys/unix" + utilversion "k8s.io/apimachinery/pkg/util/version" - "istio.io/istio/pkg/backoff" "istio.io/istio/pkg/log" - "istio.io/istio/tools/istio-iptables/pkg/constants" ) func (r *RealDependencies) execute(cmd string, ignoreErrors bool, stdin io.Reader, args ...string) error { - if r.CNIMode && r.HostNSEnterExec { - originalCmd := cmd - cmd = constants.NSENTER - args = append([]string{fmt.Sprintf("--net=%v", r.NetworkNamespace), "--", originalCmd}, args...) - } log.Infof("Running command: %s %s", cmd, strings.Join(args, " ")) externalCommand := exec.Command(cmd, args...) @@ -55,22 +49,7 @@ func (r *RealDependencies) execute(cmd string, ignoreErrors bool, stdin io.Reade } externalCommand.Env = append(externalCommand.Env, fmt.Sprintf("%s=%v", strings.ToUpper(repl.Replace(k)), v)) } - var err error - var nsContainer ns.NetNS - if r.CNIMode && !r.HostNSEnterExec { - nsContainer, err = ns.GetNS(r.NetworkNamespace) - if err != nil { - return err - } - - err = nsContainer.Do(func(ns.NetNS) error { - return externalCommand.Run() - }) - nsContainer.Close() - } else { - err = externalCommand.Run() - } - + err := r.runCommand(externalCommand) if len(stdout.String()) != 0 { log.Infof("Command output: \n%v", stdout.String()) } @@ -82,72 +61,73 @@ func (r *RealDependencies) execute(cmd string, ignoreErrors bool, stdin io.Reade return err } -func (r *RealDependencies) executeXTables(cmd string, ignoreErrors bool, stdin io.ReadSeeker, args ...string) error { - if r.CNIMode && r.HostNSEnterExec { - originalCmd := cmd - cmd = constants.NSENTER - args = append([]string{fmt.Sprintf("--net=%v", r.NetworkNamespace), "--", originalCmd}, args...) 
- } - log.Infof("Running command: %s %s", cmd, strings.Join(args, " ")) - - var stdout, stderr *bytes.Buffer - var err error - var nsContainer ns.NetNS - - if r.CNIMode && !r.HostNSEnterExec { - nsContainer, err = ns.GetNS(r.NetworkNamespace) +func (r *RealDependencies) runCommand(c *exec.Cmd) error { + if r.CNIMode { + n, err := ns.GetNS(r.NetworkNamespace) if err != nil { return err } - defer nsContainer.Close() - } - o := backoff.Option{ - InitialInterval: 100 * time.Millisecond, - MaxInterval: 2 * time.Second, + defer n.Close() + + return n.Do(func(ns.NetNS) error { + return c.Run() + }) } - b := backoff.NewExponentialBackOff(o) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - backoffError := b.RetryWithContext(ctx, func() error { - externalCommand := exec.Command(cmd, args...) - stdout = &bytes.Buffer{} - stderr = &bytes.Buffer{} - externalCommand.Stdout = stdout - externalCommand.Stderr = stderr - externalCommand.Stdin = stdin - if stdin != nil { - if _, err := stdin.Seek(0, io.SeekStart); err != nil { - return err + return c.Run() +} + +var ( + // IptablesRestoreLocking is the version where locking and -w is added to iptables-restore + IptablesRestoreLocking = utilversion.MustParseGeneric("1.6.2") + // IptablesLockfileEnv is the version where XTABLES_LOCKFILE is added to iptables. + IptablesLockfileEnv = utilversion.MustParseGeneric("1.8.6") +) + +func (r *RealDependencies) executeXTables(cmd string, ignoreErrors bool, stdin io.ReadSeeker, args ...string) error { + mode := "without lock" + var c *exec.Cmd + _, needLock := XTablesWriteCmds[cmd] + if !needLock || r.IptablesVersion.NoLocks() { + // No locking supported/needed, just run as is. Nothing special + c = exec.Command(cmd, args...) + } else { + if r.CNIMode { + // In CNI, we are running the pod network namespace, but the host filesystem. Locking the host is both useless and harmful, + // as it opens the risk of lock contention with other node actors (such as kube-proxy), and isn't actually needed at all. + // In both cases we are setting the lockfile to `r.NetworkNamespace`. + // * /dev/null looks like a good option, but actually doesn't work (it will ensure only one actor can access it) + // * `mktemp` works, but it is slightly harder to deal with cleanup and in some platforms we may not have write access. + if r.IptablesVersion.version.LessThan(IptablesLockfileEnv) { + // Older iptables cannot turn off the lock explicitly, so we hack around it... + // Overwrite the lock file with /dev/null + // cmd is repeated twice as the first 'cmd' instance becomes $0 + args := append([]string{"-c", fmt.Sprintf("mount --bind %s /run/xtables.lock; exec $@", r.NetworkNamespace), cmd, cmd}, args...) + c = exec.Command("sh", args...) + // Run in a new mount namespace so our mount doesn't impact any other processes. + c.SysProcAttr = &syscall.SysProcAttr{Unshareflags: unix.CLONE_NEWNS} + mode = "without lock by mount" + } else { + // Available since iptables 1.8.6+, just point to a different file directly + c = exec.Command(cmd, args...) + c.Env = append(c.Env, "XTABLES_LOCKFILE="+r.NetworkNamespace) + mode = "without lock by environment" } - } - if r.CNIMode && !r.HostNSEnterExec { - err = nsContainer.Do(func(ns.NetNS) error { - return externalCommand.Run() - }) } else { - err = externalCommand.Run() - } - exitCode, ok := exitCode(err) - if !ok { - // cannot get exit code. consider this as non-retriable. 
- return nil - } - - if !isXTablesLockError(stderr, exitCode) { - // Command succeeded, or failed not because of xtables lock. - return nil + // We want the lock. Wait up to 30s for it. + args = append(args, "--wait=30") + c = exec.Command(cmd, args...) + log.Debugf("running with lock") + mode = "with wait lock" } - - // If command failed because xtables was locked, try the command again. - // Note we retry invoking iptables command explicitly instead of using the `-w` option of iptables, - // because as of iptables 1.6.x (version shipped with bionic), iptables-restore does not support `-w`. - log.Debugf("Failed to acquire XTables lock, retry iptables command..") - return err - }) - if backoffError != nil { - return fmt.Errorf("timed out trying to acquire XTables lock: %v", err) } + log.Infof("Running command (%s): %s %s", mode, cmd, strings.Join(args, " ")) + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c.Stdout = stdout + c.Stderr = stderr + c.Stdin = stdin + err := r.runCommand(c) if len(stdout.String()) != 0 { log.Infof("Command output: \n%v", stdout.String()) } diff --git a/tools/istio-iptables/pkg/dependencies/implementation_test.go b/tools/istio-iptables/pkg/dependencies/implementation_test.go index b7ee00d0403b..53361a1a451f 100644 --- a/tools/istio-iptables/pkg/dependencies/implementation_test.go +++ b/tools/istio-iptables/pkg/dependencies/implementation_test.go @@ -15,48 +15,69 @@ package dependencies import ( - "bytes" "testing" + + utilversion "k8s.io/apimachinery/pkg/util/version" + + "istio.io/istio/pkg/test/util/assert" ) -func TestIsXTablesLockError(t *testing.T) { - xtableError := []struct { - name string - errMsg string - errCode XTablesExittype - want bool +func TestDetectIptablesVersion(t *testing.T) { + cases := []struct { + name string + ver string + want IptablesVersion }{ { - name: "no error", - errMsg: "", - errCode: 0, - want: false, + name: "jammy nft", + ver: "iptables v1.8.7 (nf_tables)", + want: IptablesVersion{version: utilversion.MustParseGeneric("1.8.7"), legacy: false}, + }, + { + name: "jammy legacy", + ver: "iptables v1.8.7 (legacy)", + + want: IptablesVersion{version: utilversion.MustParseGeneric("1.8.7"), legacy: true}, + }, + { + name: "xenial", + ver: "iptables v1.6.0", + + want: IptablesVersion{version: utilversion.MustParseGeneric("1.6.0"), legacy: true}, }, { - name: "error message mismatch", - errMsg: "some error", - errCode: XTablesResourceProblem, - want: false, + name: "bionic", + ver: "iptables v1.6.1", + + want: IptablesVersion{version: utilversion.MustParseGeneric("1.6.1"), legacy: true}, }, { - name: "error code mismatch", - errMsg: "some error", - errCode: XTablesOtherProblem, - want: false, + name: "centos 7", + ver: "iptables v1.4.21", + + want: IptablesVersion{version: utilversion.MustParseGeneric("1.4.21"), legacy: true}, }, { - name: "is xtables lock error", - errMsg: "Another app is currently holding the xtables lock.", - errCode: XTablesResourceProblem, - want: true, + name: "centos 8", + ver: "iptables v1.8.4 (nf_tables)", + + want: IptablesVersion{version: utilversion.MustParseGeneric("1.8.4"), legacy: false}, + }, + { + name: "alpine 3.18", + ver: "iptables v1.8.9 (legacy)", + + want: IptablesVersion{version: utilversion.MustParseGeneric("1.8.9"), legacy: true}, }, } - for _, tt := range xtableError { + for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - bs := bytes.NewBufferString(tt.errMsg) - if got, want := isXTablesLockError(bs, int(tt.errCode)), tt.want; got != want { - t.Errorf("xtables lock error 
got %v, want %v", got, want) + got, err := DetectIptablesVersion(tt.ver) + if err != nil { + t.Fatal(err) } + assert.Equal(t, got.version.String(), tt.want.version.String()) + assert.Equal(t, got.legacy, tt.want.legacy) }) } }