From 667b3a2857bc530e37ea9246492ebd9995ce7ed3 Mon Sep 17 00:00:00 2001 From: Pasan Weerasinghe Date: Fri, 23 Aug 2024 14:14:56 -0700 Subject: [PATCH] Respect HTTP proxies when rendering Guardian policy --- .../clusterconnection_controller.go | 122 ++++++- .../clusterconnection_controller_test.go | 323 +++++++++++++++++- .../crd.projectcalico.org_bgpfilters.yaml | 52 +++ ...ojectcalico.org_globalnetworkpolicies.yaml | 16 +- ...crd.projectcalico.org_networkpolicies.yaml | 16 +- .../calico/crd.projectcalico.org_tiers.yaml | 54 +++ .../crd.projectcalico.org_bgpfilters.yaml | 52 +++ pkg/render/guardian.go | 37 +- pkg/url/url.go | 43 +++ test/utils.go | 77 +++++ 10 files changed, 753 insertions(+), 39 deletions(-) create mode 100644 pkg/crds/calico/crd.projectcalico.org_tiers.yaml diff --git a/pkg/controller/clusterconnection/clusterconnection_controller.go b/pkg/controller/clusterconnection/clusterconnection_controller.go index 283cbe75f5..cd3d22cd70 100644 --- a/pkg/controller/clusterconnection/clusterconnection_controller.go +++ b/pkg/controller/clusterconnection/clusterconnection_controller.go @@ -19,6 +19,12 @@ import ( "fmt" "net" + "github.com/tigera/operator/pkg/url" + "golang.org/x/net/http/httpproxy" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -166,6 +172,10 @@ func add(mgr manager.Manager, c ctrlruntime.Controller) error { return fmt.Errorf("%s failed to watch ImageSet: %w", controllerName, err) } + if err := utils.AddDeploymentWatch(c, render.GuardianDeploymentName, render.GuardianNamespace); err != nil { + return fmt.Errorf("%s failed to watch Guardian deployment: %w", controllerName, err) + } + // Watch for changes to TigeraStatus. if err = utils.AddTigeraStatusWatch(c, ResourceName); err != nil { return fmt.Errorf("clusterconnection-controller failed to watch management-cluster-connection Tigerastatus: %w", err) @@ -179,12 +189,14 @@ var _ reconcile.Reconciler = &ReconcileConnection{} // ReconcileConnection reconciles a ManagementClusterConnection object type ReconcileConnection struct { - Client client.Client - Scheme *runtime.Scheme - Provider operatorv1.Provider - status status.StatusManager - clusterDomain string - tierWatchReady *utils.ReadyFlag + Client client.Client + Scheme *runtime.Scheme + Provider operatorv1.Provider + status status.StatusManager + clusterDomain string + tierWatchReady *utils.ReadyFlag + resolvedProxy *httpproxy.Config + lastAvailabilityTransition metav1.Time } // Reconcile reads that state of the cluster for a ManagementClusterConnection object and makes changes based on the @@ -323,6 +335,67 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R trustedCertBundle.AddCertificates(secret) } + // Determine the current deployment availability. 
+ var currentAvailabilityTransition metav1.Time + var currentlyAvailable bool + guardianDeployment := v1.Deployment{} + err = r.Client.Get(ctx, client.ObjectKey{Name: render.GuardianDeploymentName, Namespace: render.GuardianNamespace}, &guardianDeployment) + if err != nil && !k8serrors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to read the deployment status of Guardian", err, reqLogger) + return reconcile.Result{}, nil + } else if err == nil { + for _, condition := range guardianDeployment.Status.Conditions { + if condition.Type == v1.DeploymentAvailable { + currentAvailabilityTransition = condition.LastTransitionTime + if condition.Status == corev1.ConditionTrue { + currentlyAvailable = true + } + break + } + } + } + + // If the deployment availability has changed and is currently available, we update the resolved proxy configuration. + // We only update the resolved proxy configuration in this scenario (rather than every reconcile) to limit the number + // of pod queries we make. + if !currentAvailabilityTransition.Equal(&r.lastAvailabilityTransition) && currentlyAvailable { + // Query guardian pods. + labelSelector := labels.SelectorFromSet(map[string]string{ + "app.kubernetes.io/name": render.GuardianDeploymentName, + }) + pods := corev1.PodList{} + err := r.Client.List(ctx, &pods, &client.ListOptions{ + LabelSelector: labelSelector, + Namespace: render.GuardianNamespace, + }) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to list the pods of the Guardian deployment", err, reqLogger) + return reconcile.Result{}, nil + } + + // Parse the pod spec to resolve the proxy config. + var proxyConfig *httpproxy.Config + for _, pod := range pods.Items { + for _, container := range pod.Spec.Containers { + if container.Name == render.GuardianDeploymentName { + proxyConfig = &httpproxy.Config{} + for _, env := range container.Env { + switch env.Name { + case "https_proxy", "HTTPS_PROXY": + proxyConfig.HTTPSProxy = env.Value + case "no_proxy", "NO_PROXY": + proxyConfig.NoProxy = env.Value + } + } + break + } + } + } + + r.resolvedProxy = proxyConfig + } + r.lastAvailabilityTransition = currentAvailabilityTransition + // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. if !r.tierWatchReady.IsReady() { r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) @@ -345,7 +418,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R // The Tier has been created, which means that this controller's reconciliation should no longer be a dependency // of the License being deployed. If NetworkPolicy requires license features, it should now be safe to validate // License presence and sufficiency. 
- if networkPolicyRequiresEgressAccessControl(managementClusterConnection, log) { + if networkPolicyRequiresEgressAccessControl(managementClusterConnection.Spec.ManagementClusterAddr, r.resolvedProxy, log) { license, err := utils.FetchLicenseKey(ctx, r.Client) if err != nil { if k8serrors.IsNotFound(err) { @@ -366,6 +439,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R ch := utils.NewComponentHandler(log, r.Client, r.Scheme, managementClusterConnection) guardianCfg := &render.GuardianConfiguration{ URL: managementClusterConnection.Spec.ManagementClusterAddr, + HTTPProxyConfig: r.resolvedProxy, TunnelCAType: managementClusterConnection.Spec.TLS.CA, PullSecrets: pullSecrets, OpenShift: r.Provider.IsOpenShift(), @@ -417,23 +491,43 @@ func fillDefaults(mcc *operatorv1.ManagementClusterConnection) { } } -func networkPolicyRequiresEgressAccessControl(connection *operatorv1.ManagementClusterConnection, log logr.Logger) bool { - if clusterAddrHasDomain, err := managementClusterAddrHasDomain(connection); err == nil && clusterAddrHasDomain { - return true - } else { +func networkPolicyRequiresEgressAccessControl(target string, httpProxyConfig *httpproxy.Config, log logr.Logger) bool { + var destinationHostPort string + if httpProxyConfig != nil && httpProxyConfig.HTTPSProxy != "" { + // HTTPS proxy is specified as a URL. + proxyHostPort, err := url.ParseHostPortFromHTTPProxyString(httpProxyConfig.HTTPSProxy) if err != nil { log.Error(err, fmt.Sprintf( - "Failed to parse ManagementClusterAddr. Assuming %s does not require license feature %s", + "Failed to parse HTTP Proxy URL (%s). Assuming %s does not require license feature %s", + httpProxyConfig.HTTPSProxy, render.GuardianPolicyName, common.EgressAccessControlFeature, )) + return false } + + destinationHostPort = proxyHostPort + } else { + // Target is already specified as host:port. + destinationHostPort = target + } + + // Determine if the host in the host:port is a domain name. + hostPortHasDomain, err := hostPortUsesDomainName(destinationHostPort) + if err != nil { + log.Error(err, fmt.Sprintf( + "Failed to parse resolved host:port (%s) for remote tunnel endpoint. Assuming %s does not require license feature %s", + destinationHostPort, + render.GuardianPolicyName, + common.EgressAccessControlFeature, + )) return false } + return hostPortHasDomain } -func managementClusterAddrHasDomain(connection *operatorv1.ManagementClusterConnection) (bool, error) { - host, _, err := net.SplitHostPort(connection.Spec.ManagementClusterAddr) +func hostPortUsesDomainName(hostPort string) (bool, error) { + host, _, err := net.SplitHostPort(hostPort) if err != nil { return false, err } diff --git a/pkg/controller/clusterconnection/clusterconnection_controller_test.go b/pkg/controller/clusterconnection/clusterconnection_controller_test.go index f7974d92a1..c656caecb5 100644 --- a/pkg/controller/clusterconnection/clusterconnection_controller_test.go +++ b/pkg/controller/clusterconnection/clusterconnection_controller_test.go @@ -17,11 +17,19 @@ package clusterconnection_test import ( "context" "fmt" + "net" + "net/url" + "strconv" + "strings" + "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "github.com/stretchr/testify/mock" + "github.com/tigera/operator/pkg/ptr" + "github.com/tigera/operator/pkg/render/common/networkpolicy" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" appsv1 "k8s.io/api/apps/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -52,9 +60,10 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { var ctx context.Context var cfg *operatorv1.ManagementClusterConnection var r reconcile.Reconciler - var scheme *runtime.Scheme + var clientScheme *runtime.Scheme var dpl *appsv1.Deployment var mockStatus *status.MockStatus + var objTrackerWithCalls test.ObjectTrackerWithCalls notReady := &utils.ReadyFlag{} ready := &utils.ReadyFlag{} @@ -62,13 +71,14 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { BeforeEach(func() { // Create a Kubernetes client. - scheme = runtime.NewScheme() - Expect(apis.AddToScheme(scheme)).ShouldNot(HaveOccurred()) - Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) - Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) - err := operatorv1.SchemeBuilder.AddToScheme(scheme) + clientScheme = runtime.NewScheme() + Expect(apis.AddToScheme(clientScheme)).ShouldNot(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(clientScheme)).ShouldNot(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(clientScheme)).ShouldNot(HaveOccurred()) + err := operatorv1.SchemeBuilder.AddToScheme(clientScheme) Expect(err).NotTo(HaveOccurred()) - c = ctrlrfake.DefaultFakeClientBuilder(scheme).Build() + objTrackerWithCalls = test.NewObjectTrackerWithCalls(clientScheme) + c = ctrlrfake.DefaultFakeClientBuilder(clientScheme).WithObjectTracker(&objTrackerWithCalls).Build() ctx = context.Background() mockStatus = &status.MockStatus{} mockStatus.On("Run").Return() @@ -84,7 +94,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { Expect(c.Create(ctx, &operatorv1.Monitor{ ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"}, })) - r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, ready) dpl = &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -162,7 +172,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { Context("image reconciliation", func() { It("should use builtin images", func() { - r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, ready) _, err := r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -193,7 +203,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { }, })).ToNot(HaveOccurred()) - r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, ready) _, err := r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -229,7 +239,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { } Expect(c.Create(ctx, licenseKey)).NotTo(HaveOccurred()) Expect(c.Create(ctx, &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}})).NotTo(HaveOccurred()) - r = 
clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, ready) }) Context("IP-based management cluster address", func() { @@ -261,7 +271,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { mockStatus.On("OnCRFound").Return() mockStatus.On("SetMetaData", mock.Anything).Return() - r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, notReady) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, notReady) test.ExpectWaitForTierWatch(ctx, r, mockStatus) policies := v3.NetworkPolicyList{} @@ -298,7 +308,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { mockStatus.On("SetDegraded", operatorv1.ResourceReadError, "Feature is not active - License does not support feature: egress-access-control", mock.Anything, mock.Anything).Return() mockStatus.On("SetMetaData", mock.Anything).Return() - r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, ready) _, err := r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) mockStatus.AssertExpectations(GinkgoT()) @@ -310,7 +320,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { mockStatus.On("OnCRFound").Return() mockStatus.On("SetMetaData", mock.Anything).Return() - r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, notReady) + r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, notReady) test.ExpectWaitForTierWatch(ctx, r, mockStatus) policies := v3.NetworkPolicyList{} @@ -338,7 +348,141 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { Expect(policies.Items).To(HaveLen(0)) }) }) + + Context("Proxy detection", func() { + // Generate test cases based on the combinations of proxy address forms and proxy settings. + // Here we specify the base targets along with the base proxy IP, domain, and port that will be used for generation. + testCases := generateCases("voltron.io:9000", "192.168.1.2:9000", "proxy.io", "10.1.2.3", "8080") + + for _, testCase := range testCases { + Describe(fmt.Sprintf("Proxy detection when %+v", testCase), func() { + // Set up the test based on the test case. + BeforeEach(func() { + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tigera-guardian", + Namespace: "tigera-guardian", + Labels: map[string]string{ + "k8s-app": "tigera-guardian", + "app.kubernetes.io/name": "tigera-guardian", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Name: "tigera-guardian", + Env: []v1.EnvVar{}, + }}, + }, + } + + // Set the env vars. 
+ httpsProxyVarName := "HTTPS_PROXY" + noProxyVarName := "NO_PROXY" + if testCase.lowercase { + httpsProxyVarName = strings.ToLower(httpsProxyVarName) + noProxyVarName = strings.ToLower(noProxyVarName) + } + if testCase.httpsProxy != nil { + pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, v1.EnvVar{ + Name: httpsProxyVarName, + Value: *testCase.httpsProxy, + }) + } + if testCase.noProxy != nil { + pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, v1.EnvVar{ + Name: noProxyVarName, + Value: *testCase.noProxy, + }) + } + + err := c.Create(ctx, &pod) + Expect(err).NotTo(HaveOccurred()) + + // Set the target + cfg.Spec.ManagementClusterAddr = testCase.target + err = c.Update(ctx, cfg) + Expect(err).NotTo(HaveOccurred()) + }) + + It(fmt.Sprintf("detects proxy correctly when %+v", testCase), func() { + // First reconcile creates the guardian deployment without any availability condition. + _, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + + // Validate that we made no calls to get Pods at this stage. + podGVR := schema.GroupVersionResource{ + Version: "v1", + Resource: "pods", + } + Expect(objTrackerWithCalls.CallCount(podGVR, test.ObjectTrackerCallList)).To(BeZero()) + + // Set the deployment to be unavailable. We need to recreate the deployment otherwise the status update is ignored. + gd := appsv1.Deployment{} + err = c.Get(ctx, client.ObjectKey{Name: "tigera-guardian", Namespace: "tigera-guardian"}, &gd) + Expect(err).NotTo(HaveOccurred()) + err = c.Delete(ctx, &gd) + Expect(err).NotTo(HaveOccurred()) + gd.ResourceVersion = "" + gd.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: v1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now()}, + }} + err = c.Create(ctx, &gd) + Expect(err).NotTo(HaveOccurred()) + + // Reconcile again. We should see no calls since the deployment has not transitioned to available. + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(objTrackerWithCalls.CallCount(podGVR, test.ObjectTrackerCallList)).To(Equal(0)) + + // Set the deployment to available. + err = c.Delete(ctx, &gd) + Expect(err).NotTo(HaveOccurred()) + gd.ResourceVersion = "" + gd.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: v1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: time.Now().Add(time.Minute)}, + }} + err = c.Create(ctx, &gd) + Expect(err).NotTo(HaveOccurred()) + + // Reconcile again. The proxy detection logic should kick in since the guardian deployment is ready. + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(objTrackerWithCalls.CallCount(podGVR, test.ObjectTrackerCallList)).To(Equal(1)) + + // Resolve the rendered rule that governs egress from guardian to voltron. + policies := v3.NetworkPolicyList{} + Expect(c.List(ctx, &policies)).ToNot(HaveOccurred()) + Expect(policies.Items).To(HaveLen(2)) + Expect(policies.Items[1].Name).To(Equal("allow-tigera.guardian-access")) + policy := policies.Items[1] + Expect(policy.Spec.Egress).To(HaveLen(7)) + managementClusterEgressRule := policy.Spec.Egress[5] + + // Generate the expectation based on the test case, and compare the rendered rule to our expectation. 
+ expected := getExpectedProxyPolicyFromCase(testCase) + if expected.hostIsIP { + Expect(managementClusterEgressRule.Destination.Nets).To(HaveLen(1)) + Expect(managementClusterEgressRule.Destination.Nets[0]).To(Equal(fmt.Sprintf("%s/32", expected.host))) + Expect(managementClusterEgressRule.Destination.Ports).To(Equal(networkpolicy.Ports(expected.port))) + } else { + Expect(managementClusterEgressRule.Destination.Domains).To(Equal([]string{expected.host})) + Expect(managementClusterEgressRule.Destination.Ports).To(Equal(networkpolicy.Ports(expected.port))) + } + + // Reconcile again. Verify that we do not cause any additional query for pods now that we have resolved the proxy. + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(objTrackerWithCalls.CallCount(podGVR, test.ObjectTrackerCallList)).To(Equal(1)) + }) + }) + } + }) }) + Context("Reconcile for Condition status", func() { generation := int64(2) It("should reconcile with empty tigerastatus conditions ", func() { @@ -513,3 +657,152 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { }) }) }) + +type proxyTestCase struct { + lowercase bool + target string + httpsProxy *string + noProxy *string +} + +type expectedProxyPolicy struct { + host string + port uint16 + hostIsIP bool + isProxied bool +} + +func generateCases(targetDomain, targetIP, proxyDomain, proxyIP, proxyPort string) []proxyTestCase { + var cases []proxyTestCase + // We will collect the cases by target type. Targets are in the form of ip:port or domain:port. + for _, target := range []string{targetDomain, targetIP} { + var casesByTargetType []proxyTestCase + // Generate the proxy strings. They can be http or https, use a domain or IP host, and can optionally specify a port. + var proxyStrings []*string + for _, scheme := range []string{"http", "https"} { + for _, host := range []string{proxyDomain, proxyIP} { + for _, port := range []string{"", proxyPort} { + proxyString := fmt.Sprintf("%s://%s", scheme, host) + if port != "" { + proxyString = fmt.Sprintf("%s:%s", proxyString, port) + } + proxyStrings = append(proxyStrings, ptr.ToPtr[string](proxyString)) + } + } + } + // Add base cases: proxy is unset, or an empty proxy is set. + proxyStrings = append(proxyStrings, nil, ptr.ToPtr[string]("")) + + // Generate the "no proxy" strings. They can either match or not match the target, can list one or many exemptions, + // and can optionally specify a port. + var noProxyStrings []*string + for _, matchesTarget := range []bool{true, false} { + noProxyContainsPort := []bool{false} + if matchesTarget { + noProxyContainsPort = append(noProxyContainsPort, true) + } + for _, containsPort := range noProxyContainsPort { + for _, multipleExemptions := range []bool{true, false} { + host, port, err := net.SplitHostPort(target) + Expect(err).NotTo(HaveOccurred()) + matchString := host + if containsPort { + matchString = fmt.Sprintf("%s:%s", matchString, port) + } + + var noProxyString string + if matchesTarget { + noProxyString = matchString + } else { + noProxyString = "nomatch.com" + } + + if multipleExemptions { + noProxyString = fmt.Sprintf("1.1.1.1,%s,nobueno.com", noProxyString) + } + + noProxyStrings = append(noProxyStrings, ptr.ToPtr[string](noProxyString)) + } + } + } + // Add base cases: no-proxy is unset, or an empty no-proxy is set. + noProxyStrings = append(noProxyStrings, nil, ptr.ToPtr[string]("")) + + // Create the cases based on the generated combinations of proxy strings. 
+ // The env vars can be set as either lowercase or uppercase on the container, we express that possibility here. + for _, lowercase := range []bool{true, false} { + for _, proxyString := range proxyStrings { + for _, noProxyString := range noProxyStrings { + casesByTargetType = append(casesByTargetType, proxyTestCase{ + lowercase: lowercase, + target: target, + httpsProxy: proxyString, + noProxy: noProxyString, + }) + } + } + } + cases = append(cases, casesByTargetType...) + } + return cases +} + +func getExpectedProxyPolicyFromCase(c proxyTestCase) expectedProxyPolicy { + var isProxied bool + if c.httpsProxy != nil && *c.httpsProxy != "" { + if c.noProxy == nil || *c.noProxy == "" { + isProxied = true + } else { + var proxyIsExempt bool + for _, noProxySubstring := range strings.Split(*c.noProxy, ",") { + if strings.Contains(c.target, noProxySubstring) { + proxyIsExempt = true + break + } + } + if !proxyIsExempt { + isProxied = true + } + } + } + + var host string + var port uint16 + if isProxied { + proxyURL, err := url.ParseRequestURI(*c.httpsProxy) + Expect(err).NotTo(HaveOccurred()) + + // Resolve port + hostSplit := strings.Split(proxyURL.Host, ":") + switch { + case len(hostSplit) == 2: + port64, err := strconv.ParseUint(hostSplit[1], 10, 16) + Expect(err).NotTo(HaveOccurred()) + host = hostSplit[0] + port = uint16(port64) + case proxyURL.Scheme == "https": + host = proxyURL.Host + port = 443 + default: + host = proxyURL.Host + port = 80 + } + + Expect(err).NotTo(HaveOccurred()) + } else { + var portString string + var err error + host, portString, err = net.SplitHostPort(c.target) + Expect(err).NotTo(HaveOccurred()) + port64, err := strconv.ParseUint(portString, 10, 16) + Expect(err).NotTo(HaveOccurred()) + port = uint16(port64) + } + + return expectedProxyPolicy{ + host: host, + port: port, + isProxied: isProxied, + hostIsIP: net.ParseIP(host) != nil, + } +} diff --git a/pkg/crds/calico/crd.projectcalico.org_bgpfilters.yaml b/pkg/crds/calico/crd.projectcalico.org_bgpfilters.yaml index 49dc53f8e8..09db5e3eff 100644 --- a/pkg/crds/calico/crd.projectcalico.org_bgpfilters.yaml +++ b/pkg/crds/calico/crd.projectcalico.org_bgpfilters.yaml @@ -49,6 +49,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object source: type: string required: @@ -70,6 +83,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object source: type: string required: @@ -91,6 +117,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object source: type: string required: @@ -112,6 +151,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object source: type: string required: diff --git a/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml b/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml index bf4420ca1b..039119087d 100644 --- a/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml +++ 
b/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml @@ -794,10 +794,10 @@ spec: order: description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied - after those with lower order. If the order is omitted, it may be - considered to be "infinite" - i.e. the policy will be applied last. Policies - with identical order will be applied in alphanumerical order based - on the Policy "Name". + after those with lower order within the same tier. If the order + is omitted, it may be considered to be "infinite" - i.e. the policy + will be applied last. Policies with identical order will be applied + in alphanumerical order based on the Policy "Name" within the tier. type: number performanceHints: description: "PerformanceHints contains a list of hints to Calico's @@ -839,6 +839,14 @@ spec: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string + tier: + description: The name of the tier that this policy belongs to. If + this is omitted, the default tier (name is "default") is assumed. The + specified tier must exist in order to create security policies within + the tier, the "default" tier is created automatically if it does + not exist, this means for deployments requiring only a single Tier, + the tier name may be omitted on all policy management requests. + type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. When not explicitly specified (and so diff --git a/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml b/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml index 6a92754520..b2a4c07797 100644 --- a/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml +++ b/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml @@ -779,10 +779,10 @@ spec: order: description: Order is an optional field that specifies the order in which the policy is applied. Policies with higher "order" are applied - after those with lower order. If the order is omitted, it may be - considered to be "infinite" - i.e. the policy will be applied last. Policies - with identical order will be applied in alphanumerical order based - on the Policy "Name". + after those with lower order within the same tier. If the order + is omitted, it may be considered to be "infinite" - i.e. the policy + will be applied last. Policies with identical order will be applied + in alphanumerical order based on the Policy "Name" within the tier. type: number performanceHints: description: "PerformanceHints contains a list of hints to Calico's @@ -820,6 +820,14 @@ spec: description: ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. type: string + tier: + description: The name of the tier that this policy belongs to. If + this is omitted, the default tier (name is "default") is assumed. The + specified tier must exist in order to create security policies within + the tier, the "default" tier is created automatically if it does + not exist, this means for deployments requiring only a single Tier, + the tier name may be omitted on all policy management requests. + type: string types: description: "Types indicates whether this policy applies to ingress, or to egress, or to both. 
When not explicitly specified (and so diff --git a/pkg/crds/calico/crd.projectcalico.org_tiers.yaml b/pkg/crds/calico/crd.projectcalico.org_tiers.yaml new file mode 100644 index 0000000000..323e092467 --- /dev/null +++ b/pkg/crds/calico/crd.projectcalico.org_tiers.yaml @@ -0,0 +1,54 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: tiers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: Tier + listKind: TierList + plural: tiers + singular: tier + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TierSpec contains the specification for a security policy + tier resource. + properties: + order: + description: Order is an optional field that specifies the order in + which the tier is applied. Tiers with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the tier will be applied last. Tiers + with identical order will be applied in alphanumerical order based + on the Tier "Name". 
+ type: number + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/crds/enterprise/crd.projectcalico.org_bgpfilters.yaml b/pkg/crds/enterprise/crd.projectcalico.org_bgpfilters.yaml index 49dc53f8e8..09db5e3eff 100644 --- a/pkg/crds/enterprise/crd.projectcalico.org_bgpfilters.yaml +++ b/pkg/crds/enterprise/crd.projectcalico.org_bgpfilters.yaml @@ -49,6 +49,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object source: type: string required: @@ -70,6 +83,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object source: type: string required: @@ -91,6 +117,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 32 + minimum: 0 + type: integer + min: + format: int32 + maximum: 32 + minimum: 0 + type: integer + type: object source: type: string required: @@ -112,6 +151,19 @@ spec: type: string matchOperator: type: string + prefixLength: + properties: + max: + format: int32 + maximum: 128 + minimum: 0 + type: integer + min: + format: int32 + maximum: 128 + minimum: 0 + type: integer + type: object source: type: string required: diff --git a/pkg/render/guardian.go b/pkg/render/guardian.go index 0a1d1d6a4e..d108acdb09 100644 --- a/pkg/render/guardian.go +++ b/pkg/render/guardian.go @@ -18,6 +18,10 @@ package render import ( "net" + "net/url" + + operatorurl "github.com/tigera/operator/pkg/url" + "golang.org/x/net/http/httpproxy" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -82,6 +86,7 @@ func GuardianPolicy(cfg *GuardianConfiguration) (Component, error) { // GuardianConfiguration contains all the config information needed to render the component. type GuardianConfiguration struct { URL string + HTTPProxyConfig *httpproxy.Config PullSecrets []*corev1.Secret OpenShift bool Installation *operatorv1.InstallationSpec @@ -378,8 +383,36 @@ func guardianAllowTigeraPolicy(cfg *GuardianConfiguration) (*v3.NetworkPolicy, e }, }...) - // Assumes address has the form "host:port", required by net.Dial for TCP. - host, port, err := net.SplitHostPort(cfg.URL) + var proxyURL *url.URL + var err error + if cfg.HTTPProxyConfig != nil && cfg.HTTPProxyConfig.HTTPSProxy != "" { + targetURL := &url.URL{ + // The scheme should be HTTPS, as we are establishing an mTLS session with the target. + Scheme: "https", + + // We expect `target` to be of the form host:port. + Host: cfg.URL, + } + + proxyURL, err = cfg.HTTPProxyConfig.ProxyFunc()(targetURL) + if err != nil { + return nil, err + } + } + + var tunnelDestinationHostPort string + if proxyURL != nil { + proxyHostPort, err := operatorurl.ParseHostPortFromHTTPProxyURL(proxyURL) + if err != nil { + return nil, err + } + tunnelDestinationHostPort = proxyHostPort + } else { + // cfg.URL has host:port form + tunnelDestinationHostPort = cfg.URL + } + + host, port, err := net.SplitHostPort(tunnelDestinationHostPort) if err != nil { return nil, err } diff --git a/pkg/url/url.go b/pkg/url/url.go index 8bec1a5851..126f2f8373 100644 --- a/pkg/url/url.go +++ b/pkg/url/url.go @@ -1,7 +1,22 @@ +// Copyright (c) 2021-2024 Tigera, Inc. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package url import ( "fmt" + "net" "net/url" "strings" ) @@ -18,3 +33,31 @@ func ParseEndpoint(endpoint string) (string, string, string, error) { } return url.Scheme, splits[0], splits[1], nil } + +func ParseHostPortFromHTTPProxyString(proxyURL string) (string, error) { + parsedProxyURL, err := url.ParseRequestURI(proxyURL) + if err != nil { + return "", err + } + + return ParseHostPortFromHTTPProxyURL(parsedProxyURL) +} + +func ParseHostPortFromHTTPProxyURL(url *url.URL) (string, error) { + parsedScheme := url.Scheme + if parsedScheme == "" || (parsedScheme != "http" && parsedScheme != "https") { + return "", fmt.Errorf("unexpected scheme for HTTP proxy URL: %s", parsedScheme) + } + + if url.Port() != "" { + // Host is already in host:port form. + return url.Host, nil + } + + // Scheme is either http or https at this point. + if url.Scheme == "http" { + return net.JoinHostPort(url.Host, "80"), nil + } else { + return net.JoinHostPort(url.Host, "443"), nil + } +} diff --git a/test/utils.go b/test/utils.go index 086f158be2..84e114de25 100644 --- a/test/utils.go +++ b/test/utils.go @@ -23,6 +23,9 @@ import ( "time" "github.com/stretchr/testify/mock" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/testing" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -250,3 +253,77 @@ func ExpectWaitForWatch(ctx context.Context, r reconcile.Reconciler, mockStatus Expect(err).ShouldNot(HaveOccurred()) mockStatus.AssertExpectations(GinkgoT()) } + +type ObjectTrackerCall string + +const ( + ObjectTrackerCallGet ObjectTrackerCall = "get" + ObjectTrackerCallCreate ObjectTrackerCall = "create" + ObjectTrackerCallUpdate ObjectTrackerCall = "update" + ObjectTrackerCallList ObjectTrackerCall = "list" + ObjectTrackerCallDelete ObjectTrackerCall = "delete" + ObjectTrackerCallWatch ObjectTrackerCall = "watch" +) + +func NewObjectTrackerWithCalls(clientScheme testing.ObjectScheme) ObjectTrackerWithCalls { + return ObjectTrackerWithCalls{ + ObjectTracker: testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder()), + callsByGVR: make(map[schema.GroupVersionResource]map[ObjectTrackerCall]int), + } +} + +// ObjectTrackerWithCalls wraps the default implementation of testing.ObjectTracker to track the calls made. 
+type ObjectTrackerWithCalls struct { + testing.ObjectTracker + callsByGVR map[schema.GroupVersionResource]map[ObjectTrackerCall]int +} + +func (o *ObjectTrackerWithCalls) Add(obj runtime.Object) error { + return o.ObjectTracker.Add(obj) +} + +func (o *ObjectTrackerWithCalls) inc(gvr schema.GroupVersionResource, call ObjectTrackerCall) { + if o.callsByGVR == nil { + o.callsByGVR = make(map[schema.GroupVersionResource]map[ObjectTrackerCall]int) + } + + if o.callsByGVR[gvr] == nil { + o.callsByGVR[gvr] = make(map[ObjectTrackerCall]int) + } + + o.callsByGVR[gvr][call]++ +} + +func (o *ObjectTrackerWithCalls) CallCount(gvr schema.GroupVersionResource, call ObjectTrackerCall) int { + return o.callsByGVR[gvr][call] +} + +func (o *ObjectTrackerWithCalls) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + o.inc(gvr, ObjectTrackerCallGet) + return o.ObjectTracker.Get(gvr, ns, name) +} + +func (o *ObjectTrackerWithCalls) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + o.inc(gvr, ObjectTrackerCallCreate) + return o.ObjectTracker.Create(gvr, obj, ns) +} + +func (o *ObjectTrackerWithCalls) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + o.inc(gvr, ObjectTrackerCallUpdate) + return o.ObjectTracker.Update(gvr, obj, ns) +} + +func (o *ObjectTrackerWithCalls) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { + o.inc(gvr, ObjectTrackerCallList) + return o.ObjectTracker.List(gvr, gvk, ns) +} + +func (o *ObjectTrackerWithCalls) Delete(gvr schema.GroupVersionResource, ns, name string) error { + o.inc(gvr, ObjectTrackerCallDelete) + return o.ObjectTracker.Delete(gvr, ns, name) +} + +func (o *ObjectTrackerWithCalls) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + o.inc(gvr, ObjectTrackerCallWatch) + return o.ObjectTracker.Watch(gvr, ns) +}
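
The proxy handling in this patch is split across three files: the controller reads HTTPS_PROXY/https_proxy and NO_PROXY/no_proxy from the Guardian container's environment, the renderer picks the egress destination for the allow-tigera.guardian-access policy, and pkg/url defaults the proxy port from the scheme. The standalone Go sketch below summarizes that end-to-end behavior in one place. It is illustrative only: the helper name resolveTunnelDestination does not exist in the patch, but the sketch uses the same golang.org/x/net/http/httpproxy package the patch imports and the same port-defaulting rule as pkg/url.ParseHostPortFromHTTPProxyURL.

package main

import (
	"fmt"
	"net"
	"net/url"

	"golang.org/x/net/http/httpproxy"
)

// resolveTunnelDestination is a hypothetical helper (not part of the patch): given the
// management cluster address ("host:port") and the proxy settings read from the Guardian
// container's environment, it returns the host:port that the rendered egress rule targets.
func resolveTunnelDestination(managementClusterAddr string, proxyCfg *httpproxy.Config) (string, error) {
	if proxyCfg == nil || proxyCfg.HTTPSProxy == "" {
		// No proxy configured: Guardian dials the management cluster directly.
		return managementClusterAddr, nil
	}

	// Ask the standard proxy resolver whether this target is proxied, honoring NO_PROXY
	// exemptions. The tunnel is mTLS, so the target is treated as an https:// URL.
	target := &url.URL{Scheme: "https", Host: managementClusterAddr}
	proxyURL, err := proxyCfg.ProxyFunc()(target)
	if err != nil {
		return "", err
	}
	if proxyURL == nil {
		// Target matched NO_PROXY: egress still goes straight to the management cluster.
		return managementClusterAddr, nil
	}

	// Proxy URLs may omit the port; default it from the scheme, mirroring the behavior
	// of ParseHostPortFromHTTPProxyURL in pkg/url.
	if proxyURL.Port() != "" {
		return proxyURL.Host, nil
	}
	if proxyURL.Scheme == "http" {
		return net.JoinHostPort(proxyURL.Host, "80"), nil
	}
	return net.JoinHostPort(proxyURL.Host, "443"), nil
}

func main() {
	cfg := &httpproxy.Config{HTTPSProxy: "http://proxy.io:8080", NoProxy: "internal.example.com"}
	dest, _ := resolveTunnelDestination("voltron.io:9000", cfg)
	fmt.Println(dest) // proxy.io:8080 — the egress rule targets the proxy, not voltron.io
}

With this resolution in place, the rendered rule allows egress to a single /32 when the resolved host is an IP, or to a Domains match when it is a DNS name, which is why networkPolicyRequiresEgressAccessControl now checks the proxy destination rather than only the ManagementClusterAddr.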