Respect HTTP proxies when rendering Guardian policy #3475

Merged · 3 commits · Sep 11, 2024
140 changes: 126 additions & 14 deletions pkg/controller/clusterconnection/clusterconnection_controller.go
@@ -19,6 +19,12 @@ import (
"fmt"
"net"

"github.com/tigera/operator/pkg/url"
Review comment (Member), on the import above: Needs formatting
"golang.org/x/net/http/httpproxy"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"

"github.com/go-logr/logr"

corev1 "k8s.io/api/core/v1"
@@ -166,6 +172,10 @@ func add(mgr manager.Manager, c ctrlruntime.Controller) error {
return fmt.Errorf("%s failed to watch ImageSet: %w", controllerName, err)
}

if err := utils.AddDeploymentWatch(c, render.GuardianDeploymentName, render.GuardianNamespace); err != nil {
return fmt.Errorf("%s failed to watch Guardian deployment: %w", controllerName, err)
}

// Watch for changes to TigeraStatus.
if err = utils.AddTigeraStatusWatch(c, ResourceName); err != nil {
return fmt.Errorf("clusterconnection-controller failed to watch management-cluster-connection Tigerastatus: %w", err)
@@ -179,12 +189,14 @@ var _ reconcile.Reconciler = &ReconcileConnection{}

// ReconcileConnection reconciles a ManagementClusterConnection object
type ReconcileConnection struct {
Client client.Client
Scheme *runtime.Scheme
Provider operatorv1.Provider
status status.StatusManager
clusterDomain string
tierWatchReady *utils.ReadyFlag
Client client.Client
Scheme *runtime.Scheme
Provider operatorv1.Provider
status status.StatusManager
clusterDomain string
tierWatchReady *utils.ReadyFlag
resolvedPodProxies []*httpproxy.Config
lastAvailabilityTransition metav1.Time
}

// Reconcile reads the state of the cluster for a ManagementClusterConnection object and makes changes based on the
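The new resolvedPodProxies field caches golang.org/x/net/http/httpproxy Config values. That package's ProxyFunc applies Go's standard HTTPS_PROXY/NO_PROXY selection semantics, which is what makes the cached configs meaningful for policy decisions. A runnable illustration (hostnames are made up):

package main

import (
	"fmt"
	"net/url"

	"golang.org/x/net/http/httpproxy"
)

func main() {
	cfg := &httpproxy.Config{
		HTTPSProxy: "http://proxy.example.com:3128", // hypothetical proxy
		NoProxy:    "10.0.0.0/8,.cluster.local",     // bypass list
	}
	proxy := cfg.ProxyFunc()

	for _, target := range []string{
		"https://mgmt.example.com:9449", // not in NoProxy: proxied
		"https://10.1.2.3:9449",         // in NoProxy: direct (nil proxy URL)
	} {
		u, _ := url.Parse(target)
		proxyURL, err := proxy(u)
		fmt.Println(target, "->", proxyURL, err)
	}
}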
@@ -323,6 +335,75 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
trustedCertBundle.AddCertificates(secret)
}

// Determine the current deployment availability.
var currentAvailabilityTransition metav1.Time
var currentlyAvailable bool
guardianDeployment := v1.Deployment{}
err = r.Client.Get(ctx, client.ObjectKey{Name: render.GuardianDeploymentName, Namespace: render.GuardianNamespace}, &guardianDeployment)
if err != nil && !k8serrors.IsNotFound(err) {
r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to read the deployment status of Guardian", err, reqLogger)
return reconcile.Result{}, nil
} else if err == nil {
for _, condition := range guardianDeployment.Status.Conditions {
if condition.Type == v1.DeploymentAvailable {
currentAvailabilityTransition = condition.LastTransitionTime
if condition.Status == corev1.ConditionTrue {
currentlyAvailable = true
}
break
}
}
}

// Resolve the proxies used by each Guardian pod. We only update the resolved proxies if the availability of the
// Guardian deployment has changed since our last reconcile and the deployment is currently available. We restrict
// the resolution of pod proxies in this way to limit the number of pod queries we make.
if !currentAvailabilityTransition.Equal(&r.lastAvailabilityTransition) && currentlyAvailable {
// Query guardian pods.
labelSelector := labels.SelectorFromSet(map[string]string{
"app.kubernetes.io/name": render.GuardianDeploymentName,
})
pods := corev1.PodList{}
err := r.Client.List(ctx, &pods, &client.ListOptions{
LabelSelector: labelSelector,
Namespace: render.GuardianNamespace,
})
if err != nil {
r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to list the pods of the Guardian deployment", err, reqLogger)
return reconcile.Result{}, nil
}

// Resolve the proxy config for each pod. Pods without a proxy will have a nil proxy config value.
var podProxies []*httpproxy.Config
for _, pod := range pods.Items {
for _, container := range pod.Spec.Containers {
if container.Name == render.GuardianDeploymentName {
var podProxyConfig *httpproxy.Config
var httpsProxy, noProxy string
for _, env := range container.Env {
switch env.Name {
case "https_proxy", "HTTPS_PROXY":
httpsProxy = env.Value
case "no_proxy", "NO_PROXY":
noProxy = env.Value
}
}
if httpsProxy != "" || noProxy != "" {
podProxyConfig = &httpproxy.Config{
HTTPSProxy: httpsProxy,
NoProxy: noProxy,
}
}

podProxies = append(podProxies, podProxyConfig)
}
}
}

r.resolvedPodProxies = podProxies
}
r.lastAvailabilityTransition = currentAvailabilityTransition

// Validate that the tier watch is ready before querying the tier to ensure we utilize the cache.
if !r.tierWatchReady.IsReady() {
r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger)
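The per-container extraction above can be read in isolation; this standalone restatement (helper name is ours, not the PR's) also flags a caveat: only literal Value fields are visible here, so a proxy injected via ValueFrom would not be resolved.

import (
	"golang.org/x/net/http/httpproxy"
	corev1 "k8s.io/api/core/v1"
)

// proxyConfigFromContainer returns the container's proxy settings, or nil when
// neither https_proxy/HTTPS_PROXY nor no_proxy/NO_PROXY is set as a literal value.
func proxyConfigFromContainer(c corev1.Container) *httpproxy.Config {
	var httpsProxy, noProxy string
	for _, env := range c.Env {
		switch env.Name {
		case "https_proxy", "HTTPS_PROXY":
			httpsProxy = env.Value
		case "no_proxy", "NO_PROXY":
			noProxy = env.Value
		}
	}
	if httpsProxy == "" && noProxy == "" {
		return nil // matches the PR: pods without a proxy get a nil entry
	}
	return &httpproxy.Config{HTTPSProxy: httpsProxy, NoProxy: noProxy}
}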
@@ -345,7 +426,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
// The Tier has been created, which means that this controller's reconciliation should no longer be a dependency
// of the License being deployed. If NetworkPolicy requires license features, it should now be safe to validate
// License presence and sufficiency.
if networkPolicyRequiresEgressAccessControl(managementClusterConnection, log) {
if egressAccessControlRequired(managementClusterConnection.Spec.ManagementClusterAddr, r.resolvedPodProxies, log) {
license, err := utils.FetchLicenseKey(ctx, r.Client)
if err != nil {
if k8serrors.IsNotFound(err) {
@@ -366,6 +447,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
ch := utils.NewComponentHandler(log, r.Client, r.Scheme, managementClusterConnection)
guardianCfg := &render.GuardianConfiguration{
URL: managementClusterConnection.Spec.ManagementClusterAddr,
PodProxies: r.resolvedPodProxies,
TunnelCAType: managementClusterConnection.Spec.TLS.CA,
PullSecrets: pullSecrets,
OpenShift: r.Provider.IsOpenShift(),
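What the renderer ultimately derives from PodProxies is the effective egress destination per pod: the proxy's host:port when an HTTPS proxy is set, otherwise the management cluster address itself. A hypothetical helper sketching that decision (not the render package's actual code; in the real diff, pkg/url.ParseHostPortFromHTTPProxyString does the proxy-URL parsing):

import (
	"net"
	neturl "net/url"

	"golang.org/x/net/http/httpproxy"
)

// egressDestination picks the host:port that Guardian's tunnel traffic
// actually leaves the cluster toward.
func egressDestination(target string, proxy *httpproxy.Config) (string, error) {
	if proxy == nil || proxy.HTTPSProxy == "" {
		return target, nil // no proxy: egress goes straight to the target
	}
	u, err := neturl.Parse(proxy.HTTPSProxy)
	if err != nil {
		return "", err
	}
	port := u.Port()
	if port == "" {
		port = "80" // assumption: default port for an http:// proxy URL
	}
	return net.JoinHostPort(u.Hostname(), port), nil
}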
@@ -417,23 +499,53 @@ func fillDefaults(mcc *operatorv1.ManagementClusterConnection) {
}
}

func networkPolicyRequiresEgressAccessControl(connection *operatorv1.ManagementClusterConnection, log logr.Logger) bool {
if clusterAddrHasDomain, err := managementClusterAddrHasDomain(connection); err == nil && clusterAddrHasDomain {
return true
} else {
func egressAccessControlRequired(target string, podProxies []*httpproxy.Config, log logr.Logger) bool {
processedPodProxies := render.ProcessPodProxies(podProxies)
for _, httpProxyConfig := range processedPodProxies {
if egressAccessControlRequiredByProxy(target, httpProxyConfig, log) {
return true
}
}
return false
}

func egressAccessControlRequiredByProxy(target string, httpProxyConfig *httpproxy.Config, log logr.Logger) bool {
var destinationHostPort string
if httpProxyConfig != nil && httpProxyConfig.HTTPSProxy != "" {
// HTTPS proxy is specified as a URL.
proxyHostPort, err := url.ParseHostPortFromHTTPProxyString(httpProxyConfig.HTTPSProxy)
if err != nil {
log.Error(err, fmt.Sprintf(
"Failed to parse ManagementClusterAddr. Assuming %s does not require license feature %s",
"Failed to parse HTTP Proxy URL (%s). Assuming %s does not require license feature %s",
httpProxyConfig.HTTPSProxy,
render.GuardianPolicyName,
common.EgressAccessControlFeature,
))
return false
}

destinationHostPort = proxyHostPort
} else {
// Target is already specified as host:port.
destinationHostPort = target
}

// Determine if the host in the host:port is a domain name.
hostPortHasDomain, err := hostPortUsesDomainName(destinationHostPort)
if err != nil {
log.Error(err, fmt.Sprintf(
"Failed to parse resolved host:port (%s) for remote tunnel endpoint. Assuming %s does not require license feature %s",
destinationHostPort,
render.GuardianPolicyName,
common.EgressAccessControlFeature,
))
return false
}
return hostPortHasDomain
}

func managementClusterAddrHasDomain(connection *operatorv1.ManagementClusterConnection) (bool, error) {
host, _, err := net.SplitHostPort(connection.Spec.ManagementClusterAddr)
func hostPortUsesDomainName(hostPort string) (bool, error) {
host, _, err := net.SplitHostPort(hostPort)
if err != nil {
return false, err
}
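The body of hostPortUsesDomainName is truncated here, but the distinction it draws (domain name vs. IP literal decides whether the policy needs DNS-based egress access control) is standard-library territory. A sketch, assuming the check is an IP-literal test:

package main

import (
	"fmt"
	"net"
)

func hostPortUsesDomainName(hostPort string) (bool, error) {
	host, _, err := net.SplitHostPort(hostPort)
	if err != nil {
		return false, err
	}
	// net.ParseIP returns nil for anything that is not an IPv4/IPv6 literal,
	// so a nil result means the host is a domain name.
	return net.ParseIP(host) == nil, nil
}

func main() {
	for _, hp := range []string{"mgmt.example.com:9449", "192.0.2.10:9449"} {
		isDomain, _ := hostPortUsesDomainName(hp)
		fmt.Println(hp, "uses domain name:", isDomain)
	}
}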