Commit

Respect HTTP proxies when rendering Guardian policy
pasanw committed Sep 5, 2024
1 parent a0208eb commit 667b3a2
Showing 10 changed files with 753 additions and 39 deletions.
122 changes: 108 additions & 14 deletions pkg/controller/clusterconnection/clusterconnection_controller.go
@@ -19,6 +19,12 @@ import (
"fmt"
"net"

"github.com/tigera/operator/pkg/url"
"golang.org/x/net/http/httpproxy"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"

"github.com/go-logr/logr"

corev1 "k8s.io/api/core/v1"
@@ -166,6 +172,10 @@ func add(mgr manager.Manager, c ctrlruntime.Controller) error {
return fmt.Errorf("%s failed to watch ImageSet: %w", controllerName, err)
}

if err := utils.AddDeploymentWatch(c, render.GuardianDeploymentName, render.GuardianNamespace); err != nil {
return fmt.Errorf("%s failed to watch Guardian deployment: %w", controllerName, err)
}

// Watch for changes to TigeraStatus.
if err = utils.AddTigeraStatusWatch(c, ResourceName); err != nil {
return fmt.Errorf("clusterconnection-controller failed to watch management-cluster-connection Tigerastatus: %w", err)
@@ -179,12 +189,14 @@ var _ reconcile.Reconciler = &ReconcileConnection{}

// ReconcileConnection reconciles a ManagementClusterConnection object
type ReconcileConnection struct {
Client client.Client
Scheme *runtime.Scheme
Provider operatorv1.Provider
status status.StatusManager
clusterDomain string
tierWatchReady *utils.ReadyFlag
Client client.Client
Scheme *runtime.Scheme
Provider operatorv1.Provider
status status.StatusManager
clusterDomain string
tierWatchReady *utils.ReadyFlag
resolvedProxy *httpproxy.Config
lastAvailabilityTransition metav1.Time
}
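
The two fields added to ReconcileConnection above carry the new state: resolvedProxy caches the proxy settings read from the running Guardian pod, and lastAvailabilityTransition lets the reconciler skip re-reading them until the deployment's availability changes. resolvedProxy is a *httpproxy.Config from golang.org/x/net/http/httpproxy, the package that underpins net/http's ProxyFromEnvironment, which is why mirroring Guardian's https_proxy/no_proxy environment gives the operator the same view of egress that Guardian itself acts on. A minimal, self-contained sketch of the type (the literal values are hypothetical, not taken from this change):

```go
package main

import (
	"fmt"

	"golang.org/x/net/http/httpproxy"
)

func main() {
	// FromEnvironment reads HTTP_PROXY, HTTPS_PROXY and NO_PROXY (upper- or
	// lower-case) from the current process environment; this is effectively
	// what a Go process such as Guardian sees.
	fmt.Printf("from env: %+v\n", httpproxy.FromEnvironment())

	// The controller instead builds the equivalent Config by hand from the
	// Guardian container's env vars. Hypothetical values shown.
	resolved := &httpproxy.Config{
		HTTPSProxy: "http://proxy.corp.example:3128",
		NoProxy:    "10.0.0.0/8,.cluster.local",
	}
	fmt.Printf("resolved: %+v\n", resolved)
}
```
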

// Reconcile reads that state of the cluster for a ManagementClusterConnection object and makes changes based on the
@@ -323,6 +335,67 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
trustedCertBundle.AddCertificates(secret)
}

// Determine the current deployment availability.
var currentAvailabilityTransition metav1.Time
var currentlyAvailable bool
guardianDeployment := v1.Deployment{}
err = r.Client.Get(ctx, client.ObjectKey{Name: render.GuardianDeploymentName, Namespace: render.GuardianNamespace}, &guardianDeployment)
if err != nil && !k8serrors.IsNotFound(err) {
r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to read the deployment status of Guardian", err, reqLogger)
return reconcile.Result{}, nil
} else if err == nil {
for _, condition := range guardianDeployment.Status.Conditions {
if condition.Type == v1.DeploymentAvailable {
currentAvailabilityTransition = condition.LastTransitionTime
if condition.Status == corev1.ConditionTrue {
currentlyAvailable = true
}
break
}
}
}

// If the deployment availability has changed and is currently available, we update the resolved proxy configuration.
// We only update the resolved proxy configuration in this scenario (rather than every reconcile) to limit the number
// of pod queries we make.
if !currentAvailabilityTransition.Equal(&r.lastAvailabilityTransition) && currentlyAvailable {
// Query guardian pods.
labelSelector := labels.SelectorFromSet(map[string]string{
"app.kubernetes.io/name": render.GuardianDeploymentName,
})
pods := corev1.PodList{}
err := r.Client.List(ctx, &pods, &client.ListOptions{
LabelSelector: labelSelector,
Namespace: render.GuardianNamespace,
})
if err != nil {
r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to list the pods of the Guardian deployment", err, reqLogger)
return reconcile.Result{}, nil
}

// Parse the pod spec to resolve the proxy config.
var proxyConfig *httpproxy.Config
for _, pod := range pods.Items {
for _, container := range pod.Spec.Containers {
if container.Name == render.GuardianDeploymentName {
proxyConfig = &httpproxy.Config{}
for _, env := range container.Env {
switch env.Name {
case "https_proxy", "HTTPS_PROXY":
proxyConfig.HTTPSProxy = env.Value
case "no_proxy", "NO_PROXY":
proxyConfig.NoProxy = env.Value
}
}
break
}
}
}

r.resolvedProxy = proxyConfig
}
r.lastAvailabilityTransition = currentAvailabilityTransition

// Validate that the tier watch is ready before querying the tier to ensure we utilize the cache.
if !r.tierWatchReady.IsReady() {
r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger)
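
Once the hunk above has mirrored the Guardian container's environment into an httpproxy.Config, the conventional way to evaluate that config is its ProxyFunc: it applies HTTPSProxy and NoProxy to a destination URL and reports whether the connection would be dialed through the proxy. This is why the proxy's host:port, rather than ManagementClusterAddr, becomes the destination that the rendered policy and the license check further down have to reason about. A minimal sketch with hypothetical values:

```go
package main

import (
	"fmt"
	"net/url"

	"golang.org/x/net/http/httpproxy"
)

func main() {
	cfg := &httpproxy.Config{
		HTTPSProxy: "http://proxy.corp.example:3128",
		NoProxy:    "10.0.0.0/8,.cluster.local",
	}
	proxyFor := cfg.ProxyFunc()

	// Guardian dials the management cluster over HTTPS, so the HTTPSProxy
	// and NoProxy rules apply to this destination.
	target := &url.URL{Scheme: "https", Host: "mgmt.example.com:9449"}
	proxyURL, err := proxyFor(target)
	if err != nil {
		panic(err)
	}
	if proxyURL != nil {
		fmt.Println("egress goes to the proxy:", proxyURL.Host)
	} else {
		fmt.Println("egress dials the target directly:", target.Host)
	}
}
```

NoProxy accepts IP literals, CIDRs, domain suffixes and "*", so destinations excluded from proxying still egress directly to ManagementClusterAddr.
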
@@ -345,7 +418,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
// The Tier has been created, which means that this controller's reconciliation should no longer be a dependency
// of the License being deployed. If NetworkPolicy requires license features, it should now be safe to validate
// License presence and sufficiency.
if networkPolicyRequiresEgressAccessControl(managementClusterConnection, log) {
if networkPolicyRequiresEgressAccessControl(managementClusterConnection.Spec.ManagementClusterAddr, r.resolvedProxy, log) {
license, err := utils.FetchLicenseKey(ctx, r.Client)
if err != nil {
if k8serrors.IsNotFound(err) {
@@ -366,6 +439,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
ch := utils.NewComponentHandler(log, r.Client, r.Scheme, managementClusterConnection)
guardianCfg := &render.GuardianConfiguration{
URL: managementClusterConnection.Spec.ManagementClusterAddr,
HTTPProxyConfig: r.resolvedProxy,
TunnelCAType: managementClusterConnection.Spec.TLS.CA,
PullSecrets: pullSecrets,
OpenShift: r.Provider.IsOpenShift(),
Expand Down Expand Up @@ -417,23 +491,43 @@ func fillDefaults(mcc *operatorv1.ManagementClusterConnection) {
}
}

func networkPolicyRequiresEgressAccessControl(connection *operatorv1.ManagementClusterConnection, log logr.Logger) bool {
if clusterAddrHasDomain, err := managementClusterAddrHasDomain(connection); err == nil && clusterAddrHasDomain {
return true
} else {
func networkPolicyRequiresEgressAccessControl(target string, httpProxyConfig *httpproxy.Config, log logr.Logger) bool {
var destinationHostPort string
if httpProxyConfig != nil && httpProxyConfig.HTTPSProxy != "" {
// HTTPS proxy is specified as a URL.
proxyHostPort, err := url.ParseHostPortFromHTTPProxyString(httpProxyConfig.HTTPSProxy)
if err != nil {
log.Error(err, fmt.Sprintf(
"Failed to parse ManagementClusterAddr. Assuming %s does not require license feature %s",
"Failed to parse HTTP Proxy URL (%s). Assuming %s does not require license feature %s",
httpProxyConfig.HTTPSProxy,
render.GuardianPolicyName,
common.EgressAccessControlFeature,
))
return false
}

destinationHostPort = proxyHostPort
} else {
// Target is already specified as host:port.
destinationHostPort = target
}

// Determine if the host in the host:port is a domain name.
hostPortHasDomain, err := hostPortUsesDomainName(destinationHostPort)
if err != nil {
log.Error(err, fmt.Sprintf(
"Failed to parse resolved host:port (%s) for remote tunnel endpoint. Assuming %s does not require license feature %s",
destinationHostPort,
render.GuardianPolicyName,
common.EgressAccessControlFeature,
))
return false
}
return hostPortHasDomain
}

func managementClusterAddrHasDomain(connection *operatorv1.ManagementClusterConnection) (bool, error) {
host, _, err := net.SplitHostPort(connection.Spec.ManagementClusterAddr)
func hostPortUsesDomainName(hostPort string) (bool, error) {
host, _, err := net.SplitHostPort(hostPort)
if err != nil {
return false, err
}
(The remaining 9 changed files are not shown.)
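
For reference, the helper url.ParseHostPortFromHTTPProxyString is operator-internal and the tail of hostPortUsesDomainName is collapsed in the diff above, so the sketch below is only an approximation of the overall decision made by networkPolicyRequiresEgressAccessControl, built from standard-library stand-ins rather than the operator's own code:

```go
package main

import (
	"fmt"
	"net"
	neturl "net/url"

	"golang.org/x/net/http/httpproxy"
)

// parseHostPortFromProxyURL is a stand-in for the operator's
// url.ParseHostPortFromHTTPProxyString helper (whose implementation is not
// part of this diff): extract host:port from a proxy URL, defaulting the
// port from the scheme when it is omitted.
func parseHostPortFromProxyURL(raw string) (string, error) {
	u, err := neturl.Parse(raw)
	if err != nil {
		return "", err
	}
	host, port := u.Hostname(), u.Port()
	if port == "" {
		if u.Scheme == "https" {
			port = "443"
		} else {
			port = "80"
		}
	}
	return net.JoinHostPort(host, port), nil
}

// hostPortUsesDomainName is a stand-in for the truncated function above:
// anything whose host part is not a literal IP is treated as a domain name.
func hostPortUsesDomainName(hostPort string) (bool, error) {
	host, _, err := net.SplitHostPort(hostPort)
	if err != nil {
		return false, err
	}
	return net.ParseIP(host) == nil, nil
}

// requiresEgressAccessControl approximates networkPolicyRequiresEgressAccessControl:
// when an HTTPS proxy is configured it is the proxy, not the target, that the
// rendered policy must reach, and only domain-name destinations need the
// egress access control license feature.
func requiresEgressAccessControl(target string, proxy *httpproxy.Config) (bool, error) {
	dest := target
	if proxy != nil && proxy.HTTPSProxy != "" {
		var err error
		if dest, err = parseHostPortFromProxyURL(proxy.HTTPSProxy); err != nil {
			return false, err
		}
	}
	return hostPortUsesDomainName(dest)
}

func main() {
	proxy := &httpproxy.Config{HTTPSProxy: "http://10.10.10.10:3128"} // hypothetical
	fmt.Println(requiresEgressAccessControl("mgmt.example.com:9449", nil))   // true: domain-name target, no proxy
	fmt.Println(requiresEgressAccessControl("mgmt.example.com:9449", proxy)) // false: IP-literal proxy is the destination
}
```

The point the sketch makes explicit: when the effective destination (proxy or target) is a domain name, the rendered Guardian policy needs a domain-based egress rule, which the controller only allows when the license carries EgressAccessControlFeature; an IP-literal destination does not need it.
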
