diff --git a/.github/workflows/prbuild.yaml b/.github/workflows/prbuild.yaml
index 171be9ed48c..ce0b03b9e63 100644
--- a/.github/workflows/prbuild.yaml
+++ b/.github/workflows/prbuild.yaml
@@ -32,7 +32,7 @@ jobs:
version: v1.55.2
# TODO: re-enable linting tools package once https://github.com/projectcontour/contour/issues/5077
# is resolved
- args: --build-tags=e2e,conformance,gcp,oidc,none
+ args: --build-tags=e2e,conformance,gcp,oidc,none --out-format=colored-line-number
- uses: act10ns/slack@ed1309ab9862e57e9e583e51c7889486b9a00b0f # v2.0.0
with:
status: ${{ job.status }}
diff --git a/apis/projectcontour/v1alpha1/contourdeployment.go b/apis/projectcontour/v1alpha1/contourdeployment.go
index 7a37ef5cecd..80b17ab3f70 100644
--- a/apis/projectcontour/v1alpha1/contourdeployment.go
+++ b/apis/projectcontour/v1alpha1/contourdeployment.go
@@ -122,6 +122,11 @@ type ContourSettings struct {
// the annotations for Prometheus will be appended or overwritten with predefined value.
// +optional
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
+
+ // WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ // to only watch this set of namespaces
+ // +optional
+ WatchNamespaces []string `json:"watchNamespaces,omitempty"`
}
// DeploymentSettings contains settings for Deployment resources.
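As a rough sketch of how the new field is meant to be used (the metadata names and the team-a/team-b namespaces are illustrative, not part of this change; only spec.contour.watchNamespaces is introduced here), a ContourDeployment restricting the provisioned Contour to two namespaces could look like:

```yaml
apiVersion: projectcontour.io/v1alpha1
kind: ContourDeployment
metadata:
  name: contour-params          # example name
  namespace: projectcontour
spec:
  contour:
    watchNamespaces:            # new field added in this change
    - team-a
    - team-b
```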
diff --git a/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go b/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go
index 8647d3d36fb..3d3e0acc50d 100644
--- a/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go
@@ -387,6 +387,11 @@ func (in *ContourSettings) DeepCopyInto(out *ContourSettings) {
(*out)[key] = val
}
}
+ if in.WatchNamespaces != nil {
+ in, out := &in.WatchNamespaces, &out.WatchNamespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContourSettings.
diff --git a/changelogs/unreleased/6073-lubronzhan-small.md b/changelogs/unreleased/6073-lubronzhan-small.md
new file mode 100644
index 00000000000..f9e86422557
--- /dev/null
+++ b/changelogs/unreleased/6073-lubronzhan-small.md
@@ -0,0 +1 @@
+Allow the Gateway provisioner to create Contour instances that watch resources only in a limited set of namespaces
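For context, these parameters reach the Gateway provisioner through a GatewayClass parametersRef. A sketch, assuming the provisioner's usual projectcontour.io/gateway-controller controller name and the example ContourDeployment above (the Gateway API version string may differ depending on the release in use):

```yaml
apiVersion: gateway.networking.k8s.io/v1beta1
kind: GatewayClass
metadata:
  name: contour-namespaced
spec:
  controllerName: projectcontour.io/gateway-controller
  parametersRef:
    group: projectcontour.io
    kind: ContourDeployment
    name: contour-params        # the ContourDeployment sketched earlier
    namespace: projectcontour
```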
diff --git a/design/automated-provisioning-design.md b/design/automated-provisioning-design.md
index 59536a376d6..3820e1259ad 100644
--- a/design/automated-provisioning-design.md
+++ b/design/automated-provisioning-design.md
@@ -60,7 +60,7 @@ It will handle the full provisioning lifecycle for these Gateways: creating a Co
![drawing](images/gatewayapi-provisioner-overview.png)
Importantly, this provisioning model allows for (a) any number of GatewayClasses to be controlled by the Gateway provisioner; and (b) any number of Gateways per controlled GatewayClass.
-While the exact use cases for many Contour Gateways remain unclear, these one-to-many relationships between controller and GatewayClass, and GatewayClass and Gateway, offer users the full flexibility that the Gateway API spec intends.
+While the exact use cases for many Contour Gateways remain unclear, these one-to-many relationships between controller and GatewayClass, and GatewayClass and Gateway, offer users the full flexibility that the Gateway API spec intends.
Additionally, support for this one-to-many relationship is required in order to pass any of the Gateway API conformance tests.
![drawing](images/gatewayapi-resource-relationships.png)
@@ -77,7 +77,7 @@ Those users can continue to statically provision their Contour + Envoy instances
There will be two ways to configure Contour for Gateway API support in the static provisioning scenario:
- **Controller name** - this is the model implemented today, where Contour is configured with a controller name, and it continuously looks for the oldest GatewayClass with that controller, and the oldest Gateway using that GatewayClass. This model is appropriate for users who expect their GatewayClasses and Gateways to come and go, and who want their Contour instance to dynamically pick up the appropriate Gateway as those changes occur.
-- **Gateway name** - Contour can alternately be directly configured with a specific Gateway name, which avoids the multiple levels of indirection of the previous model. This model is appropriate for users who expect their Contour instance to correspond to a single static Gateway; the lifecycle of the Gateway and the lifecycle of the Contour instance are tied together.
+- **Gateway name** - Contour can alternately be directly configured with a specific Gateway name, which avoids the multiple levels of indirection of the previous model. This model is appropriate for users who expect their Contour instance to correspond to a single static Gateway; the lifecycle of the Gateway and the lifecycle of the Contour instance are tied together.
Note that the Gateway provisioner will make use of the **Gateway name** mode of configuring Contour, to tie each instance of Contour it provisions directly to a specific Gateway.
@@ -88,13 +88,13 @@ This custom resource definition embeds a ContourConfiguration spec, as well as a
This ContourDeployment resource serves as a template that defines exactly how to customize each Contour + Envoy instance that is created for this GatewayClass.
When a Gateway is provisioned, the Gateway provisioner will use the configuration options specified to customize the YAML that is applied, and will pass through a copy of the ContourConfiguration data to the Gateway’s Contour instance.
-Note that, according to the Gateway API documentation:
+Note that, according to the Gateway API documentation:
> It is recommended that [GatewayClass] be used as a template for Gateways.
> This means that a Gateway is based on the state of the GatewayClass at the time it was created and changes to the GatewayClass or associated parameters are not propagated down to existing Gateways.
-> This recommendation is intended to limit the blast radius of changes to GatewayClass or associated parameters.
-(ref. [GatewayClass API reference documentation](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.GatewayClass))
+> This recommendation is intended to limit the blast radius of changes to GatewayClass or associated parameters.
+(ref. [GatewayClass API reference documentation](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.GatewayClass))
-For Contour, this means that after a Gateway has been provisioned, the Gateway provisioner will not apply subsequent changes to the GatewayClass/ContourDeployment to it.
+For Contour, this means that after a Gateway has been provisioned, the Gateway provisioner will not apply subsequent changes to the GatewayClass/ContourDeployment to it.
This also means that Contour users can modify the ContourConfigurations used by their running Contours after instantiation, without having those changes overwritten by the Gateway provisioner.
Since the Gateway provisioner supports multiple GatewayClasses, each GatewayClass can have a different ContourDeployment reference, corresponding to different sets of Gateway configuration profiles that the infrastructure provider offers (e.g. an external vs. internal profile).
@@ -109,7 +109,7 @@ This proposal is related to, but separate from, the managed Envoy proposal:
- If we don’t implement managed Envoy, then the Gateway provisioner implements the Envoy provisioning logic.
- Either way, the logic needs to be implemented and live somewhere.
-Advantages of doing managed Envoy:
+Advantages of doing managed Envoy:
- Users who don’t want automated Gateway provisioning, but do want automated Envoy provisioning, can have it
- Users who don’t want to use Gateway API can still take advantage of automated Envoy provisioning
- Listener programming (combo of Envoy service + Envoy listeners) can be done in one place
@@ -122,7 +122,7 @@ Disadvantages of doing managed Envoy:
We considered continuing to invest in the Contour Operator, including the Contour CRD, with an eye towards bringing it to beta and eventually GA, and implementing the Gateway provisioner within the Operator (since they would share much of the underlying logic).
Our first challenge with this option is that we have failed to establish a community of contributors around the Operator, so the work would need to be done by the core Contour team of maintainers, which would detract from Contour development.
Our second challenge is that we have seen and heard of only limited usage of the Operator in the wild, so it’s not clear to us that continuing to develop it is an important priority for our users.
-Finally, continuing to maintain the Operator in a separate repository creates development overhead, since various bits of code and configuration must be manually kept in sync between Contour and the Operator.
+Finally, continuing to maintain the Operator in a separate repository creates development overhead, since various bits of code and configuration must be manually kept in sync between Contour and the Operator.
### Alternative 2
We also considered converting the existing Contour Operator into a Gateway provisioner, dropping the Contour CRD, and only supporting a Gateway API-based dynamic provisioning workflow.
diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml
index 9787514aa67..e2998b0d84d 100644
--- a/examples/contour/01-crds.yaml
+++ b/examples/contour/01-crds.yaml
@@ -1620,6 +1620,13 @@ spec:
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
+ watchNamespaces:
+ description: |-
+ WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ to only watch this set of namespaces
+ items:
+ type: string
+ type: array
type: object
envoy:
description: |-
diff --git a/examples/namespaced-gatewayapi/gatewayclass-clusterrole.yaml b/examples/namespaced-gatewayapi/gatewayclass-clusterrole.yaml
new file mode 100644
index 00000000000..aef9c689852
--- /dev/null
+++ b/examples/namespaced-gatewayapi/gatewayclass-clusterrole.yaml
@@ -0,0 +1,34 @@
+# GatewayClass is cluster-scoped, so its required ClusterRole and ClusterRoleBinding are split out into a separate file
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: contour-gatewayclass
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: contour-gatewayclass
+subjects:
+- kind: ServiceAccount
+ name: contour
+ namespace: projectcontour
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: contour-gatewayclass
+rules:
+- apiGroups:
+ - gateway.networking.k8s.io
+ resources:
+ - gatewayclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - gateway.networking.k8s.io
+ resources:
+ - gatewayclasses/status
+ verbs:
+ - update
diff --git a/examples/namespaced-gatewayapi/kustomization.yaml b/examples/namespaced-gatewayapi/kustomization.yaml
new file mode 100644
index 00000000000..b2c3f6173cd
--- /dev/null
+++ b/examples/namespaced-gatewayapi/kustomization.yaml
@@ -0,0 +1,79 @@
+# This kustomize file is an example of deploying Contour with a custom ClusterRole and Role so that
+# Contour watches namespaced resources plus the cluster-scoped GatewayClass.
+# It changes the cluster-wide RBAC rules in the example deployment manifest to namespaced RBAC rules.
+# It is meant to be used together with the contour serve --watch-namespaces= option to restrict
+# Contour to a certain namespace.
+# It also adds a ClusterRole and ClusterRoleBinding that cover only GatewayClass.
+# Run with:
+# kubectl kustomize examples/namespaced-gatewayapi/
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../render/
+ - gatewayclass-clusterrole.yaml
+patches:
+ - patch: |-
+ - op: replace
+ path: /kind
+ value: RoleBinding
+ - op: replace
+ path: /metadata/name
+ value: contour-resources
+ - op: replace
+ path: /roleRef/kind
+ value: Role
+ - op: replace
+ path: /roleRef/name
+ value: contour-resources
+ - op: add
+ path: /metadata/namespace
+ value: projectcontour
+ target:
+ group: rbac.authorization.k8s.io
+ kind: ClusterRoleBinding
+ name: contour
+ version: v1
+ - patch: |-
+ - op: replace
+ path: /kind
+ value: Role
+ - op: replace
+ path: /metadata/name
+ value: contour-resources
+ - op: add
+ path: /metadata/namespace
+ value: projectcontour
+ - op: replace
+ path: /rules/1
+ value: |-
+ - apiGroups:
+ - gateway.networking.k8s.io
+ resources:
+ - gateways
+ - grpcroutes
+ - httproutes
+ - referencegrants
+ - tcproutes
+ - tlsroutes
+ verbs:
+ - get
+ - list
+ - watch
+ - op: replace
+ path: /rules/2
+ value: |-
+ - apiGroups:
+ - gateway.networking.k8s.io
+ resources:
+ - gateways/status
+ - grpcroutes/status
+ - httproutes/status
+ - tcproutes/status
+ - tlsroutes/status
+ verbs:
+ - update
+ target:
+ group: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: contour
+ version: v1
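The kustomization above only narrows RBAC; as its header comment notes, it is meant to be paired with the contour serve --watch-namespaces= option. One way to wire that up is an extra entry under patches, sketched here under the assumption that the rendered Deployment is named contour in the projectcontour namespace and that the contour container is the first container in the pod spec:

```yaml
  - patch: |-
      - op: add
        path: /spec/template/spec/containers/0/args/-
        value: --watch-namespaces=projectcontour
    target:
      group: apps
      kind: Deployment
      name: contour
      version: v1
```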
diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml
index ed807e79e0d..c09441f19fc 100644
--- a/examples/render/contour-deployment.yaml
+++ b/examples/render/contour-deployment.yaml
@@ -1839,6 +1839,13 @@ spec:
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
+ watchNamespaces:
+ description: |-
+ WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ to only watch this set of namespaces
+ items:
+ type: string
+ type: array
type: object
envoy:
description: |-
diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml
index 518e9803a5e..2997c06f8f0 100644
--- a/examples/render/contour-gateway-provisioner.yaml
+++ b/examples/render/contour-gateway-provisioner.yaml
@@ -1631,6 +1631,13 @@ spec:
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
+ watchNamespaces:
+ description: |-
+ WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ to only watch this set of namespaces
+ items:
+ type: string
+ type: array
type: object
envoy:
description: |-
diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml
index 1e2bdedfd16..37504a02bde 100644
--- a/examples/render/contour-gateway.yaml
+++ b/examples/render/contour-gateway.yaml
@@ -1842,6 +1842,13 @@ spec:
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
+ watchNamespaces:
+ description: |-
+ WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ to only watch this set of namespaces
+ items:
+ type: string
+ type: array
type: object
envoy:
description: |-
diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml
index 49970d39bdd..1583d0dc866 100644
--- a/examples/render/contour.yaml
+++ b/examples/render/contour.yaml
@@ -1839,6 +1839,13 @@ spec:
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
+ watchNamespaces:
+ description: |-
+ WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ to only watch this set of namespaces
+ items:
+ type: string
+ type: array
type: object
envoy:
description: |-
diff --git a/internal/provisioner/controller/gateway.go b/internal/provisioner/controller/gateway.go
index 8907dbea37c..3b004adbd20 100644
--- a/internal/provisioner/controller/gateway.go
+++ b/internal/provisioner/controller/gateway.go
@@ -261,6 +261,8 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
contourModel.Spec.KubernetesLogLevel = contourParams.KubernetesLogLevel
+ contourModel.Spec.WatchNamespaces = contourParams.WatchNamespaces
+
if contourParams.Deployment != nil &&
contourParams.Deployment.Strategy != nil {
contourModel.Spec.ContourDeploymentStrategy = *contourParams.Deployment.Strategy
diff --git a/internal/provisioner/model/model.go b/internal/provisioner/model/model.go
index 026802104b7..7392013b345 100644
--- a/internal/provisioner/model/model.go
+++ b/internal/provisioner/model/model.go
@@ -14,6 +14,8 @@
package model
import (
+ "slices"
+
contourv1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1"
"github.com/projectcontour/contour/internal/ref"
@@ -134,6 +136,10 @@ func (c *Contour) EnvoyTolerationsExist() bool {
return false
}
+func (c *Contour) WatchAllNamespaces() bool {
+ return c.Spec.WatchNamespaces == nil || slices.Contains(c.Spec.WatchNamespaces, corev1.NamespaceAll)
+}
+
// ContourSpec defines the desired state of Contour.
type ContourSpec struct {
// ContourReplicas is the desired number of Contour replicas. If unset,
@@ -245,6 +251,11 @@ type ContourSpec struct {
// If the value is 0, the overload manager is disabled.
// defaults to 0.
EnvoyMaxHeapSizeBytes uint64
+
+ // WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+ // to only watch this set of namespaces
+ // The default is nil, in which case Contour watches resources in all namespaces
+ WatchNamespaces []string
}
// WorkloadType is the type of Kubernetes workload to use for a component.
diff --git a/internal/provisioner/objects/deployment/deployment.go b/internal/provisioner/objects/deployment/deployment.go
index 8a14230aa04..7ef3477e480 100644
--- a/internal/provisioner/objects/deployment/deployment.go
+++ b/internal/provisioner/objects/deployment/deployment.go
@@ -17,6 +17,7 @@ import (
"context"
"fmt"
"path/filepath"
+ "strings"
"github.com/projectcontour/contour/apis/projectcontour/v1alpha1"
"github.com/projectcontour/contour/internal/provisioner/equality"
@@ -100,6 +101,10 @@ func DesiredDeployment(contour *model.Contour, image string) *appsv1.Deployment
args = append(args, "--debug")
}
+ if !contour.WatchAllNamespaces() {
+ args = append(args, fmt.Sprintf("--watch-namespaces=%s", strings.Join(contour.Spec.WatchNamespaces, ",")))
+ }
+
// Pass the insecure/secure flags to Contour if using non-default ports.
for _, port := range contour.Spec.NetworkPublishing.Envoy.Ports {
switch {
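With this change, the container spec rendered by the provisioner picks up the flag only when watchNamespaces is set and does not amount to "watch everything". Roughly (namespaces made up, other args elided):

```yaml
containers:
- name: contour
  args:
  - serve
  # ...other args set by the provisioner...
  - --watch-namespaces=team-a,team-b   # appended when WatchAllNamespaces() is false
```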
diff --git a/internal/provisioner/objects/deployment/deployment_test.go b/internal/provisioner/objects/deployment/deployment_test.go
index ab5843ce9ff..bbea2a3fb73 100644
--- a/internal/provisioner/objects/deployment/deployment_test.go
+++ b/internal/provisioner/objects/deployment/deployment_test.go
@@ -15,6 +15,7 @@ package deployment
import (
"fmt"
+ "strings"
"testing"
"github.com/projectcontour/contour/apis/projectcontour/v1alpha1"
@@ -134,6 +135,16 @@ func checkDeploymentHasStrategy(t *testing.T, ds *appsv1.Deployment, expected ap
t.Errorf("deployment has unexpected strategy %q", expected)
}
+func ensureContainerDoesntHaveArg(t *testing.T, container *corev1.Container, arg string) {
+ t.Helper()
+
+ for _, a := range container.Args {
+ if a == arg {
+ t.Errorf("container has arg %q", arg)
+ }
+ }
+}
+
func TestDesiredDeployment(t *testing.T) {
name := "deploy-test"
cntr := model.Default(fmt.Sprintf("%s-ns", name), name)
@@ -212,6 +223,49 @@ func TestDesiredDeployment(t *testing.T) {
checkDeploymentHasStrategy(t, deploy, cntr.Spec.ContourDeploymentStrategy)
}
+func TestDesiredDeploymentWhenSettingWatchNamespaces(t *testing.T) {
+ testCases := []struct {
+ description string
+ namespaces []string
+ expectArgExist bool
+ }{
+ {
+ description: "several valid namespaces",
+ namespaces: []string{"ns1", "ns2"},
+ expectArgExist: true,
+ },
+ {
+ description: "single valid namespace",
+ namespaces: []string{"ns1", "ns2"},
+ expectArgExist: true,
+ },
+ {
+ description: "include empty namespace",
+ namespaces: []string{"ns1", ""},
+ expectArgExist: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.description, func(t *testing.T) {
+ name := "deploy-test"
+ cntr := model.Default(fmt.Sprintf("%s-ns", name), name)
+ icName := "test-ic"
+ cntr.Spec.IngressClassName = &icName
+ // Change the Contour watch namespaces flag
+ cntr.Spec.WatchNamespaces = tc.namespaces
+ deploy := DesiredDeployment(cntr, "ghcr.io/projectcontour/contour:test")
+ container := checkDeploymentHasContainer(t, deploy, contourContainerName, true)
+ arg := fmt.Sprintf("--watch-namespaces=%s", strings.Join(tc.namespaces, ","))
+ if tc.expectArgExist {
+ checkContainerHasArg(t, container, arg)
+ } else {
+ ensureContainerDoesntHaveArg(t, container, arg)
+ }
+ })
+ }
+}
+
func TestNodePlacementDeployment(t *testing.T) {
name := "selector-test"
cntr := model.Default(fmt.Sprintf("%s-ns", name), name)
diff --git a/internal/provisioner/objects/rbac/clusterrole/cluster_role.go b/internal/provisioner/objects/rbac/clusterrole/cluster_role.go
index b610bc7e687..7ab6885e27d 100644
--- a/internal/provisioner/objects/rbac/clusterrole/cluster_role.go
+++ b/internal/provisioner/objects/rbac/clusterrole/cluster_role.go
@@ -21,23 +21,16 @@ import (
"github.com/projectcontour/contour/internal/provisioner/labels"
"github.com/projectcontour/contour/internal/provisioner/model"
"github.com/projectcontour/contour/internal/provisioner/objects"
- corev1 "k8s.io/api/core/v1"
- discoveryv1 "k8s.io/api/discovery/v1"
- networkingv1 "k8s.io/api/networking/v1"
+ "github.com/projectcontour/contour/internal/provisioner/objects/rbac/util"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
-)
-
-const (
- contourV1GroupName = "projectcontour.io"
)
// EnsureClusterRole ensures a ClusterRole resource exists with the provided name
// and contour namespace/name for the owning contour labels.
-func EnsureClusterRole(ctx context.Context, cli client.Client, name string, contour *model.Contour) error {
- desired := desiredClusterRole(name, contour)
+func EnsureClusterRole(ctx context.Context, cli client.Client, name string, contour *model.Contour, gatewayClassOnly bool) error {
+ desired := desiredClusterRole(name, contour, gatewayClassOnly)
// Enclose contour.
updater := func(ctx context.Context, cli client.Client, current, desired *rbacv1.ClusterRole) error {
@@ -49,22 +42,8 @@ func EnsureClusterRole(ctx context.Context, cli client.Client, name string, cont
// desiredClusterRole constructs an instance of the desired ClusterRole resource with
// the provided name and contour namespace/name for the owning contour labels.
-func desiredClusterRole(name string, contour *model.Contour) *rbacv1.ClusterRole {
- var (
- createGetUpdate = []string{"create", "get", "update"}
- getListWatch = []string{"get", "list", "watch"}
- update = []string{"update"}
- )
-
- policyRuleFor := func(apiGroup string, verbs []string, resources ...string) rbacv1.PolicyRule {
- return rbacv1.PolicyRule{
- Verbs: verbs,
- APIGroups: []string{apiGroup},
- Resources: resources,
- }
- }
-
- return &rbacv1.ClusterRole{
+func desiredClusterRole(name string, contour *model.Contour, gatewayClassOnly bool) *rbacv1.ClusterRole {
+ role := &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
Kind: "Role",
},
@@ -73,27 +52,15 @@ func desiredClusterRole(name string, contour *model.Contour) *rbacv1.ClusterRole
Labels: contour.CommonLabels(),
Annotations: contour.CommonAnnotations(),
},
- Rules: []rbacv1.PolicyRule{
- // Core Contour-watched resources.
- policyRuleFor(corev1.GroupName, getListWatch, "secrets", "endpoints", "services", "namespaces"),
-
- // Discovery Contour-watched resources.
- policyRuleFor(discoveryv1.GroupName, getListWatch, "endpointslices"),
-
- // Gateway API resources.
- // Note, ReferenceGrant does not currently have a .status field so it's omitted from the status rule.
- policyRuleFor(gatewayv1alpha2.GroupName, getListWatch, "gatewayclasses", "gateways", "httproutes", "tlsroutes", "grpcroutes", "tcproutes", "referencegrants"),
- policyRuleFor(gatewayv1alpha2.GroupName, update, "gatewayclasses/status", "gateways/status", "httproutes/status", "tlsroutes/status", "grpcroutes/status", "tcproutes/status"),
-
- // Ingress resources.
- policyRuleFor(networkingv1.GroupName, getListWatch, "ingresses"),
- policyRuleFor(networkingv1.GroupName, createGetUpdate, "ingresses/status"),
-
- // Contour CRDs.
- policyRuleFor(contourV1GroupName, getListWatch, "httpproxies", "tlscertificatedelegations", "extensionservices", "contourconfigurations"),
- policyRuleFor(contourV1GroupName, createGetUpdate, "httpproxies/status", "extensionservices/status", "contourconfigurations/status"),
- },
+ Rules: util.ClusterScopePolicyRulesForContour(),
+ }
+ if gatewayClassOnly {
+ return role
}
+
+ // add basic rules to role
+ role.Rules = append(role.Rules, util.BasicPolicyRulesForContour()...)
+ return role
}
// updateClusterRoleIfNeeded updates a ClusterRole resource if current does not match desired,
diff --git a/internal/provisioner/objects/rbac/clusterrole/cluster_role_test.go b/internal/provisioner/objects/rbac/clusterrole/cluster_role_test.go
index 2f6a961d32f..3a953238b99 100644
--- a/internal/provisioner/objects/rbac/clusterrole/cluster_role_test.go
+++ b/internal/provisioner/objects/rbac/clusterrole/cluster_role_test.go
@@ -46,7 +46,7 @@ func checkClusterRoleLabels(t *testing.T, cr *rbacv1.ClusterRole, expected map[s
func TestDesiredClusterRole(t *testing.T) {
name := "test-cr"
cntr := model.Default(fmt.Sprintf("%s-ns", name), name)
- cr := desiredClusterRole(name, cntr)
+ cr := desiredClusterRole(name, cntr, false)
checkClusterRoleName(t, cr, name)
ownerLabels := map[string]string{
model.ContourOwningGatewayNameLabel: cntr.Name,
diff --git a/internal/provisioner/objects/rbac/rbac.go b/internal/provisioner/objects/rbac/rbac.go
index a582980a349..c485bff4bec 100644
--- a/internal/provisioner/objects/rbac/rbac.go
+++ b/internal/provisioner/objects/rbac/rbac.go
@@ -53,10 +53,31 @@ func ensureContourRBAC(ctx context.Context, cli client.Client, contour *model.Co
return fmt.Errorf("failed to ensure service account %s/%s: %w", contour.Namespace, names.ServiceAccount, err)
}
- // Ensure cluster role & binding.
- if err := clusterrole.EnsureClusterRole(ctx, cli, names.ClusterRole, contour); err != nil {
- return fmt.Errorf("failed to ensure cluster role %s: %w", names.ClusterRole, err)
+ // By default, Contour watches all namespaces, so use the full cluster role and cluster role binding.
+ clusterRoleForGatewayclassOnly := true
+ if contour.WatchAllNamespaces() {
+ // Ensure cluster role & binding.
+ if err := clusterrole.EnsureClusterRole(ctx, cli, names.ClusterRole, contour, !clusterRoleForGatewayclassOnly); err != nil {
+ return fmt.Errorf("failed to ensure cluster role %s: %w", names.ClusterRole, err)
+ }
+ } else {
+ // Ensure cluster role & cluster binding for gatewayclass first since it's cluster scope variables
+ if err := clusterrole.EnsureClusterRole(ctx, cli, names.ClusterRole, contour, clusterRoleForGatewayclassOnly); err != nil {
+ return fmt.Errorf("failed to ensure cluster role %s: %w", names.ClusterRole, err)
+ }
+
+ // Ensure a role in each namespace listed in contour.spec.watchNamespaces.
+ if err := role.EnsureRolesInNamespaces(ctx, cli, names.Role, contour, contour.Spec.WatchNamespaces); err != nil {
+ return fmt.Errorf("failed to ensure role %s: %w", names.Role, err)
+ }
+ // Ensure a role binding in each namespace listed in contour.spec.watchNamespaces.
+ if err := rolebinding.EnsureRoleBindingsInNamespaces(ctx, cli, names.RoleBinding, names.ServiceAccount, names.Role, contour, contour.Spec.WatchNamespaces); err != nil {
+ return fmt.Errorf("failed to ensure role binding %s: %w", names.RoleBinding, err)
+ }
}
+
if err := clusterrolebinding.EnsureClusterRoleBinding(ctx, cli, names.ClusterRoleBinding, names.ClusterRole, names.ServiceAccount, contour); err != nil {
return fmt.Errorf("failed to ensure cluster role binding %s: %w", names.ClusterRoleBinding, err)
}
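To visualize the resulting RBAC layout when watchNamespaces is set: the ClusterRole is reduced to the GatewayClass rules, and a Role/RoleBinding pair is stamped out per watched namespace. A sketch with placeholder names (the real names come from the provisioner's naming helpers, and the rules come from util.BasicPolicyRulesForContour):

```yaml
# One Role/RoleBinding pair per namespace in spec.contour.watchNamespaces.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: <names.Role>                  # placeholder
  namespace: team-a
rules:
- apiGroups: [""]
  resources: ["secrets", "endpoints", "services", "namespaces"]
  verbs: ["get", "list", "watch"]
# ...remaining namespaced rules elided...
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: <names.RoleBinding>           # placeholder
  namespace: team-a
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: <names.Role>
subjects:
- kind: ServiceAccount
  name: <names.ServiceAccount>        # single service account, kept in the Gateway's namespace
  namespace: <gateway-namespace>
```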
diff --git a/internal/provisioner/objects/rbac/role/role.go b/internal/provisioner/objects/rbac/role/role.go
index d326348036c..9e7c60dc932 100644
--- a/internal/provisioner/objects/rbac/role/role.go
+++ b/internal/provisioner/objects/rbac/role/role.go
@@ -21,10 +21,12 @@ import (
"github.com/projectcontour/contour/internal/provisioner/labels"
"github.com/projectcontour/contour/internal/provisioner/model"
"github.com/projectcontour/contour/internal/provisioner/objects"
+ "github.com/projectcontour/contour/internal/provisioner/objects/rbac/util"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -34,15 +36,36 @@ func EnsureControllerRole(ctx context.Context, cli client.Client, name string, c
desired := desiredControllerRole(name, contour)
updater := func(ctx context.Context, cli client.Client, current, desired *rbacv1.Role) error {
- _, err := updateRoleIfNeeded(ctx, cli, contour, current, desired)
+ err := updateRoleIfNeeded(ctx, cli, contour, current, desired)
return err
}
return objects.EnsureObject(ctx, cli, desired, updater, &rbacv1.Role{})
}
+// EnsureRolesInNamespaces ensures a Role resource exists in each of the specified
+// namespaces, so that Contour can manage resources in those namespaces. The
+// contour namespace/name is used for the owning contour labels.
+func EnsureRolesInNamespaces(ctx context.Context, cli client.Client, name string, contour *model.Contour, namespaces []string) error {
+ errs := []error{}
+ for _, ns := range namespaces {
+ desired := desiredRoleForContourInNamespace(name, ns, contour)
+
+ updater := func(ctx context.Context, cli client.Client, current, desired *rbacv1.Role) error {
+ err := updateRoleIfNeeded(ctx, cli, contour, current, desired)
+ return err
+ }
+ if err := objects.EnsureObject(ctx, cli, desired, updater, &rbacv1.Role{}); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return kerrors.NewAggregate(errs)
+}
+
// desiredControllerRole constructs an instance of the desired Role resource with the
-// provided ns/name and contour namespace/name for the owning contour labels for
+// provided ns/name and using contour namespace/name for the owning contour labels for
// the Contour controller.
func desiredControllerRole(name string, contour *model.Contour) *rbacv1.Role {
role := &rbacv1.Role{
@@ -72,17 +95,34 @@ func desiredControllerRole(name string, contour *model.Contour) *rbacv1.Role {
return role
}
+// desiredRoleForContourInNamespace constructs an instance of the desired Role resource with the
+// provided name in the provided namespace, using contour namespace/name for the corresponding Contour instance.
+func desiredRoleForContourInNamespace(name, namespace string, contour *model.Contour) *rbacv1.Role {
+ return &rbacv1.Role{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Role",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: contour.CommonLabels(),
+ Annotations: contour.CommonAnnotations(),
+ },
+ Rules: util.BasicPolicyRulesForContour(),
+ }
+}
+
// updateRoleIfNeeded updates a Role resource if current does not match desired,
// using contour to verify the existence of owner labels.
-func updateRoleIfNeeded(ctx context.Context, cli client.Client, contour *model.Contour, current, desired *rbacv1.Role) (*rbacv1.Role, error) {
+func updateRoleIfNeeded(ctx context.Context, cli client.Client, contour *model.Contour, current, desired *rbacv1.Role) error {
if labels.AnyExist(current, model.OwnerLabels(contour)) {
role, updated := equality.RoleConfigChanged(current, desired)
if updated {
if err := cli.Update(ctx, role); err != nil {
- return nil, fmt.Errorf("failed to update cluster role %s/%s: %w", role.Namespace, role.Name, err)
+ return fmt.Errorf("failed to update cluster role %s/%s: %w", role.Namespace, role.Name, err)
}
- return role, nil
+ return nil
}
}
- return current, nil
+ return nil
}
diff --git a/internal/provisioner/objects/rbac/rolebinding/role_binding.go b/internal/provisioner/objects/rbac/rolebinding/role_binding.go
index 19315dac051..edf63fac366 100644
--- a/internal/provisioner/objects/rbac/rolebinding/role_binding.go
+++ b/internal/provisioner/objects/rbac/rolebinding/role_binding.go
@@ -21,18 +21,18 @@ import (
"github.com/projectcontour/contour/internal/provisioner/labels"
"github.com/projectcontour/contour/internal/provisioner/model"
"github.com/projectcontour/contour/internal/provisioner/objects"
-
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// EnsureRoleBinding ensures a RoleBinding resource exists with the provided
-// ns/name and contour namespace/name for the owning contour labels.
+// ns/name and using contour namespace/name for the owning contour labels.
// The RoleBinding will use svcAct for the subject and role for the role reference.
func EnsureRoleBinding(ctx context.Context, cli client.Client, name, svcAct, role string, contour *model.Contour) error {
- desired := desiredRoleBinding(name, svcAct, role, contour)
+ desired := desiredRoleBindingInNamespace(name, svcAct, role, contour.Namespace, contour)
// Enclose contour.
updater := func(ctx context.Context, cli client.Client, current, desired *rbacv1.RoleBinding) error {
@@ -42,26 +42,46 @@ func EnsureRoleBinding(ctx context.Context, cli client.Client, name, svcAct, rol
return objects.EnsureObject(ctx, cli, desired, updater, &rbacv1.RoleBinding{})
}
-// desiredRoleBinding constructs an instance of the desired RoleBinding resource
-// with the provided name in Contour spec Namespace, using contour namespace/name
+// EnsureRoleBindingsInNamespaces ensures a RoleBinding resource exists in each of the provided
+// namespaces with the provided name, using contour namespace/name for the owning contour labels.
+// The RoleBindings all use the same svcAct for the subject and role for the role reference.
+func EnsureRoleBindingsInNamespaces(ctx context.Context, cli client.Client, name, svcAct, role string, contour *model.Contour, namespaces []string) error {
+ errs := []error{}
+ for _, ns := range namespaces {
+ desired := desiredRoleBindingInNamespace(name, svcAct, role, ns, contour)
+
+ // Enclose contour.
+ updater := func(ctx context.Context, cli client.Client, current, desired *rbacv1.RoleBinding) error {
+ return updateRoleBindingIfNeeded(ctx, cli, contour, current, desired)
+ }
+ err := objects.EnsureObject(ctx, cli, desired, updater, &rbacv1.RoleBinding{})
+ errs = append(errs, err)
+ }
+
+ return kerrors.NewAggregate(errs)
+}
+
+// desiredRoleBindingInNamespace constructs an instance of the desired RoleBinding resource
+// with the provided name in the provided namespace, using contour namespace/name
// for the owning contour labels. The RoleBinding will use svcAct for the subject
// and role for the role reference.
-func desiredRoleBinding(name, svcAcctRef, roleRef string, contour *model.Contour) *rbacv1.RoleBinding {
+func desiredRoleBindingInNamespace(name, svcAcctRef, roleRef, namespace string, contour *model.Contour) *rbacv1.RoleBinding {
rb := &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
- Namespace: contour.Namespace,
+ Namespace: namespace,
Name: name,
Labels: contour.CommonLabels(),
Annotations: contour.CommonAnnotations(),
},
}
rb.Subjects = []rbacv1.Subject{{
- Kind: "ServiceAccount",
- APIGroup: corev1.GroupName,
- Name: svcAcctRef,
+ Kind: "ServiceAccount",
+ APIGroup: corev1.GroupName,
+ Name: svcAcctRef,
+ // The same service account, in the contour owner's namespace, is used for every role binding.
Namespace: contour.Namespace,
}}
rb.RoleRef = rbacv1.RoleRef{
diff --git a/internal/provisioner/objects/rbac/rolebinding/role_binding_test.go b/internal/provisioner/objects/rbac/rolebinding/role_binding_test.go
index 557c94eb36b..7d81c0a8f0f 100644
--- a/internal/provisioner/objects/rbac/rolebinding/role_binding_test.go
+++ b/internal/provisioner/objects/rbac/rolebinding/role_binding_test.go
@@ -33,6 +33,16 @@ func checkRoleBindingName(t *testing.T, rb *rbacv1.RoleBinding, expected string)
t.Errorf("role binding %q has unexpected name", rb.Name)
}
+func checkRoleBindingNamespace(t *testing.T, rb *rbacv1.RoleBinding, expected string) {
+ t.Helper()
+
+ if rb.Namespace == expected {
+ return
+ }
+
+ t.Errorf("role binding %q has unexpected namespace", rb.Namespace)
+}
+
func checkRoleBindingLabels(t *testing.T, rb *rbacv1.RoleBinding, expected map[string]string) {
t.Helper()
@@ -66,11 +76,13 @@ func checkRoleBindingRole(t *testing.T, rb *rbacv1.RoleBinding, expected string)
func TestDesiredRoleBinding(t *testing.T) {
name := "job-test"
cntr := model.Default(fmt.Sprintf("%s-ns", name), name)
+ testns := "test-ns"
rbName := "test-rb"
svcAcct := "test-svc-acct-ref"
roleRef := "test-role-ref"
- rb := desiredRoleBinding(rbName, svcAcct, roleRef, cntr)
+ rb := desiredRoleBindingInNamespace(rbName, svcAcct, roleRef, testns, cntr)
checkRoleBindingName(t, rb, rbName)
+ checkRoleBindingNamespace(t, rb, testns)
ownerLabels := map[string]string{
model.ContourOwningGatewayNameLabel: cntr.Name,
model.GatewayAPIOwningGatewayNameLabel: cntr.Name,
diff --git a/internal/provisioner/objects/rbac/util/util.go b/internal/provisioner/objects/rbac/util/util.go
new file mode 100644
index 00000000000..493ad9c9706
--- /dev/null
+++ b/internal/provisioner/objects/rbac/util/util.go
@@ -0,0 +1,74 @@
+// Copyright Project Contour Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ discoveryv1 "k8s.io/api/discovery/v1"
+ networkingv1 "k8s.io/api/networking/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
+)
+
+const (
+ ContourV1GroupName = "projectcontour.io"
+)
+
+var (
+ createGetUpdate = []string{"create", "get", "update"}
+ getListWatch = []string{"get", "list", "watch"}
+ update = []string{"update"}
+)
+
+// PolicyRuleFor returns a PolicyRule with the provided apiGroup, verbs and resources.
+func PolicyRuleFor(apiGroup string, verbs []string, resources ...string) rbacv1.PolicyRule {
+ return rbacv1.PolicyRule{
+ Verbs: verbs,
+ APIGroups: []string{apiGroup},
+ Resources: resources,
+ }
+}
+
+// BasicPolicyRulesForContour returns the set of basic policy rules that Contour requires.
+func BasicPolicyRulesForContour() []rbacv1.PolicyRule {
+ return []rbacv1.PolicyRule{
+ // Core Contour-watched resources.
+ PolicyRuleFor(corev1.GroupName, getListWatch, "secrets", "endpoints", "services", "namespaces"),
+
+ // Discovery Contour-watched resources.
+ PolicyRuleFor(discoveryv1.GroupName, getListWatch, "endpointslices"),
+
+ // Gateway API resources.
+ // Note, ReferenceGrant does not currently have a .status field so it's omitted from the status rule.
+ PolicyRuleFor(gatewayv1alpha2.GroupName, getListWatch, "gateways", "httproutes", "tlsroutes", "grpcroutes", "tcproutes", "referencegrants"),
+ PolicyRuleFor(gatewayv1alpha2.GroupName, update, "gateways/status", "httproutes/status", "tlsroutes/status", "grpcroutes/status", "tcproutes/status"),
+
+ // Ingress resources.
+ PolicyRuleFor(networkingv1.GroupName, getListWatch, "ingresses"),
+ PolicyRuleFor(networkingv1.GroupName, createGetUpdate, "ingresses/status"),
+
+ // Contour CRDs.
+ PolicyRuleFor(ContourV1GroupName, getListWatch, "httpproxies", "tlscertificatedelegations", "extensionservices", "contourconfigurations"),
+ PolicyRuleFor(ContourV1GroupName, createGetUpdate, "httpproxies/status", "extensionservices/status", "contourconfigurations/status"),
+ }
+}
+
+// ClusterScopePolicyRulesForContour returns the set of rules that cover only cluster-scoped objects.
+func ClusterScopePolicyRulesForContour() []rbacv1.PolicyRule {
+ return []rbacv1.PolicyRule{
+ // GatewayClass only.
+ PolicyRuleFor(gatewayv1alpha2.GroupName, getListWatch, "gatewayclasses"),
+ PolicyRuleFor(gatewayv1alpha2.GroupName, update, "gatewayclasses/status"),
+ }
+}
diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html
index 0f55c8b0e46..a542f9de9d7 100644
--- a/site/content/docs/main/config/api-reference.html
+++ b/site/content/docs/main/config/api-reference.html
@@ -6247,6 +6247,20 @@ ContourSettings
the annotations for Prometheus will be appended or overwritten with predefined value.
+
+watchNamespaces
+
+[]string
+
+(Optional)
+WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+to only watch this set of namespaces
+
CustomTag