Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feat: adding configurable parameter for custom role label in multiple resources #2802

Draft
wants to merge 16 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions charts/postgres-operator/crds/operatorconfigurations.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -327,6 +327,9 @@ spec:
pod_role_label:
type: string
default: "spilo-role"
pod_leader_label_value:
type: string
default: "master"
pod_service_account_definition:
type: string
default: ""
Expand Down
1 change: 1 addition & 0 deletions charts/postgres-operator/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,7 @@ configKubernetes:
pod_management_policy: "ordered_ready"
# label assigned to the Postgres pods (and services/endpoints)
pod_role_label: spilo-role
pod_leader_label_value: master
# service account definition as JSON/YAML string to be used by postgres cluster pods
# pod_service_account_definition: ""

Expand Down
4 changes: 4 additions & 0 deletions docs/reference/operator_parameters.md
Original file line number Diff line number Diff line change
Expand Up @@ -437,6 +437,10 @@ configuration they are grouped under the `kubernetes` key.
name of the label assigned to the Postgres pods (and services/endpoints) by
the operator. The default is `spilo-role`.

* **pod_leader_label_value**
  value of the pod role label assigned to the Postgres pod running as the
  primary (leader). The default is `master`.

* **cluster_labels**
list of `name:value` pairs for additional labels assigned to the cluster
objects. The default is `application:spilo`.
Expand Down
1 change: 1 addition & 0 deletions manifests/configmap.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,7 @@ data:
pod_management_policy: "ordered_ready"
# pod_priority_class_name: "postgres-pod-priority"
pod_role_label: spilo-role
pod_leader_label_value: master
pod_service_account_definition: ""
pod_service_account_name: "postgres-pod"
pod_service_account_role_binding_definition: ""
Expand Down
3 changes: 3 additions & 0 deletions manifests/operatorconfiguration.crd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -325,6 +325,9 @@ spec:
pod_role_label:
type: string
default: "spilo-role"
pod_leader_label_value:
type: string
default: "master"
pod_service_account_definition:
type: string
default: ""
Expand Down
1 change: 1 addition & 0 deletions manifests/postgresql-operator-default-configuration.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@ configuration:
pod_management_policy: "ordered_ready"
# pod_priority_class_name: "postgres-pod-priority"
pod_role_label: spilo-role
pod_leader_label_value: master
# pod_service_account_definition: ""
pod_service_account_name: postgres-pod
# pod_service_account_role_binding_definition: ""
Expand Down
3 changes: 3 additions & 0 deletions pkg/apis/acid.zalan.do/v1/crds.go
Original file line number Diff line number Diff line change
Expand Up @@ -1497,6 +1497,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"pod_role_label": {
Type: "string",
},
"pod_leader_label_value": {
Type: "string",
},
"pod_service_account_definition": {
Type: "string",
},
Expand Down
1 change: 1 addition & 0 deletions pkg/apis/acid.zalan.do/v1/operator_configuration_type.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ type KubernetesMetaConfiguration struct {
InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"`
InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"`
PodRoleLabel string `json:"pod_role_label,omitempty"`
PodLeaderLabelValue string `json:"pod_leader_label_value,omitempty"`
ClusterLabels map[string]string `json:"cluster_labels,omitempty"`
InheritedLabels []string `json:"inherited_labels,omitempty"`
InheritedAnnotations []string `json:"inherited_annotations,omitempty"`
Expand Down
16 changes: 12 additions & 4 deletions pkg/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,14 @@ func (c *Cluster) clusterNamespace() string {
return c.ObjectMeta.Namespace
}

// masterRole returns the label value that identifies the primary (leader)
// Postgres pod. It is configurable via the pod_leader_label_value operator
// option (OpConfig.PodLeaderLabelValue) to support setups where Patroni
// reports the leader as e.g. "primary" instead of the default "master".
func (c *Cluster) masterRole() PostgresRole {
	return PostgresRole(c.OpConfig.PodLeaderLabelValue)
}

// replicaRole returns the label value that identifies replica Postgres pods.
// Unlike the leader value, this is intentionally fixed to "replica" — it is
// not configurable through the operator configuration.
func (c *Cluster) replicaRole() PostgresRole {
	return PostgresRole("replica")
}

func (c *Cluster) teamName() string {
// TODO: check Teams API for the actual name (in case the user passes an integer Id).
return c.Spec.TeamID
Expand Down Expand Up @@ -294,15 +302,15 @@ func (c *Cluster) Create() (err error) {
}
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")

for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} {

// if kubernetes_use_configmaps is set Patroni will create configmaps
// otherwise it will use endpoints
if !c.patroniKubernetesUseConfigMaps() {
if c.Endpoints[role] != nil {
return fmt.Errorf("%s endpoint already exists in the cluster", role)
}
if role == Master {
if role == c.masterRole() {
// replica endpoint will be created by the replica service. Master endpoint needs to be created by us,
// since the corresponding master service does not define any selectors.
ep, err = c.createEndpoint(role)
Expand Down Expand Up @@ -1213,7 +1221,7 @@ func (c *Cluster) Delete() error {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err)
}

for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} {
if !c.patroniKubernetesUseConfigMaps() {
if err := c.deleteEndpoint(role); err != nil {
anyErrors = true
Expand All @@ -1238,7 +1246,7 @@ func (c *Cluster) Delete() error {
// Delete connection pooler objects anyway, even if it's not mentioned in the
// manifest, just to not keep orphaned components in case if something went
// wrong
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} {
if err := c.deleteConnectionPooler(role); err != nil {
anyErrors = true
c.logger.Warningf("could not remove connection pooler: %v", err)
Expand Down
29 changes: 16 additions & 13 deletions pkg/cluster/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ const (
adminUserName = "admin"
exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
spiloConfigDiff = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
leaderLabelValue = "primary"
)

var logger = logrus.New().WithField("test", "cluster")
Expand All @@ -55,6 +56,7 @@ var cl = New(
},
Resources: config.Resources{
DownscalerAnnotations: []string{"downscaler/*"},
PodLeaderLabelValue: leaderLabelValue,
},
ConnectionPooler: config.ConnectionPooler{
User: poolerUserName,
Expand Down Expand Up @@ -126,7 +128,7 @@ func TestCreate(t *testing.T) {
Labels: map[string]string{
"application": "spilo",
"cluster-name": clusterName,
"spilo-role": "master",
"spilo-role": leaderLabelValue,
},
},
}
Expand All @@ -147,6 +149,7 @@ func TestCreate(t *testing.T) {
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
PodLeaderLabelValue: leaderLabelValue,
ResourceCheckInterval: time.Duration(3),
ResourceCheckTimeout: time.Duration(10),
},
Expand Down Expand Up @@ -663,7 +666,7 @@ func TestServiceAnnotations(t *testing.T) {
//MASTER
{
about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerSpec: &disabled,
enableMasterLoadBalancerOC: false,
enableTeamIdClusterPrefix: false,
Expand All @@ -673,7 +676,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerSpec: &enabled,
enableMasterLoadBalancerOC: false,
enableTeamIdClusterPrefix: false,
Expand All @@ -686,7 +689,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerSpec: &disabled,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
Expand All @@ -696,7 +699,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator config",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
Expand All @@ -708,7 +711,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with cluster annotations and load balancer enabled",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
Expand All @@ -721,7 +724,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with cluster annotations and load balancer disabled",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerSpec: &disabled,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
Expand All @@ -731,7 +734,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with operator annotations and load balancer enabled",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: map[string]string{"foo": "bar"},
Expand All @@ -744,7 +747,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with operator annotations override default annotations",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: map[string]string{
Expand All @@ -758,7 +761,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with cluster annotations override default annotations",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
Expand All @@ -772,7 +775,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with cluster annotations do not override external-dns annotations",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
Expand All @@ -786,7 +789,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with cluster name teamId prefix enabled",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: true,
serviceAnnotations: make(map[string]string),
Expand All @@ -798,7 +801,7 @@ func TestServiceAnnotations(t *testing.T) {
},
{
about: "Master with master service annotations override service annotations",
role: "master",
role: leaderLabelValue,
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
Expand Down
14 changes: 7 additions & 7 deletions pkg/cluster/connection_pooler.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ type ConnectionPoolerObjects struct {

func (c *Cluster) connectionPoolerName(role PostgresRole) string {
name := fmt.Sprintf("%s-%s", c.Name, constants.ConnectionPoolerResourceSuffix)
if role == Replica {
if role == c.replicaRole() {
name = fmt.Sprintf("%s-%s", name, "repl")
}
return name
Expand Down Expand Up @@ -537,8 +537,8 @@ func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acid
annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
}
// -repl suffix will be added by replicaDNSName
clusterNameWithPoolerSuffix := c.connectionPoolerName(Master)
if role == Master {
clusterNameWithPoolerSuffix := c.connectionPoolerName(c.masterRole())
if role == c.masterRole() {
dnsString = c.masterDNSName(clusterNameWithPoolerSuffix)
} else {
dnsString = c.replicaDNSName(clusterNameWithPoolerSuffix)
Expand All @@ -557,15 +557,15 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp

switch role {

case Replica:
case c.replicaRole():
// if the value is explicitly set in a Postgresql manifest, follow this setting
if spec.EnableReplicaPoolerLoadBalancer != nil {
return *spec.EnableReplicaPoolerLoadBalancer
}
// otherwise, follow the operator configuration
return c.OpConfig.EnableReplicaPoolerLoadBalancer

case Master:
case c.masterRole():
if spec.EnableMasterPoolerLoadBalancer != nil {
return *spec.EnableMasterPoolerLoadBalancer
}
Expand Down Expand Up @@ -877,9 +877,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
logPoolerEssentials(c.logger, oldSpec, newSpec)

// Check and perform the sync requirements for each of the roles.
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} {

if role == Master {
if role == c.masterRole() {
connectionPoolerNeeded = needMasterConnectionPoolerWorker(&newSpec.Spec)
} else {
connectionPoolerNeeded = needReplicaConnectionPoolerWorker(&newSpec.Spec)
Expand Down
Loading