diff --git a/install/helm/agones/templates/service/allocation.yaml b/install/helm/agones/templates/service/allocation.yaml index e360a125d8..8c538029f8 100644 --- a/install/helm/agones/templates/service/allocation.yaml +++ b/install/helm/agones/templates/service/allocation.yaml @@ -65,7 +65,7 @@ spec: targetPort: {{ .Values.agones.allocator.service.grpc.targetPort }} {{- if .Values.agones.allocator.service.grpc.appProtocol }} appProtocol: {{.Values.agones.allocator.service.grpc.appProtocol}} - {{- end}} + {{- end}} {{- if eq .Values.agones.allocator.service.serviceType "NodePort" }} nodePort: {{ .Values.agones.allocator.service.grpc.nodePort }} {{- end }} @@ -79,6 +79,7 @@ spec: loadBalancerIP: {{ .Values.agones.allocator.service.loadBalancerIP }} {{- end }} {{- if eq .Values.agones.allocator.service.serviceType "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.agones.allocator.service.externalTrafficPolicy }} {{- if .Values.agones.allocator.service.loadBalancerSourceRanges }} loadBalancerSourceRanges: {{ toYaml .Values.agones.allocator.service.loadBalancerSourceRanges | indent 4 }} diff --git a/install/helm/agones/values.yaml b/install/helm/agones/values.yaml index b63431b440..ec9dda4c83 100644 --- a/install/helm/agones/values.yaml +++ b/install/helm/agones/values.yaml @@ -231,6 +231,7 @@ agones: service: name: agones-allocator serviceType: LoadBalancer + externalTrafficPolicy: Cluster clusterIP: "" loadBalancerIP: "" loadBalancerSourceRanges: [] diff --git a/install/yaml/install.yaml b/install/yaml/install.yaml index 5ed11789a3..19d3c9596f 100644 --- a/install/yaml/install.yaml +++ b/install/yaml/install.yaml @@ -17822,6 +17822,7 @@ spec: targetPort: 8443 protocol: TCP type: LoadBalancer + externalTrafficPolicy: Cluster --- # Source: agones/templates/service/allocation.yaml apiVersion: v1 diff --git a/pkg/fleetautoscalers/fleetautoscalers_test.go b/pkg/fleetautoscalers/fleetautoscalers_test.go index eec82195c0..5a67717363 100644 --- a/pkg/fleetautoscalers/fleetautoscalers_test.go +++ b/pkg/fleetautoscalers/fleetautoscalers_test.go @@ -772,6 +772,31 @@ func TestApplyCounterPolicy(t *testing.T) { wantErr: true, }, }, + "Counter based fleet does not have any replicas": { + fleet: modifiedFleet(func(f *agonesv1.Fleet) { + f.Spec.Template.Spec.Counters = make(map[string]agonesv1.CounterStatus) + f.Spec.Template.Spec.Counters["gamers"] = agonesv1.CounterStatus{ + Count: 0, + Capacity: 7} + f.Status.Replicas = 0 + f.Status.ReadyReplicas = 0 + f.Status.AllocatedReplicas = 0 + f.Status.Counters = make(map[string]agonesv1.AggregatedCounterStatus) + f.Status.Counters["gamers"] = agonesv1.AggregatedCounterStatus{} + }), + featureFlags: string(utilruntime.FeatureCountsAndLists) + "=true", + cp: &autoscalingv1.CounterPolicy{ + Key: "gamers", + MaxCapacity: 100, + MinCapacity: 10, + BufferSize: intstr.FromInt(10), + }, + want: expected{ + replicas: 2, + limited: true, + wantErr: false, + }, + }, "fleet spec does not have counter": { fleet: modifiedFleet(func(f *agonesv1.Fleet) { f.Spec.Template.Spec.Counters = make(map[string]agonesv1.CounterStatus) @@ -1570,7 +1595,7 @@ func TestApplyListPolicy(t *testing.T) { wantErr: true, }, }, - "fleet does not have any replicas": { + "List based fleet does not have any replicas": { fleet: modifiedFleet(func(f *agonesv1.Fleet) { f.Spec.Template.Spec.Lists = make(map[string]agonesv1.ListStatus) f.Spec.Template.Spec.Lists["gamers"] = agonesv1.ListStatus{ diff --git a/pkg/gameserversets/controller.go b/pkg/gameserversets/controller.go index dc0b169a7b..0feef01729 
100644 --- a/pkg/gameserversets/controller.go +++ b/pkg/gameserversets/controller.go @@ -639,6 +639,7 @@ func computeStatus(gsSet *agonesv1.GameServerSet, list []*agonesv1.GameServer) a // Initialize list status with empty lists from spec if runtime.FeatureEnabled(runtime.FeatureCountsAndLists) { status.Lists = createInitialListStatus(gsSet) + status.Counters = createInitialCounterStatus(gsSet) } for _, gs := range list { if gs.IsBeingDeleted() { @@ -700,6 +701,14 @@ func createInitialListStatus(gsSet *agonesv1.GameServerSet) map[string]agonesv1. return list } +func createInitialCounterStatus(gsSet *agonesv1.GameServerSet) map[string]agonesv1.AggregatedCounterStatus { + counters := make(map[string]agonesv1.AggregatedCounterStatus) + for name := range gsSet.Spec.Template.Spec.Counters { + counters[name] = agonesv1.AggregatedCounterStatus{} + } + return counters +} + // aggregateCounters adds the contents of a CounterStatus map to an AggregatedCounterStatus map. func aggregateCounters(aggCounterStatus map[string]agonesv1.AggregatedCounterStatus, counterStatus map[string]agonesv1.CounterStatus, diff --git a/pkg/gameserversets/controller_test.go b/pkg/gameserversets/controller_test.go index 511be442d9..d7fe7a2395 100644 --- a/pkg/gameserversets/controller_test.go +++ b/pkg/gameserversets/controller_test.go @@ -412,6 +412,44 @@ func TestComputeStatus(t *testing.T) { assert.Equal(t, expected, computeStatus(gsSet, list)) }) + t.Run("counters with no gameservers", func(t *testing.T) { + utilruntime.FeatureTestMutex.Lock() + defer utilruntime.FeatureTestMutex.Unlock() + + require.NoError(t, utilruntime.ParseFeatures(fmt.Sprintf("%s=true", utilruntime.FeatureCountsAndLists))) + + gsSet := defaultFixture() + gsSet.Spec.Template.Spec.Counters = map[string]agonesv1.CounterStatus{ + "firstCounter": {Capacity: 10, Count: 1}, + "secondCounter": {Capacity: 10, Count: 1}, + } + var list []*agonesv1.GameServer + + expected := agonesv1.GameServerSetStatus{ + Replicas: 0, + ReadyReplicas: 0, + ReservedReplicas: 0, + AllocatedReplicas: 0, + Lists: map[string]agonesv1.AggregatedListStatus{}, + Counters: map[string]agonesv1.AggregatedCounterStatus{ + "firstCounter": { + AllocatedCount: 0, + AllocatedCapacity: 0, + Capacity: 0, + Count: 0, + }, + "secondCounter": { + AllocatedCount: 0, + AllocatedCapacity: 0, + Capacity: 0, + Count: 0, + }, + }, + } + + assert.Equal(t, expected, computeStatus(gsSet, list)) + }) + t.Run("lists", func(t *testing.T) { utilruntime.FeatureTestMutex.Lock() defer utilruntime.FeatureTestMutex.Unlock() @@ -484,7 +522,7 @@ func TestComputeStatus(t *testing.T) { ReadyReplicas: 0, ReservedReplicas: 0, AllocatedReplicas: 0, - Counters: nil, + Counters: map[string]agonesv1.AggregatedCounterStatus{}, Lists: map[string]agonesv1.AggregatedListStatus{ "firstList": { AllocatedCount: 0, diff --git a/site/content/en/docs/Installation/Install Agones/helm.md b/site/content/en/docs/Installation/Install Agones/helm.md index 497d7a382b..15f458663b 100644 --- a/site/content/en/docs/Installation/Install Agones/helm.md +++ b/site/content/en/docs/Installation/Install Agones/helm.md @@ -232,6 +232,7 @@ The following tables lists the configurable parameters of the Agones chart and t ### Allocator Service +{{% feature expiryVersion="1.46.0" %}} | Parameter | Description | Default | 
|----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------| | `agones.allocator.apiServerQPS` | Maximum sustained queries per second that an allocator should be making against API Server | `400` | @@ -288,6 +289,66 @@ The following tables lists the configurable parameters of the Agones chart and t | `agones.allocator.pdb.maxUnavailable` | Description of the number of pods from that set that can be unavailable after the eviction. It can be either an absolute number or a percentage. Mutually Exclusive with `minAvailable` | \`\` | | `agones.allocator.topologySpreadConstraints` | Ensures better resource utilization and high availability by evenly distributing Pods in the agones-system namespace | `{}` | +{{% /feature %}} +{{% feature publishVersion="1.46.0" %}} +| Parameter | Description | Default | +| ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------- | +| `agones.allocator.apiServerQPS` | Maximum sustained queries per second that an allocator should be making against API Server | `400` | +| `agones.allocator.apiServerQPSBurst` | Maximum burst queries per second that an allocator should be making against API Server | `500` | +| `agones.allocator.remoteAllocationTimeout` | Remote allocation call timeout. | `10s` | +| `agones.allocator.totalRemoteAllocationTimeout` | Total remote allocation timeout including retries. | `30s` | +| `agones.allocator.logLevel` | Agones Allocator Log level. Log only entries with that severity and above | `info` | +| `agones.allocator.install` | Whether to install the [allocator service][allocator] | `true` | +| `agones.allocator.replicas` | The number of replicas to run in the deployment | `3` | +| `agones.allocator.service.name` | Service name for the allocator | `agones-allocator` | +| `agones.allocator.service.serviceType` | The [Service Type][service] of the HTTP Service | `LoadBalancer` | +| `agones.allocator.service.clusterIP` | The [Cluster IP][clusterIP] of the Agones allocator. If you want [Headless Service][headless-service] for Agones Allocator, you can set `None` to clusterIP. | \`\` | +| `agones.allocator.service.loadBalancerIP` | The [Load Balancer IP][loadBalancer] of the Agones allocator load balancer. Only works if the Kubernetes provider supports this option. | \`\` | +| `agones.allocator.service.loadBalancerSourceRanges` | The [Load Balancer SourceRanges][loadBalancer] of the Agones allocator load balancer. Only works if the Kubernetes provider supports this option. | `[]` | +| `agones.allocator.service.annotations` | [Annotations][annotations] added to the Agones allocator service | `{}` | +| `agones.allocator.service.http.enabled` | If true the [allocator service][allocator] will respond to [REST requests][rest-requests] | `true` | +| `agones.allocator.service.http.appProtocol` | The `appProtocol` to set on the Service for the http allocation port. If left blank, no value is set. 
| `` | +| `agones.allocator.service.http.port` | The port that is exposed externally by the [allocator service][allocator] for [REST requests][rest-requests] | `443` | +| `agones.allocator.service.http.portName` | The name of exposed port | `http` | +| `agones.allocator.service.http.targetPort` | The port that is used by the allocator pod to listen for [REST requests][rest-requests]. Note that the allocator server cannot bind to low numbered ports. | `8443` | +| `agones.allocator.service.http.nodePort` | If the ServiceType is set to "NodePort", this is the NodePort that the allocator http service is exposed on. | `30000-32767` | +| `agones.allocator.service.http.unallocatedStatusCode` | HTTP status code to return when no GameServer is available for allocation. This setting allows for custom responses when a game server allocation fails, offering flexibility in handling these situations. | `429` | +| `agones.allocator.service.grpc.enabled` | If true the [allocator service][allocator] will respond to [gRPC requests][grpc-requests] | `true` | +| `agones.allocator.service.grpc.port` | The port that is exposed externally by the [allocator service][allocator] for [gRPC requests][grpc-requests] | `443` | +| `agones.allocator.service.grpc.portName` | The name of exposed port | `` | +| `agones.allocator.service.grpc.appProtocol` | The `appProtocol` to set on the Service for the gRPC allocation port. If left blank, no value is set. | `` | +| `agones.allocator.service.grpc.nodePort` | If the ServiceType is set to "NodePort", this is the NodePort that the allocator gRPC service is exposed on. | `30000-32767` | +| `agones.allocator.service.grpc.targetPort` | The port that is used by the allocator pod to listen for [gRPC requests][grpc-requests]. Note that the allocator server cannot bind to low numbered ports. | `8443` | +| `agones.allocator.generateClientTLS` | Set to true to generate client TLS certificates or false to provide certificates in `certs/allocator/allocator-client.default/*` | `true` | +| `agones.allocator.generateTLS` | Set to true to generate TLS certificates or false to provide your own certificates | `true` | +| `agones.allocator.disableMTLS` | Turns off client cert authentication for incoming connections to the allocator. | `false` | +| `agones.allocator.disableTLS` | Turns off TLS security for incoming connections to the allocator. | `false` | +| `agones.allocator.disableSecretCreation` | Disables the creation of any allocator secrets. If true, you MUST provide the `allocator-tls`, `allocator-tls-ca`, and `allocator-client-ca` secrets before installation. 
| `false` | +| `agones.allocator.tlsCert` | Custom TLS certificate provided as a string | \`\` | +| `agones.allocator.tlsKey` | Custom TLS private key provided as a string | \`\` | +| `agones.allocator.clientCAs` | A map of secret key names to allowed client CA certificates provided as strings | `{}` | +| `agones.allocator.tolerations` | Allocator [toleration][toleration] labels for pod assignment | `[]` | +| `agones.allocator.affinity` | Allocator [affinity][affinity] settings for pod assignment | `{}` | +| `agones.allocator.annotations` | [Annotations][annotations] added to the Agones allocator pods | `{}` | +| `agones.allocator.resources` | Allocator pods [resource requests/limit][resources] | `{}` | +| `agones.allocator.labels` | [Labels][labels] added to the Agones Allocator pods | `{}` | +| `agones.allocator.readiness.initialDelaySeconds` | Initial delay before performing the first probe (in seconds) | `3` | +| `agones.allocator.readiness.periodSeconds` | Seconds between each readiness probe | `3` | +| `agones.allocator.readiness.failureThreshold` | Number of failed readiness probes before giving up | `3` | +| `agones.allocator.nodeSelector` | Allocator [node labels][nodeSelector] for pod assignment | `{}` | +| `agones.allocator.serviceMetrics.name` | Second Service name for the allocator | `agones-allocator-metrics-service` | +| `agones.allocator.serviceMetrics.annotations` | [Annotations][annotations] added to the Agones allocator second Service | `{}` | +| `agones.allocator.serviceMetrics.http.port` | The port that is exposed within the cluster by the [allocator service][allocator] for http requests | `8080` | +| `agones.allocator.serviceMetrics.http.portName` | The name of exposed port | `http` | +| `agones.allocator.allocationBatchWaitTime` | Wait time between each allocation batch when performing allocations in allocator mode | `500ms` | +| `agones.allocator.updateStrategy` | The [strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) to apply to the allocator deployment | `{}` | +| `agones.allocator.pdb.enabled` | Set to `true` to enable the creation of a [PodDisruptionBudget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for the allocator deployment | `false` | +| `agones.allocator.pdb.minAvailable` | Description of the number of pods from that set that must still be available after the eviction, even in the absence of the evicted pod. Can be either an absolute number or a percentage. Mutually Exclusive with `maxUnavailable` | `1` | +| `agones.allocator.pdb.maxUnavailable` | Description of the number of pods from that set that can be unavailable after the eviction. It can be either an absolute number or a percentage.
Mutually Exclusive with `minAvailable` | \`\` | +| `agones.allocator.topologySpreadConstraints` | Ensures better resource utilization and high availability by evenly distributing Pods in the agones-system namespace | `{}` | +| `agones.allocator.service.externalTrafficPolicy` | The `externalTrafficPolicy` to set on the Agones allocator Service. Only applied when `serviceType` is `LoadBalancer`. | `Cluster` | + +{{% /feature %}} ### Extensions diff --git a/test/e2e/fleetautoscaler_test.go b/test/e2e/fleetautoscaler_test.go index 78a164ea12..2ea0adc25f 100644 --- a/test/e2e/fleetautoscaler_test.go +++ b/test/e2e/fleetautoscaler_test.go @@ -944,6 +944,88 @@ func TestCounterAutoscaler(t *testing.T) { } } +// nolint:dupl // Linter flags these lines as duplicates of TestListAutoscalerWithNoReplicas +func TestCounterAutoscalerWithNoReplicas(t *testing.T) { + if !runtime.FeatureEnabled(runtime.FeatureCountsAndLists) { + t.SkipNow() + } + t.Parallel() + + ctx := context.Background() + client := framework.AgonesClient.AgonesV1() + log := e2e.TestLogger(t) + + flt := defaultEmptyFleet(framework.Namespace) + flt.Spec.Template.Spec.Counters = map[string]agonesv1.CounterStatus{ + "games": { + Capacity: 5, + }, + } + + flt, err := client.Fleets(framework.Namespace).Create(ctx, flt.DeepCopy(), metav1.CreateOptions{}) + require.NoError(t, err) + defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck + framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas)) + + fleetautoscalers := framework.AgonesClient.AutoscalingV1().FleetAutoscalers(framework.Namespace) + + counterFas := func(f func(fap *autoscalingv1.FleetAutoscalerPolicy)) *autoscalingv1.FleetAutoscaler { + fas := autoscalingv1.FleetAutoscaler{ + ObjectMeta: metav1.ObjectMeta{Name: flt.ObjectMeta.Name + "-counter-autoscaler", Namespace: framework.Namespace}, + Spec: autoscalingv1.FleetAutoscalerSpec{ + FleetName: flt.ObjectMeta.Name, + Policy: autoscalingv1.FleetAutoscalerPolicy{ + Type: autoscalingv1.CounterPolicyType, + }, + Sync: &autoscalingv1.FleetAutoscalerSync{ + Type: autoscalingv1.FixedIntervalSyncType, + FixedInterval: autoscalingv1.FixedIntervalSync{ + Seconds: 1, + }, + }, + }, + } + f(&fas.Spec.Policy) + return &fas + } + testCases := map[string]struct { + fas *autoscalingv1.FleetAutoscaler + wantFasErr bool + wantReplicas int32 + }{ + "Scale Up to MinCapacity": { + fas: counterFas(func(fap *autoscalingv1.FleetAutoscalerPolicy) { + fap.Counter = &autoscalingv1.CounterPolicy{ + Key: "games", + BufferSize: intstr.FromInt(3), + MinCapacity: 16, + MaxCapacity: 100, + } + }), + wantFasErr: false, + wantReplicas: 4, // 4 replicas * 5 capacity each = 20, which satisfies MinCapacity 16 + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + + fas, err := fleetautoscalers.Create(ctx, testCase.fas, metav1.CreateOptions{}) + if testCase.wantFasErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(testCase.wantReplicas)) + fleetautoscalers.Delete(ctx, fas.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck + + // Return to starting 0 replicas + framework.ScaleFleet(t, log, flt, 0) + framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(0)) + }) + } +} + func TestCounterAutoscalerAllocated(t *testing.T) { if !runtime.FeatureEnabled(runtime.FeatureCountsAndLists) { t.SkipNow() } @@ -1209,6 +1291,7 @@ func TestListAutoscaler(t *testing.T) { } } +// nolint:dupl // Linter flags these lines as duplicates of TestCounterAutoscalerWithNoReplicas +func
TestListAutoscalerWithNoReplicas(t *testing.T) { if !runtime.FeatureEnabled(runtime.FeatureCountsAndLists) { t.SkipNow()
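
For the new allocator setting, a minimal sketch of a Helm values override is shown below. The file name `allocator-values.yaml` and the use of `Local` are illustrative only (chosen to show the non-default case, which routes traffic only to nodes running an allocator pod and preserves client source IPs); the key path matches the `agones.allocator.service.externalTrafficPolicy` value introduced in values.yaml above.

```yaml
# allocator-values.yaml (illustrative): override the default Cluster policy.
# Only takes effect when the allocator Service is of type LoadBalancer.
agones:
  allocator:
    service:
      serviceType: LoadBalancer
      externalTrafficPolicy: Local
```

Applied with something like `helm upgrade --install my-release agones/agones --namespace agones-system -f allocator-values.yaml` (release name illustrative).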
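
The `TestCounterAutoscalerWithNoReplicas` case above exercises scale-up from zero replicas: each game server in the fleet template contributes a counter capacity of 5, so reaching `MinCapacity: 16` requires ceil(16/5) = 4 replicas, which is the `wantReplicas` the test asserts. A rough YAML equivalent of that autoscaler fixture is sketched below; the fleet and autoscaler names are illustrative, not taken from this patch.

```yaml
apiVersion: autoscaling.agones.dev/v1
kind: FleetAutoscaler
metadata:
  name: fleet-example-counter-autoscaler   # illustrative name
spec:
  fleetName: fleet-example                 # illustrative; must match an existing Fleet
  policy:
    type: Counter
    counter:
      key: games        # counter declared in the Fleet's GameServer template
      bufferSize: 3
      minCapacity: 16
      maxCapacity: 100
  sync:
    type: FixedInterval
    fixedInterval:
      seconds: 1
```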