Skip to content

Commit

Permalink
Addressing issue #291 - Adding custom headers with the ratelimit trig… (
Browse files Browse the repository at this point in the history
#292)

* Addressing issue #291 - Adding custom headers with the ratelimit triggered

Signed-off-by: Jesper Söderlund <[email protected]>

* Fixing doc format

Signed-off-by: Jesper Söderlund <[email protected]>

* Fixing data race condition during config-reload

Signed-off-by: Jesper Söderlund <[email protected]>

* Review comments

Signed-off-by: Jesper Söderlund <[email protected]>

* Review comments

Signed-off-by: Jesper Söderlund <[email protected]>

* Changed settings approach. Refactored custom clock to use already existing TimeSource

Signed-off-by: Jesper Söderlund <[email protected]>

* Cleanup after timesource refactoring

Signed-off-by: Jesper Söderlund <[email protected]>

* Fixed review comments

Signed-off-by: Jesper Söderlund <[email protected]>

Co-authored-by: Jesper Söderlund <[email protected]>
  • Loading branch information
jespersoderlund and Jesper Söderlund authored Sep 29, 2021
1 parent ced4263 commit 35b6056
Show file tree
Hide file tree
Showing 9 changed files with 279 additions and 62 deletions.
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
- [One Redis Instance](#one-redis-instance)
- [Two Redis Instances](#two-redis-instances)
- [Memcache](#memcache)
- [Custom headers](#custom-headers)
- [Contact](#contact)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
Expand Down Expand Up @@ -625,6 +626,15 @@ descriptors will fail. Descriptors sent to Memcache should not contain whitespac
When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide the identical list of memcache nodes
to all ratelimiter instances to ensure that a particular cache key is always hashed to the same memcache node.

# Custom headers
The ratelimit service can be configured to return custom headers containing rate limit information. When enabled, it populates the `response_headers_to_add` field of the [RateLimitResponse](https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/ratelimit/v3/rls.proto#service-ratelimit-v3-ratelimitresponse).

The following environment variables control the custom response feature:
1. `LIMIT_RESPONSE_HEADERS_ENABLED` - Enables the custom response headers
1. `LIMIT_LIMIT_HEADER` - The default value is "RateLimit-Limit", setting the environment variable will specify an alternative header name
1. `LIMIT_REMAINING_HEADER` - The default value is "RateLimit-Remaining", setting the environment variable will specify an alternative header name
1. `LIMIT_RESET_HEADER` - The default value is "RateLimit-Reset", setting the environment variable will specify an alternative header name

# Contact

* [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing
Expand Down
7 changes: 4 additions & 3 deletions src/limiter/base_limiter.go
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
package limiter

import (
"math"
"math/rand"

"github.com/coocood/freecache"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
"github.com/envoyproxy/ratelimit/src/assert"
"github.com/envoyproxy/ratelimit/src/config"
"github.com/envoyproxy/ratelimit/src/stats"
"github.com/envoyproxy/ratelimit/src/utils"
logger "github.com/sirupsen/logrus"
"math"
"math/rand"
)

type BaseRateLimiter struct {
Expand Down Expand Up @@ -168,7 +169,7 @@ func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.Ra
Code: responseCode,
CurrentLimit: limit,
LimitRemaining: limitRemaining,
DurationUntilReset: utils.CalculateReset(limit, this.timeSource),
DurationUntilReset: utils.CalculateReset(&limit.Unit, this.timeSource),
}
} else {
return &pb.RateLimitResponse_DescriptorStatus{
Expand Down
98 changes: 88 additions & 10 deletions src/service/ratelimit.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,16 @@ package ratelimit

import (
"fmt"
"github.com/envoyproxy/ratelimit/src/stats"
"math"
"strconv"
"strings"
"sync"

"github.com/envoyproxy/ratelimit/src/settings"
"github.com/envoyproxy/ratelimit/src/stats"
"github.com/envoyproxy/ratelimit/src/utils"

core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
"github.com/envoyproxy/ratelimit/src/assert"
"github.com/envoyproxy/ratelimit/src/config"
Expand All @@ -23,14 +28,19 @@ type RateLimitServiceServer interface {
}

type service struct {
runtime loader.IFace
configLock sync.RWMutex
configLoader config.RateLimitConfigLoader
config config.RateLimitConfig
runtimeUpdateEvent chan int
cache limiter.RateLimitCache
stats stats.ServiceStats
runtimeWatchRoot bool
runtime loader.IFace
configLock sync.RWMutex
configLoader config.RateLimitConfigLoader
config config.RateLimitConfig
runtimeUpdateEvent chan int
cache limiter.RateLimitCache
stats stats.ServiceStats
runtimeWatchRoot bool
customHeadersEnabled bool
customHeaderLimitHeader string
customHeaderRemainingHeader string
customHeaderResetHeader string
customHeaderClock utils.TimeSource
}

func (this *service) reloadConfig(statsManager stats.Manager) {
Expand Down Expand Up @@ -58,8 +68,20 @@ func (this *service) reloadConfig(statsManager stats.Manager) {

newConfig := this.configLoader.Load(files, statsManager)
this.stats.ConfigLoadSuccess.Inc()

this.configLock.Lock()
this.config = newConfig
rlSettings := settings.NewSettings()

if rlSettings.RateLimitResponseHeadersEnabled {
this.customHeadersEnabled = true

this.customHeaderLimitHeader = rlSettings.HeaderRatelimitLimit

this.customHeaderRemainingHeader = rlSettings.HeaderRatelimitRemaining

this.customHeaderResetHeader = rlSettings.HeaderRatelimitReset
}
this.configLock.Unlock()
}

Expand Down Expand Up @@ -118,6 +140,8 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co
return limitsToCheck, isUnlimited
}

// MaxUint32 is the largest uint32 value; it serves as the initial
// "no descriptor seen yet" sentinel when tracking the descriptor with the
// fewest remaining requests. Use the stdlib constant instead of the
// hand-rolled 1<<32 - 1 expression.
const MaxUint32 = uint32(math.MaxUint32)

func (this *service) shouldRateLimitWorker(
ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse {

Expand All @@ -132,7 +156,20 @@ func (this *service) shouldRateLimitWorker(
response := &pb.RateLimitResponse{}
response.Statuses = make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors))
finalCode := pb.RateLimitResponse_OK

// Keep track of the descriptor which is closest to hit the ratelimit
minLimitRemaining := MaxUint32
var minimumDescriptor *pb.RateLimitResponse_DescriptorStatus = nil

for i, descriptorStatus := range responseDescriptorStatuses {
// Keep track of the descriptor closest to hit the ratelimit
if this.customHeadersEnabled &&
descriptorStatus.CurrentLimit != nil &&
descriptorStatus.LimitRemaining < minLimitRemaining {
minimumDescriptor = descriptorStatus
minLimitRemaining = descriptorStatus.LimitRemaining
}

if isUnlimited[i] {
response.Statuses[i] = &pb.RateLimitResponse_DescriptorStatus{
Code: pb.RateLimitResponse_OK,
Expand All @@ -142,14 +179,54 @@ func (this *service) shouldRateLimitWorker(
response.Statuses[i] = descriptorStatus
if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT {
finalCode = descriptorStatus.Code

minimumDescriptor = descriptorStatus
minLimitRemaining = 0
}
}
}

// Add Headers if requested
if this.customHeadersEnabled && minimumDescriptor != nil {
response.ResponseHeadersToAdd = []*core.HeaderValue{
this.rateLimitLimitHeader(minimumDescriptor),
this.rateLimitRemainingHeader(minimumDescriptor),
this.rateLimitResetHeader(minimumDescriptor),
}
}

response.OverallCode = finalCode
return response
}

// rateLimitLimitHeader builds the RateLimit-Limit response header. Only the
// mandatory part (the numeric limit itself) is emitted; the optional
// quota-policy section is currently not provided.
func (this *service) rateLimitLimitHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue {
	limitValue := uint64(descriptor.CurrentLimit.RequestsPerUnit)
	return &core.HeaderValue{
		Key:   this.customHeaderLimitHeader,
		Value: strconv.FormatUint(limitValue, 10),
	}
}

// rateLimitRemainingHeader builds the RateLimit-Remaining response header,
// reporting how many requests are left before the limit is exhausted.
func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue {
	remaining := strconv.FormatUint(uint64(descriptor.LimitRemaining), 10)
	return &core.HeaderValue{
		Key:   this.customHeaderRemainingHeader,
		Value: remaining,
	}
}

// rateLimitResetHeader builds the RateLimit-Reset response header, reporting
// the number of seconds until the current limit window resets, as computed by
// utils.CalculateReset from the limit's unit and the service's time source.
func (this *service) rateLimitResetHeader(
	descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue {

	reset := utils.CalculateReset(&descriptor.CurrentLimit.Unit, this.customHeaderClock)
	return &core.HeaderValue{
		Key:   this.customHeaderResetHeader,
		Value: strconv.FormatInt(reset.GetSeconds(), 10),
	}
}

func (this *service) ShouldRateLimit(
ctx context.Context,
request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) {
Expand Down Expand Up @@ -190,7 +267,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig {
}

func NewService(runtime loader.IFace, cache limiter.RateLimitCache,
configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool) RateLimitServiceServer {
configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool, clock utils.TimeSource) RateLimitServiceServer {

newService := &service{
runtime: runtime,
Expand All @@ -201,6 +278,7 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache,
cache: cache,
stats: statsManager.NewServiceStats(),
runtimeWatchRoot: runtimeWatchRoot,
customHeaderClock: clock,
}

runtime.AddUpdateCallback(newService.runtimeUpdateEvent)
Expand Down
6 changes: 4 additions & 2 deletions src/service_cmd/runner/runner.go
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
package runner

import (
"github.com/envoyproxy/ratelimit/src/metrics"
"github.com/envoyproxy/ratelimit/src/stats"
"io"
"math/rand"
"net/http"
"strings"
"sync"
"time"

"github.com/envoyproxy/ratelimit/src/metrics"
"github.com/envoyproxy/ratelimit/src/stats"

gostats "github.com/lyft/gostats"

"github.com/coocood/freecache"
Expand Down Expand Up @@ -107,6 +108,7 @@ func (runner *Runner) Run() {
config.NewRateLimitConfigLoaderImpl(),
runner.statsManager,
s.RuntimeWatchRoot,
utils.NewTimeSourceImpl(),
)

srv.AddDebugHttpEndpoint(
Expand Down
9 changes: 9 additions & 0 deletions src/settings/settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,15 @@ type Settings struct {
CacheKeyPrefix string `envconfig:"CACHE_KEY_PREFIX" default:""`
BackendType string `envconfig:"BACKEND_TYPE" default:"redis"`

// Settings for optional returning of custom headers
RateLimitResponseHeadersEnabled bool `envconfig:"LIMIT_RESPONSE_HEADERS_ENABLED" default:"false"`
// value: the current limit
HeaderRatelimitLimit string `envconfig:"LIMIT_LIMIT_HEADER" default:"RateLimit-Limit"`
// value: remaining count
HeaderRatelimitRemaining string `envconfig:"LIMIT_REMAINING_HEADER" default:"RateLimit-Remaining"`
// value: remaining seconds
HeaderRatelimitReset string `envconfig:"LIMIT_RESET_HEADER" default:"RateLimit-Reset"`

// Redis settings
RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"`
RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"`
Expand Down
4 changes: 2 additions & 2 deletions src/utils/utilities.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 {
panic("should not get here")
}

func CalculateReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource TimeSource) *duration.Duration {
sec := UnitToDivider(currentLimit.Unit)
// CalculateReset returns the time remaining until the fixed window for the
// given rate limit unit rolls over: the window length in seconds minus the
// already-elapsed portion of the current window.
func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSource) *duration.Duration {
	windowSecs := UnitToDivider(*unit)
	elapsed := timeSource.UnixNow() % windowSecs
	return &duration.Duration{Seconds: windowSecs - elapsed}
}
Expand Down
Loading

0 comments on commit 35b6056

Please sign in to comment.