Rework and simplify lrucache package
vasayxtx committed Sep 3, 2024
1 parent 54ecab8 commit ccc00e8
Showing 9 changed files with 352 additions and 367 deletions.
2 changes: 1 addition & 1 deletion go.mod
@@ -7,7 +7,6 @@ require (
github.com/RussellLuo/slidingwindow v0.0.0-20200528002341-535bb99d338b
github.com/cenkalti/backoff/v4 v4.3.0
github.com/go-chi/chi/v5 v5.1.0
github.com/hashicorp/golang-lru v1.0.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.1
github.com/rs/xid v1.5.0
@@ -28,6 +27,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
19 changes: 5 additions & 14 deletions httpserver/middleware/in_flight_limit.go
@@ -11,12 +11,10 @@ import (
"math"
"net/http"
"strconv"
"sync"
"time"

"github.com/hashicorp/golang-lru/simplelru"

"github.com/acronis/go-appkit/log"
"github.com/acronis/go-appkit/lrucache"
"github.com/acronis/go-appkit/restapi"
)

@@ -292,24 +290,17 @@ func makeInFlightLimitSlotsProvider(limit, backlogLimit, maxKeys int) (func(key
backlogSlots chan struct{}
}

keysZone, err := simplelru.NewLRU(maxKeys, nil)
keysZone, err := lrucache.New[string, KeysZoneItem](maxKeys, nil)
if err != nil {
return nil, fmt.Errorf("new LRU in-memory store for keys: %w", err)
}
var keysZoneMu sync.Mutex
return func(key string) (chan struct{}, chan struct{}) {
keysZoneMu.Lock()
defer keysZoneMu.Unlock()
var keysZoneItem KeysZoneItem
if val, ok := keysZone.Get(key); ok {
keysZoneItem = val.(KeysZoneItem)
} else {
keysZoneItem = KeysZoneItem{
keysZoneItem, _ := keysZone.GetOrAdd(key, func() KeysZoneItem {
return KeysZoneItem{
slots: make(chan struct{}, limit),
backlogSlots: make(chan struct{}, limit+backlogLimit),
}
keysZone.Add(key, keysZoneItem)
}
})
return keysZoneItem.slots, keysZoneItem.backlogSlots
}, nil
}
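For reference, here is the new pattern taken out of the middleware: a minimal, self-contained sketch of a per-key slots provider built on the reworked cache. The type name keysZoneItem and the limit values are illustrative placeholders, and passing nil as the collector relies on the fallback to the disabled metrics collector that appears in the lrucache/cache.go diff below.

package main

import (
    "fmt"

    "github.com/acronis/go-appkit/lrucache"
)

// keysZoneItem mirrors the per-key pair of channels used by the in-flight
// limiter; the name here is illustrative only.
type keysZoneItem struct {
    slots        chan struct{}
    backlogSlots chan struct{}
}

func main() {
    const limit, backlogLimit, maxKeys = 2, 4, 1000

    // A nil collector makes the cache fall back to its no-op metrics collector.
    keysZone, err := lrucache.New[string, keysZoneItem](maxKeys, nil)
    if err != nil {
        panic(err)
    }

    // GetOrAdd constructs the channels at most once per live cache entry;
    // later calls for the same key return the stored pair.
    provider := func(key string) (chan struct{}, chan struct{}) {
        item, _ := keysZone.GetOrAdd(key, func() keysZoneItem {
            return keysZoneItem{
                slots:        make(chan struct{}, limit),
                backlogSlots: make(chan struct{}, limit+backlogLimit),
            }
        })
        return item.slots, item.backlogSlots
    }

    slots, backlogSlots := provider("tenant-a")
    fmt.Println(cap(slots), cap(backlogSlots)) // 2 6
}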
36 changes: 13 additions & 23 deletions httpserver/middleware/rate_limit.go
@@ -12,15 +12,14 @@ import (
"math"
"net/http"
"strconv"
"sync"
"time"

"github.com/RussellLuo/slidingwindow"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/throttled/throttled/v2"
"github.com/throttled/throttled/v2/store/memstore"

"github.com/acronis/go-appkit/log"
"github.com/acronis/go-appkit/lrucache"
"github.com/acronis/go-appkit/restapi"
)

@@ -355,19 +354,14 @@ func makeRateLimitBacklogSlotsProvider(backlogLimit, maxKeys int) (func(key stri
}, nil
}

keysZone, err := simplelru.NewLRU(maxKeys, nil)
keysZone, err := lrucache.New[string, chan struct{}](maxKeys, nil)
if err != nil {
return nil, fmt.Errorf("new LRU in-memory store for keys: %w", err)
}
var mu sync.Mutex
return func(key string) chan struct{} {
mu.Lock()
defer mu.Unlock()
if val, ok := keysZone.Get(key); ok {
return val.(chan struct{})
}
backlogSlots := make(chan struct{}, backlogLimit)
keysZone.Add(key, backlogSlots)
backlogSlots, _ := keysZone.GetOrAdd(key, func() chan struct{} {
return make(chan struct{}, backlogLimit)
})
return backlogSlots
}, nil
}
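A note on why the separate sync.Mutex could be dropped here: GetOrAdd runs the value provider under the cache's internal lock, so concurrent callers asking for the same key should all receive the same backlog channel (as long as the entry is not evicted in between). A small sketch with a made-up key and limits:

package main

import (
    "fmt"
    "sync"

    "github.com/acronis/go-appkit/lrucache"
)

func main() {
    const backlogLimit, maxKeys = 8, 100

    keysZone, err := lrucache.New[string, chan struct{}](maxKeys, nil)
    if err != nil {
        panic(err)
    }

    // Many goroutines ask for the backlog slots of the same key at once.
    results := make([]chan struct{}, 16)
    var wg sync.WaitGroup
    for i := range results {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            slots, _ := keysZone.GetOrAdd("tenant-a", func() chan struct{} {
                return make(chan struct{}, backlogLimit)
            })
            results[i] = slots
        }(i)
    }
    wg.Wait()

    // Every caller should have observed the same channel instance.
    same := true
    for _, ch := range results[1:] {
        if ch != results[0] {
            same = false
        }
    }
    fmt.Println("all callers share one channel:", same)
}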
@@ -423,24 +417,20 @@ func newSlidingWindowLimiter(maxRate Rate, maxKeys int) (*slidingWindowLimiter,
}, nil
}

store, err := simplelru.NewLRU(maxKeys, nil)
store, err := lrucache.New[string, *slidingwindow.Limiter](maxKeys, nil)
if err != nil {
return nil, fmt.Errorf("new LRU in-memory store for keys: %w", err)
}
var mu sync.Mutex
return &slidingWindowLimiter{
maxRate: maxRate,
getLimiter: func(key string) *slidingwindow.Limiter {
mu.Lock()
defer mu.Unlock()
if val, ok := store.Get(key); ok {
return val.(*slidingwindow.Limiter)
}
lim, _ := slidingwindow.NewLimiter(
maxRate.Duration, int64(maxRate.Count), func() (slidingwindow.Window, slidingwindow.StopFunc) {
return slidingwindow.NewLocalWindow()
})
store.Add(key, lim)
lim, _ := store.GetOrAdd(key, func() *slidingwindow.Limiter {
lim, _ := slidingwindow.NewLimiter(
maxRate.Duration, int64(maxRate.Count), func() (slidingwindow.Window, slidingwindow.StopFunc) {
return slidingwindow.NewLocalWindow()
})
return lim
})
return lim
},
}, nil
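The same lazy-construction idea applied to per-key rate limiters, as a standalone sketch. The rate below is arbitrary, and the final loop assumes the upstream slidingwindow Limiter exposes Allow(); only the construction calls are taken from this diff.

package main

import (
    "fmt"
    "time"

    "github.com/RussellLuo/slidingwindow"

    "github.com/acronis/go-appkit/lrucache"
)

func main() {
    const maxKeys = 1000
    window, count := time.Second, int64(3) // illustrative rate: 3 requests per second

    store, err := lrucache.New[string, *slidingwindow.Limiter](maxKeys, nil)
    if err != nil {
        panic(err)
    }

    // The limiter for a key is built inside the GetOrAdd callback, so it is
    // constructed at most once per live cache entry.
    getLimiter := func(key string) *slidingwindow.Limiter {
        lim, _ := store.GetOrAdd(key, func() *slidingwindow.Limiter {
            lim, _ := slidingwindow.NewLimiter(window, count,
                func() (slidingwindow.Window, slidingwindow.StopFunc) {
                    return slidingwindow.NewLocalWindow()
                })
            return lim
        })
        return lim
    }

    lim := getLimiter("tenant-a")
    for i := 0; i < 5; i++ {
        // Within the same second, roughly the first 3 calls should pass.
        fmt.Println(i, lim.Allow())
    }
}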
166 changes: 74 additions & 92 deletions lrucache/cache.go
@@ -12,175 +12,157 @@ import (
"sync"
)

// EntryType is a type of storing in cache entries.
type EntryType int

// EntryTypeDefault is a default entry type.
const EntryTypeDefault EntryType = 0

type cacheKey[K comparable] struct {
key K
entryType EntryType
}

type cacheEntry[K comparable] struct {
key cacheKey[K]
value interface{}
type cacheEntry[K comparable, V any] struct {
key K
value V
}

// LRUCache represents an LRU cache with eviction mechanism and Prometheus metrics.
type LRUCache[K comparable] struct {
type LRUCache[K comparable, V any] struct {
maxEntries int

mu sync.RWMutex
lruList *list.List
cache map[cacheKey[K]]*list.Element // map of cache entries, value is a lruList element
cache map[K]*list.Element // map of cache entries, value is a lruList element

MetricsCollector *MetricsCollector
metricsCollector MetricsCollector
}

// New creates a new LRUCache with the provided maximum number of entries.
func New[K comparable](maxEntries int, metricsCollector *MetricsCollector) (*LRUCache[K], error) {
func New[K comparable, V any](maxEntries int, metricsCollector MetricsCollector) (*LRUCache[K, V], error) {
if maxEntries <= 0 {
return nil, fmt.Errorf("maxEntries must be greater than 0")
}
return &LRUCache[K]{
if metricsCollector == nil {
metricsCollector = disabledMetricsCollector{}
}
return &LRUCache[K, V]{
maxEntries: maxEntries,
lruList: list.New(),
cache: make(map[cacheKey[K]]*list.Element),
MetricsCollector: metricsCollector,
cache: make(map[K]*list.Element),
metricsCollector: metricsCollector,
}, nil
}

// Get returns a value from the cache by the provided key and type.
func (c *LRUCache[K]) Get(key K, entryType EntryType) (value interface{}, ok bool) {
metrics := c.MetricsCollector.getEntryTypeMetrics(entryType)

defer func() {
if ok {
metrics.HitsTotal.Inc()
} else {
metrics.MissesTotal.Inc()
}
}()

cKey := cacheKey[K]{key, entryType}

func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()

if elem, hit := c.cache[cKey]; hit {
c.lruList.MoveToFront(elem)
return elem.Value.(*cacheEntry[K]).value, true
}
return nil, false
return c.get(key)
}

// Add adds a value to the cache with the provided key and type.
// If the cache is full, the oldest entry will be removed.
func (c *LRUCache[K]) Add(key K, value interface{}, entryType EntryType) {
var evictedEntry *cacheEntry[K]

defer func() {
if evictedEntry != nil {
c.MetricsCollector.getEntryTypeMetrics(evictedEntry.key.entryType).EvictionsTotal.Inc()
}
}()

cKey := cacheKey[K]{key, entryType}
entry := &cacheEntry[K]{key: cKey, value: value}

func (c *LRUCache[K, V]) Add(key K, value V) {
c.mu.Lock()
defer c.mu.Unlock()

if elem, ok := c.cache[cKey]; ok {
if elem, ok := c.cache[key]; ok {
c.lruList.MoveToFront(elem)
elem.Value = entry
elem.Value = &cacheEntry[K, V]{key: key, value: value}
return
}
c.addNew(key, value)
}

c.cache[cKey] = c.lruList.PushFront(entry)
c.MetricsCollector.getEntryTypeMetrics(cKey.entryType).Amount.Inc()
if len(c.cache) <= c.maxEntries {
return
}
if evictedEntry = c.removeOldest(); evictedEntry != nil {
c.MetricsCollector.getEntryTypeMetrics(evictedEntry.key.entryType).Amount.Dec()
func (c *LRUCache[K, V]) GetOrAdd(key K, valueProvider func() V) (value V, exists bool) {
c.mu.Lock()
defer c.mu.Unlock()

if value, exists = c.get(key); exists {
return value, exists
}
value = valueProvider()
c.addNew(key, value)
return value, false
}

// Remove removes a value from the cache by the provided key and type.
func (c *LRUCache[K]) Remove(key K, entryType EntryType) bool {
cKey := cacheKey[K]{key, entryType}

func (c *LRUCache[K, V]) Remove(key K) bool {
c.mu.Lock()
defer c.mu.Unlock()

elem, ok := c.cache[cKey]
elem, ok := c.cache[key]
if !ok {
return false
}

c.lruList.Remove(elem)
delete(c.cache, cKey)
c.MetricsCollector.getEntryTypeMetrics(entryType).Amount.Dec()
delete(c.cache, key)
c.metricsCollector.SetAmount(len(c.cache))
return true
}

// Purge clears the cache.
func (c *LRUCache[K]) Purge() {
// Keep in mind that this method does not reset the cache size
// and does not reset Prometheus metrics except for the total number of entries.
// All removed entries will not be counted as evictions.
func (c *LRUCache[K, V]) Purge() {
c.mu.Lock()
defer c.mu.Unlock()

for _, etMetrics := range c.MetricsCollector.entryTypeMetrics {
etMetrics.Amount.Set(0)
}
c.cache = make(map[cacheKey[K]]*list.Element)
c.metricsCollector.SetAmount(0)
c.cache = make(map[K]*list.Element)
c.lruList.Init()
}

// Resize changes the cache size.
// Note that resizing the cache may cause some entries to be evicted.
func (c *LRUCache[K]) Resize(size int) {
// Resize changes the cache size and returns the number of evicted entries.
func (c *LRUCache[K, V]) Resize(size int) (evicted int) {
if size <= 0 {
return
return 0
}

c.mu.Lock()
defer c.mu.Unlock()

c.maxEntries = size
diff := len(c.cache) - size
if diff <= 0 {
evicted = len(c.cache) - size
if evicted <= 0 {
return
}

rmCounts := make([]int, len(c.MetricsCollector.entryTypeMetrics))
for i := 0; i < diff; i++ {
if rmEntry := c.removeOldest(); rmEntry != nil {
rmCounts[rmEntry.key.entryType]++
}
}
for et, cnt := range rmCounts {
typeMetrics := c.MetricsCollector.getEntryTypeMetrics(EntryType(et))
typeMetrics.Amount.Sub(float64(cnt))
typeMetrics.EvictionsTotal.Add(float64(cnt))
for i := 0; i < evicted; i++ {
_ = c.removeOldest()
}
c.metricsCollector.SetAmount(len(c.cache))
c.metricsCollector.AddEvictions(evicted)
return evicted
}

// Len returns the number of items in the cache.
func (c *LRUCache[K]) Len() int {
func (c *LRUCache[K, V]) Len() int {
c.mu.RLock()
defer c.mu.RUnlock()
return len(c.cache)
}

func (c *LRUCache[K]) removeOldest() *cacheEntry[K] {
func (c *LRUCache[K, V]) get(key K) (value V, ok bool) {
if elem, hit := c.cache[key]; hit {
c.lruList.MoveToFront(elem)
c.metricsCollector.IncHits()
return elem.Value.(*cacheEntry[K, V]).value, true
}
c.metricsCollector.IncMisses()
return value, false
}

func (c *LRUCache[K, V]) addNew(key K, value V) {
c.cache[key] = c.lruList.PushFront(&cacheEntry[K, V]{key: key, value: value})
if len(c.cache) <= c.maxEntries {
c.metricsCollector.SetAmount(len(c.cache))
return
}
if evictedEntry := c.removeOldest(); evictedEntry != nil {
c.metricsCollector.AddEvictions(1)
}
}

func (c *LRUCache[K, V]) removeOldest() *cacheEntry[K, V] {
elem := c.lruList.Back()
if elem == nil {
return nil
}
c.lruList.Remove(elem)
entry := elem.Value.(*cacheEntry[K])
entry := elem.Value.(*cacheEntry[K, V])
delete(c.cache, entry.key)
return entry
}
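Pulling the reworked API together, a short usage sketch based only on what this diff shows: generic key and value types, a nil metrics collector (replaced internally by the disabled collector), GetOrAdd reporting whether the key already existed, and Resize returning the number of evicted entries. The keys and values are made up.

package main

import (
    "fmt"

    "github.com/acronis/go-appkit/lrucache"
)

func main() {
    // The value type is part of the cache type now, so call sites no longer
    // need interface{} values and type assertions. A nil collector disables metrics.
    cache, err := lrucache.New[string, int](3, nil)
    if err != nil {
        panic(err)
    }

    cache.Add("a", 1)
    cache.Add("b", 2)

    if v, ok := cache.Get("a"); ok {
        fmt.Println("a =", v) // a = 1
    }

    // GetOrAdd builds the value only on a miss and reports whether the key
    // was already present.
    v, existed := cache.GetOrAdd("c", func() int { return 3 })
    fmt.Println(v, existed) // 3 false

    cache.Add("d", 4) // capacity is 3, so the least recently used entry is evicted

    fmt.Println("len:", cache.Len())                   // len: 3
    fmt.Println("evicted by resize:", cache.Resize(1)) // shrinking evicts the 2 oldest entries

    cache.Remove("d")
    cache.Purge()
    fmt.Println("after purge:", cache.Len()) // after purge: 0
}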
