Add caching for PriceReader #372

Open
wants to merge 21 commits into main
Changes from 8 commits
3 changes: 3 additions & 0 deletions .mockery.yaml
@@ -14,6 +14,9 @@ packages:
    interfaces:
      ChainSupport:
      PluginProcessor:
  github.com/smartcontractkit/chainlink-ccip/internal/cache:
    interfaces:
      Cache:
  github.com/smartcontractkit/chainlink-ccip/commit/merkleroot/rmn:
    interfaces:
      Controller:
136 changes: 136 additions & 0 deletions internal/cache/cache.go
@@ -0,0 +1,136 @@
package cache

import (
"sync"
"time"
)

// Cache defines the interface for cache operations
type Cache[K comparable, V any] interface {
Contributor Author:

I checked the libs you provided and they look quite advanced. I'm not sure about our future requirements, but our current needs are quite simple, so it might make sense to go homemade for our use case.

Do you have any experience with those libs? What do you recommend?

Contributor:

I almost used ristretto for a project that needed high performance, but you're probably right that it's excessive for our needs.

Two trains of thought right now:

  1. go-cache seems like an excellent fit. It's extremely lightweight (a single file with ~1000 LOC), the interface is almost identical to the one you have, and the project has the most stars.

  2. If we do roll our own, I bet we could come up with a clever optimization for time-based expiration using sequence numbers and multiple maps.
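
For illustration only (this sketch is not from the PR thread), the "multiple maps" idea in option 2 could take the shape of a cache whose entries live in rotating time buckets, so expiration becomes a cheap map swap instead of a per-entry timestamp check. All identifiers below are invented for the sketch.

package cache

import (
	"sync"
	"time"
)

// rotatingCache keeps entries in time buckets; buckets[0] is the newest.
// Expiring old entries is a slice rotation rather than per-entry TTL checks.
type rotatingCache[K comparable, V any] struct {
	mu      sync.RWMutex
	buckets []map[K]V
}

func newRotatingCache[K comparable, V any](numBuckets int, rotateEvery time.Duration) *rotatingCache[K, V] {
	c := &rotatingCache[K, V]{buckets: make([]map[K]V, numBuckets)}
	for i := range c.buckets {
		c.buckets[i] = make(map[K]V)
	}
	go func() {
		// Note: the ticker is never stopped in this sketch.
		for range time.Tick(rotateEvery) {
			c.mu.Lock()
			// Drop the oldest bucket and prepend a fresh one.
			c.buckets = append([]map[K]V{make(map[K]V)}, c.buckets[:numBuckets-1]...)
			c.mu.Unlock()
		}
	}()
	return c
}

func (c *rotatingCache[K, V]) Set(key K, value V) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.buckets[0][key] = value
}

func (c *rotatingCache[K, V]) Get(key K) (V, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, b := range c.buckets {
		if v, ok := b[key]; ok {
			return v, true
		}
	}
	var zero V
	return zero, false
}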

Contributor Author:

We could go with go-cache and wrap it with custom logic to implement the customPolicy. Something like this:

type CustomCache[K comparable, V any] struct {
	cache        *cache.Cache
	customPolicy func(V) bool
	mutex        sync.RWMutex
}

func (c *CustomCache[K, V]) Get(key K) (V, bool) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

        ...

	// Check custom policy
	if c.customPolicy != nil && c.customPolicy(typedValue) {
		c.cache.Delete(interface{}(key))
		return zero, false
	}

	return typedValue, true
}
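
For reference, a filled-in version of that wrapper might look like the sketch below. It is not code from this PR: the NewCustomCache constructor and the fmt.Sprint key conversion are assumptions made for the example (go-cache keys are plain strings), and Get takes a write lock so the policy-triggered Delete stays atomic with the read.

package cache

import (
	"fmt"
	"sync"

	gocache "github.com/patrickmn/go-cache"
)

// CustomCache wraps go-cache and adds a value-based eviction check on reads.
type CustomCache[K comparable, V any] struct {
	cache        *gocache.Cache
	customPolicy func(V) bool
	mutex        sync.Mutex
}

// NewCustomCache is an illustrative constructor; TTL handling is left to go-cache.
func NewCustomCache[K comparable, V any](customPolicy func(V) bool) *CustomCache[K, V] {
	return &CustomCache[K, V]{
		cache:        gocache.New(gocache.NoExpiration, 0),
		customPolicy: customPolicy,
	}
}

func (c *CustomCache[K, V]) Set(key K, value V) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.cache.Set(fmt.Sprint(key), value, gocache.DefaultExpiration)
}

func (c *CustomCache[K, V]) Get(key K) (V, bool) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	var zero V
	raw, found := c.cache.Get(fmt.Sprint(key))
	if !found {
		return zero, false
	}
	typedValue, ok := raw.(V)
	if !ok {
		return zero, false
	}
	// Check custom policy: a hit means the entry is stale, so drop it.
	if c.customPolicy != nil && c.customPolicy(typedValue) {
		c.cache.Delete(fmt.Sprint(key))
		return zero, false
	}
	return typedValue, true
}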

	// Get retrieves a value from the cache
	// Returns the value and true if found, zero value and false if not found
	Get(key K) (V, bool)

	// Set adds or updates a value in the cache
	// Returns true if a new entry was created, false if an existing entry was updated
	Set(key K, value V) bool

	// Delete removes a value from the cache
	// Returns true if an entry was deleted, false if the key wasn't found
	Delete(key K) bool
}

// EvictionPolicy defines how entries should be evicted from the cache
type EvictionPolicy interface {
	// TODO: async process needed
	ShouldEvict(entry *cacheEntry) bool
}

// cacheEntry represents a single entry in the cache
type cacheEntry struct {
	value     interface{}
	createdAt time.Time
}

// inMemoryCache is an in-memory implementation of the Cache interface
type inMemoryCache[K comparable, V any] struct {
	data   map[K]*cacheEntry
	policy EvictionPolicy
	mutex  sync.RWMutex
}

// NewInMemoryCache creates a new cache with the specified eviction policy
// The cache is thread-safe and can be used concurrently
func NewInMemoryCache[K comparable, V any](policy EvictionPolicy) Cache[K, V] {
	return &inMemoryCache[K, V]{
		data:   make(map[K]*cacheEntry),
		policy: policy,
	}
}

// Set adds or updates a value in the cache
func (c *inMemoryCache[K, V]) Set(key K, value V) bool {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	_, exists := c.data[key]
	c.data[key] = &cacheEntry{
		value:     value,
		createdAt: time.Now(),
	}
	return !exists
}

// Get retrieves a value from the cache
func (c *inMemoryCache[K, V]) Get(key K) (V, bool) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	entry, exists := c.data[key]
	if !exists {
		var zero V
		return zero, false
	}

	if c.policy.ShouldEvict(entry) {
		var zero V
		return zero, false
	}

	value, ok := entry.value.(V)
	if !ok {
		var zero V
		return zero, false
	}

	return value, true
}

// Delete removes a value from the cache
func (c *inMemoryCache[K, V]) Delete(key K) bool {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	_, exists := c.data[key]
	if exists {
		delete(c.data, key)
	}
	return exists
}

// neverExpirePolicy implements EvictionPolicy for entries that should never expire
type neverExpirePolicy struct{}

func NewNeverExpirePolicy() EvictionPolicy {
	return neverExpirePolicy{}
}

func (p neverExpirePolicy) ShouldEvict(_ *cacheEntry) bool {
	return false
}

// timeBasedPolicy implements EvictionPolicy for time-based expiration
type timeBasedPolicy struct {
	ttl time.Duration
}

func NewTimeBasedPolicy(ttl time.Duration) EvictionPolicy {
	return &timeBasedPolicy{ttl: ttl}
}

func (p timeBasedPolicy) ShouldEvict(entry *cacheEntry) bool {
	return time.Since(entry.createdAt) > p.ttl
}

// customPolicy implements EvictionPolicy with a custom eviction function
type customPolicy struct {
	evictFunc func(interface{}) bool
}

func NewCustomPolicy(evictFunc func(interface{}) bool) EvictionPolicy {
	return &customPolicy{evictFunc: evictFunc}
}

func (p customPolicy) ShouldEvict(entry *cacheEntry) bool {
	return p.evictFunc(entry.value)
}
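
To make the intended use of the three policies concrete, here is a small usage sketch. It assumes code living inside the chainlink-ccip module (internal packages are not importable from outside), and the keys and values are invented examples rather than anything taken from this PR.

package main

import (
	"fmt"
	"math/big"
	"time"

	"github.com/smartcontractkit/chainlink-ccip/internal/cache"
)

func main() {
	// Never-expire: entries stay until explicitly deleted.
	static := cache.NewInMemoryCache[string, string](cache.NewNeverExpirePolicy())
	static.Set("chain-selector:1", "ethereum-mainnet")

	// Time-based: entries are hidden once they are older than the TTL.
	prices := cache.NewInMemoryCache[string, *big.Int](cache.NewTimeBasedPolicy(5 * time.Minute))
	prices.Set("token-price:0x1234", big.NewInt(42))

	// Custom: entries are dropped whenever the predicate returns true.
	// Here a nil or zero price is treated as not worth caching (illustrative only).
	nonZeroPrices := cache.NewInMemoryCache[string, *big.Int](cache.NewCustomPolicy(func(v interface{}) bool {
		p, ok := v.(*big.Int)
		return !ok || p == nil || p.Sign() == 0
	}))
	nonZeroPrices.Set("token-price:0xabcd", big.NewInt(0))

	if _, ok := nonZeroPrices.Get("token-price:0xabcd"); !ok {
		fmt.Println("zero price was evicted by the custom policy")
	}
	if v, ok := prices.Get("token-price:0x1234"); ok {
		fmt.Println("cached price:", v)
	}
	if name, ok := static.Get("chain-selector:1"); ok {
		fmt.Println("chain:", name)
	}
}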
168 changes: 168 additions & 0 deletions internal/cache/cache_test.go
@@ -0,0 +1,168 @@
package cache

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestInMemoryCache_Basic(t *testing.T) {
	t.Run("never expire policy", func(t *testing.T) {
		cache := NewInMemoryCache[string, int](NewNeverExpirePolicy())

		// Test Set - new entry
		isNew := cache.Set("key1", 100)
		assert.True(t, isNew, "should be a new entry")

		// Test Set - update existing
		isNew = cache.Set("key1", 200)
		assert.False(t, isNew, "should be an update")

		// Test Get - existing key
		value, exists := cache.Get("key1")
		assert.True(t, exists, "should exist")
		assert.Equal(t, 200, value)

		// Test Get - non-existing key
		value, exists = cache.Get("non-existing")
		assert.False(t, exists, "should not exist")
		assert.Equal(t, 0, value)

		// Test Delete - existing key
		deleted := cache.Delete("key1")
		assert.True(t, deleted, "should be deleted")

		// Test Delete - non-existing key
		deleted = cache.Delete("key1")
		assert.False(t, deleted, "should not be deleted")
	})
}

func TestInMemoryCache_TimeBased(t *testing.T) {
	t.Run("time based policy", func(t *testing.T) {
		cache := NewInMemoryCache[string, int](NewTimeBasedPolicy(100 * time.Millisecond))

		cache.Set("key1", 100)

		// Immediate get should succeed
		value, exists := cache.Get("key1")
		assert.True(t, exists)
		assert.Equal(t, 100, value)

		// Wait for TTL to expire
		time.Sleep(150 * time.Millisecond)

		// Get after expiry should fail
		value, exists = cache.Get("key1")
		assert.False(t, exists)
		assert.Equal(t, 0, value)
	})
}

func TestInMemoryCache_Custom(t *testing.T) {
	t.Run("custom policy", func(t *testing.T) {
		// Custom policy that evicts odd numbers
		cache := NewInMemoryCache[string, int](NewCustomPolicy(func(v interface{}) bool {
			if val, ok := v.(int); ok {
				return val%2 != 0
			}
			return false
		}))

		// Even number should stay
		cache.Set("even", 2)
		value, exists := cache.Get("even")
		assert.True(t, exists)
		assert.Equal(t, 2, value)

		// Odd number should be evicted on get
		cache.Set("odd", 3)
		value, exists = cache.Get("odd")
		assert.False(t, exists)
		assert.Equal(t, 0, value)
	})
}

func TestInMemoryCache_Concurrent(t *testing.T) {
	t.Run("concurrent access", func(t *testing.T) {
		cache := NewInMemoryCache[int, int](NewNeverExpirePolicy())
		var wg sync.WaitGroup
		numGoroutines := 10
		numOperations := 100

		// Concurrent writes
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(routine int) {
				defer wg.Done()
				for j := 0; j < numOperations; j++ {
					key := routine*numOperations + j
					cache.Set(key, key)
				}
			}(i)
		}

		// Concurrent reads
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(routine int) {
				defer wg.Done()
				for j := 0; j < numOperations; j++ {
					key := routine*numOperations + j
					_, _ = cache.Get(key)
				}
			}(i)
		}

		wg.Wait()
	})
}

func TestInMemoryCache_Types(t *testing.T) {
	t.Run("different types", func(t *testing.T) {
		// String cache
		stringCache := NewInMemoryCache[string, string](NewNeverExpirePolicy())
		stringCache.Set("hello", "world")
		val, exists := stringCache.Get("hello")
		assert.True(t, exists)
		assert.Equal(t, "world", val)

		// Struct cache
		type Person struct {
			Name string
			Age  int
		}
		structCache := NewInMemoryCache[string, Person](NewNeverExpirePolicy())
		structCache.Set("person", Person{Name: "John", Age: 30})
		person, exists := structCache.Get("person")
		assert.True(t, exists)
		assert.Equal(t, "John", person.Name)
		assert.Equal(t, 30, person.Age)
	})
}

func TestInMemoryCache_EdgeCases(t *testing.T) {
	t.Run("edge cases", func(t *testing.T) {
		cache := NewInMemoryCache[string, *string](NewNeverExpirePolicy())

		// Nil value
		var nilStr *string
		cache.Set("nil", nilStr)
		val, exists := cache.Get("nil")
		assert.True(t, exists)
		assert.Nil(t, val)

		// Empty string key
		str := "value"
		cache.Set("", &str)
		val, exists = cache.Get("")
		assert.True(t, exists)
		assert.Equal(t, &str, val)

		// Delete empty key
		deleted := cache.Delete("")
		assert.True(t, deleted)
	})
}
12 changes: 12 additions & 0 deletions internal/cache/keys/keys.go
@@ -0,0 +1,12 @@
package cachekeys

import (
	"fmt"

	"github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3"
)

// TokenDecimals creates a cache key for token decimals
func TokenDecimals(token ccipocr3.UnknownEncodedAddress, address string) string {
	return fmt.Sprintf("token-decimals:%s:%s", token, address)
}
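
A short sketch of how this key helper might be combined with the cache above, for example when caching token decimals in a price reader. The wiring, the one-hour TTL, and the value 18 are assumptions made for illustration; the actual PriceReader integration is not shown in this excerpt.

package main

import (
	"fmt"
	"time"

	"github.com/smartcontractkit/chainlink-ccip/internal/cache"
	cachekeys "github.com/smartcontractkit/chainlink-ccip/internal/cache/keys"
	"github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3"
)

func main() {
	// One-hour TTL is an arbitrary choice for the example.
	decimalsCache := cache.NewInMemoryCache[string, uint8](cache.NewTimeBasedPolicy(time.Hour))

	token := ccipocr3.UnknownEncodedAddress("0x1234")
	key := cachekeys.TokenDecimals(token, "0xabcd")

	if _, ok := decimalsCache.Get(key); !ok {
		// Cache miss: this is where the on-chain read would happen in a real reader.
		decimalsCache.Set(key, 18)
	}

	if d, ok := decimalsCache.Get(key); ok {
		fmt.Printf("%s -> %d decimals\n", key, d)
	}
}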
56 changes: 56 additions & 0 deletions internal/cache/keys/keys_test.go
@@ -0,0 +1,56 @@
package cachekeys

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3"
)

func TestTokenDecimals(t *testing.T) {
	testCases := []struct {
		name        string
		token       ccipocr3.UnknownEncodedAddress
		address     string
		expectedKey string
	}{
		{
			name:        "basic key generation",
			token:       ccipocr3.UnknownEncodedAddress("0x1234"),
			address:     "0xabcd",
			expectedKey: "token-decimals:0x1234:0xabcd",
		},
		{
			name:        "empty token address",
			token:       ccipocr3.UnknownEncodedAddress(""),
			address:     "0xabcd",
			expectedKey: "token-decimals::0xabcd",
		},
		{
			name:        "empty contract address",
			token:       ccipocr3.UnknownEncodedAddress("0x1234"),
			address:     "",
			expectedKey: "token-decimals:0x1234:",
		},
		{
			name:        "both addresses empty",
			token:       ccipocr3.UnknownEncodedAddress(""),
			address:     "",
			expectedKey: "token-decimals::",
		},
		{
			name:        "long addresses",
			token:       ccipocr3.UnknownEncodedAddress("0x1234567890abcdef1234567890abcdef12345678"),
			address:     "0xfedcba0987654321fedcba0987654321fedcba09",
			expectedKey: "token-decimals:0x1234567890abcdef1234567890abcdef12345678:0xfedcba0987654321fedcba0987654321fedcba09",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			key := TokenDecimals(tc.token, tc.address)
			assert.Equal(t, tc.expectedKey, key)
		})
	}
}