package recache

import (
	"compress/flate"
	"sync"
	"time"
)

var (
	// Registry of all created caches. Requires cacheMu to be held for access.
	cacheMu sync.RWMutex
	// Index 0 is kept nil, so a zero cache ID never resolves to a valid cache.
	caches = make([]*Cache, 1)

	// Global deflate compression level configuration.
	//
	// Can only be changed before the first Cache is constructed and must not
	// be mutated after.
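	//
	// A minimal usage sketch (compression levels are the standard
	// compress/flate constants):
	//
	//	recache.CompressionLevel = flate.BestSpeed // before any NewCache call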
	CompressionLevel = flate.DefaultCompression
)

// Get cache from registry by ID
func getCache(id int) *Cache {
	cacheMu.RLock()
	defer cacheMu.RUnlock()
	return caches[id]
}

// Unified storage for cached records with specific eviction parameters
type Cache struct {
	// Guards all cache access, excluding the contained records
	mu sync.Mutex

	// Global ID of cache
	id int

	// Total used memory and its limit
	memoryLimit, memoryUsed int

	// Linked list and age limit for quick LRU order modifications and lookup
	lruLimit time.Duration
	lruList  linkedList

	// Storage for each individual frontend
	frontends []map[Key]recordWithMeta
}

// Options for new cache creation
type CacheOptions struct {
	// Maximum amount of memory the cache can consume without forcing eviction
	MemoryLimit uint

	// Maximum time since a record's last use before it is forcibly evicted
	LRULimit time.Duration
}

// Create a new cache with the specified memory and LRU eviction limits. After
// either limit is exceeded, the least recently used records are evicted until
// the limits are satisfied again. Note that this eviction is eventual and not
// immediate for optimisation purposes.
//
// Pass zero values to ignore either or both eviction limits.
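//
// A minimal usage sketch:
//
//	c := recache.NewCache(recache.CacheOptions{
//		MemoryLimit: 1 << 20,     // evict once roughly 1 MiB is in use
//		LRULimit:    time.Minute, // evict records unused for over a minute
//	})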
func NewCache(opts CacheOptions) (c *Cache) {
	cacheMu.Lock()
	defer cacheMu.Unlock()

	c = &Cache{
		id:          len(caches),
		memoryLimit: int(opts.MemoryLimit),
		lruLimit:    opts.LRULimit,
	}
	caches = append(caches, c)
	return c
}

// Create a new Frontend for accessing the cache.
// A Frontend must only be created using this method.
//
// get will be used by the cache engine to generate fresh records for a given
// key. These records are stored by the cache engine and must not be modified
// after get returns. get must be thread-safe.
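//
// A minimal usage sketch, assuming the Getter signature
// func(Key, *RecordWriter) error defined elsewhere in this package:
//
//	f := c.NewFrontend(func(k recache.Key, rw *recache.RecordWriter) error {
//		// Generate the data for this key and stream it into the record
//		_, err := rw.Write([]byte("hello, world"))
//		return err
//	})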
func (c *Cache) NewFrontend(get Getter) *Frontend {
	c.mu.Lock()
	defer c.mu.Unlock()

	f := &Frontend{
		id:     len(c.frontends),
		cache:  c,
		getter: get,
	}
	c.frontends = append(c.frontends, make(map[Key]recordWithMeta))
	return f
}

// Get or create a new record in the cache.
// fresh is true, if the record was freshly created and still requires
// population.
func (c *Cache) getRecord(loc recordLocation) (rec *Record, fresh bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	recWithMeta, ok := c.record(loc)
	if !ok {
		recWithMeta = recordWithMeta{
			node: c.lruList.Prepend(loc),
			rec:  new(Record),
		}
		recWithMeta.rec.semaphore.Init() // Block all reads until population
	} else {
		c.lruList.MoveToFront(recWithMeta.node)
	}
	now := time.Now()
	recWithMeta.lastUsed = now
	c.frontends[loc.frontend][loc.key] = recWithMeta

	// Attempt to evict up to the last 2 records due to LRU or memory
	// constraints. Doing this here simplifies locking patterns, while
	// retaining good enough eviction eventuality.
	for i := 0; i < 2; i++ {
		last, ok := c.lruList.Last()
		if !ok {
			break
		}
		if c.memoryLimit != 0 && c.memoryUsed > c.memoryLimit {
			c.evictWithLock(last, 0)
			continue
		}
		if c.lruLimit != 0 {
			lruRec, ok := c.record(last)
			if !ok {
				panic("linked list points to evicted record")
			}
			if lruRec.lastUsed.Add(c.lruLimit).Before(now) {
				c.evictWithLock(last, 0)
				continue
			}
		}
		break
	}

	return recWithMeta.rec, !ok
}

// Shorthand for retrieving a record by its location.
//
// Requires a lock on c.mu.
func (c *Cache) record(loc recordLocation) (recordWithMeta, bool) {
	rec, ok := c.frontends[loc.frontend][loc.key]
	return rec, ok
}

// Set the record's used memory
func (c *Cache) setUsedMemory(src *Record, loc recordLocation, memoryUsed int) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// It is possible for the record to be evicted, and possibly a new record
	// then inserted into the same spot, while the current record is still
	// being populated. Assert the former is still in the cache.
	//
	// This is needed here, because the used memory value of the record
	// directly affects the total used memory of the cache, without recording
	// what parts of the cache contribute what amount to the total.
	//
	// All other cases of such possible concurrent evictions and overridden
	// inclusions will simply NOP on their respective operations.
	rec, ok := c.record(loc)
	if !ok || rec.rec != src {
		return
	}
	rec.memoryUsed = memoryUsed
	c.frontends[loc.frontend][loc.key] = rec
	c.memoryUsed += memoryUsed
}

// Register a record as being used in another record
func registerDependance(parent, child intercacheRecordLocation) {
	c := getCache(child.cache)
	c.mu.Lock()
	defer c.mu.Unlock()

	rec, ok := c.record(child.recordLocation)
	if !ok {
		return // Already evicted
	}
	rec.includedIn = append(rec.includedIn, parent)
	c.frontends[child.frontend][child.key] = rec
}

// Make a copy of the frontend's keys to prevent iterator invalidation.
//
// Requires a lock on c.mu.
func (c *Cache) keys(frontend int) []Key {
	m := c.frontends[frontend]
	keys := make([]Key, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}