main.go
package main

import (
	"sync"
	"time"

	"go-cache-benchmark/cache"
)

// workloadMultiplier scales the total number of requests per run:
// total requests = itemSize * workloadMultiplier.
const workloadMultiplier = 15

// NewCacheFunc constructs a cache implementation with the given capacity.
type NewCacheFunc func(size int) cache.Cache
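// The benchmark only relies on the cache.Cache methods used below
// (Name, Get, Set, Close). A minimal sketch of what that interface is
// assumed to look like; the authoritative definition lives in the
// cache package and may differ:
//
//	type Cache interface {
//		Name() string
//		Get(key string) bool // true on a cache hit
//		Set(key string)      // admit the key after a miss
//		Close()
//	}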
func main() {
	// Sweep dimensions: Zipf skew, key-space size, goroutine count, and
	// cache size as a fraction of the key space.
	zipfAlphas := []float64{0.99}
	items := []int{1e5 * 5} // 500,000 distinct keys
	concurrencies := []int{1, 2, 4, 8, 16}
	cacheSizeMultiplier := []float64{0.001, 0.01, 0.1}

	caches := []NewCacheFunc{
		cache.NewSieve,
		cache.NewS3FIFO,
		cache.NewOtter,
		cache.NewLRU,
		cache.NewTwoQueue,
		cache.NewLRUGroupCache,
		cache.NewTinyLFU,
		cache.NewSLRU,
		cache.NewS4LRU,
		cache.NewClock,
		cache.NewFreeLRUSynced,
		cache.NewFreeLRUSharded,
	}

	for _, itemSize := range items {
		for _, multiplier := range cacheSizeMultiplier {
			for _, curr := range concurrencies {
				for _, alpha := range zipfAlphas {
					runBenchmark(itemSize, multiplier, alpha, caches, curr)
				}
			}
		}
	}
}
// runBenchmark runs every cache implementation against the same workload
// configuration and prints the collected results.
func runBenchmark(itemSize int, cacheMultiplier float64, zipfAlpha float64, caches []NewCacheFunc, concurrency int) {
	b := &Benchmark{
		ItemSize:            itemSize,
		CacheSizeMultiplier: cacheMultiplier,
		ZipfAlpha:           zipfAlpha,
		Concurrency:         concurrency,
		Results:             make([]*BenchmarkResult, 0),
	}
	for _, newCache := range caches {
		b.Results = append(b.Results, run(newCache, itemSize, cacheMultiplier, zipfAlpha, concurrency))
	}
	b.WriteToConsole()
}
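// Benchmark and BenchmarkResult are defined elsewhere in this package.
// A sketch of the shape this file assumes, inferred only from the fields
// and methods used above (the real definitions may carry more fields):
//
//	type Benchmark struct {
//		ItemSize            int
//		CacheSizeMultiplier float64
//		ZipfAlpha           float64
//		Concurrency         int
//		Results             []*BenchmarkResult
//	}
//
//	// WriteToConsole renders the collected results, e.g. as a table.
//	func (b *Benchmark) WriteToConsole() { /* ... */ }
//
//	type BenchmarkResult struct {
//		CacheName string
//		Duration  time.Duration
//		Hits      int64
//		Misses    int64
//	}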
// run measures a single cache implementation: it pre-generates a Zipf-distributed
// key stream, then has `concurrency` goroutines issue Get/Set requests against a
// shared cache instance and returns the aggregated hit/miss counts.
func run(newCache NewCacheFunc, itemSize int, cacheSizeMultiplier float64, zipfAlpha float64, concurrency int) *BenchmarkResult {
	gen := NewZipfGenerator(uint64(itemSize), zipfAlpha)
	total := itemSize * workloadMultiplier
	each := total / concurrency

	// Generate the keys up front so key generation does not taint the measured QPS.
	keys := make([][]string, concurrency)
	for i := 0; i < concurrency; i++ {
		keys[i] = make([]string, 0, each)
		for j := 0; j < each; j++ {
			keys[i] = append(keys[i], gen.Next())
		}
	}

	cacheSize := int(float64(itemSize) * cacheSizeMultiplier)
	c := newCache(cacheSize)
	defer c.Close()

	start := time.Now()
	bench := func(c cache.Cache, gen *ZipfGenerator) (int64, int64) {
		var wg sync.WaitGroup
		// Per-goroutine counters avoid contention on shared counters; they are
		// summed after wg.Wait().
		hits := make([]int64, concurrency)
		misses := make([]int64, concurrency)
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func(k int) {
				for j := 0; j < each; j++ {
					key := keys[k][j]
					if c.Get(key) {
						hits[k]++
					} else {
						misses[k]++
						c.Set(key)
					}
				}
				wg.Done()
			}(i)
		}
		wg.Wait()
		var totalHits, totalMisses int64
		for i := 0; i < concurrency; i++ {
			totalHits += hits[i]
			totalMisses += misses[i]
		}
		return totalHits, totalMisses
	}
	hits, misses := bench(c, gen)
	elapsed := time.Since(start)

	// Drop the pre-generated keys so they can be garbage collected.
	keys = nil

	return &BenchmarkResult{
		CacheName: c.Name(),
		Duration:  elapsed,
		Hits:      hits,
		Misses:    misses,
	}
}
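// From a BenchmarkResult, the two headline metrics presumably reported by
// WriteToConsole follow directly. Shown here only as an illustrative sketch;
// the actual reporting code lives elsewhere in the package:
//
//	ops := r.Hits + r.Misses                   // = itemSize * workloadMultiplier, minus any
//	                                           //   remainder lost to the total/concurrency division
//	hitRatio := float64(r.Hits) / float64(ops) // fraction of Gets served from the cache
//	qps := float64(ops) / r.Duration.Seconds() // throughput over the timed section only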