Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

more realistic benchmarks #93

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,24 @@ func (cache *Cache) Get(key []byte) (value []byte, err error) {
return
}

// GetFn looks up key like Get/GetWithBuf but attempts to avoid copying:
// fn is invoked with a slice aliasing the value's current bytes inside the
// segment's ring buffer, constrained in both length and capacity.
//
// In most cases no byte buffer is allocated. The single exception is a
// value that wraps around the end of the underlying segment ring buffer,
// which must be stitched into a fresh slice first.
//
// On a cache miss ErrNotFound is returned and fn is never invoked; any
// error returned by fn is propagated to the caller.
func (cache *Cache) GetFn(key []byte, fn func([]byte) error) (err error) {
	h := hashFunc(key)
	seg := h & segmentAndOpVal
	cache.locks[seg].Lock()
	err = cache.segments[seg].view(key, fn, h, false)
	cache.locks[seg].Unlock()
	return
}

// GetOrSet returns existing value or if record doesn't exist
// it sets a new key, value and expiration for a cache entry and stores it in the cache, returns nil in that case
func (cache *Cache) GetOrSet(key, value []byte, expireSeconds int) (retValue []byte, err error) {
Expand All @@ -109,6 +127,24 @@ func (cache *Cache) Peek(key []byte) (value []byte, err error) {
return
}

// PeekFn looks up key like Peek but attempts to avoid copying: fn is
// invoked with a slice aliasing the value's current bytes inside the
// segment's ring buffer, constrained in both length and capacity. Unlike
// GetFn, the access does not update the entry's recency statistics.
//
// In most cases no byte buffer is allocated. The single exception is a
// value that wraps around the end of the underlying segment ring buffer,
// which must be stitched into a fresh slice first.
//
// On a cache miss ErrNotFound is returned and fn is never invoked; any
// error returned by fn is propagated to the caller.
func (cache *Cache) PeekFn(key []byte, fn func([]byte) error) (err error) {
	h := hashFunc(key)
	seg := h & segmentAndOpVal
	cache.locks[seg].Lock()
	err = cache.segments[seg].view(key, fn, h, true)
	cache.locks[seg].Unlock()
	return
}

// GetWithBuf copies the value to the buf or returns not found error.
// This method doesn't allocate memory when the capacity of buf is greater or equal to value.
func (cache *Cache) GetWithBuf(key, buf []byte) (value []byte, err error) {
Expand Down
145 changes: 91 additions & 54 deletions cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,12 @@ func TestFreeCache(t *testing.T) {
t.Errorf("value is %v, expected %v", string(value), expectedValStr)
}
}
err = cache.GetFn([]byte(keyStr), func(val []byte) error {
if string(val) != expectedValStr {
t.Errorf("getfn: value is %v, expected %v", string(val), expectedValStr)
}
return nil
})
}

t.Logf("hit rate is %v, evacuates %v, entries %v, average time %v, expire count %v\n",
Expand Down Expand Up @@ -156,6 +162,15 @@ func TestGetOrSet(t *testing.T) {
if err != nil || string(r) != "efgh" {
t.Errorf("Expected to get old record, got: value=%v, err=%v", string(r), err)
}
err = cache.GetFn(key, func(val []byte) error {
if string(val) != "efgh" {
t.Errorf("getfn: Expected to get old record, got: value=%v, err=%v", string(r), err)
}
return nil
})
if err != nil {
t.Errorf("did not expect error from GetFn, got: %s", err)
}
}

func TestGetWithExpiration(t *testing.T) {
Expand Down Expand Up @@ -616,6 +631,7 @@ func TestEvacuateCount(t *testing.T) {
func BenchmarkCacheSet(b *testing.B) {
cache := NewCache(256 * 1024 * 1024)
var key [8]byte
b.ReportAllocs()
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Set(key[:], make([]byte, 8), 0)
Expand All @@ -640,108 +656,128 @@ func BenchmarkParallelCacheSet(b *testing.B) {
// BenchmarkMapSet measures inserting sequential 8-byte keys with fresh
// 8-byte values into a plain Go map, as a baseline against the cache's
// Set benchmarks.
func BenchmarkMapSet(b *testing.B) {
	table := make(map[string][]byte)
	var k [8]byte
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		binary.LittleEndian.PutUint64(k[:], uint64(n))
		table[string(k[:])] = make([]byte, 8)
	}
}

func BenchmarkCacheGet(b *testing.B) {
cache, count := populateCache()

b.ResetTimer()
b.ReportAllocs()
b.StopTimer()
cache := NewCache(256 * 1024 * 1024)

var key [8]byte
buf := make([]byte, 64)
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Set(key[:], buf, 0)
binary.LittleEndian.PutUint64(key[:], uint64(i%count))
_, _ = cache.Get(key[:])
}
b.StartTimer()
for i := 0; i < b.N; i++ {
b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate())
}

func populateCache() (*Cache, int) {
var (
cache = NewCache(256 * 1024 * 1024)
buf = make([]byte, 64)
key [8]byte
)

// number of entries that can fit with the above parameters before an
// eviction is needed, with the standard hash function and sequential
// uint64 keys.
const maxEntries = 2739652
for i := 0; i < maxEntries; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Get(key[:])
_ = cache.Set(key[:], buf, 0)
}
return cache, int(cache.EntryCount())
}

func BenchmarkParallelCacheGet(b *testing.B) {
func BenchmarkCacheGetFn(b *testing.B) {
cache, count := populateCache()

b.ResetTimer()
b.ReportAllocs()
b.StopTimer()
cache := NewCache(256 * 1024 * 1024)

fn := func(val []byte) error {
_ = val
return nil
}

var key [8]byte
buf := make([]byte, 64)
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Set(key[:], buf, 0)
binary.LittleEndian.PutUint64(key[:], uint64(i%count))
_ = cache.GetFn(key[:], fn)
}
b.StartTimer()
b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate())
}

func BenchmarkParallelCacheGet(b *testing.B) {
cache, count := populateCache()

b.ResetTimer()
b.ReportAllocs()

b.RunParallel(func(pb *testing.PB) {
counter := 0
b.ReportAllocs()
for pb.Next() {
binary.LittleEndian.PutUint64(key[:], uint64(counter))
cache.Get(key[:])
counter = counter + 1
var key [8]byte
for i := 0; pb.Next(); i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i%count))
_, _ = cache.Get(key[:])
}
})
b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate())
}

func BenchmarkCacheGetWithBuf(b *testing.B) {
cache, count := populateCache()

b.ResetTimer()
b.ReportAllocs()
b.StopTimer()
cache := NewCache(256 * 1024 * 1024)

var key [8]byte
buf := make([]byte, 64)
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Set(key[:], buf, 0)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.GetWithBuf(key[:], buf)
binary.LittleEndian.PutUint64(key[:], uint64(i%count))
_, _ = cache.GetWithBuf(key[:], buf)
}
b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate())
}

func BenchmarkParallelCacheGetWithBuf(b *testing.B) {
cache, count := populateCache()

b.ResetTimer()
b.ReportAllocs()
b.StopTimer()
cache := NewCache(256 * 1024 * 1024)
var key [8]byte
buf := make([]byte, 64)
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Set(key[:], buf, 0)
}
b.StartTimer()

b.RunParallel(func(pb *testing.PB) {
counter := 0
b.ReportAllocs()
for pb.Next() {
binary.LittleEndian.PutUint64(key[:], uint64(counter))
cache.GetWithBuf(key[:], buf)
counter = counter + 1
var key [8]byte
buf := make([]byte, 64)
for i := 0; pb.Next(); i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i%count))
_, _ = cache.GetWithBuf(key[:], buf)
}
})
b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate())
}

func BenchmarkCacheGetWithExpiration(b *testing.B) {
b.StopTimer()
cache := NewCache(256 * 1024 * 1024)
cache, count := populateCache()

b.ResetTimer()
b.ReportAllocs()

var key [8]byte
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.Set(key[:], make([]byte, 8), 0)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
binary.LittleEndian.PutUint64(key[:], uint64(i))
cache.GetWithExpiration(key[:])
binary.LittleEndian.PutUint64(key[:], uint64(i%count))
_, _, _ = cache.GetWithExpiration(key[:])
}
b.Logf("b.N: %d; hit rate: %f", b.N, cache.HitRate())
}

func BenchmarkMapGet(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
m := make(map[string][]byte)
var key [8]byte
Expand All @@ -764,6 +800,7 @@ func BenchmarkHashFunc(b *testing.B) {
rand.Read(key)

b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
hashFunc(key)
}
Expand Down
30 changes: 30 additions & 0 deletions ringbuf.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,36 @@ func (rb *RingBuf) ReadAt(p []byte, off int64) (n int, err error) {
return
}

// Slice returns a byte slice covering [off, off+length) of the ring
// buffer. The fast path aliases the underlying storage directly (with
// capacity clamped via a three-index slice expression) and performs no
// allocation; only a range that wraps past the physical end of the
// buffer is copied into a freshly allocated slice.
//
// ErrOutOfRange is reported when off lies outside [rb.begin, rb.end];
// io.EOF is reported when fewer than length bytes are available after
// off. NOTE(review): off+length is not pre-checked against rb.end —
// confirm callers always pass an in-range length.
func (rb *RingBuf) Slice(off, length int64) ([]byte, error) {
	if off < rb.begin || off > rb.end {
		return nil, ErrOutOfRange
	}
	// Translate the logical offset into a physical index. Once the ring
	// is full (end-begin == len(data)) the logical beginning sits at
	// rb.index rather than at physical index 0.
	start := int(off - rb.begin)
	if rb.end-rb.begin >= int64(len(rb.data)) {
		start += rb.index
	}
	if start >= len(rb.data) {
		start -= len(rb.data)
	}
	stop := start + int(length)
	if stop <= len(rb.data) {
		// Contiguous range: hand out a capacity-limited, zero-copy view.
		return rb.data[start:stop:stop], nil
	}
	// Wrapped range: stitch the two physical halves into a new buffer.
	out := make([]byte, length)
	n := copy(out, rb.data[start:])
	if n < int(length) {
		n += copy(out[n:], rb.data[:stop-len(rb.data)])
	}
	if n < int(length) {
		return nil, io.EOF
	}
	return out, nil
}

func (rb *RingBuf) Write(p []byte) (n int, err error) {
if len(p) > len(rb.data) {
err = ErrOutOfRange
Expand Down
Loading