From 49cbf81eb453fc72378c6f290da8142ce49a9dcd Mon Sep 17 00:00:00 2001 From: Marko Date: Wed, 26 Feb 2020 12:57:58 +0100 Subject: [PATCH 01/30] release: minor release (#50) * release: v0.4.1 --- CHANGELOG.md | 11 +++++++++-- go.sum | 8 -------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7bcc7418..b303a9c90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,17 @@ ## Unreleased +## 0.4.1 + +**2020-2-26** + ### Breaking Changes -- [fsdb] [\#43](https://github.com/tendermint/tm-db/pull/43#event-2954801205) Remove FSDB +- [fsdb] [\#43](https://github.com/tendermint/tm-db/pull/43) Remove FSDB + +### Bug Fixes + +- [boltdb] [\#45](https://github.com/tendermint/tm-db/pull/45) Bring BoltDB to adhere to the db interfaces ## 0.4 @@ -31,7 +39,6 @@ - Iterator interface: - `Error() error` - ### IMPROVEMENTS - [remotedb] [\#34](https://github.com/tendermint/tm-db/pull/34) Add proto file tests and regenerate remotedb.pb.go diff --git a/go.sum b/go.sum index b445b2919..3c3d2e5ce 100644 --- a/go.sum +++ b/go.sum @@ -48,10 +48,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw= -github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= @@ -95,10 +91,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From cf95b40c4df61a06ea721a0ad4d2bcee211a24aa Mon Sep 17 00:00:00 2001 From: Marko Date: Sun, 1 Mar 2020 15:38:19 +0100 Subject: [PATCH 02/30] tools: add mergify (#51) * tools: add mergify more jobs disappearing Signed-off-by: Marko Baricevic * forgot to hit save --- .mergify.yml | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .mergify.yml diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 000000000..136bb1148 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,9 @@ +pull_request_rules: + - name: automerge to master with label S:automerge and branch protection passing + conditions: + - base=master + - label=S:automerge + actions: + merge: + method: squash + strict: true From a2b9135d6f1238be6fee4869d3d5af6c6207436e Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 13:20:41 +0100 Subject: [PATCH 03/30] memdb: use a B-tree for storage (#53) --- 
CHANGELOG.md | 4 + backend_test.go | 72 ++++++++++++++ common_test.go | 46 ++++++++- go.mod | 1 + go.sum | 2 + mem_db.go | 248 ++++++++++++++++++++++++++++++------------------ mem_db_test.go | 21 ++++ 7 files changed, 300 insertions(+), 94 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b303a9c90..c44adc479 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## Unreleased +### Improvements + +- [memdb] [\#53](https://github.com/tendermint/tm-db/pull/53) Use a B-tree for storage, which significantly improves range scan performance + ## 0.4.1 **2020-2-26** diff --git a/backend_test.go b/backend_test.go index 9bba5f290..1d530ecb0 100644 --- a/backend_test.go +++ b/backend_test.go @@ -204,6 +204,7 @@ func TestDBIterator(t *testing.T) { for dbType := range backends { t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { testDBIterator(t, dbType) + testDBIteratorBlankKey(t, dbType) }) } } @@ -311,6 +312,18 @@ func testDBIterator(t *testing.T, backend BackendType) { verifyIterator(t, ritr, []int64(nil), "reverse iterator from 7 (ex) to 6") + ritr, err = db.ReverseIterator(int642Bytes(10), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") + + ritr, err = db.ReverseIterator(int642Bytes(6), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") + + ritr, err = db.ReverseIterator(int642Bytes(5), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") + // verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1") ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) @@ -329,7 +342,56 @@ func testDBIterator(t *testing.T, backend BackendType) { require.NoError(t, err) verifyIterator(t, ritr, []int64(nil), "reverse iterator from 2 (ex) to 4") +} + +func testDBIteratorBlankKey(t *testing.T, backend BackendType) { + name := fmt.Sprintf("test_%x", randStr(12)) + dir 
:= os.TempDir() + db := NewDB(name, backend, dir) + defer cleanupDBDir(dir, name) + + err := db.Set([]byte(""), []byte{0}) + require.NoError(t, err) + err = db.Set([]byte("a"), []byte{1}) + require.NoError(t, err) + err = db.Set([]byte("b"), []byte{2}) + require.NoError(t, err) + + value, err := db.Get([]byte("")) + require.NoError(t, err) + assert.Equal(t, []byte{0}, value) + + i, err := db.Iterator(nil, nil) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"", "a", "b"}, "forward") + i, err = db.Iterator([]byte(""), nil) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"", "a", "b"}, "forward from blank") + + i, err = db.Iterator([]byte("a"), nil) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"a", "b"}, "forward from a") + + i, err = db.Iterator([]byte(""), []byte("b")) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"", "a"}, "forward from blank to b") + + i, err = db.ReverseIterator(nil, nil) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"b", "a", ""}, "reverse") + + i, err = db.ReverseIterator([]byte(""), nil) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"b", "a", ""}, "reverse to blank") + + i, err = db.ReverseIterator([]byte(""), []byte("a")) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{""}, "reverse to blank from a") + + i, err = db.ReverseIterator([]byte("a"), nil) + require.NoError(t, err) + verifyIteratorStrings(t, i, []string{"b", "a"}, "reverse to a") } func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { @@ -341,3 +403,13 @@ func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { } assert.Equal(t, expected, list, msg) } + +func verifyIteratorStrings(t *testing.T, itr Iterator, expected []string, msg string) { + var list []string + for itr.Valid() { + key := itr.Key() + list = append(list, string(key)) + itr.Next() + } + assert.Equal(t, expected, list, msg) +} diff --git 
a/common_test.go b/common_test.go index d17c49e00..895acb198 100644 --- a/common_test.go +++ b/common_test.go @@ -201,6 +201,38 @@ func (mockIterator) Error() error { func (mockIterator) Close() { } +func benchmarkRangeScans(b *testing.B, db DB, dbSize int64) { + b.StopTimer() + + rangeSize := int64(10000) + if dbSize < rangeSize { + b.Errorf("db size %v cannot be less than range size %v", dbSize, rangeSize) + } + + for i := int64(0); i < dbSize; i++ { + bytes := int642Bytes(i) + err := db.Set(bytes, bytes) + if err != nil { + // require.NoError() is very expensive (according to profiler), so check manually + b.Fatal(b, err) + } + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + start := rand.Int63n(dbSize - rangeSize) + end := start + rangeSize + iter, err := db.Iterator(int642Bytes(start), int642Bytes(end)) + require.NoError(b, err) + count := 0 + for ; iter.Valid(); iter.Next() { + count++ + } + iter.Close() + require.EqualValues(b, rangeSize, count) + } +} + func benchmarkRandomReadsWrites(b *testing.B, db DB) { b.StopTimer() @@ -217,23 +249,29 @@ func benchmarkRandomReadsWrites(b *testing.B, db DB) { for i := 0; i < b.N; i++ { // Write something { - idx := int64(rand.Int()) % numItems // nolint:gosec testing file, so accepting weak random number generator + idx := rand.Int63n(numItems) // nolint:gosec testing file, so accepting weak random number generator internal[idx]++ val := internal[idx] idxBytes := int642Bytes(idx) valBytes := int642Bytes(val) //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) err := db.Set(idxBytes, valBytes) - b.Error(err) + if err != nil { + // require.NoError() is very expensive (according to profiler), so check manually + b.Fatal(b, err) + } } // Read something { - idx := int64(rand.Int()) % numItems // nolint:gosec testing file, so accepting weak random number generator + idx := rand.Int63n(numItems) // nolint:gosec testing file, so accepting weak random number generator valExp := internal[idx] idxBytes := int642Bytes(idx) 
valBytes, err := db.Get(idxBytes) - b.Error(err) + if err != nil { + // require.NoError() is very expensive (according to profiler), so check manually + b.Fatal(b, err) + } //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) if valExp == 0 { if !bytes.Equal(valBytes, nil) { diff --git a/go.mod b/go.mod index 6d3062630..56e87e583 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.3.1 + github.com/google/btree v1.0.0 github.com/jmhodges/levigo v1.0.0 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.5.1 diff --git a/go.sum b/go.sum index 3c3d2e5ce..5b70eeea6 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,8 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= diff --git a/mem_db.go b/mem_db.go index a88e764ef..286145810 100644 --- a/mem_db.go +++ b/mem_db.go @@ -1,11 +1,47 @@ package db import ( + "bytes" + "context" "fmt" - "sort" "sync" + + "github.com/google/btree" +) + +const ( + // The approximate number of items and children per B-tree node. Tuned with benchmarks. + bTreeDegree = 32 + + // Size of the channel buffer between traversal goroutine and iterator. 
Using an unbuffered + // channel causes two context switches per item sent, while buffering allows more work per + // context switch. Tuned with benchmarks. + chBufferSize = 64 ) +// item is a btree.Item with byte slices as keys and values +type item struct { + key []byte + value []byte +} + +// Less implements btree.Item. +func (i *item) Less(other btree.Item) bool { + // this considers nil == []byte{}, but that's ok since we handle nil endpoints + // in iterators specially anyway + return bytes.Compare(i.key, other.(*item).key) == -1 +} + +// newKey creates a new key item +func newKey(key []byte) *item { + return &item{key: key} +} + +// newPair creates a new pair item +func newPair(key, value []byte) *item { + return &item{key: key, value: value} +} + func init() { registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) { return NewMemDB(), nil @@ -15,13 +51,13 @@ func init() { var _ DB = (*MemDB)(nil) type MemDB struct { - mtx sync.Mutex - db map[string][]byte + mtx sync.Mutex + btree *btree.BTree } func NewMemDB() *MemDB { database := &MemDB{ - db: make(map[string][]byte), + btree: btree.New(bTreeDegree), } return database } @@ -37,8 +73,11 @@ func (db *MemDB) Get(key []byte) ([]byte, error) { defer db.mtx.Unlock() key = nonNilBytes(key) - value := db.db[string(key)] - return value, nil + i := db.btree.Get(newKey(key)) + if i != nil { + return i.(*item).value, nil + } + return nil, nil } // Implements DB. @@ -47,8 +86,7 @@ func (db *MemDB) Has(key []byte) (bool, error) { defer db.mtx.Unlock() key = nonNilBytes(key) - _, ok := db.db[string(key)] - return ok, nil + return db.btree.Has(newKey(key)), nil } // Implements DB. @@ -79,7 +117,7 @@ func (db *MemDB) SetNoLockSync(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) - db.db[string(key)] = value + db.btree.ReplaceOrInsert(newPair(key, value)) } // Implements DB. 
@@ -109,7 +147,7 @@ func (db *MemDB) DeleteNoLock(key []byte) { func (db *MemDB) DeleteNoLockSync(key []byte) { key = nonNilBytes(key) - delete(db.db, string(key)) + db.btree.Delete(newKey(key)) } // Implements DB. @@ -127,9 +165,11 @@ func (db *MemDB) Print() error { db.mtx.Lock() defer db.mtx.Unlock() - for key, value := range db.db { - fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) - } + db.btree.Ascend(func(i btree.Item) bool { + item := i.(*item) + fmt.Printf("[%X]:\t[%X]\n", item.key, item.value) + return true + }) return nil } @@ -140,15 +180,12 @@ func (db *MemDB) Stats() map[string]string { stats := make(map[string]string) stats["database.type"] = "memDB" - stats["database.size"] = fmt.Sprintf("%d", len(db.db)) + stats["database.size"] = fmt.Sprintf("%d", db.btree.Len()) return stats } // Implements DB. func (db *MemDB) NewBatch() Batch { - db.mtx.Lock() - defer db.mtx.Unlock() - return &memBatch{db, nil} } @@ -160,8 +197,7 @@ func (db *MemDB) Iterator(start, end []byte) (Iterator, error) { db.mtx.Lock() defer db.mtx.Unlock() - keys := db.getSortedKeys(start, end, false) - return newMemDBIterator(db, keys, start, end), nil + return newMemDBIterator(db.btree, start, end, false), nil } // Implements DB. @@ -169,101 +205,133 @@ func (db *MemDB) ReverseIterator(start, end []byte) (Iterator, error) { db.mtx.Lock() defer db.mtx.Unlock() - keys := db.getSortedKeys(start, end, true) - return newMemDBIterator(db, keys, start, end), nil + return newMemDBIterator(db.btree, start, end, true), nil } -// We need a copy of all of the keys. -// Not the best, but probably not a bottleneck depending. type memDBIterator struct { - db DB - cur int - keys []string - start []byte - end []byte + ch <-chan *item + cancel context.CancelFunc + item *item + start []byte + end []byte } var _ Iterator = (*memDBIterator)(nil) -// Keys is expected to be in reverse order for reverse iterators. 
-func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { - return &memDBIterator{ - db: db, - cur: 0, - keys: keys, - start: start, - end: end, +func newMemDBIterator(bt *btree.BTree, start []byte, end []byte, reverse bool) *memDBIterator { + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan *item, chBufferSize) + iter := &memDBIterator{ + ch: ch, + cancel: cancel, + start: start, + end: end, } -} -// Implements Iterator. -func (itr *memDBIterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} + go func() { + // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need + // the following variables to handle some reverse iteration conditions ourselves. + var ( + skipEqual []byte + abortLessThan []byte + ) + visitor := func(i btree.Item) bool { + item := i.(*item) + if skipEqual != nil && bytes.Equal(item.key, skipEqual) { + skipEqual = nil + return true + } + if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 { + return false + } + select { + case <-ctx.Done(): + return false + case ch <- item: + return true + } + } + s := newKey(start) + e := newKey(end) + switch { + case start == nil && end == nil && !reverse: + bt.Ascend(visitor) + case start == nil && end == nil && reverse: + bt.Descend(visitor) + case end == nil && !reverse: + // must handle this specially, since nil is considered less than anything else + bt.AscendGreaterOrEqual(s, visitor) + case !reverse: + bt.AscendRange(s, e, visitor) + case end == nil: + // abort after start, since we use [start, end) while btree uses (start, end] + abortLessThan = s.key + bt.Descend(visitor) + default: + // skip end and abort after start, since we use [start, end) while btree uses (start, end] + skipEqual = e.key + abortLessThan = s.key + bt.DescendLessOrEqual(e, visitor) + } + close(ch) + }() + + // prime the iterator with the first value, if any + if item, ok := <-ch; ok { + iter.item = item + } -// Implements 
Iterator. -func (itr *memDBIterator) Valid() bool { - return 0 <= itr.cur && itr.cur < len(itr.keys) + return iter } -// Implements Iterator. -func (itr *memDBIterator) Next() { - itr.assertIsValid() - itr.cur++ +// Close implements Iterator. +func (i *memDBIterator) Close() { + i.cancel() + for range i.ch { // drain channel + } + i.item = nil } -// Implements Iterator. -func (itr *memDBIterator) Key() []byte { - itr.assertIsValid() - return []byte(itr.keys[itr.cur]) +// Domain implements Iterator. +func (i *memDBIterator) Domain() ([]byte, []byte) { + return i.start, i.end } -// Implements Iterator. -func (itr *memDBIterator) Value() []byte { - itr.assertIsValid() - key := []byte(itr.keys[itr.cur]) - bytes, err := itr.db.Get(key) - if err != nil { - return nil - } - return bytes +// Valid implements Iterator. +func (i *memDBIterator) Valid() bool { + return i.item != nil } -func (itr *memDBIterator) Error() error { - return nil +// Next implements Iterator. +func (i *memDBIterator) Next() { + item, ok := <-i.ch + switch { + case ok: + i.item = item + case i.item == nil: + panic("called Next() on invalid iterator") + default: + i.item = nil + } } -// Implements Iterator. -func (itr *memDBIterator) Close() { - itr.keys = nil - itr.db = nil +// Error implements Iterator. +func (i *memDBIterator) Error() error { + return nil // famous last words } -func (itr *memDBIterator) assertIsValid() { - if !itr.Valid() { - panic("memDBIterator is invalid") +// Key implements Iterator. +func (i *memDBIterator) Key() []byte { + if i.item == nil { + panic("called Key() on invalid iterator") } + return i.item.key } -//---------------------------------------- -// Misc. 
- -func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { - keys := []string{} - for key := range db.db { - inDomain := IsKeyInDomain([]byte(key), start, end) - if inDomain { - keys = append(keys, key) - } - } - sort.Strings(keys) - if reverse { - nkeys := len(keys) - for i := 0; i < nkeys/2; i++ { - temp := keys[i] - keys[i] = keys[nkeys-i-1] - keys[nkeys-i-1] = temp - } +// Value implements Iterator. +func (i *memDBIterator) Value() []byte { + if i.item == nil { + panic("called Value() on invalid iterator") } - return keys + return i.item.value } diff --git a/mem_db_test.go b/mem_db_test.go index 7f6468ee6..ee2eab9a7 100644 --- a/mem_db_test.go +++ b/mem_db_test.go @@ -32,3 +32,24 @@ func TestMemDB_Iterator(t *testing.T) { itr.Next() assert.False(t, itr.Valid()) } + +func BenchmarkMemDBRangeScans1M(b *testing.B) { + db := NewMemDB() + defer db.Close() + + benchmarkRangeScans(b, db, int64(1e6)) +} + +func BenchmarkMemDBRangeScans10M(b *testing.B) { + db := NewMemDB() + defer db.Close() + + benchmarkRangeScans(b, db, int64(10e6)) +} + +func BenchmarkMemDBRandomReadsWrites(b *testing.B) { + db := NewMemDB() + defer db.Close() + + benchmarkRandomReadsWrites(b, db) +} From 961a91c9fd7e81a9ae2750e1ef0862cdfff1712e Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 15:55:37 +0100 Subject: [PATCH 04/30] README: add database descriptions (#61) --- README.md | 45 ++++++++++++--------------------------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index f729f54b6..88a79fe22 100644 --- a/README.md +++ b/README.md @@ -1,50 +1,29 @@ # Tendermint DB -Data Base abstractions to be used in applications. -These abstractions are not only meant to be used in applications built on [Tendermint](https://github.com/tendermint/tendermint), but can be used in a variety of applications. +Common database interface for various database backends. 
Primarily meant for applications built on [Tendermint](https://github.com/tendermint/tendermint), such as the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk), but can be used independently of these as well. ### Minimum Go Version Go 1.13+ -## Supported Databases +## Supported Database Backends -- [GolevelDB](https://github.com/syndtr/goleveldb) -- [ClevelDB](https://github.com/google/leveldb) -- [BoltDB](https://github.com/etcd-io/bbolt) -- [GoRocksDB](https://github.com/tecbot/gorocksdb) -- [MemDB](#memdb) -- [RemoteDB](#remotedb) +* **[GoLevelDB](https://github.com/syndtr/goleveldb) [stable]**: A pure Go implementation of [LevelDB](https://github.com/google/leveldb) (see below). Currently the default on-disk database used in the Cosmos SDK. +* **MemDB [stable]:** An in-memory database using [Google's B-tree package](https://github.com/google/btree). Has very high performance both for reads, writes, and range scans, but is not durable and will lose all data on process exit. Does not support transactions. Suitable for e.g. caches, working sets, and tests. Used for [IAVL](https://github.com/tendermint/iavl) working sets when the pruning strategy allows it. -## Using Databases +* **[LevelDB](https://github.com/google/leveldb) [experimental]:** A [Go wrapper](https://github.com/jmhodges/levigo) around [LevelDB](https://github.com/google/leveldb). Uses LSM-trees for on-disk storage, which have good performance for write-heavy workloads, particularly on spinning disks, but requires periodic compaction to maintain decent read performance and reclaim disk space. Does not support transactions. -### GolevelDB +* **[BoltDB](https://github.com/etcd-io/bbolt) [experimental]:** A [fork](https://github.com/etcd-io/bbolt) of [BoltDB](https://github.com/boltdb/bolt). Uses B+trees for on-disk storage, which have good performance for read-heavy workloads and range scans. Supports serializable ACID transactions. 
-To use goleveldb there is no need to install anything and you can run the tests by simply running `make test` +* **[RocksDB](https://github.com/tecbot/gorocksdb) [experimental]:** A [Go wrapper](https://github.com/tecbot/gorocksdb) around [RocksDB](https://rocksdb.org). Similarly to LevelDB (above) it uses LSM-trees for on-disk storage, but is optimized for fast storage media such as SSDs and memory. Supports atomic transactions, but not full ACID transactions. -### ClevelDB +## Meta-databases -To use cleveldb leveldb must be installed on your machine. Please use you local package manager (brew, snap) to install the db. Once you have installed it you can run tests with `make test-cleveldb`. +* **PrefixDB [stable]:** A database which wraps another database and uses a static prefix for all keys. This allows multiple logical databases to be stored in a common underlying databases by using different namespaces. Used by the Cosmos SDK to give different modules their own namespaced database in a single application database. -### BoltDB +* **RemoteDB [experimental]:** A database that connects to distributed Tendermint db instances via [gRPC](https://grpc.io/). This can help with detaching difficult deployments such as LevelDB, and can also ease dependency management for Tendermint developers. -The BoltDB implementation uses bbolt from etcd. +## Tests -You can test boltdb by running `make test-boltdb` - -### RocksDB - -To use RocksDB, you must have it installed on your machine. You can install rocksdb by using your machines package manager (brew, snap). Once you have it installed you can run tests using `make test-rocksdb`. - -### MemDB - -MemDB is a go implementation of a in memory database. It is mainly used for testing purposes but can be used for other purposes as well. To test the database you can run `make test`. - -### RemoteDB - -RemoteDB is a database meant for connecting to distributed Tendermint db instances. 
This can help with detaching difficult deployments such as cleveldb, it can also ease -the burden and cost of deployment of dependencies for databases -to be used by Tendermint developers. It is built with [gRPC](https://grpc.io/). To test this data base you can run `make test`. - -If you have all the databases installed on your machine then you can run tests with `make test-all` +To test common databases, run `make test`. If all databases are available on the local machine, use `make test-all` to test them all. From 310f1ac3d25b5a3518aeac4cd3dffa0d24cc2df7 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 16:16:43 +0100 Subject: [PATCH 05/30] memdb: API cleanups and minor improvements (#56) --- CHANGELOG.md | 6 + backend_test.go | 82 +++++++++++ common_test.go | 129 ------------------ db_test.go | 73 ---------- mem_batch.go | 76 ----------- mem_db.go | 337 ---------------------------------------------- mem_db_test.go | 55 -------- memdb.go | 168 +++++++++++++++++++++++ memdb_batch.go | 63 +++++++++ memdb_iterator.go | 143 ++++++++++++++++++++ memdb_test.go | 26 ++++ prefix_db.go | 22 --- 12 files changed, 488 insertions(+), 692 deletions(-) delete mode 100644 mem_batch.go delete mode 100644 mem_db.go delete mode 100644 mem_db_test.go create mode 100644 memdb.go create mode 100644 memdb_batch.go create mode 100644 memdb_iterator.go create mode 100644 memdb_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index c44adc479..bd232af25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,10 +2,16 @@ ## Unreleased +### Breaking Changes + +- [memdb] [\#56](https://github.com/tendermint/tm-db/pull/56) Removed some exported methods that were mainly meant for internal use: `Mutex()`, `SetNoLock()`, `SetNoLockSync()`, `DeleteNoLock()`, and `DeleteNoLockSync()` + ### Improvements - [memdb] [\#53](https://github.com/tendermint/tm-db/pull/53) Use a B-tree for storage, which significantly improves range scan performance +- [memdb] 
[\#56](https://github.com/tendermint/tm-db/pull/56) Use an RWMutex for improved performance with highly concurrent read-heavy workloads + ## 0.4.1 **2020-2-26** diff --git a/backend_test.go b/backend_test.go index 1d530ecb0..3d92d9361 100644 --- a/backend_test.go +++ b/backend_test.go @@ -413,3 +413,85 @@ func verifyIteratorStrings(t *testing.T, itr Iterator, expected []string, msg st } assert.Equal(t, expected, list, msg) } + +func TestDBBatch(t *testing.T) { + for dbType := range backends { + t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { + testDBBatch(t, dbType) + }) + } +} + +func testDBBatch(t *testing.T, backend BackendType) { + name := fmt.Sprintf("test_%x", randStr(12)) + dir := os.TempDir() + db := NewDB(name, backend, dir) + defer cleanupDBDir(dir, name) + + // create a new batch, and some items - they should not be visible until we write + batch := db.NewBatch() + batch.Set([]byte("a"), []byte{1}) + batch.Set([]byte("b"), []byte{2}) + batch.Set([]byte("c"), []byte{3}) + assertKeyValues(t, db, map[string][]byte{}) + + err := batch.Write() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) + + // the batch still keeps these values internally, so changing values and rewriting batch + // should set the values again + err = db.Set([]byte("a"), []byte{9}) + require.NoError(t, err) + err = db.Delete([]byte("c")) + require.NoError(t, err) + err = batch.WriteSync() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) + + // but when we close, it should no longer set the values + batch.Close() + err = db.Delete([]byte("c")) + require.NoError(t, err) + // FIXME Disabled because goleveldb is failing this test currently + //err = batch.Write() + //require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) + + // it should be possible to re-close the batch + batch.Close() + + // batches should also write changes in order + batch = 
db.NewBatch() + batch.Delete([]byte("a")) + batch.Set([]byte("a"), []byte{1}) + batch.Set([]byte("b"), []byte{1}) + batch.Set([]byte("b"), []byte{2}) + batch.Set([]byte("c"), []byte{3}) + batch.Delete([]byte("c")) + err = batch.Write() + require.NoError(t, err) + batch.Close() + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) + + // and writing an empty batch should not fail + batch = db.NewBatch() + err = batch.Write() + require.NoError(t, err) + err = batch.WriteSync() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) +} + +func assertKeyValues(t *testing.T, db DB, expect map[string][]byte) { + iter, err := db.Iterator(nil, nil) + require.NoError(t, err) + + actual := make(map[string][]byte) + for ; iter.Valid(); iter.Next() { + require.NoError(t, iter.Error()) + actual[string(iter.Key())] = iter.Value() + } + + assert.Equal(t, expect, actual) +} diff --git a/common_test.go b/common_test.go index 895acb198..aa903a5da 100644 --- a/common_test.go +++ b/common_test.go @@ -3,10 +3,8 @@ package db import ( "bytes" "encoding/binary" - "fmt" "io/ioutil" "math/rand" - "sync" "testing" "github.com/stretchr/testify/assert" @@ -74,133 +72,6 @@ func newTempDB(t *testing.T, backend BackendType) (db DB, dbDir string) { return NewDB("testdb", backend, dirname), dirname } -//---------------------------------------- -// mockDB - -// NOTE: not actually goroutine safe. -// If you want something goroutine safe, maybe you just want a MemDB. 
-type mockDB struct { - mtx sync.Mutex - calls map[string]int -} - -func newMockDB() *mockDB { - return &mockDB{ - calls: make(map[string]int), - } -} - -func (mdb *mockDB) Mutex() *sync.Mutex { - return &(mdb.mtx) -} - -func (mdb *mockDB) Get([]byte) []byte { - mdb.calls["Get"]++ - return nil -} - -func (mdb *mockDB) Has([]byte) bool { - mdb.calls["Has"]++ - return false -} - -func (mdb *mockDB) Set([]byte, []byte) { - mdb.calls["Set"]++ -} - -func (mdb *mockDB) SetSync([]byte, []byte) { - mdb.calls["SetSync"]++ -} - -func (mdb *mockDB) SetNoLock([]byte, []byte) { - mdb.calls["SetNoLock"]++ -} - -func (mdb *mockDB) SetNoLockSync([]byte, []byte) { - mdb.calls["SetNoLockSync"]++ -} - -func (mdb *mockDB) Delete([]byte) { - mdb.calls["Delete"]++ -} - -func (mdb *mockDB) DeleteSync([]byte) { - mdb.calls["DeleteSync"]++ -} - -func (mdb *mockDB) DeleteNoLock([]byte) { - mdb.calls["DeleteNoLock"]++ -} - -func (mdb *mockDB) DeleteNoLockSync([]byte) { - mdb.calls["DeleteNoLockSync"]++ -} - -func (mdb *mockDB) Iterator(start, end []byte) (Iterator, error) { - mdb.calls["Iterator"]++ - return &mockIterator{}, nil -} - -func (mdb *mockDB) ReverseIterator(start, end []byte) (Iterator, error) { - mdb.calls["ReverseIterator"]++ - return &mockIterator{}, nil -} - -func (mdb *mockDB) Close() { - mdb.calls["Close"]++ -} - -func (mdb *mockDB) NewBatch() Batch { - mdb.calls["NewBatch"]++ - return &memBatch{db: mdb} -} - -func (mdb *mockDB) Print() error { - mdb.calls["Print"]++ - fmt.Printf("mockDB{%v}", mdb.Stats()) - return nil -} - -func (mdb *mockDB) Stats() map[string]string { - mdb.calls["Stats"]++ - - res := make(map[string]string) - for key, count := range mdb.calls { - res[key] = fmt.Sprintf("%d", count) - } - return res -} - -//---------------------------------------- -// mockIterator - -type mockIterator struct{} - -func (mockIterator) Domain() (start []byte, end []byte) { - return nil, nil -} - -func (mockIterator) Valid() bool { - return false -} - -func (mockIterator) 
Next() {} - -func (mockIterator) Key() []byte { - return nil -} - -func (mockIterator) Value() []byte { - return nil -} - -func (mockIterator) Error() error { - return nil -} - -func (mockIterator) Close() { -} - func benchmarkRangeScans(b *testing.B, db DB, dbSize int64) { b.StopTimer() diff --git a/db_test.go b/db_test.go index 1e531509d..dc81d663b 100644 --- a/db_test.go +++ b/db_test.go @@ -137,76 +137,3 @@ func TestDBIteratorNonemptyBeginAfter(t *testing.T) { }) } } - -func TestDBBatchWrite(t *testing.T) { - //nolint:errcheck - testCases := []struct { - modify func(batch Batch) - calls map[string]int - }{ - 0: { - func(batch Batch) { - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Delete(bz("3")) - batch.Set(bz("4"), bz("4")) - batch.Write() - }, - map[string]int{ - "Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0, - "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0, - }, - }, - 1: { - func(batch Batch) { - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Set(bz("4"), bz("4")) - batch.Delete(bz("3")) - batch.Write() - }, - map[string]int{ - "Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0, - "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0, - }, - }, - 2: { - func(batch Batch) { - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Delete(bz("3")) - batch.Set(bz("4"), bz("4")) - batch.WriteSync() - }, - map[string]int{ - "Set": 0, "SetSync": 0, "SetNoLock": 2, "SetNoLockSync": 1, - "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0, - }, - }, - 3: { - func(batch Batch) { - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Set(bz("4"), bz("4")) - batch.Delete(bz("3")) - batch.WriteSync() - }, - map[string]int{ - "Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0, - "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 0, "DeleteNoLockSync": 1, - }, - }, - } - - for i, tc := range testCases { - mdb := newMockDB() - 
batch := mdb.NewBatch() - - tc.modify(batch) - - for call, exp := range tc.calls { - got := mdb.calls[call] - assert.Equal(t, exp, got, "#%v - key: %s", i, call) - } - } -} diff --git a/mem_batch.go b/mem_batch.go deleted file mode 100644 index a00baaf7b..000000000 --- a/mem_batch.go +++ /dev/null @@ -1,76 +0,0 @@ -package db - -import "sync" - -type atomicSetDeleter interface { - Mutex() *sync.Mutex - SetNoLock(key, value []byte) - SetNoLockSync(key, value []byte) - DeleteNoLock(key []byte) - DeleteNoLockSync(key []byte) -} - -type memBatch struct { - db atomicSetDeleter - ops []operation -} - -type opType int - -const ( - opTypeSet opType = 1 - opTypeDelete opType = 2 -) - -type operation struct { - opType - key []byte - value []byte -} - -func (mBatch *memBatch) Set(key, value []byte) { - mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) -} - -func (mBatch *memBatch) Delete(key []byte) { - mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) -} - -func (mBatch *memBatch) Write() error { - mBatch.write(false) - return nil -} - -func (mBatch *memBatch) WriteSync() error { - mBatch.write(true) - return nil -} - -func (mBatch *memBatch) Close() { - mBatch.ops = nil -} - -func (mBatch *memBatch) write(doSync bool) { - if mtx := mBatch.db.Mutex(); mtx != nil { - mtx.Lock() - defer mtx.Unlock() - } - - for i, op := range mBatch.ops { - if doSync && i == (len(mBatch.ops)-1) { - switch op.opType { - case opTypeSet: - mBatch.db.SetNoLockSync(op.key, op.value) - case opTypeDelete: - mBatch.db.DeleteNoLockSync(op.key) - } - break // we're done. 
- } - switch op.opType { - case opTypeSet: - mBatch.db.SetNoLock(op.key, op.value) - case opTypeDelete: - mBatch.db.DeleteNoLock(op.key) - } - } -} diff --git a/mem_db.go b/mem_db.go deleted file mode 100644 index 286145810..000000000 --- a/mem_db.go +++ /dev/null @@ -1,337 +0,0 @@ -package db - -import ( - "bytes" - "context" - "fmt" - "sync" - - "github.com/google/btree" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - bTreeDegree = 32 - - // Size of the channel buffer between traversal goroutine and iterator. Using an unbuffered - // channel causes two context switches per item sent, while buffering allows more work per - // context switch. Tuned with benchmarks. - chBufferSize = 64 -) - -// item is a btree.Item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// Less implements btree.Item. -func (i *item) Less(other btree.Item) bool { - // this considers nil == []byte{}, but that's ok since we handle nil endpoints - // in iterators specially anyway - return bytes.Compare(i.key, other.(*item).key) == -1 -} - -// newKey creates a new key item -func newKey(key []byte) *item { - return &item{key: key} -} - -// newPair creates a new pair item -func newPair(key, value []byte) *item { - return &item{key: key, value: value} -} - -func init() { - registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) { - return NewMemDB(), nil - }, false) -} - -var _ DB = (*MemDB)(nil) - -type MemDB struct { - mtx sync.Mutex - btree *btree.BTree -} - -func NewMemDB() *MemDB { - database := &MemDB{ - btree: btree.New(bTreeDegree), - } - return database -} - -// Implements atomicSetDeleter. -func (db *MemDB) Mutex() *sync.Mutex { - return &(db.mtx) -} - -// Implements DB. 
-func (db *MemDB) Get(key []byte) ([]byte, error) { - db.mtx.Lock() - defer db.mtx.Unlock() - key = nonNilBytes(key) - - i := db.btree.Get(newKey(key)) - if i != nil { - return i.(*item).value, nil - } - return nil, nil -} - -// Implements DB. -func (db *MemDB) Has(key []byte) (bool, error) { - db.mtx.Lock() - defer db.mtx.Unlock() - key = nonNilBytes(key) - - return db.btree.Has(newKey(key)), nil -} - -// Implements DB. -func (db *MemDB) Set(key []byte, value []byte) error { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) - return nil -} - -// Implements DB. -func (db *MemDB) SetSync(key []byte, value []byte) error { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) - return nil -} - -// Implements atomicSetDeleter. -func (db *MemDB) SetNoLock(key []byte, value []byte) { - db.SetNoLockSync(key, value) -} - -// Implements atomicSetDeleter. -func (db *MemDB) SetNoLockSync(key []byte, value []byte) { - key = nonNilBytes(key) - value = nonNilBytes(value) - - db.btree.ReplaceOrInsert(newPair(key, value)) -} - -// Implements DB. -func (db *MemDB) Delete(key []byte) error { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) - return nil -} - -// Implements DB. -func (db *MemDB) DeleteSync(key []byte) error { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) - return nil -} - -// Implements atomicSetDeleter. -func (db *MemDB) DeleteNoLock(key []byte) { - db.DeleteNoLockSync(key) -} - -// Implements atomicSetDeleter. -func (db *MemDB) DeleteNoLockSync(key []byte) { - key = nonNilBytes(key) - - db.btree.Delete(newKey(key)) -} - -// Implements DB. -func (db *MemDB) Close() error { - // Close is a noop since for an in-memory - // database, we don't have a destination - // to flush contents to nor do we want - // any data loss on invoking Close() - // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 - return nil -} - -// Implements DB. 
-func (db *MemDB) Print() error { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.btree.Ascend(func(i btree.Item) bool { - item := i.(*item) - fmt.Printf("[%X]:\t[%X]\n", item.key, item.value) - return true - }) - return nil -} - -// Implements DB. -func (db *MemDB) Stats() map[string]string { - db.mtx.Lock() - defer db.mtx.Unlock() - - stats := make(map[string]string) - stats["database.type"] = "memDB" - stats["database.size"] = fmt.Sprintf("%d", db.btree.Len()) - return stats -} - -// Implements DB. -func (db *MemDB) NewBatch() Batch { - return &memBatch{db, nil} -} - -//---------------------------------------- -// Iterator - -// Implements DB. -func (db *MemDB) Iterator(start, end []byte) (Iterator, error) { - db.mtx.Lock() - defer db.mtx.Unlock() - - return newMemDBIterator(db.btree, start, end, false), nil -} - -// Implements DB. -func (db *MemDB) ReverseIterator(start, end []byte) (Iterator, error) { - db.mtx.Lock() - defer db.mtx.Unlock() - - return newMemDBIterator(db.btree, start, end, true), nil -} - -type memDBIterator struct { - ch <-chan *item - cancel context.CancelFunc - item *item - start []byte - end []byte -} - -var _ Iterator = (*memDBIterator)(nil) - -func newMemDBIterator(bt *btree.BTree, start []byte, end []byte, reverse bool) *memDBIterator { - ctx, cancel := context.WithCancel(context.Background()) - ch := make(chan *item, chBufferSize) - iter := &memDBIterator{ - ch: ch, - cancel: cancel, - start: start, - end: end, - } - - go func() { - // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need - // the following variables to handle some reverse iteration conditions ourselves. 
- var ( - skipEqual []byte - abortLessThan []byte - ) - visitor := func(i btree.Item) bool { - item := i.(*item) - if skipEqual != nil && bytes.Equal(item.key, skipEqual) { - skipEqual = nil - return true - } - if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 { - return false - } - select { - case <-ctx.Done(): - return false - case ch <- item: - return true - } - } - s := newKey(start) - e := newKey(end) - switch { - case start == nil && end == nil && !reverse: - bt.Ascend(visitor) - case start == nil && end == nil && reverse: - bt.Descend(visitor) - case end == nil && !reverse: - // must handle this specially, since nil is considered less than anything else - bt.AscendGreaterOrEqual(s, visitor) - case !reverse: - bt.AscendRange(s, e, visitor) - case end == nil: - // abort after start, since we use [start, end) while btree uses (start, end] - abortLessThan = s.key - bt.Descend(visitor) - default: - // skip end and abort after start, since we use [start, end) while btree uses (start, end] - skipEqual = e.key - abortLessThan = s.key - bt.DescendLessOrEqual(e, visitor) - } - close(ch) - }() - - // prime the iterator with the first value, if any - if item, ok := <-ch; ok { - iter.item = item - } - - return iter -} - -// Close implements Iterator. -func (i *memDBIterator) Close() { - i.cancel() - for range i.ch { // drain channel - } - i.item = nil -} - -// Domain implements Iterator. -func (i *memDBIterator) Domain() ([]byte, []byte) { - return i.start, i.end -} - -// Valid implements Iterator. -func (i *memDBIterator) Valid() bool { - return i.item != nil -} - -// Next implements Iterator. -func (i *memDBIterator) Next() { - item, ok := <-i.ch - switch { - case ok: - i.item = item - case i.item == nil: - panic("called Next() on invalid iterator") - default: - i.item = nil - } -} - -// Error implements Iterator. -func (i *memDBIterator) Error() error { - return nil // famous last words -} - -// Key implements Iterator. 
-func (i *memDBIterator) Key() []byte { - if i.item == nil { - panic("called Key() on invalid iterator") - } - return i.item.key -} - -// Value implements Iterator. -func (i *memDBIterator) Value() []byte { - if i.item == nil { - panic("called Value() on invalid iterator") - } - return i.item.value -} diff --git a/mem_db_test.go b/mem_db_test.go deleted file mode 100644 index ee2eab9a7..000000000 --- a/mem_db_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestMemDB_Iterator(t *testing.T) { - db := NewMemDB() - defer db.Close() - - // if db is empty, iterator is invalid - itr, err := db.Iterator(nil, nil) - assert.NoError(t, err) - defer itr.Close() - assert.False(t, itr.Valid()) - - err = db.Set([]byte("foo"), []byte("bar")) - assert.NoError(t, err) - - // single iteration - itr, err = db.Iterator(nil, nil) - assert.NoError(t, err) - defer itr.Close() - key := itr.Key() - assert.True(t, itr.Valid()) - assert.Equal(t, []byte("foo"), key) - - value := itr.Value() - assert.Equal(t, []byte("bar"), value) - itr.Next() - assert.False(t, itr.Valid()) -} - -func BenchmarkMemDBRangeScans1M(b *testing.B) { - db := NewMemDB() - defer db.Close() - - benchmarkRangeScans(b, db, int64(1e6)) -} - -func BenchmarkMemDBRangeScans10M(b *testing.B) { - db := NewMemDB() - defer db.Close() - - benchmarkRangeScans(b, db, int64(10e6)) -} - -func BenchmarkMemDBRandomReadsWrites(b *testing.B) { - db := NewMemDB() - defer db.Close() - - benchmarkRandomReadsWrites(b, db) -} diff --git a/memdb.go b/memdb.go new file mode 100644 index 000000000..9cbb99ab8 --- /dev/null +++ b/memdb.go @@ -0,0 +1,168 @@ +package db + +import ( + "bytes" + "fmt" + "sync" + + "github.com/google/btree" +) + +const ( + // The approximate number of items and children per B-tree node. Tuned with benchmarks. 
+ bTreeDegree = 32 +) + +func init() { + registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) { + return NewMemDB(), nil + }, false) +} + +// item is a btree.Item with byte slices as keys and values +type item struct { + key []byte + value []byte +} + +// Less implements btree.Item. +func (i *item) Less(other btree.Item) bool { + // this considers nil == []byte{}, but that's ok since we handle nil endpoints + // in iterators specially anyway + return bytes.Compare(i.key, other.(*item).key) == -1 +} + +// newKey creates a new key item. +func newKey(key []byte) *item { + return &item{key: nonNilBytes(key)} +} + +// newPair creates a new pair item. +func newPair(key, value []byte) *item { + return &item{key: nonNilBytes(key), value: nonNilBytes(value)} +} + +// MemDB is an in-memory database backend using a B-tree for storage. +type MemDB struct { + mtx sync.RWMutex + btree *btree.BTree +} + +var _ DB = (*MemDB)(nil) + +// NewMemDB creates a new in-memory database. +func NewMemDB() *MemDB { + database := &MemDB{ + btree: btree.New(bTreeDegree), + } + return database +} + +// Get implements DB. +func (db *MemDB) Get(key []byte) ([]byte, error) { + db.mtx.RLock() + defer db.mtx.RUnlock() + + i := db.btree.Get(newKey(key)) + if i != nil { + return i.(*item).value, nil + } + return nil, nil +} + +// Has implements DB. +func (db *MemDB) Has(key []byte) (bool, error) { + db.mtx.RLock() + defer db.mtx.RUnlock() + + return db.btree.Has(newKey(key)), nil +} + +// Set implements DB. +func (db *MemDB) Set(key []byte, value []byte) error { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.set(key, value) + return nil +} + +// set sets a value without locking the mutex. +func (db *MemDB) set(key []byte, value []byte) { + db.btree.ReplaceOrInsert(newPair(key, value)) +} + +// SetSync implements DB. +func (db *MemDB) SetSync(key []byte, value []byte) error { + return db.Set(key, value) +} + +// Delete implements DB. 
+func (db *MemDB) Delete(key []byte) error { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.delete(key) + return nil +} + +// delete deletes a key without locking the mutex. +func (db *MemDB) delete(key []byte) { + db.btree.Delete(newKey(key)) +} + +// DeleteSync implements DB. +func (db *MemDB) DeleteSync(key []byte) error { + return db.Delete(key) +} + +// Close implements DB. +func (db *MemDB) Close() error { + // Close is a noop since for an in-memory database, we don't have a destination to flush + // contents to nor do we want any data loss on invoking Close(). + // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 + return nil +} + +// Print implements DB. +func (db *MemDB) Print() error { + db.mtx.RLock() + defer db.mtx.RUnlock() + + db.btree.Ascend(func(i btree.Item) bool { + item := i.(*item) + fmt.Printf("[%X]:\t[%X]\n", item.key, item.value) + return true + }) + return nil +} + +// Stats implements DB. +func (db *MemDB) Stats() map[string]string { + db.mtx.RLock() + defer db.mtx.RUnlock() + + stats := make(map[string]string) + stats["database.type"] = "memDB" + stats["database.size"] = fmt.Sprintf("%d", db.btree.Len()) + return stats +} + +// NewBatch implements DB. +func (db *MemDB) NewBatch() Batch { + return &memDBBatch{db, nil} +} + +// Iterator implements DB. +func (db *MemDB) Iterator(start, end []byte) (Iterator, error) { + db.mtx.RLock() + defer db.mtx.RUnlock() + return newMemDBIterator(db.btree, start, end, false), nil +} + +// ReverseIterator implements DB. 
+func (db *MemDB) ReverseIterator(start, end []byte) (Iterator, error) { + db.mtx.RLock() + defer db.mtx.RUnlock() + return newMemDBIterator(db.btree, start, end, true), nil +} diff --git a/memdb_batch.go b/memdb_batch.go new file mode 100644 index 000000000..4bd1f1d0c --- /dev/null +++ b/memdb_batch.go @@ -0,0 +1,63 @@ +package db + +import "github.com/pkg/errors" + +// memDBBatch operations +type opType int + +const ( + opTypeSet opType = iota + 1 + opTypeDelete +) + +type operation struct { + opType + key []byte + value []byte +} + +// memDBBatch handles in-memory batching. +type memDBBatch struct { + db *MemDB + ops []operation +} + +var _ Batch = (*memDBBatch)(nil) + +// Set implements Batch. +func (b *memDBBatch) Set(key, value []byte) { + b.ops = append(b.ops, operation{opTypeSet, key, value}) +} + +// Delete implements Batch. +func (b *memDBBatch) Delete(key []byte) { + b.ops = append(b.ops, operation{opTypeDelete, key, nil}) +} + +// Write implements Batch. +func (b *memDBBatch) Write() error { + b.db.mtx.Lock() + defer b.db.mtx.Unlock() + + for _, op := range b.ops { + switch op.opType { + case opTypeSet: + b.db.set(op.key, op.value) + case opTypeDelete: + b.db.delete(op.key) + default: + return errors.Errorf("unknown operation type %v (%v)", op.opType, op) + } + } + return nil +} + +// WriteSync implements Batch. +func (b *memDBBatch) WriteSync() error { + return b.Write() +} + +// Close implements Batch. +func (b *memDBBatch) Close() { + b.ops = nil +} diff --git a/memdb_iterator.go b/memdb_iterator.go new file mode 100644 index 000000000..b5e12abd8 --- /dev/null +++ b/memdb_iterator.go @@ -0,0 +1,143 @@ +package db + +import ( + "bytes" + "context" + + "github.com/google/btree" +) + +const ( + // Size of the channel buffer between traversal goroutine and iterator. Using an unbuffered + // channel causes two context switches per item sent, while buffering allows more work per + // context switch. Tuned with benchmarks. 
+ chBufferSize = 64 +) + +// memDBIterator is a memDB iterator. +type memDBIterator struct { + ch <-chan *item + cancel context.CancelFunc + item *item + start []byte + end []byte +} + +var _ Iterator = (*memDBIterator)(nil) + +// newMemDBIterator creates a new memDBIterator. +func newMemDBIterator(bt *btree.BTree, start []byte, end []byte, reverse bool) *memDBIterator { + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan *item, chBufferSize) + iter := &memDBIterator{ + ch: ch, + cancel: cancel, + start: start, + end: end, + } + + go func() { + // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need + // the following variables to handle some reverse iteration conditions ourselves. + var ( + skipEqual []byte + abortLessThan []byte + ) + visitor := func(i btree.Item) bool { + item := i.(*item) + if skipEqual != nil && bytes.Equal(item.key, skipEqual) { + skipEqual = nil + return true + } + if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 { + return false + } + select { + case <-ctx.Done(): + return false + case ch <- item: + return true + } + } + switch { + case start == nil && end == nil && !reverse: + bt.Ascend(visitor) + case start == nil && end == nil && reverse: + bt.Descend(visitor) + case end == nil && !reverse: + // must handle this specially, since nil is considered less than anything else + bt.AscendGreaterOrEqual(newKey(start), visitor) + case !reverse: + bt.AscendRange(newKey(start), newKey(end), visitor) + case end == nil: + // abort after start, since we use [start, end) while btree uses (start, end] + abortLessThan = start + bt.Descend(visitor) + default: + // skip end and abort after start, since we use [start, end) while btree uses (start, end] + skipEqual = end + abortLessThan = start + bt.DescendLessOrEqual(newKey(end), visitor) + } + close(ch) + }() + + // prime the iterator with the first value, if any + if item, ok := <-ch; ok { + iter.item = item + } + + return 
iter +} + +// Close implements Iterator. +func (i *memDBIterator) Close() { + i.cancel() + for range i.ch { // drain channel + } + i.item = nil +} + +// Domain implements Iterator. +func (i *memDBIterator) Domain() ([]byte, []byte) { + return i.start, i.end +} + +// Valid implements Iterator. +func (i *memDBIterator) Valid() bool { + return i.item != nil +} + +// Next implements Iterator. +func (i *memDBIterator) Next() { + item, ok := <-i.ch + switch { + case ok: + i.item = item + case i.item == nil: + panic("called Next() on invalid iterator") + default: + i.item = nil + } +} + +// Error implements Iterator. +func (i *memDBIterator) Error() error { + return nil // famous last words +} + +// Key implements Iterator. +func (i *memDBIterator) Key() []byte { + if i.item == nil { + panic("called Key() on invalid iterator") + } + return i.item.key +} + +// Value implements Iterator. +func (i *memDBIterator) Value() []byte { + if i.item == nil { + panic("called Value() on invalid iterator") + } + return i.item.value +} diff --git a/memdb_test.go b/memdb_test.go new file mode 100644 index 000000000..4e67e813d --- /dev/null +++ b/memdb_test.go @@ -0,0 +1,26 @@ +package db + +import ( + "testing" +) + +func BenchmarkMemDBRangeScans1M(b *testing.B) { + db := NewMemDB() + defer db.Close() + + benchmarkRangeScans(b, db, int64(1e6)) +} + +func BenchmarkMemDBRangeScans10M(b *testing.B) { + db := NewMemDB() + defer db.Close() + + benchmarkRangeScans(b, db, int64(10e6)) +} + +func BenchmarkMemDBRandomReadsWrites(b *testing.B) { + db := NewMemDB() + defer db.Close() + + benchmarkRandomReadsWrites(b, db) +} diff --git a/prefix_db.go b/prefix_db.go index b5fe41ac3..fd6e5800e 100644 --- a/prefix_db.go +++ b/prefix_db.go @@ -167,28 +167,6 @@ func (pdb *PrefixDB) NewBatch() Batch { return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) } -/* NOTE: Uncomment to use memBatch instead of prefixBatch -// Implements atomicSetDeleter. 
-func (pdb *PrefixDB) SetNoLock(key []byte, value []byte) { - pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value) -} - -// Implements atomicSetDeleter. -func (pdb *PrefixDB) SetNoLockSync(key []byte, value []byte) { - pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value) -} - -// Implements atomicSetDeleter. -func (pdb *PrefixDB) DeleteNoLock(key []byte) { - pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key)) -} - -// Implements atomicSetDeleter. -func (pdb *PrefixDB) DeleteNoLockSync(key []byte) { - pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key)) -} -*/ - // Implements DB. func (pdb *PrefixDB) Close() error { pdb.mtx.Lock() From bbb58391f75affe67a42f3f1811c9eec354f6332 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 16:22:42 +0100 Subject: [PATCH 06/30] goleveldb: make Batch.Close() actually remove batch contents (#58) --- CHANGELOG.md | 4 ++++ backend_test.go | 5 ++--- go_level_db.go | 5 +++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd232af25..6a0cde3a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ - [memdb] [\#56](https://github.com/tendermint/tm-db/pull/56) Use an RWMutex for improved performance with highly concurrent read-heavy workloads +### Bug Fixes + +- [goleveldb] [\#58](https://github.com/tendermint/tm-db/pull/58) Make `Batch.Close()` actually remove the batch contents + ## 0.4.1 **2020-2-26** diff --git a/backend_test.go b/backend_test.go index 3d92d9361..5e62c2eaa 100644 --- a/backend_test.go +++ b/backend_test.go @@ -453,9 +453,8 @@ func testDBBatch(t *testing.T, backend BackendType) { batch.Close() err = db.Delete([]byte("c")) require.NoError(t, err) - // FIXME Disabled because goleveldb is failing this test currently - //err = batch.Write() - //require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) // it should be possible to re-close the 
batch diff --git a/go_level_db.go b/go_level_db.go index 0a162c084..126096f96 100644 --- a/go_level_db.go +++ b/go_level_db.go @@ -196,8 +196,9 @@ func (mBatch *goLevelDBBatch) WriteSync() error { } // Implements Batch. -// Close is no-op for goLevelDBBatch. -func (mBatch *goLevelDBBatch) Close() {} +func (mBatch *goLevelDBBatch) Close() { + mBatch.batch.Reset() +} //---------------------------------------- // Iterator From ec6088889ba00fae556a214fc214d7c33c0066a9 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 18:02:06 +0100 Subject: [PATCH 07/30] Split out backend files for batch and iterator (#63) --- boltdb.go | 185 +----------- boltdb_batch.go | 52 ++++ boltdb_iterator.go | 137 +++++++++ c_level_db.go | 339 ---------------------- cleveldb.go | 176 +++++++++++ cleveldb_batch.go | 42 +++ cleveldb_iterator.go | 140 +++++++++ c_level_db_test.go => cleveldb_test.go | 12 - go_level_db.go | 353 ----------------------- goleveldb.go | 170 +++++++++++ goleveldb_batch.go | 46 +++ goleveldb_iterator.go | 143 +++++++++ go_level_db_test.go => goleveldb_test.go | 0 prefix_db.go | 342 ---------------------- prefixdb.go | 174 +++++++++++ prefixdb_batch.go | 42 +++ prefixdb_iterator.go | 127 ++++++++ prefix_db_test.go => prefixdb_test.go | 0 remotedb/batch.go | 54 ++++ remotedb/iterator.go | 131 +++++++++ remotedb/remotedb.go | 152 ---------- rocks_db.go => rocksdb.go | 209 +------------- rocksdb_batch.go | 45 +++ rocksdb_iterator.go | 151 ++++++++++ rocks_db_test.go => rocksdb_test.go | 0 25 files changed, 1658 insertions(+), 1564 deletions(-) create mode 100644 boltdb_batch.go create mode 100644 boltdb_iterator.go delete mode 100644 c_level_db.go create mode 100644 cleveldb.go create mode 100644 cleveldb_batch.go create mode 100644 cleveldb_iterator.go rename c_level_db_test.go => cleveldb_test.go (90%) delete mode 100644 go_level_db.go create mode 100644 goleveldb.go create mode 100644 goleveldb_batch.go create mode 100644 goleveldb_iterator.go rename 
go_level_db_test.go => goleveldb_test.go (100%) delete mode 100644 prefix_db.go create mode 100644 prefixdb.go create mode 100644 prefixdb_batch.go create mode 100644 prefixdb_iterator.go rename prefix_db_test.go => prefixdb_test.go (100%) create mode 100644 remotedb/batch.go create mode 100644 remotedb/iterator.go rename rocks_db.go => rocksdb.go (50%) create mode 100644 rocksdb_batch.go create mode 100644 rocksdb_iterator.go rename rocks_db_test.go => rocksdb_test.go (100%) diff --git a/boltdb.go b/boltdb.go index 0c28bf291..c1a430082 100644 --- a/boltdb.go +++ b/boltdb.go @@ -3,7 +3,6 @@ package db import ( - "bytes" "fmt" "os" "path/filepath" @@ -20,8 +19,7 @@ func init() { }, false) } -// BoltDB is a wrapper around etcd's fork of bolt -// (https://github.com/etcd-io/bbolt). +// BoltDB is a wrapper around etcd's fork of bolt (https://github.com/etcd-io/bbolt). // // NOTE: All operations (including Set, Delete) are synchronous by default. One // can globally turn it off by using NoSync config option (not recommended). @@ -32,6 +30,8 @@ type BoltDB struct { db *bbolt.DB } +var _ DB = (*BoltDB)(nil) + // NewBoltDB returns a BoltDB with default options. func NewBoltDB(name, dir string) (DB, error) { return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions) @@ -62,6 +62,7 @@ func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) return &BoltDB{db: db}, nil } +// Get implements DB. func (bdb *BoltDB) Get(key []byte) (value []byte, err error) { key = nonEmptyKey(nonNilBytes(key)) err = bdb.db.View(func(tx *bbolt.Tx) error { @@ -77,6 +78,7 @@ func (bdb *BoltDB) Get(key []byte) (value []byte, err error) { return } +// Has implements DB. func (bdb *BoltDB) Has(key []byte) (bool, error) { bytes, err := bdb.Get(key) if err != nil { @@ -85,6 +87,7 @@ func (bdb *BoltDB) Has(key []byte) (bool, error) { return bytes != nil, nil } +// Set implements DB. 
func (bdb *BoltDB) Set(key, value []byte) error { key = nonEmptyKey(nonNilBytes(key)) value = nonNilBytes(value) @@ -98,10 +101,12 @@ func (bdb *BoltDB) Set(key, value []byte) error { return nil } +// SetSync implements DB. func (bdb *BoltDB) SetSync(key, value []byte) error { return bdb.Set(key, value) } +// Delete implements DB. func (bdb *BoltDB) Delete(key []byte) error { key = nonEmptyKey(nonNilBytes(key)) err := bdb.db.Update(func(tx *bbolt.Tx) error { @@ -113,14 +118,17 @@ func (bdb *BoltDB) Delete(key []byte) error { return nil } +// DeleteSync implements DB. func (bdb *BoltDB) DeleteSync(key []byte) error { return bdb.Delete(key) } +// Close implements DB. func (bdb *BoltDB) Close() error { return bdb.db.Close() } +// Print implements DB. func (bdb *BoltDB) Print() error { stats := bdb.db.Stats() fmt.Printf("%v\n", stats) @@ -138,6 +146,7 @@ func (bdb *BoltDB) Print() error { return nil } +// Stats implements DB. func (bdb *BoltDB) Stats() map[string]string { stats := bdb.db.Stats() m := make(map[string]string) @@ -155,14 +164,7 @@ func (bdb *BoltDB) Stats() map[string]string { return m } -// boltDBBatch stores key values in sync.Map and dumps them to the underlying -// DB upon Write call. -type boltDBBatch struct { - db *BoltDB - ops []operation -} - -// NewBatch returns a new batch. +// NewBatch implements DB. func (bdb *BoltDB) NewBatch() Batch { return &boltDBBatch{ ops: nil, @@ -170,49 +172,6 @@ func (bdb *BoltDB) NewBatch() Batch { } } -// It is safe to modify the contents of the argument after Set returns but not -// before. -func (bdb *boltDBBatch) Set(key, value []byte) { - bdb.ops = append(bdb.ops, operation{opTypeSet, key, value}) -} - -// It is safe to modify the contents of the argument after Delete returns but -// not before. 
-func (bdb *boltDBBatch) Delete(key []byte) { - bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil}) -} - -// NOTE: the operation is synchronous (see BoltDB for reasons) -func (bdb *boltDBBatch) Write() error { - err := bdb.db.db.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(bucket) - for _, op := range bdb.ops { - key := nonEmptyKey(nonNilBytes(op.key)) - switch op.opType { - case opTypeSet: - if putErr := b.Put(key, op.value); putErr != nil { - return putErr - } - case opTypeDelete: - if delErr := b.Delete(key); delErr != nil { - return delErr - } - } - } - return nil - }) - if err != nil { - return err - } - return nil -} - -func (bdb *boltDBBatch) WriteSync() error { - return bdb.Write() -} - -func (bdb *boltDBBatch) Close() {} - // WARNING: Any concurrent writes or reads will block until the iterator is // closed. func (bdb *BoltDB) Iterator(start, end []byte) (Iterator, error) { @@ -233,124 +192,6 @@ func (bdb *BoltDB) ReverseIterator(start, end []byte) (Iterator, error) { return newBoltDBIterator(tx, start, end, true), nil } -// boltDBIterator allows you to iterate on range of keys/values given some -// start / end keys (nil & nil will result in doing full scan). 
-type boltDBIterator struct { - tx *bbolt.Tx - - itr *bbolt.Cursor - start []byte - end []byte - - currentKey []byte - currentValue []byte - - isInvalid bool - isReverse bool -} - -func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator { - itr := tx.Bucket(bucket).Cursor() - - var ck, cv []byte - if isReverse { - if end == nil { - ck, cv = itr.Last() - } else { - _, _ = itr.Seek(end) // after key - ck, cv = itr.Prev() // return to end key - } - } else { - if start == nil { - ck, cv = itr.First() - } else { - ck, cv = itr.Seek(start) - } - } - - return &boltDBIterator{ - tx: tx, - itr: itr, - start: start, - end: end, - currentKey: ck, - currentValue: cv, - isReverse: isReverse, - isInvalid: false, - } -} - -func (itr *boltDBIterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr *boltDBIterator) Valid() bool { - if itr.isInvalid { - return false - } - - // iterated to the end of the cursor - if len(itr.currentKey) == 0 { - itr.isInvalid = true - return false - } - - if itr.isReverse { - if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 { - itr.isInvalid = true - return false - } - } else { - if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 { - itr.isInvalid = true - return false - } - } - - // Valid - return true -} - -func (itr *boltDBIterator) Next() { - itr.assertIsValid() - if itr.isReverse { - itr.currentKey, itr.currentValue = itr.itr.Prev() - } else { - itr.currentKey, itr.currentValue = itr.itr.Next() - } -} - -func (itr *boltDBIterator) Key() []byte { - itr.assertIsValid() - return append([]byte{}, itr.currentKey...) -} - -func (itr *boltDBIterator) Value() []byte { - itr.assertIsValid() - var value []byte - if itr.currentValue != nil { - value = append([]byte{}, itr.currentValue...) 
- } - return value -} - -func (itr *boltDBIterator) Error() error { - return nil -} - -func (itr *boltDBIterator) Close() { - err := itr.tx.Rollback() - if err != nil { - panic(err) - } -} - -func (itr *boltDBIterator) assertIsValid() { - if !itr.Valid() { - panic("boltdb-iterator is invalid") - } -} - // nonEmptyKey returns a []byte("nil") if key is empty. // WARNING: this may collude with "nil" user key! func nonEmptyKey(key []byte) []byte { diff --git a/boltdb_batch.go b/boltdb_batch.go new file mode 100644 index 000000000..a5996fe38 --- /dev/null +++ b/boltdb_batch.go @@ -0,0 +1,52 @@ +// +build boltdb + +package db + +import "github.com/etcd-io/bbolt" + +// boltDBBatch stores operations internally and dumps them to BoltDB on Write(). +type boltDBBatch struct { + db *BoltDB + ops []operation +} + +var _ Batch = (*boltDBBatch)(nil) + +// Set implements Batch. +func (b *boltDBBatch) Set(key, value []byte) { + b.ops = append(b.ops, operation{opTypeSet, key, value}) +} + +// Delete implements Batch. +func (b *boltDBBatch) Delete(key []byte) { + b.ops = append(b.ops, operation{opTypeDelete, key, nil}) +} + +// Write implements Batch. +func (b *boltDBBatch) Write() error { + return b.db.db.Batch(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(bucket) + for _, op := range b.ops { + key := nonEmptyKey(nonNilBytes(op.key)) + switch op.opType { + case opTypeSet: + if err := bkt.Put(key, op.value); err != nil { + return err + } + case opTypeDelete: + if err := bkt.Delete(key); err != nil { + return err + } + } + } + return nil + }) +} + +// WriteSync implements Batch. +func (b *boltDBBatch) WriteSync() error { + return b.Write() +} + +// Close implements Batch. 
+func (b *boltDBBatch) Close() {} diff --git a/boltdb_iterator.go b/boltdb_iterator.go new file mode 100644 index 000000000..4f56a0da0 --- /dev/null +++ b/boltdb_iterator.go @@ -0,0 +1,137 @@ +// +build boltdb + +package db + +import ( + "bytes" + + "github.com/etcd-io/bbolt" +) + +// boltDBIterator allows you to iterate on range of keys/values given some +// start / end keys (nil & nil will result in doing full scan). +type boltDBIterator struct { + tx *bbolt.Tx + + itr *bbolt.Cursor + start []byte + end []byte + + currentKey []byte + currentValue []byte + + isInvalid bool + isReverse bool +} + +var _ Iterator = (*boltDBIterator)(nil) + +// newBoltDBIterator creates a new boltDBIterator. +func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator { + itr := tx.Bucket(bucket).Cursor() + + var ck, cv []byte + if isReverse { + if end == nil { + ck, cv = itr.Last() + } else { + _, _ = itr.Seek(end) // after key + ck, cv = itr.Prev() // return to end key + } + } else { + if start == nil { + ck, cv = itr.First() + } else { + ck, cv = itr.Seek(start) + } + } + + return &boltDBIterator{ + tx: tx, + itr: itr, + start: start, + end: end, + currentKey: ck, + currentValue: cv, + isReverse: isReverse, + isInvalid: false, + } +} + +// Domain implements Iterator. +func (itr *boltDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Valid implements Iterator. +func (itr *boltDBIterator) Valid() bool { + if itr.isInvalid { + return false + } + + // iterated to the end of the cursor + if len(itr.currentKey) == 0 { + itr.isInvalid = true + return false + } + + if itr.isReverse { + if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 { + itr.isInvalid = true + return false + } + } else { + if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 { + itr.isInvalid = true + return false + } + } + + // Valid + return true +} + +// Next implements Iterator. 
+func (itr *boltDBIterator) Next() { + itr.assertIsValid() + if itr.isReverse { + itr.currentKey, itr.currentValue = itr.itr.Prev() + } else { + itr.currentKey, itr.currentValue = itr.itr.Next() + } +} + +// Key implements Iterator. +func (itr *boltDBIterator) Key() []byte { + itr.assertIsValid() + return append([]byte{}, itr.currentKey...) +} + +// Value implements Iterator. +func (itr *boltDBIterator) Value() []byte { + itr.assertIsValid() + var value []byte + if itr.currentValue != nil { + value = append([]byte{}, itr.currentValue...) + } + return value +} + +// Error implements Iterator. +func (itr *boltDBIterator) Error() error { + return nil +} + +// Close implements Iterator. +func (itr *boltDBIterator) Close() { + err := itr.tx.Rollback() + if err != nil { + panic(err) + } +} + +func (itr *boltDBIterator) assertIsValid() { + if !itr.Valid() { + panic("boltdb-iterator is invalid") + } +} diff --git a/c_level_db.go b/c_level_db.go deleted file mode 100644 index ca558fb9e..000000000 --- a/c_level_db.go +++ /dev/null @@ -1,339 +0,0 @@ -// +build cleveldb - -package db - -import ( - "bytes" - "fmt" - "path/filepath" - - "github.com/jmhodges/levigo" -) - -func init() { - dbCreator := func(name string, dir string) (DB, error) { - return NewCLevelDB(name, dir) - } - registerDBCreator(CLevelDBBackend, dbCreator, false) -} - -var _ DB = (*CLevelDB)(nil) - -type CLevelDB struct { - db *levigo.DB - ro *levigo.ReadOptions - wo *levigo.WriteOptions - woSync *levigo.WriteOptions -} - -func NewCLevelDB(name string, dir string) (*CLevelDB, error) { - dbPath := filepath.Join(dir, name+".db") - - opts := levigo.NewOptions() - opts.SetCache(levigo.NewLRUCache(1 << 30)) - opts.SetCreateIfMissing(true) - db, err := levigo.Open(dbPath, opts) - if err != nil { - return nil, err - } - ro := levigo.NewReadOptions() - wo := levigo.NewWriteOptions() - woSync := levigo.NewWriteOptions() - woSync.SetSync(true) - database := &CLevelDB{ - db: db, - ro: ro, - wo: wo, - woSync: woSync, - } 
- return database, nil -} - -// Implements DB. -func (db *CLevelDB) Get(key []byte) ([]byte, error) { - key = nonNilBytes(key) - res, err := db.db.Get(db.ro, key) - if err != nil { - return nil, err - } - return res, nil -} - -// Implements DB. -func (db *CLevelDB) Has(key []byte) (bool, error) { - bytes, err := db.Get(key) - if err != nil { - return false, err - } - return bytes != nil, nil -} - -// Implements DB. -func (db *CLevelDB) Set(key []byte, value []byte) error { - key = nonNilBytes(key) - value = nonNilBytes(value) - if err := db.db.Put(db.wo, key, value); err != nil { - return err - } - return nil -} - -// Implements DB. -func (db *CLevelDB) SetSync(key []byte, value []byte) error { - key = nonNilBytes(key) - value = nonNilBytes(value) - if err := db.db.Put(db.woSync, key, value); err != nil { - return err - } - return nil -} - -// Implements DB. -func (db *CLevelDB) Delete(key []byte) error { - key = nonNilBytes(key) - if err := db.db.Delete(db.wo, key); err != nil { - return err - } - return nil -} - -// Implements DB. -func (db *CLevelDB) DeleteSync(key []byte) error { - key = nonNilBytes(key) - if err := db.db.Delete(db.woSync, key); err != nil { - return err - } - return nil -} - -func (db *CLevelDB) DB() *levigo.DB { - return db.db -} - -// Implements DB. -func (db *CLevelDB) Close() error { - db.db.Close() - db.ro.Close() - db.wo.Close() - db.woSync.Close() - return nil -} - -// Implements DB. -func (db *CLevelDB) Print() error { - itr, err := db.Iterator(nil, nil) - if err != nil { - return err - } - defer itr.Close() - for ; itr.Valid(); itr.Next() { - key := itr.Key() - value := itr.Value() - fmt.Printf("[%X]:\t[%X]\n", key, value) - } - return nil -} - -// Implements DB. 
-func (db *CLevelDB) Stats() map[string]string { - keys := []string{ - "leveldb.aliveiters", - "leveldb.alivesnaps", - "leveldb.blockpool", - "leveldb.cachedblock", - "leveldb.num-files-at-level{n}", - "leveldb.openedtables", - "leveldb.sstables", - "leveldb.stats", - } - - stats := make(map[string]string, len(keys)) - for _, key := range keys { - str := db.db.PropertyValue(key) - stats[key] = str - } - return stats -} - -//---------------------------------------- -// Batch - -// Implements DB. -func (db *CLevelDB) NewBatch() Batch { - batch := levigo.NewWriteBatch() - return &cLevelDBBatch{db, batch} -} - -type cLevelDBBatch struct { - db *CLevelDB - batch *levigo.WriteBatch -} - -// Implements Batch. -func (mBatch *cLevelDBBatch) Set(key, value []byte) { - mBatch.batch.Put(key, value) -} - -// Implements Batch. -func (mBatch *cLevelDBBatch) Delete(key []byte) { - mBatch.batch.Delete(key) -} - -// Implements Batch. -func (mBatch *cLevelDBBatch) Write() error { - if err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch); err != nil { - return err - } - return nil -} - -// Implements Batch. -func (mBatch *cLevelDBBatch) WriteSync() error { - if err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch); err != nil { - return err - } - return nil -} - -// Implements Batch. -func (mBatch *cLevelDBBatch) Close() { - mBatch.batch.Close() -} - -//---------------------------------------- -// Iterator -// NOTE This is almost identical to db/go_level_db.Iterator -// Before creating a third version, refactor. 
- -func (db *CLevelDB) Iterator(start, end []byte) (Iterator, error) { - itr := db.db.NewIterator(db.ro) - return newCLevelDBIterator(itr, start, end, false), nil -} - -func (db *CLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { - itr := db.db.NewIterator(db.ro) - return newCLevelDBIterator(itr, start, end, true), nil -} - -var _ Iterator = (*cLevelDBIterator)(nil) - -type cLevelDBIterator struct { - source *levigo.Iterator - start, end []byte - isReverse bool - isInvalid bool -} - -func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { - if isReverse { - if end == nil { - source.SeekToLast() - } else { - source.Seek(end) - if source.Valid() { - eoakey := source.Key() // end or after key - if bytes.Compare(end, eoakey) <= 0 { - source.Prev() - } - } else { - source.SeekToLast() - } - } - } else { - if start == nil { - source.SeekToFirst() - } else { - source.Seek(start) - } - } - return &cLevelDBIterator{ - source: source, - start: start, - end: end, - isReverse: isReverse, - isInvalid: false, - } -} - -func (itr cLevelDBIterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr cLevelDBIterator) Valid() bool { - - // Once invalid, forever invalid. - if itr.isInvalid { - return false - } - - // Panic on DB error. No way to recover. - itr.assertNoError() - - // If source is invalid, invalid. - if !itr.source.Valid() { - itr.isInvalid = true - return false - } - - // If key is end or past it, invalid. - var start = itr.start - var end = itr.end - var key = itr.source.Key() - if itr.isReverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.isInvalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.isInvalid = true - return false - } - } - - // It's valid. 
- return true -} - -func (itr cLevelDBIterator) Key() []byte { - itr.assertNoError() - itr.assertIsValid() - return itr.source.Key() -} - -func (itr cLevelDBIterator) Value() []byte { - itr.assertNoError() - itr.assertIsValid() - return itr.source.Value() -} - -func (itr cLevelDBIterator) Next() { - itr.assertNoError() - itr.assertIsValid() - if itr.isReverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr cLevelDBIterator) Error() error { - return itr.source.GetError() -} - -func (itr cLevelDBIterator) Close() { - itr.source.Close() -} - -func (itr cLevelDBIterator) assertNoError() { - err := itr.source.GetError() - if err != nil { - panic(err) - } -} - -func (itr cLevelDBIterator) assertIsValid() { - if !itr.Valid() { - panic("cLevelDBIterator is invalid") - } -} diff --git a/cleveldb.go b/cleveldb.go new file mode 100644 index 000000000..066126c1f --- /dev/null +++ b/cleveldb.go @@ -0,0 +1,176 @@ +// +build cleveldb + +package db + +import ( + "fmt" + "path/filepath" + + "github.com/jmhodges/levigo" +) + +func init() { + dbCreator := func(name string, dir string) (DB, error) { + return NewCLevelDB(name, dir) + } + registerDBCreator(CLevelDBBackend, dbCreator, false) +} + +// CLevelDB uses the C LevelDB database via a Go wrapper. +type CLevelDB struct { + db *levigo.DB + ro *levigo.ReadOptions + wo *levigo.WriteOptions + woSync *levigo.WriteOptions +} + +var _ DB = (*CLevelDB)(nil) + +// NewCLevelDB creates a new CLevelDB. 
+func NewCLevelDB(name string, dir string) (*CLevelDB, error) { + dbPath := filepath.Join(dir, name+".db") + + opts := levigo.NewOptions() + opts.SetCache(levigo.NewLRUCache(1 << 30)) + opts.SetCreateIfMissing(true) + db, err := levigo.Open(dbPath, opts) + if err != nil { + return nil, err + } + ro := levigo.NewReadOptions() + wo := levigo.NewWriteOptions() + woSync := levigo.NewWriteOptions() + woSync.SetSync(true) + database := &CLevelDB{ + db: db, + ro: ro, + wo: wo, + woSync: woSync, + } + return database, nil +} + +// Get implements DB. +func (db *CLevelDB) Get(key []byte) ([]byte, error) { + key = nonNilBytes(key) + res, err := db.db.Get(db.ro, key) + if err != nil { + return nil, err + } + return res, nil +} + +// Has implements DB. +func (db *CLevelDB) Has(key []byte) (bool, error) { + bytes, err := db.Get(key) + if err != nil { + return false, err + } + return bytes != nil, nil +} + +// Set implements DB. +func (db *CLevelDB) Set(key []byte, value []byte) error { + key = nonNilBytes(key) + value = nonNilBytes(value) + if err := db.db.Put(db.wo, key, value); err != nil { + return err + } + return nil +} + +// SetSync implements DB. +func (db *CLevelDB) SetSync(key []byte, value []byte) error { + key = nonNilBytes(key) + value = nonNilBytes(value) + if err := db.db.Put(db.woSync, key, value); err != nil { + return err + } + return nil +} + +// Delete implements DB. +func (db *CLevelDB) Delete(key []byte) error { + key = nonNilBytes(key) + if err := db.db.Delete(db.wo, key); err != nil { + return err + } + return nil +} + +// DeleteSync implements DB. +func (db *CLevelDB) DeleteSync(key []byte) error { + key = nonNilBytes(key) + if err := db.db.Delete(db.woSync, key); err != nil { + return err + } + return nil +} + +// FIXME This should not be exposed +func (db *CLevelDB) DB() *levigo.DB { + return db.db +} + +// Close implements DB. 
+func (db *CLevelDB) Close() error { + db.db.Close() + db.ro.Close() + db.wo.Close() + db.woSync.Close() + return nil +} + +// Print implements DB. +func (db *CLevelDB) Print() error { + itr, err := db.Iterator(nil, nil) + if err != nil { + return err + } + defer itr.Close() + for ; itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } + return nil +} + +// Stats implements DB. +func (db *CLevelDB) Stats() map[string]string { + keys := []string{ + "leveldb.aliveiters", + "leveldb.alivesnaps", + "leveldb.blockpool", + "leveldb.cachedblock", + "leveldb.num-files-at-level{n}", + "leveldb.openedtables", + "leveldb.sstables", + "leveldb.stats", + } + + stats := make(map[string]string, len(keys)) + for _, key := range keys { + str := db.db.PropertyValue(key) + stats[key] = str + } + return stats +} + +// NewBatch implements DB. +func (db *CLevelDB) NewBatch() Batch { + batch := levigo.NewWriteBatch() + return &cLevelDBBatch{db, batch} +} + +// Iterator implements DB. +func (db *CLevelDB) Iterator(start, end []byte) (Iterator, error) { + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, false), nil +} + +// ReverseIterator implements DB. +func (db *CLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { + itr := db.db.NewIterator(db.ro) + return newCLevelDBIterator(itr, start, end, true), nil +} diff --git a/cleveldb_batch.go b/cleveldb_batch.go new file mode 100644 index 000000000..c98fe3354 --- /dev/null +++ b/cleveldb_batch.go @@ -0,0 +1,42 @@ +// +build cleveldb + +package db + +import "github.com/jmhodges/levigo" + +// cLevelDBBatch is a LevelDB batch. +type cLevelDBBatch struct { + db *CLevelDB + batch *levigo.WriteBatch +} + +// Set implements Batch. +func (b *cLevelDBBatch) Set(key, value []byte) { + b.batch.Put(key, value) +} + +// Delete implements Batch. +func (b *cLevelDBBatch) Delete(key []byte) { + b.batch.Delete(key) +} + +// Write implements Batch. 
+func (b *cLevelDBBatch) Write() error { + if err := b.db.db.Write(b.db.wo, b.batch); err != nil { + return err + } + return nil +} + +// WriteSync implements Batch. +func (b *cLevelDBBatch) WriteSync() error { + if err := b.db.db.Write(b.db.woSync, b.batch); err != nil { + return err + } + return nil +} + +// Close implements Batch. +func (b *cLevelDBBatch) Close() { + b.batch.Close() +} diff --git a/cleveldb_iterator.go b/cleveldb_iterator.go new file mode 100644 index 000000000..951fd0407 --- /dev/null +++ b/cleveldb_iterator.go @@ -0,0 +1,140 @@ +// +build cleveldb + +package db + +import ( + "bytes" + + "github.com/jmhodges/levigo" +) + +// cLevelDBIterator is a cLevelDB iterator. +type cLevelDBIterator struct { + source *levigo.Iterator + start, end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*cLevelDBIterator)(nil) + +func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { + if isReverse { + if end == nil { + source.SeekToLast() + } else { + source.Seek(end) + if source.Valid() { + eoakey := source.Key() // end or after key + if bytes.Compare(end, eoakey) <= 0 { + source.Prev() + } + } else { + source.SeekToLast() + } + } + } else { + if start == nil { + source.SeekToFirst() + } else { + source.Seek(start) + } + } + return &cLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +// Domain implements Iterator. +func (itr cLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Valid implements Iterator. +func (itr cLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. 
+ var start = itr.start + var end = itr.end + var key = itr.source.Key() + if itr.isReverse { + if start != nil && bytes.Compare(key, start) < 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // It's valid. + return true +} + +// Key implements Iterator. +func (itr cLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() +} + +// Value implements Iterator. +func (itr cLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} + +// Next implements Iterator. +func (itr cLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +// Error implements Iterator. +func (itr cLevelDBIterator) Error() error { + return itr.source.GetError() +} + +// Close implements Iterator. +func (itr cLevelDBIterator) Close() { + itr.source.Close() +} + +func (itr cLevelDBIterator) assertNoError() { + err := itr.source.GetError() + if err != nil { + panic(err) + } +} + +func (itr cLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("cLevelDBIterator is invalid") + } +} diff --git a/c_level_db_test.go b/cleveldb_test.go similarity index 90% rename from c_level_db_test.go rename to cleveldb_test.go index 57f3ebb4f..e893c09af 100644 --- a/c_level_db_test.go +++ b/cleveldb_test.go @@ -78,18 +78,6 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { db.Close() } -/* -func int642Bytes(i int64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - return buf -} - -func bytes2Int64(buf []byte) int64 { - return int64(binary.BigEndian.Uint64(buf)) -} -*/ - func TestCLevelDBBackend(t *testing.T) { name := fmt.Sprintf("test_%x", randStr(12)) // Can't use "" (current directory) or "./" here because levigo.Open returns: diff --git a/go_level_db.go b/go_level_db.go deleted 
file mode 100644 index 126096f96..000000000 --- a/go_level_db.go +++ /dev/null @@ -1,353 +0,0 @@ -package db - -import ( - "bytes" - "fmt" - "path/filepath" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func init() { - dbCreator := func(name string, dir string) (DB, error) { - return NewGoLevelDB(name, dir) - } - registerDBCreator(GoLevelDBBackend, dbCreator, false) -} - -var _ DB = (*GoLevelDB)(nil) - -type GoLevelDB struct { - db *leveldb.DB -} - -func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { - return NewGoLevelDBWithOpts(name, dir, nil) -} - -func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) { - dbPath := filepath.Join(dir, name+".db") - db, err := leveldb.OpenFile(dbPath, o) - if err != nil { - return nil, err - } - database := &GoLevelDB{ - db: db, - } - return database, nil -} - -// Implements DB. -func (db *GoLevelDB) Get(key []byte) ([]byte, error) { - key = nonNilBytes(key) - res, err := db.db.Get(key, nil) - if err != nil { - if err == errors.ErrNotFound { - return nil, nil - } - return nil, err - } - return res, nil -} - -// Implements DB. -func (db *GoLevelDB) Has(key []byte) (bool, error) { - bytes, err := db.Get(key) - if err != nil { - return false, err - } - return bytes != nil, nil -} - -// Implements DB. -func (db *GoLevelDB) Set(key []byte, value []byte) error { - key = nonNilBytes(key) - value = nonNilBytes(value) - if err := db.db.Put(key, value, nil); err != nil { - return err - } - return nil -} - -// Implements DB. -func (db *GoLevelDB) SetSync(key []byte, value []byte) error { - key = nonNilBytes(key) - value = nonNilBytes(value) - if err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}); err != nil { - return err - } - return nil -} - -// Implements DB. 
-func (db *GoLevelDB) Delete(key []byte) error { - key = nonNilBytes(key) - if err := db.db.Delete(key, nil); err != nil { - return err - } - return nil -} - -// Implements DB. -func (db *GoLevelDB) DeleteSync(key []byte) error { - key = nonNilBytes(key) - err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) - if err != nil { - return err - } - return nil -} - -func (db *GoLevelDB) DB() *leveldb.DB { - return db.db -} - -// Implements DB. -func (db *GoLevelDB) Close() error { - if err := db.db.Close(); err != nil { - return err - } - return nil -} - -// Implements DB. -func (db *GoLevelDB) Print() error { - str, err := db.db.GetProperty("leveldb.stats") - if err != nil { - return err - } - fmt.Printf("%v\n", str) - - itr := db.db.NewIterator(nil, nil) - for itr.Next() { - key := itr.Key() - value := itr.Value() - fmt.Printf("[%X]:\t[%X]\n", key, value) - } - return nil -} - -// Implements DB. -func (db *GoLevelDB) Stats() map[string]string { - keys := []string{ - "leveldb.num-files-at-level{n}", - "leveldb.stats", - "leveldb.sstables", - "leveldb.blockpool", - "leveldb.cachedblock", - "leveldb.openedtables", - "leveldb.alivesnaps", - "leveldb.aliveiters", - } - - stats := make(map[string]string) - for _, key := range keys { - str, err := db.db.GetProperty(key) - if err == nil { - stats[key] = str - } - } - return stats -} - -//---------------------------------------- -// Batch - -// Implements DB. -func (db *GoLevelDB) NewBatch() Batch { - batch := new(leveldb.Batch) - return &goLevelDBBatch{db, batch} -} - -type goLevelDBBatch struct { - db *GoLevelDB - batch *leveldb.Batch -} - -// Implements Batch. -func (mBatch *goLevelDBBatch) Set(key, value []byte) { - mBatch.batch.Put(key, value) -} - -// Implements Batch. -func (mBatch *goLevelDBBatch) Delete(key []byte) { - mBatch.batch.Delete(key) -} - -// Implements Batch. 
-func (mBatch *goLevelDBBatch) Write() error { - err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false}) - if err != nil { - return err - } - return nil -} - -// Implements Batch. -func (mBatch *goLevelDBBatch) WriteSync() error { - err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true}) - if err != nil { - return err - } - return nil -} - -// Implements Batch. -func (mBatch *goLevelDBBatch) Close() { - mBatch.batch.Reset() -} - -//---------------------------------------- -// Iterator -// NOTE This is almost identical to db/c_level_db.Iterator -// Before creating a third version, refactor. - -// Implements DB. -func (db *GoLevelDB) Iterator(start, end []byte) (Iterator, error) { - itr := db.db.NewIterator(nil, nil) - return newGoLevelDBIterator(itr, start, end, false), nil -} - -// Implements DB. -func (db *GoLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { - itr := db.db.NewIterator(nil, nil) - return newGoLevelDBIterator(itr, start, end, true), nil -} - -type goLevelDBIterator struct { - source iterator.Iterator - start []byte - end []byte - isReverse bool - isInvalid bool -} - -var _ Iterator = (*goLevelDBIterator)(nil) - -func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { - if isReverse { - if end == nil { - source.Last() - } else { - valid := source.Seek(end) - if valid { - eoakey := source.Key() // end or after key - if bytes.Compare(end, eoakey) <= 0 { - source.Prev() - } - } else { - source.Last() - } - } - } else { - if start == nil { - source.First() - } else { - source.Seek(start) - } - } - return &goLevelDBIterator{ - source: source, - start: start, - end: end, - isReverse: isReverse, - isInvalid: false, - } -} - -// Implements Iterator. -func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -// Implements Iterator. -func (itr *goLevelDBIterator) Valid() bool { - - // Once invalid, forever invalid. 
- if itr.isInvalid { - return false - } - - // Panic on DB error. No way to recover. - itr.assertNoError() - - // If source is invalid, invalid. - if !itr.source.Valid() { - itr.isInvalid = true - return false - } - - // If key is end or past it, invalid. - var start = itr.start - var end = itr.end - var key = itr.source.Key() - - if itr.isReverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.isInvalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.isInvalid = true - return false - } - } - - // Valid - return true -} - -// Implements Iterator. -func (itr *goLevelDBIterator) Key() []byte { - // Key returns a copy of the current key. - // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 - itr.assertNoError() - itr.assertIsValid() - return cp(itr.source.Key()) -} - -// Implements Iterator. -func (itr *goLevelDBIterator) Value() []byte { - // Value returns a copy of the current value. - // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 - itr.assertNoError() - itr.assertIsValid() - return cp(itr.source.Value()) -} - -// Implements Iterator. -func (itr *goLevelDBIterator) Next() { - itr.assertNoError() - itr.assertIsValid() - if itr.isReverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr *goLevelDBIterator) Error() error { - return itr.source.Error() -} - -// Implements Iterator. 
-func (itr *goLevelDBIterator) Close() { - itr.source.Release() -} - -func (itr *goLevelDBIterator) assertNoError() { - err := itr.source.Error() - if err != nil { - panic(err) - } -} - -func (itr goLevelDBIterator) assertIsValid() { - if !itr.Valid() { - panic("goLevelDBIterator is invalid") - } -} diff --git a/goleveldb.go b/goleveldb.go new file mode 100644 index 000000000..0c54cdee9 --- /dev/null +++ b/goleveldb.go @@ -0,0 +1,170 @@ +package db + +import ( + "fmt" + "path/filepath" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func init() { + dbCreator := func(name string, dir string) (DB, error) { + return NewGoLevelDB(name, dir) + } + registerDBCreator(GoLevelDBBackend, dbCreator, false) +} + +type GoLevelDB struct { + db *leveldb.DB +} + +var _ DB = (*GoLevelDB)(nil) + +func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { + return NewGoLevelDBWithOpts(name, dir, nil) +} + +func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) { + dbPath := filepath.Join(dir, name+".db") + db, err := leveldb.OpenFile(dbPath, o) + if err != nil { + return nil, err + } + database := &GoLevelDB{ + db: db, + } + return database, nil +} + +// Get implements DB. +func (db *GoLevelDB) Get(key []byte) ([]byte, error) { + key = nonNilBytes(key) + res, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return nil, nil + } + return nil, err + } + return res, nil +} + +// Has implements DB. +func (db *GoLevelDB) Has(key []byte) (bool, error) { + bytes, err := db.Get(key) + if err != nil { + return false, err + } + return bytes != nil, nil +} + +// Set implements DB. +func (db *GoLevelDB) Set(key []byte, value []byte) error { + key = nonNilBytes(key) + value = nonNilBytes(value) + if err := db.db.Put(key, value, nil); err != nil { + return err + } + return nil +} + +// SetSync implements DB. 
+func (db *GoLevelDB) SetSync(key []byte, value []byte) error { + key = nonNilBytes(key) + value = nonNilBytes(value) + if err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}); err != nil { + return err + } + return nil +} + +// Delete implements DB. +func (db *GoLevelDB) Delete(key []byte) error { + key = nonNilBytes(key) + if err := db.db.Delete(key, nil); err != nil { + return err + } + return nil +} + +// DeleteSync implements DB. +func (db *GoLevelDB) DeleteSync(key []byte) error { + key = nonNilBytes(key) + err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) + if err != nil { + return err + } + return nil +} + +func (db *GoLevelDB) DB() *leveldb.DB { + return db.db +} + +// Close implements DB. +func (db *GoLevelDB) Close() error { + if err := db.db.Close(); err != nil { + return err + } + return nil +} + +// Print implements DB. +func (db *GoLevelDB) Print() error { + str, err := db.db.GetProperty("leveldb.stats") + if err != nil { + return err + } + fmt.Printf("%v\n", str) + + itr := db.db.NewIterator(nil, nil) + for itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } + return nil +} + +// Stats implements DB. +func (db *GoLevelDB) Stats() map[string]string { + keys := []string{ + "leveldb.num-files-at-level{n}", + "leveldb.stats", + "leveldb.sstables", + "leveldb.blockpool", + "leveldb.cachedblock", + "leveldb.openedtables", + "leveldb.alivesnaps", + "leveldb.aliveiters", + } + + stats := make(map[string]string) + for _, key := range keys { + str, err := db.db.GetProperty(key) + if err == nil { + stats[key] = str + } + } + return stats +} + +// NewBatch implements DB. +func (db *GoLevelDB) NewBatch() Batch { + batch := new(leveldb.Batch) + return &goLevelDBBatch{db, batch} +} + +// Iterator implements DB. 
+func (db *GoLevelDB) Iterator(start, end []byte) (Iterator, error) { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false), nil +} + +// ReverseIterator implements DB. +func (db *GoLevelDB) ReverseIterator(start, end []byte) (Iterator, error) { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, true), nil +} diff --git a/goleveldb_batch.go b/goleveldb_batch.go new file mode 100644 index 000000000..ec290fe10 --- /dev/null +++ b/goleveldb_batch.go @@ -0,0 +1,46 @@ +package db + +import ( + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +type goLevelDBBatch struct { + db *GoLevelDB + batch *leveldb.Batch +} + +var _ Batch = (*goLevelDBBatch)(nil) + +// Set implements Batch. +func (b *goLevelDBBatch) Set(key, value []byte) { + b.batch.Put(key, value) +} + +// Delete implements Batch. +func (b *goLevelDBBatch) Delete(key []byte) { + b.batch.Delete(key) +} + +// Write implements Batch. +func (b *goLevelDBBatch) Write() error { + err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: false}) + if err != nil { + return err + } + return nil +} + +// WriteSync implements Batch. +func (b *goLevelDBBatch) WriteSync() error { + err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: true}) + if err != nil { + return err + } + return nil +} + +// Close implements Batch. 
+func (b *goLevelDBBatch) Close() { + b.batch.Reset() +} diff --git a/goleveldb_iterator.go b/goleveldb_iterator.go new file mode 100644 index 000000000..3a13c4d6f --- /dev/null +++ b/goleveldb_iterator.go @@ -0,0 +1,143 @@ +package db + +import ( + "bytes" + + "github.com/syndtr/goleveldb/leveldb/iterator" +) + +type goLevelDBIterator struct { + source iterator.Iterator + start []byte + end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*goLevelDBIterator)(nil) + +func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { + if isReverse { + if end == nil { + source.Last() + } else { + valid := source.Seek(end) + if valid { + eoakey := source.Key() // end or after key + if bytes.Compare(end, eoakey) <= 0 { + source.Prev() + } + } else { + source.Last() + } + } + } else { + if start == nil { + source.First() + } else { + source.Seek(start) + } + } + return &goLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +// Domain implements Iterator. +func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Valid implements Iterator. +func (itr *goLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. + var start = itr.start + var end = itr.end + var key = itr.source.Key() + + if itr.isReverse { + if start != nil && bytes.Compare(key, start) < 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // Valid + return true +} + +// Key implements Iterator. +func (itr *goLevelDBIterator) Key() []byte { + // Key returns a copy of the current key. 
+ // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 + itr.assertNoError() + itr.assertIsValid() + return cp(itr.source.Key()) +} + +// Value implements Iterator. +func (itr *goLevelDBIterator) Value() []byte { + // Value returns a copy of the current value. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 + itr.assertNoError() + itr.assertIsValid() + return cp(itr.source.Value()) +} + +// Next implements Iterator. +func (itr *goLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +// Error implements Iterator. +func (itr *goLevelDBIterator) Error() error { + return itr.source.Error() +} + +// Close implements Iterator. +func (itr *goLevelDBIterator) Close() { + itr.source.Release() +} + +func (itr *goLevelDBIterator) assertNoError() { + err := itr.source.Error() + if err != nil { + panic(err) + } +} + +func (itr goLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("goLevelDBIterator is invalid") + } +} diff --git a/go_level_db_test.go b/goleveldb_test.go similarity index 100% rename from go_level_db_test.go rename to goleveldb_test.go diff --git a/prefix_db.go b/prefix_db.go deleted file mode 100644 index fd6e5800e..000000000 --- a/prefix_db.go +++ /dev/null @@ -1,342 +0,0 @@ -package db - -import ( - "bytes" - "fmt" - "sync" -) - -// IteratePrefix is a convenience function for iterating over a key domain -// restricted by prefix. -func IteratePrefix(db DB, prefix []byte) (Iterator, error) { - var start, end []byte - if len(prefix) == 0 { - start = nil - end = nil - } else { - start = cp(prefix) - end = cpIncr(prefix) - } - itr, err := db.Iterator(start, end) - if err != nil { - return nil, err - } - return itr, nil -} - -/* -TODO: Make test, maybe rename. 
-// Like IteratePrefix but the iterator strips the prefix from the keys. -func IteratePrefixStripped(db DB, prefix []byte) Iterator { - start, end := ... - return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix)) -} -*/ - -//---------------------------------------- -// prefixDB - -type PrefixDB struct { - mtx sync.Mutex - prefix []byte - db DB -} - -// NewPrefixDB lets you namespace multiple DBs within a single DB. -func NewPrefixDB(db DB, prefix []byte) *PrefixDB { - return &PrefixDB{ - prefix: prefix, - db: db, - } -} - -// Implements atomicSetDeleter. -func (pdb *PrefixDB) Mutex() *sync.Mutex { - return &(pdb.mtx) -} - -// Implements DB. -func (pdb *PrefixDB) Get(key []byte) ([]byte, error) { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - pkey := pdb.prefixed(key) - value, err := pdb.db.Get(pkey) - if err != nil { - return nil, err - } - return value, nil -} - -// Implements DB. -func (pdb *PrefixDB) Has(key []byte) (bool, error) { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - ok, err := pdb.db.Has(pdb.prefixed(key)) - if err != nil { - return ok, err - } - - return ok, nil -} - -// Implements DB. -func (pdb *PrefixDB) Set(key []byte, value []byte) error { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - pkey := pdb.prefixed(key) - if err := pdb.db.Set(pkey, value); err != nil { - return err - } - return nil -} - -// Implements DB. -func (pdb *PrefixDB) SetSync(key []byte, value []byte) error { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - return pdb.db.SetSync(pdb.prefixed(key), value) -} - -// Implements DB. -func (pdb *PrefixDB) Delete(key []byte) error { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - return pdb.db.Delete(pdb.prefixed(key)) -} - -// Implements DB. -func (pdb *PrefixDB) DeleteSync(key []byte) error { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - return pdb.db.DeleteSync(pdb.prefixed(key)) -} - -// Implements DB. 
-func (pdb *PrefixDB) Iterator(start, end []byte) (Iterator, error) { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - var pstart, pend []byte - pstart = append(cp(pdb.prefix), start...) - if end == nil { - pend = cpIncr(pdb.prefix) - } else { - pend = append(cp(pdb.prefix), end...) - } - itr, err := pdb.db.Iterator(pstart, pend) - if err != nil { - return nil, err - } - - return newPrefixIterator(pdb.prefix, start, end, itr) -} - -// Implements DB. -func (pdb *PrefixDB) ReverseIterator(start, end []byte) (Iterator, error) { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - var pstart, pend []byte - pstart = append(cp(pdb.prefix), start...) - if end == nil { - pend = cpIncr(pdb.prefix) - } else { - pend = append(cp(pdb.prefix), end...) - } - ritr, err := pdb.db.ReverseIterator(pstart, pend) - if err != nil { - return nil, err - } - - return newPrefixIterator(pdb.prefix, start, end, ritr) -} - -// Implements DB. -// Panics if the underlying DB is not an -// atomicSetDeleter. -func (pdb *PrefixDB) NewBatch() Batch { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) -} - -// Implements DB. -func (pdb *PrefixDB) Close() error { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - return pdb.db.Close() -} - -// Implements DB. -func (pdb *PrefixDB) Print() error { - fmt.Printf("prefix: %X\n", pdb.prefix) - - itr, err := pdb.Iterator(nil, nil) - if err != nil { - return err - } - defer itr.Close() - for ; itr.Valid(); itr.Next() { - key := itr.Key() - value := itr.Value() - fmt.Printf("[%X]:\t[%X]\n", key, value) - } - return nil -} - -// Implements DB. 
-func (pdb *PrefixDB) Stats() map[string]string { - stats := make(map[string]string) - stats["prefixdb.prefix.string"] = string(pdb.prefix) - stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix) - source := pdb.db.Stats() - for key, value := range source { - stats["prefixdb.source."+key] = value - } - return stats -} - -func (pdb *PrefixDB) prefixed(key []byte) []byte { - return append(cp(pdb.prefix), key...) -} - -//---------------------------------------- -// prefixBatch - -type prefixBatch struct { - prefix []byte - source Batch -} - -func newPrefixBatch(prefix []byte, source Batch) prefixBatch { - return prefixBatch{ - prefix: prefix, - source: source, - } -} - -func (pb prefixBatch) Set(key, value []byte) { - pkey := append(cp(pb.prefix), key...) - pb.source.Set(pkey, value) -} - -func (pb prefixBatch) Delete(key []byte) { - pkey := append(cp(pb.prefix), key...) - pb.source.Delete(pkey) -} - -func (pb prefixBatch) Write() error { - return pb.source.Write() -} - -func (pb prefixBatch) WriteSync() error { - return pb.source.WriteSync() -} - -func (pb prefixBatch) Close() { - pb.source.Close() -} - -//---------------------------------------- -// prefixIterator - -var _ Iterator = (*prefixIterator)(nil) - -// Strips prefix while iterating from Iterator. 
-type prefixIterator struct { - prefix []byte - start []byte - end []byte - source Iterator - valid bool -} - -func newPrefixIterator(prefix, start, end []byte, source Iterator) (*prefixIterator, error) { - - pitrInvalid := &prefixIterator{ - prefix: prefix, - start: start, - end: end, - source: source, - valid: false, - } - - if !source.Valid() { - return pitrInvalid, nil - } - key := source.Key() - - if !bytes.HasPrefix(key, prefix) { - return pitrInvalid, nil - } - return &prefixIterator{ - prefix: prefix, - start: start, - end: end, - source: source, - valid: true, - }, nil -} - -func (itr *prefixIterator) Domain() (start []byte, end []byte) { - return itr.start, itr.end -} - -func (itr *prefixIterator) Valid() bool { - return itr.valid && itr.source.Valid() -} - -func (itr *prefixIterator) Next() { - if !itr.valid { - panic("prefixIterator invalid; cannot call Next()") - } - itr.source.Next() - - if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) { - itr.valid = false - } -} - -func (itr *prefixIterator) Key() (key []byte) { - if !itr.valid { - panic("prefixIterator invalid; cannot call Key()") - } - key = itr.source.Key() - return stripPrefix(key, itr.prefix) -} - -func (itr *prefixIterator) Value() (value []byte) { - if !itr.valid { - panic("prefixIterator invalid; cannot call Value()") - } - value = itr.source.Value() - return value -} - -func (itr *prefixIterator) Error() error { - return itr.source.Error() -} - -func (itr *prefixIterator) Close() { - itr.source.Close() -} - -//---------------------------------------- - -func stripPrefix(key []byte, prefix []byte) (stripped []byte) { - if len(key) < len(prefix) { - panic("should not happen") - } - if !bytes.Equal(key[:len(prefix)], prefix) { - panic("should not happen") - } - return key[len(prefix):] -} diff --git a/prefixdb.go b/prefixdb.go new file mode 100644 index 000000000..2fcacc1a9 --- /dev/null +++ b/prefixdb.go @@ -0,0 +1,174 @@ +package db + +import ( + "fmt" + "sync" +) + 
+// PrefixDB wraps a namespace of another database as a logical database. +type PrefixDB struct { + mtx sync.Mutex + prefix []byte + db DB +} + +var _ DB = (*PrefixDB)(nil) + +// NewPrefixDB lets you namespace multiple DBs within a single DB. +func NewPrefixDB(db DB, prefix []byte) *PrefixDB { + return &PrefixDB{ + prefix: prefix, + db: db, + } +} + +// Get implements DB. +func (pdb *PrefixDB) Get(key []byte) ([]byte, error) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pkey := pdb.prefixed(key) + value, err := pdb.db.Get(pkey) + if err != nil { + return nil, err + } + return value, nil +} + +// Has implements DB. +func (pdb *PrefixDB) Has(key []byte) (bool, error) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + ok, err := pdb.db.Has(pdb.prefixed(key)) + if err != nil { + return ok, err + } + + return ok, nil +} + +// Set implements DB. +func (pdb *PrefixDB) Set(key []byte, value []byte) error { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + pkey := pdb.prefixed(key) + if err := pdb.db.Set(pkey, value); err != nil { + return err + } + return nil +} + +// SetSync implements DB. +func (pdb *PrefixDB) SetSync(key []byte, value []byte) error { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.SetSync(pdb.prefixed(key), value) +} + +// Delete implements DB. +func (pdb *PrefixDB) Delete(key []byte) error { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.Delete(pdb.prefixed(key)) +} + +// DeleteSync implements DB. +func (pdb *PrefixDB) DeleteSync(key []byte) error { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.DeleteSync(pdb.prefixed(key)) +} + +// Iterator implements DB. +func (pdb *PrefixDB) Iterator(start, end []byte) (Iterator, error) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + var pstart, pend []byte + pstart = append(cp(pdb.prefix), start...) + if end == nil { + pend = cpIncr(pdb.prefix) + } else { + pend = append(cp(pdb.prefix), end...) 
+ } + itr, err := pdb.db.Iterator(pstart, pend) + if err != nil { + return nil, err + } + + return newPrefixIterator(pdb.prefix, start, end, itr) +} + +// ReverseIterator implements DB. +func (pdb *PrefixDB) ReverseIterator(start, end []byte) (Iterator, error) { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + var pstart, pend []byte + pstart = append(cp(pdb.prefix), start...) + if end == nil { + pend = cpIncr(pdb.prefix) + } else { + pend = append(cp(pdb.prefix), end...) + } + ritr, err := pdb.db.ReverseIterator(pstart, pend) + if err != nil { + return nil, err + } + + return newPrefixIterator(pdb.prefix, start, end, ritr) +} + +// NewBatch implements DB. +func (pdb *PrefixDB) NewBatch() Batch { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) +} + +// Close implements DB. +func (pdb *PrefixDB) Close() error { + pdb.mtx.Lock() + defer pdb.mtx.Unlock() + + return pdb.db.Close() +} + +// Print implements DB. +func (pdb *PrefixDB) Print() error { + fmt.Printf("prefix: %X\n", pdb.prefix) + + itr, err := pdb.Iterator(nil, nil) + if err != nil { + return err + } + defer itr.Close() + for ; itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() + fmt.Printf("[%X]:\t[%X]\n", key, value) + } + return nil +} + +// Stats implements DB. +func (pdb *PrefixDB) Stats() map[string]string { + stats := make(map[string]string) + stats["prefixdb.prefix.string"] = string(pdb.prefix) + stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix) + source := pdb.db.Stats() + for key, value := range source { + stats["prefixdb.source."+key] = value + } + return stats +} + +func (pdb *PrefixDB) prefixed(key []byte) []byte { + return append(cp(pdb.prefix), key...) 
+} diff --git a/prefixdb_batch.go b/prefixdb_batch.go new file mode 100644 index 000000000..a3547de18 --- /dev/null +++ b/prefixdb_batch.go @@ -0,0 +1,42 @@ +package db + +type prefixDBBatch struct { + prefix []byte + source Batch +} + +var _ Batch = (*prefixDBBatch)(nil) + +func newPrefixBatch(prefix []byte, source Batch) prefixDBBatch { + return prefixDBBatch{ + prefix: prefix, + source: source, + } +} + +// Set implements Batch. +func (pb prefixDBBatch) Set(key, value []byte) { + pkey := append(cp(pb.prefix), key...) + pb.source.Set(pkey, value) +} + +// Delete implements Batch. +func (pb prefixDBBatch) Delete(key []byte) { + pkey := append(cp(pb.prefix), key...) + pb.source.Delete(pkey) +} + +// Write implements Batch. +func (pb prefixDBBatch) Write() error { + return pb.source.Write() +} + +// WriteSync implements Batch. +func (pb prefixDBBatch) WriteSync() error { + return pb.source.WriteSync() +} + +// Close implements Batch. +func (pb prefixDBBatch) Close() { + pb.source.Close() +} diff --git a/prefixdb_iterator.go b/prefixdb_iterator.go new file mode 100644 index 000000000..9b50d4cd4 --- /dev/null +++ b/prefixdb_iterator.go @@ -0,0 +1,127 @@ +package db + +import "bytes" + +// IteratePrefix is a convenience function for iterating over a key domain +// restricted by prefix. +func IteratePrefix(db DB, prefix []byte) (Iterator, error) { + var start, end []byte + if len(prefix) == 0 { + start = nil + end = nil + } else { + start = cp(prefix) + end = cpIncr(prefix) + } + itr, err := db.Iterator(start, end) + if err != nil { + return nil, err + } + return itr, nil +} + +/* +TODO: Make test, maybe rename. +// Like IteratePrefix but the iterator strips the prefix from the keys. +func IteratePrefixStripped(db DB, prefix []byte) Iterator { + start, end := ... + return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix)) +} +*/ + +// Strips prefix while iterating from Iterator. 
+type prefixDBIterator struct {
+	prefix []byte
+	start  []byte
+	end    []byte
+	source Iterator
+	valid  bool
+}
+
+var _ Iterator = (*prefixDBIterator)(nil)
+
+func newPrefixIterator(prefix, start, end []byte, source Iterator) (*prefixDBIterator, error) {
+	pitrInvalid := &prefixDBIterator{
+		prefix: prefix,
+		start:  start,
+		end:    end,
+		source: source,
+		valid:  false,
+	}
+
+	if !source.Valid() {
+		return pitrInvalid, nil
+	}
+	key := source.Key()
+
+	if !bytes.HasPrefix(key, prefix) {
+		return pitrInvalid, nil
+	}
+	return &prefixDBIterator{
+		prefix: prefix,
+		start:  start,
+		end:    end,
+		source: source,
+		valid:  true,
+	}, nil
+}
+
+// Domain implements Iterator.
+func (itr *prefixDBIterator) Domain() (start []byte, end []byte) {
+	return itr.start, itr.end
+}
+
+// Valid implements Iterator.
+func (itr *prefixDBIterator) Valid() bool {
+	return itr.valid && itr.source.Valid()
+}
+
+// Next implements Iterator.
+func (itr *prefixDBIterator) Next() {
+	if !itr.valid {
+		panic("prefixIterator invalid; cannot call Next()")
+	}
+	itr.source.Next()
+
+	if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
+		itr.valid = false
+	}
+}
+
+// Key implements Iterator.
+func (itr *prefixDBIterator) Key() (key []byte) {
+	if !itr.valid {
+		panic("prefixIterator invalid; cannot call Key()")
+	}
+	key = itr.source.Key()
+	return stripPrefix(key, itr.prefix)
+}
+
+// Value implements Iterator.
+func (itr *prefixDBIterator) Value() (value []byte) {
+	if !itr.valid {
+		panic("prefixIterator invalid; cannot call Value()")
+	}
+	value = itr.source.Value()
+	return value
+}
+
+// Error implements Iterator.
+func (itr *prefixDBIterator) Error() error {
+	return itr.source.Error()
+}
+
+// Close implements Iterator.
+func (itr *prefixDBIterator) Close() { + itr.source.Close() +} + +func stripPrefix(key []byte, prefix []byte) (stripped []byte) { + if len(key) < len(prefix) { + panic("should not happen") + } + if !bytes.Equal(key[:len(prefix)], prefix) { + panic("should not happen") + } + return key[len(prefix):] +} diff --git a/prefix_db_test.go b/prefixdb_test.go similarity index 100% rename from prefix_db_test.go rename to prefixdb_test.go diff --git a/remotedb/batch.go b/remotedb/batch.go new file mode 100644 index 000000000..5fb92f30c --- /dev/null +++ b/remotedb/batch.go @@ -0,0 +1,54 @@ +package remotedb + +import ( + "github.com/pkg/errors" + + db "github.com/tendermint/tm-db" + protodb "github.com/tendermint/tm-db/remotedb/proto" +) + +type batch struct { + db *RemoteDB + ops []*protodb.Operation +} + +var _ db.Batch = (*batch)(nil) + +// Set implements Batch. +func (b *batch) Set(key, value []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key, Value: value}, + Type: protodb.Operation_SET, + } + b.ops = append(b.ops, op) +} + +// Delete implements Batch. +func (b *batch) Delete(key []byte) { + op := &protodb.Operation{ + Entity: &protodb.Entity{Key: key}, + Type: protodb.Operation_DELETE, + } + b.ops = append(b.ops, op) +} + +// Write implements Batch. +func (b *batch) Write() error { + if _, err := b.db.dc.BatchWrite(b.db.ctx, &protodb.Batch{Ops: b.ops}); err != nil { + return errors.Errorf("remoteDB.BatchWrite: %v", err) + } + return nil +} + +// WriteSync implements Batch. +func (b *batch) WriteSync() error { + if _, err := b.db.dc.BatchWriteSync(b.db.ctx, &protodb.Batch{Ops: b.ops}); err != nil { + return errors.Errorf("RemoteDB.BatchWriteSync: %v", err) + } + return nil +} + +// Close implements Batch. 
+func (b *batch) Close() { + b.ops = nil +} diff --git a/remotedb/iterator.go b/remotedb/iterator.go new file mode 100644 index 000000000..77b252512 --- /dev/null +++ b/remotedb/iterator.go @@ -0,0 +1,131 @@ +package remotedb + +import ( + "fmt" + + db "github.com/tendermint/tm-db" + protodb "github.com/tendermint/tm-db/remotedb/proto" +) + +func makeIterator(dic protodb.DB_IteratorClient) db.Iterator { + return &iterator{dic: dic} +} + +func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator { + return &reverseIterator{dric: dric} +} + +type reverseIterator struct { + dric protodb.DB_ReverseIteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*iterator)(nil) + +// Valid implements Iterator. +func (rItr *reverseIterator) Valid() bool { + return rItr.cur != nil && rItr.cur.Valid +} + +// Domain implements Iterator. +func (rItr *reverseIterator) Domain() (start, end []byte) { + if rItr.cur == nil || rItr.cur.Domain == nil { + return nil, nil + } + return rItr.cur.Domain.Start, rItr.cur.Domain.End +} + +// Next implements Iterator. +func (rItr *reverseIterator) Next() { + var err error + rItr.cur, err = rItr.dric.Recv() + if err != nil { + panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err)) + } +} + +// Key implements Iterator. +func (rItr *reverseIterator) Key() []byte { + if rItr.cur == nil { + panic("key does not exist") + } + return rItr.cur.Key +} + +// Value implements Iterator. +func (rItr *reverseIterator) Value() []byte { + if rItr.cur == nil { + panic("key does not exist") + } + return rItr.cur.Value +} + +// Error implements Iterator. +func (rItr *reverseIterator) Error() error { + return nil +} + +// Close implements Iterator. +func (rItr *reverseIterator) Close() {} + +// iterator implements the db.Iterator by retrieving +// streamed iterators from the remote backend as +// needed. It is NOT safe for concurrent usage, +// matching the behavior of other iterators. 
+type iterator struct { + dic protodb.DB_IteratorClient + cur *protodb.Iterator +} + +var _ db.Iterator = (*iterator)(nil) + +// Valid implements Iterator. +func (itr *iterator) Valid() bool { + return itr.cur != nil && itr.cur.Valid +} + +// Domain implements Iterator. +func (itr *iterator) Domain() (start, end []byte) { + if itr.cur == nil || itr.cur.Domain == nil { + return nil, nil + } + return itr.cur.Domain.Start, itr.cur.Domain.End +} + +// Next implements Iterator. +func (itr *iterator) Next() { + var err error + itr.cur, err = itr.dic.Recv() + if err != nil { + panic(fmt.Sprintf("remoteDB.Iterator.Next error: %v", err)) + } +} + +// Key implements Iterator. +func (itr *iterator) Key() []byte { + if itr.cur == nil { + return nil + } + return itr.cur.Key +} + +// Value implements Iterator. +func (itr *iterator) Value() []byte { + if itr.cur == nil { + panic("current poisition is not valid") + } + return itr.cur.Value +} + +// Error implements Iterator. +func (itr *iterator) Error() error { + return nil +} + +// Close implements Iterator. 
+func (itr *iterator) Close() { + err := itr.dic.CloseSend() + if err != nil { + panic(fmt.Sprintf("Error closing iterator: %v", err)) + } +} diff --git a/remotedb/remotedb.go b/remotedb/remotedb.go index bef708673..9da3dd08b 100644 --- a/remotedb/remotedb.go +++ b/remotedb/remotedb.go @@ -128,155 +128,3 @@ func (rd *RemoteDB) Iterator(start, end []byte) (db.Iterator, error) { } return makeIterator(dic), nil } - -func makeIterator(dic protodb.DB_IteratorClient) db.Iterator { - return &iterator{dic: dic} -} - -func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator { - return &reverseIterator{dric: dric} -} - -type reverseIterator struct { - dric protodb.DB_ReverseIteratorClient - cur *protodb.Iterator -} - -var _ db.Iterator = (*iterator)(nil) - -func (rItr *reverseIterator) Valid() bool { - return rItr.cur != nil && rItr.cur.Valid -} - -func (rItr *reverseIterator) Domain() (start, end []byte) { - if rItr.cur == nil || rItr.cur.Domain == nil { - return nil, nil - } - return rItr.cur.Domain.Start, rItr.cur.Domain.End -} - -// Next advances the current reverseIterator -func (rItr *reverseIterator) Next() { - var err error - rItr.cur, err = rItr.dric.Recv() - if err != nil { - panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err)) - } -} - -func (rItr *reverseIterator) Key() []byte { - if rItr.cur == nil { - panic("key does not exist") - } - return rItr.cur.Key -} - -func (rItr *reverseIterator) Value() []byte { - if rItr.cur == nil { - panic("key does not exist") - } - return rItr.cur.Value -} - -func (rItr *reverseIterator) Error() error { - return nil -} - -func (rItr *reverseIterator) Close() {} - -// iterator implements the db.Iterator by retrieving -// streamed iterators from the remote backend as -// needed. It is NOT safe for concurrent usage, -// matching the behavior of other iterators. 
-type iterator struct { - dic protodb.DB_IteratorClient - cur *protodb.Iterator -} - -var _ db.Iterator = (*iterator)(nil) - -func (itr *iterator) Valid() bool { - return itr.cur != nil && itr.cur.Valid -} - -func (itr *iterator) Domain() (start, end []byte) { - if itr.cur == nil || itr.cur.Domain == nil { - return nil, nil - } - return itr.cur.Domain.Start, itr.cur.Domain.End -} - -// Next advances the current iterator -func (itr *iterator) Next() { - var err error - itr.cur, err = itr.dic.Recv() - if err != nil { - panic(fmt.Sprintf("remoteDB.Iterator.Next error: %v", err)) - } -} - -func (itr *iterator) Key() []byte { - if itr.cur == nil { - return nil - } - return itr.cur.Key -} - -func (itr *iterator) Value() []byte { - if itr.cur == nil { - panic("current poisition is not valid") - } - return itr.cur.Value -} - -func (itr *iterator) Error() error { - return nil -} - -func (itr *iterator) Close() { - err := itr.dic.CloseSend() - if err != nil { - panic(fmt.Sprintf("Error closing iterator: %v", err)) - } -} - -type batch struct { - db *RemoteDB - ops []*protodb.Operation -} - -var _ db.Batch = (*batch)(nil) - -func (bat *batch) Set(key, value []byte) { - op := &protodb.Operation{ - Entity: &protodb.Entity{Key: key, Value: value}, - Type: protodb.Operation_SET, - } - bat.ops = append(bat.ops, op) -} - -func (bat *batch) Delete(key []byte) { - op := &protodb.Operation{ - Entity: &protodb.Entity{Key: key}, - Type: protodb.Operation_DELETE, - } - bat.ops = append(bat.ops, op) -} - -func (bat *batch) Write() error { - if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { - return errors.Errorf("remoteDB.BatchWrite: %v", err) - } - return nil -} - -func (bat *batch) WriteSync() error { - if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { - return errors.Errorf("RemoteDB.BatchWriteSync: %v", err) - } - return nil -} - -func (bat *batch) Close() { - bat.ops = nil -} diff --git a/rocks_db.go 
b/rocksdb.go similarity index 50% rename from rocks_db.go rename to rocksdb.go index 409ff5af3..b4bcf1def 100644 --- a/rocks_db.go +++ b/rocksdb.go @@ -3,7 +3,6 @@ package db import ( - "bytes" "fmt" "path/filepath" "runtime" @@ -18,8 +17,7 @@ func init() { registerDBCreator(RocksDBBackend, dbCreator, false) } -var _ DB = (*RocksDB)(nil) - +// RocksDB is a RocksDB backend. type RocksDB struct { db *gorocksdb.DB ro *gorocksdb.ReadOptions @@ -27,6 +25,8 @@ type RocksDB struct { woSync *gorocksdb.WriteOptions } +var _ DB = (*RocksDB)(nil) + func NewRocksDB(name string, dir string) (*RocksDB, error) { // default rocksdb option, good enough for most cases, including heavy workloads. // 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads). @@ -63,7 +63,7 @@ func NewRocksDBWithOptions(name string, dir string, opts *gorocksdb.Options) (*R return database, nil } -// Implements DB. +// Get implements DB. func (db *RocksDB) Get(key []byte) ([]byte, error) { key = nonNilBytes(key) res, err := db.db.Get(db.ro, key) @@ -73,7 +73,7 @@ func (db *RocksDB) Get(key []byte) ([]byte, error) { return moveSliceToBytes(res), nil } -// Implements DB. +// Has implements DB. func (db *RocksDB) Has(key []byte) (bool, error) { bytes, err := db.Get(key) if err != nil { @@ -82,7 +82,7 @@ func (db *RocksDB) Has(key []byte) (bool, error) { return bytes != nil, nil } -// Implements DB. +// Set implements DB. func (db *RocksDB) Set(key []byte, value []byte) error { key = nonNilBytes(key) value = nonNilBytes(value) @@ -93,7 +93,7 @@ func (db *RocksDB) Set(key []byte, value []byte) error { return nil } -// Implements DB. +// SetSync implements DB. func (db *RocksDB) SetSync(key []byte, value []byte) error { key = nonNilBytes(key) value = nonNilBytes(value) @@ -104,7 +104,7 @@ func (db *RocksDB) SetSync(key []byte, value []byte) error { return nil } -// Implements DB. +// Delete implements DB. 
func (db *RocksDB) Delete(key []byte) error { key = nonNilBytes(key) err := db.db.Delete(db.wo, key) @@ -114,7 +114,7 @@ func (db *RocksDB) Delete(key []byte) error { return nil } -// Implements DB. +// DeleteSync implements DB. func (db *RocksDB) DeleteSync(key []byte) error { key = nonNilBytes(key) err := db.db.Delete(db.woSync, key) @@ -128,7 +128,7 @@ func (db *RocksDB) DB() *gorocksdb.DB { return db.db } -// Implements DB. +// Close implements DB. func (db *RocksDB) Close() error { db.ro.Destroy() db.wo.Destroy() @@ -137,7 +137,7 @@ func (db *RocksDB) Close() error { return nil } -// Implements DB. +// Print implements DB. func (db *RocksDB) Print() error { itr, err := db.Iterator(nil, nil) if err != nil { @@ -152,7 +152,7 @@ func (db *RocksDB) Print() error { return nil } -// Implements DB. +// Stats implements DB. func (db *RocksDB) Stats() map[string]string { keys := []string{"rocksdb.stats"} stats := make(map[string]string, len(keys)) @@ -162,199 +162,20 @@ func (db *RocksDB) Stats() map[string]string { return stats } -//---------------------------------------- -// Batch - -// Implements DB. +// NewBatch implements DB. func (db *RocksDB) NewBatch() Batch { batch := gorocksdb.NewWriteBatch() return &rocksDBBatch{db, batch} } -type rocksDBBatch struct { - db *RocksDB - batch *gorocksdb.WriteBatch -} - -// Implements Batch. -func (mBatch *rocksDBBatch) Set(key, value []byte) { - mBatch.batch.Put(key, value) -} - -// Implements Batch. -func (mBatch *rocksDBBatch) Delete(key []byte) { - mBatch.batch.Delete(key) -} - -// Implements Batch. -func (mBatch *rocksDBBatch) Write() error { - err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) - if err != nil { - return err - } - return nil -} - -// Implements Batch. -func (mBatch *rocksDBBatch) WriteSync() error { - err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) - if err != nil { - return err - } - return nil -} - -// Implements Batch. 
-func (mBatch *rocksDBBatch) Close() { - mBatch.batch.Destroy() -} - -//---------------------------------------- -// Iterator -// NOTE This is almost identical to db/go_level_db.Iterator -// Before creating a third version, refactor. - +// Iterator implements DB. func (db *RocksDB) Iterator(start, end []byte) (Iterator, error) { itr := db.db.NewIterator(db.ro) return newRocksDBIterator(itr, start, end, false), nil } +// ReverseIterator implements DB. func (db *RocksDB) ReverseIterator(start, end []byte) (Iterator, error) { itr := db.db.NewIterator(db.ro) return newRocksDBIterator(itr, start, end, true), nil } - -var _ Iterator = (*rocksDBIterator)(nil) - -type rocksDBIterator struct { - source *gorocksdb.Iterator - start, end []byte - isReverse bool - isInvalid bool -} - -func newRocksDBIterator(source *gorocksdb.Iterator, start, end []byte, isReverse bool) *rocksDBIterator { - if isReverse { - if end == nil { - source.SeekToLast() - } else { - source.Seek(end) - if source.Valid() { - eoakey := moveSliceToBytes(source.Key()) // end or after key - if bytes.Compare(end, eoakey) <= 0 { - source.Prev() - } - } else { - source.SeekToLast() - } - } - } else { - if start == nil { - source.SeekToFirst() - } else { - source.Seek(start) - } - } - return &rocksDBIterator{ - source: source, - start: start, - end: end, - isReverse: isReverse, - isInvalid: false, - } -} - -func (itr rocksDBIterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr rocksDBIterator) Valid() bool { - - // Once invalid, forever invalid. - if itr.isInvalid { - return false - } - - // Panic on DB error. No way to recover. - itr.assertNoError() - - // If source is invalid, invalid. - if !itr.source.Valid() { - itr.isInvalid = true - return false - } - - // If key is end or past it, invalid. 
- var start = itr.start - var end = itr.end - var key = moveSliceToBytes(itr.source.Key()) - if itr.isReverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.isInvalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.isInvalid = true - return false - } - } - - // It's valid. - return true -} - -func (itr rocksDBIterator) Key() []byte { - itr.assertNoError() - itr.assertIsValid() - return moveSliceToBytes(itr.source.Key()) -} - -func (itr rocksDBIterator) Value() []byte { - itr.assertNoError() - itr.assertIsValid() - return moveSliceToBytes(itr.source.Value()) -} - -func (itr rocksDBIterator) Next() { - itr.assertNoError() - itr.assertIsValid() - if itr.isReverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr rocksDBIterator) Error() error { - return itr.source.Err() -} - -func (itr rocksDBIterator) Close() { - itr.source.Close() -} - -func (itr rocksDBIterator) assertNoError() { - if err := itr.source.Err(); err != nil { - panic(err) - } -} - -func (itr rocksDBIterator) assertIsValid() { - if !itr.Valid() { - panic("rocksDBIterator is invalid") - } -} - -// moveSliceToBytes will free the slice and copy out a go []byte -// This function can be applied on *Slice returned from Key() and Value() -// of an Iterator, because they are marked as freed. -func moveSliceToBytes(s *gorocksdb.Slice) []byte { - defer s.Free() - if !s.Exists() { - return nil - } - v := make([]byte, len(s.Data())) - copy(v, s.Data()) - return v -} diff --git a/rocksdb_batch.go b/rocksdb_batch.go new file mode 100644 index 000000000..085ec51ce --- /dev/null +++ b/rocksdb_batch.go @@ -0,0 +1,45 @@ +// +build rocksdb + +package db + +import "github.com/tecbot/gorocksdb" + +type rocksDBBatch struct { + db *RocksDB + batch *gorocksdb.WriteBatch +} + +var _ Batch = (*rocksDBBatch)(nil) + +// Set implements Batch. 
+func (mBatch *rocksDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +// Delete implements Batch. +func (mBatch *rocksDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +// Write implements Batch. +func (mBatch *rocksDBBatch) Write() error { + err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) + if err != nil { + return err + } + return nil +} + +// WriteSync mplements Batch. +func (mBatch *rocksDBBatch) WriteSync() error { + err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) + if err != nil { + return err + } + return nil +} + +// Close implements Batch. +func (mBatch *rocksDBBatch) Close() { + mBatch.batch.Destroy() +} diff --git a/rocksdb_iterator.go b/rocksdb_iterator.go new file mode 100644 index 000000000..301e49d15 --- /dev/null +++ b/rocksdb_iterator.go @@ -0,0 +1,151 @@ +// +build rocksdb + +package db + +import ( + "bytes" + + "github.com/tecbot/gorocksdb" +) + +type rocksDBIterator struct { + source *gorocksdb.Iterator + start, end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*rocksDBIterator)(nil) + +func newRocksDBIterator(source *gorocksdb.Iterator, start, end []byte, isReverse bool) *rocksDBIterator { + if isReverse { + if end == nil { + source.SeekToLast() + } else { + source.Seek(end) + if source.Valid() { + eoakey := moveSliceToBytes(source.Key()) // end or after key + if bytes.Compare(end, eoakey) <= 0 { + source.Prev() + } + } else { + source.SeekToLast() + } + } + } else { + if start == nil { + source.SeekToFirst() + } else { + source.Seek(start) + } + } + return &rocksDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, + } +} + +// Domain implements Iterator. +func (itr rocksDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +// Valid implements Iterator. +func (itr rocksDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { + return false + } + + // Panic on DB error. 
No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false + } + + // If key is end or past it, invalid. + var start = itr.start + var end = itr.end + var key = moveSliceToBytes(itr.source.Key()) + if itr.isReverse { + if start != nil && bytes.Compare(key, start) < 0 { + itr.isInvalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false + } + } + + // It's valid. + return true +} + +// Key implements Iterator. +func (itr rocksDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return moveSliceToBytes(itr.source.Key()) +} + +// Value implements Iterator. +func (itr rocksDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return moveSliceToBytes(itr.source.Value()) +} + +// Next implements Iterator. +func (itr rocksDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + if itr.isReverse { + itr.source.Prev() + } else { + itr.source.Next() + } +} + +// Error implements Iterator. +func (itr rocksDBIterator) Error() error { + return itr.source.Err() +} + +// Close implements Iterator. +func (itr rocksDBIterator) Close() { + itr.source.Close() +} + +func (itr rocksDBIterator) assertNoError() { + if err := itr.source.Err(); err != nil { + panic(err) + } +} + +func (itr rocksDBIterator) assertIsValid() { + if !itr.Valid() { + panic("rocksDBIterator is invalid") + } +} + +// moveSliceToBytes will free the slice and copy out a go []byte +// This function can be applied on *Slice returned from Key() and Value() +// of an Iterator, because they are marked as freed. 
+func moveSliceToBytes(s *gorocksdb.Slice) []byte { + defer s.Free() + if !s.Exists() { + return nil + } + v := make([]byte, len(s.Data())) + copy(v, s.Data()) + return v +} diff --git a/rocks_db_test.go b/rocksdb_test.go similarity index 100% rename from rocks_db_test.go rename to rocksdb_test.go From 70982435d41bc481f9816544f63f662d215b88b3 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 20:18:18 +0100 Subject: [PATCH 08/30] test: add test for batch reuse semantics (#67) --- backend_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/backend_test.go b/backend_test.go index 5e62c2eaa..f6d4a1e76 100644 --- a/backend_test.go +++ b/backend_test.go @@ -460,6 +460,19 @@ func testDBBatch(t *testing.T, backend BackendType) { // it should be possible to re-close the batch batch.Close() + // it should also be possible to reuse a closed batch as if it were a new one + batch.Set([]byte("c"), []byte{3}) + err = batch.Write() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) + batch.Close() + + batch.Delete([]byte("c")) + err = batch.WriteSync() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) + batch.Close() + // batches should also write changes in order batch = db.NewBatch() batch.Delete([]byte("a")) From 5db388c31082e0a02c9439c8828dbf03c395a5d7 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 9 Mar 2020 22:57:13 +0100 Subject: [PATCH 09/30] test: properly close iterator in assertKeyValues() (#68) --- backend_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend_test.go b/backend_test.go index f6d4a1e76..b008b82f7 100644 --- a/backend_test.go +++ b/backend_test.go @@ -504,6 +504,7 @@ func assertKeyValues(t *testing.T, db DB, expect map[string][]byte) { require.NoError(t, iter.Error()) actual[string(iter.Key())] = iter.Value() } + iter.Close() assert.Equal(t, expect, actual) } From fd58b75f49c3b70e33236a46e9dc1b1860479c31 Mon 
Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 11:24:25 +0100 Subject: [PATCH 10/30] test: defer iter.Close() in assertKeyValues() (#70) --- backend_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend_test.go b/backend_test.go index b008b82f7..423d7ba77 100644 --- a/backend_test.go +++ b/backend_test.go @@ -498,13 +498,13 @@ func testDBBatch(t *testing.T, backend BackendType) { func assertKeyValues(t *testing.T, db DB, expect map[string][]byte) { iter, err := db.Iterator(nil, nil) require.NoError(t, err) + defer iter.Close() actual := make(map[string][]byte) for ; iter.Valid(); iter.Next() { require.NoError(t, iter.Error()) actual[string(iter.Key())] = iter.Value() } - iter.Close() assert.Equal(t, expect, actual) } From 7204cbbcca7a579a7919606640a9c988bdd04248 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 11:25:06 +0100 Subject: [PATCH 11/30] boltdb: handle empty key placeholders in iterators (#69) --- CHANGELOG.md | 2 ++ boltdb.go | 7 +++-- boltdb_iterator.go | 76 +++++++++++++++++++++++++++++++++++----------- 3 files changed, 66 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a0cde3a0..4dae0fbac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ ### Bug Fixes +- [boltdb] Properly handle blank keys in iterators + - [goleveldb] [\#58](https://github.com/tendermint/tm-db/pull/58) Make `Batch.Close()` actually remove the batch contents ## 0.4.1 diff --git a/boltdb.go b/boltdb.go index c1a430082..522a43b51 100644 --- a/boltdb.go +++ b/boltdb.go @@ -11,7 +11,10 @@ import ( "github.com/pkg/errors" ) -var bucket = []byte("tm") +var ( + bucket = []byte("tm") + boltDBEmptyKey = []byte("nil") +) func init() { registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) { @@ -196,7 +199,7 @@ func (bdb *BoltDB) ReverseIterator(start, end []byte) (Iterator, error) { // WARNING: this may collude with "nil" user key! 
func nonEmptyKey(key []byte) []byte { if len(key) == 0 { - return []byte("nil") + return boltDBEmptyKey } return key } diff --git a/boltdb_iterator.go b/boltdb_iterator.go index 4f56a0da0..013510959 100644 --- a/boltdb_iterator.go +++ b/boltdb_iterator.go @@ -13,9 +13,10 @@ import ( type boltDBIterator struct { tx *bbolt.Tx - itr *bbolt.Cursor - start []byte - end []byte + itr *bbolt.Cursor + start []byte + end []byte + emptyKeyValue []byte // Tracks the value of the empty key, if it exists currentKey []byte currentValue []byte @@ -28,33 +29,58 @@ var _ Iterator = (*boltDBIterator)(nil) // newBoltDBIterator creates a new boltDBIterator. func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator { + // We can check for empty key at the start, because we use a read/write transaction that blocks + // the entire database for writes while the iterator exists. If we change to a read-only txn + // that supports concurrency we'll need to rewrite this logic. + emptyKeyValue := tx.Bucket(bucket).Get(boltDBEmptyKey) itr := tx.Bucket(bucket).Cursor() var ck, cv []byte if isReverse { - if end == nil { + switch { + case end == nil: ck, cv = itr.Last() - } else { + case len(end) == 0: + // If end is the blank key, then we don't return any keys by definition + ck = nil + cv = nil + default: _, _ = itr.Seek(end) // after key ck, cv = itr.Prev() // return to end key } + // If we're currently positioned at the placeholder for the empty key, skip it (handle later) + if emptyKeyValue != nil && bytes.Equal(ck, boltDBEmptyKey) { + ck, cv = itr.Prev() + } + // If we didn't find any initial key, but there's a placeholder for the empty key at the + // end that we've skipped, then the initial key should be the empty one (the final one). 
+ if emptyKeyValue != nil && ck == nil && (end == nil || len(end) > 0) { + ck = []byte{} + cv = emptyKeyValue + emptyKeyValue = nil // ensure call to Next() skips this + } } else { - if start == nil { + switch { + case (start == nil || len(start) == 0) && emptyKeyValue != nil: + ck = []byte{} + cv = emptyKeyValue + case (start == nil || len(start) == 0) && emptyKeyValue == nil: ck, cv = itr.First() - } else { + default: ck, cv = itr.Seek(start) } } return &boltDBIterator{ - tx: tx, - itr: itr, - start: start, - end: end, - currentKey: ck, - currentValue: cv, - isReverse: isReverse, - isInvalid: false, + tx: tx, + itr: itr, + start: start, + end: end, + emptyKeyValue: emptyKeyValue, + currentKey: ck, + currentValue: cv, + isReverse: isReverse, + isInvalid: false, } } @@ -70,7 +96,7 @@ func (itr *boltDBIterator) Valid() bool { } // iterated to the end of the cursor - if len(itr.currentKey) == 0 { + if itr.currentKey == nil { itr.isInvalid = true return false } @@ -96,8 +122,24 @@ func (itr *boltDBIterator) Next() { itr.assertIsValid() if itr.isReverse { itr.currentKey, itr.currentValue = itr.itr.Prev() + if itr.emptyKeyValue != nil && itr.currentKey == nil { + // If we reached the end, but there exists an empty key whose placeholder we skipped, + // we should set up the empty key and its value as the final pair. 
+ itr.currentKey = []byte{} + itr.currentValue = itr.emptyKeyValue + itr.emptyKeyValue = nil // This ensures the next call to Next() terminates + } } else { - itr.currentKey, itr.currentValue = itr.itr.Next() + if len(itr.currentKey) == 0 { + // If the first key was the empty key, then we need to move to the first non-empty key + itr.currentKey, itr.currentValue = itr.itr.First() + } else { + itr.currentKey, itr.currentValue = itr.itr.Next() + } + } + // If we encounter the placeholder for the empty key, skip it + if itr.emptyKeyValue != nil && bytes.Equal(itr.currentKey, boltDBEmptyKey) { + itr.Next() } } From 45ff9b50e981b22c995a490f5cc9a289c1d89dc0 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 11:34:45 +0100 Subject: [PATCH 12/30] cleveldb: fix handling of empty keys as iterator endpoints (#65) --- CHANGELOG.md | 2 ++ cleveldb_iterator.go | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dae0fbac..d89f6e047 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ - [boltdb] Properly handle blank keys in iterators +- [cleveldb] Fix handling of empty keys as iterator endpoints + - [goleveldb] [\#58](https://github.com/tendermint/tm-db/pull/58) Make `Batch.Close()` actually remove the batch contents ## 0.4.1 diff --git a/cleveldb_iterator.go b/cleveldb_iterator.go index 951fd0407..f0a4f4e37 100644 --- a/cleveldb_iterator.go +++ b/cleveldb_iterator.go @@ -20,7 +20,7 @@ var _ Iterator = (*cLevelDBIterator)(nil) func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { if isReverse { - if end == nil { + if end == nil || len(end) == 0 { source.SeekToLast() } else { source.Seek(end) @@ -34,7 +34,7 @@ func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse b } } } else { - if start == nil { + if start == nil || len(start) == 0 { source.SeekToFirst() } else { source.Seek(start) From 
79a4fbdccadda005f8e372e67b69a90af36219f2 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 15:43:08 +0100 Subject: [PATCH 13/30] test: run common test suite for PrefixDB as well (#72) --- backend_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/backend_test.go b/backend_test.go index 423d7ba77..b494a8970 100644 --- a/backend_test.go +++ b/backend_test.go @@ -11,6 +11,21 @@ import ( "github.com/stretchr/testify/require" ) +// Register a test backend for PrefixDB as well, with some unrelated junk data +func init() { + // nolint: errcheck + registerDBCreator("prefixdb", func(name, dir string) (DB, error) { + mdb := NewMemDB() + mdb.Set([]byte("a"), []byte{1}) + mdb.Set([]byte("b"), []byte{2}) + mdb.Set([]byte("t"), []byte{20}) + mdb.Set([]byte("test"), []byte{0}) + mdb.Set([]byte("u"), []byte{21}) + mdb.Set([]byte("z"), []byte{26}) + return NewPrefixDB(mdb, []byte("test/")), nil + }, false) +} + func cleanupDBDir(dir, name string) { err := os.RemoveAll(filepath.Join(dir, name) + ".db") if err != nil { From bdf7336cc676c0b7bb518db7e9f247d3a87d7759 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 15:57:12 +0100 Subject: [PATCH 14/30] Panic when calling non-Close() methods on closed or written batch --- CHANGELOG.md | 8 ++--- backend_test.go | 70 +++++++++++++++++++++----------------------- boltdb.go | 5 +--- boltdb_batch.go | 28 ++++++++++++++++-- cleveldb.go | 3 +- cleveldb_batch.go | 36 +++++++++++++++++++---- goleveldb.go | 3 +- goleveldb_batch.go | 35 +++++++++++++++++----- memdb.go | 2 +- memdb_batch.go | 20 +++++++++++++ remotedb/batch.go | 27 +++++++++++++++-- remotedb/remotedb.go | 5 +--- rocksdb.go | 3 +- rocksdb_batch.go | 46 ++++++++++++++++++++++------- types.go | 25 ++++++++++------ 15 files changed, 225 insertions(+), 91 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d89f6e047..7e4eb430c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ ### Breaking Changes +- 
[\#71](https://github.com/tendermint/tm-db/pull/71) Closed or written batches can no longer be reused, all non-`Close()` calls will panic + - [memdb] [\#56](https://github.com/tendermint/tm-db/pull/56) Removed some exported methods that were mainly meant for internal use: `Mutex()`, `SetNoLock()`, `SetNoLockSync()`, `DeleteNoLock()`, and `DeleteNoLockSync()` ### Improvements @@ -14,11 +16,9 @@ ### Bug Fixes -- [boltdb] Properly handle blank keys in iterators - -- [cleveldb] Fix handling of empty keys as iterator endpoints +- [boltdb] [\#69](https://github.com/tendermint/tm-db/pull/69) Properly handle blank keys in iterators -- [goleveldb] [\#58](https://github.com/tendermint/tm-db/pull/58) Make `Batch.Close()` actually remove the batch contents +- [cleveldb] [\#65](https://github.com/tendermint/tm-db/pull/65) Fix handling of empty keys as iterator endpoints ## 0.4.1 diff --git a/backend_test.go b/backend_test.go index b494a8970..d25265ea1 100644 --- a/backend_test.go +++ b/backend_test.go @@ -454,41 +454,14 @@ func testDBBatch(t *testing.T, backend BackendType) { require.NoError(t, err) assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) - // the batch still keeps these values internally, so changing values and rewriting batch - // should set the values again - err = db.Set([]byte("a"), []byte{9}) - require.NoError(t, err) - err = db.Delete([]byte("c")) - require.NoError(t, err) - err = batch.WriteSync() - require.NoError(t, err) - assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) - - // but when we close, it should no longer set the values - batch.Close() - err = db.Delete([]byte("c")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) - - // it should be possible to re-close the batch - batch.Close() - - // it should also be possible to reuse a closed batch as if it were a new one - batch.Set([]byte("c"), []byte{3}) - err = 
batch.Write() - require.NoError(t, err) - assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}, "c": {3}}) - batch.Close() - - batch.Delete([]byte("c")) - err = batch.WriteSync() - require.NoError(t, err) - assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) + // trying to modify or rewrite a written batch should panic, but closing it should work + require.Panics(t, func() { batch.Set([]byte("a"), []byte{9}) }) + require.Panics(t, func() { batch.Delete([]byte("a")) }) + require.Panics(t, func() { batch.Write() }) // nolint: errcheck + require.Panics(t, func() { batch.WriteSync() }) // nolint: errcheck batch.Close() - // batches should also write changes in order + // batches should write changes in order batch = db.NewBatch() batch.Delete([]byte("a")) batch.Set([]byte("a"), []byte{1}) @@ -501,13 +474,38 @@ func testDBBatch(t *testing.T, backend BackendType) { batch.Close() assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) - // and writing an empty batch should not fail + // writing nil keys and values should be the same as empty keys and values + // FIXME CLevelDB panics here: https://github.com/jmhodges/levigo/issues/55 + if backend != CLevelDBBackend { + batch = db.NewBatch() + batch.Set(nil, nil) + err = batch.WriteSync() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"": {}, "a": {1}, "b": {2}}) + + batch = db.NewBatch() + batch.Delete(nil) + err = batch.Write() + require.NoError(t, err) + assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) + } + + // it should be possible to write an empty batch batch = db.NewBatch() err = batch.Write() require.NoError(t, err) - err = batch.WriteSync() - require.NoError(t, err) assertKeyValues(t, db, map[string][]byte{"a": {1}, "b": {2}}) + + // it should be possible to close an empty batch, and to re-close a closed batch + batch = db.NewBatch() + batch.Close() + batch.Close() + + // all other operations on a closed batch should panic + require.Panics(t, func() { 
batch.Set([]byte("a"), []byte{9}) }) + require.Panics(t, func() { batch.Delete([]byte("a")) }) + require.Panics(t, func() { batch.Write() }) // nolint: errcheck + require.Panics(t, func() { batch.WriteSync() }) // nolint: errcheck } func assertKeyValues(t *testing.T, db DB, expect map[string][]byte) { diff --git a/boltdb.go b/boltdb.go index 522a43b51..8360a6842 100644 --- a/boltdb.go +++ b/boltdb.go @@ -169,10 +169,7 @@ func (bdb *BoltDB) Stats() map[string]string { // NewBatch implements DB. func (bdb *BoltDB) NewBatch() Batch { - return &boltDBBatch{ - ops: nil, - db: bdb, - } + return newBoltDBBatch(bdb) } // WARNING: Any concurrent writes or reads will block until the iterator is diff --git a/boltdb_batch.go b/boltdb_batch.go index a5996fe38..f3eaf9ce3 100644 --- a/boltdb_batch.go +++ b/boltdb_batch.go @@ -12,19 +12,35 @@ type boltDBBatch struct { var _ Batch = (*boltDBBatch)(nil) +func newBoltDBBatch(db *BoltDB) *boltDBBatch { + return &boltDBBatch{ + db: db, + ops: []operation{}, + } +} + +func (b *boltDBBatch) assertOpen() { + if b.ops == nil { + panic("batch has been written or closed") + } +} + // Set implements Batch. func (b *boltDBBatch) Set(key, value []byte) { + b.assertOpen() b.ops = append(b.ops, operation{opTypeSet, key, value}) } // Delete implements Batch. func (b *boltDBBatch) Delete(key []byte) { + b.assertOpen() b.ops = append(b.ops, operation{opTypeDelete, key, nil}) } // Write implements Batch. func (b *boltDBBatch) Write() error { - return b.db.db.Batch(func(tx *bbolt.Tx) error { + b.assertOpen() + err := b.db.db.Batch(func(tx *bbolt.Tx) error { bkt := tx.Bucket(bucket) for _, op := range b.ops { key := nonEmptyKey(nonNilBytes(op.key)) @@ -41,6 +57,12 @@ func (b *boltDBBatch) Write() error { } return nil }) + if err != nil { + return err + } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() + return nil } // WriteSync implements Batch. 
@@ -49,4 +71,6 @@ func (b *boltDBBatch) WriteSync() error { } // Close implements Batch. -func (b *boltDBBatch) Close() {} +func (b *boltDBBatch) Close() { + b.ops = nil +} diff --git a/cleveldb.go b/cleveldb.go index 066126c1f..e2e983557 100644 --- a/cleveldb.go +++ b/cleveldb.go @@ -159,8 +159,7 @@ func (db *CLevelDB) Stats() map[string]string { // NewBatch implements DB. func (db *CLevelDB) NewBatch() Batch { - batch := levigo.NewWriteBatch() - return &cLevelDBBatch{db, batch} + return newCLevelDBBatch(db) } // Iterator implements DB. diff --git a/cleveldb_batch.go b/cleveldb_batch.go index c98fe3354..eeb2770d2 100644 --- a/cleveldb_batch.go +++ b/cleveldb_batch.go @@ -10,33 +10,59 @@ type cLevelDBBatch struct { batch *levigo.WriteBatch } +func newCLevelDBBatch(db *CLevelDB) *cLevelDBBatch { + return &cLevelDBBatch{ + db: db, + batch: levigo.NewWriteBatch(), + } +} + +func (b *cLevelDBBatch) assertOpen() { + if b.batch == nil { + panic("batch has been written or closed") + } +} + // Set implements Batch. func (b *cLevelDBBatch) Set(key, value []byte) { - b.batch.Put(key, value) + b.assertOpen() + b.batch.Put(nonNilBytes(key), nonNilBytes(value)) } // Delete implements Batch. func (b *cLevelDBBatch) Delete(key []byte) { - b.batch.Delete(key) + b.assertOpen() + b.batch.Delete(nonNilBytes(key)) } // Write implements Batch. func (b *cLevelDBBatch) Write() error { - if err := b.db.db.Write(b.db.wo, b.batch); err != nil { + b.assertOpen() + err := b.db.db.Write(b.db.wo, b.batch) + if err != nil { return err } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } // WriteSync implements Batch. func (b *cLevelDBBatch) WriteSync() error { - if err := b.db.db.Write(b.db.woSync, b.batch); err != nil { + b.assertOpen() + err := b.db.db.Write(b.db.woSync, b.batch) + if err != nil { return err } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. 
+ b.Close() return nil } // Close implements Batch. func (b *cLevelDBBatch) Close() { - b.batch.Close() + if b.batch != nil { + b.batch.Close() + b.batch = nil + } } diff --git a/goleveldb.go b/goleveldb.go index 0c54cdee9..eadc83606 100644 --- a/goleveldb.go +++ b/goleveldb.go @@ -153,8 +153,7 @@ func (db *GoLevelDB) Stats() map[string]string { // NewBatch implements DB. func (db *GoLevelDB) NewBatch() Batch { - batch := new(leveldb.Batch) - return &goLevelDBBatch{db, batch} + return newGoLevelDBBatch(db) } // Iterator implements DB. diff --git a/goleveldb_batch.go b/goleveldb_batch.go index ec290fe10..efb33162a 100644 --- a/goleveldb_batch.go +++ b/goleveldb_batch.go @@ -12,35 +12,56 @@ type goLevelDBBatch struct { var _ Batch = (*goLevelDBBatch)(nil) +func newGoLevelDBBatch(db *GoLevelDB) *goLevelDBBatch { + return &goLevelDBBatch{ + db: db, + batch: new(leveldb.Batch), + } +} + +func (b *goLevelDBBatch) assertOpen() { + if b.batch == nil { + panic("batch has been written or closed") + } +} + // Set implements Batch. func (b *goLevelDBBatch) Set(key, value []byte) { + b.assertOpen() b.batch.Put(key, value) } // Delete implements Batch. func (b *goLevelDBBatch) Delete(key []byte) { + b.assertOpen() b.batch.Delete(key) } // Write implements Batch. func (b *goLevelDBBatch) Write() error { - err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: false}) - if err != nil { - return err - } - return nil + return b.write(false) } // WriteSync implements Batch. func (b *goLevelDBBatch) WriteSync() error { - err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: true}) + return b.write(true) +} + +func (b *goLevelDBBatch) write(sync bool) error { + b.assertOpen() + err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: sync}) if err != nil { return err } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } // Close implements Batch. 
func (b *goLevelDBBatch) Close() { - b.batch.Reset() + if b.batch != nil { + b.batch.Reset() + b.batch = nil + } } diff --git a/memdb.go b/memdb.go index 9cbb99ab8..68acaf29e 100644 --- a/memdb.go +++ b/memdb.go @@ -150,7 +150,7 @@ func (db *MemDB) Stats() map[string]string { // NewBatch implements DB. func (db *MemDB) NewBatch() Batch { - return &memDBBatch{db, nil} + return newMemDBBatch(db) } // Iterator implements DB. diff --git a/memdb_batch.go b/memdb_batch.go index 4bd1f1d0c..d9b94999c 100644 --- a/memdb_batch.go +++ b/memdb_batch.go @@ -24,18 +24,35 @@ type memDBBatch struct { var _ Batch = (*memDBBatch)(nil) +// newMemDBBatch creates a new memDBBatch +func newMemDBBatch(db *MemDB) *memDBBatch { + return &memDBBatch{ + db: db, + ops: []operation{}, + } +} + +func (b *memDBBatch) assertOpen() { + if b.ops == nil { + panic("batch has been written or closed") + } +} + // Set implements Batch. func (b *memDBBatch) Set(key, value []byte) { + b.assertOpen() b.ops = append(b.ops, operation{opTypeSet, key, value}) } // Delete implements Batch. func (b *memDBBatch) Delete(key []byte) { + b.assertOpen() b.ops = append(b.ops, operation{opTypeDelete, key, nil}) } // Write implements Batch. func (b *memDBBatch) Write() error { + b.assertOpen() b.db.mtx.Lock() defer b.db.mtx.Unlock() @@ -49,6 +66,9 @@ func (b *memDBBatch) Write() error { return errors.Errorf("unknown operation type %v (%v)", op.opType, op) } } + + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } diff --git a/remotedb/batch.go b/remotedb/batch.go index 5fb92f30c..8187fd7f3 100644 --- a/remotedb/batch.go +++ b/remotedb/batch.go @@ -14,8 +14,22 @@ type batch struct { var _ db.Batch = (*batch)(nil) +func newBatch(rdb *RemoteDB) *batch { + return &batch{ + db: rdb, + ops: []*protodb.Operation{}, + } +} + +func (b *batch) assertOpen() { + if b.ops == nil { + panic("batch has been written or closed") + } +} + // Set implements Batch. 
func (b *batch) Set(key, value []byte) { + b.assertOpen() op := &protodb.Operation{ Entity: &protodb.Entity{Key: key, Value: value}, Type: protodb.Operation_SET, @@ -25,6 +39,7 @@ func (b *batch) Set(key, value []byte) { // Delete implements Batch. func (b *batch) Delete(key []byte) { + b.assertOpen() op := &protodb.Operation{ Entity: &protodb.Entity{Key: key}, Type: protodb.Operation_DELETE, @@ -34,17 +49,25 @@ func (b *batch) Delete(key []byte) { // Write implements Batch. func (b *batch) Write() error { - if _, err := b.db.dc.BatchWrite(b.db.ctx, &protodb.Batch{Ops: b.ops}); err != nil { + b.assertOpen() + _, err := b.db.dc.BatchWrite(b.db.ctx, &protodb.Batch{Ops: b.ops}) + if err != nil { return errors.Errorf("remoteDB.BatchWrite: %v", err) } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } // WriteSync implements Batch. func (b *batch) WriteSync() error { - if _, err := b.db.dc.BatchWriteSync(b.db.ctx, &protodb.Batch{Ops: b.ops}); err != nil { + b.assertOpen() + _, err := b.db.dc.BatchWriteSync(b.db.ctx, &protodb.Batch{Ops: b.ops}) + if err != nil { return errors.Errorf("RemoteDB.BatchWriteSync: %v", err) } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } diff --git a/remotedb/remotedb.go b/remotedb/remotedb.go index 9da3dd08b..02822807c 100644 --- a/remotedb/remotedb.go +++ b/remotedb/remotedb.go @@ -98,10 +98,7 @@ func (rd *RemoteDB) ReverseIterator(start, end []byte) (db.Iterator, error) { } func (rd *RemoteDB) NewBatch() db.Batch { - return &batch{ - db: rd, - ops: nil, - } + return newBatch(rd) } // TODO: Implement Print when db.DB implements a method diff --git a/rocksdb.go b/rocksdb.go index b4bcf1def..e2e23ce7e 100644 --- a/rocksdb.go +++ b/rocksdb.go @@ -164,8 +164,7 @@ func (db *RocksDB) Stats() map[string]string { // NewBatch implements DB. 
func (db *RocksDB) NewBatch() Batch { - batch := gorocksdb.NewWriteBatch() - return &rocksDBBatch{db, batch} + return newRocksDBBatch(db) } // Iterator implements DB. diff --git a/rocksdb_batch.go b/rocksdb_batch.go index 085ec51ce..9cab3df6d 100644 --- a/rocksdb_batch.go +++ b/rocksdb_batch.go @@ -11,35 +11,59 @@ type rocksDBBatch struct { var _ Batch = (*rocksDBBatch)(nil) +func newRocksDBBatch(db *RocksDB) *rocksDBBatch { + return &rocksDBBatch{ + db: db, + batch: gorocksdb.NewWriteBatch(), + } +} + +func (b *rocksDBBatch) assertOpen() { + if b.batch == nil { + panic("batch has been written or closed") + } +} + // Set implements Batch. -func (mBatch *rocksDBBatch) Set(key, value []byte) { - mBatch.batch.Put(key, value) +func (b *rocksDBBatch) Set(key, value []byte) { + b.assertOpen() + b.batch.Put(key, value) } // Delete implements Batch. -func (mBatch *rocksDBBatch) Delete(key []byte) { - mBatch.batch.Delete(key) +func (b *rocksDBBatch) Delete(key []byte) { + b.assertOpen() + b.batch.Delete(key) } // Write implements Batch. -func (mBatch *rocksDBBatch) Write() error { - err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) +func (b *rocksDBBatch) Write() error { + b.assertOpen() + err := b.db.db.Write(b.db.wo, b.batch) if err != nil { return err } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } -// WriteSync mplements Batch. -func (mBatch *rocksDBBatch) WriteSync() error { - err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) +// WriteSync implements Batch. +func (b *rocksDBBatch) WriteSync() error { + b.assertOpen() + err := b.db.db.Write(b.db.woSync, b.batch) if err != nil { return err } + // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. + b.Close() return nil } // Close implements Batch. 
-func (mBatch *rocksDBBatch) Close() { - mBatch.batch.Destroy() +func (b *rocksDBBatch) Close() { + if b.batch != nil { + b.batch.Destroy() + b.batch = nil + } } diff --git a/types.go b/types.go index 1de83e96f..ad1dc587a 100644 --- a/types.go +++ b/types.go @@ -44,7 +44,7 @@ type DB interface { // Closes the connection. Close() error - // Creates a batch for atomic updates. + // Creates a batch for atomic updates. The caller must call Batch.Close. NewBatch() Batch // For debugging @@ -54,24 +54,31 @@ type DB interface { Stats() map[string]string } -//---------------------------------------- -// Batch - // Batch Close must be called when the program no longer needs the object. type Batch interface { SetDeleter + + // Write writes the batch, possibly without flushing to disk. Only Close() can be called after, + // other methods will panic. Write() error + + // WriteSync writes the batch and flushes it to disk. Only Close() can be called after, other + // methods will panic. WriteSync() error + + // Close closes the batch. It is idempotent, but any other calls afterwards will panic. Close() } type SetDeleter interface { - Set(key, value []byte) // CONTRACT: key, value readonly []byte - Delete(key []byte) // CONTRACT: key readonly []byte -} + // Set sets a key/value pair. + // CONTRACT: key, value readonly []byte + Set(key, value []byte) -//---------------------------------------- -// Iterator + // Delete deletes a key/value pair. 
+ // CONTRACT: key readonly []byte + Delete(key []byte) +} /* Usage: From 766d71decced980d93ba6dfba2bb5be5c170aba5 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 16:02:28 +0100 Subject: [PATCH 15/30] circleci: run all tests (#64) --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4cd482e53..335887a40 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ jobs: - run: name: test command: | - make test + make test-all - save_cache: key: go-mod-v1-{{ checksum "go.sum" }} paths: From 3ff68ea4811b0f9f9ebc81a3a01d527fe42a74f1 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Tue, 10 Mar 2020 18:36:54 +0100 Subject: [PATCH 16/30] memdb: take out read-lock during iteration (#74) --- CHANGELOG.md | 2 ++ memdb.go | 10 ++++------ memdb_iterator.go | 16 +++++++++------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e4eb430c..e185e332b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ - [\#71](https://github.com/tendermint/tm-db/pull/71) Closed or written batches can no longer be reused, all non-`Close()` calls will panic +- [memdb] [\#74](https://github.com/tendermint/tm-db/pull/74) `Iterator()` and `ReverseIterator()` now take out database read locks for the duration of the iteration + - [memdb] [\#56](https://github.com/tendermint/tm-db/pull/56) Removed some exported methods that were mainly meant for internal use: `Mutex()`, `SetNoLock()`, `SetNoLockSync()`, `DeleteNoLock()`, and `DeleteNoLockSync()` ### Improvements diff --git a/memdb.go b/memdb.go index 68acaf29e..20b319dd5 100644 --- a/memdb.go +++ b/memdb.go @@ -154,15 +154,13 @@ func (db *MemDB) NewBatch() Batch { } // Iterator implements DB. +// Takes out a read-lock on the database until the iterator is closed. 
func (db *MemDB) Iterator(start, end []byte) (Iterator, error) { - db.mtx.RLock() - defer db.mtx.RUnlock() - return newMemDBIterator(db.btree, start, end, false), nil + return newMemDBIterator(db, start, end, false), nil } // ReverseIterator implements DB. +// Takes out a read-lock on the database until the iterator is closed. func (db *MemDB) ReverseIterator(start, end []byte) (Iterator, error) { - db.mtx.RLock() - defer db.mtx.RUnlock() - return newMemDBIterator(db.btree, start, end, true), nil + return newMemDBIterator(db, start, end, true), nil } diff --git a/memdb_iterator.go b/memdb_iterator.go index b5e12abd8..f76d249b7 100644 --- a/memdb_iterator.go +++ b/memdb_iterator.go @@ -26,7 +26,7 @@ type memDBIterator struct { var _ Iterator = (*memDBIterator)(nil) // newMemDBIterator creates a new memDBIterator. -func newMemDBIterator(bt *btree.BTree, start []byte, end []byte, reverse bool) *memDBIterator { +func newMemDBIterator(db *MemDB, start []byte, end []byte, reverse bool) *memDBIterator { ctx, cancel := context.WithCancel(context.Background()) ch := make(chan *item, chBufferSize) iter := &memDBIterator{ @@ -36,7 +36,9 @@ func newMemDBIterator(bt *btree.BTree, start []byte, end []byte, reverse bool) * end: end, } + db.mtx.RLock() go func() { + defer db.mtx.RUnlock() // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need // the following variables to handle some reverse iteration conditions ourselves. 
var ( @@ -61,23 +63,23 @@ func newMemDBIterator(bt *btree.BTree, start []byte, end []byte, reverse bool) * } switch { case start == nil && end == nil && !reverse: - bt.Ascend(visitor) + db.btree.Ascend(visitor) case start == nil && end == nil && reverse: - bt.Descend(visitor) + db.btree.Descend(visitor) case end == nil && !reverse: // must handle this specially, since nil is considered less than anything else - bt.AscendGreaterOrEqual(newKey(start), visitor) + db.btree.AscendGreaterOrEqual(newKey(start), visitor) case !reverse: - bt.AscendRange(newKey(start), newKey(end), visitor) + db.btree.AscendRange(newKey(start), newKey(end), visitor) case end == nil: // abort after start, since we use [start, end) while btree uses (start, end] abortLessThan = start - bt.Descend(visitor) + db.btree.Descend(visitor) default: // skip end and abort after start, since we use [start, end) while btree uses (start, end] skipEqual = end abortLessThan = start - bt.DescendLessOrEqual(newKey(end), visitor) + db.btree.DescendLessOrEqual(newKey(end), visitor) } close(ch) }() From 4865b606d72b2d44b73936d21105be50ce12d045 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Wed, 11 Mar 2020 11:37:11 +0100 Subject: [PATCH 17/30] Release 0.5.0 (#75) --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e185e332b..bd3e0d7b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## Unreleased +## 0.5.0 + +**2020-03-11** + ### Breaking Changes - [\#71](https://github.com/tendermint/tm-db/pull/71) Closed or written batches can no longer be reused, all non-`Close()` calls will panic From 435d08cdccff86fcb4fb8ed170e20d285a172af3 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 11 Mar 2020 13:07:35 +0100 Subject: [PATCH 18/30] build(deps): bump google.golang.org/grpc from 1.27.1 to 1.28.0 (#76) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 
1.27.1 to 1.28.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.27.1...v1.28.0) Signed-off-by: dependabot-preview[bot] Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 56e87e583..b99de3738 100644 --- a/go.mod +++ b/go.mod @@ -17,5 +17,5 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c go.etcd.io/bbolt v1.3.3 // indirect golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect - google.golang.org/grpc v1.27.1 + google.golang.org/grpc v1.28.0 ) diff --git a/go.sum b/go.sum index 5b70eeea6..28783fcd1 100644 --- a/go.sum +++ b/go.sum @@ -2,11 +2,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= @@ -26,6 +29,8 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -93,8 +98,11 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= From 0d727a5b5878dcfa8a415d0cea66191562d54e25 Mon Sep 17 00:00:00 2001 From: Marko Date: Mon, 16 Mar 2020 14:45:42 +0100 Subject: [PATCH 19/30] switch from golangci to github actions (#77) * golangci removal - golangci service is being deprecated: https://medium.com/golangci/golangci-com-is-closing-d1fc1bd30e0e transition to githubactions run linting Signed-off-by: Marko Baricevic * fix yaml * fix yaml again * fix yaml again * fix yaml again * fix go install * test different linter * yaml... fix * yaml... fix * setup go env * fix shel * fix shel * test review dog * test review dog commenting on pr * more testing * get it to comment * remvoe linter * testing test in githubactions * remove extra uses * add shell bash * fix image name * test clean flase * remove clean * remove circle * format on save --- .circleci/config.yml | 19 ------------------- .github/workflows/ci.yml | 12 ++++++++++++ .github/workflows/lint.yml | 14 ++++++++++++++ README.md | 14 +++++++------- 4 files changed, 33 insertions(+), 26 deletions(-) delete mode 100644 .circleci/config.yml create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/lint.yml diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 335887a40..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,19 +0,0 @@ -version: 2 -jobs: - build: - docker: - - image: tendermintdev/docker-tm-db-testing - - steps: - - checkout - - restore_cache: - keys: - - go-mod-v1-{{ checksum "go.sum" }} - - run: - name: test - command: | - make test-all - - save_cache: - key: go-mod-v1-{{ checksum "go.sum" }} - paths: - - "/go/pkg/mod" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..747b43b13 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,12 @@ +name: Test +on: 
[pull_request] +jobs: + Test: + runs-on: ubuntu-latest + container: tendermintdev/docker-tm-db-testing + steps: + - uses: actions/checkout@v2 + - name: test all dbs + run: | + make test-all + shell: bash diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..ca7c28d5a --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,14 @@ +name: Lint +on: [pull_request] +jobs: + golangci-lint: + name: runner / golangci-lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: reviewdog/action-golangci-lint@v1 + # uses: docker://reviewdog/action-golangci-lint:v1 # pre-build docker image + with: + github_token: ${{ secrets.github_token }} + reporter: github-pr-check diff --git a/README.md b/README.md index 88a79fe22..4ec52721a 100644 --- a/README.md +++ b/README.md @@ -8,21 +8,21 @@ Go 1.13+ ## Supported Database Backends -* **[GoLevelDB](https://github.com/syndtr/goleveldb) [stable]**: A pure Go implementation of [LevelDB](https://github.com/google/leveldb) (see below). Currently the default on-disk database used in the Cosmos SDK. +- **[GoLevelDB](https://github.com/syndtr/goleveldb) [stable]**: A pure Go implementation of [LevelDB](https://github.com/google/leveldb) (see below). Currently the default on-disk database used in the Cosmos SDK. -* **MemDB [stable]:** An in-memory database using [Google's B-tree package](https://github.com/google/btree). Has very high performance both for reads, writes, and range scans, but is not durable and will lose all data on process exit. Does not support transactions. Suitable for e.g. caches, working sets, and tests. Used for [IAVL](https://github.com/tendermint/iavl) working sets when the pruning strategy allows it. +- **MemDB [stable]:** An in-memory database using [Google's B-tree package](https://github.com/google/btree). 
Has very high performance both for reads, writes, and range scans, but is not durable and will lose all data on process exit. Does not support transactions. Suitable for e.g. caches, working sets, and tests. Used for [IAVL](https://github.com/tendermint/iavl) working sets when the pruning strategy allows it. -* **[LevelDB](https://github.com/google/leveldb) [experimental]:** A [Go wrapper](https://github.com/jmhodges/levigo) around [LevelDB](https://github.com/google/leveldb). Uses LSM-trees for on-disk storage, which have good performance for write-heavy workloads, particularly on spinning disks, but requires periodic compaction to maintain decent read performance and reclaim disk space. Does not support transactions. +- **[LevelDB](https://github.com/google/leveldb) [experimental]:** A [Go wrapper](https://github.com/jmhodges/levigo) around [LevelDB](https://github.com/google/leveldb). Uses LSM-trees for on-disk storage, which have good performance for write-heavy workloads, particularly on spinning disks, but requires periodic compaction to maintain decent read performance and reclaim disk space. Does not support transactions. -* **[BoltDB](https://github.com/etcd-io/bbolt) [experimental]:** A [fork](https://github.com/etcd-io/bbolt) of [BoltDB](https://github.com/boltdb/bolt). Uses B+trees for on-disk storage, which have good performance for read-heavy workloads and range scans. Supports serializable ACID transactions. +- **[BoltDB](https://github.com/etcd-io/bbolt) [experimental]:** A [fork](https://github.com/etcd-io/bbolt) of [BoltDB](https://github.com/boltdb/bolt). Uses B+trees for on-disk storage, which have good performance for read-heavy workloads and range scans. Supports serializable ACID transactions. -* **[RocksDB](https://github.com/tecbot/gorocksdb) [experimental]:** A [Go wrapper](https://github.com/tecbot/gorocksdb) around [RocksDB](https://rocksdb.org). 
Similarly to LevelDB (above) it uses LSM-trees for on-disk storage, but is optimized for fast storage media such as SSDs and memory. Supports atomic transactions, but not full ACID transactions. +- **[RocksDB](https://github.com/tecbot/gorocksdb) [experimental]:** A [Go wrapper](https://github.com/tecbot/gorocksdb) around [RocksDB](https://rocksdb.org). Similarly to LevelDB (above) it uses LSM-trees for on-disk storage, but is optimized for fast storage media such as SSDs and memory. Supports atomic transactions, but not full ACID transactions. ## Meta-databases -* **PrefixDB [stable]:** A database which wraps another database and uses a static prefix for all keys. This allows multiple logical databases to be stored in a common underlying databases by using different namespaces. Used by the Cosmos SDK to give different modules their own namespaced database in a single application database. +- **PrefixDB [stable]:** A database which wraps another database and uses a static prefix for all keys. This allows multiple logical databases to be stored in a common underlying databases by using different namespaces. Used by the Cosmos SDK to give different modules their own namespaced database in a single application database. -* **RemoteDB [experimental]:** A database that connects to distributed Tendermint db instances via [gRPC](https://grpc.io/). This can help with detaching difficult deployments such as LevelDB, and can also ease dependency management for Tendermint developers. +- **RemoteDB [experimental]:** A database that connects to distributed Tendermint db instances via [gRPC](https://grpc.io/). This can help with detaching difficult deployments such as LevelDB, and can also ease dependency management for Tendermint developers. 
## Tests From 3467af366c0b42922fe4780ec772dc9a97ce9adb Mon Sep 17 00:00:00 2001 From: Marko Date: Wed, 18 Mar 2020 15:41:54 +0100 Subject: [PATCH 20/30] ci: run ci on master (#78) Signed-off-by: Marko Baricevic --- .github/workflows/ci.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 747b43b13..b5ee2479a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,9 @@ name: Test -on: [pull_request] +on: + push: + branches: + - master + pull_request: jobs: Test: runs-on: ubuntu-latest From d6f56dff6cbbb2813fb13c6ae37f41c31cecd5de Mon Sep 17 00:00:00 2001 From: Marko Date: Tue, 24 Mar 2020 17:21:13 +0100 Subject: [PATCH 21/30] fmt: add format cmd (#80) * fmt: add format cmd Signed-off-by: Marko Baricevic * remove local --- makefile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/makefile b/makefile index db14c69fa..d6466adfb 100644 --- a/makefile +++ b/makefile @@ -32,6 +32,12 @@ lint: @echo "--> Running linter" @golangci-lint run @go mod verify +.PHONY: lint + +format: + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w +.PHONY: format tools: go get -v $(GOTOOLS) @@ -57,5 +63,5 @@ clean_certs: ## Note the $@ here is substituted for the %.pb.go protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:../../.. 
+ protoc_remotedb: remotedb/proto/defs.pb.go - \ No newline at end of file From df9b11d5476ebf79ed952f8cb742ebccbdcf606c Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Mon, 30 Mar 2020 08:24:55 +0200 Subject: [PATCH 22/30] boltdb: use correct import path (#81) --- CHANGELOG.md | 4 ++++ boltdb.go | 2 +- boltdb_batch.go | 2 +- boltdb_iterator.go | 2 +- go.mod | 3 +-- go.sum | 5 ----- 6 files changed, 8 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd3e0d7b0..f0ef1ceb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## Unreleased +### Bug Fixes + +- [boltdb] Use correct import path go.etcd.io/bbolt + ## 0.5.0 **2020-03-11** diff --git a/boltdb.go b/boltdb.go index 8360a6842..4fa89fb8f 100644 --- a/boltdb.go +++ b/boltdb.go @@ -7,8 +7,8 @@ import ( "os" "path/filepath" - "github.com/etcd-io/bbolt" "github.com/pkg/errors" + "go.etcd.io/bbolt" ) var ( diff --git a/boltdb_batch.go b/boltdb_batch.go index f3eaf9ce3..ae0f40b35 100644 --- a/boltdb_batch.go +++ b/boltdb_batch.go @@ -2,7 +2,7 @@ package db -import "github.com/etcd-io/bbolt" +import "go.etcd.io/bbolt" // boltDBBatch stores operations internally and dumps them to BoltDB on Write(). 
type boltDBBatch struct { diff --git a/boltdb_iterator.go b/boltdb_iterator.go index 013510959..95adc871f 100644 --- a/boltdb_iterator.go +++ b/boltdb_iterator.go @@ -5,7 +5,7 @@ package db import ( "bytes" - "github.com/etcd-io/bbolt" + "go.etcd.io/bbolt" ) // boltDBIterator allows you to iterate on range of keys/values given some diff --git a/go.mod b/go.mod index b99de3738..e7cb4d758 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.12 require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/etcd-io/bbolt v1.3.3 github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect @@ -15,7 +14,7 @@ require ( github.com/stretchr/testify v1.5.1 github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c - go.etcd.io/bbolt v1.3.3 // indirect + go.etcd.io/bbolt v1.3.3 golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect google.golang.org/grpc v1.28.0 ) diff --git a/go.sum b/go.sum index 28783fcd1..e5e27c2ad 100644 --- a/go.sum +++ b/go.sum @@ -8,11 +8,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= 
-github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -99,8 +96,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 94debf482ba27337a5dfb0d8c3621889e48a6e0f Mon Sep 17 00:00:00 2001 From: Marko Date: Mon, 30 Mar 2020 09:50:25 +0200 Subject: [PATCH 23/30] rc for new release (#82) --- CHANGELOG.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0ef1ceb9..d0c60ba46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,13 @@ ## Unreleased +## 0.5.1 + +**2020-03-30** + ### Bug Fixes -- [boltdb] Use correct import path go.etcd.io/bbolt +- [boltdb] [\#81](https://github.com/tendermint/tm-db/pull/81) Use correct import path go.etcd.io/bbolt ## 0.5.0 From ff90aa210f57c9d424b4ad1b4a9bb63155c061a7 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 30 Mar 
2020 11:47:22 +0000 Subject: [PATCH 24/30] build(deps): bump go.etcd.io/bbolt from 1.3.3 to 1.3.4 (#83) Bumps [go.etcd.io/bbolt](https://github.com/etcd-io/bbolt) from 1.3.3 to 1.3.4. - [Release notes](https://github.com/etcd-io/bbolt/releases) - [Commits](https://github.com/etcd-io/bbolt/compare/v1.3.3...v1.3.4) Signed-off-by: dependabot-preview[bot] Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e7cb4d758..ab1335e9b 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/stretchr/testify v1.5.1 github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c - go.etcd.io/bbolt v1.3.3 + go.etcd.io/bbolt v1.3.4 golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect google.golang.org/grpc v1.28.0 ) diff --git a/go.sum b/go.sum index e5e27c2ad..7d1d153b1 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -80,6 +82,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h 
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 9e2fc22482d498c97548554a1170f800953c569a Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Thu, 2 Apr 2020 17:48:38 +0200 Subject: [PATCH 25/30] use Apache 2.0 license --- LICENSE | 204 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..bb66bb350 --- /dev/null +++ b/LICENSE @@ -0,0 +1,204 @@ +Tendermint Core +License: Apache2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016 All in Bits, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 68de032743c1768f5b6a0975d7f93c829420bc73 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 7 Apr 2020 13:24:59 +0200 Subject: [PATCH 26/30] build(deps): bump google.golang.org/grpc from 1.28.0 to 1.28.1 (#87) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.28.0 to 1.28.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.28.0...v1.28.1) Signed-off-by: dependabot-preview[bot] Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index ab1335e9b..9e29305da 100644 --- a/go.mod +++ b/go.mod @@ -16,5 +16,5 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c go.etcd.io/bbolt v1.3.4 golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect - google.golang.org/grpc v1.28.0 + google.golang.org/grpc v1.28.1 ) diff --git a/go.sum b/go.sum index 7d1d153b1..5b7e86363 100644 --- a/go.sum +++ b/go.sum @@ -102,6 +102,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= 
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= From 0dfd227ccbca96aeb3144622d2185c79bda15cd0 Mon Sep 17 00:00:00 2001 From: Marko Date: Wed, 8 Apr 2020 15:05:30 +0200 Subject: [PATCH 27/30] add badges and run go mod tidy (#88) --- .github/workflows/lint.yml | 6 +++++- README.md | 3 +++ go.mod | 2 +- go.sum | 10 ++++------ 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ca7c28d5a..e0bfa0f68 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,9 @@ name: Lint -on: [pull_request] +on: + push: + branches: + - master + pull_request: jobs: golangci-lint: name: runner / golangci-lint diff --git a/README.md b/README.md index 4ec52721a..1dc0d00e9 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@ # Tendermint DB +![Test](https://github.com/tendermint/tm-db/workflows/Test/badge.svg?branch=master) +![Lint](https://github.com/tendermint/tm-db/workflows/Lint/badge.svg?branch=master) + Common database interface for various database backends. Primarily meant for applications built on [Tendermint](https://github.com/tendermint/tendermint), such as the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk), but can be used independently of these as well. 
### Minimum Go Version diff --git a/go.mod b/go.mod index 9e29305da..dae9f9e96 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,6 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c go.etcd.io/bbolt v1.3.4 - golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect + golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect google.golang.org/grpc v1.28.1 ) diff --git a/go.sum b/go.sum index 5b7e86363..2c5a2ab2b 100644 --- a/go.sum +++ b/go.sum @@ -58,8 +58,6 @@ github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+ github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -72,8 +70,8 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -84,6 +82,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -100,8 +100,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= 
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 32b371b638e86931b54d2ba28b5e13a7cbdc366d Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 22 Apr 2020 11:22:31 +0000 Subject: [PATCH 28/30] build(deps): bump google.golang.org/grpc from 1.28.1 to 1.29.0 (#89) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.28.1 to 1.29.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.28.1...v1.29.0) Signed-off-by: dependabot-preview[bot] Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index dae9f9e96..9040f590f 100644 --- a/go.mod +++ b/go.mod @@ -16,5 +16,5 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c go.etcd.io/bbolt v1.3.4 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect - google.golang.org/grpc v1.28.1 + google.golang.org/grpc v1.29.0 ) diff --git a/go.sum b/go.sum index 2c5a2ab2b..04ac0e64c 100644 --- a/go.sum +++ b/go.sum @@ -102,6 +102,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0 h1:2pJjwYOdkZ9HlN4sWRYBg9ttH5bCOlsueaM+b/oYjwo= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= From 5f36d42aa75d589dfbf75a7c6978a6985bed5f30 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Fri, 24 Apr 2020 11:16:27 +0000 Subject: [PATCH 29/30] build(deps): bump google.golang.org/grpc from 1.29.0 to 1.29.1 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.29.0 to 1.29.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.29.0...v1.29.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 9040f590f..bc874a841 100644 --- a/go.mod +++ b/go.mod @@ -16,5 +16,5 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c go.etcd.io/bbolt v1.3.4 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect - google.golang.org/grpc v1.29.0 + google.golang.org/grpc v1.29.1 ) diff --git a/go.sum b/go.sum index 04ac0e64c..0d0795327 100644 --- a/go.sum +++ b/go.sum @@ -104,6 +104,8 @@ google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.0 h1:2pJjwYOdkZ9HlN4sWRYBg9ttH5bCOlsueaM+b/oYjwo= google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= From 6872dc88e9664f4967fa911e0a7454101872dcbb Mon Sep 17 00:00:00 2001 From: Tess Rinearson Date: Fri, 24 Apr 2020 17:02:28 +0200 Subject: [PATCH 30/30] .github: configure stalebot (#91) --- .github/stale.yml | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 .github/stale.yml diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 000000000..63c6e0f1f --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,47 @@ +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 60 + +# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. +# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. +daysUntilClose: 9 + +# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable +exemptLabels: + - major-release + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: true + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: true + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: false + +# Label to use when marking as stale +staleLabel: stale + +# Comment to post when marking as stale. Set to `false` to disable +markComment: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. + +# Limit the number of actions per hour, from 1-30. 
Default is 30
+limitPerRun: 30
+
+# Limit to only `issues` or `pulls`
+only: pulls
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+pulls:
+  daysUntilStale: 30
+  markComment: >
+    This pull request has been automatically marked as stale because it has not had
+    recent activity. It will be closed if no further activity occurs. Thank you
+    for your contributions.
\ No newline at end of file