From d9531179c974f7c8f717076b527a8ec00d41c8da Mon Sep 17 00:00:00 2001 From: Wondertan Date: Wed, 29 Nov 2023 16:29:41 +0100 Subject: [PATCH 001/132] chore(moddas): speed up sampling for LN --- nodebuilder/das/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/nodebuilder/das/config.go b/nodebuilder/das/config.go index eeaa382a41..4d0276214f 100644 --- a/nodebuilder/das/config.go +++ b/nodebuilder/das/config.go @@ -23,6 +23,7 @@ func DefaultConfig(tp node.Type) Config { switch tp { case node.Light: cfg.SampleTimeout = modp2p.BlockTime * time.Duration(cfg.ConcurrencyLimit) + cfg.ConcurrencyLimit = 64 case node.Full: // Default value for DASer concurrency limit is based on dasing using ipld getter. // Full node will primarily use shrex protocol for sampling, that is much more efficient and can From f122a71f7985455aba689f7ea655c88f2962931b Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 7 Sep 2023 22:31:54 +0200 Subject: [PATCH 002/132] prototype --- share/eds/file.go | 174 +++++++++++++++++++++++++++++++++ share/eds/file_header.go | 61 ++++++++++++ share/eds/file_store.go | 4 + share/eds/file_test.go | 58 +++++++++++ share/ipld/blockserv.go | 2 +- share/ipldv2/nmtv2.go | 195 +++++++++++++++++++++++++++++++++++++ share/ipldv2/nmtv2_test.go | 69 +++++++++++++ 7 files changed, 562 insertions(+), 1 deletion(-) create mode 100644 share/eds/file.go create mode 100644 share/eds/file_header.go create mode 100644 share/eds/file_store.go create mode 100644 share/eds/file_test.go create mode 100644 share/ipldv2/nmtv2.go create mode 100644 share/ipldv2/nmtv2_test.go diff --git a/share/eds/file.go b/share/eds/file.go new file mode 100644 index 0000000000..b53b80add3 --- /dev/null +++ b/share/eds/file.go @@ -0,0 +1,174 @@ +package eds + +import ( + "io" + "os" + + "golang.org/x/exp/mmap" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" +) + +// File +// * immutable +// * versionable +// TODO: +// - Cache Rows and Cols +// - Avoid storing constant shares, like padding +type File struct { + path string + h Header + f fileBackend +} + +type fileBackend interface { + io.ReaderAt + io.Closer +} + +func OpenFile(path string) (*File, error) { + f, err := mmap.Open(path) + if err != nil { + return nil, err + } + + h, err := ReadHeaderAt(f, 0) + if err != nil { + return nil, err + } + + return &File{ + path: path, + h: h, + f: f, + }, nil +} + +// TODO: Allow setting features +func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare) (*File, error) { + f, err := os.Create(path) + if err != nil { + return nil, err + } + + h := Header{ + ShareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field + SquareSize: uint32(eds.Width()), + } + + if _, err = h.WriteTo(f); err != nil { + return nil, err + } + + for _, shr := range eds.Flattened() { + // TOOD: Buffer and write as single? 
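+		// A buffered variant could look roughly like this (hypothetical
+		// sketch, not part of this patch): concatenate the shares into one
+		// slice and issue a single Write, trading memory for fewer syscalls:
+		//
+		//	buf := make([]byte, 0, len(eds.Flattened())*int(h.ShareSize))
+		//	for _, shr := range eds.Flattened() {
+		//		buf = append(buf, shr...)
+		//	}
+		//	if _, err := f.Write(buf); err != nil {
+		//		return nil, err
+		//	}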
+ if _, err := f.Write(shr); err != nil { + return nil, err + } + } + + return &File{ + path: path, + f: f, + h: h, + }, err +} + +func (f *File) Close() error { + return f.f.Close() +} + +func (f *File) Header() Header { + return f.h +} + +func (f *File) Axis(idx int, tp rsmt2d.Axis) ([]share.Share, error) { + // TODO: Add Col support + shrLn := int64(f.h.ShareSize) + sqrLn := int64(f.h.SquareSize) + rwwLn := shrLn * sqrLn + + offset := int64(idx)*rwwLn + HeaderSize + rowdata := make([]byte, rwwLn) + if _, err := f.f.ReadAt(rowdata, offset); err != nil { + return nil, err + } + + row := make([]share.Share, sqrLn) + for i := range row { + row[i] = rowdata[int64(i)*shrLn : (int64(i)+1)*shrLn] + } + return row, nil +} + +func (f *File) Share(idx int) (share.Share, error) { + // TODO: Check the cache first + shrLn := int64(f.h.ShareSize) + + offset := int64(idx)*shrLn + HeaderSize + shr := make(share.Share, shrLn) + if _, err := f.f.ReadAt(shr, offset); err != nil { + return nil, err + } + return shr, nil +} + +func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, *nmt.Proof, error) { + // TODO: Cache the axis as well as computed tree + sqrLn := int(f.h.SquareSize) + rowIdx := idx / sqrLn + shrs, err := f.Axis(rowIdx, axis) + if err != nil { + return nil, nil, err + } + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.h.SquareSize/2), uint(rowIdx)) + for _, shr := range shrs { + err = tree.Push(shr) + if err != nil { + return nil, nil, err + } + } + + shrIdx := idx % sqrLn + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return nil, nil, err + } + + return shrs[shrIdx], &proof, nil +} + +func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) { + shrLn := int(f.h.ShareSize) + sqrLn := int(f.h.SquareSize) + + buf := make([]byte, sqrLn*sqrLn*shrLn) + if _, err := f.f.ReadAt(buf, HeaderSize); err != nil { + return nil, err + } + + shrs := make([][]byte, sqrLn*sqrLn) + for i := 0; i < sqrLn; i++ { + for j := 0; j < sqrLn; j++ { + coord := i*sqrLn + j + shrs[coord] = buf[coord*shrLn : (coord+1)*shrLn] + } + } + + treeFn := func(_ rsmt2d.Axis, index uint) rsmt2d.Tree { + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.h.SquareSize/2), index) + return &tree + } + + eds, err := rsmt2d.ImportExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn) + if err != nil { + return nil, err + } + + return eds, nil +} diff --git a/share/eds/file_header.go b/share/eds/file_header.go new file mode 100644 index 0000000000..782eb3a606 --- /dev/null +++ b/share/eds/file_header.go @@ -0,0 +1,61 @@ +package eds + +import ( + "encoding/binary" + "io" +) + +const HeaderSize = 32 + +type Header struct { + // User set features + // TODO: Add codec + Version uint8 + Compression uint8 + Extensions map[string]string + // Taken directly from EDS + ShareSize uint16 + SquareSize uint32 +} + +func (h *Header) WriteTo(w io.Writer) (int64, error) { + buf := make([]byte, HeaderSize) + buf[0] = h.Version + buf[1] = h.Compression + binary.LittleEndian.PutUint16(buf[2:4], h.ShareSize) + binary.LittleEndian.PutUint32(buf[4:12], h.SquareSize) + // TODO: Extensions + n, err := w.Write(buf) + return int64(n), err +} + +func (h *Header) ReadFrom(r io.Reader) (int64, error) { + buf := make([]byte, HeaderSize) + n, err := io.ReadFull(r, buf) + if err != nil { + return int64(n), err + } + + h.Version = buf[0] + h.Compression = buf[1] + h.ShareSize = binary.LittleEndian.Uint16(buf[2:4]) + h.SquareSize = binary.LittleEndian.Uint32(buf[4:12]) + + // TODO: Extensions + return 
int64(n), err +} + +func ReadHeaderAt(r io.ReaderAt, offset int64) (Header, error) { + h := Header{} + buf := make([]byte, HeaderSize) + _, err := r.ReadAt(buf, offset) + if err != nil { + return h, err + } + + h.Version = buf[0] + h.Compression = buf[1] + h.ShareSize = binary.LittleEndian.Uint16(buf[2:4]) + h.SquareSize = binary.LittleEndian.Uint32(buf[4:12]) + return h, nil +} diff --git a/share/eds/file_store.go b/share/eds/file_store.go new file mode 100644 index 0000000000..2cbd567bc2 --- /dev/null +++ b/share/eds/file_store.go @@ -0,0 +1,4 @@ +package eds + +type FileStore struct { +} diff --git a/share/eds/file_test.go b/share/eds/file_test.go new file mode 100644 index 0000000000..63cb6afbe5 --- /dev/null +++ b/share/eds/file_test.go @@ -0,0 +1,58 @@ +package eds + +import ( + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestFile(t *testing.T) { + path := t.TempDir() + "/testfile" + eds := edstest.RandEDS(t, 16) + + fl, err := CreateFile(path, eds) + require.NoError(t, err) + err = fl.Close() + require.NoError(t, err) + + fl, err = OpenFile(path) + require.NoError(t, err) + + for i := 0; i < int(eds.Width()); i++ { + row, err := fl.Axis(i, rsmt2d.Row) + require.NoError(t, err) + assert.EqualValues(t, eds.Row(uint(i)), row) + } + + width := int(eds.Width()) + for i := 0; i < width*2; i++ { + row, col := uint(i/width), uint(i%width) + shr, err := fl.Share(i) + require.NoError(t, err) + assert.EqualValues(t, eds.GetCell(row, col), shr) + + shr, proof, err := fl.ShareWithProof(i, rsmt2d.Row) + require.NoError(t, err) + assert.EqualValues(t, eds.GetCell(row, col), shr) + + roots, err := eds.RowRoots() + require.NoError(t, err) + + ok := proof.VerifyInclusion(sha256.New(), share.GetNamespace(shr).ToNMT(), [][]byte{shr}, roots[row]) + assert.True(t, ok) + } + + out, err := fl.EDS() + require.NoError(t, err) + assert.True(t, eds.Equals(out)) + + err = fl.Close() + require.NoError(t, err) +} diff --git a/share/ipld/blockserv.go b/share/ipld/blockserv.go index 2ed2a21c77..4bfca53bf6 100644 --- a/share/ipld/blockserv.go +++ b/share/ipld/blockserv.go @@ -26,5 +26,5 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we allow all codes except home-baked sha256NamespaceFlagged - return code == sha256NamespaceFlagged + return code == sha256NamespaceFlagged || code == 0x7801 } diff --git a/share/ipldv2/nmtv2.go b/share/ipldv2/nmtv2.go new file mode 100644 index 0000000000..486a88ac5d --- /dev/null +++ b/share/ipldv2/nmtv2.go @@ -0,0 +1,195 @@ +package ipldv2 + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/binary" + "fmt" + + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/sync" + mh "github.com/multiformats/go-multihash" + + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/pb" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" +) + +const ( + // codec is the codec used for leaf and inner nodes of a Namespaced Merkle Tree. + codec = 0x7800 + + // multihashCode is the multihash code used to hash blocks + // that contain an NMT node (inner and leaf nodes). 
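+	// NOTE: blocks hashed with this code are only accepted once the code is
+	// allowlisted in the blockservice, which is what the 0x7801 case added
+	// to share/ipld/blockserv.go above provides.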
+ multihashCode = 0x7801 +) + +// ShareCID returns the CID of the share with the given index in the given dataroot. +// TODO: Height is redundant and should be removed. +func ShareCID(dataroot []byte, height uint64, idx int) (cid.Cid, error) { + if got, want := len(dataroot), sha256.Size; got != want { + return cid.Cid{}, fmt.Errorf("invalid namespaced hash length, got: %v, want: %v", got, want) + } + + data := make([]byte, sha256.Size+4+8) + n := copy(data, dataroot) + binary.LittleEndian.PutUint64(data[n:], uint64(height)) + binary.LittleEndian.PutUint32(data[n+8:], uint32(idx)) + + buf, err := mh.Encode(data, multihashCode) + if err != nil { + return cid.Undef, err + } + return cid.NewCidV1(codec, buf), nil +} + +type Hasher struct { + // TODO: Hasher must be stateless eventually via sending inclusion proof from DAH to DataRoot + get libhead.Getter[*header.ExtendedHeader] + data []byte +} + +func (h *Hasher) Write(data []byte) (int, error) { + if h.data != nil { + panic("only a single Write is allowed") + } + // TODO Check size + // TODO Support Col proofs + + dataroot := data[:sha256.Size] + height := binary.LittleEndian.Uint64(data[sha256.Size : sha256.Size+8]) + idx := binary.LittleEndian.Uint32(data[sha256.Size+8 : sha256.Size+8+4]) + shareData := data[sha256.Size+8+4 : sha256.Size+8+4 : +share.Size] + proofData := data[sha256.Size+8+4+share.Size:] + + hdr, err := h.get.GetByHeight(context.TODO(), height) + if err != nil { + return 0, err + } + + if !bytes.Equal(hdr.DataHash, dataroot) { + return 0, fmt.Errorf("invalid dataroot") + } + + sqrLn := len(hdr.DAH.RowRoots) ^ 2 + if int(idx) > sqrLn { + return 0, fmt.Errorf("invalid share index") + } + + rowIdx := int(idx) / sqrLn + row := hdr.DAH.RowRoots[rowIdx] + + proofPb := pb.Proof{} + err = proofPb.Unmarshal(proofData) + if err != nil { + return 0, err + } + + proof := nmt.ProtoToProof(proofPb) + if proof.VerifyInclusion(sha256.New(), share.GetNamespace(shareData).ToNMT(), [][]byte{shareData}, row) { + return len(data), nil + } + + h.data = data + return len(h.data), nil +} + +func (h *Hasher) Sum([]byte) []byte { + return h.data[:sha256.Size+8+4] +} + +// Reset resets the Hash to its initial state. +func (h *Hasher) Reset() { + h.get = nil +} + +func (h *Hasher) Size() int { + return sha256.Size + 4 + 8 +} + +// BlockSize returns the hash's underlying block size. +func (h *Hasher) BlockSize() int { + return sha256.BlockSize +} + +// NewBlockservice constructs Blockservice for fetching NMTrees. +func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService { + return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist)) +} + +// NewMemBlockservice constructs Blockservice for fetching NMTrees with in-memory blockstore. +func NewMemBlockservice() blockservice.BlockService { + bstore := blockstore.NewBlockstore(sync.MutexWrap(datastore.NewMapDatastore())) + return NewBlockservice(bstore, nil) +} + +// defaultAllowlist keeps default list of hashes allowed in the network. +var defaultAllowlist allowlist + +type allowlist struct{} + +func (a allowlist) IsAllowed(code uint64) bool { + // we disable all codes except home-baked code + return code == multihashCode +} + +type node struct { + data []byte +} + +func MakeNode(idx int, shr share.Share, proof *nmt.Proof, dataroot []byte, height uint64) (block.Block, error) { + var data []byte + data = append(data, dataroot...) 
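+	// Layout, as parsed back in (*Hasher).Write:
+	// dataroot (32) | height (8) | index (4) | share | proof.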
+ data = binary.LittleEndian.AppendUint64(data, height) + data = binary.LittleEndian.AppendUint32(data, uint32(idx)) + data = append(data, shr...) + + proto := pb.Proof{} + proto.Nodes = proof.Nodes() + proto.End = int64(proof.End()) + proto.Start = int64(proof.Start()) + proto.IsMaxNamespaceIgnored = proof.IsMaxNamespaceIDIgnored() + proto.LeafHash = proof.LeafHash() + + proofData, err := proto.Marshal() + if err != nil { + return nil, err + } + + data = append(data, proofData...) + return &node{ + data: data, + }, nil +} + +func (n *node) RawData() []byte { + return n.data +} + +func (n *node) Cid() cid.Cid { + buf, err := mh.Encode(n.data[:sha256.Size+8+4], multihashCode) + if err != nil { + panic(err) + } + + return cid.NewCidV1(codec, buf) +} + +func (n *node) String() string { + // TODO implement me + panic("implement me") +} + +func (n *node) Loggable() map[string]interface{} { + // TODO implement me + panic("implement me") +} diff --git a/share/ipldv2/nmtv2_test.go b/share/ipldv2/nmtv2_test.go new file mode 100644 index 0000000000..b5f980bb27 --- /dev/null +++ b/share/ipldv2/nmtv2_test.go @@ -0,0 +1,69 @@ +package ipldv2 + +import ( + "context" + "hash" + "testing" + "time" + + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestV2Roundtrip(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + store := headertest.NewStore(t) + + mh.Register(multihashCode, func() hash.Hash { + return &Hasher{get: store} + }) + + height := uint64(1) + shrIdx := 5 + + dn := availability_test.NewTestDAGNet(ctx, t) + srv1 := dn.NewTestNode().BlockService + srv2 := dn.NewTestNode().BlockService + dn.ConnectAll() + + square := edstest.RandEDS(t, 16) + root, err := share.NewRoot(square) + require.NoError(t, err) + + hdr, err := store.GetByHeight(ctx, height) + require.NoError(t, err) + hdr.DAH = root + hdr.DataHash = root.Hash() + + file, err := eds.CreateFile(t.TempDir()+"/eds_file", square) + require.NoError(t, err) + + shr, prf, err := file.ShareWithProof(shrIdx, rsmt2d.Row) + require.NoError(t, err) + + nd, err := MakeNode(5, shr, prf, hdr.DataHash, hdr.Height()) + require.NoError(t, err) + + err = srv1.AddBlock(ctx, nd) + require.NoError(t, err) + + cid, err := ShareCID(root.Hash(), hdr.Height(), shrIdx) + require.NoError(t, err) + require.True(t, cid.Equals(nd.Cid())) + + b, err := srv2.GetBlock(ctx, cid) + require.NoError(t, err) + + assert.EqualValues(t, b.RawData(), nd.RawData()) +} From 7237ad7c74e4e4bbaeadefeefc8c659c68679db6 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Mon, 11 Sep 2023 23:03:15 +0200 Subject: [PATCH 003/132] feat(modp2p): listen on WebTransport by default --- share/ipldv2/nmtv2.go | 75 ++++++++++++++++---------------------- share/ipldv2/nmtv2_test.go | 15 ++------ 2 files changed, 35 insertions(+), 55 deletions(-) diff --git a/share/ipldv2/nmtv2.go b/share/ipldv2/nmtv2.go index 486a88ac5d..f303855111 100644 --- a/share/ipldv2/nmtv2.go +++ b/share/ipldv2/nmtv2.go @@ -1,8 +1,6 @@ package ipldv2 import ( - "bytes" - "context" "crypto/sha256" "encoding/binary" "fmt" @@ -16,11 +14,10 @@ import ( 
"github.com/ipfs/go-datastore/sync" mh "github.com/multiformats/go-multihash" - libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/nmt" "github.com/celestiaorg/nmt/pb" + "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" ) @@ -31,19 +28,26 @@ const ( // multihashCode is the multihash code used to hash blocks // that contain an NMT node (inner and leaf nodes). multihashCode = 0x7801 + + nmtHashSize = 2*share.NamespaceSize + sha256.Size ) // ShareCID returns the CID of the share with the given index in the given dataroot. -// TODO: Height is redundant and should be removed. -func ShareCID(dataroot []byte, height uint64, idx int) (cid.Cid, error) { - if got, want := len(dataroot), sha256.Size; got != want { - return cid.Cid{}, fmt.Errorf("invalid namespaced hash length, got: %v, want: %v", got, want) +func ShareCID(root *share.Root, idx int, axis rsmt2d.Axis) (cid.Cid, error) { + if idx < 0 || idx >= len(root.ColumnRoots) { + return cid.Undef, fmt.Errorf("invalid share index") + } + + dataroot := root.Hash() + axisroot := root.RowRoots[axis] + if axis == rsmt2d.Col { + axisroot = root.ColumnRoots[axis] } - data := make([]byte, sha256.Size+4+8) + data := make([]byte, sha256.Size+nmtHashSize+4) n := copy(data, dataroot) - binary.LittleEndian.PutUint64(data[n:], uint64(height)) - binary.LittleEndian.PutUint32(data[n+8:], uint32(idx)) + n += copy(data[n:], axisroot) + binary.LittleEndian.PutUint32(data[n:], uint32(idx)) buf, err := mh.Encode(data, multihashCode) if err != nil { @@ -53,8 +57,6 @@ func ShareCID(dataroot []byte, height uint64, idx int) (cid.Cid, error) { } type Hasher struct { - // TODO: Hasher must be stateless eventually via sending inclusion proof from DAH to DataRoot - get libhead.Getter[*header.ExtendedHeader] data []byte } @@ -65,37 +67,18 @@ func (h *Hasher) Write(data []byte) (int, error) { // TODO Check size // TODO Support Col proofs - dataroot := data[:sha256.Size] - height := binary.LittleEndian.Uint64(data[sha256.Size : sha256.Size+8]) - idx := binary.LittleEndian.Uint32(data[sha256.Size+8 : sha256.Size+8+4]) - shareData := data[sha256.Size+8+4 : sha256.Size+8+4 : +share.Size] - proofData := data[sha256.Size+8+4+share.Size:] - - hdr, err := h.get.GetByHeight(context.TODO(), height) - if err != nil { - return 0, err - } - - if !bytes.Equal(hdr.DataHash, dataroot) { - return 0, fmt.Errorf("invalid dataroot") - } - - sqrLn := len(hdr.DAH.RowRoots) ^ 2 - if int(idx) > sqrLn { - return 0, fmt.Errorf("invalid share index") - } - - rowIdx := int(idx) / sqrLn - row := hdr.DAH.RowRoots[rowIdx] + axisroot := data[sha256.Size : sha256.Size+nmtHashSize] + shareData := data[sha256.Size+nmtHashSize+8 : sha256.Size+nmtHashSize+8+share.Size] + proofData := data[sha256.Size+nmtHashSize+8+share.Size:] proofPb := pb.Proof{} - err = proofPb.Unmarshal(proofData) + err := proofPb.Unmarshal(proofData) if err != nil { return 0, err } proof := nmt.ProtoToProof(proofPb) - if proof.VerifyInclusion(sha256.New(), share.GetNamespace(shareData).ToNMT(), [][]byte{shareData}, row) { + if proof.VerifyInclusion(sha256.New(), share.GetNamespace(shareData).ToNMT(), [][]byte{shareData}, axisroot) { return len(data), nil } @@ -104,16 +87,16 @@ func (h *Hasher) Write(data []byte) (int, error) { } func (h *Hasher) Sum([]byte) []byte { - return h.data[:sha256.Size+8+4] + return h.data[:sha256.Size+nmtHashSize+4] } // Reset resets the Hash to its initial state. 
func (h *Hasher) Reset() { - h.get = nil + h.data = nil } func (h *Hasher) Size() int { - return sha256.Size + 4 + 8 + return sha256.Size + nmtHashSize + 4 } // BlockSize returns the hash's underlying block size. @@ -146,10 +129,16 @@ type node struct { data []byte } -func MakeNode(idx int, shr share.Share, proof *nmt.Proof, dataroot []byte, height uint64) (block.Block, error) { +func MakeNode(root *share.Root, axis rsmt2d.Axis, idx int, shr share.Share, proof *nmt.Proof) (block.Block, error) { + dataroot := root.Hash() + axisroot := root.RowRoots[axis] + if axis == rsmt2d.Col { + axisroot = root.ColumnRoots[axis] + } + var data []byte data = append(data, dataroot...) - data = binary.LittleEndian.AppendUint64(data, height) + data = append(data, axisroot...) data = binary.LittleEndian.AppendUint32(data, uint32(idx)) data = append(data, shr...) @@ -176,7 +165,7 @@ func (n *node) RawData() []byte { } func (n *node) Cid() cid.Cid { - buf, err := mh.Encode(n.data[:sha256.Size+8+4], multihashCode) + buf, err := mh.Encode(n.data[:sha256.Size+nmtHashSize+4], multihashCode) if err != nil { panic(err) } diff --git a/share/ipldv2/nmtv2_test.go b/share/ipldv2/nmtv2_test.go index b5f980bb27..f18da5c3ac 100644 --- a/share/ipldv2/nmtv2_test.go +++ b/share/ipldv2/nmtv2_test.go @@ -12,7 +12,6 @@ import ( "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" availability_test "github.com/celestiaorg/celestia-node/share/availability/test" "github.com/celestiaorg/celestia-node/share/eds" @@ -23,13 +22,10 @@ func TestV2Roundtrip(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - store := headertest.NewStore(t) - mh.Register(multihashCode, func() hash.Hash { - return &Hasher{get: store} + return &Hasher{} }) - height := uint64(1) shrIdx := 5 dn := availability_test.NewTestDAGNet(ctx, t) @@ -41,24 +37,19 @@ func TestV2Roundtrip(t *testing.T) { root, err := share.NewRoot(square) require.NoError(t, err) - hdr, err := store.GetByHeight(ctx, height) - require.NoError(t, err) - hdr.DAH = root - hdr.DataHash = root.Hash() - file, err := eds.CreateFile(t.TempDir()+"/eds_file", square) require.NoError(t, err) shr, prf, err := file.ShareWithProof(shrIdx, rsmt2d.Row) require.NoError(t, err) - nd, err := MakeNode(5, shr, prf, hdr.DataHash, hdr.Height()) + nd, err := MakeNode(root, rsmt2d.Row, shrIdx, shr, prf) require.NoError(t, err) err = srv1.AddBlock(ctx, nd) require.NoError(t, err) - cid, err := ShareCID(root.Hash(), hdr.Height(), shrIdx) + cid, err := ShareCID(root, shrIdx, rsmt2d.Row) require.NoError(t, err) require.True(t, cid.Equals(nd.Cid())) From ab6bec81ce68d2cf5cf81a0452ab9750a54ffe42 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Tue, 12 Sep 2023 13:42:02 +0200 Subject: [PATCH 004/132] lint --- share/eds/file.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/eds/file.go b/share/eds/file.go index b53b80add3..2e80965484 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -65,7 +65,7 @@ func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare) (*File, error) { } for _, shr := range eds.Flattened() { - // TOOD: Buffer and write as single? + // TODO: Buffer and write as single? 
 		if _, err := f.Write(shr); err != nil {
 			return nil, err
 		}
 	}

From 2f08bdd68850ceda6a763fbc3ebeb6a65ae4a548 Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Sun, 17 Sep 2023 15:55:50 +0200
Subject: [PATCH 005/132] now test verifies all the share proofs

---
 share/eds/file.go        | 13 +++++--------
 share/eds/file_header.go |  1 +
 share/eds/file_test.go   |  9 +++++++--
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/share/eds/file.go b/share/eds/file.go
index 2e80965484..d6ef971910 100644
--- a/share/eds/file.go
+++ b/share/eds/file.go
@@ -155,17 +155,14 @@ func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) {
 	shrs := make([][]byte, sqrLn*sqrLn)
 	for i := 0; i < sqrLn; i++ {
 		for j := 0; j < sqrLn; j++ {
-			coord := i*sqrLn + j
-			shrs[coord] = buf[coord*shrLn : (coord+1)*shrLn]
+			x := i*sqrLn + j
+			shrs[x] = buf[x*shrLn : (x+1)*shrLn]
 		}
 	}

-	treeFn := func(_ rsmt2d.Axis, index uint) rsmt2d.Tree {
-		tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.h.SquareSize/2), index)
-		return &tree
-	}
-
-	eds, err := rsmt2d.ImportExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn)
+	codec := share.DefaultRSMT2DCodec()
+	treeFn := wrapper.NewConstructor(uint64(f.h.SquareSize / 2))
+	eds, err := rsmt2d.ImportExtendedDataSquare(shrs, codec, treeFn)
 	if err != nil {
 		return nil, err
 	}

diff --git a/share/eds/file_header.go b/share/eds/file_header.go
index 782eb3a606..c435f8b52f 100644
--- a/share/eds/file_header.go
+++ b/share/eds/file_header.go
@@ -10,6 +10,7 @@ const HeaderSize = 32
 type Header struct {
 	// User set features
 	// TODO: Add codec
+	// TODO: Add ODS support
 	Version     uint8
 	Compression uint8
 	Extensions  map[string]string

diff --git a/share/eds/file_test.go b/share/eds/file_test.go
index 63cb6afbe5..e9e3ea8647 100644
--- a/share/eds/file_test.go
+++ b/share/eds/file_test.go
@@ -32,7 +32,7 @@ func TestFile(t *testing.T) {
 	}

 	width := int(eds.Width())
-	for i := 0; i < width*2; i++ {
+	for i := 0; i < width*width; i++ {
 		row, col := uint(i/width), uint(i%width)
 		shr, err := fl.Share(i)
 		require.NoError(t, err)
@@ -45,7 +45,12 @@ func TestFile(t *testing.T) {
 		roots, err := eds.RowRoots()
 		require.NoError(t, err)

-		ok := proof.VerifyInclusion(sha256.New(), share.GetNamespace(shr).ToNMT(), [][]byte{shr}, roots[row])
+		namespace := share.ParitySharesNamespace
+		if int(row) < width/2 && int(col) < width/2 {
+			namespace = share.GetNamespace(shr)
+		}
+
+		ok := proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, roots[row])
 		assert.True(t, ok)
 	}

From b33515e81876d4eefd4c09b2ef5ddbbbadc5e4a9 Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Mon, 18 Sep 2023 12:13:04 +0200
Subject: [PATCH 006/132] refactor sampling protocol and use proto for serialization

---
 share/eds/file.go                            |  48 +-
 share/ipldv2/ipldv2.go                       |  42 +
 .../ipldv2/{nmtv2_test.go => ipldv2_test.go} |  38 +-
 share/ipldv2/nmtv2.go                        | 184 ----
 share/ipldv2/pb/ipldv2pb.pb.go               | 829 ++++++++++++++++++
 share/ipldv2/pb/ipldv2pb.proto               |  28 +
 share/ipldv2/sample.go                       | 122 +++
 share/ipldv2/sample_hasher.go                |  57 ++
 share/ipldv2/sample_id.go                    |  95 ++
 9 files changed, 1215 insertions(+), 228 deletions(-)
 create mode 100644 share/ipldv2/ipldv2.go
 rename share/ipldv2/{nmtv2_test.go => ipldv2_test.go} (60%)
 delete mode 100644 share/ipldv2/nmtv2.go
 create mode
100644 share/ipldv2/pb/ipldv2pb.pb.go create mode 100644 share/ipldv2/pb/ipldv2pb.proto create mode 100644 share/ipldv2/sample.go create mode 100644 share/ipldv2/sample_hasher.go create mode 100644 share/ipldv2/sample_id.go diff --git a/share/eds/file.go b/share/eds/file.go index d6ef971910..1005468b74 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -21,8 +21,8 @@ import ( // - Avoid storing constant shares, like padding type File struct { path string - h Header - f fileBackend + hdr Header + fl fileBackend } type fileBackend interface { @@ -43,8 +43,8 @@ func OpenFile(path string) (*File, error) { return &File{ path: path, - h: h, - f: f, + hdr: h, + fl: f, }, nil } @@ -73,28 +73,28 @@ func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare) (*File, error) { return &File{ path: path, - f: f, - h: h, + fl: f, + hdr: h, }, err } func (f *File) Close() error { - return f.f.Close() + return f.fl.Close() } func (f *File) Header() Header { - return f.h + return f.hdr } func (f *File) Axis(idx int, _ rsmt2d.Axis) ([]share.Share, error) { // TODO: Add Col support - shrLn := int64(f.h.ShareSize) - sqrLn := int64(f.h.SquareSize) + shrLn := int64(f.hdr.ShareSize) + sqrLn := int64(f.hdr.SquareSize) rwwLn := shrLn * sqrLn offset := int64(idx)*rwwLn + HeaderSize rowdata := make([]byte, rwwLn) - if _, err := f.f.ReadAt(rowdata, offset); err != nil { + if _, err := f.fl.ReadAt(rowdata, offset); err != nil { return nil, err } @@ -107,48 +107,48 @@ func (f *File) Axis(idx int, _ rsmt2d.Axis) ([]share.Share, error) { func (f *File) Share(idx int) (share.Share, error) { // TODO: Check the cache first - shrLn := int64(f.h.ShareSize) + shrLn := int64(f.hdr.ShareSize) offset := int64(idx)*shrLn + HeaderSize shr := make(share.Share, shrLn) - if _, err := f.f.ReadAt(shr, offset); err != nil { + if _, err := f.fl.ReadAt(shr, offset); err != nil { return nil, err } return shr, nil } -func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, *nmt.Proof, error) { +func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { // TODO: Cache the axis as well as computed tree - sqrLn := int(f.h.SquareSize) + sqrLn := int(f.hdr.SquareSize) rowIdx := idx / sqrLn shrs, err := f.Axis(rowIdx, axis) if err != nil { - return nil, nil, err + return nil, nmt.Proof{}, err } - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.h.SquareSize/2), uint(rowIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.hdr.SquareSize/2), uint(rowIdx)) for _, shr := range shrs { err = tree.Push(shr) if err != nil { - return nil, nil, err + return nil, nmt.Proof{}, err } } shrIdx := idx % sqrLn proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, nil, err + return nil, nmt.Proof{}, err } - return shrs[shrIdx], &proof, nil + return shrs[shrIdx], proof, nil } func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) { - shrLn := int(f.h.ShareSize) - sqrLn := int(f.h.SquareSize) + shrLn := int(f.hdr.ShareSize) + sqrLn := int(f.hdr.SquareSize) buf := make([]byte, sqrLn*sqrLn*shrLn) - if _, err := f.f.ReadAt(buf, HeaderSize); err != nil { + if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { return nil, err } @@ -161,7 +161,7 @@ func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) { } codec := share.DefaultRSMT2DCodec() - treeFn := wrapper.NewConstructor(uint64(f.h.SquareSize / 2)) + treeFn := wrapper.NewConstructor(uint64(f.hdr.SquareSize / 2)) eds, err := rsmt2d.ImportExtendedDataSquare(shrs, codec, treeFn) if err != nil { return nil, err diff 
--git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go new file mode 100644 index 0000000000..abb6dc736c --- /dev/null +++ b/share/ipldv2/ipldv2.go @@ -0,0 +1,42 @@ +package ipldv2 + +import ( + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/sync" + logger "github.com/ipfs/go-log/v2" +) + +var log = logger.Logger("ipldv2") + +const ( + // codec is the codec used for leaf and inner nodes of a Namespaced Merkle Tree. + codec = 0x7800 + + // multihashCode is the multihash code used to hash blocks + // that contain an NMT node (inner and leaf nodes). + multihashCode = 0x7801 +) + +// NewBlockservice constructs Blockservice for fetching NMTrees. +func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService { + return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist)) +} + +// NewMemBlockservice constructs Blockservice for fetching NMTrees with in-memory blockstore. +func NewMemBlockservice() blockservice.BlockService { + bstore := blockstore.NewBlockstore(sync.MutexWrap(datastore.NewMapDatastore())) + return NewBlockservice(bstore, nil) +} + +// defaultAllowlist keeps default list of hashes allowed in the network. +var defaultAllowlist allowlist + +type allowlist struct{} + +func (a allowlist) IsAllowed(code uint64) bool { + // we disable all codes except home-baked code + return code == multihashCode +} diff --git a/share/ipldv2/nmtv2_test.go b/share/ipldv2/ipldv2_test.go similarity index 60% rename from share/ipldv2/nmtv2_test.go rename to share/ipldv2/ipldv2_test.go index f18da5c3ac..b6bb74ed4d 100644 --- a/share/ipldv2/nmtv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -2,11 +2,9 @@ package ipldv2 import ( "context" - "hash" "testing" "time" - mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,12 +20,6 @@ func TestV2Roundtrip(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - mh.Register(multihashCode, func() hash.Hash { - return &Hasher{} - }) - - shrIdx := 5 - dn := availability_test.NewTestDAGNet(ctx, t) srv1 := dn.NewTestNode().BlockService srv2 := dn.NewTestNode().BlockService @@ -40,21 +32,27 @@ func TestV2Roundtrip(t *testing.T) { file, err := eds.CreateFile(t.TempDir()+"/eds_file", square) require.NoError(t, err) - shr, prf, err := file.ShareWithProof(shrIdx, rsmt2d.Row) - require.NoError(t, err) + width := int(square.Width()) + for i := 0; i < width*width; i++ { + shr, prf, err := file.ShareWithProof(i, rsmt2d.Row) + require.NoError(t, err) - nd, err := MakeNode(root, rsmt2d.Row, shrIdx, shr, prf) - require.NoError(t, err) + smpl := NewSample(root, i, rsmt2d.Row, shr, prf) + require.NoError(t, err) - err = srv1.AddBlock(ctx, nd) - require.NoError(t, err) + err = smpl.Validate() + require.NoError(t, err) - cid, err := ShareCID(root, shrIdx, rsmt2d.Row) - require.NoError(t, err) - require.True(t, cid.Equals(nd.Cid())) + blkIn, err := smpl.IPLDBlock() + require.NoError(t, err) - b, err := srv2.GetBlock(ctx, cid) - require.NoError(t, err) + err = srv1.AddBlock(ctx, blkIn) + require.NoError(t, err) + + blkOut, err := srv2.GetBlock(ctx, blkIn.Cid()) + require.NoError(t, err) - assert.EqualValues(t, b.RawData(), nd.RawData()) + assert.EqualValues(t, blkIn.RawData(), blkOut.RawData()) + assert.EqualValues(t, blkIn.Cid(), blkOut.Cid()) + } } diff --git a/share/ipldv2/nmtv2.go 
b/share/ipldv2/nmtv2.go deleted file mode 100644 index f303855111..0000000000 --- a/share/ipldv2/nmtv2.go +++ /dev/null @@ -1,184 +0,0 @@ -package ipldv2 - -import ( - "crypto/sha256" - "encoding/binary" - "fmt" - - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/boxo/exchange" - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - mh "github.com/multiformats/go-multihash" - - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/pb" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" -) - -const ( - // codec is the codec used for leaf and inner nodes of a Namespaced Merkle Tree. - codec = 0x7800 - - // multihashCode is the multihash code used to hash blocks - // that contain an NMT node (inner and leaf nodes). - multihashCode = 0x7801 - - nmtHashSize = 2*share.NamespaceSize + sha256.Size -) - -// ShareCID returns the CID of the share with the given index in the given dataroot. -func ShareCID(root *share.Root, idx int, axis rsmt2d.Axis) (cid.Cid, error) { - if idx < 0 || idx >= len(root.ColumnRoots) { - return cid.Undef, fmt.Errorf("invalid share index") - } - - dataroot := root.Hash() - axisroot := root.RowRoots[axis] - if axis == rsmt2d.Col { - axisroot = root.ColumnRoots[axis] - } - - data := make([]byte, sha256.Size+nmtHashSize+4) - n := copy(data, dataroot) - n += copy(data[n:], axisroot) - binary.LittleEndian.PutUint32(data[n:], uint32(idx)) - - buf, err := mh.Encode(data, multihashCode) - if err != nil { - return cid.Undef, err - } - return cid.NewCidV1(codec, buf), nil -} - -type Hasher struct { - data []byte -} - -func (h *Hasher) Write(data []byte) (int, error) { - if h.data != nil { - panic("only a single Write is allowed") - } - // TODO Check size - // TODO Support Col proofs - - axisroot := data[sha256.Size : sha256.Size+nmtHashSize] - shareData := data[sha256.Size+nmtHashSize+8 : sha256.Size+nmtHashSize+8+share.Size] - proofData := data[sha256.Size+nmtHashSize+8+share.Size:] - - proofPb := pb.Proof{} - err := proofPb.Unmarshal(proofData) - if err != nil { - return 0, err - } - - proof := nmt.ProtoToProof(proofPb) - if proof.VerifyInclusion(sha256.New(), share.GetNamespace(shareData).ToNMT(), [][]byte{shareData}, axisroot) { - return len(data), nil - } - - h.data = data - return len(h.data), nil -} - -func (h *Hasher) Sum([]byte) []byte { - return h.data[:sha256.Size+nmtHashSize+4] -} - -// Reset resets the Hash to its initial state. -func (h *Hasher) Reset() { - h.data = nil -} - -func (h *Hasher) Size() int { - return sha256.Size + nmtHashSize + 4 -} - -// BlockSize returns the hash's underlying block size. -func (h *Hasher) BlockSize() int { - return sha256.BlockSize -} - -// NewBlockservice constructs Blockservice for fetching NMTrees. -func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService { - return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist)) -} - -// NewMemBlockservice constructs Blockservice for fetching NMTrees with in-memory blockstore. -func NewMemBlockservice() blockservice.BlockService { - bstore := blockstore.NewBlockstore(sync.MutexWrap(datastore.NewMapDatastore())) - return NewBlockservice(bstore, nil) -} - -// defaultAllowlist keeps default list of hashes allowed in the network. 
-var defaultAllowlist allowlist - -type allowlist struct{} - -func (a allowlist) IsAllowed(code uint64) bool { - // we disable all codes except home-baked code - return code == multihashCode -} - -type node struct { - data []byte -} - -func MakeNode(root *share.Root, axis rsmt2d.Axis, idx int, shr share.Share, proof *nmt.Proof) (block.Block, error) { - dataroot := root.Hash() - axisroot := root.RowRoots[axis] - if axis == rsmt2d.Col { - axisroot = root.ColumnRoots[axis] - } - - var data []byte - data = append(data, dataroot...) - data = append(data, axisroot...) - data = binary.LittleEndian.AppendUint32(data, uint32(idx)) - data = append(data, shr...) - - proto := pb.Proof{} - proto.Nodes = proof.Nodes() - proto.End = int64(proof.End()) - proto.Start = int64(proof.Start()) - proto.IsMaxNamespaceIgnored = proof.IsMaxNamespaceIDIgnored() - proto.LeafHash = proof.LeafHash() - - proofData, err := proto.Marshal() - if err != nil { - return nil, err - } - - data = append(data, proofData...) - return &node{ - data: data, - }, nil -} - -func (n *node) RawData() []byte { - return n.data -} - -func (n *node) Cid() cid.Cid { - buf, err := mh.Encode(n.data[:sha256.Size+nmtHashSize+4], multihashCode) - if err != nil { - panic(err) - } - - return cid.NewCidV1(codec, buf) -} - -func (n *node) String() string { - // TODO implement me - panic("implement me") -} - -func (n *node) Loggable() map[string]interface{} { - // TODO implement me - panic("implement me") -} diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go new file mode 100644 index 0000000000..09a6a2bb9f --- /dev/null +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -0,0 +1,829 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: share/ipldv2/pb/ipldv2pb.proto + +package ipldv2pb + +import ( + fmt "fmt" + pb "github.com/celestiaorg/nmt/pb" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Axis int32 + +const ( + Axis_Row Axis = 0 + Axis_Col Axis = 1 +) + +var Axis_name = map[int32]string{ + 0: "Row", + 1: "Col", +} + +var Axis_value = map[string]int32{ + "Row": 0, + "Col": 1, +} + +func (x Axis) String() string { + return proto.EnumName(Axis_name, int32(x)) +} + +func (Axis) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{0} +} + +type SampleType int32 + +const ( + SampleType_Data SampleType = 0 + SampleType_Parity SampleType = 1 +) + +var SampleType_name = map[int32]string{ + 0: "Data", + 1: "Parity", +} + +var SampleType_value = map[string]int32{ + "Data": 0, + "Parity": 1, +} + +func (x SampleType) String() string { + return proto.EnumName(SampleType_name, int32(x)) +} + +func (SampleType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{1} +} + +type SampleID struct { + DataRoot []byte `protobuf:"bytes,1,opt,name=data_root,json=dataRoot,proto3" json:"data_root,omitempty"` + DahRoot []byte `protobuf:"bytes,2,opt,name=dah_root,json=dahRoot,proto3" json:"dah_root,omitempty"` + Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` +} + +func (m *SampleID) Reset() { *m = SampleID{} } +func (m *SampleID) String() string { return proto.CompactTextString(m) } +func (*SampleID) ProtoMessage() {} +func (*SampleID) Descriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{0} +} +func (m *SampleID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SampleID) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleID.Merge(m, src) +} +func (m *SampleID) XXX_Size() int { + return m.Size() +} +func (m *SampleID) XXX_DiscardUnknown() { + xxx_messageInfo_SampleID.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleID proto.InternalMessageInfo + +func (m *SampleID) GetDataRoot() []byte { + if m != nil { + return m.DataRoot + } + return nil +} + +func (m *SampleID) GetDahRoot() []byte { + if m != nil { + return m.DahRoot + } + return nil +} + +func (m *SampleID) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *SampleID) GetAxis() Axis { + if m != nil { + return m.Axis + } + return Axis_Row +} + +type Sample struct { + Id *SampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Type SampleType `protobuf:"varint,2,opt,name=type,proto3,enum=SampleType" json:"type,omitempty"` + Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` + Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{1} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetId() *SampleID { + if m != nil { + return m.Id + } + return nil +} + +func (m *Sample) GetType() SampleType { + if m != nil { + return m.Type + } + return SampleType_Data +} + +func (m *Sample) GetShare() []byte { + if m != nil { + return m.Share + } + return nil +} + +func (m *Sample) GetProof() *pb.Proof { + if m != nil { + return m.Proof + } + return nil +} + +func init() { + proto.RegisterEnum("Axis", Axis_name, Axis_value) + proto.RegisterEnum("SampleType", SampleType_name, SampleType_value) + proto.RegisterType((*SampleID)(nil), "SampleID") + proto.RegisterType((*Sample)(nil), "Sample") +} + +func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } + +var fileDescriptor_cb41c3a4f982a271 = []byte{ + // 305 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1c, 0xc6, 0x9b, 0xad, 0xdb, 0xba, 0xff, 0xe6, 0x2c, 0xc1, 0x43, 0xa7, 0x10, 0xc7, 0x40, 0x28, + 0x3b, 0xb4, 0x50, 0x9f, 0x40, 0xdd, 0xc5, 0xdb, 0x88, 0xde, 0x25, 0x25, 0x95, 0x06, 0xaa, 0x09, + 0x6d, 0xd0, 0xf6, 0xe8, 0x1b, 0xf8, 0x58, 0x1e, 0x77, 0xf4, 0x28, 0xed, 0x8b, 0x48, 0x93, 0x8a, + 0xb7, 0xef, 0xcb, 0xef, 0xf0, 0xfb, 0x7f, 0x01, 0x52, 0xe5, 0xac, 0xcc, 0x62, 0xa1, 0x0a, 0xfe, + 0x96, 0xc4, 0x2a, 0x1d, 0x92, 0x4a, 0x23, 0x55, 0x4a, 0x2d, 0xcf, 0x57, 0x2a, 0x8d, 0x55, 0x29, + 0xe5, 0xb3, 0xed, 0xdb, 0x0a, 0xbc, 0x07, 0xf6, 0xa2, 0x8a, 0xec, 0x7e, 0x8f, 0x2f, 0x60, 0xce, + 0x99, 0x66, 0x4f, 0xa5, 0x94, 0x3a, 0x40, 0x1b, 0x14, 0x2e, 0xa9, 0xd7, 0x3f, 0x50, 0x29, 0x35, + 0x5e, 0x83, 0xc7, 0x59, 0x6e, 0xd9, 0xc8, 0xb0, 0x19, 0x67, 0xb9, 0x41, 0x67, 0x30, 0x11, 0xaf, + 0x3c, 0xab, 0x83, 0xf1, 0x06, 0x85, 0x27, 0xd4, 0x16, 0xbc, 0x06, 0x97, 0xd5, 0xa2, 0x0a, 0xdc, + 0x0d, 0x0a, 0x57, 0xc9, 0x24, 0xba, 0xa9, 0x45, 0x45, 0xcd, 0xd3, 0xf6, 0x03, 0xc1, 0xd4, 0x5a, + 0xf1, 0x1a, 0x46, 0x82, 0x1b, 0xd9, 0x22, 0x99, 0x47, 0x7f, 0xa7, 0xd0, 0x91, 0xe0, 0xf8, 0x12, + 0x5c, 0xdd, 0xa8, 0xcc, 0xd8, 0x56, 0xc9, 0x62, 0x80, 0x8f, 0x8d, 0xca, 0xa8, 0x01, 0xbd, 0xd7, + 0xac, 0x35, 0xde, 0x25, 0xb5, 0x05, 0x5f, 0xc1, 0xc4, 0x0c, 0x34, 0xe2, 0x45, 0x72, 0x1a, 0x0d, + 0x73, 0xd3, 0xe8, 0xd0, 0x07, 0x6a, 0xe9, 0x2e, 0x00, 0xb7, 0xbf, 0x08, 0xcf, 0x60, 0x4c, 0xe5, + 0xbb, 0xef, 0xf4, 0xe1, 0x4e, 0x16, 0x3e, 0xda, 0x6d, 0x01, 0xfe, 0x55, 0xd8, 0x03, 0x77, 0xcf, + 0x34, 0xf3, 0x1d, 0x0c, 0x30, 0x3d, 0xb0, 0x52, 0xe8, 0xc6, 0x47, 0xb7, 0xc1, 0x57, 0x4b, 0xd0, + 0xb1, 0x25, 0xe8, 0xa7, 0x25, 0xe8, 0xb3, 0x23, 0xce, 0xb1, 0x23, 0xce, 0x77, 0x47, 0x9c, 0x74, + 0x6a, 0xfe, 0xf5, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0xea, 0xbb, 0x85, 0x53, 0x89, 0x01, 0x00, + 0x00, +} + +func (m *SampleID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l 
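+	// Note: gogoproto fills the buffer from the tail towards the head, so
+	// the highest-numbered field (Axis, tag 4) is marshaled first below.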
+ if m.Axis != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Axis)) + i-- + dAtA[i] = 0x20 + } + if m.Index != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if len(m.DahRoot) > 0 { + i -= len(m.DahRoot) + copy(dAtA[i:], m.DahRoot) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DahRoot))) + i-- + dAtA[i] = 0x12 + } + if len(m.DataRoot) > 0 { + i -= len(m.DataRoot) + copy(dAtA[i:], m.DataRoot) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Share) > 0 { + i -= len(m.Share) + copy(dAtA[i:], m.Share) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Share))) + i-- + dAtA[i] = 0x1a + } + if m.Type != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if m.Id != nil { + { + size, err := m.Id.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintIpldv2Pb(dAtA []byte, offset int, v uint64) int { + offset -= sovIpldv2Pb(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SampleID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DataRoot) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + l = len(m.DahRoot) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Index)) + } + if m.Axis != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Axis)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != nil { + l = m.Id.Size() + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Type)) + } + l = len(m.Share) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + return n +} + +func sovIpldv2Pb(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozIpldv2Pb(x uint64) (n int) { + return sovIpldv2Pb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SampleID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleID: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataRoot = append(m.DataRoot[:0], dAtA[iNdEx:postIndex]...) + if m.DataRoot == nil { + m.DataRoot = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DahRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DahRoot = append(m.DahRoot[:0], dAtA[iNdEx:postIndex]...) + if m.DahRoot == nil { + m.DahRoot = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Axis", wireType) + } + m.Axis = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Axis |= Axis(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIpldv2Pb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ if msglen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Id == nil { + m.Id = &SampleID{} + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SampleType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Share", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Share = append(m.Share[:0], dAtA[iNdEx:postIndex]...) + if m.Share == nil { + m.Share = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &pb.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIpldv2Pb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipIpldv2Pb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthIpldv2Pb + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, 
ErrUnexpectedEndOfGroupIpldv2Pb + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthIpldv2Pb + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthIpldv2Pb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowIpldv2Pb = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupIpldv2Pb = fmt.Errorf("proto: unexpected end of group") +) diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto new file mode 100644 index 0000000000..c5e30ce92d --- /dev/null +++ b/share/ipldv2/pb/ipldv2pb.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +import "pb/proof.proto"; + +enum Axis { + Row = 0; + Col = 1; +} + +enum SampleType { + Data = 0; + Parity = 1; +} + +message SampleID{ + bytes data_root = 1; + bytes dah_root = 2; + uint32 index = 3; + Axis axis = 4; +} + +message Sample { + SampleID id = 1; + + SampleType type = 2; + bytes share = 3; + proof.pb.Proof proof = 4; +} diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go new file mode 100644 index 0000000000..dddb1c5b82 --- /dev/null +++ b/share/ipldv2/sample.go @@ -0,0 +1,122 @@ +package ipldv2 + +import ( + "crypto/sha256" + "errors" + + "github.com/celestiaorg/nmt" + nmtpb "github.com/celestiaorg/nmt/pb" + "github.com/celestiaorg/rsmt2d" + blocks "github.com/ipfs/go-block-format" + + "github.com/celestiaorg/celestia-node/share" + ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" +) + +type Sample struct { + ID SampleID + Type SampleType + Proof nmt.Proof + Share share.Share +} + +type SampleType uint8 + +const ( + DataSample SampleType = iota + ParitySample +) + +func NewSample(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *Sample { + id := NewSampleID(root, idx, axis) + + sqrLn := len(root.RowRoots) + row, col := idx/sqrLn, idx%sqrLn + tp := ParitySample + if row < sqrLn/2 && col < sqrLn/2 { + tp = DataSample + } + + return &Sample{ + ID: id, + Type: tp, + Proof: proof, + Share: shr, + } +} + +func (s *Sample) Proto() *ipldv2pb.Sample { + // TODO: Extract as helper to nmt + proof := &nmtpb.Proof{} + proof.Nodes = s.Proof.Nodes() + proof.End = int64(s.Proof.End()) + proof.Start = int64(s.Proof.Start()) + proof.IsMaxNamespaceIgnored = s.Proof.IsMaxNamespaceIDIgnored() + proof.LeafHash = s.Proof.LeafHash() + + return &ipldv2pb.Sample{ + Id: s.ID.Proto(), + Type: ipldv2pb.SampleType(s.Type), + Proof: proof, + Share: s.Share, + } +} + +func (s *Sample) IPLDBlock() (blocks.Block, error) { + cid, err := s.ID.Cid() + if err != nil { + return nil, err + } + + data, err := s.MarshalBinary() + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, cid) +} + +func (s *Sample) MarshalBinary() ([]byte, error) { + return s.Proto().Marshal() +} + +func (s *Sample) UnmarshalBinary(data []byte) error { + proto := &ipldv2pb.Sample{} + err := proto.Unmarshal(data) + if err != nil { + return err + } + + s.ID = SampleID{ + DataRoot: proto.Id.DataRoot, + DAHRoot: proto.Id.DahRoot, + Index: int(proto.Id.Index), + Axis: rsmt2d.Axis(proto.Id.Axis), + } + s.Type = SampleType(proto.Type) + s.Proof = nmt.ProtoToProof(*proto.Proof) + s.Share = proto.Share + return nil +} + +func (s *Sample) Validate() error { + if err := s.ID.Validate(); err != nil { + return err + } + + if s.Type != DataSample && s.Type != ParitySample { + return errors.New("malformed sample type") + } + + // TODO 
Support Col proofs + namespace := share.ParitySharesNamespace + if s.Type == DataSample { + namespace = share.GetNamespace(s.Share) + } + + if !s.Proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{s.Share}, s.ID.DAHRoot) { + return errors.New("sample proof is invalid") + } + + return nil +} diff --git a/share/ipldv2/sample_hasher.go b/share/ipldv2/sample_hasher.go new file mode 100644 index 0000000000..828904d51c --- /dev/null +++ b/share/ipldv2/sample_hasher.go @@ -0,0 +1,57 @@ +package ipldv2 + +import ( + "crypto/sha256" + "hash" + + mh "github.com/multiformats/go-multihash" +) + +func init() { + // Register hasher for multihash. + mh.Register(multihashCode, func() hash.Hash { + return &SampleHasher{} + }) +} + +// SampleHasher implements hash.Hash interface for Samples. +type SampleHasher struct { + sample Sample +} + +// Write expects a marshaled Sample to validate. +func (sh *SampleHasher) Write(data []byte) (int, error) { + err := sh.sample.UnmarshalBinary(data) + if err != nil { + log.Error(err) + return 0, err + } + + if err = sh.sample.Validate(); err != nil { + log.Error(err) + return 0, err + } + + return len(data), nil +} + +// Sum returns the "multihash" of the SampleID. +func (sh *SampleHasher) Sum([]byte) []byte { + sum, _ := sh.sample.ID.MarshalBinary() + return sum +} + +// Reset resets the Hash to its initial state. +func (sh *SampleHasher) Reset() { + sh.sample = Sample{} +} + +// Size returns the number of bytes Sum will return. +func (sh *SampleHasher) Size() int { + return SampleIDSize +} + +// BlockSize returns the hash's underlying block size. +func (sh *SampleHasher) BlockSize() int { + return sha256.BlockSize +} diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go new file mode 100644 index 0000000000..997683089b --- /dev/null +++ b/share/ipldv2/sample_id.go @@ -0,0 +1,95 @@ +package ipldv2 + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + + "github.com/celestiaorg/celestia-node/share" + ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" + "github.com/celestiaorg/rsmt2d" + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +// SampleIDSize is the size of the SampleID in bytes +const SampleIDSize = 127 + +// TODO: Harden with validation of fields and inputs +type SampleID struct { + DataRoot []byte + DAHRoot []byte + Index int + Axis rsmt2d.Axis +} + +func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID { + sqrLn := len(root.RowRoots) + row, col := idx/sqrLn, idx%sqrLn + dahroot := root.RowRoots[row] + if axis == rsmt2d.Col { + dahroot = root.ColumnRoots[col] + } + + return SampleID{ + DataRoot: root.Hash(), + DAHRoot: dahroot, + Index: idx, + Axis: axis, + } +} + +func (s *SampleID) Cid() (cid.Cid, error) { + data, err := s.MarshalBinary() + if err != nil { + return cid.Undef, err + } + + buf, err := mh.Encode(data, multihashCode) + if err != nil { + return cid.Undef, err + } + + return cid.NewCidV1(codec, buf), nil +} + +func (s *SampleID) Proto() *ipldv2pb.SampleID { + return &ipldv2pb.SampleID{ + DataRoot: s.DataRoot, + DahRoot: s.DAHRoot, + Index: uint32(s.Index), + Axis: ipldv2pb.Axis(s.Axis), + } +} + +func (s *SampleID) MarshalBinary() ([]byte, error) { + // we cannot use protobuf here because it exceeds multihash limit of 128 bytes + data := make([]byte, 127) + n := copy(data, s.DataRoot) + n += copy(data[n:], s.DAHRoot) + binary.LittleEndian.PutUint32(data[n:], uint32(s.Index)) + data[n+4] = byte(s.Axis) + return data, nil +} + +// TODO(@Wondertan): Eventually 
this should become configurable +const ( + hashSize = sha256.Size + dahRootSize = 2*share.NamespaceSize + hashSize +) + +func (s *SampleID) Validate() error { + if len(s.DataRoot) != hashSize { + return errors.New("malformed data root") + } + + if len(s.DAHRoot) != dahRootSize { + return errors.New("malformed DAH root") + } + + if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row { + return errors.New("malformed axis") + } + + return nil +} From 59aa7309f55481483d8b848bff05f6c86b21c781 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Mon, 18 Sep 2023 17:27:03 +0200 Subject: [PATCH 007/132] docs and tests for ipldv2 --- share/eds/file.go | 2 +- share/ipldv2/ipldv2_test.go | 15 ++------ share/ipldv2/sample.go | 59 ++++++++++++++++++++++++++---- share/ipldv2/sample_hasher.go | 2 +- share/ipldv2/sample_hasher_test.go | 40 ++++++++++++++++++++ share/ipldv2/sample_id.go | 52 +++++++++++++++++++------- share/ipldv2/sample_id_test.go | 39 ++++++++++++++++++++ share/ipldv2/sample_test.go | 37 +++++++++++++++++++ 8 files changed, 211 insertions(+), 35 deletions(-) create mode 100644 share/ipldv2/sample_hasher_test.go create mode 100644 share/ipldv2/sample_id_test.go create mode 100644 share/ipldv2/sample_test.go diff --git a/share/eds/file.go b/share/eds/file.go index 1005468b74..379f675251 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -126,7 +126,7 @@ func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof return nil, nmt.Proof{}, err } - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.hdr.SquareSize/2), uint(rowIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(rowIdx)) for _, shr := range shrs { err = tree.Push(shr) if err != nil { diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index b6bb74ed4d..08e8e6e1d8 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -10,12 +10,12 @@ import ( "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) +// TestV2Roundtrip tests full protocol round trip of: +// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. 
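+//
+// In condensed form, the flow the test below exercises is (a sketch, reusing
+// this package's own names):
+//
+//	smpl, _ := NewSampleFrom(square, i, rsmt2d.Row) // sample share i with proof
+//	blk, _ := smpl.IPLDBlock()                      // wrap as IPLD block
+//	_ = srv1.AddBlock(ctx, blk)                     // serve over Bitswap
+//	blk, _ = srv2.GetBlock(ctx, blk.Cid())          // fetch; hasher re-validates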
func TestV2Roundtrip(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() @@ -26,18 +26,9 @@ func TestV2Roundtrip(t *testing.T) { dn.ConnectAll() square := edstest.RandEDS(t, 16) - root, err := share.NewRoot(square) - require.NoError(t, err) - - file, err := eds.CreateFile(t.TempDir()+"/eds_file", square) - require.NoError(t, err) - width := int(square.Width()) for i := 0; i < width*width; i++ { - shr, prf, err := file.ShareWithProof(i, rsmt2d.Row) - require.NoError(t, err) - - smpl := NewSample(root, i, rsmt2d.Row, shr, prf) + smpl, err := NewSampleFrom(square, i, rsmt2d.Row) require.NoError(t, err) err = smpl.Validate() diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go index dddb1c5b82..aecb7b8598 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/sample.go @@ -4,29 +4,40 @@ import ( "crypto/sha256" "errors" + blocks "github.com/ipfs/go-block-format" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" nmtpb "github.com/celestiaorg/nmt/pb" "github.com/celestiaorg/rsmt2d" - blocks "github.com/ipfs/go-block-format" "github.com/celestiaorg/celestia-node/share" ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) -type Sample struct { - ID SampleID - Type SampleType - Proof nmt.Proof - Share share.Share -} - +// SampleType represents type of sample. type SampleType uint8 const ( + // DataSample is a sample of a data share. DataSample SampleType = iota + // ParitySample is a sample of a parity share. ParitySample ) +// Sample represents a sample of an NMT in EDS. +type Sample struct { + // ID of the Sample + ID SampleID + // Type of the Sample + Type SampleType + // Proof of Share inclusion in the NMT + Proof nmt.Proof + // Share being sampled + Share share.Share +} + +// NewSample constructs a new Sample. func NewSample(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *Sample { id := NewSampleID(root, idx, axis) @@ -45,6 +56,34 @@ func NewSample(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, pro } } +// NewSampleFrom samples the EDS and constructs a new Sample. +func NewSampleFrom(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*Sample, error) { + sqrLn := int(eds.Width()) + rowIdx, shrIdx := idx/sqrLn, idx%sqrLn + shrs := eds.Row(uint(rowIdx)) + + root, err := share.NewRoot(eds) + if err != nil { + return nil, err + } + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(rowIdx)) + for _, shr := range shrs { + err := tree.Push(shr) + if err != nil { + return nil, err + } + } + + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return nil, err + } + + return NewSample(root, idx, axis, shrs[shrIdx], proof), nil +} + +// Proto converts Sample to its protobuf representation. func (s *Sample) Proto() *ipldv2pb.Sample { // TODO: Extract as helper to nmt proof := &nmtpb.Proof{} @@ -62,6 +101,7 @@ func (s *Sample) Proto() *ipldv2pb.Sample { } } +// IPLDBlock converts Sample to an IPLD block for Bitswap compatibility. func (s *Sample) IPLDBlock() (blocks.Block, error) { cid, err := s.ID.Cid() if err != nil { @@ -76,10 +116,12 @@ func (s *Sample) IPLDBlock() (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } +// MarshalBinary marshals Sample to binary. func (s *Sample) MarshalBinary() ([]byte, error) { return s.Proto().Marshal() } +// UnmarshalBinary unmarshals Sample from binary. 
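+// It is the exact inverse of MarshalBinary and expects the protobuf wire
+// format produced there; SampleHasher.Write depends on this when Bitswap
+// hands it a raw block to verify.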
func (s *Sample) UnmarshalBinary(data []byte) error { proto := &ipldv2pb.Sample{} err := proto.Unmarshal(data) @@ -99,6 +141,7 @@ func (s *Sample) UnmarshalBinary(data []byte) error { return nil } +// Validate validates Sample's fields and proof of Share inclusion in the NMT. func (s *Sample) Validate() error { if err := s.ID.Validate(); err != nil { return err diff --git a/share/ipldv2/sample_hasher.go b/share/ipldv2/sample_hasher.go index 828904d51c..b4d380ea81 100644 --- a/share/ipldv2/sample_hasher.go +++ b/share/ipldv2/sample_hasher.go @@ -16,7 +16,7 @@ func init() { // SampleHasher implements hash.Hash interface for Samples. type SampleHasher struct { - sample Sample + sample Sample } // Write expects a marshaled Sample to validate. diff --git a/share/ipldv2/sample_hasher_test.go b/share/ipldv2/sample_hasher_test.go new file mode 100644 index 0000000000..c7e2b23b14 --- /dev/null +++ b/share/ipldv2/sample_hasher_test.go @@ -0,0 +1,40 @@ +package ipldv2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestSampleHasher(t *testing.T) { + hasher := &SampleHasher{} + + _, err := hasher.Write([]byte("hello")) + assert.Error(t, err) + + square := edstest.RandEDS(t, 2) + + sample, err := NewSampleFrom(square, 2, rsmt2d.Row) + require.NoError(t, err) + + data, err := sample.MarshalBinary() + require.NoError(t, err) + + n, err := hasher.Write(data) + require.NoError(t, err) + assert.EqualValues(t, len(data), n) + + digest := hasher.Sum(nil) + sid, err := sample.ID.MarshalBinary() + require.NoError(t, err) + assert.EqualValues(t, sid, digest) + + hasher.Reset() + digest = hasher.Sum(nil) + assert.NotEqualValues(t, digest, sid) +} diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go index 997683089b..0d2a6fc00b 100644 --- a/share/ipldv2/sample_id.go +++ b/share/ipldv2/sample_id.go @@ -5,24 +5,38 @@ import ( "encoding/binary" "errors" - "github.com/celestiaorg/celestia-node/share" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" - "github.com/celestiaorg/rsmt2d" "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) // SampleIDSize is the size of the SampleID in bytes const SampleIDSize = 127 -// TODO: Harden with validation of fields and inputs +// TODO(@Wondertan): Eventually this should become configurable +const ( + hashSize = sha256.Size + dahRootSize = 2*share.NamespaceSize + hashSize +) + +// SampleID is an unique identifier of a Sample. type SampleID struct { + // DataRoot is the root of the data square + // Needed to identify the data square in the whole chain DataRoot []byte - DAHRoot []byte - Index int - Axis rsmt2d.Axis + // DAHRoot is the Col or Row root from DAH of the data square + DAHRoot []byte + // Index is the index of the sample in the data square(not row or col index) + Index int + // Axis is Col or Row axis of the sample in the data square + Axis rsmt2d.Axis } +// NewSampleID constructs a new SampleID. func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID { sqrLn := len(root.RowRoots) row, col := idx/sqrLn, idx%sqrLn @@ -39,6 +53,7 @@ func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID { } } +// Cid returns sample ID encoded as CID. 
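+// Unusually, the multihash digest of the CID is the raw binary SampleID
+// itself (see MarshalBinary) rather than a hash over the sample data, which
+// keeps the identifier self-describing for the custom hasher.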
func (s *SampleID) Cid() (cid.Cid, error) { data, err := s.MarshalBinary() if err != nil { @@ -53,6 +68,7 @@ func (s *SampleID) Cid() (cid.Cid, error) { return cid.NewCidV1(codec, buf), nil } +// Proto converts SampleID to its protobuf representation. func (s *SampleID) Proto() *ipldv2pb.SampleID { return &ipldv2pb.SampleID{ DataRoot: s.DataRoot, @@ -62,9 +78,10 @@ func (s *SampleID) Proto() *ipldv2pb.SampleID { } } +// MarshalBinary encodes SampleID into binary form. func (s *SampleID) MarshalBinary() ([]byte, error) { // we cannot use protobuf here because it exceeds multihash limit of 128 bytes - data := make([]byte, 127) + data := make([]byte, SampleIDSize) n := copy(data, s.DataRoot) n += copy(data[n:], s.DAHRoot) binary.LittleEndian.PutUint32(data[n:], uint32(s.Index)) @@ -72,12 +89,21 @@ func (s *SampleID) MarshalBinary() ([]byte, error) { return data, nil } -// TODO(@Wondertan): Eventually this should become configurable -const ( - hashSize = sha256.Size - dahRootSize = 2*share.NamespaceSize + hashSize -) +// UnmarshalBinary decodes SampleID from binary form. +func (s *SampleID) UnmarshalBinary(data []byte) error { + if len(data) != SampleIDSize { + return errors.New("malformed sample id") + } + + // copying data to avoid slice aliasing + s.DataRoot = append(s.DataRoot, data[:hashSize]...) + s.DAHRoot = append(s.DAHRoot, data[hashSize:hashSize+dahRootSize]...) + s.Index = int(binary.LittleEndian.Uint32(data[hashSize+dahRootSize : hashSize+dahRootSize+4])) + s.Axis = rsmt2d.Axis(data[hashSize+dahRootSize+4]) + return nil +} +// Validate validates fields of SampleID. func (s *SampleID) Validate() error { if len(s.DataRoot) != hashSize { return errors.New("malformed data root") diff --git a/share/ipldv2/sample_id_test.go b/share/ipldv2/sample_id_test.go new file mode 100644 index 0000000000..7158cecfe7 --- /dev/null +++ b/share/ipldv2/sample_id_test.go @@ -0,0 +1,39 @@ +package ipldv2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestSampleID(t *testing.T) { + square := edstest.RandEDS(t, 2) + root, err := share.NewRoot(square) + require.NoError(t, err) + + sid := NewSampleID(root, 2, rsmt2d.Row) + + id, err := sid.Cid() + require.NoError(t, err) + + assert.EqualValues(t, codec, id.Prefix().Codec) + assert.EqualValues(t, multihashCode, id.Prefix().MhType) + assert.EqualValues(t, SampleIDSize, id.Prefix().MhLength) + + data, err := sid.MarshalBinary() + require.NoError(t, err) + + sidOut := SampleID{} + err = sidOut.UnmarshalBinary(data) + require.NoError(t, err) + assert.EqualValues(t, sid, sidOut) + + err = sidOut.Validate() + require.NoError(t, err) +} diff --git a/share/ipldv2/sample_test.go b/share/ipldv2/sample_test.go new file mode 100644 index 0000000000..8d56f4fe61 --- /dev/null +++ b/share/ipldv2/sample_test.go @@ -0,0 +1,37 @@ +package ipldv2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestSample(t *testing.T) { + square := edstest.RandEDS(t, 2) + + sid, err := NewSampleFrom(square, 2, rsmt2d.Row) + require.NoError(t, err) + + data, err := sid.MarshalBinary() + require.NoError(t, err) + + blk, err := sid.IPLDBlock() + require.NoError(t, err) + + cid, err := sid.ID.Cid() + require.NoError(t, err) + 
assert.EqualValues(t, blk.Cid(), cid) + + sidOut := &Sample{} + err = sidOut.UnmarshalBinary(data) + require.NoError(t, err) + assert.EqualValues(t, sid, sidOut) + + err = sidOut.Validate() + require.NoError(t, err) +} From 1b3d881693f65d990fdd029687eb731863c928f6 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 21 Sep 2023 13:06:27 +0200 Subject: [PATCH 008/132] add support for col proofs sampling --- share/eds/file.go | 58 +++++++++++++++++++++---------- share/eds/file_test.go | 69 ++++++++++++++++++++++++------------- share/ipldv2/ipldv2_test.go | 29 +++++++++------- share/ipldv2/sample.go | 17 +++++++-- 4 files changed, 115 insertions(+), 58 deletions(-) diff --git a/share/eds/file.go b/share/eds/file.go index 379f675251..775b042c30 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -86,23 +86,42 @@ func (f *File) Header() Header { return f.hdr } -func (f *File) Axis(idx int, _ rsmt2d.Axis) ([]share.Share, error) { - // TODO: Add Col support - shrLn := int64(f.hdr.ShareSize) - sqrLn := int64(f.hdr.SquareSize) - rwwLn := shrLn * sqrLn +func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { + shrLn := int(f.hdr.ShareSize) + sqrLn := int(f.hdr.SquareSize) - offset := int64(idx)*rwwLn + HeaderSize - rowdata := make([]byte, rwwLn) - if _, err := f.fl.ReadAt(rowdata, offset); err != nil { - return nil, err - } + shrs := make([]share.Share, sqrLn) + switch axis { + case rsmt2d.Col: + // [] [] [] [] + // [] [] [] [] + // [] [] [] [] + // [] [] [] [] + + for i := 0; i < sqrLn; i++ { + pos := idx + i*sqrLn + offset := pos*shrLn + HeaderSize + + shr := make(share.Share, shrLn) + if _, err := f.fl.ReadAt(shr, int64(offset)); err != nil { + return nil, err + } + shrs[i] = shr + } + case rsmt2d.Row: + pos := idx * sqrLn + offset := pos*shrLn + HeaderSize + axsData := make([]byte, sqrLn*shrLn) + if _, err := f.fl.ReadAt(axsData, int64(offset)); err != nil { + return nil, err + } - row := make([]share.Share, sqrLn) - for i := range row { - row[i] = rowdata[int64(i)*shrLn : (int64(i)+1)*shrLn] + for i := range shrs { + shrs[i] = axsData[i*shrLn : (i+1)*shrLn] + } } - return row, nil + + return shrs, nil } func (f *File) Share(idx int) (share.Share, error) { @@ -120,13 +139,17 @@ func (f *File) Share(idx int) (share.Share, error) { func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { // TODO: Cache the axis as well as computed tree sqrLn := int(f.hdr.SquareSize) - rowIdx := idx / sqrLn - shrs, err := f.Axis(rowIdx, axis) + axsIdx, shrIdx := idx/sqrLn, idx%sqrLn + if axis == rsmt2d.Col { + axsIdx, shrIdx = shrIdx, axsIdx + } + + shrs, err := f.Axis(axsIdx, axis) if err != nil { return nil, nmt.Proof{}, err } - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(rowIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axsIdx)) for _, shr := range shrs { err = tree.Push(shr) if err != nil { @@ -134,7 +157,6 @@ func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof } } - shrIdx := idx % sqrLn proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { return nil, nmt.Proof{}, err diff --git a/share/eds/file_test.go b/share/eds/file_test.go index e9e3ea8647..e850451aab 100644 --- a/share/eds/file_test.go +++ b/share/eds/file_test.go @@ -15,7 +15,9 @@ import ( func TestFile(t *testing.T) { path := t.TempDir() + "/testfile" - eds := edstest.RandEDS(t, 16) + eds := edstest.RandEDS(t, 8) + root, err := share.NewRoot(eds) + require.NoError(t, err) fl, err := CreateFile(path, 
eds) require.NoError(t, err) @@ -25,33 +27,40 @@ func TestFile(t *testing.T) { fl, err = OpenFile(path) require.NoError(t, err) - for i := 0; i < int(eds.Width()); i++ { - row, err := fl.Axis(i, rsmt2d.Row) - require.NoError(t, err) - assert.EqualValues(t, eds.Row(uint(i)), row) + axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} + for _, axis := range axis { + for i := 0; i < int(eds.Width()); i++ { + row, err := fl.Axis(i, axis) + require.NoError(t, err) + assert.EqualValues(t, getAxis(i, axis, eds), row) + } } width := int(eds.Width()) - for i := 0; i < width*width; i++ { - row, col := uint(i/width), uint(i%width) - shr, err := fl.Share(i) - require.NoError(t, err) - assert.EqualValues(t, eds.GetCell(row, col), shr) - - shr, proof, err := fl.ShareWithProof(i, rsmt2d.Row) - require.NoError(t, err) - assert.EqualValues(t, eds.GetCell(row, col), shr) - - roots, err := eds.RowRoots() - require.NoError(t, err) - - namespace := share.ParitySharesNamespace - if int(row) < width/2 && int(col) < width/2 { - namespace = share.GetNamespace(shr) - } + for _, axis := range axis { + for i := 0; i < width*width; i++ { + row, col := uint(i/width), uint(i%width) + shr, err := fl.Share(i) + require.NoError(t, err) + assert.EqualValues(t, eds.GetCell(row, col), shr) + + shr, proof, err := fl.ShareWithProof(i, axis) + require.NoError(t, err) + assert.EqualValues(t, eds.GetCell(row, col), shr) + + namespace := share.ParitySharesNamespace + if int(row) < width/2 && int(col) < width/2 { + namespace = share.GetNamespace(shr) + } - ok := proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, roots[row]) - assert.True(t, ok) + dahroot := root.RowRoots[row] + if axis == rsmt2d.Col { + dahroot = root.ColumnRoots[col] + } + + ok := proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, dahroot) + assert.True(t, ok) + } } out, err := fl.EDS() @@ -61,3 +70,15 @@ func TestFile(t *testing.T) { err = fl.Close() require.NoError(t, err) } + +// TODO(@Wondertan): Should be a method on eds +func getAxis(idx int, axis rsmt2d.Axis, eds *rsmt2d.ExtendedDataSquare) [][]byte { + switch axis { + case rsmt2d.Row: + return eds.Row(uint(idx)) + case rsmt2d.Col: + return eds.Col(uint(idx)) + default: + panic("") + } +} diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 08e8e6e1d8..121f3e079d 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -26,24 +26,27 @@ func TestV2Roundtrip(t *testing.T) { dn.ConnectAll() square := edstest.RandEDS(t, 16) + axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} width := int(square.Width()) - for i := 0; i < width*width; i++ { - smpl, err := NewSampleFrom(square, i, rsmt2d.Row) - require.NoError(t, err) + for _, axis := range axis { + for i := 0; i < width*width; i++ { + smpl, err := NewSampleFrom(square, i, axis) + require.NoError(t, err) - err = smpl.Validate() - require.NoError(t, err) + err = smpl.Validate() + require.NoError(t, err) - blkIn, err := smpl.IPLDBlock() - require.NoError(t, err) + blkIn, err := smpl.IPLDBlock() + require.NoError(t, err) - err = srv1.AddBlock(ctx, blkIn) - require.NoError(t, err) + err = srv1.AddBlock(ctx, blkIn) + require.NoError(t, err) - blkOut, err := srv2.GetBlock(ctx, blkIn.Cid()) - require.NoError(t, err) + blkOut, err := srv2.GetBlock(ctx, blkIn.Cid()) + require.NoError(t, err) - assert.EqualValues(t, blkIn.RawData(), blkOut.RawData()) - assert.EqualValues(t, blkIn.Cid(), blkOut.Cid()) + assert.EqualValues(t, blkIn.RawData(), blkOut.RawData()) + assert.EqualValues(t, blkIn.Cid(), 
blkOut.Cid()) + } } } diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go index aecb7b8598..25242f4042 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/sample.go @@ -59,15 +59,26 @@ func NewSample(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, pro // NewSampleFrom samples the EDS and constructs a new Sample. func NewSampleFrom(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*Sample, error) { sqrLn := int(eds.Width()) - rowIdx, shrIdx := idx/sqrLn, idx%sqrLn - shrs := eds.Row(uint(rowIdx)) + axisIdx, shrIdx := idx/sqrLn, idx%sqrLn + + // TODO(@Wondertan): Should be an rsmt2d method + var shrs [][]byte + switch axis { + case rsmt2d.Row: + shrs = eds.Row(uint(axisIdx)) + case rsmt2d.Col: + axisIdx, shrIdx = shrIdx, axisIdx + shrs = eds.Col(uint(axisIdx)) + default: + panic("invalid axis") + } root, err := share.NewRoot(eds) if err != nil { return nil, err } - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(rowIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) for _, shr := range shrs { err := tree.Push(shr) if err != nil { From b53769bd9c3939206cc065d5c7745fba70143609 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Fri, 22 Sep 2023 02:13:24 +0200 Subject: [PATCH 009/132] blockstore impl and various cleanups and improvements --- share/eds/file.go | 33 ++++--- share/eds/file_header.go | 55 +++++++----- share/eds/file_store.go | 8 ++ share/eds/file_test.go | 18 +++- share/ipld/blockserv.go | 2 +- share/ipldv2/blockstore.go | 133 +++++++++++++++++++++++++++++ share/ipldv2/blockstore_test.go | 66 ++++++++++++++ share/ipldv2/ipldv2.go | 36 ++++---- share/ipldv2/ipldv2_test.go | 108 +++++++++++++++++++---- share/ipldv2/pb/ipldv2pb.pb.go | 90 +++++++++---------- share/ipldv2/pb/ipldv2pb.proto | 4 +- share/ipldv2/sample.go | 54 ++++++++---- share/ipldv2/sample_hasher.go | 16 ++-- share/ipldv2/sample_hasher_test.go | 2 +- share/ipldv2/sample_id.go | 57 ++++++++----- share/ipldv2/sample_test.go | 2 +- 16 files changed, 519 insertions(+), 165 deletions(-) create mode 100644 share/ipldv2/blockstore.go create mode 100644 share/ipldv2/blockstore_test.go diff --git a/share/eds/file.go b/share/eds/file.go index 775b042c30..9ed448b43d 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -1,6 +1,7 @@ package eds import ( + "fmt" "io" "os" @@ -21,7 +22,7 @@ import ( // - Avoid storing constant shares, like padding type File struct { path string - hdr Header + hdr *Header fl fileBackend } @@ -55,9 +56,9 @@ func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare) (*File, error) { return nil, err } - h := Header{ - ShareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field - SquareSize: uint32(eds.Width()), + h := &Header{ + shareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field + squareSize: uint32(eds.Width()), } if _, err = h.WriteTo(f); err != nil { @@ -82,22 +83,17 @@ func (f *File) Close() error { return f.fl.Close() } -func (f *File) Header() Header { +func (f *File) Header() *Header { return f.hdr } func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { - shrLn := int(f.hdr.ShareSize) - sqrLn := int(f.hdr.SquareSize) + shrLn := int(f.hdr.shareSize) + sqrLn := int(f.hdr.squareSize) shrs := make([]share.Share, sqrLn) switch axis { case rsmt2d.Col: - // [] [] [] [] - // [] [] [] [] - // [] [] [] [] - // [] [] [] [] - for i := 0; i < sqrLn; i++ { pos := idx + i*sqrLn offset := pos*shrLn + HeaderSize @@ -111,6 +107,7 @@ func (f *File) 
Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { case rsmt2d.Row: pos := idx * sqrLn offset := pos*shrLn + HeaderSize + axsData := make([]byte, sqrLn*shrLn) if _, err := f.fl.ReadAt(axsData, int64(offset)); err != nil { return nil, err @@ -119,6 +116,8 @@ func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { for i := range shrs { shrs[i] = axsData[i*shrLn : (i+1)*shrLn] } + default: + return nil, fmt.Errorf("unknown axis") } return shrs, nil @@ -126,7 +125,7 @@ func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { func (f *File) Share(idx int) (share.Share, error) { // TODO: Check the cache first - shrLn := int64(f.hdr.ShareSize) + shrLn := int64(f.hdr.shareSize) offset := int64(idx)*shrLn + HeaderSize shr := make(share.Share, shrLn) @@ -138,7 +137,7 @@ func (f *File) Share(idx int) (share.Share, error) { func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { // TODO: Cache the axis as well as computed tree - sqrLn := int(f.hdr.SquareSize) + sqrLn := int(f.hdr.squareSize) axsIdx, shrIdx := idx/sqrLn, idx%sqrLn if axis == rsmt2d.Col { axsIdx, shrIdx = shrIdx, axsIdx @@ -166,8 +165,8 @@ func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof } func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) { - shrLn := int(f.hdr.ShareSize) - sqrLn := int(f.hdr.SquareSize) + shrLn := int(f.hdr.shareSize) + sqrLn := int(f.hdr.squareSize) buf := make([]byte, sqrLn*sqrLn*shrLn) if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { @@ -183,7 +182,7 @@ func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) { } codec := share.DefaultRSMT2DCodec() - treeFn := wrapper.NewConstructor(uint64(f.hdr.SquareSize / 2)) + treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2)) eds, err := rsmt2d.ImportExtendedDataSquare(shrs, codec, treeFn) if err != nil { return nil, err diff --git a/share/eds/file_header.go b/share/eds/file_header.go index c435f8b52f..318138420b 100644 --- a/share/eds/file_header.go +++ b/share/eds/file_header.go @@ -11,20 +11,37 @@ type Header struct { // User set features // TODO: Add codec // TDOD: Add ODS support - Version uint8 - Compression uint8 - Extensions map[string]string + version uint8 + compression uint8 + // extensions map[string]string + // Taken directly from EDS - ShareSize uint16 - SquareSize uint32 + shareSize uint16 + squareSize uint32 +} + +func (h *Header) ShareSize() int { + return int(h.shareSize) +} + +func (h *Header) SquareSize() int { + return int(h.squareSize) +} + +// TODO(@Wondertan) Should return special types +func (h *Header) Version() uint8 { + return h.version +} +func (h *Header) Compression() uint8 { + return h.compression } func (h *Header) WriteTo(w io.Writer) (int64, error) { buf := make([]byte, HeaderSize) - buf[0] = h.Version - buf[1] = h.Compression - binary.LittleEndian.PutUint16(buf[2:4], h.ShareSize) - binary.LittleEndian.PutUint32(buf[4:12], h.SquareSize) + buf[0] = h.version + buf[1] = h.compression + binary.LittleEndian.PutUint16(buf[2:4], h.shareSize) + binary.LittleEndian.PutUint32(buf[4:12], h.squareSize) // TODO: Extensions n, err := w.Write(buf) return int64(n), err @@ -37,26 +54,26 @@ func (h *Header) ReadFrom(r io.Reader) (int64, error) { return int64(n), err } - h.Version = buf[0] - h.Compression = buf[1] - h.ShareSize = binary.LittleEndian.Uint16(buf[2:4]) - h.SquareSize = binary.LittleEndian.Uint32(buf[4:12]) + h.version = buf[0] + h.compression = buf[1] + h.shareSize = binary.LittleEndian.Uint16(buf[2:4]) + h.squareSize = 
binary.LittleEndian.Uint32(buf[4:12]) // TODO: Extensions return int64(n), err } -func ReadHeaderAt(r io.ReaderAt, offset int64) (Header, error) { - h := Header{} +func ReadHeaderAt(r io.ReaderAt, offset int64) (*Header, error) { + h := &Header{} buf := make([]byte, HeaderSize) _, err := r.ReadAt(buf, offset) if err != nil { return h, err } - h.Version = buf[0] - h.Compression = buf[1] - h.ShareSize = binary.LittleEndian.Uint16(buf[2:4]) - h.SquareSize = binary.LittleEndian.Uint32(buf[4:12]) + h.version = buf[0] + h.compression = buf[1] + h.shareSize = binary.LittleEndian.Uint16(buf[2:4]) + h.squareSize = binary.LittleEndian.Uint32(buf[4:12]) return h, nil } diff --git a/share/eds/file_store.go b/share/eds/file_store.go index 2cbd567bc2..efbf968fed 100644 --- a/share/eds/file_store.go +++ b/share/eds/file_store.go @@ -1,4 +1,12 @@ package eds +import "github.com/celestiaorg/celestia-node/share" + type FileStore struct { + baspath string +} + +func (fs *FileStore) File(hash share.DataHash) (*File, error) { + // TODO(@Wondertan): Caching + return OpenFile(fs.baspath + "/" + hash.String()) } diff --git a/share/eds/file_test.go b/share/eds/file_test.go index e850451aab..73eb78fdd5 100644 --- a/share/eds/file_test.go +++ b/share/eds/file_test.go @@ -13,6 +13,16 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) +func TestCreateFile(t *testing.T) { + path := t.TempDir() + "/testfile" + edsIn := edstest.RandEDS(t, 8) + f, err := CreateFile(path, edsIn) + require.NoError(t, err) + edsOut, err := f.EDS() + require.NoError(t, err) + assert.True(t, edsIn.Equals(edsOut)) +} + func TestFile(t *testing.T) { path := t.TempDir() + "/testfile" eds := edstest.RandEDS(t, 8) @@ -44,7 +54,7 @@ func TestFile(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, eds.GetCell(row, col), shr) - shr, proof, err := fl.ShareWithProof(i, axis) + shr, prf, err := fl.ShareWithProof(i, axis) require.NoError(t, err) assert.EqualValues(t, eds.GetCell(row, col), shr) @@ -53,12 +63,12 @@ func TestFile(t *testing.T) { namespace = share.GetNamespace(shr) } - dahroot := root.RowRoots[row] + axishash := root.RowRoots[row] if axis == rsmt2d.Col { - dahroot = root.ColumnRoots[col] + axishash = root.ColumnRoots[col] } - ok := proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, dahroot) + ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) assert.True(t, ok) } } diff --git a/share/ipld/blockserv.go b/share/ipld/blockserv.go index 4bfca53bf6..2ed2a21c77 100644 --- a/share/ipld/blockserv.go +++ b/share/ipld/blockserv.go @@ -26,5 +26,5 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we allow all codes except home-baked sha256NamespaceFlagged - return code == sha256NamespaceFlagged || code == 0x7801 + return code == sha256NamespaceFlagged } diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go new file mode 100644 index 0000000000..a678c6f1b6 --- /dev/null +++ b/share/ipldv2/blockstore.go @@ -0,0 +1,133 @@ +package ipldv2 + +import ( + "context" + "fmt" + "io" + + "github.com/ipfs/boxo/blockstore" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" +) + +// edsFile is a mocking friendly local interface over eds.File. 
+// TODO(@Wondertan): Consider making an actual interface of eds pkg +type edsFile interface { + io.Closer + Header() *eds.Header + ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) +} + +// fileStore is a mocking friendly local interface over eds.FileStore +// TODO(@Wondertan): Consider making an actual interface of eds pkg +type fileStore[F edsFile] interface { + File(share.DataHash) (F, error) +} + +type Blockstore[F edsFile] struct { + fs fileStore[F] +} + +func NewBlockstore[F edsFile](fs fileStore[F]) blockstore.Blockstore { + return &Blockstore[F]{fs} +} + +func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { + id, err := SampleIDFromCID(cid) + if err != nil { + err = fmt.Errorf("while converting CID to SampleID: %w", err) + log.Error(err) + return nil, err + } + + f, err := b.fs.File(id.DataHash) + if err != nil { + err = fmt.Errorf("while getting EDS file from FS: %w", err) + log.Error(err) + return nil, err + } + + shr, prf, err := f.ShareWithProof(id.Index, id.Axis) + if err != nil { + err = fmt.Errorf("while getting share with proof: %w", err) + log.Error(err) + return nil, err + } + + s := NewSample(id, shr, prf, f.Header().SquareSize()) + blk, err := s.IPLDBlock() + if err != nil { + err = fmt.Errorf("while getting share with proof: %w", err) + log.Error(err) + return nil, err + } + + err = f.Close() + if err != nil { + err = fmt.Errorf("while closing EDS file: %w", err) + log.Error(err) + return nil, err + } + + return blk, nil +} + +func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + // TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and + // allocating Sample's block.Block. + blk, err := b.Get(ctx, cid) + if err != nil { + return 0, err + } + + return len(blk.RawData()), nil +} + +func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { + id, err := SampleIDFromCID(cid) + if err != nil { + err = fmt.Errorf("while converting CID to SampleID: %w", err) + log.Error(err) + return false, err + } + + f, err := b.fs.File(id.DataHash) + if err != nil { + err = fmt.Errorf("while getting EDS file from FS: %w", err) + log.Error(err) + return false, err + } + + err = f.Close() + if err != nil { + err = fmt.Errorf("while closing EDS file: %w", err) + log.Error(err) + return false, err + } + // existence of the file confirms existence of the share + return true, nil +} + +func (b Blockstore[F]) AllKeysChan(context.Context) (<-chan cid.Cid, error) { + return nil, fmt.Errorf("AllKeysChan is unsupported") +} + +func (b Blockstore[F]) DeleteBlock(context.Context, cid.Cid) error { + return fmt.Errorf("writes are not supported") +} + +func (b Blockstore[F]) Put(context.Context, blocks.Block) error { + return fmt.Errorf("writes are not supported") +} + +func (b Blockstore[F]) PutMany(context.Context, []blocks.Block) error { + return fmt.Errorf("writes are not supported") +} + +func (b Blockstore[F]) HashOnRead(bool) {} diff --git a/share/ipldv2/blockstore_test.go b/share/ipldv2/blockstore_test.go new file mode 100644 index 0000000000..0ca131dff2 --- /dev/null +++ b/share/ipldv2/blockstore_test.go @@ -0,0 +1,66 @@ +package ipldv2 + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + 
"github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestBlockstoreGet(t *testing.T) { + ctx := context.Background() + sqr := edstest.RandEDS(t, 4) + root, err := share.NewRoot(sqr) + require.NoError(t, err) + + path := t.TempDir() + "/eds_file" + f, err := eds.CreateFile(path, sqr) + require.NoError(t, err) + b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + + axis := []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} + width := int(sqr.Width()) + for _, axis := range axis { + for i := 0; i < width*width; i++ { + id := NewSampleID(root, i, axis) + cid, err := id.Cid() + require.NoError(t, err) + + blk, err := b.Get(ctx, cid) + require.NoError(t, err) + + sample, err := SampleFromBlock(blk) + require.NoError(t, err) + + err = sample.Validate() + require.NoError(t, err) + assert.EqualValues(t, id, sample.ID) + } + } +} + +type edsFileAndFS eds.File + +func (m *edsFileAndFS) File(share.DataHash) (*edsFileAndFS, error) { + return m, nil +} + +func (m *edsFileAndFS) Header() *eds.Header { + return (*eds.File)(m).Header() +} + +func (m *edsFileAndFS) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { + return (*eds.File)(m).ShareWithProof(idx, axis) +} + +func (m *edsFileAndFS) Close() error { + return nil +} diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index abb6dc736c..ae785b360e 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -1,11 +1,9 @@ package ipldv2 import ( - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/boxo/exchange" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" + "fmt" + + "github.com/ipfs/go-cid" logger "github.com/ipfs/go-log/v2" ) @@ -20,17 +18,6 @@ const ( multihashCode = 0x7801 ) -// NewBlockservice constructs Blockservice for fetching NMTrees. -func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService { - return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist)) -} - -// NewMemBlockservice constructs Blockservice for fetching NMTrees with in-memory blockstore. -func NewMemBlockservice() blockservice.BlockService { - bstore := blockstore.NewBlockstore(sync.MutexWrap(datastore.NewMapDatastore())) - return NewBlockservice(bstore, nil) -} - // defaultAllowlist keeps default list of hashes allowed in the network. 
var defaultAllowlist allowlist @@ -40,3 +27,20 @@ func (a allowlist) IsAllowed(code uint64) bool { // we disable all codes except home-baked code return code == multihashCode } + +func validateCID(cid cid.Cid) error { + prefix := cid.Prefix() + if prefix.Codec != codec { + return fmt.Errorf("unsupported codec") + } + + if prefix.MhType != multihashCode { + return fmt.Errorf("unsupported multihash") + } + + if prefix.MhLength != SampleIDSize { + return fmt.Errorf("invalid multihash length") + } + + return nil +} diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 121f3e079d..5b2d42bc6b 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -5,48 +5,126 @@ import ( "testing" "time" + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/routing/offline" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + record "github.com/libp2p/go-libp2p-record" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/rsmt2d" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" + "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) // TestV2Roundtrip tests full protocol round trip of: // EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. -func TestV2Roundtrip(t *testing.T) { +func TestV2RoundtripGetBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - dn := availability_test.NewTestDAGNet(ctx, t) - srv1 := dn.NewTestNode().BlockService - srv2 := dn.NewTestNode().BlockService - dn.ConnectAll() + sqr := edstest.RandEDS(t, 8) + + path := t.TempDir() + "/eds_file" + f, err := eds.CreateFile(path, sqr) + require.NoError(t, err) + + b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + client := remoteClient(ctx, t, b) - square := edstest.RandEDS(t, 16) axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} - width := int(square.Width()) + width := int(sqr.Width()) for _, axis := range axis { for i := 0; i < width*width; i++ { - smpl, err := NewSampleFrom(square, i, axis) + smpl, err := NewSampleFromEDS(sqr, i, axis) require.NoError(t, err) - err = smpl.Validate() + cid, err := smpl.ID.Cid() require.NoError(t, err) - blkIn, err := smpl.IPLDBlock() + blkOut, err := client.GetBlock(ctx, cid) require.NoError(t, err) + assert.EqualValues(t, cid, blkOut.Cid()) - err = srv1.AddBlock(ctx, blkIn) + data, err := smpl.MarshalBinary() require.NoError(t, err) + assert.EqualValues(t, data, blkOut.RawData()) + } + } +} - blkOut, err := srv2.GetBlock(ctx, blkIn.Cid()) +func TestV2RoundtripGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + sqr := edstest.RandEDS(t, 16) // TODO(@Wondertan): does not work with more than 8 + + path := t.TempDir() + "/eds_file" + f, err := eds.CreateFile(path, sqr) + require.NoError(t, err) + + b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + client := remoteClient(ctx, t, b) + + set := cid.NewSet() + axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} + width := int(sqr.Width()) + for _, axis := range axis { + for i := 0; i < width*width; i++ { + smpl, err := NewSampleFromEDS(sqr, i, axis) + require.NoError(t, err) + + cid, err := smpl.ID.Cid() require.NoError(t, 
err) - assert.EqualValues(t, blkIn.RawData(), blkOut.RawData()) - assert.EqualValues(t, blkIn.Cid(), blkOut.Cid()) + set.Add(cid) } } + + blks := client.GetBlocks(ctx, set.Keys()) + err = set.ForEach(func(c cid.Cid) error { + select { + case blk := <-blks: + assert.True(t, set.Has(blk.Cid())) + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + assert.NoError(t, err) +} + +func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) blockservice.BlockService { + net, err := mocknet.FullMeshLinked(2) + require.NoError(t, err) + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{}) + _ = bitswap.New( + ctx, + network.NewFromIpfsHost(net.Hosts()[0], routing), + bstore, + ) + + dstoreClient := dssync.MutexWrap(ds.NewMapDatastore()) + bstoreClient := blockstore.NewBlockstore(dstoreClient) + routingClient := offline.NewOfflineRouter(dstoreClient, record.NamespacedValidator{}) + + bitswapClient := bitswap.New( + ctx, + network.NewFromIpfsHost(net.Hosts()[1], routingClient), + bstoreClient, + ) + + err = net.ConnectAllButSelf() + require.NoError(t, err) + + return blockservice.New(bstoreClient, bitswapClient, blockservice.WithAllowlist(defaultAllowlist)) } diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index 09a6a2bb9f..c3ff0dedd7 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -74,8 +74,8 @@ func (SampleType) EnumDescriptor() ([]byte, []int) { } type SampleID struct { - DataRoot []byte `protobuf:"bytes,1,opt,name=data_root,json=dataRoot,proto3" json:"data_root,omitempty"` - DahRoot []byte `protobuf:"bytes,2,opt,name=dah_root,json=dahRoot,proto3" json:"dah_root,omitempty"` + DataHash []byte `protobuf:"bytes,1,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` } @@ -113,16 +113,16 @@ func (m *SampleID) XXX_DiscardUnknown() { var xxx_messageInfo_SampleID proto.InternalMessageInfo -func (m *SampleID) GetDataRoot() []byte { +func (m *SampleID) GetDataHash() []byte { if m != nil { - return m.DataRoot + return m.DataHash } return nil } -func (m *SampleID) GetDahRoot() []byte { +func (m *SampleID) GetAxisHash() []byte { if m != nil { - return m.DahRoot + return m.AxisHash } return nil } @@ -219,27 +219,27 @@ func init() { func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 305 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x9b, 0xad, 0xdb, 0xba, 0xff, 0xe6, 0x2c, 0xc1, 0x43, 0xa7, 0x10, 0xc7, 0x40, 0x28, - 0x3b, 0xb4, 0x50, 0x9f, 0x40, 0xdd, 0xc5, 0xdb, 0x88, 0xde, 0x25, 0x25, 0x95, 0x06, 0xaa, 0x09, - 0x6d, 0xd0, 0xf6, 0xe8, 0x1b, 0xf8, 0x58, 0x1e, 0x77, 0xf4, 0x28, 0xed, 0x8b, 0x48, 0x93, 0x8a, - 0xb7, 0xef, 0xcb, 0xef, 0xf0, 0xfb, 0x7f, 0x01, 0x52, 0xe5, 0xac, 0xcc, 0x62, 0xa1, 0x0a, 0xfe, - 0x96, 0xc4, 0x2a, 0x1d, 0x92, 0x4a, 0x23, 0x55, 0x4a, 0x2d, 0xcf, 0x57, 0x2a, 0x8d, 0x55, 0x29, - 0xe5, 0xb3, 0xed, 0xdb, 0x0a, 0xbc, 0x07, 0xf6, 0xa2, 0x8a, 0xec, 0x7e, 0x8f, 0x2f, 0x60, 0xce, - 0x99, 0x66, 0x4f, 0xa5, 0x94, 0x3a, 0x40, 0x1b, 0x14, 0x2e, 
0xa9, 0xd7, 0x3f, 0x50, 0x29, 0x35, - 0x5e, 0x83, 0xc7, 0x59, 0x6e, 0xd9, 0xc8, 0xb0, 0x19, 0x67, 0xb9, 0x41, 0x67, 0x30, 0x11, 0xaf, - 0x3c, 0xab, 0x83, 0xf1, 0x06, 0x85, 0x27, 0xd4, 0x16, 0xbc, 0x06, 0x97, 0xd5, 0xa2, 0x0a, 0xdc, - 0x0d, 0x0a, 0x57, 0xc9, 0x24, 0xba, 0xa9, 0x45, 0x45, 0xcd, 0xd3, 0xf6, 0x03, 0xc1, 0xd4, 0x5a, - 0xf1, 0x1a, 0x46, 0x82, 0x1b, 0xd9, 0x22, 0x99, 0x47, 0x7f, 0xa7, 0xd0, 0x91, 0xe0, 0xf8, 0x12, - 0x5c, 0xdd, 0xa8, 0xcc, 0xd8, 0x56, 0xc9, 0x62, 0x80, 0x8f, 0x8d, 0xca, 0xa8, 0x01, 0xbd, 0xd7, - 0xac, 0x35, 0xde, 0x25, 0xb5, 0x05, 0x5f, 0xc1, 0xc4, 0x0c, 0x34, 0xe2, 0x45, 0x72, 0x1a, 0x0d, - 0x73, 0xd3, 0xe8, 0xd0, 0x07, 0x6a, 0xe9, 0x2e, 0x00, 0xb7, 0xbf, 0x08, 0xcf, 0x60, 0x4c, 0xe5, - 0xbb, 0xef, 0xf4, 0xe1, 0x4e, 0x16, 0x3e, 0xda, 0x6d, 0x01, 0xfe, 0x55, 0xd8, 0x03, 0x77, 0xcf, - 0x34, 0xf3, 0x1d, 0x0c, 0x30, 0x3d, 0xb0, 0x52, 0xe8, 0xc6, 0x47, 0xb7, 0xc1, 0x57, 0x4b, 0xd0, - 0xb1, 0x25, 0xe8, 0xa7, 0x25, 0xe8, 0xb3, 0x23, 0xce, 0xb1, 0x23, 0xce, 0x77, 0x47, 0x9c, 0x74, - 0x6a, 0xfe, 0xf5, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0xea, 0xbb, 0x85, 0x53, 0x89, 0x01, 0x00, - 0x00, + // 306 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0xc1, 0x4e, 0xab, 0x40, + 0x14, 0x86, 0x99, 0x96, 0xf6, 0xb6, 0xa7, 0xbd, 0x48, 0x26, 0x2e, 0xa8, 0x26, 0x23, 0x21, 0x31, + 0x21, 0x5d, 0x40, 0x82, 0x4f, 0xa0, 0x76, 0xa1, 0xbb, 0x66, 0x74, 0x6f, 0x86, 0x80, 0x61, 0x12, + 0x74, 0x26, 0x80, 0x0a, 0x4b, 0xdf, 0xc0, 0xc7, 0x72, 0xd9, 0xa5, 0x4b, 0x03, 0x2f, 0x62, 0x66, + 0x06, 0xe3, 0xee, 0x9c, 0xff, 0x9b, 0xe4, 0x3b, 0xff, 0x00, 0xa9, 0x0b, 0x56, 0xe5, 0x31, 0x97, + 0x65, 0xf6, 0x9a, 0xc4, 0x32, 0x1d, 0x27, 0x99, 0x46, 0xb2, 0x12, 0x8d, 0x38, 0x71, 0x64, 0x1a, + 0xcb, 0x4a, 0x88, 0x47, 0xb3, 0x07, 0x2f, 0xb0, 0xb8, 0x63, 0x4f, 0xb2, 0xcc, 0x6f, 0x77, 0xf8, + 0x14, 0x96, 0x19, 0x6b, 0xd8, 0x43, 0xc1, 0xea, 0xc2, 0x43, 0x3e, 0x0a, 0xd7, 0x74, 0xa1, 0x82, + 0x1b, 0x56, 0x17, 0x0a, 0xb2, 0x96, 0xd7, 0x06, 0x4e, 0x0c, 0x54, 0x81, 0x86, 0xc7, 0x30, 0xe3, + 0xcf, 0x59, 0xde, 0x7a, 0x53, 0x1f, 0x85, 0xff, 0xa9, 0x59, 0xf0, 0x06, 0x6c, 0xf5, 0xc2, 0xb3, + 0x7d, 0x14, 0x3a, 0xc9, 0x2c, 0xba, 0x6c, 0x79, 0x4d, 0x75, 0x14, 0xbc, 0x23, 0x98, 0x1b, 0x2f, + 0xde, 0xc0, 0x84, 0x67, 0x5a, 0xb7, 0x4a, 0x96, 0xd1, 0xef, 0x31, 0x74, 0xc2, 0x33, 0x7c, 0x06, + 0x76, 0xd3, 0xc9, 0x5c, 0xeb, 0x9c, 0x64, 0x35, 0xc2, 0xfb, 0x4e, 0xe6, 0x54, 0x03, 0xe5, 0xd5, + 0x7d, 0xb5, 0x77, 0x4d, 0xcd, 0x82, 0xcf, 0x61, 0xa6, 0x2b, 0x6a, 0xf1, 0x2a, 0x39, 0x8a, 0xc6, + 0xc2, 0x69, 0xb4, 0x57, 0x03, 0x35, 0x74, 0xeb, 0x81, 0xad, 0x2e, 0xc2, 0xff, 0x60, 0x4a, 0xc5, + 0x9b, 0x6b, 0xa9, 0xe1, 0x5a, 0x94, 0x2e, 0xda, 0x06, 0x00, 0x7f, 0x2a, 0xbc, 0x00, 0x7b, 0xc7, + 0x1a, 0xe6, 0x5a, 0x18, 0x60, 0xbe, 0x67, 0x15, 0x6f, 0x3a, 0x17, 0x5d, 0x79, 0x9f, 0x3d, 0x41, + 0x87, 0x9e, 0xa0, 0xef, 0x9e, 0xa0, 0x8f, 0x81, 0x58, 0x87, 0x81, 0x58, 0x5f, 0x03, 0xb1, 0xd2, + 0xb9, 0xfe, 0xd9, 0x8b, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xc2, 0x9e, 0xb2, 0x8b, 0x01, + 0x00, 0x00, } func (m *SampleID) Marshal() (dAtA []byte, err error) { @@ -272,17 +272,17 @@ func (m *SampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - if len(m.DahRoot) > 0 { - i -= len(m.DahRoot) - copy(dAtA[i:], m.DahRoot) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DahRoot))) + if len(m.AxisHash) > 0 { + i -= len(m.AxisHash) + copy(dAtA[i:], m.AxisHash) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHash))) i-- dAtA[i] = 0x12 } - if len(m.DataRoot) > 0 { - i -= 
len(m.DataRoot) - copy(dAtA[i:], m.DataRoot) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataRoot))) + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataHash))) i-- dAtA[i] = 0xa } @@ -365,11 +365,11 @@ func (m *SampleID) Size() (n int) { } var l int _ = l - l = len(m.DataRoot) + l = len(m.DataHash) if l > 0 { n += 1 + l + sovIpldv2Pb(uint64(l)) } - l = len(m.DahRoot) + l = len(m.AxisHash) if l > 0 { n += 1 + l + sovIpldv2Pb(uint64(l)) } @@ -443,7 +443,7 @@ func (m *SampleID) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataRoot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -470,14 +470,14 @@ func (m *SampleID) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DataRoot = append(m.DataRoot[:0], dAtA[iNdEx:postIndex]...) - if m.DataRoot == nil { - m.DataRoot = []byte{} + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DahRoot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AxisHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -504,9 +504,9 @@ func (m *SampleID) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DahRoot = append(m.DahRoot[:0], dAtA[iNdEx:postIndex]...) - if m.DahRoot == nil { - m.DahRoot = []byte{} + m.AxisHash = append(m.AxisHash[:0], dAtA[iNdEx:postIndex]...) + if m.AxisHash == nil { + m.AxisHash = []byte{} } iNdEx = postIndex case 3: diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto index c5e30ce92d..df3444de20 100644 --- a/share/ipldv2/pb/ipldv2pb.proto +++ b/share/ipldv2/pb/ipldv2pb.proto @@ -13,8 +13,8 @@ enum SampleType { } message SampleID{ - bytes data_root = 1; - bytes dah_root = 2; + bytes data_hash = 1; + bytes axis_hash = 2; uint32 index = 3; Axis axis = 4; } diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go index 25242f4042..e5bbf269b1 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/sample.go @@ -3,6 +3,7 @@ package ipldv2 import ( "crypto/sha256" "errors" + "fmt" blocks "github.com/ipfs/go-block-format" @@ -38,11 +39,8 @@ type Sample struct { } // NewSample constructs a new Sample. -func NewSample(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *Sample { - id := NewSampleID(root, idx, axis) - - sqrLn := len(root.RowRoots) - row, col := idx/sqrLn, idx%sqrLn +func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample { + row, col := id.Index/sqrLn, id.Index%sqrLn tp := ParitySample if row < sqrLn/2 && col < sqrLn/2 { tp = DataSample @@ -56,8 +54,14 @@ func NewSample(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, pro } } -// NewSampleFrom samples the EDS and constructs a new Sample. -func NewSampleFrom(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*Sample, error) { +// NewSampleFrom constructs a new Sample from share.Root. +func NewSampleFrom(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *Sample { + id := NewSampleID(root, idx, axis) + return NewSample(id, shr, proof, len(root.RowRoots)) +} + +// NewSampleFromEDS samples the EDS and constructs a new Sample. 
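+// The idx is a flat index into the square ([0, width^2)): it is split into
+// an axis index and a share index, the NMT for that axis is rebuilt, and a
+// single-share range proof is taken. Note that it recomputes the root on
+// every call, which suits tests; the eds.File path proves shares without
+// paying that cost per sample.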
+func NewSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*Sample, error) { sqrLn := int(eds.Width()) axisIdx, shrIdx := idx/sqrLn, idx%sqrLn @@ -75,23 +79,23 @@ func NewSampleFrom(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (* root, err := share.NewRoot(eds) if err != nil { - return nil, err + return nil, fmt.Errorf("while computing root: %w", err) } tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) for _, shr := range shrs { err := tree.Push(shr) if err != nil { - return nil, err + return nil, fmt.Errorf("while pushing shares to NMT: %w", err) } } - proof, err := tree.ProveRange(shrIdx, shrIdx+1) + prf, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, err + return nil, fmt.Errorf("while proving range share over NMT: %w", err) } - return NewSample(root, idx, axis, shrs[shrIdx], proof), nil + return NewSampleFrom(root, idx, axis, shrs[shrIdx], prf), nil } // Proto converts Sample to its protobuf representation. @@ -112,6 +116,21 @@ func (s *Sample) Proto() *ipldv2pb.Sample { } } +// SampleFromBlock converts blocks.Block into Sample. +func SampleFromBlock(blk blocks.Block) (*Sample, error) { + if err := validateCID(blk.Cid()); err != nil { + return nil, err + } + + s := &Sample{} + err := s.UnmarshalBinary(blk.RawData()) + if err != nil { + return nil, fmt.Errorf("while unmarshalling Sample: %w", err) + } + + return s, nil +} + // IPLDBlock converts Sample to an IPLD block for Bitswap compatibility. func (s *Sample) IPLDBlock() (blocks.Block, error) { cid, err := s.ID.Cid() @@ -135,14 +154,13 @@ func (s *Sample) MarshalBinary() ([]byte, error) { // UnmarshalBinary unmarshals Sample from binary. func (s *Sample) UnmarshalBinary(data []byte) error { proto := &ipldv2pb.Sample{} - err := proto.Unmarshal(data) - if err != nil { + if err := proto.Unmarshal(data); err != nil { return err } s.ID = SampleID{ - DataRoot: proto.Id.DataRoot, - DAHRoot: proto.Id.DahRoot, + DataHash: proto.Id.DataHash, + AxisHash: proto.Id.AxisHash, Index: int(proto.Id.Index), Axis: rsmt2d.Axis(proto.Id.Axis), } @@ -159,7 +177,7 @@ func (s *Sample) Validate() error { } if s.Type != DataSample && s.Type != ParitySample { - return errors.New("malformed sample type") + return fmt.Errorf("incorrect sample type: %d", s.Type) } // TODO Support Col proofs @@ -168,7 +186,7 @@ func (s *Sample) Validate() error { namespace = share.GetNamespace(s.Share) } - if !s.Proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{s.Share}, s.ID.DAHRoot) { + if !s.Proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{s.Share}, s.ID.AxisHash) { return errors.New("sample proof is invalid") } diff --git a/share/ipldv2/sample_hasher.go b/share/ipldv2/sample_hasher.go index b4d380ea81..61341283e7 100644 --- a/share/ipldv2/sample_hasher.go +++ b/share/ipldv2/sample_hasher.go @@ -2,13 +2,14 @@ package ipldv2 import ( "crypto/sha256" + "fmt" "hash" mh "github.com/multiformats/go-multihash" ) func init() { - // Register hasher for multihash. + // Register hasher for our multihash code mh.Register(multihashCode, func() hash.Hash { return &SampleHasher{} }) @@ -21,13 +22,14 @@ type SampleHasher struct { // Write expects a marshaled Sample to validate. 
 func (sh *SampleHasher) Write(data []byte) (int, error) {
-	err := sh.sample.UnmarshalBinary(data)
-	if err != nil {
+	if err := sh.sample.UnmarshalBinary(data); err != nil {
+		err = fmt.Errorf("while unmarshaling Sample: %w", err)
 		log.Error(err)
 		return 0, err
 	}
 
-	if err = sh.sample.Validate(); err != nil {
+	if err := sh.sample.Validate(); err != nil {
+		err = fmt.Errorf("while validating Sample: %w", err)
 		log.Error(err)
 		return 0, err
 	}
@@ -37,7 +39,11 @@ func (sh *SampleHasher) Write(data []byte) (int, error) {
 
 // Sum returns the "multihash" of the SampleID.
 func (sh *SampleHasher) Sum([]byte) []byte {
-	sum, _ := sh.sample.ID.MarshalBinary()
+	sum, err := sh.sample.ID.MarshalBinary()
+	if err != nil {
+		err = fmt.Errorf("while marshaling SampleID: %w", err)
+		log.Error(err)
+	}
 	return sum
 }
 
diff --git a/share/ipldv2/sample_hasher_test.go b/share/ipldv2/sample_hasher_test.go
index c7e2b23b14..ace91d32eb 100644
--- a/share/ipldv2/sample_hasher_test.go
+++ b/share/ipldv2/sample_hasher_test.go
@@ -19,7 +19,7 @@ func TestSampleHasher(t *testing.T) {
 
 	square := edstest.RandEDS(t, 2)
 
-	sample, err := NewSampleFrom(square, 2, rsmt2d.Row)
+	sample, err := NewSampleFromEDS(square, 2, rsmt2d.Row)
 	require.NoError(t, err)
 
 	data, err := sample.MarshalBinary()
diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go
index 0d2a6fc00b..3dc1b6040b 100644
--- a/share/ipldv2/sample_id.go
+++ b/share/ipldv2/sample_id.go
@@ -3,7 +3,7 @@ package ipldv2
 import (
 	"crypto/sha256"
 	"encoding/binary"
-	"errors"
+	"fmt"
 
 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
@@ -19,17 +19,18 @@ const SampleIDSize = 127
 
 // TODO(@Wondertan): Eventually this should become configurable
 const (
-	hashSize    = sha256.Size
-	dahRootSize = 2*share.NamespaceSize + hashSize
+	hashSize     = sha256.Size
+	dahRootSize  = 2*share.NamespaceSize + hashSize
+	mhPrefixSize = 4
 )
 
 // SampleID is a unique identifier of a Sample.
 type SampleID struct {
-	// DataRoot is the root of the data square
+	// DataHash is the root of the data square
 	// Needed to identify the data square in the whole chain
-	DataRoot []byte
-	// DAHRoot is the Col or Row root from DAH of the data square
-	DAHRoot []byte
+	DataHash share.DataHash
+	// AxisHash is the Col or Row root from DAH of the data square
+	AxisHash []byte
 	// Index is the index of the sample in the data square(not row or col index)
 	Index int
 	// Axis is Col or Row axis of the sample in the data square
@@ -46,13 +47,27 @@ func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID {
 	}
 
 	return SampleID{
-		DataRoot: root.Hash(),
-		DAHRoot:  dahroot,
+		DataHash: root.Hash(),
+		AxisHash: dahroot,
 		Index:    idx,
 		Axis:     axis,
 	}
}
 
+// SampleIDFromCID converts CID to SampleID.
+func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) {
+	if err = validateCID(cid); err != nil {
+		return id, err
+	}
+
+	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
+	if err != nil {
+		return id, fmt.Errorf("while unmarshalling SampleID: %w", err)
+	}
+
+	return id, nil
+}
+
 // Cid returns sample ID encoded as CID.
 func (s *SampleID) Cid() (cid.Cid, error) {
 	data, err := s.MarshalBinary()
@@ -71,8 +86,8 @@ func (s *SampleID) Cid() (cid.Cid, error) {
 // Proto converts SampleID to its protobuf representation.
 func (s *SampleID) Proto() *ipldv2pb.SampleID {
 	return &ipldv2pb.SampleID{
-		DataRoot: s.DataRoot,
-		DahRoot:  s.DAHRoot,
+		DataHash: s.DataHash,
+		AxisHash: s.AxisHash,
 		Index:    uint32(s.Index),
 		Axis:     ipldv2pb.Axis(s.Axis),
 	}
@@ -82,8 +97,8 @@ func (s *SampleID) Proto() *ipldv2pb.SampleID {
 func (s *SampleID) MarshalBinary() ([]byte, error) {
 	// we cannot use protobuf here because it exceeds multihash limit of 128 bytes
 	data := make([]byte, SampleIDSize)
-	n := copy(data, s.DataRoot)
-	n += copy(data[n:], s.DAHRoot)
+	n := copy(data, s.DataHash)
+	n += copy(data[n:], s.AxisHash)
 	binary.LittleEndian.PutUint32(data[n:], uint32(s.Index))
 	data[n+4] = byte(s.Axis)
 	return data, nil
@@ -92,12 +107,12 @@ func (s *SampleID) MarshalBinary() ([]byte, error) {
 // UnmarshalBinary decodes SampleID from binary form.
 func (s *SampleID) UnmarshalBinary(data []byte) error {
 	if len(data) != SampleIDSize {
-		return errors.New("malformed sample id")
+		return fmt.Errorf("incorrect sample id size: %d != %d", len(data), SampleIDSize)
 	}
 
 	// copying data to avoid slice aliasing
-	s.DataRoot = append(s.DataRoot, data[:hashSize]...)
-	s.DAHRoot = append(s.DAHRoot, data[hashSize:hashSize+dahRootSize]...)
+	s.DataHash = append(s.DataHash, data[:hashSize]...)
+	s.AxisHash = append(s.AxisHash, data[hashSize:hashSize+dahRootSize]...)
 	s.Index = int(binary.LittleEndian.Uint32(data[hashSize+dahRootSize : hashSize+dahRootSize+4]))
 	s.Axis = rsmt2d.Axis(data[hashSize+dahRootSize+4])
 	return nil
@@ -105,16 +120,16 @@ func (s *SampleID) UnmarshalBinary(data []byte) error {
 
 // Validate validates fields of SampleID.
 func (s *SampleID) Validate() error {
-	if len(s.DataRoot) != hashSize {
-		return errors.New("malformed data root")
+	if len(s.DataHash) != hashSize {
+		return fmt.Errorf("incorrect DataHash size: %d != %d", len(s.DataHash), hashSize)
 	}
 
-	if len(s.DAHRoot) != dahRootSize {
-		return errors.New("malformed DAH root")
+	if len(s.AxisHash) != dahRootSize {
+		return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), dahRootSize)
 	}
 
 	if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row {
-		return errors.New("malformed axis")
+		return fmt.Errorf("incorrect Axis: %d", s.Axis)
 	}
 
 	return nil
diff --git a/share/ipldv2/sample_test.go b/share/ipldv2/sample_test.go
index 8d56f4fe61..878fca8c17 100644
--- a/share/ipldv2/sample_test.go
+++ b/share/ipldv2/sample_test.go
@@ -14,7 +14,7 @@ import (
 func TestSample(t *testing.T) {
 	square := edstest.RandEDS(t, 2)
 
-	sid, err := NewSampleFrom(square, 2, rsmt2d.Row)
+	sid, err := NewSampleFromEDS(square, 2, rsmt2d.Row)
 	require.NoError(t, err)
 
 	data, err := sid.MarshalBinary()

From ce31854e1e02910c611d014129578eda841ba559 Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Fri, 22 Sep 2023 13:51:53 +0200
Subject: [PATCH 010/132] initial support for ODS Mode

---
 share/eds/file.go          | 65 ++++++++++++++++++++++++++++++--------
 share/eds/file_features.go | 20 ++++++++++++
 share/eds/file_header.go   | 33 ++++++++-----------
 share/eds/file_test.go     | 14 +++++---
 4 files changed, 95 insertions(+), 37 deletions(-)
 create mode 100644 share/eds/file_features.go

diff --git a/share/eds/file.go b/share/eds/file.go
index 9ed448b43d..27d0987b84 100644
--- a/share/eds/file.go
+++ b/share/eds/file.go
@@ -14,6 +14,15 @@ import (
 	"github.com/celestiaorg/celestia-node/share"
 )
 
+type FileConfig struct {
+	Version     FileVersion
+	Compression FileCompression
+	Mode        FileMode
+
+	// extensions  map[string]string
+	// TODO: Add codec
+}
+
 // File
 // * immutable
 // * versionable
@@ -42,6 +51,7 @@ func OpenFile(path string) (*File, error) {
 		return nil, err
 	}
 
+	// TODO(@Wondertan): Validate header
 	return &File{
 		path: path,
 		hdr:  h,
@@ -49,26 +59,38 @@ func OpenFile(path string) (*File, error) {
 	}, nil
 }
 
-// TODO: Allow setting features
-func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare) (*File, error) {
+func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare, cfgs ...FileConfig) (*File, error) {
 	f, err := os.Create(path)
 	if err != nil {
 		return nil, err
 	}
 
+	cfg := FileConfig{}
+	if cfgs != nil {
+		cfg = cfgs[0]
+	}
+
 	h := &Header{
 		shareSize:  uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field
 		squareSize: uint32(eds.Width()),
+		cfg:        cfg,
 	}
 
 	if _, err = h.WriteTo(f); err != nil {
 		return nil, err
 	}
 
-	for _, shr := range eds.Flattened() {
-		// TODO: Buffer and write as single?
-		if _, err := f.Write(shr); err != nil {
-			return nil, err
+	width := eds.Width()
+	if cfg.Mode == ODSMode {
+		width /= 2
+	}
+	for i := uint(0); i < width; i++ {
+		for j := uint(0); j < width; j++ {
+			// TODO: Buffer and write as single?
+			shr := eds.GetCell(i, j)
+			if _, err := f.Write(shr); err != nil {
+				return nil, err
+			}
 		}
 	}
 
@@ -90,6 +112,9 @@ func (f *File) Header() *Header {
 func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) {
 	shrLn := int(f.hdr.shareSize)
 	sqrLn := int(f.hdr.squareSize)
+	if f.Header().Config().Mode == ODSMode {
+		sqrLn /= 2
+	}
 
 	shrs := make([]share.Share, sqrLn)
 	switch axis {
@@ -120,6 +145,14 @@ func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) {
 		return nil, fmt.Errorf("unknown axis")
 	}
 
+	if f.Header().Config().Mode == ODSMode {
+		parity, err := share.DefaultRSMT2DCodec().Decode(shrs)
+		if err != nil {
+			return nil, err
+		}
+
+		return append(shrs, parity...), nil
+	}
 	return shrs, nil
 }
 
@@ -167,6 +200,9 @@ func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof
 func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) {
 	shrLn := int(f.hdr.shareSize)
 	sqrLn := int(f.hdr.squareSize)
+	if f.Header().Config().Mode == ODSMode {
+		sqrLn /= 2
+	}
 
 	buf := make([]byte, sqrLn*sqrLn*shrLn)
 	if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil {
@@ -176,17 +212,20 @@ func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) {
 	shrs := make([][]byte, sqrLn*sqrLn)
 	for i := 0; i < sqrLn; i++ {
 		for j := 0; j < sqrLn; j++ {
-			x := i*sqrLn + j
-			shrs[x] = buf[x*shrLn : (x+1)*shrLn]
+			pos := i*sqrLn + j
+			shrs[pos] = buf[pos*shrLn : (pos+1)*shrLn]
 		}
 	}
 
 	codec := share.DefaultRSMT2DCodec()
 	treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2))
-	eds, err := rsmt2d.ImportExtendedDataSquare(shrs, codec, treeFn)
-	if err != nil {
-		return nil, err
-	}
 
-	return eds, nil
+	switch f.Header().Config().Mode {
+	case EDSMode:
+		return rsmt2d.ImportExtendedDataSquare(shrs, codec, treeFn)
+	case ODSMode:
+		return rsmt2d.ComputeExtendedDataSquare(shrs, codec, treeFn)
+	default:
+		return nil, fmt.Errorf("invalid mode type") // TODO(@Wondertan): Do fields validation right after read
+	}
 }
diff --git a/share/eds/file_features.go b/share/eds/file_features.go
new file mode 100644
index 0000000000..1374938548
--- /dev/null
+++ b/share/eds/file_features.go
@@ -0,0 +1,20 @@
+package eds
+
+type FileMode uint8
+
+const (
+	EDSMode FileMode = iota
+	ODSMode
+)
+
+type FileVersion uint8
+
+const (
+	FileV0 FileVersion = iota
+)
+
+type FileCompression uint8
+
+const (
+	NoCompression FileCompression = iota
+)
diff --git a/share/eds/file_header.go b/share/eds/file_header.go
index 318138420b..2b6c767e07 100644
--- a/share/eds/file_header.go
+++ b/share/eds/file_header.go
@@ -9,17 +9,17 @@ const HeaderSize = 32
 
 type Header struct {
 	// User set features
-	// TODO: Add codec
-	// TDOD: Add ODS support
-	version     uint8
-	compression uint8
-	// extensions  map[string]string
+	cfg FileConfig
 
 	// Taken directly from EDS
 	shareSize  uint16
 	squareSize uint32
 }
 
+func (h *Header) Config() FileConfig {
+	return h.cfg
+}
+
 func (h *Header) ShareSize() int {
 	return int(h.shareSize)
 }
@@ -28,18 +28,11 @@ func (h *Header) SquareSize() int {
 	return int(h.squareSize)
 }
 
-// TODO(@Wondertan) Should return special types
-func (h *Header) Version() uint8 {
-	return h.version
-}
-func (h *Header) Compression() uint8 {
-	return h.compression
-}
-
 func (h *Header) WriteTo(w io.Writer) (int64, error) {
 	buf := make([]byte, HeaderSize)
-	buf[0] = h.version
-	buf[1] = h.compression
+	buf[0] = byte(h.Config().Version)
+	buf[1] = byte(h.Config().Compression)
+	buf[8] = byte(h.Config().Mode)
 	binary.LittleEndian.PutUint16(buf[2:4], h.shareSize)
 	binary.LittleEndian.PutUint32(buf[4:12], h.squareSize)
 	// TODO: Extensions
@@ -54,8 +47,9 @@
 		return int64(n), err
 	}
 
-	h.version = buf[0]
-	h.compression = buf[1]
+	h.cfg.Version = FileVersion(buf[0])
+	h.cfg.Compression = FileCompression(buf[1])
+	h.cfg.Mode = FileMode(buf[8])
 	h.shareSize = binary.LittleEndian.Uint16(buf[2:4])
 	h.squareSize = binary.LittleEndian.Uint32(buf[4:12])
 
@@ -71,8 +65,9 @@ func ReadHeaderAt(r io.ReaderAt, offset int64) (*Header, error) {
 		return h, err
 	}
 
-	h.version = buf[0]
-	h.compression = buf[1]
+	h.cfg.Version = FileVersion(buf[0])
+	h.cfg.Compression = FileCompression(buf[1])
+	h.cfg.Mode = FileMode(buf[8])
 	h.shareSize = binary.LittleEndian.Uint16(buf[2:4])
 	h.squareSize = binary.LittleEndian.Uint32(buf[4:12])
 	return h, nil
diff --git a/share/eds/file_test.go b/share/eds/file_test.go
index 73eb78fdd5..901158f030 100644
--- a/share/eds/file_test.go
+++ b/share/eds/file_test.go
@@ -16,11 +16,14 @@ import (
 func TestCreateFile(t *testing.T) {
 	path := t.TempDir() + "/testfile"
 	edsIn := edstest.RandEDS(t, 8)
-	f, err := CreateFile(path, edsIn)
-	require.NoError(t, err)
-	edsOut, err := f.EDS()
-	require.NoError(t, err)
-	assert.True(t, edsIn.Equals(edsOut))
+
+	for _, mode := range []FileMode{EDSMode, ODSMode} {
+		f, err := CreateFile(path, edsIn, FileConfig{Mode: mode})
+		require.NoError(t, err)
+		edsOut, err := f.EDS()
+		require.NoError(t, err)
+		assert.True(t, edsIn.Equals(edsOut))
+	}
 }
 
 func TestFile(t *testing.T) {
@@ -29,6 +32,7 @@ func TestFile(t *testing.T) {
 	root, err := share.NewRoot(eds)
 	require.NoError(t, err)
 
+	// TODO(@Wondertan): Test in multiple modes
 	fl, err := CreateFile(path, eds)
 	require.NoError(t, err)
 	err = fl.Close()

From 3ab6b372df431d8e91f358e52481924f7aa9a35e Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Sat, 30 Sep 2023 21:53:38 +0200
Subject: [PATCH 011/132] implement axis sampling

---
 share/eds/file.go                             |  12 +-
 share/ipldv2/axis_sample.go                   | 155 ++++
 share/ipldv2/axis_sample_hasher.go            |  53 ++
 ...her_test.go => axis_sample_hasher_test.go} |   6 +-
 share/ipldv2/axis_sample_id.go                | 126 ++++
 ...mple_id_test.go => axis_sample_id_test.go} |  12 +-
 share/ipldv2/axis_sample_test.go              |  37 +
 share/ipldv2/blockstore.go                    | 120 ++-
 share/ipldv2/blockstore_test.go               |  12 +-
 share/ipldv2/ipldv2.go                        |  53 +-
 share/ipldv2/ipldv2_test.go                   | 114 ++-
 share/ipldv2/pb/ipldv2pb.pb.go                | 713 +++++++++++++++---
 share/ipldv2/pb/ipldv2pb.proto                |  25 +-
 share/ipldv2/sample_hasher.go                 |  63 --
 share/ipldv2/{sample.go => share_sample.go}   |  91 ++-
share/ipldv2/share_sample_hasher.go | 53 ++ share/ipldv2/share_sample_hasher_test.go | 40 + .../{sample_id.go => share_sample_id.go} | 58 +- share/ipldv2/share_sample_id_test.go | 39 + .../{sample_test.go => share_sample_test.go} | 6 +- 20 files changed, 1485 insertions(+), 303 deletions(-) create mode 100644 share/ipldv2/axis_sample.go create mode 100644 share/ipldv2/axis_sample_hasher.go rename share/ipldv2/{sample_hasher_test.go => axis_sample_hasher_test.go} (84%) create mode 100644 share/ipldv2/axis_sample_id.go rename share/ipldv2/{sample_id_test.go => axis_sample_id_test.go} (66%) create mode 100644 share/ipldv2/axis_sample_test.go delete mode 100644 share/ipldv2/sample_hasher.go rename share/ipldv2/{sample.go => share_sample.go} (55%) create mode 100644 share/ipldv2/share_sample_hasher.go create mode 100644 share/ipldv2/share_sample_hasher_test.go rename share/ipldv2/{sample_id.go => share_sample_id.go} (62%) create mode 100644 share/ipldv2/share_sample_id_test.go rename share/ipldv2/{sample_test.go => share_sample_test.go} (83%) diff --git a/share/eds/file.go b/share/eds/file.go index 27d0987b84..69d7a4bc37 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -98,7 +98,7 @@ func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare, cfgs ...FileConfig) path: path, fl: f, hdr: h, - }, err + }, f.Sync() } func (f *File) Close() error { @@ -156,6 +156,16 @@ func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { return shrs, nil } +func (f *File) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { + // TODO(@Wondertan): this has to read directly from the file, avoiding recompute + fullAxis, err := f.Axis(idx, axis) + if err != nil { + return nil, err + } + + return fullAxis[:len(fullAxis)/2], nil +} + func (f *File) Share(idx int) (share.Share, error) { // TODO: Check the cache first shrLn := int64(f.hdr.shareSize) diff --git a/share/ipldv2/axis_sample.go b/share/ipldv2/axis_sample.go new file mode 100644 index 0000000000..b9379278ce --- /dev/null +++ b/share/ipldv2/axis_sample.go @@ -0,0 +1,155 @@ +package ipldv2 + +import ( + "bytes" + "fmt" + + blocks "github.com/ipfs/go-block-format" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" +) + +type AxisSample struct { + ID AxisSampleID + AxisHalf []share.Share +} + +// NewAxisSample constructs a new AxisSample. +func NewAxisSample(id AxisSampleID, axisHalf []share.Share) *AxisSample { + return &AxisSample{ + ID: id, + AxisHalf: axisHalf, + } +} + +// NewAxisSampleFrom constructs a new AxisSample from share.Root. +func NewAxisSampleFrom(root *share.Root, idx int, axis rsmt2d.Axis, axisHalf []share.Share) *AxisSample { + id := NewAxisSampleID(root, idx, axis) + return NewAxisSample(id, axisHalf) +} + +// NewAxisSampleFromEDS samples the EDS and constructs a new AxisSample. 
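+//
+// A minimal usage sketch, mirroring the share-sampling flow (`square` and
+// `idx` are illustrative placeholders for any EDS and axis index):
+//
+//	smpl, err := NewAxisSampleFromEDS(square, idx, rsmt2d.Row)
+//	if err != nil {
+//		return err
+//	}
+//	blk, err := smpl.IPLDBlock() // carries the axis half and its AxisSampleID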
+func NewAxisSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*AxisSample, error) {
+	sqrLn := int(eds.Width())
+
+	// TODO(@Wondertan): Should be an rsmt2d method
+	var axisHalf [][]byte
+	switch axis {
+	case rsmt2d.Row:
+		axisHalf = eds.Row(uint(idx))[:sqrLn/2]
+	case rsmt2d.Col:
+		axisHalf = eds.Col(uint(idx))[:sqrLn/2]
+	default:
+		panic("invalid axis")
+	}
+
+	root, err := share.NewRoot(eds)
+	if err != nil {
+		return nil, fmt.Errorf("while computing root: %w", err)
+	}
+
+	return NewAxisSampleFrom(root, idx, axis, axisHalf), nil
+}
+
+// Proto converts AxisSample to its protobuf representation.
+func (s *AxisSample) Proto() *ipldv2pb.AxisSample {
+	return &ipldv2pb.AxisSample{
+		Id:       s.ID.Proto(),
+		AxisHalf: s.AxisHalf,
+	}
+}
+
+// AxisSampleFromBlock converts blocks.Block into AxisSample.
+func AxisSampleFromBlock(blk blocks.Block) (*AxisSample, error) {
+	if err := validateCID(blk.Cid()); err != nil {
+		return nil, err
+	}
+
+	s := &AxisSample{}
+	err := s.UnmarshalBinary(blk.RawData())
+	if err != nil {
+		return nil, fmt.Errorf("while unmarshalling AxisSample: %w", err)
+	}
+
+	return s, nil
+}
+
+// IPLDBlock converts AxisSample to an IPLD block for Bitswap compatibility.
+func (s *AxisSample) IPLDBlock() (blocks.Block, error) {
+	cid, err := s.ID.Cid()
+	if err != nil {
+		return nil, err
+	}
+
+	data, err := s.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	return blocks.NewBlockWithCid(data, cid)
+}
+
+// MarshalBinary marshals AxisSample to binary.
+func (s *AxisSample) MarshalBinary() ([]byte, error) {
+	return s.Proto().Marshal()
+}
+
+// UnmarshalBinary unmarshals AxisSample from binary.
+func (s *AxisSample) UnmarshalBinary(data []byte) error {
+	proto := &ipldv2pb.AxisSample{}
+	if err := proto.Unmarshal(data); err != nil {
+		return err
+	}
+
+	s.ID = AxisSampleID{
+		DataHash: proto.Id.DataHash,
+		AxisHash: proto.Id.AxisHash,
+		Index:    int(proto.Id.Index),
+		Axis:     rsmt2d.Axis(proto.Id.Axis),
+	}
+	s.AxisHalf = proto.AxisHalf
+	return nil
+}
+
+// Validate validates AxisSample's fields and verifies the axis half against ID.AxisHash.
+func (s *AxisSample) Validate() error {
+	if err := s.ID.Validate(); err != nil {
+		return err
+	}
+
+	sqrLn := len(s.AxisHalf) * 2
+	if s.ID.Index >= sqrLn {
+		return fmt.Errorf("row index exceeds square size: %d >= %d", s.ID.Index, sqrLn)
+	}
+
+	// TODO(@Wondertan): These computations are quite expensive and likely to be used further,
+	// so we need to find a way to cache them and pass to the caller on the Bitswap side
+	parity, err := share.DefaultRSMT2DCodec().Encode(s.AxisHalf)
+	if err != nil {
+		return fmt.Errorf("while encoding erasure coded half: %w", err)
+	}
+	s.AxisHalf = append(s.AxisHalf, parity...)
+
+	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.ID.Index))
+	for _, shr := range s.AxisHalf {
+		err := tree.Push(shr)
+		if err != nil {
+			return fmt.Errorf("while pushing shares to NMT: %w", err)
+		}
+	}
+
+	root, err := tree.Root()
+	if err != nil {
+		return fmt.Errorf("while computing NMT root: %w", err)
+	}
+
+	if !bytes.Equal(s.ID.AxisHash, root) {
+		return fmt.Errorf("invalid root: %X != %X", root, s.ID.AxisHash)
+	}
+
+	return nil
+}
diff --git a/share/ipldv2/axis_sample_hasher.go b/share/ipldv2/axis_sample_hasher.go
new file mode 100644
index 0000000000..08b08cd3d8
--- /dev/null
+++ b/share/ipldv2/axis_sample_hasher.go
@@ -0,0 +1,53 @@
+package ipldv2
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// AxisSampleHasher implements hash.Hash interface for AxisSamples.
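+//
+// It is registered for axisSamplingMultihashCode in ipldv2.go, so Bitswap
+// drives it indirectly. A sketch of the calls it receives (illustrative;
+// `data` here stands for any marshaled AxisSample):
+//
+//	hasher := &AxisSampleHasher{}
+//	if _, err := hasher.Write(data); err == nil { // Write validates the sample
+//		id := hasher.Sum(nil) // the marshaled AxisSampleID acts as the digest
+//	}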
+type AxisSampleHasher struct {
+	sample AxisSample
+}
+
+// Write expects a marshaled AxisSample to validate.
+func (sh *AxisSampleHasher) Write(data []byte) (int, error) {
+	if err := sh.sample.UnmarshalBinary(data); err != nil {
+		err = fmt.Errorf("while unmarshaling AxisSample: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	if err := sh.sample.Validate(); err != nil {
+		err = fmt.Errorf("while validating AxisSample: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	return len(data), nil
+}
+
+// Sum returns the "multihash" of the AxisSampleID.
+func (sh *AxisSampleHasher) Sum([]byte) []byte {
+	sum, err := sh.sample.ID.MarshalBinary()
+	if err != nil {
+		err = fmt.Errorf("while marshaling AxisSampleID: %w", err)
+		log.Error(err)
+	}
+	return sum
+}
+
+// Reset resets the Hash to its initial state.
+func (sh *AxisSampleHasher) Reset() {
+	sh.sample = AxisSample{}
+}
+
+// Size returns the number of bytes Sum will return.
+func (sh *AxisSampleHasher) Size() int {
+	return AxisSampleIDSize
+}
+
+// BlockSize returns the hash's underlying block size.
+func (sh *AxisSampleHasher) BlockSize() int {
+	return sha256.BlockSize
+}
diff --git a/share/ipldv2/sample_hasher_test.go b/share/ipldv2/axis_sample_hasher_test.go
similarity index 84%
rename from share/ipldv2/sample_hasher_test.go
rename to share/ipldv2/axis_sample_hasher_test.go
index ace91d32eb..c9d54353f1 100644
--- a/share/ipldv2/sample_hasher_test.go
+++ b/share/ipldv2/axis_sample_hasher_test.go
@@ -11,15 +11,15 @@ import (
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
 )
 
-func TestSampleHasher(t *testing.T) {
-	hasher := &SampleHasher{}
+func TestAxisSampleHasher(t *testing.T) {
+	hasher := &AxisSampleHasher{}
 
 	_, err := hasher.Write([]byte("hello"))
 	assert.Error(t, err)
 
 	square := edstest.RandEDS(t, 2)
 
-	sample, err := NewSampleFromEDS(square, 2, rsmt2d.Row)
+	sample, err := NewAxisSampleFromEDS(square, 2, rsmt2d.Row)
 	require.NoError(t, err)
 
 	data, err := sample.MarshalBinary()
diff --git a/share/ipldv2/axis_sample_id.go b/share/ipldv2/axis_sample_id.go
new file mode 100644
index 0000000000..4b1e444e73
--- /dev/null
+++ b/share/ipldv2/axis_sample_id.go
@@ -0,0 +1,126 @@
+package ipldv2
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+	ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb"
+)
+
+// AxisSampleIDSize is the size of the AxisSampleID in bytes
+const AxisSampleIDSize = 127
+
+// AxisSampleID is a unique identifier of an AxisSample.
+type AxisSampleID struct {
+	// DataHash is the root of the data square
+	// Needed to identify the data square in the whole chain
+	DataHash share.DataHash
+	// AxisHash is the Col or Row root from DAH of the data square
+	AxisHash []byte
+	// Index is the index of the sample in the data square(not row or col index)
+	Index int
+	// Axis is Col or Row axis of the sample in the data square
+	Axis rsmt2d.Axis
+}
+
+// NewAxisSampleID constructs a new AxisSampleID.
+func NewAxisSampleID(root *share.Root, idx int, axis rsmt2d.Axis) AxisSampleID {
+	dahroot := root.RowRoots[idx]
+	if axis == rsmt2d.Col {
+		dahroot = root.ColumnRoots[idx]
+	}
+
+	return AxisSampleID{
+		DataHash: root.Hash(),
+		AxisHash: dahroot,
+		Index:    idx,
+		Axis:     axis,
+	}
+}
+
+// AxisSampleIDFromCID converts CID to AxisSampleID.
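+//
+// It is the inverse of (*AxisSampleID).Cid(). Expected roundtrip, as a
+// sketch with error handling elided (`id` stands for any valid AxisSampleID):
+//
+//	cid, _ := id.Cid()
+//	idOut, _ := AxisSampleIDFromCID(cid)
+//	// idOut is expected to equal id field by field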
+func AxisSampleIDFromCID(cid cid.Cid) (id AxisSampleID, err error) {
+	if err = validateCID(cid); err != nil {
+		return id, err
+	}
+
+	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
+	if err != nil {
+		return id, fmt.Errorf("while unmarshalling AxisSampleID: %w", err)
+	}
+
+	return id, nil
+}
+
+// Cid returns sample ID encoded as CID.
+func (s *AxisSampleID) Cid() (cid.Cid, error) {
+	data, err := s.MarshalBinary()
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	buf, err := mh.Encode(data, axisSamplingMultihashCode)
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	return cid.NewCidV1(axisSamplingCodec, buf), nil
+}
+
+// Proto converts AxisSampleID to its protobuf representation.
+func (s *AxisSampleID) Proto() *ipldv2pb.AxisSampleID {
+	return &ipldv2pb.AxisSampleID{
+		DataHash: s.DataHash,
+		AxisHash: s.AxisHash,
+		Index:    uint32(s.Index),
+		Axis:     ipldv2pb.Axis(s.Axis),
+	}
+}
+
+// MarshalBinary encodes AxisSampleID into binary form.
+func (s *AxisSampleID) MarshalBinary() ([]byte, error) {
+	// we cannot use protobuf here because it exceeds multihash limit of 128 bytes
+	data := make([]byte, AxisSampleIDSize)
+	n := copy(data, s.DataHash)
+	n += copy(data[n:], s.AxisHash)
+	binary.LittleEndian.PutUint32(data[n:], uint32(s.Index))
+	data[n+4] = byte(s.Axis)
+	return data, nil
+}
+
+// UnmarshalBinary decodes AxisSampleID from binary form.
+func (s *AxisSampleID) UnmarshalBinary(data []byte) error {
+	if len(data) != AxisSampleIDSize {
+		return fmt.Errorf("incorrect sample id size: %d != %d", len(data), AxisSampleIDSize)
+	}
+
+	// copying data to avoid slice aliasing
+	s.DataHash = append(s.DataHash, data[:hashSize]...)
+	s.AxisHash = append(s.AxisHash, data[hashSize:hashSize+dahRootSize]...)
+	s.Index = int(binary.LittleEndian.Uint32(data[hashSize+dahRootSize : hashSize+dahRootSize+4]))
+	s.Axis = rsmt2d.Axis(data[hashSize+dahRootSize+4])
+	return nil
+}
+
+// Validate validates fields of AxisSampleID.
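+//
+// The invariants it enforces, spelled out (sizes per the constants in
+// ipldv2.go):
+//
+//	len(id.DataHash) == hashSize    // sha256.Size
+//	len(id.AxisHash) == dahRootSize // 2*share.NamespaceSize + hashSize
+//	id.Axis == rsmt2d.Row || id.Axis == rsmt2d.Col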
+func (s *AxisSampleID) Validate() error {
+	if len(s.DataHash) != hashSize {
+		return fmt.Errorf("incorrect DataHash size: %d != %d", len(s.DataHash), hashSize)
+	}
+
+	if len(s.AxisHash) != dahRootSize {
+		return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), dahRootSize)
+	}
+
+	if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row {
+		return fmt.Errorf("incorrect Axis: %d", s.Axis)
+	}
+
+	return nil
+}
diff --git a/share/ipldv2/sample_id_test.go b/share/ipldv2/axis_sample_id_test.go
similarity index 66%
rename from share/ipldv2/sample_id_test.go
rename to share/ipldv2/axis_sample_id_test.go
index 7158cecfe7..a520f636ec 100644
--- a/share/ipldv2/sample_id_test.go
+++ b/share/ipldv2/axis_sample_id_test.go
@@ -12,24 +12,24 @@ import (
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
 )
 
-func TestSampleID(t *testing.T) {
+func TestAxisSampleID(t *testing.T) {
 	square := edstest.RandEDS(t, 2)
 
 	root, err := share.NewRoot(square)
 	require.NoError(t, err)
 
-	sid := NewSampleID(root, 2, rsmt2d.Row)
+	sid := NewAxisSampleID(root, 2, rsmt2d.Row)
 
 	id, err := sid.Cid()
 	require.NoError(t, err)
 
-	assert.EqualValues(t, codec, id.Prefix().Codec)
-	assert.EqualValues(t, multihashCode, id.Prefix().MhType)
-	assert.EqualValues(t, SampleIDSize, id.Prefix().MhLength)
+	assert.EqualValues(t, axisSamplingCodec, id.Prefix().Codec)
+	assert.EqualValues(t, axisSamplingMultihashCode, id.Prefix().MhType)
+	assert.EqualValues(t, AxisSampleIDSize, id.Prefix().MhLength)
 
 	data, err := sid.MarshalBinary()
 	require.NoError(t, err)
 
-	sidOut := SampleID{}
+	sidOut := AxisSampleID{}
 	err = sidOut.UnmarshalBinary(data)
 	require.NoError(t, err)
 	assert.EqualValues(t, sid, sidOut)
diff --git a/share/ipldv2/axis_sample_test.go b/share/ipldv2/axis_sample_test.go
new file mode 100644
index 0000000000..7ed017eaee
--- /dev/null
+++ b/share/ipldv2/axis_sample_test.go
@@ -0,0 +1,37 @@
+package ipldv2
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+func TestAxisSample(t *testing.T) {
+	square := edstest.RandEDS(t, 2)
+
+	aid, err := NewAxisSampleFromEDS(square, 2, rsmt2d.Row)
+	require.NoError(t, err)
+
+	data, err := aid.MarshalBinary()
+	require.NoError(t, err)
+
+	blk, err := aid.IPLDBlock()
+	require.NoError(t, err)
+
+	cid, err := aid.ID.Cid()
+	require.NoError(t, err)
+	assert.EqualValues(t, blk.Cid(), cid)
+
+	sidOut := &AxisSample{}
+	err = sidOut.UnmarshalBinary(data)
+	require.NoError(t, err)
+	assert.EqualValues(t, aid, sidOut)
+
+	err = sidOut.Validate()
+	require.NoError(t, err)
+}
diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go
index a678c6f1b6..a6c0f4e46b 100644
--- a/share/ipldv2/blockstore.go
+++ b/share/ipldv2/blockstore.go
@@ -13,15 +13,15 @@ import (
 	"github.com/celestiaorg/rsmt2d"
 
 	"github.com/celestiaorg/celestia-node/share"
-	"github.com/celestiaorg/celestia-node/share/eds"
 )
 
 // edsFile is a mocking friendly local interface over eds.File.
 // TODO(@Wondertan): Consider making an actual interface of eds pkg
 type edsFile interface {
 	io.Closer
-	Header() *eds.Header
+	Size() int
 	ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error)
+	AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error)
 }
 
 // fileStore is a mocking friendly local interface over eds.FileStore
@@ -39,40 +39,87 @@ func NewBlockstore[F edsFile](fs fileStore[F]) blockstore.Blockstore {
 }
 
 func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) {
-	id, err := SampleIDFromCID(cid)
-	if err != nil {
-		err = fmt.Errorf("while converting CID to SampleID: %w", err)
-		log.Error(err)
-		return nil, err
+	switch cid.Type() {
+	case shareSamplingCodec:
+		id, err := ShareSampleIDFromCID(cid)
+		if err != nil {
+			err = fmt.Errorf("while converting CID to ShareSampleID: %w", err)
+			log.Error(err)
+			return nil, err
+		}
+
+		blk, err := b.getShareSampleBlock(id)
+		if err != nil {
+			log.Error(err)
+			return nil, err
+		}
+
+		return blk, nil
+	case axisSamplingCodec:
+		id, err := AxisSampleIDFromCID(cid)
+		if err != nil {
+			err = fmt.Errorf("while converting CID to AxisSampleID: %w", err)
+			log.Error(err)
+			return nil, err
+		}
+
+		blk, err := b.getAxisSampleBlock(id)
+		if err != nil {
+			log.Error(err)
+			return nil, err
+		}
+
+		return blk, nil
+	default:
+		return nil, fmt.Errorf("unsupported codec")
 	}
+}
 
+func (b Blockstore[F]) getShareSampleBlock(id ShareSampleID) (blocks.Block, error) {
 	f, err := b.fs.File(id.DataHash)
 	if err != nil {
-		err = fmt.Errorf("while getting EDS file from FS: %w", err)
-		log.Error(err)
-		return nil, err
+		return nil, fmt.Errorf("while getting EDS file from FS: %w", err)
 	}
 
 	shr, prf, err := f.ShareWithProof(id.Index, id.Axis)
 	if err != nil {
-		err = fmt.Errorf("while getting share with proof: %w", err)
-		log.Error(err)
-		return nil, err
+		return nil, fmt.Errorf("while getting share with proof: %w", err)
 	}
 
-	s := NewSample(id, shr, prf, f.Header().SquareSize())
+	s := NewShareSample(id, shr, prf, f.Size())
 	blk, err := s.IPLDBlock()
 	if err != nil {
-		err = fmt.Errorf("while getting share with proof: %w", err)
-		log.Error(err)
-		return nil, err
+		return nil, fmt.Errorf("while converting to IPLD block: %w", err)
 	}
 
 	err = f.Close()
 	if err != nil {
-		err = fmt.Errorf("while closing EDS file: %w", err)
-		log.Error(err)
-		return nil, err
+		return nil, fmt.Errorf("while closing EDS file: %w", err)
+	}
+
+	return blk, nil
+}
+
+func (b Blockstore[F]) getAxisSampleBlock(id AxisSampleID) (blocks.Block, error) {
+	f, err := b.fs.File(id.DataHash)
+	if err != nil {
+		return nil, fmt.Errorf("while getting EDS file from FS: %w", err)
+	}
+
+	axisHalf, err := f.AxisHalf(id.Index, id.Axis)
+	if err != nil {
+		return nil, fmt.Errorf("while getting axis half: %w", err)
+	}
+
+	s := NewAxisSample(id, axisHalf)
+	blk, err := s.IPLDBlock()
+	if err != nil {
+		return nil, fmt.Errorf("while converting to IPLD block: %w", err)
+	}
+
+	err = f.Close()
+	if err != nil {
+		return nil, fmt.Errorf("while closing EDS file: %w", err)
 	}
 
 	return blk, nil
@@ -80,7 +127,9 @@ func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error)
 
 func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
 	// TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and
-	// allocating Sample's block.Block.
+	// allocating ShareSample's block.Block.
+	// NOTE: Bitswap uses GetSize also to determine if we have content stored or not
+	// so simply returning constant size is not an option
 	blk, err := b.Get(ctx, cid)
 	if err != nil {
 		return 0, err
@@ -90,14 +139,31 @@ func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
 }
 
 func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) {
-	id, err := SampleIDFromCID(cid)
-	if err != nil {
-		err = fmt.Errorf("while converting CID to SampleID: %w", err)
-		log.Error(err)
-		return false, err
+	var datahash share.DataHash
+	switch cid.Type() {
+	case shareSamplingCodec:
+		id, err := ShareSampleIDFromCID(cid)
+		if err != nil {
+			err = fmt.Errorf("while converting CID to ShareSampleID: %w", err)
+			log.Error(err)
+			return false, err
+		}
+
+		datahash = id.DataHash
+	case axisSamplingCodec:
+		id, err := AxisSampleIDFromCID(cid)
+		if err != nil {
+			err = fmt.Errorf("while converting CID to AxisSampleID: %w", err)
+			log.Error(err)
+			return false, err
+		}
+
+		datahash = id.DataHash
+	default:
+		return false, fmt.Errorf("unsupported codec")
 	}
 
-	f, err := b.fs.File(id.DataHash)
+	f, err := b.fs.File(datahash)
 	if err != nil {
 		err = fmt.Errorf("while getting EDS file from FS: %w", err)
 		log.Error(err)
diff --git a/share/ipldv2/blockstore_test.go b/share/ipldv2/blockstore_test.go
index 0ca131dff2..044378aa4e 100644
--- a/share/ipldv2/blockstore_test.go
+++ b/share/ipldv2/blockstore_test.go
@@ -30,14 +30,14 @@ func TestBlockstoreGet(t *testing.T) {
 	width := int(sqr.Width())
 	for _, axis := range axis {
 		for i := 0; i < width*width; i++ {
-			id := NewSampleID(root, i, axis)
+			id := NewShareSampleID(root, i, axis)
 
 			cid, err := id.Cid()
 			require.NoError(t, err)
 
 			blk, err := b.Get(ctx, cid)
 			require.NoError(t, err)
 
-			sample, err := SampleFromBlock(blk)
+			sample, err := ShareSampleFromBlock(blk)
 			require.NoError(t, err)
 
 			err = sample.Validate()
@@ -53,14 +53,18 @@
 func (m *edsFileAndFS) File(share.DataHash) (*edsFileAndFS, error) {
 	return m, nil
 }
 
-func (m *edsFileAndFS) Header() *eds.Header {
-	return (*eds.File)(m).Header()
+func (m *edsFileAndFS) Size() int {
+	return (*eds.File)(m).Header().SquareSize()
 }
 
 func (m *edsFileAndFS) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) {
 	return (*eds.File)(m).ShareWithProof(idx, axis)
 }
 
+func (m *edsFileAndFS) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) {
+	return (*eds.File)(m).AxisHalf(idx, axis)
+}
+
 func (m *edsFileAndFS) Close() error {
 	return nil
 }
diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go
index ae785b360e..5b496d5efa 100644
--- a/share/ipldv2/ipldv2.go
+++ b/share/ipldv2/ipldv2.go
@@ -1,23 +1,52 @@
 package ipldv2
 
 import (
+	"crypto/sha256"
 	"fmt"
+	"hash"
 
 	"github.com/ipfs/go-cid"
 	logger "github.com/ipfs/go-log/v2"
+	mh "github.com/multiformats/go-multihash"
+
+	"github.com/celestiaorg/celestia-node/share"
 )
 
 var log = logger.Logger("ipldv2")
 
 const (
-	// codec is the codec used for leaf and inner nodes of a Namespaced Merkle Tree.
-	codec = 0x7800
+	// shareSamplingCodec is a CID codec used for share sampling Bitswap requests over Namespaced
+	// Merkle Tree.
+	shareSamplingCodec = 0x7800
+
+	// shareSamplingMultihashCode is the multihash code for share sampling multihash function.
+	shareSamplingMultihashCode = 0x7801
+
+	// axisSamplingCodec is a CID codec used for axis sampling Bitswap requests over Namespaced Merkle
+	// Tree.
+	axisSamplingCodec = 0x7810
 
-	// multihashCode is the multihash code used to hash blocks
-	// that contain an NMT node (inner and leaf nodes).
- multihashCode = 0x7801 + // axisSamplingMultihashCode is the multihash code for custom axis sampling multihash function. + axisSamplingMultihashCode = 0x7811 ) +// TODO(@Wondertan): Eventually this should become configurable +const ( + hashSize = sha256.Size + dahRootSize = 2*share.NamespaceSize + hashSize + mhPrefixSize = 4 +) + +func init() { + // Register hashers for new multihashes + mh.Register(shareSamplingMultihashCode, func() hash.Hash { + return &ShareSampleHasher{} + }) + mh.Register(axisSamplingMultihashCode, func() hash.Hash { + return &AxisSampleHasher{} + }) +} + // defaultAllowlist keeps default list of hashes allowed in the network. var defaultAllowlist allowlist @@ -25,20 +54,26 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we disable all codes except home-baked code - return code == multihashCode + switch code { + case shareSamplingMultihashCode: + case axisSamplingMultihashCode: + default: + return false + } + return true } func validateCID(cid cid.Cid) error { prefix := cid.Prefix() - if prefix.Codec != codec { + if prefix.Codec != shareSamplingCodec && prefix.Codec != axisSamplingCodec { return fmt.Errorf("unsupported codec") } - if prefix.MhType != multihashCode { + if prefix.MhType != shareSamplingMultihashCode && prefix.MhType != axisSamplingMultihashCode { return fmt.Errorf("unsupported multihash") } - if prefix.MhLength != SampleIDSize { + if prefix.MhLength != ShareSampleIDSize { return fmt.Errorf("invalid multihash length") } diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 5b2d42bc6b..d32fe39e61 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -24,9 +24,9 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -// TestV2Roundtrip tests full protocol round trip of: -// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. -func TestV2RoundtripGetBlock(t *testing.T) { +// TestShareSampleRoundtripGetBlock tests full protocol round trip of: +// EDS -> ShareSample -> IPLDBlock -> BlockService -> Bitswap and in reverse. 
+func TestShareSampleRoundtripGetBlock(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()
 
@@ -35,6 +35,7 @@ func TestV2RoundtripGetBlock(t *testing.T) {
 	path := t.TempDir() + "/eds_file"
 	f, err := eds.CreateFile(path, sqr)
 	require.NoError(t, err)
+	defer f.Close()
 
 	b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f))
 	client := remoteClient(ctx, t, b)
@@ -43,7 +44,7 @@
 	width := int(sqr.Width())
 	for _, axis := range axis {
 		for i := 0; i < width*width; i++ {
-			smpl, err := NewSampleFromEDS(sqr, i, axis)
+			smpl, err := NewShareSampleFromEDS(sqr, i, axis)
 			require.NoError(t, err)
 
 			cid, err := smpl.ID.Cid()
@@ -53,22 +54,25 @@
 			require.NoError(t, err)
 			assert.EqualValues(t, cid, blkOut.Cid())
 
-			data, err := smpl.MarshalBinary()
-			require.NoError(t, err)
-			assert.EqualValues(t, data, blkOut.RawData())
+			smpl, err = ShareSampleFromBlock(blkOut)
+			assert.NoError(t, err)
+
+			err = smpl.Validate() // bitswap already performed validation and this is only for testing
+			assert.NoError(t, err)
 		}
 	}
 }
 
-func TestV2RoundtripGetBlocks(t *testing.T) {
+func TestShareSampleRoundtripGetBlocks(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()
 
-	sqr := edstest.RandEDS(t, 16) // TODO(@Wondertan): does not work with more than 8
+	sqr := edstest.RandEDS(t, 8) // TODO(@Wondertan): does not work with more than 8 for some reason
 
 	path := t.TempDir() + "/eds_file"
 	f, err := eds.CreateFile(path, sqr)
 	require.NoError(t, err)
+	defer f.Close()
 
 	b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f))
 	client := remoteClient(ctx, t, b)
@@ -78,7 +82,91 @@
 	width := int(sqr.Width())
 	for _, axis := range axis {
 		for i := 0; i < width*width; i++ {
-			smpl, err := NewSampleFromEDS(sqr, i, axis)
+			smpl, err := NewShareSampleFromEDS(sqr, i, axis)
 			require.NoError(t, err)
+
+			cid, err := smpl.ID.Cid()
+			require.NoError(t, err)
+
+			set.Add(cid)
+		}
+	}
+
+	blks := client.GetBlocks(ctx, set.Keys())
+	err = set.ForEach(func(c cid.Cid) error {
+		select {
+		case blk := <-blks:
+			assert.True(t, set.Has(blk.Cid()))
+
+			smpl, err := ShareSampleFromBlock(blk)
+			assert.NoError(t, err)
+
+			err = smpl.Validate() // bitswap already performed validation and this is only for testing
+			assert.NoError(t, err)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+		return nil
+	})
+	assert.NoError(t, err)
+}
+
+func TestAxisSampleRoundtripGetBlock(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	defer cancel()
+
+	sqr := edstest.RandEDS(t, 8)
+
+	path := t.TempDir() + "/eds_file"
+	f, err := eds.CreateFile(path, sqr)
+	require.NoError(t, err)
+	defer f.Close()
+
+	b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f))
+	client := remoteClient(ctx, t, b)
+
+	axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row}
+	width := int(sqr.Width())
+	for _, axis := range axis {
+		for i := 0; i < width; i++ {
+			smpl, err := NewAxisSampleFromEDS(sqr, i, axis)
+			require.NoError(t, err)
+
+			cid, err := smpl.ID.Cid()
+			require.NoError(t, err)
+
+			blkOut, err := client.GetBlock(ctx, cid)
+			require.NoError(t, err)
+			assert.EqualValues(t, cid, blkOut.Cid())
+
+			smpl, err = AxisSampleFromBlock(blkOut)
+			assert.NoError(t, err)
+
+			err = smpl.Validate() // bitswap already performed validation and this is only for testing
+			assert.NoError(t, err)
+		}
+	}
+}
+
+func TestAxisSampleRoundtripGetBlocks(t *testing.T) {
+	ctx, 
cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + sqr := edstest.RandEDS(t, 16) + + path := t.TempDir() + "/eds_file" + f, err := eds.CreateFile(path, sqr) + require.NoError(t, err) + + b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + client := remoteClient(ctx, t, b) + + set := cid.NewSet() + axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} + width := int(sqr.Width()) + for _, axis := range axis { + for i := 0; i < width; i++ { + smpl, err := NewAxisSampleFromEDS(sqr, i, axis) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -93,6 +181,12 @@ func TestV2RoundtripGetBlocks(t *testing.T) { select { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) + + smpl, err := AxisSampleFromBlock(blk) + assert.NoError(t, err) + + err = smpl.Validate() // bitswap already performed validation and this is only for testing + assert.NoError(t, err) case <-ctx.Done(): return ctx.Err() } diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index c3ff0dedd7..9af1d1f9b4 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -48,50 +48,50 @@ func (Axis) EnumDescriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{0} } -type SampleType int32 +type ShareSampleType int32 const ( - SampleType_Data SampleType = 0 - SampleType_Parity SampleType = 1 + ShareSampleType_Data ShareSampleType = 0 + ShareSampleType_Parity ShareSampleType = 1 ) -var SampleType_name = map[int32]string{ +var ShareSampleType_name = map[int32]string{ 0: "Data", 1: "Parity", } -var SampleType_value = map[string]int32{ +var ShareSampleType_value = map[string]int32{ "Data": 0, "Parity": 1, } -func (x SampleType) String() string { - return proto.EnumName(SampleType_name, int32(x)) +func (x ShareSampleType) String() string { + return proto.EnumName(ShareSampleType_name, int32(x)) } -func (SampleType) EnumDescriptor() ([]byte, []int) { +func (ShareSampleType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{1} } -type SampleID struct { +type ShareSampleID struct { DataHash []byte `protobuf:"bytes,1,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` } -func (m *SampleID) Reset() { *m = SampleID{} } -func (m *SampleID) String() string { return proto.CompactTextString(m) } -func (*SampleID) ProtoMessage() {} -func (*SampleID) Descriptor() ([]byte, []int) { +func (m *ShareSampleID) Reset() { *m = ShareSampleID{} } +func (m *ShareSampleID) String() string { return proto.CompactTextString(m) } +func (*ShareSampleID) ProtoMessage() {} +func (*ShareSampleID) Descriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{0} } -func (m *SampleID) XXX_Unmarshal(b []byte) error { +func (m *ShareSampleID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ShareSampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_SampleID.Marshal(b, m, deterministic) + return xxx_messageInfo_ShareSampleID.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -101,65 +101,65 @@ func (m *SampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { return b[:n], nil } } -func (m *SampleID) XXX_Merge(src proto.Message) { - xxx_messageInfo_SampleID.Merge(m, src) +func (m *ShareSampleID) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShareSampleID.Merge(m, src) } -func (m *SampleID) XXX_Size() int { +func (m *ShareSampleID) XXX_Size() int { return m.Size() } -func (m *SampleID) XXX_DiscardUnknown() { - xxx_messageInfo_SampleID.DiscardUnknown(m) +func (m *ShareSampleID) XXX_DiscardUnknown() { + xxx_messageInfo_ShareSampleID.DiscardUnknown(m) } -var xxx_messageInfo_SampleID proto.InternalMessageInfo +var xxx_messageInfo_ShareSampleID proto.InternalMessageInfo -func (m *SampleID) GetDataHash() []byte { +func (m *ShareSampleID) GetDataHash() []byte { if m != nil { return m.DataHash } return nil } -func (m *SampleID) GetAxisHash() []byte { +func (m *ShareSampleID) GetAxisHash() []byte { if m != nil { return m.AxisHash } return nil } -func (m *SampleID) GetIndex() uint32 { +func (m *ShareSampleID) GetIndex() uint32 { if m != nil { return m.Index } return 0 } -func (m *SampleID) GetAxis() Axis { +func (m *ShareSampleID) GetAxis() Axis { if m != nil { return m.Axis } return Axis_Row } -type Sample struct { - Id *SampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Type SampleType `protobuf:"varint,2,opt,name=type,proto3,enum=SampleType" json:"type,omitempty"` - Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` - Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` +type ShareSample struct { + Id *ShareSampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Type ShareSampleType `protobuf:"varint,2,opt,name=type,proto3,enum=ShareSampleType" json:"type,omitempty"` + Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` + Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` } -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { +func (m *ShareSample) Reset() { *m = ShareSample{} } +func (m *ShareSample) String() string { return proto.CompactTextString(m) } +func (*ShareSample) ProtoMessage() {} +func (*ShareSample) Descriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{1} } -func (m *Sample) XXX_Unmarshal(b []byte) error { +func (m *ShareSample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ShareSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + return xxx_messageInfo_ShareSample.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -169,80 +169,204 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) +func (m *ShareSample) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShareSample.Merge(m, src) } -func (m *Sample) XXX_Size() int { +func (m *ShareSample) XXX_Size() int { return m.Size() } -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) +func (m *ShareSample) XXX_DiscardUnknown() { + xxx_messageInfo_ShareSample.DiscardUnknown(m) } -var xxx_messageInfo_Sample proto.InternalMessageInfo +var xxx_messageInfo_ShareSample 
proto.InternalMessageInfo -func (m *Sample) GetId() *SampleID { +func (m *ShareSample) GetId() *ShareSampleID { if m != nil { return m.Id } return nil } -func (m *Sample) GetType() SampleType { +func (m *ShareSample) GetType() ShareSampleType { if m != nil { return m.Type } - return SampleType_Data + return ShareSampleType_Data } -func (m *Sample) GetShare() []byte { +func (m *ShareSample) GetShare() []byte { if m != nil { return m.Share } return nil } -func (m *Sample) GetProof() *pb.Proof { +func (m *ShareSample) GetProof() *pb.Proof { if m != nil { return m.Proof } return nil } +type AxisSampleID struct { + DataHash []byte `protobuf:"bytes,1,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` + Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` +} + +func (m *AxisSampleID) Reset() { *m = AxisSampleID{} } +func (m *AxisSampleID) String() string { return proto.CompactTextString(m) } +func (*AxisSampleID) ProtoMessage() {} +func (*AxisSampleID) Descriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{2} +} +func (m *AxisSampleID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AxisSampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AxisSampleID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AxisSampleID) XXX_Merge(src proto.Message) { + xxx_messageInfo_AxisSampleID.Merge(m, src) +} +func (m *AxisSampleID) XXX_Size() int { + return m.Size() +} +func (m *AxisSampleID) XXX_DiscardUnknown() { + xxx_messageInfo_AxisSampleID.DiscardUnknown(m) +} + +var xxx_messageInfo_AxisSampleID proto.InternalMessageInfo + +func (m *AxisSampleID) GetDataHash() []byte { + if m != nil { + return m.DataHash + } + return nil +} + +func (m *AxisSampleID) GetAxisHash() []byte { + if m != nil { + return m.AxisHash + } + return nil +} + +func (m *AxisSampleID) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *AxisSampleID) GetAxis() Axis { + if m != nil { + return m.Axis + } + return Axis_Row +} + +type AxisSample struct { + Id *AxisSampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AxisHalf [][]byte `protobuf:"bytes,2,rep,name=axis_half,json=axisHalf,proto3" json:"axis_half,omitempty"` +} + +func (m *AxisSample) Reset() { *m = AxisSample{} } +func (m *AxisSample) String() string { return proto.CompactTextString(m) } +func (*AxisSample) ProtoMessage() {} +func (*AxisSample) Descriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{3} +} +func (m *AxisSample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AxisSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AxisSample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AxisSample) XXX_Merge(src proto.Message) { + xxx_messageInfo_AxisSample.Merge(m, src) +} +func (m *AxisSample) XXX_Size() int { + return m.Size() +} +func (m *AxisSample) XXX_DiscardUnknown() { + xxx_messageInfo_AxisSample.DiscardUnknown(m) +} + +var 
xxx_messageInfo_AxisSample proto.InternalMessageInfo + +func (m *AxisSample) GetId() *AxisSampleID { + if m != nil { + return m.Id + } + return nil +} + +func (m *AxisSample) GetAxisHalf() [][]byte { + if m != nil { + return m.AxisHalf + } + return nil +} + func init() { proto.RegisterEnum("Axis", Axis_name, Axis_value) - proto.RegisterEnum("SampleType", SampleType_name, SampleType_value) - proto.RegisterType((*SampleID)(nil), "SampleID") - proto.RegisterType((*Sample)(nil), "Sample") + proto.RegisterEnum("ShareSampleType", ShareSampleType_name, ShareSampleType_value) + proto.RegisterType((*ShareSampleID)(nil), "ShareSampleID") + proto.RegisterType((*ShareSample)(nil), "ShareSample") + proto.RegisterType((*AxisSampleID)(nil), "AxisSampleID") + proto.RegisterType((*AxisSample)(nil), "AxisSample") } func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 306 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0xc1, 0x4e, 0xab, 0x40, - 0x14, 0x86, 0x99, 0x96, 0xf6, 0xb6, 0xa7, 0xbd, 0x48, 0x26, 0x2e, 0xa8, 0x26, 0x23, 0x21, 0x31, - 0x21, 0x5d, 0x40, 0x82, 0x4f, 0xa0, 0x76, 0xa1, 0xbb, 0x66, 0x74, 0x6f, 0x86, 0x80, 0x61, 0x12, - 0x74, 0x26, 0x80, 0x0a, 0x4b, 0xdf, 0xc0, 0xc7, 0x72, 0xd9, 0xa5, 0x4b, 0x03, 0x2f, 0x62, 0x66, - 0x06, 0xe3, 0xee, 0x9c, 0xff, 0x9b, 0xe4, 0x3b, 0xff, 0x00, 0xa9, 0x0b, 0x56, 0xe5, 0x31, 0x97, - 0x65, 0xf6, 0x9a, 0xc4, 0x32, 0x1d, 0x27, 0x99, 0x46, 0xb2, 0x12, 0x8d, 0x38, 0x71, 0x64, 0x1a, - 0xcb, 0x4a, 0x88, 0x47, 0xb3, 0x07, 0x2f, 0xb0, 0xb8, 0x63, 0x4f, 0xb2, 0xcc, 0x6f, 0x77, 0xf8, - 0x14, 0x96, 0x19, 0x6b, 0xd8, 0x43, 0xc1, 0xea, 0xc2, 0x43, 0x3e, 0x0a, 0xd7, 0x74, 0xa1, 0x82, - 0x1b, 0x56, 0x17, 0x0a, 0xb2, 0x96, 0xd7, 0x06, 0x4e, 0x0c, 0x54, 0x81, 0x86, 0xc7, 0x30, 0xe3, - 0xcf, 0x59, 0xde, 0x7a, 0x53, 0x1f, 0x85, 0xff, 0xa9, 0x59, 0xf0, 0x06, 0x6c, 0xf5, 0xc2, 0xb3, - 0x7d, 0x14, 0x3a, 0xc9, 0x2c, 0xba, 0x6c, 0x79, 0x4d, 0x75, 0x14, 0xbc, 0x23, 0x98, 0x1b, 0x2f, - 0xde, 0xc0, 0x84, 0x67, 0x5a, 0xb7, 0x4a, 0x96, 0xd1, 0xef, 0x31, 0x74, 0xc2, 0x33, 0x7c, 0x06, - 0x76, 0xd3, 0xc9, 0x5c, 0xeb, 0x9c, 0x64, 0x35, 0xc2, 0xfb, 0x4e, 0xe6, 0x54, 0x03, 0xe5, 0xd5, - 0x7d, 0xb5, 0x77, 0x4d, 0xcd, 0x82, 0xcf, 0x61, 0xa6, 0x2b, 0x6a, 0xf1, 0x2a, 0x39, 0x8a, 0xc6, - 0xc2, 0x69, 0xb4, 0x57, 0x03, 0x35, 0x74, 0xeb, 0x81, 0xad, 0x2e, 0xc2, 0xff, 0x60, 0x4a, 0xc5, - 0x9b, 0x6b, 0xa9, 0xe1, 0x5a, 0x94, 0x2e, 0xda, 0x06, 0x00, 0x7f, 0x2a, 0xbc, 0x00, 0x7b, 0xc7, - 0x1a, 0xe6, 0x5a, 0x18, 0x60, 0xbe, 0x67, 0x15, 0x6f, 0x3a, 0x17, 0x5d, 0x79, 0x9f, 0x3d, 0x41, - 0x87, 0x9e, 0xa0, 0xef, 0x9e, 0xa0, 0x8f, 0x81, 0x58, 0x87, 0x81, 0x58, 0x5f, 0x03, 0xb1, 0xd2, - 0xb9, 0xfe, 0xd9, 0x8b, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xc2, 0x9e, 0xb2, 0x8b, 0x01, - 0x00, 0x00, -} - -func (m *SampleID) Marshal() (dAtA []byte, err error) { + // 347 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x92, 0xc1, 0x4e, 0xfa, 0x40, + 0x10, 0xc6, 0xbb, 0xa5, 0xf0, 0xe7, 0x3f, 0x40, 0x69, 0x36, 0x1e, 0xaa, 0xc6, 0x4d, 0x43, 0x34, + 0x12, 0x0e, 0x25, 0xa9, 0x4f, 0xa0, 0x72, 0xc0, 0x1b, 0x59, 0xbc, 0x9b, 0x6d, 0x5a, 0xd2, 0x4d, + 0xaa, 0xbb, 0x69, 0x1b, 0xa5, 0x3e, 0x05, 0x8f, 0xe5, 0x91, 0xa3, 0x47, 0x03, 0x2f, 0x62, 0x76, + 0x97, 0x84, 0xea, 0x0b, 0x78, 0x9b, 0x99, 0x6f, 0x26, 0xdf, 0xb7, 0xbf, 0x2c, 0x90, 0x32, 0x63, + 0x45, 0x3a, 0xe5, 0x32, 0x4f, 0x5e, 0xa3, 0xa9, 0x8c, 0x0f, 0x95, 0x8c, 
0x43, 0x59, 0x88, 0x4a, + 0x9c, 0xb9, 0x32, 0x9e, 0xca, 0x42, 0x88, 0x95, 0xe9, 0x47, 0xef, 0x30, 0x58, 0xaa, 0x8b, 0x25, + 0x7b, 0x96, 0x79, 0xfa, 0x30, 0xc3, 0xe7, 0xf0, 0x3f, 0x61, 0x15, 0x7b, 0xca, 0x58, 0x99, 0xf9, + 0x28, 0x40, 0xe3, 0x3e, 0xed, 0xaa, 0xc1, 0x9c, 0x95, 0x99, 0x12, 0xd9, 0x9a, 0x97, 0x46, 0xb4, + 0x8d, 0xa8, 0x06, 0x5a, 0x3c, 0x81, 0x36, 0x7f, 0x49, 0xd2, 0xb5, 0xdf, 0x0a, 0xd0, 0x78, 0x40, + 0x4d, 0x83, 0x4f, 0xc1, 0x51, 0x1b, 0xbe, 0x13, 0xa0, 0xb1, 0x1b, 0xb5, 0xc3, 0xdb, 0x35, 0x2f, + 0xa9, 0x1e, 0x8d, 0x36, 0x08, 0x7a, 0x0d, 0x73, 0x4c, 0xc0, 0xe6, 0x89, 0xf6, 0xec, 0x45, 0x6e, + 0xf8, 0x23, 0x16, 0xb5, 0x79, 0x82, 0x2f, 0xc1, 0xa9, 0x6a, 0x99, 0x6a, 0x63, 0x37, 0xf2, 0x9a, + 0x1b, 0x8f, 0xb5, 0x4c, 0xa9, 0x56, 0x55, 0x0c, 0xcd, 0x40, 0xc7, 0xe8, 0x53, 0xd3, 0xe0, 0x2b, + 0x68, 0xeb, 0x67, 0xeb, 0x1c, 0xbd, 0x68, 0x18, 0x1e, 0x20, 0xc4, 0xe1, 0x42, 0x15, 0xd4, 0xa8, + 0xa3, 0x1a, 0xfa, 0x2a, 0xe0, 0x5f, 0xd0, 0x98, 0x03, 0x1c, 0xad, 0xf1, 0x45, 0x83, 0xc5, 0x20, + 0x6c, 0x66, 0xd2, 0x28, 0x8e, 0xd6, 0xf9, 0xca, 0xb7, 0x83, 0xd6, 0xd1, 0x3a, 0x5f, 0x4d, 0x7c, + 0x70, 0xd4, 0x01, 0xfe, 0x07, 0x2d, 0x2a, 0xde, 0x3c, 0x4b, 0x15, 0xf7, 0x22, 0xf7, 0xd0, 0xe4, + 0x1a, 0x86, 0xbf, 0xa0, 0xe1, 0x2e, 0x38, 0x33, 0x56, 0x31, 0xcf, 0xc2, 0x00, 0x9d, 0x05, 0x2b, + 0x78, 0x55, 0x7b, 0xe8, 0xce, 0xff, 0xd8, 0x11, 0xb4, 0xdd, 0x11, 0xf4, 0xb5, 0x23, 0x68, 0xb3, + 0x27, 0xd6, 0x76, 0x4f, 0xac, 0xcf, 0x3d, 0xb1, 0xe2, 0x8e, 0xfe, 0x37, 0x37, 0xdf, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xa1, 0x8c, 0xa7, 0x00, 0x69, 0x02, 0x00, 0x00, +} + +func (m *ShareSampleID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -252,12 +376,12 @@ func (m *SampleID) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SampleID) MarshalTo(dAtA []byte) (int, error) { +func (m *ShareSampleID) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ShareSampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -289,7 +413,7 @@ func (m *SampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Sample) Marshal() (dAtA []byte, err error) { +func (m *ShareSample) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -299,12 +423,12 @@ func (m *Sample) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { +func (m *ShareSample) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ShareSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -348,6 +472,97 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *AxisSampleID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AxisSampleID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AxisSampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Axis != 0 { + i 
= encodeVarintIpldv2Pb(dAtA, i, uint64(m.Axis)) + i-- + dAtA[i] = 0x20 + } + if m.Index != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if len(m.AxisHash) > 0 { + i -= len(m.AxisHash) + copy(dAtA[i:], m.AxisHash) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHash))) + i-- + dAtA[i] = 0x12 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AxisSample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AxisSample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AxisSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AxisHalf) > 0 { + for iNdEx := len(m.AxisHalf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AxisHalf[iNdEx]) + copy(dAtA[i:], m.AxisHalf[iNdEx]) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHalf[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Id != nil { + { + size, err := m.Id.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintIpldv2Pb(dAtA []byte, offset int, v uint64) int { offset -= sovIpldv2Pb(v) base := offset @@ -359,7 +574,7 @@ func encodeVarintIpldv2Pb(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *SampleID) Size() (n int) { +func (m *ShareSampleID) Size() (n int) { if m == nil { return 0 } @@ -382,7 +597,7 @@ func (m *SampleID) Size() (n int) { return n } -func (m *Sample) Size() (n int) { +func (m *ShareSample) Size() (n int) { if m == nil { return 0 } @@ -406,13 +621,55 @@ func (m *Sample) Size() (n int) { return n } +func (m *AxisSampleID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + l = len(m.AxisHash) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Index)) + } + if m.Axis != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Axis)) + } + return n +} + +func (m *AxisSample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != nil { + l = m.Id.Size() + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if len(m.AxisHalf) > 0 { + for _, b := range m.AxisHalf { + l = len(b) + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + } + return n +} + func sovIpldv2Pb(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozIpldv2Pb(x uint64) (n int) { return sovIpldv2Pb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *SampleID) Unmarshal(dAtA []byte) error { +func (m *ShareSampleID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -435,10 +692,10 @@ func (m *SampleID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SampleID: wiretype end group for non-group") + return fmt.Errorf("proto: ShareSampleID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SampleID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShareSampleID: illegal tag %d 
(wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -568,7 +825,7 @@ func (m *SampleID) Unmarshal(dAtA []byte) error { } return nil } -func (m *Sample) Unmarshal(dAtA []byte) error { +func (m *ShareSample) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -591,10 +848,10 @@ func (m *Sample) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") + return fmt.Errorf("proto: ShareSample: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShareSample: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -627,7 +884,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Id == nil { - m.Id = &SampleID{} + m.Id = &ShareSampleID{} } if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -647,7 +904,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= SampleType(b&0x7F) << shift + m.Type |= ShareSampleType(b&0x7F) << shift if b < 0x80 { break } @@ -743,6 +1000,280 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } return nil } +func (m *AxisSampleID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AxisSampleID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AxisSampleID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AxisHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AxisHash = append(m.AxisHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AxisHash == nil { + m.AxisHash = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Axis", wireType) + } + m.Axis = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Axis |= Axis(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIpldv2Pb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AxisSample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AxisSample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AxisSample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Id == nil { + m.Id = &AxisSampleID{} + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AxisHalf", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AxisHalf = append(m.AxisHalf, make([]byte, postIndex-iNdEx)) + copy(m.AxisHalf[len(m.AxisHalf)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIpldv2Pb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} func skipIpldv2Pb(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto index df3444de20..c39127fa21 100644 --- a/share/ipldv2/pb/ipldv2pb.proto +++ b/share/ipldv2/pb/ipldv2pb.proto @@ -7,22 +7,33 @@ enum Axis { Col = 1; } -enum SampleType { +enum ShareSampleType { Data = 0; Parity = 1; } -message SampleID{ +message ShareSampleID{ bytes data_hash = 1; - bytes axis_hash = 2; + bytes axis_hash = 2; // TODO(@Wondertan): Redundant, but has to be sent due to Bitswap's stateless verification requirement uint32 index = 3; Axis axis = 4; } -message Sample { - SampleID id = 1; - - SampleType type = 2; +message ShareSample { + ShareSampleID id = 1; + ShareSampleType type = 2; bytes share = 3; proof.pb.Proof proof = 4; } + +message AxisSampleID{ + bytes data_hash = 1; + bytes axis_hash = 2; // TODO(@Wondertan): Redundant, but has to be sent due to Bitswap's stateless verification requirement + uint32 index = 3; // TODO(@Wondertan): uint16 would be enough, but proto3 doest not support it + Axis axis = 4; +} + +message AxisSample { + AxisSampleID id = 1; + repeated bytes axis_half = 2; +} diff --git a/share/ipldv2/sample_hasher.go b/share/ipldv2/sample_hasher.go deleted file mode 100644 index 61341283e7..0000000000 --- a/share/ipldv2/sample_hasher.go +++ /dev/null @@ -1,63 +0,0 @@ -package ipldv2 - -import ( - "crypto/sha256" - "fmt" - "hash" - - mh "github.com/multiformats/go-multihash" -) - -func init() { - // Register hasher for our multihash code - mh.Register(multihashCode, func() hash.Hash { - return &SampleHasher{} - }) -} - -// SampleHasher implements hash.Hash interface for Samples. -type SampleHasher struct { - sample Sample -} - -// Write expects a marshaled Sample to validate. -func (sh *SampleHasher) Write(data []byte) (int, error) { - if err := sh.sample.UnmarshalBinary(data); err != nil { - err = fmt.Errorf("while unmarshaling Sample: %w", err) - log.Error(err) - return 0, err - } - - if err := sh.sample.Validate(); err != nil { - err = fmt.Errorf("while validating Sample: %w", err) - log.Error(err) - return 0, err - } - - return len(data), nil -} - -// Sum returns the "multihash" of the SampleID. -func (sh *SampleHasher) Sum([]byte) []byte { - sum, err := sh.sample.ID.MarshalBinary() - if err != nil { - err = fmt.Errorf("while marshaling SampleID") - log.Error(err) - } - return sum -} - -// Reset resets the Hash to its initial state. -func (sh *SampleHasher) Reset() { - sh.sample = Sample{} -} - -// Size returns the number of bytes Sum will return. -func (sh *SampleHasher) Size() int { - return SampleIDSize -} - -// BlockSize returns the hash's underlying block size. -func (sh *SampleHasher) BlockSize() int { - return sha256.BlockSize -} diff --git a/share/ipldv2/sample.go b/share/ipldv2/share_sample.go similarity index 55% rename from share/ipldv2/sample.go rename to share/ipldv2/share_sample.go index e5bbf269b1..e7e4557170 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/share_sample.go @@ -16,37 +16,37 @@ import ( ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) -// SampleType represents type of sample. -type SampleType uint8 +// ShareSampleType represents type of sample. +type ShareSampleType uint8 const ( - // DataSample is a sample of a data share. - DataSample SampleType = iota - // ParitySample is a sample of a parity share. - ParitySample + // DataShareSample is a sample of a data share. 
+ DataShareSample ShareSampleType = iota + // ParityShareSample is a sample of a parity share. + ParityShareSample ) -// Sample represents a sample of an NMT in EDS. -type Sample struct { - // ID of the Sample - ID SampleID - // Type of the Sample - Type SampleType +// ShareSample represents a sample of an NMT in EDS. +type ShareSample struct { + // ID of the ShareSample + ID ShareSampleID + // Type of the ShareSample + Type ShareSampleType // Proof of Share inclusion in the NMT Proof nmt.Proof // Share being sampled Share share.Share } -// NewSample constructs a new Sample. -func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample { +// NewShareSample constructs a new ShareSample. +func NewShareSample(id ShareSampleID, shr share.Share, proof nmt.Proof, sqrLn int) *ShareSample { row, col := id.Index/sqrLn, id.Index%sqrLn - tp := ParitySample + tp := ParityShareSample if row < sqrLn/2 && col < sqrLn/2 { - tp = DataSample + tp = DataShareSample } - return &Sample{ + return &ShareSample{ ID: id, Type: tp, Proof: proof, @@ -54,14 +54,14 @@ func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample } } -// NewSampleFrom constructs a new Sample from share.Root. -func NewSampleFrom(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *Sample { - id := NewSampleID(root, idx, axis) - return NewSample(id, shr, proof, len(root.RowRoots)) +// NewShareSampleFrom constructs a new ShareSample from share.Root. +func NewShareSampleFrom(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *ShareSample { + id := NewShareSampleID(root, idx, axis) + return NewShareSample(id, shr, proof, len(root.RowRoots)) } -// NewSampleFromEDS samples the EDS and constructs a new Sample. -func NewSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*Sample, error) { +// NewShareSampleFromEDS samples the EDS and constructs a new ShareSample. +func NewShareSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*ShareSample, error) { sqrLn := int(eds.Width()) axisIdx, shrIdx := idx/sqrLn, idx%sqrLn @@ -95,11 +95,11 @@ func NewSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) return nil, fmt.Errorf("while proving range share over NMT: %w", err) } - return NewSampleFrom(root, idx, axis, shrs[shrIdx], prf), nil + return NewShareSampleFrom(root, idx, axis, shrs[shrIdx], prf), nil } -// Proto converts Sample to its protobuf representation. -func (s *Sample) Proto() *ipldv2pb.Sample { +// Proto converts ShareSample to its protobuf representation. +func (s *ShareSample) Proto() *ipldv2pb.ShareSample { // TODO: Extract as helper to nmt proof := &nmtpb.Proof{} proof.Nodes = s.Proof.Nodes() @@ -108,31 +108,31 @@ func (s *Sample) Proto() *ipldv2pb.Sample { proof.IsMaxNamespaceIgnored = s.Proof.IsMaxNamespaceIDIgnored() proof.LeafHash = s.Proof.LeafHash() - return &ipldv2pb.Sample{ + return &ipldv2pb.ShareSample{ Id: s.ID.Proto(), - Type: ipldv2pb.SampleType(s.Type), + Type: ipldv2pb.ShareSampleType(s.Type), Proof: proof, Share: s.Share, } } -// SampleFromBlock converts blocks.Block into Sample. -func SampleFromBlock(blk blocks.Block) (*Sample, error) { +// ShareSampleFromBlock converts blocks.Block into ShareSample. 
+func ShareSampleFromBlock(blk blocks.Block) (*ShareSample, error) {
 	if err := validateCID(blk.Cid()); err != nil {
 		return nil, err
 	}
 
-	s := &Sample{}
+	s := &ShareSample{}
 	err := s.UnmarshalBinary(blk.RawData())
 	if err != nil {
-		return nil, fmt.Errorf("while unmarshalling Sample: %w", err)
+		return nil, fmt.Errorf("while unmarshalling ShareSample: %w", err)
 	}
 
 	return s, nil
 }
 
-// IPLDBlock converts Sample to an IPLD block for Bitswap compatibility.
-func (s *Sample) IPLDBlock() (blocks.Block, error) {
+// IPLDBlock converts ShareSample to an IPLD block for Bitswap compatibility.
+func (s *ShareSample) IPLDBlock() (blocks.Block, error) {
 	cid, err := s.ID.Cid()
 	if err != nil {
 		return nil, err
@@ -146,43 +146,42 @@ func (s *Sample) IPLDBlock() (blocks.Block, error) {
 	return blocks.NewBlockWithCid(data, cid)
 }
 
-// MarshalBinary marshals Sample to binary.
-func (s *Sample) MarshalBinary() ([]byte, error) {
+// MarshalBinary marshals ShareSample to binary.
+func (s *ShareSample) MarshalBinary() ([]byte, error) {
 	return s.Proto().Marshal()
 }
 
-// UnmarshalBinary unmarshals Sample from binary.
-func (s *Sample) UnmarshalBinary(data []byte) error {
-	proto := &ipldv2pb.Sample{}
+// UnmarshalBinary unmarshals ShareSample from binary.
+func (s *ShareSample) UnmarshalBinary(data []byte) error {
+	proto := &ipldv2pb.ShareSample{}
 	if err := proto.Unmarshal(data); err != nil {
 		return err
 	}
 
-	s.ID = SampleID{
+	s.ID = ShareSampleID{
 		DataHash: proto.Id.DataHash,
 		AxisHash: proto.Id.AxisHash,
 		Index:    int(proto.Id.Index),
 		Axis:     rsmt2d.Axis(proto.Id.Axis),
 	}
-	s.Type = SampleType(proto.Type)
+	s.Type = ShareSampleType(proto.Type)
 	s.Proof = nmt.ProtoToProof(*proto.Proof)
 	s.Share = proto.Share
 	return nil
 }
 
-// Validate validates Sample's fields and proof of Share inclusion in the NMT.
-func (s *Sample) Validate() error {
+// Validate validates ShareSample's fields and proof of Share inclusion in the NMT.
+func (s *ShareSample) Validate() error {
 	if err := s.ID.Validate(); err != nil {
 		return err
 	}
 
-	if s.Type != DataSample && s.Type != ParitySample {
+	if s.Type != DataShareSample && s.Type != ParityShareSample {
 		return fmt.Errorf("incorrect sample type: %d", s.Type)
 	}
 
-	// TODO Support Col proofs
 	namespace := share.ParitySharesNamespace
-	if s.Type == DataSample {
+	if s.Type == DataShareSample {
 		namespace = share.GetNamespace(s.Share)
 	}
 
diff --git a/share/ipldv2/share_sample_hasher.go b/share/ipldv2/share_sample_hasher.go
new file mode 100644
index 0000000000..98bb18db24
--- /dev/null
+++ b/share/ipldv2/share_sample_hasher.go
@@ -0,0 +1,53 @@
+package ipldv2
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// ShareSampleHasher implements hash.Hash interface for Samples.
+type ShareSampleHasher struct {
+	sample ShareSample
+}
+
+// Write expects a marshaled ShareSample to validate.
+func (sh *ShareSampleHasher) Write(data []byte) (int, error) {
+	if err := sh.sample.UnmarshalBinary(data); err != nil {
+		err = fmt.Errorf("while unmarshaling ShareSample: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	if err := sh.sample.Validate(); err != nil {
+		err = fmt.Errorf("while validating ShareSample: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	return len(data), nil
+}
+
+// Sum returns the "multihash" of the ShareSampleID.
+func (sh *ShareSampleHasher) Sum([]byte) []byte {
+	sum, err := sh.sample.ID.MarshalBinary()
+	if err != nil {
+		err = fmt.Errorf("while marshaling ShareSampleID")
+		log.Error(err)
+	}
+	return sum
+}
+
+// Reset resets the Hash to its initial state.
+func (sh *ShareSampleHasher) Reset() {
+	sh.sample = ShareSample{}
+}
+
+// Size returns the number of bytes Sum will return.
+func (sh *ShareSampleHasher) Size() int {
+	return ShareSampleIDSize
+}
+
+// BlockSize returns the hash's underlying block size.
+func (sh *ShareSampleHasher) BlockSize() int {
+	return sha256.BlockSize
+}
diff --git a/share/ipldv2/share_sample_hasher_test.go b/share/ipldv2/share_sample_hasher_test.go
new file mode 100644
index 0000000000..8b6462b163
--- /dev/null
+++ b/share/ipldv2/share_sample_hasher_test.go
@@ -0,0 +1,40 @@
+package ipldv2
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+func TestShareSampleHasher(t *testing.T) {
+	hasher := &ShareSampleHasher{}
+
+	_, err := hasher.Write([]byte("hello"))
+	assert.Error(t, err)
+
+	square := edstest.RandEDS(t, 2)
+
+	sample, err := NewShareSampleFromEDS(square, 2, rsmt2d.Row)
+	require.NoError(t, err)
+
+	data, err := sample.MarshalBinary()
+	require.NoError(t, err)
+
+	n, err := hasher.Write(data)
+	require.NoError(t, err)
+	assert.EqualValues(t, len(data), n)
+
+	digest := hasher.Sum(nil)
+	sid, err := sample.ID.MarshalBinary()
+	require.NoError(t, err)
+	assert.EqualValues(t, sid, digest)
+
+	hasher.Reset()
+	digest = hasher.Sum(nil)
+	assert.NotEqualValues(t, digest, sid)
+}
diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/share_sample_id.go
similarity index 62%
rename from share/ipldv2/sample_id.go
rename to share/ipldv2/share_sample_id.go
index 3dc1b6040b..46293a16eb 100644
--- a/share/ipldv2/sample_id.go
+++ b/share/ipldv2/share_sample_id.go
@@ -1,7 +1,6 @@
 package ipldv2
 
 import (
-	"crypto/sha256"
 	"encoding/binary"
 	"fmt"
 
@@ -14,18 +13,11 @@ import (
 	ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb"
 )
 
-// SampleIDSize is the size of the SampleID in bytes
-const SampleIDSize = 127
+// ShareSampleIDSize is the size of the ShareSampleID in bytes
+const ShareSampleIDSize = 127
 
-// TODO(@Wondertan): Eventually this should become configurable
-const (
-	hashSize     = sha256.Size
-	dahRootSize  = 2*share.NamespaceSize + hashSize
-	mhPrefixSize = 4
-)
-
-// SampleID is an unique identifier of a Sample.
-type SampleID struct {
+// ShareSampleID is a unique identifier of a ShareSample.
+type ShareSampleID struct {
 	// DataHash is the root of the data square
 	// Needed to identify the data square in the whole chain
 	DataHash share.DataHash
@@ -37,8 +29,8 @@ type SampleID struct {
 	Axis rsmt2d.Axis
 }
 
-// NewSampleID constructs a new SampleID.
-func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID {
+// NewShareSampleID constructs a new ShareSampleID.
+func NewShareSampleID(root *share.Root, idx int, axis rsmt2d.Axis) ShareSampleID {
 	sqrLn := len(root.RowRoots)
 	row, col := idx/sqrLn, idx%sqrLn
 	dahroot := root.RowRoots[row]
@@ -46,7 +38,7 @@ func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID {
 		dahroot = root.ColumnRoots[col]
 	}
 
-	return SampleID{
+	return ShareSampleID{
 		DataHash: root.Hash(),
 		AxisHash: dahroot,
 		Index:    idx,
@@ -54,38 +46,38 @@ func NewSampleID(root *share.Root, idx int, axis rsmt2d.Axis) SampleID {
 	}
 }
 
-// SampleIDFromCID coverts CID to SampleID.
-func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) {
+// ShareSampleIDFromCID converts CID to ShareSampleID.
+func ShareSampleIDFromCID(cid cid.Cid) (id ShareSampleID, err error) {
 	if err = validateCID(cid); err != nil {
 		return id, err
 	}
 
 	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
 	if err != nil {
-		return id, fmt.Errorf("while unmarhalling SampleID: %w", err)
+		return id, fmt.Errorf("while unmarshalling ShareSampleID: %w", err)
 	}
 
 	return id, nil
 }
 
 // Cid returns sample ID encoded as CID.
-func (s *SampleID) Cid() (cid.Cid, error) {
+func (s *ShareSampleID) Cid() (cid.Cid, error) {
 	data, err := s.MarshalBinary()
 	if err != nil {
 		return cid.Undef, err
 	}
 
-	buf, err := mh.Encode(data, multihashCode)
+	buf, err := mh.Encode(data, shareSamplingMultihashCode)
 	if err != nil {
 		return cid.Undef, err
 	}
 
-	return cid.NewCidV1(codec, buf), nil
+	return cid.NewCidV1(shareSamplingCodec, buf), nil
 }
 
-// Proto converts SampleID to its protobuf representation.
-func (s *SampleID) Proto() *ipldv2pb.SampleID {
-	return &ipldv2pb.SampleID{
+// Proto converts ShareSampleID to its protobuf representation.
+func (s *ShareSampleID) Proto() *ipldv2pb.ShareSampleID {
+	return &ipldv2pb.ShareSampleID{
 		DataHash: s.DataHash,
 		AxisHash: s.AxisHash,
 		Index:    uint32(s.Index),
@@ -93,10 +85,10 @@ func (s *SampleID) Proto() *ipldv2pb.SampleID {
 	}
 }
 
-// MarshalBinary encodes SampleID into binary form.
-func (s *SampleID) MarshalBinary() ([]byte, error) {
+// MarshalBinary encodes ShareSampleID into binary form.
+func (s *ShareSampleID) MarshalBinary() ([]byte, error) {
 	// we cannot use protobuf here because it exceeds multihash limit of 128 bytes
-	data := make([]byte, SampleIDSize)
+	data := make([]byte, ShareSampleIDSize)
 	n := copy(data, s.DataHash)
 	n += copy(data[n:], s.AxisHash)
 	binary.LittleEndian.PutUint32(data[n:], uint32(s.Index))
@@ -104,10 +96,10 @@ func (s *SampleID) MarshalBinary() ([]byte, error) {
 	return data, nil
 }
 
-// UnmarshalBinary decodes SampleID from binary form.
-func (s *SampleID) UnmarshalBinary(data []byte) error {
-	if len(data) != SampleIDSize {
-		return fmt.Errorf("incorrect sample id size: %d != %d", len(data), SampleIDSize)
+// UnmarshalBinary decodes ShareSampleID from binary form.
+func (s *ShareSampleID) UnmarshalBinary(data []byte) error {
+	if len(data) != ShareSampleIDSize {
+		return fmt.Errorf("incorrect ShareSampleID size: %d != %d", len(data), ShareSampleIDSize)
 	}
 
 	// copying data to avoid slice aliasing
@@ -118,8 +110,8 @@ func (s *SampleID) UnmarshalBinary(data []byte) error {
 	return nil
 }
 
-// Validate validates fields of SampleID.
-func (s *SampleID) Validate() error {
+// Validate validates fields of ShareSampleID.
+func (s *ShareSampleID) Validate() error { if len(s.DataHash) != hashSize { return fmt.Errorf("incorrect DataHash size: %d != %d", len(s.DataHash), hashSize) } diff --git a/share/ipldv2/share_sample_id_test.go b/share/ipldv2/share_sample_id_test.go new file mode 100644 index 0000000000..dc015cd72b --- /dev/null +++ b/share/ipldv2/share_sample_id_test.go @@ -0,0 +1,39 @@ +package ipldv2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestShareSampleID(t *testing.T) { + square := edstest.RandEDS(t, 2) + root, err := share.NewRoot(square) + require.NoError(t, err) + + sid := NewShareSampleID(root, 2, rsmt2d.Row) + + id, err := sid.Cid() + require.NoError(t, err) + + assert.EqualValues(t, shareSamplingCodec, id.Prefix().Codec) + assert.EqualValues(t, shareSamplingMultihashCode, id.Prefix().MhType) + assert.EqualValues(t, ShareSampleIDSize, id.Prefix().MhLength) + + data, err := sid.MarshalBinary() + require.NoError(t, err) + + sidOut := ShareSampleID{} + err = sidOut.UnmarshalBinary(data) + require.NoError(t, err) + assert.EqualValues(t, sid, sidOut) + + err = sidOut.Validate() + require.NoError(t, err) +} diff --git a/share/ipldv2/sample_test.go b/share/ipldv2/share_sample_test.go similarity index 83% rename from share/ipldv2/sample_test.go rename to share/ipldv2/share_sample_test.go index 878fca8c17..b83879d72c 100644 --- a/share/ipldv2/sample_test.go +++ b/share/ipldv2/share_sample_test.go @@ -11,10 +11,10 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -func TestSample(t *testing.T) { +func TestShareSample(t *testing.T) { square := edstest.RandEDS(t, 2) - sid, err := NewSampleFromEDS(square, 2, rsmt2d.Row) + sid, err := NewShareSampleFromEDS(square, 2, rsmt2d.Row) require.NoError(t, err) data, err := sid.MarshalBinary() @@ -27,7 +27,7 @@ func TestSample(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, blk.Cid(), cid) - sidOut := &Sample{} + sidOut := &ShareSample{} err = sidOut.UnmarshalBinary(data) require.NoError(t, err) assert.EqualValues(t, sid, sidOut) From 041ed3d27b77e9055ae0312c15e4f3dc67b35e18 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 1 Oct 2023 13:58:29 +0200 Subject: [PATCH 012/132] introduce File interface and decouple ipldv2 tests from on disk file --- share/eds/file.go | 49 +++++++++++----------- share/eds/file_store.go | 2 +- share/eds/file_test.go | 16 ------- share/eds/ods_file.go | 74 +++++++++++++++++++++++++++++++++ share/ipldv2/axis_sample.go | 2 +- share/ipldv2/blockstore.go | 20 ++------- share/ipldv2/blockstore_test.go | 33 +++++---------- share/ipldv2/ipldv2_test.go | 38 ++++------------- 8 files changed, 122 insertions(+), 112 deletions(-) create mode 100644 share/eds/ods_file.go diff --git a/share/eds/file.go b/share/eds/file.go index 69d7a4bc37..f353b13002 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -14,6 +14,15 @@ import ( "github.com/celestiaorg/celestia-node/share" ) +type File interface { + io.Closer + Size() int + ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) + Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) + AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) + EDS() (*rsmt2d.ExtendedDataSquare, error) +} + type FileConfig struct { Version FileVersion Compression FileCompression @@ -23,13 +32,13 @@ type FileConfig struct { // TODO: Add codec } 
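+// Note: the mmap-backed LazyFile below and the in-memory MemFile from
+// ods_file.go both satisfy the File interface above; that is what decouples
+// the ipldv2 tests from an on-disk file.
+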
-// File +// LazyFile // * immutable // * versionable // TODO: // - Cache Rows and Cols // - Avoid storing constant shares, like padding -type File struct { +type LazyFile struct { path string hdr *Header fl fileBackend @@ -40,7 +49,7 @@ type fileBackend interface { io.Closer } -func OpenFile(path string) (*File, error) { +func OpenFile(path string) (*LazyFile, error) { f, err := mmap.Open(path) if err != nil { return nil, err @@ -52,14 +61,14 @@ func OpenFile(path string) (*File, error) { } // TODO(WWondertan): Validate header - return &File{ + return &LazyFile{ path: path, hdr: h, fl: f, }, nil } -func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare, cfgs ...FileConfig) (*File, error) { +func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare, cfgs ...FileConfig) (*LazyFile, error) { f, err := os.Create(path) if err != nil { return nil, err @@ -94,22 +103,26 @@ func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare, cfgs ...FileConfig) } } - return &File{ + return &LazyFile{ path: path, fl: f, hdr: h, }, f.Sync() } -func (f *File) Close() error { +func (f *LazyFile) Size() int { + return f.hdr.SquareSize() +} + +func (f *LazyFile) Close() error { return f.fl.Close() } -func (f *File) Header() *Header { +func (f *LazyFile) Header() *Header { return f.hdr } -func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { +func (f *LazyFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { shrLn := int(f.hdr.shareSize) sqrLn := int(f.hdr.squareSize) if f.Header().Config().Mode == ODSMode { @@ -156,7 +169,7 @@ func (f *File) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { return shrs, nil } -func (f *File) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { +func (f *LazyFile) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { // TODO(@Wondertan): this has to read directly from the file, avoiding recompute fullAxis, err := f.Axis(idx, axis) if err != nil { @@ -166,19 +179,7 @@ func (f *File) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { return fullAxis[:len(fullAxis)/2], nil } -func (f *File) Share(idx int) (share.Share, error) { - // TODO: Check the cache first - shrLn := int64(f.hdr.shareSize) - - offset := int64(idx)*shrLn + HeaderSize - shr := make(share.Share, shrLn) - if _, err := f.fl.ReadAt(shr, offset); err != nil { - return nil, err - } - return shr, nil -} - -func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { +func (f *LazyFile) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { // TODO: Cache the axis as well as computed tree sqrLn := int(f.hdr.squareSize) axsIdx, shrIdx := idx/sqrLn, idx%sqrLn @@ -207,7 +208,7 @@ func (f *File) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof return shrs[shrIdx], proof, nil } -func (f *File) EDS() (*rsmt2d.ExtendedDataSquare, error) { +func (f *LazyFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { shrLn := int(f.hdr.shareSize) sqrLn := int(f.hdr.squareSize) if f.Header().Config().Mode == ODSMode { diff --git a/share/eds/file_store.go b/share/eds/file_store.go index efbf968fed..d580bdc43d 100644 --- a/share/eds/file_store.go +++ b/share/eds/file_store.go @@ -6,7 +6,7 @@ type FileStore struct { baspath string } -func (fs *FileStore) File(hash share.DataHash) (*File, error) { +func (fs *FileStore) File(hash share.DataHash) (File, error) { // TODO(@Wondertan): Caching return OpenFile(fs.baspath + "/" + hash.String()) } diff --git a/share/eds/file_test.go b/share/eds/file_test.go index 
901158f030..53e1bad8c8 100644 --- a/share/eds/file_test.go +++ b/share/eds/file_test.go @@ -54,10 +54,6 @@ func TestFile(t *testing.T) { for _, axis := range axis { for i := 0; i < width*width; i++ { row, col := uint(i/width), uint(i%width) - shr, err := fl.Share(i) - require.NoError(t, err) - assert.EqualValues(t, eds.GetCell(row, col), shr) - shr, prf, err := fl.ShareWithProof(i, axis) require.NoError(t, err) assert.EqualValues(t, eds.GetCell(row, col), shr) @@ -84,15 +80,3 @@ func TestFile(t *testing.T) { err = fl.Close() require.NoError(t, err) } - -// TODO(@Wondertan): Should be a method on eds -func getAxis(idx int, axis rsmt2d.Axis, eds *rsmt2d.ExtendedDataSquare) [][]byte { - switch axis { - case rsmt2d.Row: - return eds.Row(uint(idx)) - case rsmt2d.Col: - return eds.Col(uint(idx)) - default: - panic("") - } -} diff --git a/share/eds/ods_file.go b/share/eds/ods_file.go new file mode 100644 index 0000000000..6930174cc1 --- /dev/null +++ b/share/eds/ods_file.go @@ -0,0 +1,74 @@ +package eds + +import ( + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" +) + +type MemFile struct { + Eds *rsmt2d.ExtendedDataSquare +} + +func (f *MemFile) Close() error { + return nil +} + +func (f *MemFile) Size() int { + return int(f.Eds.Width()) +} + +func (f *MemFile) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { + sqrLn := f.Size() + axsIdx, shrIdx := idx/sqrLn, idx%sqrLn + if axis == rsmt2d.Col { + axsIdx, shrIdx = shrIdx, axsIdx + } + + shrs, err := f.Axis(axsIdx, axis) + if err != nil { + return nil, nmt.Proof{}, err + } + + // TODO(@Wondartan): this must access cached NMT on EDS instead of computing a new one + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axsIdx)) + for _, shr := range shrs { + err = tree.Push(shr) + if err != nil { + return nil, nmt.Proof{}, err + } + } + + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return nil, nmt.Proof{}, err + } + + return shrs[shrIdx], proof, nil +} + +func (f *MemFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { + return getAxis(idx, axis, f.Eds), nil +} + +func (f *MemFile) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { + return getAxis(idx, axis, f.Eds)[:f.Size()/2], nil +} + +func (f *MemFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { + return f.Eds, nil +} + +// TODO(@Wondertan): Should be a method on eds +func getAxis(idx int, axis rsmt2d.Axis, eds *rsmt2d.ExtendedDataSquare) [][]byte { + switch axis { + case rsmt2d.Row: + return eds.Row(uint(idx)) + case rsmt2d.Col: + return eds.Col(uint(idx)) + default: + panic("unknown axis") + } +} diff --git a/share/ipldv2/axis_sample.go b/share/ipldv2/axis_sample.go index b9379278ce..75bed9b0c1 100644 --- a/share/ipldv2/axis_sample.go +++ b/share/ipldv2/axis_sample.go @@ -134,7 +134,7 @@ func (s *AxisSample) Validate() error { } s.AxisHalf = append(s.AxisHalf, parity...) 
- tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)), uint(s.ID.Index)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.ID.Index)) for _, shr := range s.AxisHalf { err := tree.Push(shr) if err != nil { diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go index a6c0f4e46b..6ac55b358b 100644 --- a/share/ipldv2/blockstore.go +++ b/share/ipldv2/blockstore.go @@ -3,38 +3,26 @@ package ipldv2 import ( "context" "fmt" - "io" "github.com/ipfs/boxo/blockstore" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" ) -// edsFile is a mocking friendly local interface over eds.File. -// TODO(@Wondertan): Consider making an actual interface of eds pkg -type edsFile interface { - io.Closer - Size() int - ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) - AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) -} - // fileStore is a mocking friendly local interface over eds.FileStore // TODO(@Wondertan): Consider making an actual interface of eds pkg -type fileStore[F edsFile] interface { +type fileStore[F eds.File] interface { File(share.DataHash) (F, error) } -type Blockstore[F edsFile] struct { +type Blockstore[F eds.File] struct { fs fileStore[F] } -func NewBlockstore[F edsFile](fs fileStore[F]) blockstore.Blockstore { +func NewBlockstore[F eds.File](fs fileStore[F]) blockstore.Blockstore { return &Blockstore[F]{fs} } diff --git a/share/ipldv2/blockstore_test.go b/share/ipldv2/blockstore_test.go index 044378aa4e..aef1c7e5b6 100644 --- a/share/ipldv2/blockstore_test.go +++ b/share/ipldv2/blockstore_test.go @@ -4,10 +4,10 @@ import ( "context" "testing" + "github.com/ipfs/boxo/blockstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -15,16 +15,15 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -func TestBlockstoreGet(t *testing.T) { +// TODO(@Wondertan): Add axis sampling code + +func TestBlockstoreGetShareSample(t *testing.T) { ctx := context.Background() sqr := edstest.RandEDS(t, 4) root, err := share.NewRoot(sqr) require.NoError(t, err) - path := t.TempDir() + "/eds_file" - f, err := eds.CreateFile(path, sqr) - require.NoError(t, err) - b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + b := edsBlockstore(sqr) axis := []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} width := int(sqr.Width()) @@ -47,24 +46,12 @@ func TestBlockstoreGet(t *testing.T) { } } -type edsFileAndFS eds.File - -func (m *edsFileAndFS) File(share.DataHash) (*edsFileAndFS, error) { - return m, nil -} - -func (m *edsFileAndFS) Size() int { - return (*eds.File)(m).Header().SquareSize() -} - -func (m *edsFileAndFS) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { - return (*eds.File)(m).ShareWithProof(idx, axis) -} +type edsFileAndFS eds.MemFile -func (m *edsFileAndFS) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { - return (*eds.File)(m).AxisHalf(idx, axis) +func (m *edsFileAndFS) File(share.DataHash) (*eds.MemFile, error) { + return (*eds.MemFile)(m), nil } -func (m *edsFileAndFS) Close() error { - return nil +func edsBlockstore(sqr *rsmt2d.ExtendedDataSquare) blockstore.Blockstore { + return NewBlockstore[*eds.MemFile]((*edsFileAndFS)(&eds.MemFile{Eds: sqr})) } 
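The pieces above are enough to exercise the whole retrieval path end to end. A minimal sketch, assuming only names visible in this patch (edsBlockstore, NewShareSampleID, ShareSampleFromBlock) plus the context/testify/edstest imports the surrounding tests already use; it is an illustration, not a file in the diff:

	func TestBlockstoreServesShareSample(t *testing.T) {
		// Build a random 8x8 extended square and its row/column roots.
		sqr := edstest.RandEDS(t, 8)
		root, err := share.NewRoot(sqr)
		require.NoError(t, err)

		// Wrap the square in the generic Blockstore via the in-memory MemFile.
		bs := edsBlockstore(sqr)

		// Derive the CID of the first share of the first row and fetch it back.
		id := NewShareSampleID(root, 0, rsmt2d.Row)
		c, err := id.Cid()
		require.NoError(t, err)

		blk, err := bs.Get(context.Background(), c)
		require.NoError(t, err)

		// The raw block data round-trips into a self-validating ShareSample.
		smpl, err := ShareSampleFromBlock(blk)
		require.NoError(t, err)
		require.NoError(t, smpl.Validate())
	}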
diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index d32fe39e61..e2fc8d95c8 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -20,7 +20,6 @@ import ( "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) @@ -31,13 +30,7 @@ func TestShareSampleRoundtripGetBlock(t *testing.T) { defer cancel() sqr := edstest.RandEDS(t, 8) - - path := t.TempDir() + "/eds_file" - f, err := eds.CreateFile(path, sqr) - require.NoError(t, err) - defer f.Close() - - b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} @@ -68,13 +61,7 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { defer cancel() sqr := edstest.RandEDS(t, 8) // TODO(@Wondertan): does not work with more than 8 for some reasong - - path := t.TempDir() + "/eds_file" - f, err := eds.CreateFile(path, sqr) - require.NoError(t, err) - defer f.Close() - - b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) set := cid.NewSet() @@ -93,7 +80,7 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { } blks := client.GetBlocks(ctx, set.Keys()) - err = set.ForEach(func(c cid.Cid) error { + err := set.ForEach(func(c cid.Cid) error { select { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) @@ -112,17 +99,11 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { } func TestAxisSampleRoundtripGetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10000) defer cancel() sqr := edstest.RandEDS(t, 8) - - path := t.TempDir() + "/eds_file" - f, err := eds.CreateFile(path, sqr) - require.NoError(t, err) - defer f.Close() - - b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} @@ -153,12 +134,7 @@ func TestAxisSampleRoundtripGetBlocks(t *testing.T) { defer cancel() sqr := edstest.RandEDS(t, 16) - - path := t.TempDir() + "/eds_file" - f, err := eds.CreateFile(path, sqr) - require.NoError(t, err) - - b := NewBlockstore[*edsFileAndFS]((*edsFileAndFS)(f)) + b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) set := cid.NewSet() @@ -177,7 +153,7 @@ func TestAxisSampleRoundtripGetBlocks(t *testing.T) { } blks := client.GetBlocks(ctx, set.Keys()) - err = set.ForEach(func(c cid.Cid) error { + err := set.ForEach(func(c cid.Cid) error { select { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) From 16014600b7874cc29ff523fc768d89a410a0d555 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 19 Oct 2023 22:16:54 +0200 Subject: [PATCH 013/132] use height as block id --- share/ipldv2/axis_sample.go | 18 +-- share/ipldv2/axis_sample_hasher_test.go | 2 +- share/ipldv2/axis_sample_id.go | 45 ++++---- share/ipldv2/axis_sample_id_test.go | 2 +- share/ipldv2/axis_sample_test.go | 2 +- share/ipldv2/blockstore.go | 15 ++- share/ipldv2/blockstore_test.go | 4 +- share/ipldv2/ipldv2_test.go | 8 +- share/ipldv2/pb/ipldv2pb.pb.go | 134 +++++++++-------------- share/ipldv2/pb/ipldv2pb.proto | 4 +- share/ipldv2/share_sample.go | 18 +-- share/ipldv2/share_sample_hasher_test.go | 2 +- share/ipldv2/share_sample_id.go | 41 ++++--- share/ipldv2/share_sample_id_test.go | 2 +- share/ipldv2/share_sample_test.go | 2 +- 15 files changed, 130 insertions(+), 
169 deletions(-) diff --git a/share/ipldv2/axis_sample.go b/share/ipldv2/axis_sample.go index 75bed9b0c1..9903a0af21 100644 --- a/share/ipldv2/axis_sample.go +++ b/share/ipldv2/axis_sample.go @@ -26,14 +26,13 @@ func NewAxisSample(id AxisSampleID, axisHalf []share.Share) *AxisSample { } } -// NewAxisSampleFrom constructs a new AxisSample from share.Root. -func NewAxisSampleFrom(root *share.Root, idx int, axis rsmt2d.Axis, axisHalf []share.Share) *AxisSample { - id := NewAxisSampleID(root, idx, axis) - return NewAxisSample(id, axisHalf) -} - // NewAxisSampleFromEDS samples the EDS and constructs a new AxisSample. -func NewAxisSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*AxisSample, error) { +func NewAxisSampleFromEDS( + height uint64, + eds *rsmt2d.ExtendedDataSquare, + idx int, + axis rsmt2d.Axis, +) (*AxisSample, error) { sqrLn := int(eds.Width()) // TODO(@Wondertan): Should be an rsmt2d method @@ -52,7 +51,8 @@ func NewAxisSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.A return nil, fmt.Errorf("while computing root: %w", err) } - return NewAxisSampleFrom(root, idx, axis, axisHalf), nil + id := NewAxisSampleID(height, root, idx, axis) + return NewAxisSample(id, axisHalf), nil } // Proto converts AxisSample to its protobuf representation. @@ -106,7 +106,7 @@ func (s *AxisSample) UnmarshalBinary(data []byte) error { } s.ID = AxisSampleID{ - DataHash: proto.Id.DataHash, + Height: proto.Id.Height, AxisHash: proto.Id.AxisHash, Index: int(proto.Id.Index), Axis: rsmt2d.Axis(proto.Id.Axis), diff --git a/share/ipldv2/axis_sample_hasher_test.go b/share/ipldv2/axis_sample_hasher_test.go index c9d54353f1..d41c46d66a 100644 --- a/share/ipldv2/axis_sample_hasher_test.go +++ b/share/ipldv2/axis_sample_hasher_test.go @@ -19,7 +19,7 @@ func TestAxisSampleHasher(t *testing.T) { square := edstest.RandEDS(t, 2) - sample, err := NewAxisSampleFromEDS(square, 2, rsmt2d.Row) + sample, err := NewAxisSampleFromEDS(1, square, 2, rsmt2d.Row) require.NoError(t, err) data, err := sample.MarshalBinary() diff --git a/share/ipldv2/axis_sample_id.go b/share/ipldv2/axis_sample_id.go index 4b1e444e73..d10ba4cf41 100644 --- a/share/ipldv2/axis_sample_id.go +++ b/share/ipldv2/axis_sample_id.go @@ -14,30 +14,30 @@ import ( ) // AxisSampleIDSize is the size of the AxisSampleID in bytes -const AxisSampleIDSize = 127 +const AxisSampleIDSize = 103 // AxisSampleID is an unique identifier of a AxisSample. type AxisSampleID struct { - // DataHash is the root of the data square - // Needed to identify the data square in the whole chain - DataHash share.DataHash + // Height of the block. + // Needed to identify block's data square in the whole chain + Height uint64 // AxisHash is the Col or AxisSample root from DAH of the data square AxisHash []byte - // Index is the index of the sample in the data square(not row or col index) + // Index is the index of the axis(row, col) in the data square Index int // Axis is Col or AxisSample axis of the sample in the data square Axis rsmt2d.Axis } // NewAxisSampleID constructs a new AxisSampleID. 
-func NewAxisSampleID(root *share.Root, idx int, axis rsmt2d.Axis) AxisSampleID { +func NewAxisSampleID(height uint64, root *share.Root, idx int, axis rsmt2d.Axis) AxisSampleID { dahroot := root.RowRoots[idx] if axis == rsmt2d.Col { dahroot = root.ColumnRoots[idx] } return AxisSampleID{ - DataHash: root.Hash(), + Height: height, AxisHash: dahroot, Index: idx, Axis: axis, @@ -76,7 +76,7 @@ func (s *AxisSampleID) Cid() (cid.Cid, error) { // Proto converts AxisSampleID to its protobuf representation. func (s *AxisSampleID) Proto() *ipldv2pb.AxisSampleID { return &ipldv2pb.AxisSampleID{ - DataHash: s.DataHash, + Height: s.Height, AxisHash: s.AxisHash, Index: uint32(s.Index), Axis: ipldv2pb.Axis(s.Axis), @@ -86,36 +86,35 @@ func (s *AxisSampleID) Proto() *ipldv2pb.AxisSampleID { // MarshalBinary encodes AxisSampleID into binary form. func (s *AxisSampleID) MarshalBinary() ([]byte, error) { // we cannot use protobuf here because it exceeds multihash limit of 128 bytes - data := make([]byte, ShareSampleIDSize) - n := copy(data, s.DataHash) - n += copy(data[n:], s.AxisHash) - binary.LittleEndian.PutUint32(data[n:], uint32(s.Index)) - data[n+4] = byte(s.Axis) + data := make([]byte, 0, AxisSampleIDSize) + data = binary.LittleEndian.AppendUint64(data, s.Height) + data = append(data, s.AxisHash...) + data = binary.LittleEndian.AppendUint32(data, uint32(s.Index)) + data = append(data, byte(s.Axis)) return data, nil } // UnmarshalBinary decodes AxisSampleID from binary form. func (s *AxisSampleID) UnmarshalBinary(data []byte) error { - if len(data) != ShareSampleIDSize { - return fmt.Errorf("incorrect sample id size: %d != %d", len(data), ShareSampleIDSize) + if len(data) != AxisSampleIDSize { + return fmt.Errorf("incorrect sample id size: %d != %d", len(data), AxisSampleIDSize) } - // copying data to avoid slice aliasing - s.DataHash = append(s.DataHash, data[:hashSize]...) - s.AxisHash = append(s.AxisHash, data[hashSize:hashSize+dahRootSize]...) - s.Index = int(binary.LittleEndian.Uint32(data[hashSize+dahRootSize : hashSize+dahRootSize+4])) - s.Axis = rsmt2d.Axis(data[hashSize+dahRootSize+4]) + s.Height = binary.LittleEndian.Uint64(data) + s.AxisHash = append(s.AxisHash, data[8:8+dahRootSize]...) // copying data to avoid slice aliasing + s.Index = int(binary.LittleEndian.Uint32(data[8+dahRootSize : 8+dahRootSize+4])) + s.Axis = rsmt2d.Axis(data[8+dahRootSize+4]) return nil } // Validate validates fields of AxisSampleID. 
func (s *AxisSampleID) Validate() error { - if len(s.DataHash) != hashSize { - return fmt.Errorf("incorrect DataHash size: %d != %d", len(s.DataHash), hashSize) + if s.Height == 0 { + return fmt.Errorf("zero Height") } if len(s.AxisHash) != dahRootSize { - return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), hashSize) + return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), dahRootSize) } if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row { diff --git a/share/ipldv2/axis_sample_id_test.go b/share/ipldv2/axis_sample_id_test.go index a520f636ec..0cf0baff7a 100644 --- a/share/ipldv2/axis_sample_id_test.go +++ b/share/ipldv2/axis_sample_id_test.go @@ -17,7 +17,7 @@ func TestAxisSampleID(t *testing.T) { root, err := share.NewRoot(square) require.NoError(t, err) - sid := NewAxisSampleID(root, 2, rsmt2d.Row) + sid := NewAxisSampleID(1, root, 2, rsmt2d.Row) id, err := sid.Cid() require.NoError(t, err) diff --git a/share/ipldv2/axis_sample_test.go b/share/ipldv2/axis_sample_test.go index 7ed017eaee..c7b777e73c 100644 --- a/share/ipldv2/axis_sample_test.go +++ b/share/ipldv2/axis_sample_test.go @@ -14,7 +14,7 @@ import ( func TestAxisSample(t *testing.T) { square := edstest.RandEDS(t, 2) - aid, err := NewAxisSampleFromEDS(square, 2, rsmt2d.Row) + aid, err := NewAxisSampleFromEDS(1, square, 2, rsmt2d.Row) require.NoError(t, err) data, err := aid.MarshalBinary() diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go index 6ac55b358b..ae7104b071 100644 --- a/share/ipldv2/blockstore.go +++ b/share/ipldv2/blockstore.go @@ -8,14 +8,13 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" ) // fileStore is a mocking friendly local interface over eds.FileStore // TODO(@Wondertan): Consider making an actual interface of eds pkg type fileStore[F eds.File] interface { - File(share.DataHash) (F, error) + File(height uint64) (F, error) } type Blockstore[F eds.File] struct { @@ -64,7 +63,7 @@ func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) } func (b Blockstore[F]) getShareSampleBlock(id ShareSampleID) (blocks.Block, error) { - f, err := b.fs.File(id.DataHash) + f, err := b.fs.File(id.Height) if err != nil { return nil, fmt.Errorf("while getting EDS file from FS: %w", err) } @@ -89,7 +88,7 @@ func (b Blockstore[F]) getShareSampleBlock(id ShareSampleID) (blocks.Block, erro } func (b Blockstore[F]) getAxisSampleBlock(id AxisSampleID) (blocks.Block, error) { - f, err := b.fs.File(id.DataHash) + f, err := b.fs.File(id.Height) if err != nil { return nil, fmt.Errorf("while getting EDS file from FS: %w", err) } @@ -127,7 +126,7 @@ func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { } func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { - var datahash share.DataHash + var height uint64 switch cid.Type() { case shareSamplingCodec: id, err := ShareSampleIDFromCID(cid) @@ -137,7 +136,7 @@ func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { return false, err } - datahash = id.DataHash + height = id.Height case axisSamplingCodec: id, err := AxisSampleIDFromCID(cid) if err != nil { @@ -146,12 +145,12 @@ func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { return false, err } - datahash = id.DataHash + height = id.Height default: return false, fmt.Errorf("unsupported codec") } - f, err := b.fs.File(datahash) + f, err := b.fs.File(height) 
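+	// (The store is keyed by block height now; both ShareSampleID and
+	// AxisSampleID carry the height in place of the old data hash.)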
if err != nil { err = fmt.Errorf("while getting EDS file from FS: %w", err) log.Error(err) diff --git a/share/ipldv2/blockstore_test.go b/share/ipldv2/blockstore_test.go index aef1c7e5b6..950d113490 100644 --- a/share/ipldv2/blockstore_test.go +++ b/share/ipldv2/blockstore_test.go @@ -29,7 +29,7 @@ func TestBlockstoreGetShareSample(t *testing.T) { width := int(sqr.Width()) for _, axis := range axis { for i := 0; i < width*width; i++ { - id := NewShareSampleID(root, i, axis) + id := NewShareSampleID(1, root, i, axis) cid, err := id.Cid() require.NoError(t, err) @@ -48,7 +48,7 @@ func TestBlockstoreGetShareSample(t *testing.T) { type edsFileAndFS eds.MemFile -func (m *edsFileAndFS) File(share.DataHash) (*eds.MemFile, error) { +func (m *edsFileAndFS) File(uint64) (*eds.MemFile, error) { return (*eds.MemFile)(m), nil } diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index e2fc8d95c8..4e0a90f1ec 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -37,7 +37,7 @@ func TestShareSampleRoundtripGetBlock(t *testing.T) { width := int(sqr.Width()) for _, axis := range axis { for i := 0; i < width*width; i++ { - smpl, err := NewShareSampleFromEDS(sqr, i, axis) + smpl, err := NewShareSampleFromEDS(1, sqr, i, axis) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -69,7 +69,7 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { width := int(sqr.Width()) for _, axis := range axis { for i := 0; i < width*width; i++ { - smpl, err := NewShareSampleFromEDS(sqr, i, axis) + smpl, err := NewShareSampleFromEDS(1, sqr, i, axis) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -110,7 +110,7 @@ func TestAxisSampleRoundtripGetBlock(t *testing.T) { width := int(sqr.Width()) for _, axis := range axis { for i := 0; i < width; i++ { - smpl, err := NewAxisSampleFromEDS(sqr, i, axis) + smpl, err := NewAxisSampleFromEDS(1, sqr, i, axis) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -142,7 +142,7 @@ func TestAxisSampleRoundtripGetBlocks(t *testing.T) { width := int(sqr.Width()) for _, axis := range axis { for i := 0; i < width; i++ { - smpl, err := NewAxisSampleFromEDS(sqr, i, axis) + smpl, err := NewAxisSampleFromEDS(1, sqr, i, axis) require.NoError(t, err) cid, err := smpl.ID.Cid() diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index 9af1d1f9b4..cf10ca06d8 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -74,7 +74,7 @@ func (ShareSampleType) EnumDescriptor() ([]byte, []int) { } type ShareSampleID struct { - DataHash []byte `protobuf:"bytes,1,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` @@ -113,11 +113,11 @@ func (m *ShareSampleID) XXX_DiscardUnknown() { var xxx_messageInfo_ShareSampleID proto.InternalMessageInfo -func (m *ShareSampleID) GetDataHash() []byte { +func (m *ShareSampleID) GetHeight() uint64 { if m != nil { - return m.DataHash + return m.Height } - return nil + return 0 } func (m *ShareSampleID) GetAxisHash() []byte { @@ -210,7 +210,7 @@ func (m *ShareSample) GetProof() *pb.Proof { } type AxisSampleID struct { - DataHash []byte `protobuf:"bytes,1,opt,name=data_hash,json=dataHash,proto3" 
json:"data_hash,omitempty"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` @@ -249,11 +249,11 @@ func (m *AxisSampleID) XXX_DiscardUnknown() { var xxx_messageInfo_AxisSampleID proto.InternalMessageInfo -func (m *AxisSampleID) GetDataHash() []byte { +func (m *AxisSampleID) GetHeight() uint64 { if m != nil { - return m.DataHash + return m.Height } - return nil + return 0 } func (m *AxisSampleID) GetAxisHash() []byte { @@ -341,29 +341,29 @@ func init() { func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 347 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x92, 0xc1, 0x4e, 0xfa, 0x40, - 0x10, 0xc6, 0xbb, 0xa5, 0xf0, 0xe7, 0x3f, 0x40, 0x69, 0x36, 0x1e, 0xaa, 0xc6, 0x4d, 0x43, 0x34, - 0x12, 0x0e, 0x25, 0xa9, 0x4f, 0xa0, 0x72, 0xc0, 0x1b, 0x59, 0xbc, 0x9b, 0x6d, 0x5a, 0xd2, 0x4d, - 0xaa, 0xbb, 0x69, 0x1b, 0xa5, 0x3e, 0x05, 0x8f, 0xe5, 0x91, 0xa3, 0x47, 0x03, 0x2f, 0x62, 0x76, - 0x97, 0x84, 0xea, 0x0b, 0x78, 0x9b, 0x99, 0x6f, 0x26, 0xdf, 0xb7, 0xbf, 0x2c, 0x90, 0x32, 0x63, - 0x45, 0x3a, 0xe5, 0x32, 0x4f, 0x5e, 0xa3, 0xa9, 0x8c, 0x0f, 0x95, 0x8c, 0x43, 0x59, 0x88, 0x4a, - 0x9c, 0xb9, 0x32, 0x9e, 0xca, 0x42, 0x88, 0x95, 0xe9, 0x47, 0xef, 0x30, 0x58, 0xaa, 0x8b, 0x25, - 0x7b, 0x96, 0x79, 0xfa, 0x30, 0xc3, 0xe7, 0xf0, 0x3f, 0x61, 0x15, 0x7b, 0xca, 0x58, 0x99, 0xf9, - 0x28, 0x40, 0xe3, 0x3e, 0xed, 0xaa, 0xc1, 0x9c, 0x95, 0x99, 0x12, 0xd9, 0x9a, 0x97, 0x46, 0xb4, - 0x8d, 0xa8, 0x06, 0x5a, 0x3c, 0x81, 0x36, 0x7f, 0x49, 0xd2, 0xb5, 0xdf, 0x0a, 0xd0, 0x78, 0x40, - 0x4d, 0x83, 0x4f, 0xc1, 0x51, 0x1b, 0xbe, 0x13, 0xa0, 0xb1, 0x1b, 0xb5, 0xc3, 0xdb, 0x35, 0x2f, - 0xa9, 0x1e, 0x8d, 0x36, 0x08, 0x7a, 0x0d, 0x73, 0x4c, 0xc0, 0xe6, 0x89, 0xf6, 0xec, 0x45, 0x6e, - 0xf8, 0x23, 0x16, 0xb5, 0x79, 0x82, 0x2f, 0xc1, 0xa9, 0x6a, 0x99, 0x6a, 0x63, 0x37, 0xf2, 0x9a, - 0x1b, 0x8f, 0xb5, 0x4c, 0xa9, 0x56, 0x55, 0x0c, 0xcd, 0x40, 0xc7, 0xe8, 0x53, 0xd3, 0xe0, 0x2b, - 0x68, 0xeb, 0x67, 0xeb, 0x1c, 0xbd, 0x68, 0x18, 0x1e, 0x20, 0xc4, 0xe1, 0x42, 0x15, 0xd4, 0xa8, - 0xa3, 0x1a, 0xfa, 0x2a, 0xe0, 0x5f, 0xd0, 0x98, 0x03, 0x1c, 0xad, 0xf1, 0x45, 0x83, 0xc5, 0x20, - 0x6c, 0x66, 0xd2, 0x28, 0x8e, 0xd6, 0xf9, 0xca, 0xb7, 0x83, 0xd6, 0xd1, 0x3a, 0x5f, 0x4d, 0x7c, - 0x70, 0xd4, 0x01, 0xfe, 0x07, 0x2d, 0x2a, 0xde, 0x3c, 0x4b, 0x15, 0xf7, 0x22, 0xf7, 0xd0, 0xe4, - 0x1a, 0x86, 0xbf, 0xa0, 0xe1, 0x2e, 0x38, 0x33, 0x56, 0x31, 0xcf, 0xc2, 0x00, 0x9d, 0x05, 0x2b, - 0x78, 0x55, 0x7b, 0xe8, 0xce, 0xff, 0xd8, 0x11, 0xb4, 0xdd, 0x11, 0xf4, 0xb5, 0x23, 0x68, 0xb3, - 0x27, 0xd6, 0x76, 0x4f, 0xac, 0xcf, 0x3d, 0xb1, 0xe2, 0x8e, 0xfe, 0x37, 0x37, 0xdf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xa1, 0x8c, 0xa7, 0x00, 0x69, 0x02, 0x00, 0x00, + // 351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x92, 0xc1, 0x4a, 0xfb, 0x40, + 0x10, 0xc6, 0xb3, 0x69, 0xda, 0x7f, 0xff, 0xd3, 0x36, 0x0d, 0x8b, 0xc8, 0xaa, 0xb8, 0x84, 0xa2, + 0x18, 0x7a, 0x48, 0x21, 0x3e, 0x81, 0xda, 0x43, 0xbd, 0x95, 0xad, 0x77, 0xd9, 0x90, 0xd4, 0x2c, + 0x44, 0xb3, 0x24, 0x51, 0xdb, 0xb7, 0xe8, 0x63, 0x79, 0xec, 0xd1, 0xa3, 0xb4, 0x2f, 0x22, 0xbb, + 0x29, 0x24, 0xfa, 0x00, 
0xde, 0xe6, 0xdb, 0x99, 0xe1, 0xfb, 0xcd, 0xc7, 0x02, 0x2d, 0x12, 0x9e, + 0xc7, 0x13, 0x21, 0xd3, 0xe8, 0x2d, 0x98, 0xc8, 0xf0, 0x50, 0xc9, 0xd0, 0x97, 0x79, 0x56, 0x66, + 0xa7, 0xb6, 0x0c, 0x27, 0x32, 0xcf, 0xb2, 0x65, 0xa5, 0x47, 0xaf, 0x30, 0x58, 0xa8, 0x8d, 0x05, + 0x7f, 0x96, 0x69, 0x7c, 0x3f, 0xc5, 0xc7, 0xd0, 0x49, 0x62, 0xf1, 0x94, 0x94, 0x04, 0xb9, 0xc8, + 0xb3, 0xd8, 0x41, 0xe1, 0x33, 0xf8, 0xcf, 0x57, 0xa2, 0x78, 0x4c, 0x78, 0x91, 0x10, 0xd3, 0x45, + 0x5e, 0x9f, 0x75, 0xd5, 0xc3, 0x8c, 0x17, 0x09, 0x3e, 0x82, 0xb6, 0x78, 0x89, 0xe2, 0x15, 0x69, + 0xb9, 0xc8, 0x1b, 0xb0, 0x4a, 0xe0, 0x13, 0xb0, 0xd4, 0x04, 0xb1, 0x5c, 0xe4, 0xd9, 0x41, 0xdb, + 0xbf, 0x59, 0x89, 0x82, 0xe9, 0xa7, 0xd1, 0x06, 0x41, 0xaf, 0xe1, 0x8b, 0x29, 0x98, 0x22, 0xd2, + 0x8e, 0xbd, 0xc0, 0xf6, 0x7f, 0x10, 0x31, 0x53, 0x44, 0xf8, 0x02, 0xac, 0x72, 0x2d, 0x63, 0x6d, + 0x6c, 0x07, 0x4e, 0x73, 0xe2, 0x61, 0x2d, 0x63, 0xa6, 0xbb, 0x0a, 0x43, 0x9f, 0xaf, 0x31, 0xfa, + 0xac, 0x12, 0xf8, 0x12, 0xda, 0xfa, 0x62, 0xcd, 0xd1, 0x0b, 0x86, 0xfe, 0xe1, 0xfe, 0xd0, 0x9f, + 0xab, 0x82, 0x55, 0xdd, 0x51, 0x09, 0x7d, 0x05, 0xf8, 0xc7, 0x41, 0xcc, 0x00, 0x6a, 0x57, 0x7c, + 0xde, 0x88, 0x61, 0xe0, 0x37, 0x71, 0x74, 0x0a, 0xb5, 0x75, 0xba, 0x24, 0xa6, 0xdb, 0xaa, 0xad, + 0xd3, 0xe5, 0x98, 0x80, 0xa5, 0x16, 0xf0, 0x3f, 0x68, 0xb1, 0xec, 0xdd, 0x31, 0x54, 0x71, 0x97, + 0xa5, 0x0e, 0x1a, 0x5f, 0xc1, 0xf0, 0x57, 0x5e, 0xb8, 0x0b, 0xd6, 0x94, 0x97, 0xdc, 0x31, 0x30, + 0x40, 0x67, 0xce, 0x73, 0x51, 0xae, 0x1d, 0x74, 0x4b, 0x3e, 0x76, 0x14, 0x6d, 0x77, 0x14, 0x7d, + 0xed, 0x28, 0xda, 0xec, 0xa9, 0xb1, 0xdd, 0x53, 0xe3, 0x73, 0x4f, 0x8d, 0xb0, 0xa3, 0x7f, 0xcb, + 0xf5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x6e, 0xfb, 0x99, 0x5f, 0x02, 0x00, 0x00, } func (m *ShareSampleID) Marshal() (dAtA []byte, err error) { @@ -403,12 +403,10 @@ func (m *ShareSampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.DataHash) > 0 { - i -= len(m.DataHash) - copy(dAtA[i:], m.DataHash) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataHash))) + if m.Height != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Height)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -509,12 +507,10 @@ func (m *AxisSampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.DataHash) > 0 { - i -= len(m.DataHash) - copy(dAtA[i:], m.DataHash) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataHash))) + if m.Height != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Height)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -580,9 +576,8 @@ func (m *ShareSampleID) Size() (n int) { } var l int _ = l - l = len(m.DataHash) - if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) + if m.Height != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Height)) } l = len(m.AxisHash) if l > 0 { @@ -627,9 +622,8 @@ func (m *AxisSampleID) Size() (n int) { } var l int _ = l - l = len(m.DataHash) - if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) + if m.Height != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Height)) } l = len(m.AxisHash) if l > 0 { @@ -699,10 +693,10 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var byteLen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -712,26 +706,11 @@ func (m *ShareSampleID) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) - if m.DataHash == nil { - m.DataHash = []byte{} - } - iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AxisHash", wireType) @@ -1030,10 +1009,10 @@ func (m *AxisSampleID) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var byteLen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -1043,26 +1022,11 @@ func (m *AxisSampleID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) - if m.DataHash == nil { - m.DataHash = []byte{} - } - iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AxisHash", wireType) diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto index c39127fa21..e9ee86f7b5 100644 --- a/share/ipldv2/pb/ipldv2pb.proto +++ b/share/ipldv2/pb/ipldv2pb.proto @@ -13,7 +13,7 @@ enum ShareSampleType { } message ShareSampleID{ - bytes data_hash = 1; + uint64 height = 1; bytes axis_hash = 2; // TODO(@Wondertan): Redundant, but has to be sent due to Bitswap's stateless verification requirement uint32 index = 3; Axis axis = 4; @@ -27,7 +27,7 @@ message ShareSample { } message AxisSampleID{ - bytes data_hash = 1; + uint64 height = 1; bytes axis_hash = 2; // TODO(@Wondertan): Redundant, but has to be sent due to Bitswap's stateless verification requirement uint32 index = 3; // TODO(@Wondertan): uint16 would be enough, but proto3 doest not support it Axis axis = 4; diff --git a/share/ipldv2/share_sample.go b/share/ipldv2/share_sample.go index e7e4557170..fee81560c2 100644 --- a/share/ipldv2/share_sample.go +++ b/share/ipldv2/share_sample.go @@ -54,14 +54,13 @@ func NewShareSample(id ShareSampleID, shr share.Share, proof nmt.Proof, sqrLn in } } -// NewShareSampleFrom constructs a new ShareSample from share.Root. -func NewShareSampleFrom(root *share.Root, idx int, axis rsmt2d.Axis, shr share.Share, proof nmt.Proof) *ShareSample { - id := NewShareSampleID(root, idx, axis) - return NewShareSample(id, shr, proof, len(root.RowRoots)) -} - // NewShareSampleFromEDS samples the EDS and constructs a new ShareSample. -func NewShareSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d.Axis) (*ShareSample, error) { +func NewShareSampleFromEDS( + height uint64, + eds *rsmt2d.ExtendedDataSquare, + idx int, + axis rsmt2d.Axis, +) (*ShareSample, error) { sqrLn := int(eds.Width()) axisIdx, shrIdx := idx/sqrLn, idx%sqrLn @@ -95,7 +94,8 @@ func NewShareSampleFromEDS(eds *rsmt2d.ExtendedDataSquare, idx int, axis rsmt2d. 
return nil, fmt.Errorf("while proving range share over NMT: %w", err)
 	}
 
-	return NewShareSampleFrom(root, idx, axis, shrs[shrIdx], prf), nil
+	id := NewShareSampleID(height, root, idx, axis)
+	return NewShareSample(id, shrs[shrIdx], prf, len(root.RowRoots)), nil
 }
 
 // Proto converts ShareSample to its protobuf representation.
@@ -159,7 +159,7 @@ func (s *ShareSample) UnmarshalBinary(data []byte) error {
 	}
 
 	s.ID = ShareSampleID{
-		DataHash: proto.Id.DataHash,
+		Height:   proto.Id.Height,
 		AxisHash: proto.Id.AxisHash,
 		Index:    int(proto.Id.Index),
 		Axis:     rsmt2d.Axis(proto.Id.Axis),
diff --git a/share/ipldv2/share_sample_hasher_test.go b/share/ipldv2/share_sample_hasher_test.go
index 8b6462b163..e4dac92a1a 100644
--- a/share/ipldv2/share_sample_hasher_test.go
+++ b/share/ipldv2/share_sample_hasher_test.go
@@ -19,7 +19,7 @@ func TestShareSampleHasher(t *testing.T) {
 
 	square := edstest.RandEDS(t, 2)
 
-	sample, err := NewShareSampleFromEDS(square, 2, rsmt2d.Row)
+	sample, err := NewShareSampleFromEDS(1, square, 2, rsmt2d.Row)
 	require.NoError(t, err)
 
 	data, err := sample.MarshalBinary()
diff --git a/share/ipldv2/share_sample_id.go b/share/ipldv2/share_sample_id.go
index 46293a16eb..956b5aca38 100644
--- a/share/ipldv2/share_sample_id.go
+++ b/share/ipldv2/share_sample_id.go
@@ -14,23 +14,23 @@ import (
 )
 
 // ShareSampleIDSize is the size of the ShareSampleID in bytes
-const ShareSampleIDSize = 127
+const ShareSampleIDSize = 103
 
 // ShareSampleID is a unique identifier of a ShareSample.
 type ShareSampleID struct {
-	// DataHash is the root of the data square
-	// Needed to identify the data square in the whole chain
-	DataHash share.DataHash
+	// Height of the block.
+	// Needed to identify block's data square in the whole chain
+	Height uint64
 	// AxisHash is the Col or Row root from DAH of the data square
 	AxisHash []byte
-	// Index is the index of the sample in the data square(not row or col index)
+	// Index is the index of the sampled share in the data square (not row or col index)
 	Index int
 	// Axis is Col or Row axis of the sample in the data square
 	Axis rsmt2d.Axis
 }
 
 // NewShareSampleID constructs a new ShareSampleID.
-func NewShareSampleID(root *share.Root, idx int, axis rsmt2d.Axis) ShareSampleID {
+func NewShareSampleID(height uint64, root *share.Root, idx int, axis rsmt2d.Axis) ShareSampleID {
 	sqrLn := len(root.RowRoots)
 	row, col := idx/sqrLn, idx%sqrLn
 	dahroot := root.RowRoots[row]
@@ -39,7 +39,7 @@ func NewShareSampleID(root *share.Root, idx int, axis rsmt2d.Axis) ShareSampleID
 	}
 
 	return ShareSampleID{
-		DataHash: root.Hash(),
+		Height:   height,
 		AxisHash: dahroot,
 		Index:    idx,
 		Axis:     axis,
@@ -78,7 +78,7 @@ func (s *ShareSampleID) Cid() (cid.Cid, error) {
 // Proto converts ShareSampleID to its protobuf representation.
 func (s *ShareSampleID) Proto() *ipldv2pb.ShareSampleID {
 	return &ipldv2pb.ShareSampleID{
-		DataHash: s.DataHash,
+		Height:   s.Height,
 		AxisHash: s.AxisHash,
 		Index:    uint32(s.Index),
 		Axis:     ipldv2pb.Axis(s.Axis),
@@ -88,11 +88,11 @@ func (s *ShareSampleID) Proto() *ipldv2pb.ShareSampleID {
 // MarshalBinary encodes ShareSampleID into binary form.
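For reference, the fixed 103-byte layout produced by the MarshalBinary implementation that follows is: an 8-byte little-endian height, the 90-byte DAH axis root (2*NamespaceSize plus a 32-byte hash, per the dahRootSize constant used in this package), a 4-byte little-endian index, and a single axis byte. A minimal, self-contained sketch of the same encoding — the names and the 29-byte namespace size here are illustrative assumptions, not part of the patch:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	// assumed sizes: two 29-byte namespaces + 32-byte hash = 90-byte NMT root
	dahRootSize = 2*29 + 32
	idSize      = 8 + dahRootSize + 4 + 1 // = 103, i.e. ShareSampleIDSize
)

// encodeID mirrors the layout written by ShareSampleID.MarshalBinary.
func encodeID(height uint64, axisRoot []byte, index uint32, axis byte) []byte {
	buf := make([]byte, 0, idSize)
	buf = binary.LittleEndian.AppendUint64(buf, height)
	buf = append(buf, axisRoot...) // expected to be exactly dahRootSize bytes
	buf = binary.LittleEndian.AppendUint32(buf, index)
	buf = append(buf, axis)
	return buf
}

func main() {
	id := encodeID(1, make([]byte, dahRootSize), 2, 0)
	fmt.Println(len(id)) // 103
}

Keeping the ID this compact matters because it doubles as a multihash digest, which is capped at 128 bytes, as the comment in MarshalBinary below notes.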
func (s *ShareSampleID) MarshalBinary() ([]byte, error) { // we cannot use protobuf here because it exceeds multihash limit of 128 bytes - data := make([]byte, ShareSampleIDSize) - n := copy(data, s.DataHash) - n += copy(data[n:], s.AxisHash) - binary.LittleEndian.PutUint32(data[n:], uint32(s.Index)) - data[n+4] = byte(s.Axis) + data := make([]byte, 0, ShareSampleIDSize) + data = binary.LittleEndian.AppendUint64(data, s.Height) + data = append(data, s.AxisHash...) + data = binary.LittleEndian.AppendUint32(data, uint32(s.Index)) + data = append(data, byte(s.Axis)) return data, nil } @@ -102,22 +102,21 @@ func (s *ShareSampleID) UnmarshalBinary(data []byte) error { return fmt.Errorf("incorrect SampleID size: %d != %d", len(data), ShareSampleIDSize) } - // copying data to avoid slice aliasing - s.DataHash = append(s.DataHash, data[:hashSize]...) - s.AxisHash = append(s.AxisHash, data[hashSize:hashSize+dahRootSize]...) - s.Index = int(binary.LittleEndian.Uint32(data[hashSize+dahRootSize : hashSize+dahRootSize+4])) - s.Axis = rsmt2d.Axis(data[hashSize+dahRootSize+4]) + s.Height = binary.LittleEndian.Uint64(data) + s.AxisHash = append(s.AxisHash, data[8:8+dahRootSize]...) // copying data to avoid slice aliasing + s.Index = int(binary.LittleEndian.Uint32(data[8+dahRootSize : 8+dahRootSize+4])) + s.Axis = rsmt2d.Axis(data[8+dahRootSize+4]) return nil } // Validate validates fields of ShareSampleID. func (s *ShareSampleID) Validate() error { - if len(s.DataHash) != hashSize { - return fmt.Errorf("incorrect DataHash size: %d != %d", len(s.DataHash), hashSize) + if s.Height == 0 { + return fmt.Errorf("zero Height") } if len(s.AxisHash) != dahRootSize { - return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), hashSize) + return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), dahRootSize) } if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row { diff --git a/share/ipldv2/share_sample_id_test.go b/share/ipldv2/share_sample_id_test.go index dc015cd72b..410fda3674 100644 --- a/share/ipldv2/share_sample_id_test.go +++ b/share/ipldv2/share_sample_id_test.go @@ -17,7 +17,7 @@ func TestShareSampleID(t *testing.T) { root, err := share.NewRoot(square) require.NoError(t, err) - sid := NewShareSampleID(root, 2, rsmt2d.Row) + sid := NewShareSampleID(1, root, 2, rsmt2d.Row) id, err := sid.Cid() require.NoError(t, err) diff --git a/share/ipldv2/share_sample_test.go b/share/ipldv2/share_sample_test.go index b83879d72c..04724f8e4f 100644 --- a/share/ipldv2/share_sample_test.go +++ b/share/ipldv2/share_sample_test.go @@ -14,7 +14,7 @@ import ( func TestShareSample(t *testing.T) { square := edstest.RandEDS(t, 2) - sid, err := NewShareSampleFromEDS(square, 2, rsmt2d.Row) + sid, err := NewShareSampleFromEDS(1, square, 2, rsmt2d.Row) require.NoError(t, err) data, err := sid.MarshalBinary() From 600d18617bf09724514d4d735479e53717b3ed1a Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 19 Oct 2023 22:28:33 +0200 Subject: [PATCH 014/132] chore: extract proto helper --- share/ipldv2/axis_sample.go | 7 +------ share/ipldv2/axis_sample_id.go | 10 ++++++++++ share/ipldv2/share_sample.go | 7 +------ share/ipldv2/share_sample_id.go | 10 ++++++++++ 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/share/ipldv2/axis_sample.go b/share/ipldv2/axis_sample.go index 9903a0af21..8df5016dd1 100644 --- a/share/ipldv2/axis_sample.go +++ b/share/ipldv2/axis_sample.go @@ -105,12 +105,7 @@ func (s *AxisSample) UnmarshalBinary(data []byte) error { return err } - s.ID = AxisSampleID{ - Height: 
proto.Id.Height, - AxisHash: proto.Id.AxisHash, - Index: int(proto.Id.Index), - Axis: rsmt2d.Axis(proto.Id.Axis), - } + s.ID = AxisSampleIDFromProto(proto.Id) s.AxisHalf = proto.AxisHalf return nil } diff --git a/share/ipldv2/axis_sample_id.go b/share/ipldv2/axis_sample_id.go index d10ba4cf41..655c22c7a0 100644 --- a/share/ipldv2/axis_sample_id.go +++ b/share/ipldv2/axis_sample_id.go @@ -58,6 +58,16 @@ func AxisSampleIDFromCID(cid cid.Cid) (id AxisSampleID, err error) { return id, nil } +// AxisSampleIDFromProto converts from protobuf representation of AxisSampleID. +func AxisSampleIDFromProto(proto *ipldv2pb.AxisSampleID) AxisSampleID { + return AxisSampleID{ + Height: proto.Height, + AxisHash: proto.AxisHash, + Index: int(proto.Index), + Axis: rsmt2d.Axis(proto.Axis), + } +} + // Cid returns sample ID encoded as CID. func (s *AxisSampleID) Cid() (cid.Cid, error) { data, err := s.MarshalBinary() diff --git a/share/ipldv2/share_sample.go b/share/ipldv2/share_sample.go index fee81560c2..5e9b4d4eb1 100644 --- a/share/ipldv2/share_sample.go +++ b/share/ipldv2/share_sample.go @@ -158,12 +158,7 @@ func (s *ShareSample) UnmarshalBinary(data []byte) error { return err } - s.ID = ShareSampleID{ - Height: proto.Id.Height, - AxisHash: proto.Id.AxisHash, - Index: int(proto.Id.Index), - Axis: rsmt2d.Axis(proto.Id.Axis), - } + s.ID = ShareSampleIDFromProto(proto.Id) s.Type = ShareSampleType(proto.Type) s.Proof = nmt.ProtoToProof(*proto.Proof) s.Share = proto.Share diff --git a/share/ipldv2/share_sample_id.go b/share/ipldv2/share_sample_id.go index 956b5aca38..2425af854d 100644 --- a/share/ipldv2/share_sample_id.go +++ b/share/ipldv2/share_sample_id.go @@ -60,6 +60,16 @@ func ShareSampleIDFromCID(cid cid.Cid) (id ShareSampleID, err error) { return id, nil } +// ShareSampleIDFromProto converts from protobuf representation of ShareSampleID. +func ShareSampleIDFromProto(proto *ipldv2pb.ShareSampleID) ShareSampleID { + return ShareSampleID{ + Height: proto.Height, + AxisHash: proto.AxisHash, + Index: int(proto.Index), + Axis: rsmt2d.Axis(proto.Axis), + } +} + // Cid returns sample ID encoded as CID. func (s *ShareSampleID) Cid() (cid.Cid, error) { data, err := s.MarshalBinary() From 6673564db2ed5ab8e7746ec9979aca69f0f99cfc Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 19 Oct 2023 23:00:18 +0200 Subject: [PATCH 015/132] successful experiment with request size shortening for axis sampling --- share/ipldv2/axis_sample.go | 3 ++- share/ipldv2/axis_sample_id.go | 15 ++++++++------- share/ipldv2/ipldv2.go | 14 ++++++++++---- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/share/ipldv2/axis_sample.go b/share/ipldv2/axis_sample.go index 8df5016dd1..81dc734424 100644 --- a/share/ipldv2/axis_sample.go +++ b/share/ipldv2/axis_sample.go @@ -142,7 +142,8 @@ func (s *AxisSample) Validate() error { return fmt.Errorf("while computing NMT root: %w", err) } - if !bytes.Equal(s.ID.AxisHash, root) { + hashedRoot := hashBytes(root) + if !bytes.Equal(s.ID.AxisHash, hashedRoot) { return fmt.Errorf("invalid root: %X != %X", root, s.ID.AxisHash) } diff --git a/share/ipldv2/axis_sample_id.go b/share/ipldv2/axis_sample_id.go index 655c22c7a0..3d9a4fb03b 100644 --- a/share/ipldv2/axis_sample_id.go +++ b/share/ipldv2/axis_sample_id.go @@ -14,7 +14,7 @@ import ( ) // AxisSampleIDSize is the size of the AxisSampleID in bytes -const AxisSampleIDSize = 103 +const AxisSampleIDSize = 45 // AxisSampleID is an unique identifier of a AxisSample. 
type AxisSampleID struct { @@ -35,10 +35,11 @@ func NewAxisSampleID(height uint64, root *share.Root, idx int, axis rsmt2d.Axis) if axis == rsmt2d.Col { dahroot = root.ColumnRoots[idx] } + axisHash := hashBytes(dahroot) return AxisSampleID{ Height: height, - AxisHash: dahroot, + AxisHash: axisHash, Index: idx, Axis: axis, } @@ -111,9 +112,9 @@ func (s *AxisSampleID) UnmarshalBinary(data []byte) error { } s.Height = binary.LittleEndian.Uint64(data) - s.AxisHash = append(s.AxisHash, data[8:8+dahRootSize]...) // copying data to avoid slice aliasing - s.Index = int(binary.LittleEndian.Uint32(data[8+dahRootSize : 8+dahRootSize+4])) - s.Axis = rsmt2d.Axis(data[8+dahRootSize+4]) + s.AxisHash = append(s.AxisHash, data[8:8+hashSize]...) // copying data to avoid slice aliasing + s.Index = int(binary.LittleEndian.Uint32(data[8+hashSize : 8+hashSize+4])) + s.Axis = rsmt2d.Axis(data[8+hashSize+4]) return nil } @@ -123,8 +124,8 @@ func (s *AxisSampleID) Validate() error { return fmt.Errorf("zero Height") } - if len(s.AxisHash) != dahRootSize { - return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), dahRootSize) + if len(s.AxisHash) != hashSize { + return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), hashSize) } if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row { diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index 5b496d5efa..7cd9331be7 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -66,16 +66,22 @@ func (a allowlist) IsAllowed(code uint64) bool { func validateCID(cid cid.Cid) error { prefix := cid.Prefix() if prefix.Codec != shareSamplingCodec && prefix.Codec != axisSamplingCodec { - return fmt.Errorf("unsupported codec") + return fmt.Errorf("unsupported codec %d", prefix.Codec) } if prefix.MhType != shareSamplingMultihashCode && prefix.MhType != axisSamplingMultihashCode { - return fmt.Errorf("unsupported multihash") + return fmt.Errorf("unsupported multihash %d", prefix.MhType) } - if prefix.MhLength != ShareSampleIDSize { - return fmt.Errorf("invalid multihash length") + if prefix.MhLength != ShareSampleIDSize && prefix.MhLength != AxisSampleIDSize { + return fmt.Errorf("invalid multihash length %d", prefix.MhLength) } return nil } + +func hashBytes(preimage []byte) []byte { + hsh := sha256.New() + hsh.Write(preimage) + return hsh.Sum(nil) +} From 830860d9a951f6de2dff6beede4073e4c2b231b8 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 19 Oct 2023 23:03:23 +0200 Subject: [PATCH 016/132] docs fix --- share/ipldv2/axis_sample_id.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/ipldv2/axis_sample_id.go b/share/ipldv2/axis_sample_id.go index 3d9a4fb03b..f228c17ff0 100644 --- a/share/ipldv2/axis_sample_id.go +++ b/share/ipldv2/axis_sample_id.go @@ -21,11 +21,11 @@ type AxisSampleID struct { // Height of the block. 
// Needed to identify block's data square in the whole chain
 	Height uint64
-	// AxisHash is the Col or AxisSample root from DAH of the data square
+	// AxisHash is the sha256 hash of a Col or Row root taken from DAH of the data square
 	AxisHash []byte
 	// Index is the index of the axis (row, col) in the data square
 	Index int
-	// Axis is Col or AxisSample axis of the sample in the data square
+	// Axis is Col or Row axis of the sample in the data square
 	Axis rsmt2d.Axis
 }
 

From 9ffb2843cc2a5f48581fc660a4d0d5c72efb7e5b Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Thu, 19 Oct 2023 23:47:32 +0200
Subject: [PATCH 017/132] request size optimization for share sample

---
 go.mod                          |  2 ++
 go.sum                          |  8 ++++++--
 share/ipldv2/ipldv2.go          | 15 +++++++--------
 share/ipldv2/share_sample.go    |  4 ++--
 share/ipldv2/share_sample_id.go | 17 +++++++++--------
 5 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/go.mod b/go.mod
index c648c01889..9a58a2961f 100644
--- a/go.mod
+++ b/go.mod
@@ -351,3 +351,5 @@ replace (
 	github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
 	github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29
 )
+
+replace github.com/celestiaorg/nmt => github.com/Wondertan/nmt v0.0.0-20231019214331-d200d40bdad6
diff --git a/go.sum b/go.sum
index d45b9f0c06..cb340549cc 100644
--- a/go.sum
+++ b/go.sum
@@ -258,6 +258,8 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
 github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
 github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/Wondertan/nmt v0.0.0-20231019214331-d200d40bdad6 h1:9VcKz1VmJOa3aYRDc+hxAhpSGkG9BSvc1Mowq4366VU=
+github.com/Wondertan/nmt v0.0.0-20231019214331-d200d40bdad6/go.mod h1:jXKMLje7T3YTvX4CfM0c38oHjcwKqCSkklymyMMt9Cw=
 github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig=
 github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A=
 github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM=
@@ -376,10 +378,12 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO
 github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo=
 github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc=
 github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA=
 github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas=
 github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E=
 github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw=
 github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw=
+github.com/celestiaorg/quantum-gravity-bridge v1.3.0 h1:9zPIp7w1FWfkPnn16y3S4FpFLnQtS7rm81CUVcHEts0=
+github.com/celestiaorg/quantum-gravity-bridge v1.3.0/go.mod h1:6WOajINTDEUXpSj5UZzod16UZ96ZVB/rFNKyM+Mt1gI=
 github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k=
github.com/celestiaorg/rsmt2d v0.11.0/go.mod h1:6Y580I3gVr0+OVFfW6m2JTwnCCmvW3WfbwSLfuT+HCA= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -2258,8 +2265,8 @@ github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYms github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= -github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index 7cd9331be7..eaa309c33c 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -8,8 +8,6 @@ import ( "github.com/ipfs/go-cid" logger "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" - - "github.com/celestiaorg/celestia-node/share" ) var log = logger.Logger("ipldv2") @@ -28,15 +26,16 @@ const ( // axisSamplingMultihashCode is the multihash code for custom axis sampling multihash function. axisSamplingMultihashCode = 0x7811 -) -// TODO(@Wondertan): Eventually this should become configurable -const ( - hashSize = sha256.Size - dahRootSize = 2*share.NamespaceSize + hashSize + // mhPrefixSize is the size of the multihash prefix that used to cut it off. mhPrefixSize = 4 ) +var ( + hashSize = sha256.Size + hasher = sha256.New +) + func init() { // Register hashers for new multihashes mh.Register(shareSamplingMultihashCode, func() hash.Hash { @@ -81,7 +80,7 @@ func validateCID(cid cid.Cid) error { } func hashBytes(preimage []byte) []byte { - hsh := sha256.New() + hsh := hasher() hsh.Write(preimage) return hsh.Sum(nil) } diff --git a/share/ipldv2/share_sample.go b/share/ipldv2/share_sample.go index 5e9b4d4eb1..1204f07507 100644 --- a/share/ipldv2/share_sample.go +++ b/share/ipldv2/share_sample.go @@ -1,7 +1,6 @@ package ipldv2 import ( - "crypto/sha256" "errors" "fmt" @@ -180,7 +179,8 @@ func (s *ShareSample) Validate() error { namespace = share.GetNamespace(s.Share) } - if !s.Proof.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{s.Share}, s.ID.AxisHash) { + s.Proof.WithHashedProof(hasher()) + if !s.Proof.VerifyInclusion(hasher(), namespace.ToNMT(), [][]byte{s.Share}, s.ID.AxisHash) { return errors.New("sample proof is invalid") } diff --git a/share/ipldv2/share_sample_id.go b/share/ipldv2/share_sample_id.go index 2425af854d..88492887e0 100644 --- a/share/ipldv2/share_sample_id.go +++ b/share/ipldv2/share_sample_id.go @@ -14,14 +14,14 @@ import ( ) // ShareSampleIDSize is the size of the ShareSampleID in bytes -const ShareSampleIDSize = 103 +const ShareSampleIDSize = 45 // ShareSampleID is an unique identifier of a ShareSample. type ShareSampleID struct { // Height of the block. 
// Needed to identify block's data square in the whole chain Height uint64 - // AxisHash is the Col or Row root from DAH of the data square + // AxisHash is the sha256 hash of a Col or Row root taken from DAH of the data square AxisHash []byte // Index is the index of the sampled share in the data square(not row or col index) Index int @@ -37,10 +37,11 @@ func NewShareSampleID(height uint64, root *share.Root, idx int, axis rsmt2d.Axis if axis == rsmt2d.Col { dahroot = root.ColumnRoots[col] } + axisHash := hashBytes(dahroot) return ShareSampleID{ Height: height, - AxisHash: dahroot, + AxisHash: axisHash, Index: idx, Axis: axis, } @@ -113,9 +114,9 @@ func (s *ShareSampleID) UnmarshalBinary(data []byte) error { } s.Height = binary.LittleEndian.Uint64(data) - s.AxisHash = append(s.AxisHash, data[8:8+dahRootSize]...) // copying data to avoid slice aliasing - s.Index = int(binary.LittleEndian.Uint32(data[8+dahRootSize : 8+dahRootSize+4])) - s.Axis = rsmt2d.Axis(data[8+dahRootSize+4]) + s.AxisHash = append(s.AxisHash, data[8:8+hashSize]...) // copying data to avoid slice aliasing + s.Index = int(binary.LittleEndian.Uint32(data[8+hashSize : 8+hashSize+4])) + s.Axis = rsmt2d.Axis(data[8+hashSize+4]) return nil } @@ -125,8 +126,8 @@ func (s *ShareSampleID) Validate() error { return fmt.Errorf("zero Height") } - if len(s.AxisHash) != dahRootSize { - return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), dahRootSize) + if len(s.AxisHash) != hashSize { + return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), hashSize) } if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row { From 52f3ab993d5b0f225f38d8e96d6a8df530e88dad Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 22 Oct 2023 20:15:43 +0200 Subject: [PATCH 018/132] refactor AxisID away and many more improvements --- share/eds/file.go | 29 +- share/eds/file_test.go | 27 +- share/eds/ods_file.go | 27 +- share/ipldv2/{axis_sample.go => axis.go} | 70 +- .../{axis_sample_hasher.go => axis_hasher.go} | 20 +- ...ple_hasher_test.go => axis_hasher_test.go} | 6 +- share/ipldv2/axis_id.go | 147 ++++ ...axis_sample_id_test.go => axis_id_test.go} | 12 +- share/ipldv2/axis_sample_id.go | 136 --- .../{axis_sample_test.go => axis_test.go} | 6 +- share/ipldv2/blockstore.go | 42 +- share/ipldv2/blockstore_test.go | 7 +- share/ipldv2/ipldv2.go | 34 +- share/ipldv2/ipldv2_test.go | 42 +- share/ipldv2/pb/ipldv2pb.pb.go | 783 ++++++++---------- share/ipldv2/pb/ipldv2pb.proto | 42 +- share/ipldv2/sample_id.go | 115 +++ ...re_sample_id_test.go => sample_id_test.go} | 12 +- share/ipldv2/share_sample.go | 97 ++- share/ipldv2/share_sample_hasher.go | 30 +- share/ipldv2/share_sample_hasher_test.go | 6 +- share/ipldv2/share_sample_id.go | 138 --- share/ipldv2/share_sample_test.go | 6 +- 23 files changed, 874 insertions(+), 960 deletions(-) rename share/ipldv2/{axis_sample.go => axis.go} (58%) rename share/ipldv2/{axis_sample_hasher.go => axis_hasher.go} (67%) rename share/ipldv2/{axis_sample_hasher_test.go => axis_hasher_test.go} (83%) create mode 100644 share/ipldv2/axis_id.go rename share/ipldv2/{axis_sample_id_test.go => axis_id_test.go} (66%) delete mode 100644 share/ipldv2/axis_sample_id.go rename share/ipldv2/{axis_sample_test.go => axis_test.go} (83%) create mode 100644 share/ipldv2/sample_id.go rename share/ipldv2/{share_sample_id_test.go => sample_id_test.go} (66%) delete mode 100644 share/ipldv2/share_sample_id.go diff --git a/share/eds/file.go b/share/eds/file.go index f353b13002..cd305b7878 100644 --- a/share/eds/file.go +++ 
b/share/eds/file.go @@ -17,9 +17,9 @@ import ( type File interface { io.Closer Size() int - ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) - Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) - AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) + ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) + Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) + AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) EDS() (*rsmt2d.ExtendedDataSquare, error) } @@ -122,7 +122,7 @@ func (f *LazyFile) Header() *Header { return f.hdr } -func (f *LazyFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { +func (f *LazyFile) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { shrLn := int(f.hdr.shareSize) sqrLn := int(f.hdr.squareSize) if f.Header().Config().Mode == ODSMode { @@ -130,10 +130,10 @@ func (f *LazyFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { } shrs := make([]share.Share, sqrLn) - switch axis { + switch axisType { case rsmt2d.Col: for i := 0; i < sqrLn; i++ { - pos := idx + i*sqrLn + pos := axisIdx + i*sqrLn offset := pos*shrLn + HeaderSize shr := make(share.Share, shrLn) @@ -143,7 +143,7 @@ func (f *LazyFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { shrs[i] = shr } case rsmt2d.Row: - pos := idx * sqrLn + pos := axisIdx * sqrLn offset := pos*shrLn + HeaderSize axsData := make([]byte, sqrLn*shrLn) @@ -169,9 +169,9 @@ func (f *LazyFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { return shrs, nil } -func (f *LazyFile) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { +func (f *LazyFile) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { // TODO(@Wondertan): this has to read directly from the file, avoiding recompute - fullAxis, err := f.Axis(idx, axis) + fullAxis, err := f.Axis(axisType, axisIdx) if err != nil { return nil, err } @@ -179,20 +179,15 @@ func (f *LazyFile) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { return fullAxis[:len(fullAxis)/2], nil } -func (f *LazyFile) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { +func (f *LazyFile) ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) { // TODO: Cache the axis as well as computed tree sqrLn := int(f.hdr.squareSize) - axsIdx, shrIdx := idx/sqrLn, idx%sqrLn - if axis == rsmt2d.Col { - axsIdx, shrIdx = shrIdx, axsIdx - } - - shrs, err := f.Axis(axsIdx, axis) + shrs, err := f.Axis(axisType, axisIdx) if err != nil { return nil, nmt.Proof{}, err } - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axsIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) for _, shr := range shrs { err = tree.Push(shr) if err != nil { diff --git a/share/eds/file_test.go b/share/eds/file_test.go index 53e1bad8c8..028e88d24b 100644 --- a/share/eds/file_test.go +++ b/share/eds/file_test.go @@ -41,31 +41,34 @@ func TestFile(t *testing.T) { fl, err = OpenFile(path) require.NoError(t, err) - axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} - for _, axis := range axis { + axisTypes := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} + for _, axisType := range axisTypes { for i := 0; i < int(eds.Width()); i++ { - row, err := fl.Axis(i, axis) + row, err := fl.Axis(axisType, i) require.NoError(t, err) - assert.EqualValues(t, getAxis(i, axis, eds), row) + assert.EqualValues(t, getAxis(axisType, i, eds), row) } } width := int(eds.Width()) - for _, axis := range 
axis { + for _, axisType := range axisTypes { for i := 0; i < width*width; i++ { - row, col := uint(i/width), uint(i%width) - shr, prf, err := fl.ShareWithProof(i, axis) + axisIdx, shrIdx := i/width, i%width + if axisType == rsmt2d.Col { + axisIdx, shrIdx = shrIdx, axisIdx + } + + shr, prf, err := fl.ShareWithProof(axisType, axisIdx, shrIdx) require.NoError(t, err) - assert.EqualValues(t, eds.GetCell(row, col), shr) namespace := share.ParitySharesNamespace - if int(row) < width/2 && int(col) < width/2 { + if axisIdx < width/2 && shrIdx < width/2 { namespace = share.GetNamespace(shr) } - axishash := root.RowRoots[row] - if axis == rsmt2d.Col { - axishash = root.ColumnRoots[col] + axishash := root.RowRoots[axisIdx] + if axisType == rsmt2d.Col { + axishash = root.ColumnRoots[axisIdx] } ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) diff --git a/share/eds/ods_file.go b/share/eds/ods_file.go index 6930174cc1..aaaa04c22f 100644 --- a/share/eds/ods_file.go +++ b/share/eds/ods_file.go @@ -20,20 +20,15 @@ func (f *MemFile) Size() int { return int(f.Eds.Width()) } -func (f *MemFile) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Proof, error) { +func (f *MemFile) ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) { sqrLn := f.Size() - axsIdx, shrIdx := idx/sqrLn, idx%sqrLn - if axis == rsmt2d.Col { - axsIdx, shrIdx = shrIdx, axsIdx - } - - shrs, err := f.Axis(axsIdx, axis) + shrs, err := f.Axis(axisType, axisIdx) if err != nil { return nil, nmt.Proof{}, err } // TODO(@Wondartan): this must access cached NMT on EDS instead of computing a new one - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axsIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) for _, shr := range shrs { err = tree.Push(shr) if err != nil { @@ -49,12 +44,12 @@ func (f *MemFile) ShareWithProof(idx int, axis rsmt2d.Axis) (share.Share, nmt.Pr return shrs[shrIdx], proof, nil } -func (f *MemFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { - return getAxis(idx, axis, f.Eds), nil +func (f *MemFile) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + return getAxis(axisType, axisIdx, f.Eds), nil } -func (f *MemFile) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { - return getAxis(idx, axis, f.Eds)[:f.Size()/2], nil +func (f *MemFile) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + return getAxis(axisType, axisIdx, f.Eds)[:f.Size()/2], nil } func (f *MemFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { @@ -62,12 +57,12 @@ func (f *MemFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { } // TODO(@Wondertan): Should be a method on eds -func getAxis(idx int, axis rsmt2d.Axis, eds *rsmt2d.ExtendedDataSquare) [][]byte { - switch axis { +func getAxis(axisType rsmt2d.Axis, axisIdx int, eds *rsmt2d.ExtendedDataSquare) [][]byte { + switch axisType { case rsmt2d.Row: - return eds.Row(uint(idx)) + return eds.Row(uint(axisIdx)) case rsmt2d.Col: - return eds.Col(uint(idx)) + return eds.Col(uint(axisIdx)) default: panic("unknown axis") } diff --git a/share/ipldv2/axis_sample.go b/share/ipldv2/axis.go similarity index 58% rename from share/ipldv2/axis_sample.go rename to share/ipldv2/axis.go index 81dc734424..ada30507f1 100644 --- a/share/ipldv2/axis_sample.go +++ b/share/ipldv2/axis.go @@ -13,31 +13,31 @@ import ( ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) -type AxisSample struct { - ID AxisSampleID +type Axis struct { + ID 
AxisID AxisHalf []share.Share } -// NewAxisSample constructs a new AxisSample. -func NewAxisSample(id AxisSampleID, axisHalf []share.Share) *AxisSample { - return &AxisSample{ +// NewAxis constructs a new Axis. +func NewAxis(id AxisID, axisHalf []share.Share) *Axis { + return &Axis{ ID: id, AxisHalf: axisHalf, } } -// NewAxisSampleFromEDS samples the EDS and constructs a new AxisSample. -func NewAxisSampleFromEDS( - height uint64, - eds *rsmt2d.ExtendedDataSquare, +// NewAxisFromEDS samples the EDS and constructs a new Axis. +func NewAxisFromEDS( + axisType rsmt2d.Axis, idx int, - axis rsmt2d.Axis, -) (*AxisSample, error) { + eds *rsmt2d.ExtendedDataSquare, + height uint64, +) (*Axis, error) { sqrLn := int(eds.Width()) // TODO(@Wondertan): Should be an rsmt2d method var axisHalf [][]byte - switch axis { + switch axisType { case rsmt2d.Row: axisHalf = eds.Row(uint(idx))[:sqrLn/2] case rsmt2d.Col: @@ -51,35 +51,35 @@ func NewAxisSampleFromEDS( return nil, fmt.Errorf("while computing root: %w", err) } - id := NewAxisSampleID(height, root, idx, axis) - return NewAxisSample(id, axisHalf), nil + id := NewAxisID(axisType, uint16(idx), root, height) + return NewAxis(id, axisHalf), nil } -// Proto converts AxisSample to its protobuf representation. -func (s *AxisSample) Proto() *ipldv2pb.AxisSample { - return &ipldv2pb.AxisSample{ +// Proto converts Axis to its protobuf representation. +func (s *Axis) Proto() *ipldv2pb.Axis { + return &ipldv2pb.Axis{ Id: s.ID.Proto(), AxisHalf: s.AxisHalf, } } -// AxisSampleFromBlock converts blocks.Block into AxisSample. -func AxisSampleFromBlock(blk blocks.Block) (*AxisSample, error) { +// AxisFromBlock converts blocks.Block into Axis. +func AxisFromBlock(blk blocks.Block) (*Axis, error) { if err := validateCID(blk.Cid()); err != nil { return nil, err } - s := &AxisSample{} + s := &Axis{} err := s.UnmarshalBinary(blk.RawData()) if err != nil { - return nil, fmt.Errorf("while unmarshalling ShareSample: %w", err) + return nil, fmt.Errorf("while unmarshalling Axis: %w", err) } return s, nil } -// IPLDBlock converts AxisSample to an IPLD block for Bitswap compatibility. -func (s *AxisSample) IPLDBlock() (blocks.Block, error) { +// IPLDBlock converts Axis to an IPLD block for Bitswap compatibility. +func (s *Axis) IPLDBlock() (blocks.Block, error) { cid, err := s.ID.Cid() if err != nil { return nil, err @@ -93,32 +93,32 @@ func (s *AxisSample) IPLDBlock() (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -// MarshalBinary marshals AxisSample to binary. -func (s *AxisSample) MarshalBinary() ([]byte, error) { +// MarshalBinary marshals Axis to binary. +func (s *Axis) MarshalBinary() ([]byte, error) { return s.Proto().Marshal() } -// UnmarshalBinary unmarshal AxisSample from binary. -func (s *AxisSample) UnmarshalBinary(data []byte) error { - proto := &ipldv2pb.AxisSample{} +// UnmarshalBinary unmarshal Axis from binary. +func (s *Axis) UnmarshalBinary(data []byte) error { + proto := &ipldv2pb.Axis{} if err := proto.Unmarshal(data); err != nil { return err } - s.ID = AxisSampleIDFromProto(proto.Id) + s.ID = AxisIDFromProto(proto.Id) s.AxisHalf = proto.AxisHalf return nil } -// Validate validates AxisSample's fields and proof of Share inclusion in the NMT. -func (s *AxisSample) Validate() error { +// Validate validates Axis's fields and proof of axis inclusion. 
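The check implemented below is the heart of axis sampling: the message carries only the original half of the axis, so the verifier re-encodes the parity half, rebuilds the NMT root over the full axis, and compares the root's sha256 against the AxisHash committed in the ID. A condensed sketch of that flow, assuming the celestiaorg imports used elsewhere in this series and that the codec's Encode returns the parity shares for a given original half, as the surrounding code relies on:

package sketch

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/wrapper"

	"github.com/celestiaorg/celestia-node/share"
)

// verifyAxisHalf recomputes parity, rebuilds the NMT root and checks its hash.
func verifyAxisHalf(axisIdx uint, half []share.Share, axisHash []byte) error {
	parity, err := share.DefaultRSMT2DCodec().Encode(half)
	if err != nil {
		return fmt.Errorf("while encoding parity: %w", err)
	}
	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(half)), axisIdx)
	for _, shr := range append(half, parity...) {
		if err := tree.Push(shr); err != nil {
			return fmt.Errorf("while pushing share: %w", err)
		}
	}
	root, err := tree.Root()
	if err != nil {
		return fmt.Errorf("while computing NMT root: %w", err)
	}
	if sum := sha256.Sum256(root); !bytes.Equal(sum[:], axisHash) {
		return fmt.Errorf("axis hash mismatch")
	}
	return nil
}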
+func (s *Axis) Validate() error {
 	if err := s.ID.Validate(); err != nil {
 		return err
 	}
 
 	sqrLn := len(s.AxisHalf) * 2
-	if s.ID.Index > sqrLn {
-		return fmt.Errorf("row index exceeds square size: %d > %d", s.ID.Index, sqrLn)
+	if s.ID.AxisIndex > uint16(sqrLn) {
+		return fmt.Errorf("axis index exceeds square size: %d > %d", s.ID.AxisIndex, sqrLn)
 	}
 
 	// TODO(@Wondertan): These computations are quite expensive and likely to be used further,
@@ -129,7 +129,7 @@ func (s *AxisSample) Validate() error {
 	}
 	s.AxisHalf = append(s.AxisHalf, parity...)
 
-	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.ID.Index))
+	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.ID.AxisIndex))
 	for _, shr := range s.AxisHalf {
 		err := tree.Push(shr)
 		if err != nil {
@@ -144,7 +144,7 @@
 
 	hashedRoot := hashBytes(root)
 	if !bytes.Equal(s.ID.AxisHash, hashedRoot) {
-		return fmt.Errorf("invalid root: %X != %X", root, s.ID.AxisHash)
+		return fmt.Errorf("invalid axis hash: %X != %X", root, s.ID.AxisHash)
 	}
 
 	return nil
diff --git a/share/ipldv2/axis_sample_hasher.go b/share/ipldv2/axis_hasher.go
similarity index 67%
rename from share/ipldv2/axis_sample_hasher.go
rename to share/ipldv2/axis_hasher.go
index 08b08cd3d8..83bb8b517d 100644
--- a/share/ipldv2/axis_sample_hasher.go
+++ b/share/ipldv2/axis_hasher.go
@@ -5,13 +5,13 @@ import (
 	"fmt"
 )
 
-// AxisSampleHasher implements hash.Hash interface for Samples.
-type AxisSampleHasher struct {
-	sample AxisSample
+// AxisHasher implements hash.Hash interface for Axis.
+type AxisHasher struct {
+	sample Axis
 }
 
 // Write expects a marshaled Axis to validate.
-func (sh *AxisSampleHasher) Write(data []byte) (int, error) {
+func (sh *AxisHasher) Write(data []byte) (int, error) {
 	if err := sh.sample.UnmarshalBinary(data); err != nil {
 		err = fmt.Errorf("while unmarshaling Axis: %w", err)
 		log.Error(err)
@@ -28,7 +28,7 @@ func (sh *AxisSampleHasher) Write(data []byte) (int, error) {
 }
 
 // Sum returns the "multihash" of the AxisID.
-func (sh *AxisSampleHasher) Sum([]byte) []byte {
+func (sh *AxisHasher) Sum([]byte) []byte {
 	sum, err := sh.sample.ID.MarshalBinary()
 	if err != nil {
 		err = fmt.Errorf("while marshaling AxisID")
@@ -38,16 +38,16 @@ func (sh *AxisSampleHasher) Sum([]byte) []byte {
 }
 
 // Reset resets the Hash to its initial state.
-func (sh *AxisSampleHasher) Reset() {
-	sh.sample = AxisSample{}
+func (sh *AxisHasher) Reset() {
+	sh.sample = Axis{}
 }
 
 // Size returns the number of bytes Sum will return.
-func (sh *AxisSampleHasher) Size() int {
-	return AxisSampleIDSize
+func (sh *AxisHasher) Size() int {
+	return AxisIDSize
 }
 
 // BlockSize returns the hash's underlying block size.
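The hasher above is the trick that makes the custom multihashes work: Bitswap hashes whatever bytes it receives, so Write validates the full payload and Sum returns the payload's self-describing ID as the "digest", giving stateless verification for free. A stripped-down sketch of the pattern — the names and the extract hook are illustrative, not the package's actual API:

package sketch

import "hash"

// idHasher is a hash.Hash whose digest is the identifier carried inside a
// validated payload, rather than a cryptographic hash of the input bytes.
type idHasher struct {
	id      []byte
	idSize  int
	extract func(payload []byte) ([]byte, error) // unmarshal, Validate, return ID bytes
}

var _ hash.Hash = (*idHasher)(nil)

func (h *idHasher) Write(payload []byte) (int, error) {
	id, err := h.extract(payload) // invalid payloads never produce an ID
	if err != nil {
		return 0, err
	}
	h.id = id
	return len(payload), nil
}

func (h *idHasher) Sum(b []byte) []byte { return append(b, h.id...) }
func (h *idHasher) Reset()              { h.id = nil }
func (h *idHasher) Size() int           { return h.idSize }
func (h *idHasher) BlockSize() int      { return 64 } // sha256.BlockSize

The resulting "hash" equals the requested multihash exactly when the payload is internally consistent, which is what lets the blockstore serve these blocks to Bitswap unmodified.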
-func (sh *AxisSampleHasher) BlockSize() int {
+func (sh *AxisHasher) BlockSize() int {
 	return sha256.BlockSize
 }
diff --git a/share/ipldv2/axis_sample_hasher_test.go b/share/ipldv2/axis_hasher_test.go
similarity index 83%
rename from share/ipldv2/axis_sample_hasher_test.go
rename to share/ipldv2/axis_hasher_test.go
index d41c46d66a..44cf42ab1e 100644
--- a/share/ipldv2/axis_sample_hasher_test.go
+++ b/share/ipldv2/axis_hasher_test.go
@@ -11,15 +11,15 @@ import (
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
 )
 
-func TestAxisSampleHasher(t *testing.T) {
-	hasher := &AxisSampleHasher{}
+func TestAxisHasher(t *testing.T) {
+	hasher := &AxisHasher{}
 
 	_, err := hasher.Write([]byte("hello"))
 	assert.Error(t, err)
 
 	square := edstest.RandEDS(t, 2)
 
-	sample, err := NewAxisSampleFromEDS(1, square, 2, rsmt2d.Row)
+	sample, err := NewAxisFromEDS(rsmt2d.Row, 2, square, 1)
 	require.NoError(t, err)
 
 	data, err := sample.MarshalBinary()
diff --git a/share/ipldv2/axis_id.go b/share/ipldv2/axis_id.go
new file mode 100644
index 0000000000..d4c70d687b
--- /dev/null
+++ b/share/ipldv2/axis_id.go
@@ -0,0 +1,147 @@
+package ipldv2
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+	ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb"
+)
+
+// AxisIDSize is the size of the AxisID in bytes
+const AxisIDSize = 43
+
+// AxisID is a unique identifier of an Axis.
+type AxisID struct {
+	// AxisType is Col or Row axis of the sample in the data square
+	AxisType rsmt2d.Axis
+	// AxisIndex is the index of the axis (row, col) in the data square
+	AxisIndex uint16
+	// AxisHash is the sha256 hash of a Col or Row root taken from DAH of the data square
+	AxisHash []byte
+	// Height of the block.
+	// Needed to identify block's data square in the whole chain
+	Height uint64
+}
+
+// NewAxisID constructs a new AxisID.
+func NewAxisID(axisType rsmt2d.Axis, idx uint16, root *share.Root, height uint64) AxisID {
+	dahroot := root.RowRoots[idx]
+	if axisType == rsmt2d.Col {
+		dahroot = root.ColumnRoots[idx]
+	}
+	axisHash := hashBytes(dahroot)
+
+	return AxisID{
+		AxisType:  axisType,
+		AxisIndex: idx,
+		AxisHash:  axisHash,
+		Height:    height,
+	}
+}
+
+// AxisIDFromCID converts CID to AxisID.
+func AxisIDFromCID(cid cid.Cid) (id AxisID, err error) {
+	if err = validateCID(cid); err != nil {
+		return id, err
+	}
+
+	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
+	if err != nil {
+		return id, fmt.Errorf("while unmarshalling AxisID: %w", err)
+	}
+
+	return id, nil
+}
+
+// AxisIDFromProto converts from protobuf representation of AxisID.
+func AxisIDFromProto(proto *ipldv2pb.AxisID) AxisID {
+	return AxisID{
+		AxisType:  rsmt2d.Axis(proto.Type),
+		AxisIndex: uint16(proto.Index),
+		AxisHash:  proto.Hash,
+		Height:    proto.Height,
+	}
+}
+
+// Cid returns the AxisID encoded as a CID.
+func (s *AxisID) Cid() (cid.Cid, error) {
+	// avoid using proto serialization for CIDs as it's not deterministic
+	data, err := s.MarshalBinary()
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	buf, err := mh.Encode(data, axisMultihashCode)
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	return cid.NewCidV1(axisCodec, buf), nil
+}
+
+// Proto converts AxisID to its protobuf representation.
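Worth noting how these IDs become CIDs: Cid above wraps the serialized ID as the digest of a custom multihash code and pairs it with a custom codec. A minimal sketch with illustrative constants — the real codec and multihash values live in ipldv2.go, which also registers the codes with mh.Register so go-multihash accepts them:

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

const (
	axisCodec         = 0x7810 // illustrative value
	axisMultihashCode = 0x7811 // illustrative value
	axisIDSize        = 43
)

func main() {
	id := make([]byte, axisIDSize) // a serialized AxisID
	// mh.Encode length-prefixes the raw ID bytes as a multihash digest.
	buf, err := mh.Encode(id, axisMultihashCode)
	if err != nil {
		panic(err)
	}
	c := cid.NewCidV1(axisCodec, buf)
	fmt.Println(c.Prefix().MhLength == axisIDSize) // true: the ID itself is the digest
}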
+func (s *AxisID) Proto() *ipldv2pb.AxisID {
+	return &ipldv2pb.AxisID{
+		Type:   ipldv2pb.AxisType(s.AxisType),
+		Height: s.Height,
+		Hash:   s.AxisHash,
+		Index:  uint32(s.AxisIndex),
+	}
+}
+
+// MarshalTo encodes AxisID into the given byte slice.
+func (s *AxisID) MarshalTo(data []byte) (int, error) {
+	data = append(data, byte(s.AxisType))
+	data = binary.LittleEndian.AppendUint16(data, s.AxisIndex)
+	data = append(data, s.AxisHash...)
+	data = binary.LittleEndian.AppendUint64(data, s.Height)
+	return AxisIDSize, nil
+}
+
+// UnmarshalFrom decodes AxisID from the given byte slice.
+func (s *AxisID) UnmarshalFrom(data []byte) (int, error) {
+	s.AxisType = rsmt2d.Axis(data[0])
+	s.AxisIndex = binary.LittleEndian.Uint16(data[1:])
+	s.AxisHash = append(s.AxisHash, data[3:hashSize+3]...)
+	s.Height = binary.LittleEndian.Uint64(data[hashSize+3:])
+	return AxisIDSize, nil
+}
+
+// MarshalBinary encodes AxisID into binary form.
+func (s *AxisID) MarshalBinary() ([]byte, error) {
+	data := make([]byte, 0, AxisIDSize)
+	n, err := s.MarshalTo(data)
+	return data[:n], err
+}
+
+// UnmarshalBinary decodes AxisID from binary form.
+func (s *AxisID) UnmarshalBinary(data []byte) error {
+	if len(data) != AxisIDSize {
+		return fmt.Errorf("incorrect data length: %d != %d", len(data), AxisIDSize)
+	}
+	_, err := s.UnmarshalFrom(data)
+	return err
+}
+
+// Validate validates fields of AxisID.
+func (s *AxisID) Validate() error {
+	if s.Height == 0 {
+		return fmt.Errorf("zero Height")
+	}
+
+	if len(s.AxisHash) != hashSize {
+		return fmt.Errorf("incorrect Hash size: %d != %d", len(s.AxisHash), hashSize)
+	}
+
+	if s.AxisType != rsmt2d.Col && s.AxisType != rsmt2d.Row {
+		return fmt.Errorf("incorrect Axis: %d", s.AxisType)
+	}
+
+	return nil
+}
diff --git a/share/ipldv2/axis_sample_id_test.go b/share/ipldv2/axis_id_test.go
similarity index 66%
rename from share/ipldv2/axis_sample_id_test.go
rename to share/ipldv2/axis_id_test.go
index 0cf0baff7a..34bfad58cd 100644
--- a/share/ipldv2/axis_sample_id_test.go
+++ b/share/ipldv2/axis_id_test.go
@@ -12,24 +12,24 @@ import (
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
 )
 
-func TestAxisSampleID(t *testing.T) {
+func TestAxisID(t *testing.T) {
 	square := edstest.RandEDS(t, 2)
 
 	root, err := share.NewRoot(square)
 	require.NoError(t, err)
 
-	sid := NewAxisSampleID(1, root, 2, rsmt2d.Row)
+	sid := NewAxisID(rsmt2d.Row, 2, root, 1)
 
 	id, err := sid.Cid()
 	require.NoError(t, err)
 
-	assert.EqualValues(t, axisSamplingCodec, id.Prefix().Codec)
-	assert.EqualValues(t, axisSamplingMultihashCode, id.Prefix().MhType)
-	assert.EqualValues(t, AxisSampleIDSize, id.Prefix().MhLength)
+	assert.EqualValues(t, axisCodec, id.Prefix().Codec)
+	assert.EqualValues(t, axisMultihashCode, id.Prefix().MhType)
+	assert.EqualValues(t, AxisIDSize, id.Prefix().MhLength)
 
 	data, err := sid.MarshalBinary()
 	require.NoError(t, err)
 
-	sidOut := AxisSampleID{}
+	sidOut := AxisID{}
 	err = sidOut.UnmarshalBinary(data)
 	require.NoError(t, err)
 	assert.EqualValues(t, sid, sidOut)
diff --git a/share/ipldv2/axis_sample_id.go b/share/ipldv2/axis_sample_id.go
deleted file mode 100644
index f228c17ff0..0000000000
--- a/share/ipldv2/axis_sample_id.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package ipldv2
-
-import (
-	"encoding/binary"
-	"fmt"
-
-	"github.com/ipfs/go-cid"
-	mh "github.com/multiformats/go-multihash"
-
-	"github.com/celestiaorg/rsmt2d"
-
-	"github.com/celestiaorg/celestia-node/share"
-	ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb"
-)
-
-// AxisSampleIDSize is the size of the AxisSampleID in bytes
-const
AxisSampleIDSize = 45 - -// AxisSampleID is an unique identifier of a AxisSample. -type AxisSampleID struct { - // Height of the block. - // Needed to identify block's data square in the whole chain - Height uint64 - // AxisHash is the sha256 hash of a Col or Row root taken from DAH of the data square - AxisHash []byte - // Index is the index of the axis(row, col) in the data square - Index int - // Axis is Col or Row axis of the sample in the data square - Axis rsmt2d.Axis -} - -// NewAxisSampleID constructs a new AxisSampleID. -func NewAxisSampleID(height uint64, root *share.Root, idx int, axis rsmt2d.Axis) AxisSampleID { - dahroot := root.RowRoots[idx] - if axis == rsmt2d.Col { - dahroot = root.ColumnRoots[idx] - } - axisHash := hashBytes(dahroot) - - return AxisSampleID{ - Height: height, - AxisHash: axisHash, - Index: idx, - Axis: axis, - } -} - -// AxisSampleIDFromCID coverts CID to AxisSampleID. -func AxisSampleIDFromCID(cid cid.Cid) (id AxisSampleID, err error) { - if err = validateCID(cid); err != nil { - return id, err - } - - err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) - if err != nil { - return id, fmt.Errorf("while unmarhalling AxisSampleID: %w", err) - } - - return id, nil -} - -// AxisSampleIDFromProto converts from protobuf representation of AxisSampleID. -func AxisSampleIDFromProto(proto *ipldv2pb.AxisSampleID) AxisSampleID { - return AxisSampleID{ - Height: proto.Height, - AxisHash: proto.AxisHash, - Index: int(proto.Index), - Axis: rsmt2d.Axis(proto.Axis), - } -} - -// Cid returns sample ID encoded as CID. -func (s *AxisSampleID) Cid() (cid.Cid, error) { - data, err := s.MarshalBinary() - if err != nil { - return cid.Undef, err - } - - buf, err := mh.Encode(data, axisSamplingMultihashCode) - if err != nil { - return cid.Undef, err - } - - return cid.NewCidV1(axisSamplingCodec, buf), nil -} - -// Proto converts AxisSampleID to its protobuf representation. -func (s *AxisSampleID) Proto() *ipldv2pb.AxisSampleID { - return &ipldv2pb.AxisSampleID{ - Height: s.Height, - AxisHash: s.AxisHash, - Index: uint32(s.Index), - Axis: ipldv2pb.Axis(s.Axis), - } -} - -// MarshalBinary encodes AxisSampleID into binary form. -func (s *AxisSampleID) MarshalBinary() ([]byte, error) { - // we cannot use protobuf here because it exceeds multihash limit of 128 bytes - data := make([]byte, 0, AxisSampleIDSize) - data = binary.LittleEndian.AppendUint64(data, s.Height) - data = append(data, s.AxisHash...) - data = binary.LittleEndian.AppendUint32(data, uint32(s.Index)) - data = append(data, byte(s.Axis)) - return data, nil -} - -// UnmarshalBinary decodes AxisSampleID from binary form. -func (s *AxisSampleID) UnmarshalBinary(data []byte) error { - if len(data) != AxisSampleIDSize { - return fmt.Errorf("incorrect sample id size: %d != %d", len(data), AxisSampleIDSize) - } - - s.Height = binary.LittleEndian.Uint64(data) - s.AxisHash = append(s.AxisHash, data[8:8+hashSize]...) // copying data to avoid slice aliasing - s.Index = int(binary.LittleEndian.Uint32(data[8+hashSize : 8+hashSize+4])) - s.Axis = rsmt2d.Axis(data[8+hashSize+4]) - return nil -} - -// Validate validates fields of AxisSampleID. 
-func (s *AxisSampleID) Validate() error { - if s.Height == 0 { - return fmt.Errorf("zero Height") - } - - if len(s.AxisHash) != hashSize { - return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), hashSize) - } - - if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row { - return fmt.Errorf("incorrect Axis: %d", s.Axis) - } - - return nil -} diff --git a/share/ipldv2/axis_sample_test.go b/share/ipldv2/axis_test.go similarity index 83% rename from share/ipldv2/axis_sample_test.go rename to share/ipldv2/axis_test.go index c7b777e73c..91dfe495a5 100644 --- a/share/ipldv2/axis_sample_test.go +++ b/share/ipldv2/axis_test.go @@ -11,10 +11,10 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -func TestAxisSample(t *testing.T) { +func TestAxis(t *testing.T) { square := edstest.RandEDS(t, 2) - aid, err := NewAxisSampleFromEDS(1, square, 2, rsmt2d.Row) + aid, err := NewAxisFromEDS(rsmt2d.Row, 1, square, 2) require.NoError(t, err) data, err := aid.MarshalBinary() @@ -27,7 +27,7 @@ func TestAxisSample(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, blk.Cid(), cid) - sidOut := &AxisSample{} + sidOut := &Axis{} err = sidOut.UnmarshalBinary(data) require.NoError(t, err) assert.EqualValues(t, aid, sidOut) diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go index ae7104b071..1e8e408975 100644 --- a/share/ipldv2/blockstore.go +++ b/share/ipldv2/blockstore.go @@ -27,30 +27,30 @@ func NewBlockstore[F eds.File](fs fileStore[F]) blockstore.Blockstore { func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { switch cid.Type() { - case shareSamplingCodec: - id, err := ShareSampleIDFromCID(cid) + case sampleCodec: + id, err := SampleIDFromCID(cid) if err != nil { - err = fmt.Errorf("while converting CID to ShareSampleId: %w", err) + err = fmt.Errorf("while converting CID to SampleId: %w", err) log.Error(err) return nil, err } - blk, err := b.getShareSampleBlock(id) + blk, err := b.getSampleBlock(id) if err != nil { log.Error(err) return nil, err } return blk, nil - case axisSamplingCodec: - id, err := AxisSampleIDFromCID(cid) + case axisCodec: + id, err := AxisIDFromCID(cid) if err != nil { - err = fmt.Errorf("while converting CID to AxisSampleID: %w", err) + err = fmt.Errorf("while converting CID to AxisID: %w", err) log.Error(err) return nil, err } - blk, err := b.getAxisSampleBlock(id) + blk, err := b.getAxisBlock(id) if err != nil { log.Error(err) return nil, err @@ -62,18 +62,18 @@ func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) } } -func (b Blockstore[F]) getShareSampleBlock(id ShareSampleID) (blocks.Block, error) { +func (b Blockstore[F]) getSampleBlock(id SampleID) (blocks.Block, error) { f, err := b.fs.File(id.Height) if err != nil { return nil, fmt.Errorf("while getting EDS file from FS: %w", err) } - shr, prf, err := f.ShareWithProof(id.Index, id.Axis) + shr, prf, err := f.ShareWithProof(id.AxisType, int(id.AxisIndex), int(id.ShareIndex)) if err != nil { return nil, fmt.Errorf("while getting share with proof: %w", err) } - s := NewShareSample(id, shr, prf, f.Size()) + s := NewSample(id, shr, prf, f.Size()) blk, err := s.IPLDBlock() if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) @@ -87,18 +87,18 @@ func (b Blockstore[F]) getShareSampleBlock(id ShareSampleID) (blocks.Block, erro return blk, nil } -func (b Blockstore[F]) getAxisSampleBlock(id AxisSampleID) (blocks.Block, error) { +func (b Blockstore[F]) getAxisBlock(id AxisID) (blocks.Block, error) { f, 
err := b.fs.File(id.Height) if err != nil { return nil, fmt.Errorf("while getting EDS file from FS: %w", err) } - axisHalf, err := f.AxisHalf(id.Index, id.Axis) + axisHalf, err := f.AxisHalf(id.AxisType, int(id.AxisIndex)) if err != nil { return nil, fmt.Errorf("while getting axis half: %w", err) } - s := NewAxisSample(id, axisHalf) + s := NewAxis(id, axisHalf) blk, err := s.IPLDBlock() if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) @@ -114,7 +114,7 @@ func (b Blockstore[F]) getAxisSampleBlock(id AxisSampleID) (blocks.Block, error) func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { // TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and - // allocating ShareSample's block.Block. + // allocating Sample's block.Block. // NOTE:Bitswap uses GetSize also to determine if we have content stored or not // so simply returning constant size is not an option blk, err := b.Get(ctx, cid) @@ -128,19 +128,19 @@ func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { var height uint64 switch cid.Type() { - case shareSamplingCodec: - id, err := ShareSampleIDFromCID(cid) + case sampleCodec: + id, err := SampleIDFromCID(cid) if err != nil { - err = fmt.Errorf("while converting CID to ShareSampleID: %w", err) + err = fmt.Errorf("while converting CID to SampleID: %w", err) log.Error(err) return false, err } height = id.Height - case axisSamplingCodec: - id, err := AxisSampleIDFromCID(cid) + case axisCodec: + id, err := AxisIDFromCID(cid) if err != nil { - err = fmt.Errorf("while converting CID to AxisSampleID: %w", err) + err = fmt.Errorf("while converting CID to AxisID: %w", err) log.Error(err) return false, err } diff --git a/share/ipldv2/blockstore_test.go b/share/ipldv2/blockstore_test.go index 950d113490..0d3a0f84e0 100644 --- a/share/ipldv2/blockstore_test.go +++ b/share/ipldv2/blockstore_test.go @@ -25,18 +25,17 @@ func TestBlockstoreGetShareSample(t *testing.T) { b := edsBlockstore(sqr) - axis := []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} width := int(sqr.Width()) - for _, axis := range axis { + for _, axisType := range axisTypes { for i := 0; i < width*width; i++ { - id := NewShareSampleID(1, root, i, axis) + id := NewSampleID(axisType, i, root, 1) cid, err := id.Cid() require.NoError(t, err) blk, err := b.Get(ctx, cid) require.NoError(t, err) - sample, err := ShareSampleFromBlock(blk) + sample, err := SampleFromBlock(blk) require.NoError(t, err) err = sample.Validate() diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index eaa309c33c..6738da4f4c 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -13,19 +13,19 @@ import ( var log = logger.Logger("ipldv2") const ( - // shareSamplingCodec is a CID codec used for share sampling Bitswap requests over Namespaced + // sampleCodec is a CID codec used for share sampling Bitswap requests over Namespaced // Merkle Tree. - shareSamplingCodec = 0x7800 + sampleCodec = 0x7800 - // shareSamplingMultihashCode is the multihash code for share sampling multihash function. - shareSamplingMultihashCode = 0x7801 + // sampleMultihashCode is the multihash code for share sampling multihash function. + sampleMultihashCode = 0x7801 - // axisSamplingCodec is a CID codec used for axis sampling Bitswap requests over Namespaced Merkle + // axisCodec is a CID codec used for axis sampling Bitswap requests over Namespaced Merkle // Tree. 
- axisSamplingCodec = 0x7810 + axisCodec = 0x7810 - // axisSamplingMultihashCode is the multihash code for custom axis sampling multihash function. - axisSamplingMultihashCode = 0x7811 + // axisMultihashCode is the multihash code for custom axis sampling multihash function. + axisMultihashCode = 0x7811 // mhPrefixSize is the size of the multihash prefix that used to cut it off. mhPrefixSize = 4 @@ -38,11 +38,11 @@ var ( func init() { // Register hashers for new multihashes - mh.Register(shareSamplingMultihashCode, func() hash.Hash { - return &ShareSampleHasher{} + mh.Register(sampleMultihashCode, func() hash.Hash { + return &SampleHasher{} }) - mh.Register(axisSamplingMultihashCode, func() hash.Hash { - return &AxisSampleHasher{} + mh.Register(axisMultihashCode, func() hash.Hash { + return &AxisHasher{} }) } @@ -54,8 +54,8 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we disable all codes except home-baked code switch code { - case shareSamplingMultihashCode: - case axisSamplingMultihashCode: + case sampleMultihashCode: + case axisMultihashCode: default: return false } @@ -64,15 +64,15 @@ func (a allowlist) IsAllowed(code uint64) bool { func validateCID(cid cid.Cid) error { prefix := cid.Prefix() - if prefix.Codec != shareSamplingCodec && prefix.Codec != axisSamplingCodec { + if prefix.Codec != sampleCodec && prefix.Codec != axisCodec { return fmt.Errorf("unsupported codec %d", prefix.Codec) } - if prefix.MhType != shareSamplingMultihashCode && prefix.MhType != axisSamplingMultihashCode { + if prefix.MhType != sampleMultihashCode && prefix.MhType != axisMultihashCode { return fmt.Errorf("unsupported multihash %d", prefix.MhType) } - if prefix.MhLength != ShareSampleIDSize && prefix.MhLength != AxisSampleIDSize { + if prefix.MhLength != SampleIDSize && prefix.MhLength != AxisIDSize { return fmt.Errorf("invalid multihash length %d", prefix.MhLength) } diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 4e0a90f1ec..89a8b1bfce 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -23,9 +23,11 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -// TestShareSampleRoundtripGetBlock tests full protocol round trip of: -// EDS -> ShareSample -> IPLDBlock -> BlockService -> Bitswap and in reverse. -func TestShareSampleRoundtripGetBlock(t *testing.T) { +var axisTypes = []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} + +// TestSampleRoundtripGetBlock tests full protocol round trip of: +// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. 
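+// Both Row and Col axes are exercised for every share index in the square.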
+func TestSampleRoundtripGetBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() @@ -33,11 +35,10 @@ func TestShareSampleRoundtripGetBlock(t *testing.T) { b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) - axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} width := int(sqr.Width()) - for _, axis := range axis { + for _, axisType := range axisTypes { for i := 0; i < width*width; i++ { - smpl, err := NewShareSampleFromEDS(1, sqr, i, axis) + smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -47,7 +48,7 @@ func TestShareSampleRoundtripGetBlock(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, cid, blkOut.Cid()) - smpl, err = ShareSampleFromBlock(blkOut) + smpl, err = SampleFromBlock(blkOut) assert.NoError(t, err) err = smpl.Validate() // bitswap already performed validation and this is only for testing @@ -56,7 +57,7 @@ func TestShareSampleRoundtripGetBlock(t *testing.T) { } } -func TestShareSampleRoundtripGetBlocks(t *testing.T) { +func TestSampleRoundtripGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() @@ -65,11 +66,10 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { client := remoteClient(ctx, t, b) set := cid.NewSet() - axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} width := int(sqr.Width()) - for _, axis := range axis { + for _, axisType := range axisTypes { for i := 0; i < width*width; i++ { - smpl, err := NewShareSampleFromEDS(1, sqr, i, axis) + smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -85,7 +85,7 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) - smpl, err := ShareSampleFromBlock(blk) + smpl, err := SampleFromBlock(blk) assert.NoError(t, err) err = smpl.Validate() // bitswap already performed validation and this is only for testing @@ -98,7 +98,7 @@ func TestShareSampleRoundtripGetBlocks(t *testing.T) { assert.NoError(t, err) } -func TestAxisSampleRoundtripGetBlock(t *testing.T) { +func TestAxisRoundtripGetBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10000) defer cancel() @@ -106,11 +106,10 @@ func TestAxisSampleRoundtripGetBlock(t *testing.T) { b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) - axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} width := int(sqr.Width()) - for _, axis := range axis { + for _, axisType := range axisTypes { for i := 0; i < width; i++ { - smpl, err := NewAxisSampleFromEDS(1, sqr, i, axis) + smpl, err := NewAxisFromEDS(axisType, i, sqr, 1) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -120,7 +119,7 @@ func TestAxisSampleRoundtripGetBlock(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, cid, blkOut.Cid()) - smpl, err = AxisSampleFromBlock(blkOut) + smpl, err = AxisFromBlock(blkOut) assert.NoError(t, err) err = smpl.Validate() // bitswap already performed validation and this is only for testing @@ -129,7 +128,7 @@ func TestAxisSampleRoundtripGetBlock(t *testing.T) { } } -func TestAxisSampleRoundtripGetBlocks(t *testing.T) { +func TestAxisRoundtripGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() @@ -138,11 +137,10 @@ func TestAxisSampleRoundtripGetBlocks(t *testing.T) { client := remoteClient(ctx, t, b) set := cid.NewSet() - axis := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} width := int(sqr.Width()) - for _, 
axis := range axis { + for _, axisType := range axisTypes { for i := 0; i < width; i++ { - smpl, err := NewAxisSampleFromEDS(1, sqr, i, axis) + smpl, err := NewAxisFromEDS(axisType, i, sqr, 1) require.NoError(t, err) cid, err := smpl.ID.Cid() @@ -158,7 +156,7 @@ func TestAxisSampleRoundtripGetBlocks(t *testing.T) { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) - smpl, err := AxisSampleFromBlock(blk) + smpl, err := AxisFromBlock(blk) assert.NoError(t, err) err = smpl.Validate() // bitswap already performed validation and this is only for testing diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index cf10ca06d8..f94e7ac1a5 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -23,75 +23,75 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type Axis int32 +type AxisType int32 const ( - Axis_Row Axis = 0 - Axis_Col Axis = 1 + AxisType_Row AxisType = 0 + AxisType_Col AxisType = 1 ) -var Axis_name = map[int32]string{ +var AxisType_name = map[int32]string{ 0: "Row", 1: "Col", } -var Axis_value = map[string]int32{ +var AxisType_value = map[string]int32{ "Row": 0, "Col": 1, } -func (x Axis) String() string { - return proto.EnumName(Axis_name, int32(x)) +func (x AxisType) String() string { + return proto.EnumName(AxisType_name, int32(x)) } -func (Axis) EnumDescriptor() ([]byte, []int) { +func (AxisType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{0} } -type ShareSampleType int32 +type SampleType int32 const ( - ShareSampleType_Data ShareSampleType = 0 - ShareSampleType_Parity ShareSampleType = 1 + SampleType_Data SampleType = 0 + SampleType_Parity SampleType = 1 ) -var ShareSampleType_name = map[int32]string{ +var SampleType_name = map[int32]string{ 0: "Data", 1: "Parity", } -var ShareSampleType_value = map[string]int32{ +var SampleType_value = map[string]int32{ "Data": 0, "Parity": 1, } -func (x ShareSampleType) String() string { - return proto.EnumName(ShareSampleType_name, int32(x)) +func (x SampleType) String() string { + return proto.EnumName(SampleType_name, int32(x)) } -func (ShareSampleType) EnumDescriptor() ([]byte, []int) { +func (SampleType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{1} } -type ShareSampleID struct { - Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` - Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` - Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` +type AxisID struct { + Type AxisType `protobuf:"varint,1,opt,name=type,proto3,enum=AxisType" json:"type,omitempty"` + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` } -func (m *ShareSampleID) Reset() { *m = ShareSampleID{} } -func (m *ShareSampleID) String() string { return proto.CompactTextString(m) } -func (*ShareSampleID) ProtoMessage() {} -func (*ShareSampleID) Descriptor() ([]byte, []int) { +func (m *AxisID) Reset() { *m = AxisID{} } +func (m *AxisID) String() string { return proto.CompactTextString(m) } +func (*AxisID) ProtoMessage() {} +func (*AxisID) Descriptor() ([]byte, []int) { return 
fileDescriptor_cb41c3a4f982a271, []int{0} } -func (m *ShareSampleID) XXX_Unmarshal(b []byte) error { +func (m *AxisID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ShareSampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *AxisID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ShareSampleID.Marshal(b, m, deterministic) + return xxx_messageInfo_AxisID.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -101,65 +101,63 @@ func (m *ShareSampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (m *ShareSampleID) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShareSampleID.Merge(m, src) +func (m *AxisID) XXX_Merge(src proto.Message) { + xxx_messageInfo_AxisID.Merge(m, src) } -func (m *ShareSampleID) XXX_Size() int { +func (m *AxisID) XXX_Size() int { return m.Size() } -func (m *ShareSampleID) XXX_DiscardUnknown() { - xxx_messageInfo_ShareSampleID.DiscardUnknown(m) +func (m *AxisID) XXX_DiscardUnknown() { + xxx_messageInfo_AxisID.DiscardUnknown(m) } -var xxx_messageInfo_ShareSampleID proto.InternalMessageInfo +var xxx_messageInfo_AxisID proto.InternalMessageInfo -func (m *ShareSampleID) GetHeight() uint64 { +func (m *AxisID) GetType() AxisType { if m != nil { - return m.Height + return m.Type } - return 0 + return AxisType_Row } -func (m *ShareSampleID) GetAxisHash() []byte { +func (m *AxisID) GetHeight() uint64 { if m != nil { - return m.AxisHash + return m.Height } - return nil + return 0 } -func (m *ShareSampleID) GetIndex() uint32 { +func (m *AxisID) GetIndex() uint32 { if m != nil { return m.Index } return 0 } -func (m *ShareSampleID) GetAxis() Axis { +func (m *AxisID) GetHash() []byte { if m != nil { - return m.Axis + return m.Hash } - return Axis_Row + return nil } -type ShareSample struct { - Id *ShareSampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Type ShareSampleType `protobuf:"varint,2,opt,name=type,proto3,enum=ShareSampleType" json:"type,omitempty"` - Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` - Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` +type Axis struct { + Id *AxisID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AxisHalf [][]byte `protobuf:"bytes,2,rep,name=axis_half,json=axisHalf,proto3" json:"axis_half,omitempty"` } -func (m *ShareSample) Reset() { *m = ShareSample{} } -func (m *ShareSample) String() string { return proto.CompactTextString(m) } -func (*ShareSample) ProtoMessage() {} -func (*ShareSample) Descriptor() ([]byte, []int) { +func (m *Axis) Reset() { *m = Axis{} } +func (m *Axis) String() string { return proto.CompactTextString(m) } +func (*Axis) ProtoMessage() {} +func (*Axis) Descriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{1} } -func (m *ShareSample) XXX_Unmarshal(b []byte) error { +func (m *Axis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ShareSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Axis) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ShareSample.Marshal(b, m, deterministic) + return xxx_messageInfo_Axis.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -169,65 +167,49 @@ func (m *ShareSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m 
*ShareSample) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShareSample.Merge(m, src) +func (m *Axis) XXX_Merge(src proto.Message) { + xxx_messageInfo_Axis.Merge(m, src) } -func (m *ShareSample) XXX_Size() int { +func (m *Axis) XXX_Size() int { return m.Size() } -func (m *ShareSample) XXX_DiscardUnknown() { - xxx_messageInfo_ShareSample.DiscardUnknown(m) +func (m *Axis) XXX_DiscardUnknown() { + xxx_messageInfo_Axis.DiscardUnknown(m) } -var xxx_messageInfo_ShareSample proto.InternalMessageInfo +var xxx_messageInfo_Axis proto.InternalMessageInfo -func (m *ShareSample) GetId() *ShareSampleID { +func (m *Axis) GetId() *AxisID { if m != nil { return m.Id } return nil } -func (m *ShareSample) GetType() ShareSampleType { +func (m *Axis) GetAxisHalf() [][]byte { if m != nil { - return m.Type - } - return ShareSampleType_Data -} - -func (m *ShareSample) GetShare() []byte { - if m != nil { - return m.Share - } - return nil -} - -func (m *ShareSample) GetProof() *pb.Proof { - if m != nil { - return m.Proof + return m.AxisHalf } return nil } -type AxisSampleID struct { - Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - AxisHash []byte `protobuf:"bytes,2,opt,name=axis_hash,json=axisHash,proto3" json:"axis_hash,omitempty"` - Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` - Axis Axis `protobuf:"varint,4,opt,name=axis,proto3,enum=Axis" json:"axis,omitempty"` +type SampleID struct { + AxisId *AxisID `protobuf:"bytes,1,opt,name=axis_id,json=axisId,proto3" json:"axis_id,omitempty"` + ShareIndex uint32 `protobuf:"varint,2,opt,name=share_index,json=shareIndex,proto3" json:"share_index,omitempty"` } -func (m *AxisSampleID) Reset() { *m = AxisSampleID{} } -func (m *AxisSampleID) String() string { return proto.CompactTextString(m) } -func (*AxisSampleID) ProtoMessage() {} -func (*AxisSampleID) Descriptor() ([]byte, []int) { +func (m *SampleID) Reset() { *m = SampleID{} } +func (m *SampleID) String() string { return proto.CompactTextString(m) } +func (*SampleID) ProtoMessage() {} +func (*SampleID) Descriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{2} } -func (m *AxisSampleID) XXX_Unmarshal(b []byte) error { +func (m *SampleID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AxisSampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *SampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_AxisSampleID.Marshal(b, m, deterministic) + return xxx_messageInfo_SampleID.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -237,63 +219,51 @@ func (m *AxisSampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *AxisSampleID) XXX_Merge(src proto.Message) { - xxx_messageInfo_AxisSampleID.Merge(m, src) +func (m *SampleID) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleID.Merge(m, src) } -func (m *AxisSampleID) XXX_Size() int { +func (m *SampleID) XXX_Size() int { return m.Size() } -func (m *AxisSampleID) XXX_DiscardUnknown() { - xxx_messageInfo_AxisSampleID.DiscardUnknown(m) +func (m *SampleID) XXX_DiscardUnknown() { + xxx_messageInfo_SampleID.DiscardUnknown(m) } -var xxx_messageInfo_AxisSampleID proto.InternalMessageInfo +var xxx_messageInfo_SampleID proto.InternalMessageInfo -func (m *AxisSampleID) GetHeight() uint64 { +func (m *SampleID) GetAxisId() *AxisID { if m != nil { - return m.Height - } - return 0 -} - -func (m *AxisSampleID) 
GetAxisHash() []byte { - if m != nil { - return m.AxisHash + return m.AxisId } return nil } -func (m *AxisSampleID) GetIndex() uint32 { +func (m *SampleID) GetShareIndex() uint32 { if m != nil { - return m.Index + return m.ShareIndex } return 0 } -func (m *AxisSampleID) GetAxis() Axis { - if m != nil { - return m.Axis - } - return Axis_Row -} - -type AxisSample struct { - Id *AxisSampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AxisHalf [][]byte `protobuf:"bytes,2,rep,name=axis_half,json=axisHalf,proto3" json:"axis_half,omitempty"` +type Sample struct { + Id *SampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Type SampleType `protobuf:"varint,2,opt,name=type,proto3,enum=SampleType" json:"type,omitempty"` + Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` + Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` } -func (m *AxisSample) Reset() { *m = AxisSample{} } -func (m *AxisSample) String() string { return proto.CompactTextString(m) } -func (*AxisSample) ProtoMessage() {} -func (*AxisSample) Descriptor() ([]byte, []int) { +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{3} } -func (m *AxisSample) XXX_Unmarshal(b []byte) error { +func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AxisSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_AxisSample.Marshal(b, m, deterministic) + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -303,70 +273,86 @@ func (m *AxisSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *AxisSample) XXX_Merge(src proto.Message) { - xxx_messageInfo_AxisSample.Merge(m, src) +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) } -func (m *AxisSample) XXX_Size() int { +func (m *Sample) XXX_Size() int { return m.Size() } -func (m *AxisSample) XXX_DiscardUnknown() { - xxx_messageInfo_AxisSample.DiscardUnknown(m) +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) } -var xxx_messageInfo_AxisSample proto.InternalMessageInfo +var xxx_messageInfo_Sample proto.InternalMessageInfo -func (m *AxisSample) GetId() *AxisSampleID { +func (m *Sample) GetId() *SampleID { if m != nil { return m.Id } return nil } -func (m *AxisSample) GetAxisHalf() [][]byte { +func (m *Sample) GetType() SampleType { if m != nil { - return m.AxisHalf + return m.Type + } + return SampleType_Data +} + +func (m *Sample) GetShare() []byte { + if m != nil { + return m.Share + } + return nil +} + +func (m *Sample) GetProof() *pb.Proof { + if m != nil { + return m.Proof } return nil } func init() { - proto.RegisterEnum("Axis", Axis_name, Axis_value) - proto.RegisterEnum("ShareSampleType", ShareSampleType_name, ShareSampleType_value) - proto.RegisterType((*ShareSampleID)(nil), "ShareSampleID") - proto.RegisterType((*ShareSample)(nil), "ShareSample") - proto.RegisterType((*AxisSampleID)(nil), "AxisSampleID") - proto.RegisterType((*AxisSample)(nil), "AxisSample") + proto.RegisterEnum("AxisType", AxisType_name, AxisType_value) + proto.RegisterEnum("SampleType", SampleType_name, 
SampleType_value) + proto.RegisterType((*AxisID)(nil), "AxisID") + proto.RegisterType((*Axis)(nil), "Axis") + proto.RegisterType((*SampleID)(nil), "SampleID") + proto.RegisterType((*Sample)(nil), "Sample") } func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 351 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x92, 0xc1, 0x4a, 0xfb, 0x40, - 0x10, 0xc6, 0xb3, 0x69, 0xda, 0x7f, 0xff, 0xd3, 0x36, 0x0d, 0x8b, 0xc8, 0xaa, 0xb8, 0x84, 0xa2, - 0x18, 0x7a, 0x48, 0x21, 0x3e, 0x81, 0xda, 0x43, 0xbd, 0x95, 0xad, 0x77, 0xd9, 0x90, 0xd4, 0x2c, - 0x44, 0xb3, 0x24, 0x51, 0xdb, 0xb7, 0xe8, 0x63, 0x79, 0xec, 0xd1, 0xa3, 0xb4, 0x2f, 0x22, 0xbb, - 0x29, 0x24, 0xfa, 0x00, 0xde, 0xe6, 0xdb, 0x99, 0xe1, 0xfb, 0xcd, 0xc7, 0x02, 0x2d, 0x12, 0x9e, - 0xc7, 0x13, 0x21, 0xd3, 0xe8, 0x2d, 0x98, 0xc8, 0xf0, 0x50, 0xc9, 0xd0, 0x97, 0x79, 0x56, 0x66, - 0xa7, 0xb6, 0x0c, 0x27, 0x32, 0xcf, 0xb2, 0x65, 0xa5, 0x47, 0xaf, 0x30, 0x58, 0xa8, 0x8d, 0x05, - 0x7f, 0x96, 0x69, 0x7c, 0x3f, 0xc5, 0xc7, 0xd0, 0x49, 0x62, 0xf1, 0x94, 0x94, 0x04, 0xb9, 0xc8, - 0xb3, 0xd8, 0x41, 0xe1, 0x33, 0xf8, 0xcf, 0x57, 0xa2, 0x78, 0x4c, 0x78, 0x91, 0x10, 0xd3, 0x45, - 0x5e, 0x9f, 0x75, 0xd5, 0xc3, 0x8c, 0x17, 0x09, 0x3e, 0x82, 0xb6, 0x78, 0x89, 0xe2, 0x15, 0x69, - 0xb9, 0xc8, 0x1b, 0xb0, 0x4a, 0xe0, 0x13, 0xb0, 0xd4, 0x04, 0xb1, 0x5c, 0xe4, 0xd9, 0x41, 0xdb, - 0xbf, 0x59, 0x89, 0x82, 0xe9, 0xa7, 0xd1, 0x06, 0x41, 0xaf, 0xe1, 0x8b, 0x29, 0x98, 0x22, 0xd2, - 0x8e, 0xbd, 0xc0, 0xf6, 0x7f, 0x10, 0x31, 0x53, 0x44, 0xf8, 0x02, 0xac, 0x72, 0x2d, 0x63, 0x6d, - 0x6c, 0x07, 0x4e, 0x73, 0xe2, 0x61, 0x2d, 0x63, 0xa6, 0xbb, 0x0a, 0x43, 0x9f, 0xaf, 0x31, 0xfa, - 0xac, 0x12, 0xf8, 0x12, 0xda, 0xfa, 0x62, 0xcd, 0xd1, 0x0b, 0x86, 0xfe, 0xe1, 0xfe, 0xd0, 0x9f, - 0xab, 0x82, 0x55, 0xdd, 0x51, 0x09, 0x7d, 0x05, 0xf8, 0xc7, 0x41, 0xcc, 0x00, 0x6a, 0x57, 0x7c, - 0xde, 0x88, 0x61, 0xe0, 0x37, 0x71, 0x74, 0x0a, 0xb5, 0x75, 0xba, 0x24, 0xa6, 0xdb, 0xaa, 0xad, - 0xd3, 0xe5, 0x98, 0x80, 0xa5, 0x16, 0xf0, 0x3f, 0x68, 0xb1, 0xec, 0xdd, 0x31, 0x54, 0x71, 0x97, - 0xa5, 0x0e, 0x1a, 0x5f, 0xc1, 0xf0, 0x57, 0x5e, 0xb8, 0x0b, 0xd6, 0x94, 0x97, 0xdc, 0x31, 0x30, - 0x40, 0x67, 0xce, 0x73, 0x51, 0xae, 0x1d, 0x74, 0x4b, 0x3e, 0x76, 0x14, 0x6d, 0x77, 0x14, 0x7d, - 0xed, 0x28, 0xda, 0xec, 0xa9, 0xb1, 0xdd, 0x53, 0xe3, 0x73, 0x4f, 0x8d, 0xb0, 0xa3, 0x7f, 0xcb, - 0xf5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x6e, 0xfb, 0x99, 0x5f, 0x02, 0x00, 0x00, -} - -func (m *ShareSampleID) Marshal() (dAtA []byte, err error) { + // 371 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x51, 0x4f, 0x6b, 0xab, 0x40, + 0x1c, 0x74, 0x8d, 0x31, 0xe6, 0x67, 0x5e, 0x9e, 0x2c, 0xe1, 0x3d, 0xfb, 0xcf, 0x88, 0x50, 0x90, + 0x1c, 0x0c, 0xd8, 0x6b, 0x2f, 0x6d, 0x73, 0xa8, 0x87, 0x42, 0xd8, 0xf6, 0x1e, 0x56, 0x34, 0x75, + 0xc1, 0xd6, 0x45, 0xa5, 0x4d, 0x8e, 0xfd, 0x06, 0xfd, 0x58, 0x3d, 0xe6, 0xd8, 0x63, 0x49, 0xbe, + 0x48, 0x71, 0xd7, 0xd0, 0x40, 0x6f, 0x33, 0xb3, 0x32, 0x33, 0xbf, 0x11, 0x9c, 0x2a, 0xa3, 0x65, + 0x3a, 0x65, 0x3c, 0x4f, 0x5e, 0xc2, 0x29, 0x8f, 0x5b, 0xc4, 0xe3, 0x80, 0x97, 0x45, 0x5d, 0x1c, + 0x0f, 0x79, 0x3c, 0xe5, 0x65, 0x51, 0x2c, 0x25, 0xf7, 0x18, 0xe8, 0x57, 0x2b, 0x56, 0x45, 0x33, + 0x7c, 0x06, 0x5a, 0xbd, 0xe6, 0xa9, 0x8d, 0x5c, 0xe4, 0x0f, 0xc3, 0x7e, 0xd0, 0xc8, 0x0f, 0x6b, + 0x9e, 0x12, 0x21, 0xe3, 0x7f, 0xa0, 0x67, 0x29, 0x7b, 0xcc, 0x6a, 0x5b, 0x75, 0x91, 0xaf, 0x91, + 
0x96, 0xe1, 0x11, 0x74, 0xd9, 0x73, 0x92, 0xae, 0xec, 0x8e, 0x8b, 0xfc, 0x3f, 0x44, 0x12, 0x8c, + 0x41, 0xcb, 0x68, 0x95, 0xd9, 0x9a, 0x8b, 0xfc, 0x01, 0x11, 0xd8, 0xbb, 0x04, 0xad, 0xf1, 0xc4, + 0xff, 0x41, 0x65, 0x89, 0x88, 0x31, 0xc3, 0x5e, 0x20, 0xd3, 0x89, 0xca, 0x12, 0x7c, 0x02, 0x7d, + 0xba, 0x62, 0xd5, 0x22, 0xa3, 0xf9, 0xd2, 0x56, 0xdd, 0x8e, 0x3f, 0x20, 0x46, 0x23, 0xdc, 0xd2, + 0x7c, 0xe9, 0xdd, 0x81, 0x71, 0x4f, 0x9f, 0x78, 0x9e, 0x46, 0x33, 0xec, 0x42, 0x4f, 0x7c, 0xf8, + 0xdb, 0x46, 0x6f, 0xf4, 0x28, 0xc1, 0x63, 0x30, 0xc5, 0x10, 0x0b, 0xd9, 0x4d, 0x15, 0xdd, 0x40, + 0x48, 0x51, 0xa3, 0x78, 0x6f, 0x08, 0x74, 0xe9, 0x87, 0x8f, 0x0e, 0xfa, 0xf4, 0x83, 0x7d, 0x88, + 0x68, 0x34, 0x6e, 0x37, 0x51, 0xc5, 0x26, 0x66, 0xfb, 0x78, 0xb0, 0xca, 0x08, 0xba, 0xc2, 0x54, + 0x5c, 0x3f, 0x20, 0x92, 0xe0, 0x73, 0xe8, 0x8a, 0x8d, 0xc5, 0xf9, 0x66, 0xf8, 0x37, 0x68, 0x17, + 0x8f, 0x83, 0x79, 0x03, 0x88, 0x7c, 0x9d, 0x9c, 0x82, 0xb1, 0x1f, 0x19, 0xf7, 0xa0, 0x43, 0x8a, + 0x57, 0x4b, 0x69, 0xc0, 0x4d, 0x91, 0x5b, 0x68, 0xe2, 0x01, 0xfc, 0xc4, 0x61, 0x03, 0xb4, 0x19, + 0xad, 0xa9, 0xa5, 0x60, 0x00, 0x7d, 0x4e, 0x4b, 0x56, 0xaf, 0x2d, 0x74, 0x6d, 0x7f, 0x6c, 0x1d, + 0xb4, 0xd9, 0x3a, 0xe8, 0x6b, 0xeb, 0xa0, 0xf7, 0x9d, 0xa3, 0x6c, 0x76, 0x8e, 0xf2, 0xb9, 0x73, + 0x94, 0x58, 0x17, 0xbf, 0xf7, 0xe2, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xb7, 0xed, 0xa5, 0x10, + 0x02, 0x00, 0x00, +} + +func (m *AxisID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -376,42 +362,42 @@ func (m *ShareSampleID) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShareSampleID) MarshalTo(dAtA []byte) (int, error) { +func (m *AxisID) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ShareSampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AxisID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Axis != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Axis)) + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Hash))) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x22 } if m.Index != 0 { i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Index)) i-- dAtA[i] = 0x18 } - if len(m.AxisHash) > 0 { - i -= len(m.AxisHash) - copy(dAtA[i:], m.AxisHash) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHash))) - i-- - dAtA[i] = 0x12 - } if m.Height != 0 { i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Height)) i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Type)) + i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *ShareSample) Marshal() (dAtA []byte, err error) { +func (m *Axis) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -421,39 +407,24 @@ func (m *ShareSample) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShareSample) MarshalTo(dAtA []byte) (int, error) { +func (m *Axis) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ShareSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Axis) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + if 
len(m.AxisHalf) > 0 { + for iNdEx := len(m.AxisHalf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AxisHalf[iNdEx]) + copy(dAtA[i:], m.AxisHalf[iNdEx]) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHalf[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x22 - } - if len(m.Share) > 0 { - i -= len(m.Share) - copy(dAtA[i:], m.Share) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Share))) - i-- - dAtA[i] = 0x1a - } - if m.Type != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 } if m.Id != nil { { @@ -470,7 +441,7 @@ func (m *ShareSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *AxisSampleID) Marshal() (dAtA []byte, err error) { +func (m *SampleID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -480,42 +451,37 @@ func (m *AxisSampleID) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AxisSampleID) MarshalTo(dAtA []byte) (int, error) { +func (m *SampleID) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AxisSampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Axis != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Axis)) - i-- - dAtA[i] = 0x20 - } - if m.Index != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Index)) + if m.ShareIndex != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.ShareIndex)) i-- - dAtA[i] = 0x18 - } - if len(m.AxisHash) > 0 { - i -= len(m.AxisHash) - copy(dAtA[i:], m.AxisHash) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHash))) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if m.Height != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Height)) + if m.AxisId != nil { + { + size, err := m.AxisId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *AxisSample) Marshal() (dAtA []byte, err error) { +func (m *Sample) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -525,24 +491,39 @@ func (m *AxisSample) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AxisSample) MarshalTo(dAtA []byte) (int, error) { +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AxisSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.AxisHalf) > 0 { - for iNdEx := len(m.AxisHalf) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AxisHalf[iNdEx]) - copy(dAtA[i:], m.AxisHalf[iNdEx]) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHalf[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 + } + if len(m.Share) > 0 { + i -= len(m.Share) + copy(dAtA[i:], m.Share) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Share))) + i-- + dAtA[i] = 0x1a + } + if m.Type != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 } if m.Id != nil { { @@ -570,29 +551,29 @@ func encodeVarintIpldv2Pb(dAtA 
[]byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ShareSampleID) Size() (n int) { +func (m *AxisID) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Type != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Type)) + } if m.Height != 0 { n += 1 + sovIpldv2Pb(uint64(m.Height)) } - l = len(m.AxisHash) - if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) - } if m.Index != 0 { n += 1 + sovIpldv2Pb(uint64(m.Index)) } - if m.Axis != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Axis)) + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) } return n } -func (m *ShareSample) Size() (n int) { +func (m *Axis) Size() (n int) { if m == nil { return 0 } @@ -602,43 +583,32 @@ func (m *ShareSample) Size() (n int) { l = m.Id.Size() n += 1 + l + sovIpldv2Pb(uint64(l)) } - if m.Type != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Type)) - } - l = len(m.Share) - if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) - } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovIpldv2Pb(uint64(l)) + if len(m.AxisHalf) > 0 { + for _, b := range m.AxisHalf { + l = len(b) + n += 1 + l + sovIpldv2Pb(uint64(l)) + } } return n } -func (m *AxisSampleID) Size() (n int) { +func (m *SampleID) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Height != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Height)) - } - l = len(m.AxisHash) - if l > 0 { + if m.AxisId != nil { + l = m.AxisId.Size() n += 1 + l + sovIpldv2Pb(uint64(l)) } - if m.Index != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Index)) - } - if m.Axis != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Axis)) + if m.ShareIndex != 0 { + n += 1 + sovIpldv2Pb(uint64(m.ShareIndex)) } return n } -func (m *AxisSample) Size() (n int) { +func (m *Sample) Size() (n int) { if m == nil { return 0 } @@ -648,11 +618,16 @@ func (m *AxisSample) Size() (n int) { l = m.Id.Size() n += 1 + l + sovIpldv2Pb(uint64(l)) } - if len(m.AxisHalf) > 0 { - for _, b := range m.AxisHalf { - l = len(b) - n += 1 + l + sovIpldv2Pb(uint64(l)) - } + if m.Type != 0 { + n += 1 + sovIpldv2Pb(uint64(m.Type)) + } + l = len(m.Share) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovIpldv2Pb(uint64(l)) } return n } @@ -663,7 +638,7 @@ func sovIpldv2Pb(x uint64) (n int) { func sozIpldv2Pb(x uint64) (n int) { return sovIpldv2Pb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *ShareSampleID) Unmarshal(dAtA []byte) error { +func (m *AxisID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -686,17 +661,17 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShareSampleID: wiretype end group for non-group") + return fmt.Errorf("proto: AxisID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShareSampleID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AxisID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Height = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -706,16 +681,16 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= uint64(b&0x7F) << shift + m.Type |= AxisType(b&0x7F) << shift if b < 0x80 { break } } case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AxisHash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var byteLen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -725,26 +700,11 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AxisHash = append(m.AxisHash[:0], dAtA[iNdEx:postIndex]...) - if m.AxisHash == nil { - m.AxisHash = []byte{} - } - iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) @@ -765,10 +725,10 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { } } case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Axis", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - m.Axis = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -778,11 +738,26 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Axis |= Axis(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) @@ -804,7 +779,7 @@ func (m *ShareSampleID) Unmarshal(dAtA []byte) error { } return nil } -func (m *ShareSample) Unmarshal(dAtA []byte) error { +func (m *Axis) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -827,10 +802,10 @@ func (m *ShareSample) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShareSample: wiretype end group for non-group") + return fmt.Errorf("proto: Axis: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShareSample: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Axis: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -863,34 +838,15 @@ func (m *ShareSample) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Id == nil { - m.Id = &ShareSampleID{} + m.Id = &AxisID{} } if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= ShareSampleType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Share", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AxisHalf", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -917,46 +873,8 @@ func (m *ShareSample) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Share = append(m.Share[:0], dAtA[iNdEx:postIndex]...) 
- if m.Share == nil { - m.Share = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIpldv2Pb - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Proof == nil { - m.Proof = &pb.Proof{} - } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AxisHalf = append(m.AxisHalf, make([]byte, postIndex-iNdEx)) + copy(m.AxisHalf[len(m.AxisHalf)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -979,7 +897,7 @@ func (m *ShareSample) Unmarshal(dAtA []byte) error { } return nil } -func (m *AxisSampleID) Unmarshal(dAtA []byte) error { +func (m *SampleID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1002,36 +920,17 @@ func (m *AxisSampleID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AxisSampleID: wiretype end group for non-group") + return fmt.Errorf("proto: SampleID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AxisSampleID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SampleID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AxisHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AxisId", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -1041,50 +940,33 @@ func (m *AxisSampleID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthIpldv2Pb } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIpldv2Pb } if postIndex > l { return io.ErrUnexpectedEOF } - m.AxisHash = append(m.AxisHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.AxisHash == nil { - m.AxisHash = []byte{} + if m.AxisId == nil { + m.AxisId = &AxisID{} } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.AxisId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 4: + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Axis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShareIndex", wireType) } - m.Axis = 0 + m.ShareIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -1094,7 +976,7 @@ func (m *AxisSampleID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Axis |= Axis(b&0x7F) << shift + m.ShareIndex |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -1120,7 +1002,7 @@ func (m *AxisSampleID) Unmarshal(dAtA []byte) error { } return nil } -func (m *AxisSample) Unmarshal(dAtA []byte) error { +func (m *Sample) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1143,10 +1025,10 @@ func (m *AxisSample) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AxisSample: wiretype end group for non-group") + return fmt.Errorf("proto: Sample: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AxisSample: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1179,15 +1061,34 @@ func (m *AxisSample) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Id == nil { - m.Id = &AxisSampleID{} + m.Id = &SampleID{} } if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SampleType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AxisHalf", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Share", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -1214,8 +1115,46 @@ func (m *AxisSample) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AxisHalf = append(m.AxisHalf, make([]byte, postIndex-iNdEx)) - copy(m.AxisHalf[len(m.AxisHalf)-1], dAtA[iNdEx:postIndex]) + m.Share = append(m.Share[:0], dAtA[iNdEx:postIndex]...) 
+	if m.Share == nil {
+		m.Share = []byte{}
+	}
+	iNdEx = postIndex
+	case 4:
+		if wireType != 2 {
+			return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+		}
+		var msglen int
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowIpldv2Pb
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			msglen |= int(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		if msglen < 0 {
+			return ErrInvalidLengthIpldv2Pb
+		}
+		postIndex := iNdEx + msglen
+		if postIndex < 0 {
+			return ErrInvalidLengthIpldv2Pb
+		}
+		if postIndex > l {
+			return io.ErrUnexpectedEOF
+		}
+		if m.Proof == nil {
+			m.Proof = &pb.Proof{}
+		}
+		if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			return err
+		}
 		iNdEx = postIndex
 	default:
 		iNdEx = preIndex
diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto
index e9ee86f7b5..40dae74746 100644
--- a/share/ipldv2/pb/ipldv2pb.proto
+++ b/share/ipldv2/pb/ipldv2pb.proto
@@ -2,38 +2,36 @@ syntax = "proto3";
 
 import "pb/proof.proto";
 
-enum Axis {
+enum AxisType {
   Row = 0;
   Col = 1;
 }
 
-enum ShareSampleType {
-  Data = 0;
-  Parity = 1;
+message AxisID{
+  AxisType type = 1;
+  uint64 height = 2;
+  uint32 index = 3; // TODO(@Wondertan): uint16 would be enough, but proto3 does not support it
+  bytes hash = 4;
 }
 
-message ShareSampleID{
-  uint64 height = 1;
-  bytes axis_hash = 2; // TODO(@Wondertan): Redundant, but has to be sent due to Bitswap's stateless verification requirement
-  uint32 index = 3;
-  Axis axis = 4;
+message Axis {
+  AxisID id = 1;
+  repeated bytes axis_half = 2;
 }
 
-message ShareSample {
-  ShareSampleID id = 1;
-  ShareSampleType type = 2;
-  bytes share = 3;
-  proof.pb.Proof proof = 4;
+enum SampleType {
+  Data = 0;
+  Parity = 1;
 }
 
-message AxisSampleID{
-  uint64 height = 1;
-  bytes axis_hash = 2; // TODO(@Wondertan): Redundant, but has to be sent due to Bitswap's stateless verification requirement
-  uint32 index = 3; // TODO(@Wondertan): uint16 would be enough, but proto3 doest not support it
-  Axis axis = 4;
+message SampleID{
+  AxisID axis_id = 1;
+  uint32 share_index = 2; // TODO: uint16 would be enough, but proto3 does not support it
 }
 
-message AxisSample {
-  AxisSampleID id = 1;
-  repeated bytes axis_half = 2;
+message Sample {
+  SampleID id = 1;
+  SampleType type = 2;
+  bytes share = 3;
+  proof.pb.Proof proof = 4;
 }
diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go
new file mode 100644
index 0000000000..2c4329f0f5
--- /dev/null
+++ b/share/ipldv2/sample_id.go
@@ -0,0 +1,115 @@
+package ipldv2
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+	ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb"
+)
+
+// SampleIDSize is the size of the SampleID in bytes.
+const SampleIDSize = AxisIDSize + 2
+
+// SampleID is a unique identifier of a Sample.
+type SampleID struct {
+	AxisID
+
+	// ShareIndex is the index of the sampled share in the axis
+	ShareIndex uint16
+}
+
+// NewSampleID constructs a new SampleID.
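+// The flat share index idx is decomposed in row-major order into an axis
+// index and a share index within that axis; for Col axes the two are
+// swapped. For example, in an 8x8 EDS, idx 10 maps to (axis 1, share 2)
+// for Row and (axis 2, share 1) for Col.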
+func NewSampleID(axisType rsmt2d.Axis, idx int, root *share.Root, height uint64) SampleID {
+	sqrLn := len(root.RowRoots)
+	axsIdx, shrIdx := idx/sqrLn, idx%sqrLn
+	if axisType == rsmt2d.Col {
+		axsIdx, shrIdx = shrIdx, axsIdx
+	}
+
+	return SampleID{
+		AxisID:     NewAxisID(axisType, uint16(axsIdx), root, height),
+		ShareIndex: uint16(shrIdx),
+	}
+}
+
+// SampleIDFromCID converts CID to SampleID.
+func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) {
+	if err = validateCID(cid); err != nil {
+		return id, err
+	}
+
+	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
+	if err != nil {
+		return id, fmt.Errorf("while unmarshalling SampleID: %w", err)
+	}
+
+	return id, nil
+}
+
+// SampleIDFromProto converts from protobuf representation of SampleID.
+func SampleIDFromProto(proto *ipldv2pb.SampleID) SampleID {
+	return SampleID{
+		AxisID:     AxisIDFromProto(proto.AxisId),
+		ShareIndex: uint16(proto.ShareIndex),
+	}
+}
+
+// Cid returns sample ID encoded as CID.
+func (s *SampleID) Cid() (cid.Cid, error) {
+	// avoid using proto serialization for CID as it's not deterministic
+	data, err := s.MarshalBinary()
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	buf, err := mh.Encode(data, sampleMultihashCode)
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	return cid.NewCidV1(sampleCodec, buf), nil
+}
+
+// Proto converts SampleID to its protobuf representation.
+func (s *SampleID) Proto() *ipldv2pb.SampleID {
+	return &ipldv2pb.SampleID{
+		AxisId:     s.AxisID.Proto(),
+		ShareIndex: uint32(s.ShareIndex),
+	}
+}
+
+// MarshalBinary encodes SampleID into binary form.
+func (s *SampleID) MarshalBinary() ([]byte, error) {
+	data := make([]byte, 0, SampleIDSize)
+	n, err := s.AxisID.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	data = data[:n]
+	data = binary.LittleEndian.AppendUint16(data, s.ShareIndex)
+	return data, nil
+}
+
+// UnmarshalBinary decodes SampleID from binary form.
+func (s *SampleID) UnmarshalBinary(data []byte) error {
+	if len(data) != SampleIDSize {
+		return fmt.Errorf("invalid data length: %d != %d", len(data), SampleIDSize)
+	}
+	n, err := s.AxisID.UnmarshalFrom(data)
+	if err != nil {
+		return err
+	}
+	s.ShareIndex = binary.LittleEndian.Uint16(data[n:])
+	return nil
+}
+
+// Validate validates fields of SampleID.
+func (s *SampleID) Validate() error { + return s.AxisID.Validate() +} diff --git a/share/ipldv2/share_sample_id_test.go b/share/ipldv2/sample_id_test.go similarity index 66% rename from share/ipldv2/share_sample_id_test.go rename to share/ipldv2/sample_id_test.go index 410fda3674..443dd3d5f1 100644 --- a/share/ipldv2/share_sample_id_test.go +++ b/share/ipldv2/sample_id_test.go @@ -12,24 +12,24 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -func TestShareSampleID(t *testing.T) { +func TestSampleID(t *testing.T) { square := edstest.RandEDS(t, 2) root, err := share.NewRoot(square) require.NoError(t, err) - sid := NewShareSampleID(1, root, 2, rsmt2d.Row) + sid := NewSampleID(rsmt2d.Row, 2, root, 1) id, err := sid.Cid() require.NoError(t, err) - assert.EqualValues(t, shareSamplingCodec, id.Prefix().Codec) - assert.EqualValues(t, shareSamplingMultihashCode, id.Prefix().MhType) - assert.EqualValues(t, ShareSampleIDSize, id.Prefix().MhLength) + assert.EqualValues(t, sampleCodec, id.Prefix().Codec) + assert.EqualValues(t, sampleMultihashCode, id.Prefix().MhType) + assert.EqualValues(t, SampleIDSize, id.Prefix().MhLength) data, err := sid.MarshalBinary() require.NoError(t, err) - sidOut := ShareSampleID{} + sidOut := SampleID{} err = sidOut.UnmarshalBinary(data) require.NoError(t, err) assert.EqualValues(t, sid, sidOut) diff --git a/share/ipldv2/share_sample.go b/share/ipldv2/share_sample.go index 1204f07507..4aefa3f41d 100644 --- a/share/ipldv2/share_sample.go +++ b/share/ipldv2/share_sample.go @@ -15,37 +15,36 @@ import ( ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) -// ShareSampleType represents type of sample. -type ShareSampleType uint8 +// SampleType represents type of sample. +type SampleType uint8 const ( - // DataShareSample is a sample of a data share. - DataShareSample ShareSampleType = iota - // ParityShareSample is a sample of a parity share. - ParityShareSample + // DataSample is a sample of a data share. + DataSample SampleType = iota + // ParitySample is a sample of a parity share. + ParitySample ) -// ShareSample represents a sample of an NMT in EDS. -type ShareSample struct { - // ID of the ShareSample - ID ShareSampleID - // Type of the ShareSample - Type ShareSampleType +// Sample represents a sample of an NMT in EDS. +type Sample struct { + // ID of the Sample + ID SampleID + // Type of the Sample + Type SampleType // Proof of Share inclusion in the NMT Proof nmt.Proof // Share being sampled Share share.Share } -// NewShareSample constructs a new ShareSample. -func NewShareSample(id ShareSampleID, shr share.Share, proof nmt.Proof, sqrLn int) *ShareSample { - row, col := id.Index/sqrLn, id.Index%sqrLn - tp := ParityShareSample - if row < sqrLn/2 && col < sqrLn/2 { - tp = DataShareSample +// NewSample constructs a new Sample. +func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample { + tp := ParitySample + if int(id.AxisIndex) < sqrLn/2 && int(id.ShareIndex) < sqrLn/2 { + tp = DataSample } - return &ShareSample{ + return &Sample{ ID: id, Type: tp, Proof: proof, @@ -53,19 +52,19 @@ func NewShareSample(id ShareSampleID, shr share.Share, proof nmt.Proof, sqrLn in } } -// NewShareSampleFromEDS samples the EDS and constructs a new ShareSample. -func NewShareSampleFromEDS( - height uint64, - eds *rsmt2d.ExtendedDataSquare, +// NewSampleFromEDS samples the EDS and constructs a new Sample. 
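+// The axisType selects whether idx is resolved against a Row or a Col of
+// the square. A typical call, mirroring the roundtrip tests (eds is assumed
+// to be an *rsmt2d.ExtendedDataSquare in scope; the height of 1 is a
+// placeholder):
+//
+//	smpl, err := NewSampleFromEDS(rsmt2d.Row, 3, eds, 1)
+//	if err != nil {
+//		return err
+//	}
+//	cid, err := smpl.ID.Cid()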
+func NewSampleFromEDS( + axisType rsmt2d.Axis, idx int, - axis rsmt2d.Axis, -) (*ShareSample, error) { + eds *rsmt2d.ExtendedDataSquare, + height uint64, +) (*Sample, error) { sqrLn := int(eds.Width()) axisIdx, shrIdx := idx/sqrLn, idx%sqrLn // TODO(@Wondertan): Should be an rsmt2d method var shrs [][]byte - switch axis { + switch axisType { case rsmt2d.Row: shrs = eds.Row(uint(axisIdx)) case rsmt2d.Col: @@ -93,12 +92,12 @@ func NewShareSampleFromEDS( return nil, fmt.Errorf("while proving range share over NMT: %w", err) } - id := NewShareSampleID(height, root, idx, axis) - return NewShareSample(id, shrs[shrIdx], prf, len(root.RowRoots)), nil + id := NewSampleID(axisType, idx, root, height) + return NewSample(id, shrs[shrIdx], prf, len(root.RowRoots)), nil } -// Proto converts ShareSample to its protobuf representation. -func (s *ShareSample) Proto() *ipldv2pb.ShareSample { +// Proto converts Sample to its protobuf representation. +func (s *Sample) Proto() *ipldv2pb.Sample { // TODO: Extract as helper to nmt proof := &nmtpb.Proof{} proof.Nodes = s.Proof.Nodes() @@ -107,31 +106,31 @@ func (s *ShareSample) Proto() *ipldv2pb.ShareSample { proof.IsMaxNamespaceIgnored = s.Proof.IsMaxNamespaceIDIgnored() proof.LeafHash = s.Proof.LeafHash() - return &ipldv2pb.ShareSample{ + return &ipldv2pb.Sample{ Id: s.ID.Proto(), - Type: ipldv2pb.ShareSampleType(s.Type), + Type: ipldv2pb.SampleType(s.Type), Proof: proof, Share: s.Share, } } -// ShareSampleFromBlock converts blocks.Block into ShareSample. -func ShareSampleFromBlock(blk blocks.Block) (*ShareSample, error) { +// SampleFromBlock converts blocks.Block into Sample. +func SampleFromBlock(blk blocks.Block) (*Sample, error) { if err := validateCID(blk.Cid()); err != nil { return nil, err } - s := &ShareSample{} + s := &Sample{} err := s.UnmarshalBinary(blk.RawData()) if err != nil { - return nil, fmt.Errorf("while unmarshalling ShareSample: %w", err) + return nil, fmt.Errorf("while unmarshalling Sample: %w", err) } return s, nil } -// IPLDBlock converts ShareSample to an IPLD block for Bitswap compatibility. -func (s *ShareSample) IPLDBlock() (blocks.Block, error) { +// IPLDBlock converts Sample to an IPLD block for Bitswap compatibility. +func (s *Sample) IPLDBlock() (blocks.Block, error) { cid, err := s.ID.Cid() if err != nil { return nil, err @@ -145,37 +144,37 @@ func (s *ShareSample) IPLDBlock() (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -// MarshalBinary marshals ShareSample to binary. -func (s *ShareSample) MarshalBinary() ([]byte, error) { +// MarshalBinary marshals Sample to binary. +func (s *Sample) MarshalBinary() ([]byte, error) { return s.Proto().Marshal() } -// UnmarshalBinary unmarshal ShareSample from binary. -func (s *ShareSample) UnmarshalBinary(data []byte) error { - proto := &ipldv2pb.ShareSample{} +// UnmarshalBinary unmarshal Sample from binary. +func (s *Sample) UnmarshalBinary(data []byte) error { + proto := &ipldv2pb.Sample{} if err := proto.Unmarshal(data); err != nil { return err } - s.ID = ShareSampleIDFromProto(proto.Id) - s.Type = ShareSampleType(proto.Type) + s.ID = SampleIDFromProto(proto.Id) + s.Type = SampleType(proto.Type) s.Proof = nmt.ProtoToProof(*proto.Proof) s.Share = proto.Share return nil } -// Validate validates ShareSample's fields and proof of Share inclusion in the NMT. -func (s *ShareSample) Validate() error { +// Validate validates Sample's fields and proof of Share inclusion in the NMT. 
+func (s *Sample) Validate() error { if err := s.ID.Validate(); err != nil { return err } - if s.Type != DataShareSample && s.Type != ParityShareSample { + if s.Type != DataSample && s.Type != ParitySample { return fmt.Errorf("incorrect sample type: %d", s.Type) } namespace := share.ParitySharesNamespace - if s.Type == DataShareSample { + if s.Type == DataSample { namespace = share.GetNamespace(s.Share) } diff --git a/share/ipldv2/share_sample_hasher.go b/share/ipldv2/share_sample_hasher.go index 98bb18db24..572f0673ca 100644 --- a/share/ipldv2/share_sample_hasher.go +++ b/share/ipldv2/share_sample_hasher.go @@ -5,21 +5,21 @@ import ( "fmt" ) -// ShareSampleHasher implements hash.Hash interface for Samples. -type ShareSampleHasher struct { - sample ShareSample +// SampleHasher implements hash.Hash interface for Samples. +type SampleHasher struct { + sample Sample } -// Write expects a marshaled ShareSample to validate. -func (sh *ShareSampleHasher) Write(data []byte) (int, error) { +// Write expects a marshaled Sample to validate. +func (sh *SampleHasher) Write(data []byte) (int, error) { if err := sh.sample.UnmarshalBinary(data); err != nil { - err = fmt.Errorf("while unmarshaling ShareSample: %w", err) + err = fmt.Errorf("while unmarshaling Sample: %w", err) log.Error(err) return 0, err } if err := sh.sample.Validate(); err != nil { - err = fmt.Errorf("while validating ShareSample: %w", err) + err = fmt.Errorf("while validating Sample: %w", err) log.Error(err) return 0, err } @@ -27,27 +27,27 @@ func (sh *ShareSampleHasher) Write(data []byte) (int, error) { return len(data), nil } -// Sum returns the "multihash" of the ShareSampleID. -func (sh *ShareSampleHasher) Sum([]byte) []byte { +// Sum returns the "multihash" of the SampleID. +func (sh *SampleHasher) Sum([]byte) []byte { sum, err := sh.sample.ID.MarshalBinary() if err != nil { - err = fmt.Errorf("while marshaling ShareSampleID") + err = fmt.Errorf("while marshaling SampleID") log.Error(err) } return sum } // Reset resets the Hash to its initial state. -func (sh *ShareSampleHasher) Reset() { - sh.sample = ShareSample{} +func (sh *SampleHasher) Reset() { + sh.sample = Sample{} } // Size returns the number of bytes Sum will return. -func (sh *ShareSampleHasher) Size() int { - return ShareSampleIDSize +func (sh *SampleHasher) Size() int { + return SampleIDSize } // BlockSize returns the hash's underlying block size. 
-func (sh *ShareSampleHasher) BlockSize() int { +func (sh *SampleHasher) BlockSize() int { return sha256.BlockSize } diff --git a/share/ipldv2/share_sample_hasher_test.go b/share/ipldv2/share_sample_hasher_test.go index e4dac92a1a..31989dd4aa 100644 --- a/share/ipldv2/share_sample_hasher_test.go +++ b/share/ipldv2/share_sample_hasher_test.go @@ -11,15 +11,15 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -func TestShareSampleHasher(t *testing.T) { - hasher := &ShareSampleHasher{} +func TestSampleHasher(t *testing.T) { + hasher := &SampleHasher{} _, err := hasher.Write([]byte("hello")) assert.Error(t, err) square := edstest.RandEDS(t, 2) - sample, err := NewShareSampleFromEDS(1, square, 2, rsmt2d.Row) + sample, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1) require.NoError(t, err) data, err := sample.MarshalBinary() diff --git a/share/ipldv2/share_sample_id.go b/share/ipldv2/share_sample_id.go deleted file mode 100644 index 88492887e0..0000000000 --- a/share/ipldv2/share_sample_id.go +++ /dev/null @@ -1,138 +0,0 @@ -package ipldv2 - -import ( - "encoding/binary" - "fmt" - - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" -) - -// ShareSampleIDSize is the size of the ShareSampleID in bytes -const ShareSampleIDSize = 45 - -// ShareSampleID is an unique identifier of a ShareSample. -type ShareSampleID struct { - // Height of the block. - // Needed to identify block's data square in the whole chain - Height uint64 - // AxisHash is the sha256 hash of a Col or Row root taken from DAH of the data square - AxisHash []byte - // Index is the index of the sampled share in the data square(not row or col index) - Index int - // Axis is Col or Row axis of the sample in the data square - Axis rsmt2d.Axis -} - -// NewShareSampleID constructs a new ShareSampleID. -func NewShareSampleID(height uint64, root *share.Root, idx int, axis rsmt2d.Axis) ShareSampleID { - sqrLn := len(root.RowRoots) - row, col := idx/sqrLn, idx%sqrLn - dahroot := root.RowRoots[row] - if axis == rsmt2d.Col { - dahroot = root.ColumnRoots[col] - } - axisHash := hashBytes(dahroot) - - return ShareSampleID{ - Height: height, - AxisHash: axisHash, - Index: idx, - Axis: axis, - } -} - -// ShareSampleIDFromCID coverts CID to ShareSampleID. -func ShareSampleIDFromCID(cid cid.Cid) (id ShareSampleID, err error) { - if err = validateCID(cid); err != nil { - return id, err - } - - err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) - if err != nil { - return id, fmt.Errorf("while unmarhalling ShareSampleID: %w", err) - } - - return id, nil -} - -// ShareSampleIDFromProto converts from protobuf representation of ShareSampleID. -func ShareSampleIDFromProto(proto *ipldv2pb.ShareSampleID) ShareSampleID { - return ShareSampleID{ - Height: proto.Height, - AxisHash: proto.AxisHash, - Index: int(proto.Index), - Axis: rsmt2d.Axis(proto.Axis), - } -} - -// Cid returns sample ID encoded as CID. -func (s *ShareSampleID) Cid() (cid.Cid, error) { - data, err := s.MarshalBinary() - if err != nil { - return cid.Undef, err - } - - buf, err := mh.Encode(data, shareSamplingMultihashCode) - if err != nil { - return cid.Undef, err - } - - return cid.NewCidV1(shareSamplingCodec, buf), nil -} - -// Proto converts ShareSampleID to its protobuf representation. 
-func (s *ShareSampleID) Proto() *ipldv2pb.ShareSampleID {
-	return &ipldv2pb.ShareSampleID{
-		Height:   s.Height,
-		AxisHash: s.AxisHash,
-		Index:    uint32(s.Index),
-		Axis:     ipldv2pb.Axis(s.Axis),
-	}
-}
-
-// MarshalBinary encodes ShareSampleID into binary form.
-func (s *ShareSampleID) MarshalBinary() ([]byte, error) {
-	// we cannot use protobuf here because it exceeds multihash limit of 128 bytes
-	data := make([]byte, 0, ShareSampleIDSize)
-	data = binary.LittleEndian.AppendUint64(data, s.Height)
-	data = append(data, s.AxisHash...)
-	data = binary.LittleEndian.AppendUint32(data, uint32(s.Index))
-	data = append(data, byte(s.Axis))
-	return data, nil
-}
-
-// UnmarshalBinary decodes ShareSampleID from binary form.
-func (s *ShareSampleID) UnmarshalBinary(data []byte) error {
-	if len(data) != ShareSampleIDSize {
-		return fmt.Errorf("incorrect SampleID size: %d != %d", len(data), ShareSampleIDSize)
-	}
-
-	s.Height = binary.LittleEndian.Uint64(data)
-	s.AxisHash = append(s.AxisHash, data[8:8+hashSize]...) // copying data to avoid slice aliasing
-	s.Index = int(binary.LittleEndian.Uint32(data[8+hashSize : 8+hashSize+4]))
-	s.Axis = rsmt2d.Axis(data[8+hashSize+4])
-	return nil
-}
-
-// Validate validates fields of ShareSampleID.
-func (s *ShareSampleID) Validate() error {
-	if s.Height == 0 {
-		return fmt.Errorf("zero Height")
-	}
-
-	if len(s.AxisHash) != hashSize {
-		return fmt.Errorf("incorrect AxisHash size: %d != %d", len(s.AxisHash), hashSize)
-	}
-
-	if s.Axis != rsmt2d.Col && s.Axis != rsmt2d.Row {
-		return fmt.Errorf("incorrect Axis: %d", s.Axis)
-	}
-
-	return nil
-}
diff --git a/share/ipldv2/share_sample_test.go b/share/ipldv2/share_sample_test.go
index 04724f8e4f..3dd60b0482 100644
--- a/share/ipldv2/share_sample_test.go
+++ b/share/ipldv2/share_sample_test.go
@@ -11,10 +11,10 @@ import (
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
 )
 
-func TestShareSample(t *testing.T) {
+func TestSample(t *testing.T) {
 	square := edstest.RandEDS(t, 2)
 
-	sid, err := NewShareSampleFromEDS(1, square, 2, rsmt2d.Row)
+	sid, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1)
 	require.NoError(t, err)
 
 	data, err := sid.MarshalBinary()
@@ -27,7 +27,7 @@ func TestShareSample(t *testing.T) {
 	require.NoError(t, err)
 	assert.EqualValues(t, blk.Cid(), cid)
 
-	sidOut := &ShareSample{}
+	sidOut := &Sample{}
 	err = sidOut.UnmarshalBinary(data)
 	require.NoError(t, err)
 	assert.EqualValues(t, sid, sidOut)

From 21bd2fc7a62b8f829b87ed8c93449c53885108b3 Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Sun, 22 Oct 2023 20:39:14 +0200
Subject: [PATCH 019/132] remove serialization ambiguity and ensure there is
 only one serialization method for IDs

---
 share/ipldv2/axis.go                          |  42 +-
 share/ipldv2/axis_hasher.go                   |   8 +-
 share/ipldv2/axis_hasher_test.go              |   2 +-
 share/ipldv2/axis_id.go                       |  25 +-
 share/ipldv2/axis_test.go                     |   2 +-
 share/ipldv2/ipldv2_test.go                   |   4 +-
 share/ipldv2/pb/ipldv2pb.pb.go                | 613 ++----------------
 share/ipldv2/pb/ipldv2pb.proto                |  16 +-
 share/ipldv2/{share_sample.go => sample.go}   |  43 +-
 ...hare_sample_hasher.go => sample_hasher.go} |   2 +-
 ...e_hasher_test.go => sample_hasher_test.go} |   0
 share/ipldv2/sample_id.go                     |  20 +-
 .../{share_sample_test.go => sample_test.go}  |   0
 13 files changed, 114 insertions(+), 663 deletions(-)
 rename share/ipldv2/{share_sample.go => sample.go} (94%)
 rename share/ipldv2/{share_sample_hasher.go => sample_hasher.go} (95%)
 rename share/ipldv2/{share_sample_hasher_test.go => sample_hasher_test.go} (100%)
 rename share/ipldv2/{share_sample_test.go => sample_test.go} (100%)

diff --git 
a/share/ipldv2/axis.go b/share/ipldv2/axis.go index ada30507f1..992fa23b19 100644 --- a/share/ipldv2/axis.go +++ b/share/ipldv2/axis.go @@ -14,14 +14,14 @@ import ( ) type Axis struct { - ID AxisID + AxisID AxisID AxisHalf []share.Share } // NewAxis constructs a new Axis. func NewAxis(id AxisID, axisHalf []share.Share) *Axis { return &Axis{ - ID: id, + AxisID: id, AxisHalf: axisHalf, } } @@ -55,14 +55,6 @@ func NewAxisFromEDS( return NewAxis(id, axisHalf), nil } -// Proto converts Axis to its protobuf representation. -func (s *Axis) Proto() *ipldv2pb.Axis { - return &ipldv2pb.Axis{ - Id: s.ID.Proto(), - AxisHalf: s.AxisHalf, - } -} - // AxisFromBlock converts blocks.Block into Axis. func AxisFromBlock(blk blocks.Block) (*Axis, error) { if err := validateCID(blk.Cid()); err != nil { @@ -80,7 +72,7 @@ func AxisFromBlock(blk blocks.Block) (*Axis, error) { // IPLDBlock converts Axis to an IPLD block for Bitswap compatibility. func (s *Axis) IPLDBlock() (blocks.Block, error) { - cid, err := s.ID.Cid() + cid, err := s.AxisID.Cid() if err != nil { return nil, err } @@ -95,7 +87,15 @@ func (s *Axis) IPLDBlock() (blocks.Block, error) { // MarshalBinary marshals Axis to binary. func (s *Axis) MarshalBinary() ([]byte, error) { - return s.Proto().Marshal() + id, err := s.AxisID.MarshalBinary() + if err != nil { + return nil, err + } + + return (&ipldv2pb.Axis{ + AxisId: id, + AxisHalf: s.AxisHalf, + }).Marshal() } // UnmarshalBinary unmarshal Axis from binary. @@ -105,20 +105,24 @@ func (s *Axis) UnmarshalBinary(data []byte) error { return err } - s.ID = AxisIDFromProto(proto.Id) + err := s.AxisID.UnmarshalBinary(proto.AxisId) + if err != nil { + return err + } + s.AxisHalf = proto.AxisHalf return nil } // Validate validates Axis's fields and proof of axis inclusion. func (s *Axis) Validate() error { - if err := s.ID.Validate(); err != nil { + if err := s.AxisID.Validate(); err != nil { return err } sqrLn := len(s.AxisHalf) * 2 - if s.ID.AxisIndex > uint16(sqrLn) { - return fmt.Errorf("axis index exceeds square size: %d > %d", s.ID.AxisIndex, sqrLn) + if s.AxisID.AxisIndex > uint16(sqrLn) { + return fmt.Errorf("axis index exceeds square size: %d > %d", s.AxisID.AxisIndex, sqrLn) } // TODO(@Wondertan): This computations are quite expensive and likely to be used further, @@ -129,7 +133,7 @@ func (s *Axis) Validate() error { } s.AxisHalf = append(s.AxisHalf, parity...) - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.ID.AxisIndex)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.AxisID.AxisIndex)) for _, shr := range s.AxisHalf { err := tree.Push(shr) if err != nil { @@ -143,8 +147,8 @@ func (s *Axis) Validate() error { } hashedRoot := hashBytes(root) - if !bytes.Equal(s.ID.AxisHash, hashedRoot) { - return fmt.Errorf("invalid axis hash: %X != %X", root, s.ID.AxisHash) + if !bytes.Equal(s.AxisID.AxisHash, hashedRoot) { + return fmt.Errorf("invalid axis hash: %X != %X", root, s.AxisID.AxisHash) } return nil diff --git a/share/ipldv2/axis_hasher.go b/share/ipldv2/axis_hasher.go index 83bb8b517d..633ed4f1f6 100644 --- a/share/ipldv2/axis_hasher.go +++ b/share/ipldv2/axis_hasher.go @@ -13,13 +13,13 @@ type AxisHasher struct { // Write expects a marshaled ShareSample to validate. 
func (sh *AxisHasher) Write(data []byte) (int, error) { if err := sh.sample.UnmarshalBinary(data); err != nil { - err = fmt.Errorf("while unmarshaling ShareSample: %w", err) + err = fmt.Errorf("while unmarshaling Axis: %w", err) log.Error(err) return 0, err } if err := sh.sample.Validate(); err != nil { - err = fmt.Errorf("while validating ShareSample: %w", err) + err = fmt.Errorf("while validating Axis: %w", err) log.Error(err) return 0, err } @@ -29,9 +29,9 @@ func (sh *AxisHasher) Write(data []byte) (int, error) { // Sum returns the "multihash" of the ShareSampleID. func (sh *AxisHasher) Sum([]byte) []byte { - sum, err := sh.sample.ID.MarshalBinary() + sum, err := sh.sample.AxisID.MarshalBinary() if err != nil { - err = fmt.Errorf("while marshaling ShareSampleID") + err = fmt.Errorf("while marshaling AxisID: %w", err) log.Error(err) } return sum diff --git a/share/ipldv2/axis_hasher_test.go b/share/ipldv2/axis_hasher_test.go index 44cf42ab1e..c8fc4a10b6 100644 --- a/share/ipldv2/axis_hasher_test.go +++ b/share/ipldv2/axis_hasher_test.go @@ -30,7 +30,7 @@ func TestAxisHasher(t *testing.T) { assert.EqualValues(t, len(data), n) digest := hasher.Sum(nil) - sid, err := sample.ID.MarshalBinary() + sid, err := sample.AxisID.MarshalBinary() require.NoError(t, err) assert.EqualValues(t, sid, digest) diff --git a/share/ipldv2/axis_id.go b/share/ipldv2/axis_id.go index d4c70d687b..9aa6059353 100644 --- a/share/ipldv2/axis_id.go +++ b/share/ipldv2/axis_id.go @@ -10,7 +10,6 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) // AxisIDSize is the size of the AxisID in bytes @@ -59,19 +58,8 @@ func AxisIDFromCID(cid cid.Cid) (id AxisID, err error) { return id, nil } -// AxisIDFromProto converts from protobuf representation of AxisID. -func AxisIDFromProto(proto *ipldv2pb.AxisID) AxisID { - return AxisID{ - AxisType: rsmt2d.Axis(proto.Type), - AxisIndex: uint16(proto.Index), - AxisHash: proto.Hash, - Height: proto.Height, - } -} - // Cid returns sample ID encoded as CID. func (s *AxisID) Cid() (cid.Cid, error) { - // avoid using proto serialization for CIDs as it's not deterministic data, err := s.MarshalBinary() if err != nil { return cid.Undef, err @@ -85,17 +73,10 @@ func (s *AxisID) Cid() (cid.Cid, error) { return cid.NewCidV1(axisCodec, buf), nil } -// Proto converts AxisID to its protobuf representation. -func (s *AxisID) Proto() *ipldv2pb.AxisID { - return &ipldv2pb.AxisID{ - Type: ipldv2pb.AxisType(s.AxisType), - Height: s.Height, - Hash: s.AxisHash, - Index: uint32(s.AxisIndex), - } -} - // MarshalTo encodes AxisID into given byte slice. +// NOTE: Proto is avoided because +// * Its size is not deterministic which is required for IPLD. 
+// * No support for uint16 func (s *AxisID) MarshalTo(data []byte) (int, error) { data = append(data, byte(s.AxisType)) data = binary.LittleEndian.AppendUint16(data, s.AxisIndex) diff --git a/share/ipldv2/axis_test.go b/share/ipldv2/axis_test.go index 91dfe495a5..81a13b6ff0 100644 --- a/share/ipldv2/axis_test.go +++ b/share/ipldv2/axis_test.go @@ -23,7 +23,7 @@ func TestAxis(t *testing.T) { blk, err := aid.IPLDBlock() require.NoError(t, err) - cid, err := aid.ID.Cid() + cid, err := aid.AxisID.Cid() require.NoError(t, err) assert.EqualValues(t, blk.Cid(), cid) diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 89a8b1bfce..8a66a0f1b0 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -112,7 +112,7 @@ func TestAxisRoundtripGetBlock(t *testing.T) { smpl, err := NewAxisFromEDS(axisType, i, sqr, 1) require.NoError(t, err) - cid, err := smpl.ID.Cid() + cid, err := smpl.AxisID.Cid() require.NoError(t, err) blkOut, err := client.GetBlock(ctx, cid) @@ -143,7 +143,7 @@ func TestAxisRoundtripGetBlocks(t *testing.T) { smpl, err := NewAxisFromEDS(axisType, i, sqr, 1) require.NoError(t, err) - cid, err := smpl.ID.Cid() + cid, err := smpl.AxisID.Cid() require.NoError(t, err) set.Add(cid) diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index f94e7ac1a5..5e127c39b8 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -73,76 +73,8 @@ func (SampleType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_cb41c3a4f982a271, []int{1} } -type AxisID struct { - Type AxisType `protobuf:"varint,1,opt,name=type,proto3,enum=AxisType" json:"type,omitempty"` - Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` - Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *AxisID) Reset() { *m = AxisID{} } -func (m *AxisID) String() string { return proto.CompactTextString(m) } -func (*AxisID) ProtoMessage() {} -func (*AxisID) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{0} -} -func (m *AxisID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AxisID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AxisID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AxisID) XXX_Merge(src proto.Message) { - xxx_messageInfo_AxisID.Merge(m, src) -} -func (m *AxisID) XXX_Size() int { - return m.Size() -} -func (m *AxisID) XXX_DiscardUnknown() { - xxx_messageInfo_AxisID.DiscardUnknown(m) -} - -var xxx_messageInfo_AxisID proto.InternalMessageInfo - -func (m *AxisID) GetType() AxisType { - if m != nil { - return m.Type - } - return AxisType_Row -} - -func (m *AxisID) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *AxisID) GetIndex() uint32 { - if m != nil { - return m.Index - } - return 0 -} - -func (m *AxisID) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - type Axis struct { - Id *AxisID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AxisId []byte `protobuf:"bytes,1,opt,name=axis_id,json=axisId,proto3" json:"axis_id,omitempty"` AxisHalf [][]byte `protobuf:"bytes,2,rep,name=axis_half,json=axisHalf,proto3" json:"axis_half,omitempty"` } @@ -150,7 +82,7 @@ func (m *Axis) Reset() { 
*m = Axis{} } func (m *Axis) String() string { return proto.CompactTextString(m) } func (*Axis) ProtoMessage() {} func (*Axis) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{1} + return fileDescriptor_cb41c3a4f982a271, []int{0} } func (m *Axis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -179,9 +111,9 @@ func (m *Axis) XXX_DiscardUnknown() { var xxx_messageInfo_Axis proto.InternalMessageInfo -func (m *Axis) GetId() *AxisID { +func (m *Axis) GetAxisId() []byte { if m != nil { - return m.Id + return m.AxisId } return nil } @@ -193,60 +125,8 @@ func (m *Axis) GetAxisHalf() [][]byte { return nil } -type SampleID struct { - AxisId *AxisID `protobuf:"bytes,1,opt,name=axis_id,json=axisId,proto3" json:"axis_id,omitempty"` - ShareIndex uint32 `protobuf:"varint,2,opt,name=share_index,json=shareIndex,proto3" json:"share_index,omitempty"` -} - -func (m *SampleID) Reset() { *m = SampleID{} } -func (m *SampleID) String() string { return proto.CompactTextString(m) } -func (*SampleID) ProtoMessage() {} -func (*SampleID) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{2} -} -func (m *SampleID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SampleID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SampleID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SampleID) XXX_Merge(src proto.Message) { - xxx_messageInfo_SampleID.Merge(m, src) -} -func (m *SampleID) XXX_Size() int { - return m.Size() -} -func (m *SampleID) XXX_DiscardUnknown() { - xxx_messageInfo_SampleID.DiscardUnknown(m) -} - -var xxx_messageInfo_SampleID proto.InternalMessageInfo - -func (m *SampleID) GetAxisId() *AxisID { - if m != nil { - return m.AxisId - } - return nil -} - -func (m *SampleID) GetShareIndex() uint32 { - if m != nil { - return m.ShareIndex - } - return 0 -} - type Sample struct { - Id *SampleID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Type SampleType `protobuf:"varint,2,opt,name=type,proto3,enum=SampleType" json:"type,omitempty"` Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` @@ -256,7 +136,7 @@ func (m *Sample) Reset() { *m = Sample{} } func (m *Sample) String() string { return proto.CompactTextString(m) } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{3} + return fileDescriptor_cb41c3a4f982a271, []int{1} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -285,7 +165,7 @@ func (m *Sample) XXX_DiscardUnknown() { var xxx_messageInfo_Sample proto.InternalMessageInfo -func (m *Sample) GetId() *SampleID { +func (m *Sample) GetId() []byte { if m != nil { return m.Id } @@ -316,85 +196,32 @@ func (m *Sample) GetProof() *pb.Proof { func init() { proto.RegisterEnum("AxisType", AxisType_name, AxisType_value) proto.RegisterEnum("SampleType", SampleType_name, SampleType_value) - proto.RegisterType((*AxisID)(nil), "AxisID") proto.RegisterType((*Axis)(nil), "Axis") - proto.RegisterType((*SampleID)(nil), "SampleID") proto.RegisterType((*Sample)(nil), "Sample") } func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } 
var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 371 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x51, 0x4f, 0x6b, 0xab, 0x40, - 0x1c, 0x74, 0x8d, 0x31, 0xe6, 0x67, 0x5e, 0x9e, 0x2c, 0xe1, 0x3d, 0xfb, 0xcf, 0x88, 0x50, 0x90, - 0x1c, 0x0c, 0xd8, 0x6b, 0x2f, 0x6d, 0x73, 0xa8, 0x87, 0x42, 0xd8, 0xf6, 0x1e, 0x56, 0x34, 0x75, - 0xc1, 0xd6, 0x45, 0xa5, 0x4d, 0x8e, 0xfd, 0x06, 0xfd, 0x58, 0x3d, 0xe6, 0xd8, 0x63, 0x49, 0xbe, - 0x48, 0x71, 0xd7, 0xd0, 0x40, 0x6f, 0x33, 0xb3, 0x32, 0x33, 0xbf, 0x11, 0x9c, 0x2a, 0xa3, 0x65, - 0x3a, 0x65, 0x3c, 0x4f, 0x5e, 0xc2, 0x29, 0x8f, 0x5b, 0xc4, 0xe3, 0x80, 0x97, 0x45, 0x5d, 0x1c, - 0x0f, 0x79, 0x3c, 0xe5, 0x65, 0x51, 0x2c, 0x25, 0xf7, 0x18, 0xe8, 0x57, 0x2b, 0x56, 0x45, 0x33, - 0x7c, 0x06, 0x5a, 0xbd, 0xe6, 0xa9, 0x8d, 0x5c, 0xe4, 0x0f, 0xc3, 0x7e, 0xd0, 0xc8, 0x0f, 0x6b, - 0x9e, 0x12, 0x21, 0xe3, 0x7f, 0xa0, 0x67, 0x29, 0x7b, 0xcc, 0x6a, 0x5b, 0x75, 0x91, 0xaf, 0x91, - 0x96, 0xe1, 0x11, 0x74, 0xd9, 0x73, 0x92, 0xae, 0xec, 0x8e, 0x8b, 0xfc, 0x3f, 0x44, 0x12, 0x8c, - 0x41, 0xcb, 0x68, 0x95, 0xd9, 0x9a, 0x8b, 0xfc, 0x01, 0x11, 0xd8, 0xbb, 0x04, 0xad, 0xf1, 0xc4, - 0xff, 0x41, 0x65, 0x89, 0x88, 0x31, 0xc3, 0x5e, 0x20, 0xd3, 0x89, 0xca, 0x12, 0x7c, 0x02, 0x7d, - 0xba, 0x62, 0xd5, 0x22, 0xa3, 0xf9, 0xd2, 0x56, 0xdd, 0x8e, 0x3f, 0x20, 0x46, 0x23, 0xdc, 0xd2, - 0x7c, 0xe9, 0xdd, 0x81, 0x71, 0x4f, 0x9f, 0x78, 0x9e, 0x46, 0x33, 0xec, 0x42, 0x4f, 0x7c, 0xf8, - 0xdb, 0x46, 0x6f, 0xf4, 0x28, 0xc1, 0x63, 0x30, 0xc5, 0x10, 0x0b, 0xd9, 0x4d, 0x15, 0xdd, 0x40, - 0x48, 0x51, 0xa3, 0x78, 0x6f, 0x08, 0x74, 0xe9, 0x87, 0x8f, 0x0e, 0xfa, 0xf4, 0x83, 0x7d, 0x88, - 0x68, 0x34, 0x6e, 0x37, 0x51, 0xc5, 0x26, 0x66, 0xfb, 0x78, 0xb0, 0xca, 0x08, 0xba, 0xc2, 0x54, - 0x5c, 0x3f, 0x20, 0x92, 0xe0, 0x73, 0xe8, 0x8a, 0x8d, 0xc5, 0xf9, 0x66, 0xf8, 0x37, 0x68, 0x17, - 0x8f, 0x83, 0x79, 0x03, 0x88, 0x7c, 0x9d, 0x9c, 0x82, 0xb1, 0x1f, 0x19, 0xf7, 0xa0, 0x43, 0x8a, - 0x57, 0x4b, 0x69, 0xc0, 0x4d, 0x91, 0x5b, 0x68, 0xe2, 0x01, 0xfc, 0xc4, 0x61, 0x03, 0xb4, 0x19, - 0xad, 0xa9, 0xa5, 0x60, 0x00, 0x7d, 0x4e, 0x4b, 0x56, 0xaf, 0x2d, 0x74, 0x6d, 0x7f, 0x6c, 0x1d, - 0xb4, 0xd9, 0x3a, 0xe8, 0x6b, 0xeb, 0xa0, 0xf7, 0x9d, 0xa3, 0x6c, 0x76, 0x8e, 0xf2, 0xb9, 0x73, - 0x94, 0x58, 0x17, 0xbf, 0xf7, 0xe2, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xb7, 0xed, 0xa5, 0x10, - 0x02, 0x00, 0x00, -} - -func (m *AxisID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AxisID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AxisID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x22 - } - if m.Index != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x18 - } - if m.Height != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x10 - } - if m.Type != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + // 279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0x4f, 0x4b, 0x84, 0x40, + 0x18, 0xc6, 0x1d, 0x75, 0x5d, 0x7b, 0x77, 0x31, 0x19, 0x82, 0x86, 0x8a, 0x49, 0x84, 0x40, 0xf6, + 
0xa0, 0x60, 0xd7, 0x2e, 0xfd, 0x39, 0xd4, 0x6d, 0xb1, 0xee, 0x31, 0xa2, 0xb2, 0x03, 0xc6, 0x0c, + 0x2a, 0xdb, 0xfa, 0x2d, 0xfa, 0x58, 0x1d, 0xf7, 0xd8, 0x31, 0xf4, 0x8b, 0xc4, 0x8c, 0x4b, 0x7b, + 0x7b, 0x9e, 0x79, 0xe6, 0x79, 0xdf, 0x1f, 0x2f, 0xd0, 0x76, 0xc3, 0x9a, 0x32, 0xe1, 0xb2, 0x2e, + 0xb6, 0x69, 0x22, 0xf3, 0x83, 0x92, 0x79, 0x2c, 0x1b, 0xd1, 0x89, 0x0b, 0x4f, 0xe6, 0x89, 0x6c, + 0x84, 0xa8, 0x26, 0x1f, 0xde, 0x81, 0x7d, 0xbf, 0xe3, 0x2d, 0x3e, 0x87, 0x39, 0xdb, 0xf1, 0xf6, + 0x9d, 0x17, 0x04, 0x05, 0x28, 0x5a, 0x66, 0x8e, 0xb2, 0x2f, 0x05, 0xbe, 0x84, 0x13, 0x1d, 0x6c, + 0x58, 0x5d, 0x11, 0x33, 0xb0, 0xa2, 0x65, 0xe6, 0xaa, 0x87, 0x67, 0x56, 0x57, 0xe1, 0x16, 0x9c, + 0x57, 0xf6, 0x21, 0xeb, 0x12, 0x7b, 0x60, 0xfe, 0x57, 0x4d, 0x5e, 0xe0, 0x6b, 0xb0, 0xbb, 0x5e, + 0x96, 0xc4, 0x0c, 0x50, 0xe4, 0xa5, 0x8b, 0x78, 0xfa, 0xf6, 0xd6, 0xcb, 0x32, 0xd3, 0x01, 0x3e, + 0x83, 0x99, 0x46, 0x25, 0x96, 0xee, 0x4c, 0x06, 0xdf, 0xc0, 0x4c, 0xd3, 0x11, 0x3b, 0x40, 0xd1, + 0x22, 0x3d, 0x8d, 0x0f, 0xac, 0x79, 0xbc, 0x56, 0x22, 0x9b, 0xd2, 0xd5, 0x15, 0xb8, 0x8a, 0x5a, + 0x8d, 0xc3, 0x73, 0xb0, 0x32, 0xf1, 0xe9, 0x1b, 0x4a, 0x3c, 0x8a, 0xda, 0x47, 0xab, 0x10, 0xe0, + 0xb8, 0x0e, 0xbb, 0x60, 0x3f, 0xb1, 0x8e, 0xf9, 0x06, 0x06, 0x70, 0xd6, 0xac, 0xe1, 0x5d, 0xef, + 0xa3, 0x07, 0xf2, 0x3d, 0x50, 0xb4, 0x1f, 0x28, 0xfa, 0x1d, 0x28, 0xfa, 0x1a, 0xa9, 0xb1, 0x1f, + 0xa9, 0xf1, 0x33, 0x52, 0x23, 0x77, 0xf4, 0x61, 0x6e, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2e, + 0x20, 0xe3, 0x8f, 0x4a, 0x01, 0x00, 0x00, } func (m *Axis) Marshal() (dAtA []byte, err error) { @@ -426,55 +253,10 @@ func (m *Axis) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x12 } } - if m.Id != nil { - { - size, err := m.Id.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SampleID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SampleID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SampleID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ShareIndex != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.ShareIndex)) - i-- - dAtA[i] = 0x10 - } - if m.AxisId != nil { - { - size, err := m.AxisId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) - } + if len(m.AxisId) > 0 { + i -= len(m.AxisId) + copy(dAtA[i:], m.AxisId) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisId))) i-- dAtA[i] = 0xa } @@ -525,15 +307,10 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x10 } - if m.Id != nil { - { - size, err := m.Id.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) - } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Id))) i-- dAtA[i] = 0xa } @@ -551,36 +328,14 @@ func encodeVarintIpldv2Pb(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *AxisID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Type)) - } - if m.Height != 0 { - n += 1 + 
sovIpldv2Pb(uint64(m.Height)) - } - if m.Index != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Index)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) - } - return n -} - func (m *Axis) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Id != nil { - l = m.Id.Size() + l = len(m.AxisId) + if l > 0 { n += 1 + l + sovIpldv2Pb(uint64(l)) } if len(m.AxisHalf) > 0 { @@ -592,30 +347,14 @@ func (m *Axis) Size() (n int) { return n } -func (m *SampleID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AxisId != nil { - l = m.AxisId.Size() - n += 1 + l + sovIpldv2Pb(uint64(l)) - } - if m.ShareIndex != 0 { - n += 1 + sovIpldv2Pb(uint64(m.ShareIndex)) - } - return n -} - func (m *Sample) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Id != nil { - l = m.Id.Size() + l = len(m.Id) + if l > 0 { n += 1 + l + sovIpldv2Pb(uint64(l)) } if m.Type != 0 { @@ -638,7 +377,7 @@ func sovIpldv2Pb(x uint64) (n int) { func sozIpldv2Pb(x uint64) (n int) { return sovIpldv2Pb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *AxisID) Unmarshal(dAtA []byte) error { +func (m *Axis) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -661,72 +400,15 @@ func (m *AxisID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AxisID: wiretype end group for non-group") + return fmt.Errorf("proto: Axis: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AxisID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Axis: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= AxisType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AxisId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -753,95 +435,9 @@ func (m *AxisID) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIpldv2Pb - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Axis) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Axis: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Axis: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIpldv2Pb - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Id == nil { - m.Id = &AxisID{} - } - if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.AxisId = append(m.AxisId[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AxisId == nil { + m.AxisId = []byte{} } iNdEx = postIndex case 2: @@ -897,111 +493,6 @@ func (m *Axis) Unmarshal(dAtA []byte) error { } return nil } -func (m *SampleID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SampleID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SampleID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AxisId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIpldv2Pb - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AxisId == nil { - m.AxisId = &AxisID{} - } - if err := m.AxisId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareIndex", wireType) - } - m.ShareIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIpldv2Pb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ShareIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIpldv2Pb - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Sample) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1035,7 +526,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -1045,26 +536,24 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthIpldv2Pb } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthIpldv2Pb } if postIndex > l { return io.ErrUnexpectedEOF } + m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
if m.Id == nil { - m.Id = &SampleID{} - } - if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Id = []byte{} } iNdEx = postIndex case 2: diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto index 40dae74746..6cbdc2ce52 100644 --- a/share/ipldv2/pb/ipldv2pb.proto +++ b/share/ipldv2/pb/ipldv2pb.proto @@ -7,15 +7,8 @@ enum AxisType { Col = 1; } -message AxisID{ - AxisType type = 1; - uint64 height = 2; - uint32 index = 3; // TODO(@Wondertan): uint16 would be enough, but proto3 doest not support it - bytes hash = 4; -} - message Axis { - AxisID id = 1; + bytes axis_id = 1; repeated bytes axis_half = 2; } @@ -24,13 +17,8 @@ enum SampleType { Parity = 1; } -message SampleID{ - AxisID axis_id = 1; - uint32 share_index = 2; // TODO: uint16 would be enough, but proto3 doest not support it -} - message Sample { - SampleID id = 1; + bytes id = 1; SampleType type = 2; bytes share = 3; proof.pb.Proof proof = 4; diff --git a/share/ipldv2/share_sample.go b/share/ipldv2/sample.go similarity index 94% rename from share/ipldv2/share_sample.go rename to share/ipldv2/sample.go index 4aefa3f41d..11220dadba 100644 --- a/share/ipldv2/share_sample.go +++ b/share/ipldv2/sample.go @@ -96,24 +96,6 @@ func NewSampleFromEDS( return NewSample(id, shrs[shrIdx], prf, len(root.RowRoots)), nil } -// Proto converts Sample to its protobuf representation. -func (s *Sample) Proto() *ipldv2pb.Sample { - // TODO: Extract as helper to nmt - proof := &nmtpb.Proof{} - proof.Nodes = s.Proof.Nodes() - proof.End = int64(s.Proof.End()) - proof.Start = int64(s.Proof.Start()) - proof.IsMaxNamespaceIgnored = s.Proof.IsMaxNamespaceIDIgnored() - proof.LeafHash = s.Proof.LeafHash() - - return &ipldv2pb.Sample{ - Id: s.ID.Proto(), - Type: ipldv2pb.SampleType(s.Type), - Proof: proof, - Share: s.Share, - } -} - // SampleFromBlock converts blocks.Block into Sample. func SampleFromBlock(blk blocks.Block) (*Sample, error) { if err := validateCID(blk.Cid()); err != nil { @@ -146,7 +128,24 @@ func (s *Sample) IPLDBlock() (blocks.Block, error) { // MarshalBinary marshals Sample to binary. func (s *Sample) MarshalBinary() ([]byte, error) { - return s.Proto().Marshal() + id, err := s.ID.MarshalBinary() + if err != nil { + return nil, err + } + + proof := &nmtpb.Proof{} + proof.Nodes = s.Proof.Nodes() + proof.End = int64(s.Proof.End()) + proof.Start = int64(s.Proof.Start()) + proof.IsMaxNamespaceIgnored = s.Proof.IsMaxNamespaceIDIgnored() + proof.LeafHash = s.Proof.LeafHash() + + return (&ipldv2pb.Sample{ + Id: id, + Type: ipldv2pb.SampleType(s.Type), + Proof: proof, + Share: s.Share, + }).Marshal() } // UnmarshalBinary unmarshal Sample from binary. 
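// A minimal sketch of the round trip the hunks above establish, assuming only
// the names visible in this diff (an illustration, not code from the patch):
//
//	data, _ := s.MarshalBinary()  // SampleID nested as opaque bytes in the proto
//	out := &Sample{}
//	_ = out.UnmarshalBinary(data) // SampleID restored via SampleID.UnmarshalBinary
//
// Keeping the ID in its fixed-size binary form is what lets those same bytes
// double as the CID's multihash digest elsewhere in this package.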
@@ -156,7 +155,11 @@ func (s *Sample) UnmarshalBinary(data []byte) error { return err } - s.ID = SampleIDFromProto(proto.Id) + err := s.ID.UnmarshalBinary(proto.Id) + if err != nil { + return err + } + s.Type = SampleType(proto.Type) s.Proof = nmt.ProtoToProof(*proto.Proof) s.Share = proto.Share diff --git a/share/ipldv2/share_sample_hasher.go b/share/ipldv2/sample_hasher.go similarity index 95% rename from share/ipldv2/share_sample_hasher.go rename to share/ipldv2/sample_hasher.go index 572f0673ca..a9dc4d60e0 100644 --- a/share/ipldv2/share_sample_hasher.go +++ b/share/ipldv2/sample_hasher.go @@ -31,7 +31,7 @@ func (sh *SampleHasher) Write(data []byte) (int, error) { func (sh *SampleHasher) Sum([]byte) []byte { sum, err := sh.sample.ID.MarshalBinary() if err != nil { - err = fmt.Errorf("while marshaling SampleID") + err = fmt.Errorf("while marshaling SampleID: %w", err) log.Error(err) } return sum diff --git a/share/ipldv2/share_sample_hasher_test.go b/share/ipldv2/sample_hasher_test.go similarity index 100% rename from share/ipldv2/share_sample_hasher_test.go rename to share/ipldv2/sample_hasher_test.go diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go index 2c4329f0f5..e8111e520b 100644 --- a/share/ipldv2/sample_id.go +++ b/share/ipldv2/sample_id.go @@ -10,7 +10,6 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) // SampleIDSize is the size of the SampleID in bytes @@ -52,14 +51,6 @@ func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) { return id, nil } -// SampleIDFromProto converts from protobuf representation of SampleID. -func SampleIDFromProto(proto *ipldv2pb.SampleID) SampleID { - return SampleID{ - AxisID: AxisIDFromProto(proto.AxisId), - ShareIndex: uint16(proto.ShareIndex), - } -} - // Cid returns sample ID encoded as CID. func (s *SampleID) Cid() (cid.Cid, error) { // avoid using proto serialization for CID as it's not deterministic @@ -76,15 +67,10 @@ func (s *SampleID) Cid() (cid.Cid, error) { return cid.NewCidV1(sampleCodec, buf), nil } -// Proto converts SampleID to its protobuf representation. -func (s *SampleID) Proto() *ipldv2pb.SampleID { - return &ipldv2pb.SampleID{ - AxisId: s.AxisID.Proto(), - ShareIndex: uint32(s.ShareIndex), - } -} - // MarshalBinary encodes SampleID into binary form. +// NOTE: Proto is avoided because +// * Its size is not deterministic which is required for IPLD. 
+// * No support for uint16 func (s *SampleID) MarshalBinary() ([]byte, error) { data := make([]byte, 0, SampleIDSize) n, err := s.AxisID.MarshalTo(data) diff --git a/share/ipldv2/share_sample_test.go b/share/ipldv2/sample_test.go similarity index 100% rename from share/ipldv2/share_sample_test.go rename to share/ipldv2/sample_test.go From cdbd69415d5dc5765c476683a1107745b40f54e2 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 22 Oct 2023 20:41:41 +0200 Subject: [PATCH 020/132] cleanup proto field names --- share/ipldv2/pb/ipldv2pb.pb.go | 129 +++++++++++++++++---------------- share/ipldv2/pb/ipldv2pb.proto | 8 +- share/ipldv2/sample.go | 16 ++-- 3 files changed, 77 insertions(+), 76 deletions(-) diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index 5e127c39b8..39d2b28ccc 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -126,10 +126,10 @@ func (m *Axis) GetAxisHalf() [][]byte { } type Sample struct { - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Type SampleType `protobuf:"varint,2,opt,name=type,proto3,enum=SampleType" json:"type,omitempty"` - Share []byte `protobuf:"bytes,3,opt,name=share,proto3" json:"share,omitempty"` - Proof *pb.Proof `protobuf:"bytes,4,opt,name=proof,proto3" json:"proof,omitempty"` + SampleId []byte `protobuf:"bytes,1,opt,name=sample_id,json=sampleId,proto3" json:"sample_id,omitempty"` + SampleType SampleType `protobuf:"varint,2,opt,name=sample_type,json=sampleType,proto3,enum=SampleType" json:"sample_type,omitempty"` + SampleShare []byte `protobuf:"bytes,3,opt,name=sample_share,json=sampleShare,proto3" json:"sample_share,omitempty"` + SampleProof *pb.Proof `protobuf:"bytes,4,opt,name=sample_proof,json=sampleProof,proto3" json:"sample_proof,omitempty"` } func (m *Sample) Reset() { *m = Sample{} } @@ -165,30 +165,30 @@ func (m *Sample) XXX_DiscardUnknown() { var xxx_messageInfo_Sample proto.InternalMessageInfo -func (m *Sample) GetId() []byte { +func (m *Sample) GetSampleId() []byte { if m != nil { - return m.Id + return m.SampleId } return nil } -func (m *Sample) GetType() SampleType { +func (m *Sample) GetSampleType() SampleType { if m != nil { - return m.Type + return m.SampleType } return SampleType_Data } -func (m *Sample) GetShare() []byte { +func (m *Sample) GetSampleShare() []byte { if m != nil { - return m.Share + return m.SampleShare } return nil } -func (m *Sample) GetProof() *pb.Proof { +func (m *Sample) GetSampleProof() *pb.Proof { if m != nil { - return m.Proof + return m.SampleProof } return nil } @@ -203,25 +203,26 @@ func init() { func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 279 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0x4f, 0x4b, 0x84, 0x40, - 0x18, 0xc6, 0x1d, 0x75, 0x5d, 0x7b, 0x77, 0x31, 0x19, 0x82, 0x86, 0x8a, 0x49, 0x84, 0x40, 0xf6, - 0xa0, 0x60, 0xd7, 0x2e, 0xfd, 0x39, 0xd4, 0x6d, 0xb1, 0xee, 0x31, 0xa2, 0xb2, 0x03, 0xc6, 0x0c, - 0x2a, 0xdb, 0xfa, 0x2d, 0xfa, 0x58, 0x1d, 0xf7, 0xd8, 0x31, 0xf4, 0x8b, 0xc4, 0x8c, 0x4b, 0x7b, - 0x7b, 0x9e, 0x79, 0xe6, 0x79, 0xdf, 0x1f, 0x2f, 0xd0, 0x76, 0xc3, 0x9a, 0x32, 0xe1, 0xb2, 0x2e, - 0xb6, 0x69, 0x22, 0xf3, 0x83, 0x92, 0x79, 0x2c, 0x1b, 0xd1, 0x89, 0x0b, 0x4f, 0xe6, 0x89, 0x6c, - 0x84, 0xa8, 0x26, 0x1f, 0xde, 0x81, 0x7d, 0xbf, 0xe3, 0x2d, 0x3e, 0x87, 0x39, 0xdb, 0xf1, 0xf6, - 0x9d, 0x17, 0x04, 0x05, 0x28, 0x5a, 0x66, 0x8e, 0xb2, 0x2f, 
0x05, 0xbe, 0x84, 0x13, 0x1d, 0x6c, - 0x58, 0x5d, 0x11, 0x33, 0xb0, 0xa2, 0x65, 0xe6, 0xaa, 0x87, 0x67, 0x56, 0x57, 0xe1, 0x16, 0x9c, - 0x57, 0xf6, 0x21, 0xeb, 0x12, 0x7b, 0x60, 0xfe, 0x57, 0x4d, 0x5e, 0xe0, 0x6b, 0xb0, 0xbb, 0x5e, - 0x96, 0xc4, 0x0c, 0x50, 0xe4, 0xa5, 0x8b, 0x78, 0xfa, 0xf6, 0xd6, 0xcb, 0x32, 0xd3, 0x01, 0x3e, - 0x83, 0x99, 0x46, 0x25, 0x96, 0xee, 0x4c, 0x06, 0xdf, 0xc0, 0x4c, 0xd3, 0x11, 0x3b, 0x40, 0xd1, - 0x22, 0x3d, 0x8d, 0x0f, 0xac, 0x79, 0xbc, 0x56, 0x22, 0x9b, 0xd2, 0xd5, 0x15, 0xb8, 0x8a, 0x5a, - 0x8d, 0xc3, 0x73, 0xb0, 0x32, 0xf1, 0xe9, 0x1b, 0x4a, 0x3c, 0x8a, 0xda, 0x47, 0xab, 0x10, 0xe0, - 0xb8, 0x0e, 0xbb, 0x60, 0x3f, 0xb1, 0x8e, 0xf9, 0x06, 0x06, 0x70, 0xd6, 0xac, 0xe1, 0x5d, 0xef, - 0xa3, 0x07, 0xf2, 0x3d, 0x50, 0xb4, 0x1f, 0x28, 0xfa, 0x1d, 0x28, 0xfa, 0x1a, 0xa9, 0xb1, 0x1f, - 0xa9, 0xf1, 0x33, 0x52, 0x23, 0x77, 0xf4, 0x61, 0x6e, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2e, - 0x20, 0xe3, 0x8f, 0x4a, 0x01, 0x00, 0x00, + // 291 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x50, 0xcd, 0x4e, 0x83, 0x40, + 0x18, 0x64, 0x0b, 0xa1, 0xf8, 0x41, 0x2a, 0xd9, 0x8b, 0x1b, 0x35, 0x1b, 0xe4, 0x44, 0x1a, 0x03, + 0x09, 0x5e, 0xbd, 0xf8, 0x73, 0xb0, 0xb7, 0x86, 0x7a, 0x37, 0x4b, 0x80, 0x94, 0x04, 0xb3, 0x1b, + 0x20, 0x5a, 0xde, 0xc2, 0xe7, 0xf0, 0x49, 0x3c, 0xf6, 0xe8, 0xd1, 0xc0, 0x8b, 0x98, 0x5d, 0x68, + 0x7b, 0x9b, 0x99, 0x6f, 0xbe, 0xd9, 0x9d, 0x0f, 0x68, 0xb3, 0x65, 0x75, 0x1e, 0x95, 0xa2, 0xca, + 0x3e, 0xe2, 0x48, 0xa4, 0x13, 0x12, 0x69, 0x28, 0x6a, 0xde, 0xf2, 0xcb, 0x85, 0x48, 0x23, 0x51, + 0x73, 0x5e, 0x8c, 0xdc, 0xbf, 0x07, 0xe3, 0x61, 0x57, 0x36, 0xf8, 0x02, 0xe6, 0x6c, 0x57, 0x36, + 0x6f, 0x65, 0x46, 0x90, 0x87, 0x02, 0x27, 0x31, 0x25, 0x5d, 0x65, 0xf8, 0x0a, 0xce, 0xd4, 0x60, + 0xcb, 0xaa, 0x82, 0xcc, 0x3c, 0x3d, 0x70, 0x12, 0x4b, 0x0a, 0x2f, 0xac, 0x2a, 0xfc, 0x6f, 0x04, + 0xe6, 0x86, 0xbd, 0x8b, 0x2a, 0x97, 0xbe, 0x46, 0xa1, 0x53, 0x84, 0x35, 0x0a, 0xab, 0x0c, 0xdf, + 0x82, 0x3d, 0x0d, 0xdb, 0x4e, 0xe4, 0x64, 0xe6, 0xa1, 0x60, 0x11, 0xdb, 0xe1, 0xb8, 0xfa, 0xda, + 0x89, 0x3c, 0x81, 0xe6, 0x88, 0xf1, 0x0d, 0x38, 0x93, 0x5b, 0x95, 0x21, 0xba, 0x4a, 0x9b, 0x12, + 0x36, 0x52, 0xc2, 0xf1, 0xd1, 0xa2, 0xca, 0x10, 0xc3, 0x43, 0x81, 0x1d, 0x9f, 0x87, 0x53, 0xb5, + 0x34, 0x5c, 0x4b, 0x70, 0xd8, 0x51, 0x64, 0x79, 0x0d, 0x96, 0xac, 0xaa, 0x9e, 0x98, 0x83, 0x9e, + 0xf0, 0x4f, 0x57, 0x93, 0xe0, 0x89, 0x57, 0x2e, 0x5a, 0xfa, 0x00, 0xa7, 0xef, 0x60, 0x0b, 0x8c, + 0x67, 0xd6, 0x32, 0x57, 0xc3, 0x00, 0xe6, 0x9a, 0xd5, 0x65, 0xdb, 0xb9, 0xe8, 0x91, 0xfc, 0xf4, + 0x14, 0xed, 0x7b, 0x8a, 0xfe, 0x7a, 0x8a, 0xbe, 0x06, 0xaa, 0xed, 0x07, 0xaa, 0xfd, 0x0e, 0x54, + 0x4b, 0x4d, 0x75, 0xcd, 0xbb, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe0, 0x45, 0x2a, 0x9d, 0x7f, + 0x01, 0x00, 0x00, } func (m *Axis) Marshal() (dAtA []byte, err error) { @@ -283,9 +284,9 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Proof != nil { + if m.SampleProof != nil { { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.SampleProof.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -295,22 +296,22 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.Share) > 0 { - i -= len(m.Share) - copy(dAtA[i:], m.Share) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Share))) + if len(m.SampleShare) > 0 { + i -= len(m.SampleShare) + copy(dAtA[i:], m.SampleShare) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.SampleShare))) i-- dAtA[i] 
= 0x1a } - if m.Type != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.Type)) + if m.SampleType != 0 { + i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.SampleType)) i-- dAtA[i] = 0x10 } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.Id))) + if len(m.SampleId) > 0 { + i -= len(m.SampleId) + copy(dAtA[i:], m.SampleId) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.SampleId))) i-- dAtA[i] = 0xa } @@ -353,19 +354,19 @@ func (m *Sample) Size() (n int) { } var l int _ = l - l = len(m.Id) + l = len(m.SampleId) if l > 0 { n += 1 + l + sovIpldv2Pb(uint64(l)) } - if m.Type != 0 { - n += 1 + sovIpldv2Pb(uint64(m.Type)) + if m.SampleType != 0 { + n += 1 + sovIpldv2Pb(uint64(m.SampleType)) } - l = len(m.Share) + l = len(m.SampleShare) if l > 0 { n += 1 + l + sovIpldv2Pb(uint64(l)) } - if m.Proof != nil { - l = m.Proof.Size() + if m.SampleProof != nil { + l = m.SampleProof.Size() n += 1 + l + sovIpldv2Pb(uint64(l)) } return n @@ -524,7 +525,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SampleId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -551,16 +552,16 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) - if m.Id == nil { - m.Id = []byte{} + m.SampleId = append(m.SampleId[:0], dAtA[iNdEx:postIndex]...) + if m.SampleId == nil { + m.SampleId = []byte{} } iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) } - m.Type = 0 + m.SampleType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIpldv2Pb @@ -570,14 +571,14 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= SampleType(b&0x7F) << shift + m.SampleType |= SampleType(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Share", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SampleShare", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -604,14 +605,14 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Share = append(m.Share[:0], dAtA[iNdEx:postIndex]...) - if m.Share == nil { - m.Share = []byte{} + m.SampleShare = append(m.SampleShare[:0], dAtA[iNdEx:postIndex]...) 
+ if m.SampleShare == nil { + m.SampleShare = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SampleProof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -638,10 +639,10 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Proof == nil { - m.Proof = &pb.Proof{} + if m.SampleProof == nil { + m.SampleProof = &pb.Proof{} } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SampleProof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto index 6cbdc2ce52..22ce8e3229 100644 --- a/share/ipldv2/pb/ipldv2pb.proto +++ b/share/ipldv2/pb/ipldv2pb.proto @@ -18,8 +18,8 @@ enum SampleType { } message Sample { - bytes id = 1; - SampleType type = 2; - bytes share = 3; - proof.pb.Proof proof = 4; + bytes sample_id = 1; + SampleType sample_type = 2; + bytes sample_share = 3; + proof.pb.Proof sample_proof = 4; } diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go index 11220dadba..8c1449a21d 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/sample.go @@ -141,10 +141,10 @@ func (s *Sample) MarshalBinary() ([]byte, error) { proof.LeafHash = s.Proof.LeafHash() return (&ipldv2pb.Sample{ - Id: id, - Type: ipldv2pb.SampleType(s.Type), - Proof: proof, - Share: s.Share, + SampleId: id, + SampleType: ipldv2pb.SampleType(s.Type), + SampleProof: proof, + SampleShare: s.Share, }).Marshal() } @@ -155,14 +155,14 @@ func (s *Sample) UnmarshalBinary(data []byte) error { return err } - err := s.ID.UnmarshalBinary(proto.Id) + err := s.ID.UnmarshalBinary(proto.SampleId) if err != nil { return err } - s.Type = SampleType(proto.Type) - s.Proof = nmt.ProtoToProof(*proto.Proof) - s.Share = proto.Share + s.Type = SampleType(proto.SampleType) + s.Proof = nmt.ProtoToProof(*proto.SampleProof) + s.Share = proto.SampleShare return nil } From c3e845071833295c514898a16a9ef831d6466b07 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sat, 2 Dec 2023 01:05:49 +0100 Subject: [PATCH 021/132] namespace mh --- share/eds/file.go | 1 + share/ipldv2/axis.go | 25 ++- share/ipldv2/axis_id.go | 6 +- share/ipldv2/blockstore.go | 70 +++++- share/ipldv2/blockstore_test.go | 4 +- share/ipldv2/data.go | 133 +++++++++++ share/ipldv2/data_hasher.go | 53 +++++ share/ipldv2/data_id.go | 103 +++++++++ share/ipldv2/data_id_test.go | 38 ++++ share/ipldv2/ipldv2.go | 35 ++- share/ipldv2/ipldv2_test.go | 4 +- share/ipldv2/pb/ipldv2pb.pb.go | 343 ++++++++++++++++++++++++++--- share/ipldv2/pb/ipldv2pb.proto | 12 +- share/ipldv2/sample.go | 56 ++--- share/ipldv2/sample_hasher.go | 2 +- share/ipldv2/sample_hasher_test.go | 2 +- share/ipldv2/sample_id.go | 2 +- share/ipldv2/sample_test.go | 16 +- 18 files changed, 796 insertions(+), 109 deletions(-) create mode 100644 share/ipldv2/data.go create mode 100644 share/ipldv2/data_hasher.go create mode 100644 share/ipldv2/data_id.go create mode 100644 share/ipldv2/data_id_test.go diff --git a/share/eds/file.go b/share/eds/file.go index cd305b7878..b2a6331339 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -20,6 +20,7 @@ type File interface { ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) + // 
Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) EDS() (*rsmt2d.ExtendedDataSquare, error) } diff --git a/share/ipldv2/axis.go b/share/ipldv2/axis.go index 992fa23b19..03c6354e9d 100644 --- a/share/ipldv2/axis.go +++ b/share/ipldv2/axis.go @@ -13,16 +13,19 @@ import ( ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" ) +// Axis represents an Axis of an EDS. type Axis struct { - AxisID AxisID - AxisHalf []share.Share + AxisID + + // AxisShares is the original half of the axis. + AxisShares []share.Share } // NewAxis constructs a new Axis. func NewAxis(id AxisID, axisHalf []share.Share) *Axis { return &Axis{ - AxisID: id, - AxisHalf: axisHalf, + AxisID: id, + AxisShares: axisHalf, } } @@ -94,7 +97,7 @@ func (s *Axis) MarshalBinary() ([]byte, error) { return (&ipldv2pb.Axis{ AxisId: id, - AxisHalf: s.AxisHalf, + AxisHalf: s.AxisShares, }).Marshal() } @@ -110,7 +113,7 @@ func (s *Axis) UnmarshalBinary(data []byte) error { return err } - s.AxisHalf = proto.AxisHalf + s.AxisShares = proto.AxisHalf return nil } @@ -120,21 +123,21 @@ func (s *Axis) Validate() error { return err } - sqrLn := len(s.AxisHalf) * 2 + sqrLn := len(s.AxisShares) * 2 if s.AxisID.AxisIndex > uint16(sqrLn) { return fmt.Errorf("axis index exceeds square size: %d > %d", s.AxisID.AxisIndex, sqrLn) } // TODO(@Wondertan): This computations are quite expensive and likely to be used further, // so we need to find a way to cache them and pass to the caller on the Bitswap side - parity, err := share.DefaultRSMT2DCodec().Encode(s.AxisHalf) + parity, err := share.DefaultRSMT2DCodec().Encode(s.AxisShares) if err != nil { return fmt.Errorf("while decoding erasure coded half: %w", err) } - s.AxisHalf = append(s.AxisHalf, parity...) + s.AxisShares = append(s.AxisShares, parity...) - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisHalf)/2), uint(s.AxisID.AxisIndex)) - for _, shr := range s.AxisHalf { + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisShares)/2), uint(s.AxisID.AxisIndex)) + for _, shr := range s.AxisShares { err := tree.Push(shr) if err != nil { return fmt.Errorf("while pushing shares to NMT: %w", err) diff --git a/share/ipldv2/axis_id.go b/share/ipldv2/axis_id.go index 9aa6059353..e4222a6882 100644 --- a/share/ipldv2/axis_id.go +++ b/share/ipldv2/axis_id.go @@ -104,7 +104,7 @@ func (s *AxisID) MarshalBinary() ([]byte, error) { // UnmarshalBinary decodes AxisID from binary form. 
func (s *AxisID) UnmarshalBinary(data []byte) error {
 	if len(data) != AxisIDSize {
-		return fmt.Errorf("incorrect data length: %d != %d", len(data), AxisIDSize)
+		return fmt.Errorf("invalid data length: %d != %d", len(data), AxisIDSize)
 	}
 	_, err := s.UnmarshalFrom(data)
 	return err
@@ -117,11 +117,11 @@ func (s *AxisID) Validate() error {
 	}
 
 	if len(s.AxisHash) != hashSize {
-		return fmt.Errorf("incorrect Hash size: %d != %d", len(s.AxisHash), hashSize)
+		return fmt.Errorf("invalid AxisHash size: %d != %d", len(s.AxisHash), hashSize)
 	}
 
 	if s.AxisType != rsmt2d.Col && s.AxisType != rsmt2d.Row {
-		return fmt.Errorf("incorrect Axis: %d", s.AxisType)
+		return fmt.Errorf("invalid AxisType: %d", s.AxisType)
 	}
 
 	return nil
diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go
index 1e8e408975..fcea737469 100644
--- a/share/ipldv2/blockstore.go
+++ b/share/ipldv2/blockstore.go
@@ -8,6 +8,8 @@ import (
 	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
 
+	"github.com/celestiaorg/nmt"
+
 	"github.com/celestiaorg/celestia-node/share/eds"
 )
 
@@ -56,6 +58,21 @@ func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error)
 			return nil, err
 		}
 
+		return blk, nil
+	case dataCodec:
+		id, err := DataIDFromCID(cid)
+		if err != nil {
+			err = fmt.Errorf("while converting CID to DataID: %w", err)
+			log.Error(err)
+			return nil, err
+		}
+
+		blk, err := b.getDataBlock(id)
+		if err != nil {
+			log.Error(err)
+			return nil, err
+		}
+
 		return blk, nil
 	default:
 		return nil, fmt.Errorf("unsupported codec")
@@ -65,7 +82,7 @@ func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error)
 func (b Blockstore[F]) getSampleBlock(id SampleID) (blocks.Block, error) {
 	f, err := b.fs.File(id.Height)
 	if err != nil {
-		return nil, fmt.Errorf("while getting EDS file from FS: %w", err)
+		return nil, fmt.Errorf("while getting ODS file from FS: %w", err)
 	}
 
 	shr, prf, err := f.ShareWithProof(id.AxisType, int(id.AxisIndex), int(id.ShareIndex))
@@ -81,7 +98,7 @@ func (b Blockstore[F]) getSampleBlock(id SampleID) (blocks.Block, error) {
 
 	err = f.Close()
 	if err != nil {
-		return nil, fmt.Errorf("while closing EDS file: %w", err)
+		return nil, fmt.Errorf("while closing ODS file: %w", err)
 	}
 
 	return blk, nil
@@ -112,6 +129,31 @@ func (b Blockstore[F]) getAxisBlock(id AxisID) (blocks.Block, error) {
 	return blk, nil
 }
 
+func (b Blockstore[F]) getDataBlock(id DataID) (blocks.Block, error) {
+	f, err := b.fs.File(id.Height)
+	if err != nil {
+		return nil, fmt.Errorf("while getting ODS file from FS: %w", err)
+	}
+
+	// data, prf, err := f.Data(id.DataNamespace, int(id.AxisIndex))
+	// if err != nil {
+	// 	return nil, fmt.Errorf("while getting Data: %w", err)
+	// }
+
+	s := NewData(id, nil, nmt.Proof{})
+	blk, err := s.IPLDBlock()
+	if err != nil {
+		return nil, fmt.Errorf("while converting Data to IPLD block: %w", err)
+	}
+
+	err = f.Close()
+	if err != nil {
+		return nil, fmt.Errorf("while closing ODS file: %w", err)
+	}
+
+	return blk, nil
+}
+
 func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
 	// TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and
 	// allocating Sample's block.Block.
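+	// A hedged sketch of one way to get there (not part of this change; the
+	// helper below is an assumption): ID sizes are fixed per codec, so only
+	// the share and proof portions would need deriving, e.g. from the file
+	// header instead of a full read:
+	//
+	//	switch cid.Type() {
+	//	case sampleCodec:
+	//		return SampleIDSize + estimatedShareAndProofSize(cid), nil
+	//	}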
@@ -126,40 +168,48 @@ func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { } func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { - var height uint64 + var id AxisID switch cid.Type() { case sampleCodec: - id, err := SampleIDFromCID(cid) + sid, err := SampleIDFromCID(cid) if err != nil { err = fmt.Errorf("while converting CID to SampleID: %w", err) log.Error(err) return false, err } - height = id.Height + id = sid.AxisID case axisCodec: - id, err := AxisIDFromCID(cid) + var err error + id, err = AxisIDFromCID(cid) if err != nil { err = fmt.Errorf("while converting CID to AxisID: %w", err) log.Error(err) return false, err } + case dataCodec: + did, err := DataIDFromCID(cid) + if err != nil { + err = fmt.Errorf("while converting CID to DataID: %w", err) + log.Error(err) + return false, err + } - height = id.Height + id = did.AxisID default: return false, fmt.Errorf("unsupported codec") } - f, err := b.fs.File(height) + f, err := b.fs.File(id.Height) if err != nil { - err = fmt.Errorf("while getting EDS file from FS: %w", err) + err = fmt.Errorf("while getting ODS file from FS: %w", err) log.Error(err) return false, err } err = f.Close() if err != nil { - err = fmt.Errorf("while closing EDS file: %w", err) + err = fmt.Errorf("while closing ODS file: %w", err) log.Error(err) return false, err } diff --git a/share/ipldv2/blockstore_test.go b/share/ipldv2/blockstore_test.go index 0d3a0f84e0..d186431e39 100644 --- a/share/ipldv2/blockstore_test.go +++ b/share/ipldv2/blockstore_test.go @@ -15,7 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -// TODO(@Wondertan): Add axis sampling code +// TODO(@Wondertan): Add axis and data code func TestBlockstoreGetShareSample(t *testing.T) { ctx := context.Background() @@ -40,7 +40,7 @@ func TestBlockstoreGetShareSample(t *testing.T) { err = sample.Validate() require.NoError(t, err) - assert.EqualValues(t, id, sample.ID) + assert.EqualValues(t, id, sample.SampleID) } } } diff --git a/share/ipldv2/data.go b/share/ipldv2/data.go new file mode 100644 index 0000000000..6472d4df44 --- /dev/null +++ b/share/ipldv2/data.go @@ -0,0 +1,133 @@ +package ipldv2 + +import ( + "fmt" + + blocks "github.com/ipfs/go-block-format" + + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/share" + ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" +) + +type Data struct { + DataID + + DataProof nmt.Proof + DataShares []share.Share +} + +// NewData constructs a new Data. +func NewData(id DataID, shares []share.Share, proof nmt.Proof) *Data { + return &Data{ + DataID: id, + DataShares: shares, + DataProof: proof, + } +} + +// // NewDataFromEDS samples the EDS and constructs a new Data. +// func NewDataFromEDS( +// idx int, +// eds *rsmt2d.ExtendedDataSquare, +// height uint64, +// ) (*Data, error) { +// sqrLn := int(eds.Width()) +// +// // TODO(@Wondertan): Should be an rsmt2d method +// var axisHalf [][]byte +// switch axisType { +// case rsmt2d.Row: +// axisHalf = eds.Row(uint(idx))[:sqrLn/2] +// case rsmt2d.Col: +// axisHalf = eds.Col(uint(idx))[:sqrLn/2] +// default: +// panic("invalid axis") +// } +// +// root, err := share.NewRoot(eds) +// if err != nil { +// return nil, fmt.Errorf("while computing root: %w", err) +// } +// +// id := NewDataID(axisType, uint16(idx), root, height) +// return NewData(id, axisHalf), nil +// } + +// DataFromBlock converts blocks.Block into Data. 
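+//
+// A hedged usage sketch (the Bitswap plumbing around it is an assumption, not
+// part of this file):
+//
+//	blk, _ := bserv.GetBlock(ctx, cid) // cid built from a DataID
+//	data, err := DataFromBlock(blk)
+//	// data.Validate() re-checks the namespace proof when needed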
+func DataFromBlock(blk blocks.Block) (*Data, error) {
+	if err := validateCID(blk.Cid()); err != nil {
+		return nil, err
+	}
+
+	s := &Data{}
+	err := s.UnmarshalBinary(blk.RawData())
+	if err != nil {
+		return nil, fmt.Errorf("while unmarshalling Data: %w", err)
+	}
+
+	return s, nil
+}
+
+// IPLDBlock converts Data to an IPLD block for Bitswap compatibility.
+func (s *Data) IPLDBlock() (blocks.Block, error) {
+	cid, err := s.DataID.Cid()
+	if err != nil {
+		return nil, err
+	}
+
+	data, err := s.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	return blocks.NewBlockWithCid(data, cid)
+}
+
+// MarshalBinary marshals Data to binary.
+func (s *Data) MarshalBinary() ([]byte, error) {
+	id, err := s.DataID.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	return (&ipldv2pb.Data{
+		DataId:     id,
+		DataShares: s.DataShares,
+	}).Marshal()
+}
+
+// UnmarshalBinary unmarshals Data from binary.
+func (s *Data) UnmarshalBinary(data []byte) error {
+	proto := &ipldv2pb.Data{}
+	if err := proto.Unmarshal(data); err != nil {
+		return err
+	}
+
+	err := s.DataID.UnmarshalBinary(proto.DataId)
+	if err != nil {
+		return err
+	}
+
+	s.DataShares = proto.DataShares
+	return nil
+}
+
+// Validate performs basic validation of Data.
+func (s *Data) Validate() error {
+	if err := s.DataID.Validate(); err != nil {
+		return err
+	}
+
+	if len(s.DataShares) == 0 {
+		return fmt.Errorf("empty DataShares")
+	}
+
+	s.DataProof.WithHashedProof(hasher())
+	if !s.DataProof.VerifyNamespace(hasher(), s.DataNamespace.ToNMT(), s.DataShares, s.AxisHash) {
+		return fmt.Errorf("invalid DataProof")
+	}
+
+	return nil
+}
diff --git a/share/ipldv2/data_hasher.go b/share/ipldv2/data_hasher.go
new file mode 100644
index 0000000000..5903269e70
--- /dev/null
+++ b/share/ipldv2/data_hasher.go
@@ -0,0 +1,53 @@
+package ipldv2
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// DataHasher implements hash.Hash interface for Data.
+type DataHasher struct {
+	data Data
+}
+
+// Write expects a marshaled Data to validate.
+func (sh *DataHasher) Write(data []byte) (int, error) {
+	if err := sh.data.UnmarshalBinary(data); err != nil {
+		err = fmt.Errorf("while unmarshaling Data: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	if err := sh.data.Validate(); err != nil {
+		err = fmt.Errorf("while validating Data: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	return len(data), nil
+}
+
+// Sum returns the "multihash" of the DataID.
+func (sh *DataHasher) Sum([]byte) []byte {
+	sum, err := sh.data.DataID.MarshalBinary()
+	if err != nil {
+		err = fmt.Errorf("while marshaling DataID: %w", err)
+		log.Error(err)
+	}
+	return sum
+}
+
+// Reset resets the Hash to its initial state.
+func (sh *DataHasher) Reset() {
+	sh.data = Data{}
+}
+
+// Size returns the number of bytes Sum will return.
+func (sh *DataHasher) Size() int {
+	return DataIDSize
+}
+
+// BlockSize returns the hash's underlying block size.
+func (sh *DataHasher) BlockSize() int {
+	return sha256.BlockSize
+}
diff --git a/share/ipldv2/data_id.go b/share/ipldv2/data_id.go
new file mode 100644
index 0000000000..bd6028c60a
--- /dev/null
+++ b/share/ipldv2/data_id.go
@@ -0,0 +1,103 @@
+package ipldv2
+
+import (
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// DataIDSize is the size of the DataID in bytes
+// We cut 1 byte from AxisIDSize because we don't need AxisType
+// as its value is always Row.
+const DataIDSize = AxisIDSize - 1 + share.NamespaceSize
+
+// DataID is a unique identifier of a namespaced Data inside EDS Axis.
+type DataID struct {
+	AxisID
+
+	// DataNamespace is the namespace of the data.
+	DataNamespace share.Namespace
+}
+
+// NewDataID constructs a new DataID.
+func NewDataID(namespace share.Namespace, axisIdx int, root *share.Root, height uint64) DataID {
+	return DataID{
+		AxisID:        NewAxisID(rsmt2d.Row, uint16(axisIdx), root, height),
+		DataNamespace: namespace,
+	}
+}
+
+// DataIDFromCID converts CID to DataID.
+func DataIDFromCID(cid cid.Cid) (id DataID, err error) {
+	if err = validateCID(cid); err != nil {
+		return id, err
+	}
+
+	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
+	if err != nil {
+		return id, fmt.Errorf("while unmarshalling DataID: %w", err)
+	}
+
+	return id, nil
+}
+
+// Cid returns sample ID encoded as CID.
+func (s *DataID) Cid() (cid.Cid, error) {
+	// avoid using proto serialization for CID as it's not deterministicnot lambo though
+	data, err := s.MarshalBinary()
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	buf, err := mh.Encode(data, sampleMultihashCode)
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	return cid.NewCidV1(sampleCodec, buf), nil
+}
+
+// MarshalBinary encodes DataID into binary form.
+// NOTE: Proto is avoided because
+// * Its size is not deterministic which is required for IPLD.
+// * No support for uint16
+func (s *DataID) MarshalBinary() ([]byte, error) {
+	data := make([]byte, 0, DataIDSize+1)
+	n, err := s.AxisID.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	data = data[1:n] // cut the first byte with AxisType
+	data = append(data, s.DataNamespace...)
+	return data, nil
+}
+
+// UnmarshalBinary decodes DataID from binary form.
+func (s *DataID) UnmarshalBinary(data []byte) error {
+	if len(data) != DataIDSize {
+		return fmt.Errorf("invalid data length: %d != %d", len(data), DataIDSize)
+	}
+	n, err := s.AxisID.UnmarshalFrom(append([]byte{byte(rsmt2d.Row)}, data...))
+	if err != nil {
+		return err
+	}
+	s.DataNamespace = data[n-1:]
+	return nil
+}
+
+// Validate validates fields of DataID.
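+// Note that this is a shallow check of the ID fields only; the namespace
+// proof itself is verified in Data.Validate.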
+func (s *DataID) Validate() error { + if err := s.AxisID.Validate(); err != nil { + return fmt.Errorf("while validating AxisID: %w", err) + } + if err := s.DataNamespace.ValidateForData(); err != nil { + return fmt.Errorf("while validating DataNamespace: %w", err) + } + + return nil +} diff --git a/share/ipldv2/data_id_test.go b/share/ipldv2/data_id_test.go new file mode 100644 index 0000000000..89018a45fe --- /dev/null +++ b/share/ipldv2/data_id_test.go @@ -0,0 +1,38 @@ +package ipldv2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +// TODO: Add test that AxisType is not serialized +func TestDataID(t *testing.T) { + square := edstest.RandEDS(t, 2) + root, err := share.NewRoot(square) + require.NoError(t, err) + + sid := NewDataID(sharetest.RandV0Namespace(), 2, root, 1) + id, err := sid.Cid() + require.NoError(t, err) + + assert.EqualValues(t, sampleCodec, id.Prefix().Codec) + assert.EqualValues(t, sampleMultihashCode, id.Prefix().MhType) + assert.EqualValues(t, DataIDSize, id.Prefix().MhLength) + + data, err := sid.MarshalBinary() + require.NoError(t, err) + + sidOut := DataID{} + err = sidOut.UnmarshalBinary(data) + require.NoError(t, err) + assert.EqualValues(t, sid, sidOut) + + err = sidOut.Validate() + require.NoError(t, err) +} diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index 6738da4f4c..ad9a285297 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -20,13 +20,19 @@ const ( // sampleMultihashCode is the multihash code for share sampling multihash function. sampleMultihashCode = 0x7801 - // axisCodec is a CID codec used for axis sampling Bitswap requests over Namespaced Merkle + // axisCodec is a CID codec used for axis Bitswap requests over Namespaced Merkle // Tree. axisCodec = 0x7810 // axisMultihashCode is the multihash code for custom axis sampling multihash function. axisMultihashCode = 0x7811 + // dataCodec is a CID codec used for data Bitswap requests over Namespaced Merkle Tree. + dataCodec = 0x7820 + + // dataMultihashCode is the multihash code for data multihash function. + dataMultihashCode = 0x7821 + // mhPrefixSize is the size of the multihash prefix that used to cut it off. mhPrefixSize = 4 ) @@ -44,6 +50,9 @@ func init() { mh.Register(axisMultihashCode, func() hash.Hash { return &AxisHasher{} }) + mh.Register(dataMultihashCode, func() hash.Hash { + return &AxisHasher{} + }) } // defaultAllowlist keeps default list of hashes allowed in the network. 
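+// The list mirrors the multihash registrations above; a hedged sketch of how
+// those registrations are exercised during response verification
+// (mh.GetHasher is real go-multihash API, the rest is illustrative):
+//
+//	h, _ := mh.GetHasher(sampleMultihashCode) // -> &SampleHasher{}
+//	h.Write(marshaledSample)                  // unmarshals and validates
+//	digest := h.Sum(nil)                      // digest == SampleID bytes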
@@ -54,26 +63,28 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we disable all codes except home-baked code switch code { - case sampleMultihashCode: - case axisMultihashCode: - default: - return false + case sampleMultihashCode, axisMultihashCode, dataMultihashCode: + return true } - return true + return false } func validateCID(cid cid.Cid) error { prefix := cid.Prefix() - if prefix.Codec != sampleCodec && prefix.Codec != axisCodec { - return fmt.Errorf("unsupported codec %d", prefix.Codec) + if !defaultAllowlist.IsAllowed(prefix.MhType) { + return fmt.Errorf("unsupported multihash type %d", prefix.MhType) } - if prefix.MhType != sampleMultihashCode && prefix.MhType != axisMultihashCode { - return fmt.Errorf("unsupported multihash %d", prefix.MhType) + switch prefix.Codec { + default: + return fmt.Errorf("unsupported codec %d", prefix.Codec) + case sampleCodec, axisCodec, dataCodec: } - if prefix.MhLength != SampleIDSize && prefix.MhLength != AxisIDSize { - return fmt.Errorf("invalid multihash length %d", prefix.MhLength) + switch prefix.MhLength { + default: + return fmt.Errorf("unsupported multihash length %d", prefix.MhLength) + case SampleIDSize, AxisIDSize, DataIDSize: } return nil diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 8a66a0f1b0..ed483cdbc9 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -41,7 +41,7 @@ func TestSampleRoundtripGetBlock(t *testing.T) { smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) require.NoError(t, err) - cid, err := smpl.ID.Cid() + cid, err := smpl.SampleID.Cid() require.NoError(t, err) blkOut, err := client.GetBlock(ctx, cid) @@ -72,7 +72,7 @@ func TestSampleRoundtripGetBlocks(t *testing.T) { smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) require.NoError(t, err) - cid, err := smpl.ID.Cid() + cid, err := smpl.SampleID.Cid() require.NoError(t, err) set.Add(cid) diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/ipldv2/pb/ipldv2pb.pb.go index 39d2b28ccc..db5f876086 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/ipldv2/pb/ipldv2pb.pb.go @@ -51,18 +51,18 @@ func (AxisType) EnumDescriptor() ([]byte, []int) { type SampleType int32 const ( - SampleType_Data SampleType = 0 - SampleType_Parity SampleType = 1 + SampleType_DataSample SampleType = 0 + SampleType_ParitySample SampleType = 1 ) var SampleType_name = map[int32]string{ - 0: "Data", - 1: "Parity", + 0: "DataSample", + 1: "ParitySample", } var SampleType_value = map[string]int32{ - "Data": 0, - "Parity": 1, + "DataSample": 0, + "ParitySample": 1, } func (x SampleType) String() string { @@ -176,7 +176,7 @@ func (m *Sample) GetSampleType() SampleType { if m != nil { return m.SampleType } - return SampleType_Data + return SampleType_DataSample } func (m *Sample) GetSampleShare() []byte { @@ -193,36 +193,99 @@ func (m *Sample) GetSampleProof() *pb.Proof { return nil } +type Data struct { + DataId []byte `protobuf:"bytes,1,opt,name=data_id,json=dataId,proto3" json:"data_id,omitempty"` + DataShares [][]byte `protobuf:"bytes,2,rep,name=data_shares,json=dataShares,proto3" json:"data_shares,omitempty"` + DataProof *pb.Proof `protobuf:"bytes,3,opt,name=data_proof,json=dataProof,proto3" json:"data_proof,omitempty"` +} + +func (m *Data) Reset() { *m = Data{} } +func (m *Data) String() string { return proto.CompactTextString(m) } +func (*Data) ProtoMessage() {} +func (*Data) Descriptor() ([]byte, []int) { + return fileDescriptor_cb41c3a4f982a271, []int{2} +} +func (m *Data) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Data.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Data) XXX_Merge(src proto.Message) { + xxx_messageInfo_Data.Merge(m, src) +} +func (m *Data) XXX_Size() int { + return m.Size() +} +func (m *Data) XXX_DiscardUnknown() { + xxx_messageInfo_Data.DiscardUnknown(m) +} + +var xxx_messageInfo_Data proto.InternalMessageInfo + +func (m *Data) GetDataId() []byte { + if m != nil { + return m.DataId + } + return nil +} + +func (m *Data) GetDataShares() [][]byte { + if m != nil { + return m.DataShares + } + return nil +} + +func (m *Data) GetDataProof() *pb.Proof { + if m != nil { + return m.DataProof + } + return nil +} + func init() { proto.RegisterEnum("AxisType", AxisType_name, AxisType_value) proto.RegisterEnum("SampleType", SampleType_name, SampleType_value) proto.RegisterType((*Axis)(nil), "Axis") proto.RegisterType((*Sample)(nil), "Sample") + proto.RegisterType((*Data)(nil), "Data") } func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 291 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x50, 0xcd, 0x4e, 0x83, 0x40, - 0x18, 0x64, 0x0b, 0xa1, 0xf8, 0x41, 0x2a, 0xd9, 0x8b, 0x1b, 0x35, 0x1b, 0xe4, 0x44, 0x1a, 0x03, - 0x09, 0x5e, 0xbd, 0xf8, 0x73, 0xb0, 0xb7, 0x86, 0x7a, 0x37, 0x4b, 0x80, 0x94, 0x04, 0xb3, 0x1b, - 0x20, 0x5a, 0xde, 0xc2, 0xe7, 0xf0, 0x49, 0x3c, 0xf6, 0xe8, 0xd1, 0xc0, 0x8b, 0x98, 0x5d, 0x68, - 0x7b, 0x9b, 0x99, 0x6f, 0xbe, 0xd9, 0x9d, 0x0f, 0x68, 0xb3, 0x65, 0x75, 0x1e, 0x95, 0xa2, 0xca, - 0x3e, 0xe2, 0x48, 0xa4, 0x13, 0x12, 0x69, 0x28, 0x6a, 0xde, 0xf2, 0xcb, 0x85, 0x48, 0x23, 0x51, - 0x73, 0x5e, 0x8c, 0xdc, 0xbf, 0x07, 0xe3, 0x61, 0x57, 0x36, 0xf8, 0x02, 0xe6, 0x6c, 0x57, 0x36, - 0x6f, 0x65, 0x46, 0x90, 0x87, 0x02, 0x27, 0x31, 0x25, 0x5d, 0x65, 0xf8, 0x0a, 0xce, 0xd4, 0x60, - 0xcb, 0xaa, 0x82, 0xcc, 0x3c, 0x3d, 0x70, 0x12, 0x4b, 0x0a, 0x2f, 0xac, 0x2a, 0xfc, 0x6f, 0x04, - 0xe6, 0x86, 0xbd, 0x8b, 0x2a, 0x97, 0xbe, 0x46, 0xa1, 0x53, 0x84, 0x35, 0x0a, 0xab, 0x0c, 0xdf, - 0x82, 0x3d, 0x0d, 0xdb, 0x4e, 0xe4, 0x64, 0xe6, 0xa1, 0x60, 0x11, 0xdb, 0xe1, 0xb8, 0xfa, 0xda, - 0x89, 0x3c, 0x81, 0xe6, 0x88, 0xf1, 0x0d, 0x38, 0x93, 0x5b, 0x95, 0x21, 0xba, 0x4a, 0x9b, 0x12, - 0x36, 0x52, 0xc2, 0xf1, 0xd1, 0xa2, 0xca, 0x10, 0xc3, 0x43, 0x81, 0x1d, 0x9f, 0x87, 0x53, 0xb5, - 0x34, 0x5c, 0x4b, 0x70, 0xd8, 0x51, 0x64, 0x79, 0x0d, 0x96, 0xac, 0xaa, 0x9e, 0x98, 0x83, 0x9e, - 0xf0, 0x4f, 0x57, 0x93, 0xe0, 0x89, 0x57, 0x2e, 0x5a, 0xfa, 0x00, 0xa7, 0xef, 0x60, 0x0b, 0x8c, - 0x67, 0xd6, 0x32, 0x57, 0xc3, 0x00, 0xe6, 0x9a, 0xd5, 0x65, 0xdb, 0xb9, 0xe8, 0x91, 0xfc, 0xf4, - 0x14, 0xed, 0x7b, 0x8a, 0xfe, 0x7a, 0x8a, 0xbe, 0x06, 0xaa, 0xed, 0x07, 0xaa, 0xfd, 0x0e, 0x54, - 0x4b, 0x4d, 0x75, 0xcd, 0xbb, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe0, 0x45, 0x2a, 0x9d, 0x7f, - 0x01, 0x00, 0x00, + // 336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xbd, 0x4e, 0xf3, 0x40, + 0x10, 0xf4, 0xc5, 0x51, 0x7e, 0xd6, 0x56, 0x3e, 0xeb, 0x9a, 0xcf, 0x02, 0x74, 0x98, 0x54, 0x56, + 0x84, 0x1c, 0xc9, 0xb4, 0x34, 0xfc, 0x14, 0xa4, 0x8b, 0x1c, 0x7a, 0x74, 0x96, 0x1d, 0xc5, 0x92, + 0x91, 0x4f, 0x3e, 0x0b, 0x92, 0xb7, 0xe0, 0x39, 0x78, 0x12, 
0xca, 0x94, 0x94, 0x28, 0x79, 0x11, + 0xb4, 0xeb, 0x23, 0xa1, 0xa0, 0x9b, 0x99, 0x9d, 0xdb, 0xf1, 0xac, 0x41, 0xe8, 0x95, 0xac, 0xf3, + 0x69, 0xa1, 0xca, 0xec, 0x25, 0x9e, 0xaa, 0xd4, 0x20, 0x95, 0x46, 0xaa, 0xae, 0x9a, 0xea, 0x64, + 0xa4, 0xd2, 0xa9, 0xaa, 0xab, 0x6a, 0xd9, 0xf2, 0xf1, 0x35, 0x74, 0x6f, 0xd6, 0x85, 0xe6, 0xff, + 0xa1, 0x2f, 0xd7, 0x85, 0x7e, 0x2a, 0x32, 0x9f, 0x05, 0x2c, 0x74, 0x93, 0x1e, 0xd2, 0x59, 0xc6, + 0x4f, 0x61, 0x48, 0x83, 0x95, 0x2c, 0x97, 0x7e, 0x27, 0xb0, 0x43, 0x37, 0x19, 0xa0, 0xf0, 0x20, + 0xcb, 0xe5, 0xf8, 0x9d, 0x41, 0x6f, 0x21, 0x9f, 0x55, 0x99, 0xa3, 0x4f, 0x13, 0x3a, 0xae, 0x18, + 0xb4, 0xc2, 0x2c, 0xe3, 0x97, 0xe0, 0x98, 0x61, 0xb3, 0x51, 0xb9, 0xdf, 0x09, 0x58, 0x38, 0x8a, + 0x9d, 0xa8, 0x7d, 0xfa, 0xb8, 0x51, 0x79, 0x02, 0xfa, 0x80, 0xf9, 0x05, 0xb8, 0xc6, 0x4d, 0x65, + 0x7c, 0x9b, 0xb6, 0x99, 0x0d, 0x0b, 0x94, 0x78, 0x7c, 0xb0, 0x50, 0x19, 0xbf, 0x1b, 0xb0, 0xd0, + 0x89, 0xff, 0x45, 0xa6, 0x5a, 0x1a, 0xcd, 0x11, 0xfc, 0xbc, 0x21, 0x32, 0x56, 0xd0, 0xbd, 0x97, + 0x8d, 0xc4, 0xaa, 0x99, 0x6c, 0xe4, 0xaf, 0xaa, 0x48, 0x67, 0x19, 0x3f, 0x07, 0x87, 0x06, 0x94, + 0xaa, 0x4d, 0x59, 0x40, 0x89, 0x42, 0x35, 0x8f, 0x80, 0x98, 0xc9, 0xb4, 0xff, 0xce, 0x1c, 0xa2, + 0x85, 0xe0, 0xe4, 0x0c, 0x06, 0x78, 0x5c, 0x2a, 0xd5, 0x07, 0x3b, 0xa9, 0x5e, 0x3d, 0x0b, 0xc1, + 0x5d, 0x55, 0x7a, 0x6c, 0x12, 0x01, 0x1c, 0x0f, 0xc0, 0x47, 0x00, 0xf8, 0x75, 0xad, 0xe2, 0x59, + 0xdc, 0x03, 0x77, 0x2e, 0xeb, 0xa2, 0xd9, 0x18, 0x85, 0xdd, 0xfa, 0x1f, 0x3b, 0xc1, 0xb6, 0x3b, + 0xc1, 0xbe, 0x76, 0x82, 0xbd, 0xed, 0x85, 0xb5, 0xdd, 0x0b, 0xeb, 0x73, 0x2f, 0xac, 0xb4, 0x47, + 0xff, 0xf2, 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x91, 0x61, 0x65, 0x9c, 0xfd, 0x01, 0x00, 0x00, } func (m *Axis) Marshal() (dAtA []byte, err error) { @@ -318,6 +381,57 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Data) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Data) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DataProof != nil { + { + size, err := m.DataProof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.DataShares) > 0 { + for iNdEx := len(m.DataShares) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DataShares[iNdEx]) + copy(dAtA[i:], m.DataShares[iNdEx]) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataShares[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.DataId) > 0 { + i -= len(m.DataId) + copy(dAtA[i:], m.DataId) + i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintIpldv2Pb(dAtA []byte, offset int, v uint64) int { offset -= sovIpldv2Pb(v) base := offset @@ -372,6 +486,29 @@ func (m *Sample) Size() (n int) { return n } +func (m *Data) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DataId) + if l > 0 { + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + if len(m.DataShares) > 0 { + for _, b := range m.DataShares { + l = len(b) + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + } + if m.DataProof != nil { + l = m.DataProof.Size() + n += 1 + l + sovIpldv2Pb(uint64(l)) + } + return n +} + 
func sovIpldv2Pb(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -667,6 +804,158 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } return nil } +func (m *Data) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Data: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataId = append(m.DataId[:0], dAtA[iNdEx:postIndex]...) + if m.DataId == nil { + m.DataId = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataShares", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataShares = append(m.DataShares, make([]byte, postIndex-iNdEx)) + copy(m.DataShares[len(m.DataShares)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataProof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIpldv2Pb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIpldv2Pb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIpldv2Pb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DataProof == nil { + m.DataProof = &pb.Proof{} + } + if err := m.DataProof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIpldv2Pb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipIpldv2Pb(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/ipldv2/pb/ipldv2pb.proto index 22ce8e3229..e6f19f252a 100644 --- a/share/ipldv2/pb/ipldv2pb.proto 
+++ b/share/ipldv2/pb/ipldv2pb.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -import "pb/proof.proto"; +import "pb/proof.proto"; // celestiaorg/nmt/pb/proof.proto enum AxisType { Row = 0; @@ -13,8 +13,8 @@ message Axis { } enum SampleType { - Data = 0; - Parity = 1; + DataSample = 0; + ParitySample = 1; } message Sample { @@ -23,3 +23,9 @@ message Sample { bytes sample_share = 3; proof.pb.Proof sample_proof = 4; } + +message Data { + bytes data_id = 1; + repeated bytes data_shares = 2; + proof.pb.Proof data_proof = 3; +} diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go index 8c1449a21d..c7d05b23d2 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/sample.go @@ -27,14 +27,14 @@ const ( // Sample represents a sample of an NMT in EDS. type Sample struct { - // ID of the Sample - ID SampleID + SampleID + // Type of the Sample Type SampleType - // Proof of Share inclusion in the NMT - Proof nmt.Proof - // Share being sampled - Share share.Share + // SampleProof of SampleShare inclusion in the NMT + SampleProof nmt.Proof + // SampleShare is a share being sampled + SampleShare share.Share } // NewSample constructs a new Sample. @@ -45,10 +45,10 @@ func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample } return &Sample{ - ID: id, - Type: tp, - Proof: proof, - Share: shr, + SampleID: id, + Type: tp, + SampleProof: proof, + SampleShare: shr, } } @@ -113,7 +113,7 @@ func SampleFromBlock(blk blocks.Block) (*Sample, error) { // IPLDBlock converts Sample to an IPLD block for Bitswap compatibility. func (s *Sample) IPLDBlock() (blocks.Block, error) { - cid, err := s.ID.Cid() + cid, err := s.SampleID.Cid() if err != nil { return nil, err } @@ -128,23 +128,23 @@ func (s *Sample) IPLDBlock() (blocks.Block, error) { // MarshalBinary marshals Sample to binary. func (s *Sample) MarshalBinary() ([]byte, error) { - id, err := s.ID.MarshalBinary() + id, err := s.SampleID.MarshalBinary() if err != nil { return nil, err } proof := &nmtpb.Proof{} - proof.Nodes = s.Proof.Nodes() - proof.End = int64(s.Proof.End()) - proof.Start = int64(s.Proof.Start()) - proof.IsMaxNamespaceIgnored = s.Proof.IsMaxNamespaceIDIgnored() - proof.LeafHash = s.Proof.LeafHash() + proof.Nodes = s.SampleProof.Nodes() + proof.End = int64(s.SampleProof.End()) + proof.Start = int64(s.SampleProof.Start()) + proof.IsMaxNamespaceIgnored = s.SampleProof.IsMaxNamespaceIDIgnored() + proof.LeafHash = s.SampleProof.LeafHash() return (&ipldv2pb.Sample{ SampleId: id, SampleType: ipldv2pb.SampleType(s.Type), SampleProof: proof, - SampleShare: s.Share, + SampleShare: s.SampleShare, }).Marshal() } @@ -155,35 +155,35 @@ func (s *Sample) UnmarshalBinary(data []byte) error { return err } - err := s.ID.UnmarshalBinary(proto.SampleId) + err := s.SampleID.UnmarshalBinary(proto.SampleId) if err != nil { return err } s.Type = SampleType(proto.SampleType) - s.Proof = nmt.ProtoToProof(*proto.SampleProof) - s.Share = proto.SampleShare + s.SampleProof = nmt.ProtoToProof(*proto.SampleProof) + s.SampleShare = proto.SampleShare return nil } -// Validate validates Sample's fields and proof of Share inclusion in the NMT. +// Validate validates Sample's fields and proof of SampleShare inclusion in the NMT. 
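+// Note: parity shares carry no user namespace of their own, so inclusion is
+// verified under share.ParitySharesNamespace, while data shares are verified
+// under the namespace embedded in the share itself.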
 func (s *Sample) Validate() error {
-	if err := s.ID.Validate(); err != nil {
+	if err := s.SampleID.Validate(); err != nil {
 		return err
 	}
 
 	if s.Type != DataSample && s.Type != ParitySample {
-		return fmt.Errorf("incorrect sample type: %d", s.Type)
+		return fmt.Errorf("invalid SampleType: %d", s.Type)
 	}
 
 	namespace := share.ParitySharesNamespace
 	if s.Type == DataSample {
-		namespace = share.GetNamespace(s.Share)
+		namespace = share.GetNamespace(s.SampleShare)
 	}
 
-	s.Proof.WithHashedProof(hasher())
-	if !s.Proof.VerifyInclusion(hasher(), namespace.ToNMT(), [][]byte{s.Share}, s.ID.AxisHash) {
-		return errors.New("sample proof is invalid")
+	s.SampleProof.WithHashedProof(hasher())
+	if !s.SampleProof.VerifyInclusion(hasher(), namespace.ToNMT(), [][]byte{s.SampleShare}, s.AxisHash) {
+		return errors.New("invalid SampleProof")
 	}
 
 	return nil
diff --git a/share/ipldv2/sample_hasher.go b/share/ipldv2/sample_hasher.go
index a9dc4d60e0..ddccbcbb26 100644
--- a/share/ipldv2/sample_hasher.go
+++ b/share/ipldv2/sample_hasher.go
@@ -29,7 +29,7 @@ func (sh *SampleHasher) Write(data []byte) (int, error) {
 
 // Sum returns the "multihash" of the SampleID.
 func (sh *SampleHasher) Sum([]byte) []byte {
-	sum, err := sh.sample.ID.MarshalBinary()
+	sum, err := sh.sample.SampleID.MarshalBinary()
 	if err != nil {
 		err = fmt.Errorf("while marshaling SampleID: %w", err)
 		log.Error(err)
diff --git a/share/ipldv2/sample_hasher_test.go b/share/ipldv2/sample_hasher_test.go
index 31989dd4aa..00223049e8 100644
--- a/share/ipldv2/sample_hasher_test.go
+++ b/share/ipldv2/sample_hasher_test.go
@@ -30,7 +30,7 @@ func TestSampleHasher(t *testing.T) {
 	assert.EqualValues(t, len(data), n)
 
 	digest := hasher.Sum(nil)
-	sid, err := sample.ID.MarshalBinary()
+	sid, err := sample.SampleID.MarshalBinary()
 	require.NoError(t, err)
 	assert.EqualValues(t, sid, digest)
 
diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go
index e8111e520b..8daa815c72 100644
--- a/share/ipldv2/sample_id.go
+++ b/share/ipldv2/sample_id.go
@@ -85,7 +85,7 @@ func (s *SampleID) MarshalBinary() ([]byte, error) {
 
 // UnmarshalBinary decodes SampleID from binary form.
func (s *SampleID) UnmarshalBinary(data []byte) error { if len(data) != SampleIDSize { - return fmt.Errorf("invalid data lengt: %d != %d", len(data), SampleIDSize) + return fmt.Errorf("invalid data length: %d != %d", len(data), SampleIDSize) } n, err := s.AxisID.UnmarshalFrom(data) if err != nil { diff --git a/share/ipldv2/sample_test.go b/share/ipldv2/sample_test.go index 3dd60b0482..1bf28e76a8 100644 --- a/share/ipldv2/sample_test.go +++ b/share/ipldv2/sample_test.go @@ -14,24 +14,24 @@ import ( func TestSample(t *testing.T) { square := edstest.RandEDS(t, 2) - sid, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1) + sample, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1) require.NoError(t, err) - data, err := sid.MarshalBinary() + data, err := sample.MarshalBinary() require.NoError(t, err) - blk, err := sid.IPLDBlock() + blk, err := sample.IPLDBlock() require.NoError(t, err) - cid, err := sid.ID.Cid() + cid, err := sample.SampleID.Cid() require.NoError(t, err) assert.EqualValues(t, blk.Cid(), cid) - sidOut := &Sample{} - err = sidOut.UnmarshalBinary(data) + sampleOut := &Sample{} + err = sampleOut.UnmarshalBinary(data) require.NoError(t, err) - assert.EqualValues(t, sid, sidOut) + assert.EqualValues(t, sample, sampleOut) - err = sidOut.Validate() + err = sampleOut.Validate() require.NoError(t, err) } From 8a66fd56ec0b3f5975c03d8d5c9bcb28883efd83 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sat, 2 Dec 2023 02:44:04 +0100 Subject: [PATCH 022/132] namespace mh but finished and tested --- share/eds/file.go | 43 +++++++++++++++++++- share/eds/ods_file.go | 9 +++++ share/ipldv2/axis.go | 14 +++---- share/ipldv2/axis_id.go | 8 ++-- share/ipldv2/axis_test.go | 18 ++++----- share/ipldv2/blockstore.go | 12 +++--- share/ipldv2/data.go | 76 +++++++++++++++++++++++------------- share/ipldv2/data_id.go | 8 ++-- share/ipldv2/data_id_test.go | 2 +- share/ipldv2/data_test.go | 38 ++++++++++++++++++ share/ipldv2/ipldv2.go | 2 +- share/ipldv2/ipldv2_test.go | 75 +++++++++++++++++++++++++++++++++-- share/ipldv2/sample.go | 10 ++--- share/ipldv2/sample_test.go | 2 +- 14 files changed, 245 insertions(+), 72 deletions(-) create mode 100644 share/ipldv2/data_test.go diff --git a/share/eds/file.go b/share/eds/file.go index b2a6331339..9d3a5f2b31 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -1,6 +1,7 @@ package eds import ( + "context" "fmt" "io" "os" @@ -12,6 +13,7 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" ) type File interface { @@ -20,7 +22,7 @@ type File interface { ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) - // Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) + Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) EDS() (*rsmt2d.ExtendedDataSquare, error) } @@ -204,6 +206,15 @@ func (f *LazyFile) ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (sh return shrs[shrIdx], proof, nil } +func (f *LazyFile) Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { + shrs, err := f.Axis(rsmt2d.Row, axisIdx) + if err != nil { + return nil, nmt.Proof{}, err + } + + return NDFromShares(shrs, namespace, axisIdx) +} + func (f *LazyFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { shrLn := int(f.hdr.shareSize) sqrLn := int(f.hdr.squareSize) @@ 
-236,3 +247,33 @@ func (f *LazyFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { return nil, fmt.Errorf("invalid mode type") // TODO(@Wondertan): Do fields validation right after read } } + +func NDFromShares(shrs []share.Share, namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { + bserv := ipld.NewMemBlockservice() + batchAdder := ipld.NewNmtNodeAdder(context.TODO(), bserv, ipld.MaxSizeBatchOption(len(shrs))) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shrs)/2), uint(axisIdx), + nmt.NodeVisitor(batchAdder.Visit)) + for _, shr := range shrs { + err := tree.Push(shr) + if err != nil { + return nil, nmt.Proof{}, err + } + } + + root, err := tree.Root() + if err != nil { + return nil, nmt.Proof{}, err + } + + err = batchAdder.Commit() + if err != nil { + return nil, nmt.Proof{}, err + } + + cid := ipld.MustCidFromNamespacedSha256(root) + row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, cid, namespace, len(shrs)) + if err != nil { + return nil, nmt.Proof{}, err + } + return row, *proof, nil +} diff --git a/share/eds/ods_file.go b/share/eds/ods_file.go index aaaa04c22f..646044a218 100644 --- a/share/eds/ods_file.go +++ b/share/eds/ods_file.go @@ -52,6 +52,15 @@ func (f *MemFile) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, er return getAxis(axisType, axisIdx, f.Eds)[:f.Size()/2], nil } +func (f *MemFile) Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { + shrs, err := f.Axis(rsmt2d.Row, axisIdx) + if err != nil { + return nil, nmt.Proof{}, err + } + + return NDFromShares(shrs, namespace, axisIdx) +} + func (f *MemFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { return f.Eds, nil } diff --git a/share/ipldv2/axis.go b/share/ipldv2/axis.go index 03c6354e9d..11b6adaa1a 100644 --- a/share/ipldv2/axis.go +++ b/share/ipldv2/axis.go @@ -32,29 +32,29 @@ func NewAxis(id AxisID, axisHalf []share.Share) *Axis { // NewAxisFromEDS samples the EDS and constructs a new Axis. func NewAxisFromEDS( axisType rsmt2d.Axis, - idx int, - eds *rsmt2d.ExtendedDataSquare, + axisIdx int, + square *rsmt2d.ExtendedDataSquare, height uint64, ) (*Axis, error) { - sqrLn := int(eds.Width()) + sqrLn := int(square.Width()) // TODO(@Wondertan): Should be an rsmt2d method var axisHalf [][]byte switch axisType { case rsmt2d.Row: - axisHalf = eds.Row(uint(idx))[:sqrLn/2] + axisHalf = square.Row(uint(axisIdx))[:sqrLn/2] case rsmt2d.Col: - axisHalf = eds.Col(uint(idx))[:sqrLn/2] + axisHalf = square.Col(uint(axisIdx))[:sqrLn/2] default: panic("invalid axis") } - root, err := share.NewRoot(eds) + root, err := share.NewRoot(square) if err != nil { return nil, fmt.Errorf("while computing root: %w", err) } - id := NewAxisID(axisType, uint16(idx), root, height) + id := NewAxisID(axisType, uint16(axisIdx), root, height) return NewAxis(id, axisHalf), nil } diff --git a/share/ipldv2/axis_id.go b/share/ipldv2/axis_id.go index e4222a6882..3f45ccbdeb 100644 --- a/share/ipldv2/axis_id.go +++ b/share/ipldv2/axis_id.go @@ -29,16 +29,16 @@ type AxisID struct { } // NewAxisID constructs a new AxisID. 
-func NewAxisID(axisType rsmt2d.Axis, idx uint16, root *share.Root, height uint64) AxisID {
-	dahroot := root.RowRoots[idx]
+func NewAxisID(axisType rsmt2d.Axis, axisIdx uint16, root *share.Root, height uint64) AxisID {
+	dahroot := root.RowRoots[axisIdx]
 	if axisType == rsmt2d.Col {
-		dahroot = root.ColumnRoots[idx]
+		dahroot = root.ColumnRoots[axisIdx]
 	}
 	axisHash := hashBytes(dahroot)
 
 	return AxisID{
 		AxisType:  axisType,
-		AxisIndex: idx,
+		AxisIndex: axisIdx,
 		AxisHash:  axisHash,
 		Height:    height,
 	}
diff --git a/share/ipldv2/axis_test.go b/share/ipldv2/axis_test.go
index 81a13b6ff0..08626bf070 100644
--- a/share/ipldv2/axis_test.go
+++ b/share/ipldv2/axis_test.go
@@ -12,26 +12,26 @@ import (
 )
 
 func TestAxis(t *testing.T) {
-	square := edstest.RandEDS(t, 2)
+	square := edstest.RandEDS(t, 8)
 
-	aid, err := NewAxisFromEDS(rsmt2d.Row, 1, square, 2)
+	axis, err := NewAxisFromEDS(rsmt2d.Row, 1, square, 2)
 	require.NoError(t, err)
 
-	data, err := aid.MarshalBinary()
+	data, err := axis.MarshalBinary()
 	require.NoError(t, err)
 
-	blk, err := aid.IPLDBlock()
+	blk, err := axis.IPLDBlock()
 	require.NoError(t, err)
 
-	cid, err := aid.AxisID.Cid()
+	cid, err := axis.AxisID.Cid()
 	require.NoError(t, err)
 	assert.EqualValues(t, blk.Cid(), cid)
 
-	sidOut := &Axis{}
-	err = sidOut.UnmarshalBinary(data)
+	axisOut := &Axis{}
+	err = axisOut.UnmarshalBinary(data)
 	require.NoError(t, err)
-	assert.EqualValues(t, aid, sidOut)
+	assert.EqualValues(t, axis, axisOut)
 
-	err = sidOut.Validate()
+	err = axisOut.Validate()
 	require.NoError(t, err)
 }
diff --git a/share/ipldv2/blockstore.go b/share/ipldv2/blockstore.go
index fcea737469..7ff00e81dd 100644
--- a/share/ipldv2/blockstore.go
+++ b/share/ipldv2/blockstore.go
@@ -8,8 +8,6 @@ import (
 	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
 
-	"github.com/celestiaorg/nmt"
-
 	"github.com/celestiaorg/celestia-node/share/eds"
 )
 
@@ -135,12 +133,12 @@ func (b Blockstore[F]) getDataBlock(id DataID) (blocks.Block, error) {
 		return nil, fmt.Errorf("while getting ODS file from FS: %w", err)
 	}
 
-	// data, prf, err := f.Data(id.DataNamespace, int(id.AxisIndex))
-	// if err != nil {
-	// 	return nil, fmt.Errorf("while getting Data: %w", err)
-	// }
+	data, prf, err := f.Data(id.DataNamespace, int(id.AxisIndex))
+	if err != nil {
+		return nil, fmt.Errorf("while getting Data: %w", err)
+	}
 
-	s := NewData(id, nil, nmt.Proof{})
+	s := NewData(id, data, prf)
 	blk, err := s.IPLDBlock()
 	if err != nil {
 		return nil, fmt.Errorf("while converting Data to IPLD block: %w", err)
diff --git a/share/ipldv2/data.go b/share/ipldv2/data.go
index 6472d4df44..3e26b30a31 100644
--- a/share/ipldv2/data.go
+++ b/share/ipldv2/data.go
@@ -6,8 +6,11 @@ import (
 	blocks "github.com/ipfs/go-block-format"
 
 	"github.com/celestiaorg/nmt"
+	nmtpb "github.com/celestiaorg/nmt/pb"
+	"github.com/celestiaorg/rsmt2d"
 
 	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds"
 	ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb"
 )
 
@@ -27,33 +30,36 @@ func NewData(id DataID, shares []share.Share, proof nmt.Proof) *Data {
 	}
 }
 
-// // NewDataFromEDS samples the EDS and constructs a new Data.
-// func NewDataFromEDS(
-// 	idx int,
-// 	eds *rsmt2d.ExtendedDataSquare,
-// 	height uint64,
-// ) (*Data, error) {
-// 	sqrLn := int(eds.Width())
-//
-// 	// TODO(@Wondertan): Should be an rsmt2d method
-// 	var axisHalf [][]byte
-// 	switch axisType {
-// 	case rsmt2d.Row:
-// 		axisHalf = eds.Row(uint(idx))[:sqrLn/2]
-// 	case rsmt2d.Col:
-// 		axisHalf = eds.Col(uint(idx))[:sqrLn/2]
-// 	default:
-// 		panic("invalid axis")
-// 	}
-//
-// 	root, err := share.NewRoot(eds)
-// 	if err != nil {
-// 		return nil, fmt.Errorf("while computing root: %w", err)
-// 	}
-//
-// 	id := NewDataID(axisType, uint16(idx), root, height)
-// 	return NewData(id, axisHalf), nil
-// }
+// NewDataFromEDS samples the EDS and constructs Data for each row with the given namespace.
+func NewDataFromEDS(
+	square *rsmt2d.ExtendedDataSquare,
+	height uint64,
+	namespace share.Namespace,
+) ([]*Data, error) {
+	root, err := share.NewRoot(square)
+	if err != nil {
+		return nil, fmt.Errorf("while computing root: %w", err)
+	}
+
+	var datas []*Data
+	for rowIdx, rowRoot := range root.RowRoots {
+		if namespace.IsOutsideRange(rowRoot, rowRoot) {
+			continue
+		}
+
+		shrs := square.Row(uint(rowIdx))
+		// TODO(@Wondertan): This will likely be removed
+		nd, proof, err := eds.NDFromShares(shrs, namespace, rowIdx)
+		if err != nil {
+			return nil, err
+		}
+
+		id := NewDataID(rowIdx, root, height, namespace)
+		datas = append(datas, NewData(id, nd, proof))
+	}
+
+	return datas, nil
+}
 
 // DataFromBlock converts blocks.Block into Data.
 func DataFromBlock(blk blocks.Block) (*Data, error) {
@@ -92,8 +98,16 @@ func (s *Data) MarshalBinary() ([]byte, error) {
 		return nil, err
 	}
 
+	proof := &nmtpb.Proof{}
+	proof.Nodes = s.DataProof.Nodes()
+	proof.End = int64(s.DataProof.End())
+	proof.Start = int64(s.DataProof.Start())
+	proof.IsMaxNamespaceIgnored = s.DataProof.IsMaxNamespaceIDIgnored()
+	proof.LeafHash = s.DataProof.LeafHash()
+
 	return (&ipldv2pb.Data{
 		DataId:     id,
+		DataProof:  proof,
 		DataShares: s.DataShares,
 	}).Marshal()
 }
@@ -110,6 +124,7 @@ func (s *Data) UnmarshalBinary(data []byte) error {
 		return err
 	}
 
+	s.DataProof = nmt.ProtoToProof(*proto.DataProof)
 	s.DataShares = proto.DataShares
 	return nil
 }
@@ -124,8 +139,13 @@ func (s *Data) Validate() error {
 		return fmt.Errorf("empty DataShares")
 	}
 
+	shrs := make([][]byte, 0, len(s.DataShares))
+	for _, shr := range s.DataShares {
+		shrs = append(shrs, append(share.GetNamespace(shr), shr...))
+	}
+
 	s.DataProof.WithHashedProof(hasher())
-	if !s.DataProof.VerifyNamespace(hasher(), s.DataNamespace.ToNMT(), s.DataShares, s.AxisHash) {
+	if !s.DataProof.VerifyNamespace(hasher(), s.DataNamespace.ToNMT(), shrs, s.AxisHash) {
 		return fmt.Errorf("invalid DataProof")
 	}
 
diff --git a/share/ipldv2/data_id.go b/share/ipldv2/data_id.go
index bd6028c60a..fa036312bb 100644
--- a/share/ipldv2/data_id.go
+++ b/share/ipldv2/data_id.go
@@ -25,7 +25,7 @@ type DataID struct {
 }
 
 // NewDataID constructs a new DataID.
-func NewDataID(namespace share.Namespace, axisIdx int, root *share.Root, height uint64) DataID {
+func NewDataID(axisIdx int, root *share.Root, height uint64, namespace share.Namespace) DataID {
 	return DataID{
 		AxisID:        NewAxisID(rsmt2d.Row, uint16(axisIdx), root, height),
 		DataNamespace: namespace,
@@ -48,18 +48,18 @@ func DataIDFromCID(cid cid.Cid) (id DataID, err error) {
 
 // Cid returns sample ID encoded as CID.
func (s *DataID) Cid() (cid.Cid, error) { - // avoid using proto serialization for CID as it's not deterministicnot lambo though + // avoid using proto serialization for CID as it's not deterministic data, err := s.MarshalBinary() if err != nil { return cid.Undef, err } - buf, err := mh.Encode(data, sampleMultihashCode) + buf, err := mh.Encode(data, dataMultihashCode) if err != nil { return cid.Undef, err } - return cid.NewCidV1(sampleCodec, buf), nil + return cid.NewCidV1(dataCodec, buf), nil } // MarshalBinary encodes DataID into binary form. diff --git a/share/ipldv2/data_id_test.go b/share/ipldv2/data_id_test.go index 89018a45fe..f07bb3f2b3 100644 --- a/share/ipldv2/data_id_test.go +++ b/share/ipldv2/data_id_test.go @@ -17,7 +17,7 @@ func TestDataID(t *testing.T) { root, err := share.NewRoot(square) require.NoError(t, err) - sid := NewDataID(sharetest.RandV0Namespace(), 2, root, 1) + sid := NewDataID(2, root, 1, sharetest.RandV0Namespace()) id, err := sid.Cid() require.NoError(t, err) diff --git a/share/ipldv2/data_test.go b/share/ipldv2/data_test.go new file mode 100644 index 0000000000..3eab90264d --- /dev/null +++ b/share/ipldv2/data_test.go @@ -0,0 +1,38 @@ +package ipldv2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestData(t *testing.T) { + namespace := sharetest.RandV0Namespace() + square, _ := edstest.RandEDSWithNamespace(t, namespace, 8) + + nds, err := NewDataFromEDS(square, 1, namespace) + require.NoError(t, err) + nd := nds[0] + + data, err := nd.MarshalBinary() + require.NoError(t, err) + + blk, err := nd.IPLDBlock() + require.NoError(t, err) + + cid, err := nd.DataID.Cid() + require.NoError(t, err) + assert.EqualValues(t, blk.Cid(), cid) + + ndOut := &Data{} + err = ndOut.UnmarshalBinary(data) + require.NoError(t, err) + assert.EqualValues(t, nd, ndOut) + + err = ndOut.Validate() + require.NoError(t, err) +} diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index ad9a285297..c38de89548 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -51,7 +51,7 @@ func init() { return &AxisHasher{} }) mh.Register(dataMultihashCode, func() hash.Hash { - return &AxisHasher{} + return &DataHasher{} }) } diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index ed483cdbc9..6ba3961039 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -21,6 +21,7 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" ) var axisTypes = []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} @@ -38,7 +39,7 @@ func TestSampleRoundtripGetBlock(t *testing.T) { width := int(sqr.Width()) for _, axisType := range axisTypes { for i := 0; i < width*width; i++ { - smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) + smpl, err := NewSampleFromEDS(axisType, i, sqr, 16) require.NoError(t, err) cid, err := smpl.SampleID.Cid() @@ -58,10 +59,10 @@ func TestSampleRoundtripGetBlock(t *testing.T) { } func TestSampleRoundtripGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*100) defer cancel() - sqr := edstest.RandEDS(t, 8) // TODO(@Wondertan): does not work with more than 8 for some reasong + sqr := edstest.RandEDS(t, 8) b := edsBlockstore(sqr) client := 
remoteClient(ctx, t, b) @@ -102,7 +103,7 @@ func TestAxisRoundtripGetBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10000) defer cancel() - sqr := edstest.RandEDS(t, 8) + sqr := edstest.RandEDS(t, 16) b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) @@ -169,6 +170,72 @@ func TestAxisRoundtripGetBlocks(t *testing.T) { assert.NoError(t, err) } +func TestDataRoundtripGetBlock(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + namespace := sharetest.RandV0Namespace() + sqr, _ := edstest.RandEDSWithNamespace(t, namespace, 16) + b := edsBlockstore(sqr) + client := remoteClient(ctx, t, b) + + nds, err := NewDataFromEDS(sqr, 1, namespace) + require.NoError(t, err) + + for _, nd := range nds { + cid, err := nd.DataID.Cid() + require.NoError(t, err) + + blkOut, err := client.GetBlock(ctx, cid) + require.NoError(t, err) + assert.EqualValues(t, cid, blkOut.Cid()) + + ndOut, err := DataFromBlock(blkOut) + assert.NoError(t, err) + + err = ndOut.Validate() // bitswap already performed validation and this is only for testing + assert.NoError(t, err) + } +} + +func TestDataRoundtripGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + namespace := sharetest.RandV0Namespace() + sqr, _ := edstest.RandEDSWithNamespace(t, namespace, 16) + b := edsBlockstore(sqr) + client := remoteClient(ctx, t, b) + + nds, err := NewDataFromEDS(sqr, 1, namespace) + require.NoError(t, err) + + set := cid.NewSet() + for _, nd := range nds { + cid, err := nd.DataID.Cid() + require.NoError(t, err) + set.Add(cid) + } + + blks := client.GetBlocks(ctx, set.Keys()) + err = set.ForEach(func(c cid.Cid) error { + select { + case blk := <-blks: + assert.True(t, set.Has(blk.Cid())) + + smpl, err := DataFromBlock(blk) + assert.NoError(t, err) + + err = smpl.Validate() // bitswap already performed validation and this is only for testing + assert.NoError(t, err) + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + assert.NoError(t, err) +} + func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) blockservice.BlockService { net, err := mocknet.FullMeshLinked(2) require.NoError(t, err) diff --git a/share/ipldv2/sample.go b/share/ipldv2/sample.go index c7d05b23d2..5667f45993 100644 --- a/share/ipldv2/sample.go +++ b/share/ipldv2/sample.go @@ -56,25 +56,25 @@ func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample func NewSampleFromEDS( axisType rsmt2d.Axis, idx int, - eds *rsmt2d.ExtendedDataSquare, + square *rsmt2d.ExtendedDataSquare, height uint64, ) (*Sample, error) { - sqrLn := int(eds.Width()) + sqrLn := int(square.Width()) axisIdx, shrIdx := idx/sqrLn, idx%sqrLn // TODO(@Wondertan): Should be an rsmt2d method var shrs [][]byte switch axisType { case rsmt2d.Row: - shrs = eds.Row(uint(axisIdx)) + shrs = square.Row(uint(axisIdx)) case rsmt2d.Col: axisIdx, shrIdx = shrIdx, axisIdx - shrs = eds.Col(uint(axisIdx)) + shrs = square.Col(uint(axisIdx)) default: panic("invalid axis") } - root, err := share.NewRoot(eds) + root, err := share.NewRoot(square) if err != nil { return nil, fmt.Errorf("while computing root: %w", err) } diff --git a/share/ipldv2/sample_test.go b/share/ipldv2/sample_test.go index 1bf28e76a8..676bd952f1 100644 --- a/share/ipldv2/sample_test.go +++ b/share/ipldv2/sample_test.go @@ -12,7 +12,7 @@ import ( ) func TestSample(t *testing.T) { - square := edstest.RandEDS(t, 2) + square := 
edstest.RandEDS(t, 8) sample, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1) require.NoError(t, err) From bbcd956c0b95682a20712530787907c9accf6d46 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sat, 2 Dec 2023 02:47:48 +0100 Subject: [PATCH 023/132] lol --- share/ipldv2/ipldv2_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index 6ba3961039..b6ff323757 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -39,7 +39,7 @@ func TestSampleRoundtripGetBlock(t *testing.T) { width := int(sqr.Width()) for _, axisType := range axisTypes { for i := 0; i < width*width; i++ { - smpl, err := NewSampleFromEDS(axisType, i, sqr, 16) + smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) require.NoError(t, err) cid, err := smpl.SampleID.Cid() From 583481b6efa5de313625248912dcd7c8b2f18bbb Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 3 Dec 2023 13:14:05 +0100 Subject: [PATCH 024/132] pass by value and cid must constructors --- go.sum | 7 ------- share/ipldv2/axis_id.go | 8 ++++---- share/ipldv2/data.go | 2 +- share/ipldv2/data_id.go | 14 ++++++++++---- share/ipldv2/ipldv2.go | 32 ++++++++++++++++++++++++++++++++ share/ipldv2/sample_id.go | 20 ++++++++++++++------ 6 files changed, 61 insertions(+), 22 deletions(-) diff --git a/go.sum b/go.sum index cb340549cc..9094e56efb 100644 --- a/go.sum +++ b/go.sum @@ -378,15 +378,8 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= -<<<<<<< HEAD -github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas= -github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= -======= -github.com/celestiaorg/quantum-gravity-bridge v1.3.0 h1:9zPIp7w1FWfkPnn16y3S4FpFLnQtS7rm81CUVcHEts0= -github.com/celestiaorg/quantum-gravity-bridge v1.3.0/go.mod h1:6WOajINTDEUXpSj5UZzod16UZ96ZVB/rFNKyM+Mt1gI= ->>>>>>> ee829610 (request size optimization for share sample) github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= github.com/celestiaorg/rsmt2d v0.11.0/go.mod h1:6Y580I3gVr0+OVFfW6m2JTwnCCmvW3WfbwSLfuT+HCA= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= diff --git a/share/ipldv2/axis_id.go b/share/ipldv2/axis_id.go index 3f45ccbdeb..400b40ebc3 100644 --- a/share/ipldv2/axis_id.go +++ b/share/ipldv2/axis_id.go @@ -59,7 +59,7 @@ func AxisIDFromCID(cid cid.Cid) (id AxisID, err error) { } // Cid returns sample ID encoded as CID. -func (s *AxisID) Cid() (cid.Cid, error) { +func (s AxisID) Cid() (cid.Cid, error) { data, err := s.MarshalBinary() if err != nil { return cid.Undef, err @@ -77,7 +77,7 @@ func (s *AxisID) Cid() (cid.Cid, error) { // NOTE: Proto is avoided because // * Its size is not deterministic which is required for IPLD. 
// * No support for uint16 -func (s *AxisID) MarshalTo(data []byte) (int, error) { +func (s AxisID) MarshalTo(data []byte) (int, error) { data = append(data, byte(s.AxisType)) data = binary.LittleEndian.AppendUint16(data, s.AxisIndex) data = append(data, s.AxisHash...) @@ -95,7 +95,7 @@ func (s *AxisID) UnmarshalFrom(data []byte) (int, error) { } // MarshalBinary encodes AxisID into binary form. -func (s *AxisID) MarshalBinary() ([]byte, error) { +func (s AxisID) MarshalBinary() ([]byte, error) { data := make([]byte, 0, AxisIDSize) n, err := s.MarshalTo(data) return data[:n], err @@ -111,7 +111,7 @@ func (s *AxisID) UnmarshalBinary(data []byte) error { } // Validate validates fields of AxisID. -func (s *AxisID) Validate() error { +func (s AxisID) Validate() error { if s.Height == 0 { return fmt.Errorf("zero Height") } diff --git a/share/ipldv2/data.go b/share/ipldv2/data.go index 3e26b30a31..53767a06b4 100644 --- a/share/ipldv2/data.go +++ b/share/ipldv2/data.go @@ -41,7 +41,7 @@ func NewDataFromEDS( return nil, fmt.Errorf("while computing root: %w", err) } - var datas []*Data + var datas []*Data //nolint:prealloc// we don't know how many rows with needed namespace there are for rowIdx, rowRoot := range root.RowRoots { if namespace.IsOutsideRange(rowRoot, rowRoot) { continue diff --git a/share/ipldv2/data_id.go b/share/ipldv2/data_id.go index fa036312bb..151ac91deb 100644 --- a/share/ipldv2/data_id.go +++ b/share/ipldv2/data_id.go @@ -26,8 +26,14 @@ type DataID struct { // NewDataID constructs a new DataID. func NewDataID(axisIdx int, root *share.Root, height uint64, namespace share.Namespace) DataID { + axisHash := hashBytes(root.RowRoots[axisIdx]) return DataID{ - AxisID: NewAxisID(rsmt2d.Row, uint16(axisIdx), root, height), + AxisID: AxisID{ + AxisType: rsmt2d.Row, + AxisIndex: uint16(axisIdx), + AxisHash: axisHash, + Height: height, + }, DataNamespace: namespace, } } @@ -47,7 +53,7 @@ func DataIDFromCID(cid cid.Cid) (id DataID, err error) { } // Cid returns sample ID encoded as CID. -func (s *DataID) Cid() (cid.Cid, error) { +func (s DataID) Cid() (cid.Cid, error) { // avoid using proto serialization for CID as it's not deterministic data, err := s.MarshalBinary() if err != nil { @@ -66,7 +72,7 @@ func (s *DataID) Cid() (cid.Cid, error) { // NOTE: Proto is avoided because // * Its size is not deterministic which is required for IPLD. // * No support for uint16 -func (s *DataID) MarshalBinary() ([]byte, error) { +func (s DataID) MarshalBinary() ([]byte, error) { data := make([]byte, 0, DataIDSize+1) n, err := s.AxisID.MarshalTo(data) if err != nil { @@ -91,7 +97,7 @@ func (s *DataID) UnmarshalBinary(data []byte) error { } // Validate validates fields of DataID. -func (s *DataID) Validate() error { +func (s DataID) Validate() error { if err := s.AxisID.Validate(); err != nil { return fmt.Errorf("while validating AxisID: %w", err) } diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index c38de89548..a1310a4f73 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -8,8 +8,40 @@ import ( "github.com/ipfs/go-cid" logger "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" ) +// MustSampleCID constructs a sample CID or panics. 
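// The Must*CID helpers below all reuse the ID-to-CID conversion seen in the
// Cid() methods above: marshal the ID into its deterministic binary form,
// wrap it as a multihash, and wrap that into a CIDv1 under the protocol's
// codec. A sketch of the shared pattern, where mhCode and codec stand in for
// the registered sample/axis/data constants:
//
//	data, _ := id.MarshalBinary()     // deterministic, fixed-size encoding
//	buf, _ := mh.Encode(data, mhCode) // multihash envelope around the raw ID
//	c := cid.NewCidV1(codec, buf)     // CIDv1 that Bitswap can address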
+func MustSampleCID(axisIdx int, root *share.Root, height uint64) cid.Cid { + axisTp := rsmt2d.Row // TODO: Randomize axis type + cid, err := NewSampleID(axisTp, axisIdx, root, height).Cid() + if err != nil { + panic("failed to create sample CID") + } + return cid +} + +// MustAxisCID constructs an axis CID or panics. +func MustAxisCID(axisTp rsmt2d.Axis, axisIdx int, root *share.Root, height uint64) cid.Cid { + cid, err := NewAxisID(axisTp, uint16(axisIdx), root, height).Cid() + if err != nil { + panic("failed to create axis CID") + } + return cid +} + +// MustDataCID constructs a data CID or panics. +func MustDataCID(axisIdx int, root *share.Root, height uint64, namespace share.Namespace) cid.Cid { + cid, err := NewDataID(axisIdx, root, height, namespace).Cid() + if err != nil { + panic("failed to create data CID") + } + return cid +} + var log = logger.Logger("ipldv2") const ( diff --git a/share/ipldv2/sample_id.go b/share/ipldv2/sample_id.go index 8daa815c72..c8841b4072 100644 --- a/share/ipldv2/sample_id.go +++ b/share/ipldv2/sample_id.go @@ -26,13 +26,21 @@ type SampleID struct { // NewSampleID constructs a new SampleID. func NewSampleID(axisType rsmt2d.Axis, idx int, root *share.Root, height uint64) SampleID { sqrLn := len(root.RowRoots) - axsIdx, shrIdx := idx/sqrLn, idx%sqrLn + axisIdx, shrIdx := idx/sqrLn, idx%sqrLn + dahroot := root.RowRoots[axisIdx] if axisType == rsmt2d.Col { - axsIdx, shrIdx = shrIdx, axsIdx + axisIdx, shrIdx = shrIdx, axisIdx + dahroot = root.ColumnRoots[axisIdx] } + axisHash := hashBytes(dahroot) return SampleID{ - AxisID: NewAxisID(axisType, uint16(axsIdx), root, height), + AxisID: AxisID{ + AxisType: axisType, + AxisIndex: uint16(axisIdx), + AxisHash: axisHash, + Height: height, + }, ShareIndex: uint16(shrIdx), } } @@ -52,7 +60,7 @@ func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) { } // Cid returns sample ID encoded as CID. -func (s *SampleID) Cid() (cid.Cid, error) { +func (s SampleID) Cid() (cid.Cid, error) { // avoid using proto serialization for CID as it's not deterministic data, err := s.MarshalBinary() if err != nil { @@ -71,7 +79,7 @@ func (s *SampleID) Cid() (cid.Cid, error) { // NOTE: Proto is avoided because // * Its size is not deterministic which is required for IPLD. // * No support for uint16 -func (s *SampleID) MarshalBinary() ([]byte, error) { +func (s SampleID) MarshalBinary() ([]byte, error) { data := make([]byte, 0, SampleIDSize) n, err := s.AxisID.MarshalTo(data) if err != nil { @@ -96,6 +104,6 @@ func (s *SampleID) UnmarshalBinary(data []byte) error { } // Validate validates fields of SampleID. 
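// The pointer-to-value receiver change in this patch is what makes the Must*
// constructors above compile: a chained call such as NewSampleID(...).Cid()
// is only legal with a value receiver, because the constructor's result is
// not addressable. It also reflects that the IDs are small, immutable values
// that are cheap to copy.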
-func (s *SampleID) Validate() error { +func (s SampleID) Validate() error { return s.AxisID.Validate() } From a28cfefc537316952c5c64504755a91b748f9da7 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 3 Dec 2023 13:25:34 +0100 Subject: [PATCH 025/132] fix data id test --- share/ipldv2/data_id_test.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/share/ipldv2/data_id_test.go b/share/ipldv2/data_id_test.go index f07bb3f2b3..a260839c70 100644 --- a/share/ipldv2/data_id_test.go +++ b/share/ipldv2/data_id_test.go @@ -6,23 +6,21 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/sharetest" ) // TODO: Add test that AxisType is not serialized func TestDataID(t *testing.T) { - square := edstest.RandEDS(t, 2) - root, err := share.NewRoot(square) - require.NoError(t, err) + ns := sharetest.RandV0Namespace() + _, root := edstest.RandEDSWithNamespace(t, ns, 4) - sid := NewDataID(2, root, 1, sharetest.RandV0Namespace()) + sid := NewDataID(2, root, 1, ns) id, err := sid.Cid() require.NoError(t, err) - assert.EqualValues(t, sampleCodec, id.Prefix().Codec) - assert.EqualValues(t, sampleMultihashCode, id.Prefix().MhType) + assert.EqualValues(t, dataCodec, id.Prefix().Codec) + assert.EqualValues(t, dataMultihashCode, id.Prefix().MhType) assert.EqualValues(t, DataIDSize, id.Prefix().MhLength) data, err := sid.MarshalBinary() From f6db8f9fe96c88b985fd8d1488a3c326a7dedd83 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 3 Dec 2023 21:17:49 +0100 Subject: [PATCH 026/132] blockservice constructor --- share/ipldv2/ipldv2.go | 8 ++++++++ share/ipldv2/ipldv2_test.go | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/share/ipldv2/ipldv2.go b/share/ipldv2/ipldv2.go index a1310a4f73..1b50803e17 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/ipldv2/ipldv2.go @@ -5,6 +5,9 @@ import ( "fmt" "hash" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-cid" logger "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" @@ -42,6 +45,11 @@ func MustDataCID(axisIdx int, root *share.Root, height uint64, namespace share.N return cid } +// NewBlockService creates a new blockservice.BlockService with allowlist supporting the protocol. 
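// A minimal wiring sketch, assuming the in-memory boxo pieces the tests in
// this package use (an offline exchange serves only locally stored blocks;
// substituting a bitswap exchange makes the service networked):
//
//	bstore := blockstore.NewBlockstore(datastore.NewMapDatastore())
//	bserv := NewBlockService(bstore, offline.Exchange(bstore))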
+func NewBlockService(b blockstore.Blockstore, ex exchange.Interface) blockservice.BlockService { + return blockservice.New(b, ex, blockservice.WithAllowlist(defaultAllowlist)) +} + var log = logger.Logger("ipldv2") const ( diff --git a/share/ipldv2/ipldv2_test.go b/share/ipldv2/ipldv2_test.go index b6ff323757..f199148e81 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/ipldv2/ipldv2_test.go @@ -261,5 +261,5 @@ func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstor err = net.ConnectAllButSelf() require.NoError(t, err) - return blockservice.New(bstoreClient, bitswapClient, blockservice.WithAllowlist(defaultAllowlist)) + return NewBlockService(bstoreClient, bitswapClient) } From 9a8b5ed020d182d19f7342ed4edd742ef0a33a44 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Sun, 3 Dec 2023 22:50:44 +0100 Subject: [PATCH 027/132] implement Getter and tests for it --- share/ipldv2/getter.go | 181 ++++++++++++++++++++++++++++++++++++ share/ipldv2/getter_test.go | 146 +++++++++++++++++++++++++++++ 2 files changed, 327 insertions(+) create mode 100644 share/ipldv2/getter.go create mode 100644 share/ipldv2/getter_test.go diff --git a/share/ipldv2/getter.go b/share/ipldv2/getter.go new file mode 100644 index 0000000000..e9c19318e6 --- /dev/null +++ b/share/ipldv2/getter.go @@ -0,0 +1,181 @@ +package ipldv2 + +import ( + "context" + "fmt" + "slices" + + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" +) + +type Getter struct { + bget blockservice.BlockGetter +} + +func NewGetter(bget blockservice.BlockGetter) *Getter { + return &Getter{bget: bget} +} + +// GetShare +// TODO: Deprecate this method +func (g *Getter) GetShare(ctx context.Context, hdr *header.ExtendedHeader, row, col int) (share.Share, error) { + shrIdx := row*len(hdr.DAH.RowRoots) + col + cid := MustSampleCID(shrIdx, hdr.DAH, hdr.Height()) + blk, err := g.bget.GetBlock(ctx, cid) + if err != nil { + return nil, fmt.Errorf("getting block from blockservice: %w", err) + } + + smpl, err := SampleFromBlock(blk) + if err != nil { + return nil, fmt.Errorf("converting block to Sample: %w", err) + } + + return smpl.SampleShare, nil +} + +// GetShares +// Guarantee that the returned shares are in the same order as shrIdxs. 
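// Shares are addressed by a flat, row-major index over the extended square,
// with width == len(root.RowRoots):
//
//	shrIdx = row*width + col              // flatten, as GetShare does above
//	row, col = shrIdx/width, shrIdx%width // recover coordinates
//
// GetShares below inverts the mapping on the response side using
// SampleID.AxisIndex and SampleID.ShareIndex.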
+func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, shrIdxs ...int) ([]share.Share, error) { + maxIdx := len(hdr.DAH.RowRoots) * len(hdr.DAH.ColumnRoots) + cids := make([]cid.Cid, len(shrIdxs)) + for i, shrIdx := range shrIdxs { + if shrIdx < 0 || shrIdx >= maxIdx { + return nil, fmt.Errorf("share index %d is out of bounds", shrIdx) + } + cids[i] = MustSampleCID(shrIdx, hdr.DAH, hdr.Height()) + } + + smpls := make(map[int]*Sample, len(shrIdxs)) + blkCh := g.bget.GetBlocks(ctx, cids) + for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to + smpl, err := SampleFromBlock(blk) + if err != nil { + // NOTE: Should never error in fact, as Hasher already validated the block + return nil, fmt.Errorf("converting block to Sample: %w", err) + } + + shrIdx := int(smpl.SampleID.AxisIndex)*len(hdr.DAH.RowRoots) + int(smpl.SampleID.ShareIndex) + smpls[shrIdx] = smpl + } + + if len(smpls) != len(shrIdxs) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + return nil, fmt.Errorf("not all shares were found") + } + + shrs := make([]share.Share, len(shrIdxs)) + for i, shrIdx := range shrIdxs { + shrs[i] = smpls[shrIdx].SampleShare + } + + return shrs, nil +} + +// GetEDS +// TODO(@Wondertan): Consider requesting randomized rows and cols instead of ODS only +func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + sqrLn := len(hdr.DAH.RowRoots) + cids := make([]cid.Cid, sqrLn/2) + for i := 0; i < sqrLn/2; i++ { + cids[i] = MustAxisCID(rsmt2d.Row, i, hdr.DAH, hdr.Height()) + } + + square, err := rsmt2d.NewExtendedDataSquare( + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(sqrLn/2)), uint(sqrLn), + share.Size, + ) + if err != nil { + return nil, err + } + + blkCh := g.bget.GetBlocks(ctx, cids) + for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to + axis, err := AxisFromBlock(blk) + if err != nil { + // NOTE: Should never error in fact, as Hasher already validated the block + return nil, fmt.Errorf("converting block to Axis: %w", err) + } + + for shrIdx, shr := range axis.AxisShares { + err = square.SetCell(uint(axis.AxisIndex), uint(shrIdx), shr) + if err != nil { + panic(err) // this should never happen and if it is... 
something is really wrong + } + } + } + + // TODO(@Wondertan): Figure out a way to avoid recompute of what has been already computed + // during verification in AxisHasher + err = square.Repair(hdr.DAH.RowRoots, hdr.DAH.ColumnRoots) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + return nil, fmt.Errorf("repairing EDS: %w", err) + } + + return square, nil +} + +func (g *Getter) GetSharesByNamespace( + ctx context.Context, + hdr *header.ExtendedHeader, + ns share.Namespace, +) (share.NamespacedShares, error) { + if err := ns.ValidateForData(); err != nil { + return nil, err + } + + var cids []cid.Cid //nolint:prealloc// we don't know how many rows with needed namespace there are + for rowIdx, rowRoot := range hdr.DAH.RowRoots { + if ns.IsOutsideRange(rowRoot, rowRoot) { + continue + } + + cids = append(cids, MustDataCID(rowIdx, hdr.DAH, hdr.Height(), ns)) + } + if len(cids) == 0 { + return share.NamespacedShares{}, nil + } + + datas := make([]*Data, 0, len(cids)) + blkCh := g.bget.GetBlocks(ctx, cids) + for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to + data, err := DataFromBlock(blk) + if err != nil { + // NOTE: Should never error in fact, as Hasher already validated the block + return nil, fmt.Errorf("converting block to Data: %w", err) + } + + datas = append(datas, data) + } + + slices.SortFunc(datas, func(a, b *Data) int { + if a.DataID.AxisIndex < b.DataID.AxisIndex { + return -1 + } + return 1 + }) + + nShrs := make(share.NamespacedShares, len(datas)) + for i, row := range datas { + nShrs[i] = share.NamespacedRow{ + Shares: row.DataShares, + Proof: &row.DataProof, + } + } + + // NOTE: We don't need to call Verify here as Bitswap already did it for us internal. + return nShrs, nil +} diff --git a/share/ipldv2/getter_test.go b/share/ipldv2/getter_test.go new file mode 100644 index 0000000000..7a0fbdbb9a --- /dev/null +++ b/share/ipldv2/getter_test.go @@ -0,0 +1,146 @@ +package ipldv2 + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/nmt" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestGetter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ns := sharetest.RandV0Namespace() + square, root := edstest.RandEDSWithNamespace(t, ns, 16) + hdr := &header.ExtendedHeader{DAH: root} + + b := edsBlockstore(square) + bserv := NewBlockService(b, nil) + get := NewGetter(bserv) + + t.Run("GetShares", func(t *testing.T) { + idxs := rand.Perm(int(square.Width() ^ 2))[:30] + shrs, err := get.GetShares(ctx, hdr, idxs...) 
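// NOTE: Go's ^ operator is bitwise XOR, not exponentiation, so
// square.Width() ^ 2 above evaluates to width XOR 2 rather than width*width;
// the full flat index space of the square is
// int(square.Width() * square.Width()).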
+ assert.NoError(t, err) + + for i, shrs := range shrs { + idx := idxs[i] + x, y := uint(idx)/square.Width(), uint(idx)%square.Width() + cell := square.GetCell(x, y) + ok := bytes.Equal(cell, shrs) + require.True(t, ok) + } + }) + + t.Run("GetEDS", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + eds, err := get.GetEDS(ctx, hdr) + assert.NoError(t, err) + assert.NotNil(t, eds) + + ok := eds.Equals(square) + assert.True(t, ok) + }) + + t.Run("GetSharesByNamespace", func(t *testing.T) { + nshrs, err := get.GetSharesByNamespace(ctx, hdr, ns) + assert.NoError(t, err) + assert.NoError(t, nshrs.Verify(root, ns)) + assert.NotEmpty(t, nshrs.Flatten()) + + t.Run("NamespaceOutsideOfRoot", func(t *testing.T) { + randNamespace := sharetest.RandV0Namespace() + emptyShares, err := get.GetSharesByNamespace(ctx, hdr, randNamespace) + assert.NoError(t, err) + assert.Empty(t, emptyShares) + assert.NoError(t, emptyShares.Verify(root, randNamespace)) + assert.Empty(t, emptyShares.Flatten()) + }) + + t.Run("NamespaceInsideOfRoot", func(t *testing.T) { + square := edstest.RandEDS(t, 8) + root, err := share.NewRoot(square) + require.NoError(t, err) + hdr := &header.ExtendedHeader{DAH: root} + + b := edsBlockstore(square) + bserv := NewBlockService(b, nil) + get := NewGetter(bserv) + + maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) + ns, err := addToNamespace(maxNs, -1) + require.NoError(t, err) + require.Len(t, ipld.FilterRootByNamespace(root, ns), 1) + + emptyShares, err := get.GetSharesByNamespace(ctx, hdr, ns) + assert.NoError(t, err) + assert.NotNil(t, emptyShares[0].Proof) + assert.NoError(t, emptyShares.Verify(root, ns)) + assert.Empty(t, emptyShares.Flatten()) + }) + }) +} + +// addToNamespace adds arbitrary int value to namespace, treating namespace as big-endian +// implementation of int +// TODO: dedup with getters/shrex_test.go +func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) { + if val == 0 { + return namespace, nil + } + // Convert the input integer to a byte slice and add it to result slice + result := make([]byte, len(namespace)) + if val > 0 { + binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(val)) + } else { + binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(-val)) + } + + // Perform addition byte by byte + var carry int + for i := len(namespace) - 1; i >= 0; i-- { + sum := 0 + if val > 0 { + sum = int(namespace[i]) + int(result[i]) + carry + } else { + sum = int(namespace[i]) - int(result[i]) + carry + } + + switch { + case sum > 255: + carry = 1 + sum -= 256 + case sum < 0: + carry = -1 + sum += 256 + default: + carry = 0 + } + + result[i] = uint8(sum) + } + + // Handle any remaining carry + if carry != 0 { + return nil, fmt.Errorf("namespace overflow") + } + + return result, nil +} From 0d4dd27c6cc38373c22ba5c5de6d92f2f05f6e05 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Wed, 6 Dec 2023 01:15:47 +0100 Subject: [PATCH 028/132] rename to shwap --- share/{ipldv2 => shwap}/axis.go | 8 +- share/{ipldv2 => shwap}/axis_hasher.go | 2 +- share/{ipldv2 => shwap}/axis_hasher_test.go | 2 +- share/{ipldv2 => shwap}/axis_id.go | 2 +- share/{ipldv2 => shwap}/axis_id_test.go | 2 +- share/{ipldv2 => shwap}/axis_test.go | 2 +- share/{ipldv2 => shwap}/blockstore.go | 2 +- share/{ipldv2 => shwap}/blockstore_test.go | 2 +- share/{ipldv2 => shwap}/data.go | 8 +- share/{ipldv2 => shwap}/data_hasher.go | 2 +- share/{ipldv2 => shwap}/data_id.go | 2 +- share/{ipldv2 => 
shwap}/data_id_test.go | 2 +- share/{ipldv2 => shwap}/data_test.go | 2 +- share/{ipldv2 => shwap}/getter.go | 2 +- share/{ipldv2 => shwap}/getter_test.go | 2 +- share/{ipldv2 => shwap}/ipldv2.go | 4 +- share/{ipldv2 => shwap}/ipldv2_test.go | 2 +- .../pb/shwap_pb.pb.go} | 197 +++++++++--------- .../pb/shwap_pb.proto} | 0 share/{ipldv2 => shwap}/sample.go | 10 +- share/{ipldv2 => shwap}/sample_hasher.go | 2 +- share/{ipldv2 => shwap}/sample_hasher_test.go | 2 +- share/{ipldv2 => shwap}/sample_id.go | 2 +- share/{ipldv2 => shwap}/sample_id_test.go | 2 +- share/{ipldv2 => shwap}/sample_test.go | 2 +- 25 files changed, 133 insertions(+), 132 deletions(-) rename share/{ipldv2 => shwap}/axis.go (96%) rename share/{ipldv2 => shwap}/axis_hasher.go (98%) rename share/{ipldv2 => shwap}/axis_hasher_test.go (98%) rename share/{ipldv2 => shwap}/axis_id.go (99%) rename share/{ipldv2 => shwap}/axis_id_test.go (98%) rename share/{ipldv2 => shwap}/axis_test.go (97%) rename share/{ipldv2 => shwap}/blockstore.go (99%) rename share/{ipldv2 => shwap}/blockstore_test.go (98%) rename share/{ipldv2 => shwap}/data.go (96%) rename share/{ipldv2 => shwap}/data_hasher.go (98%) rename share/{ipldv2 => shwap}/data_id.go (99%) rename share/{ipldv2 => shwap}/data_id_test.go (98%) rename share/{ipldv2 => shwap}/data_test.go (98%) rename share/{ipldv2 => shwap}/getter.go (99%) rename share/{ipldv2 => shwap}/getter_test.go (99%) rename share/{ipldv2 => shwap}/ipldv2.go (98%) rename share/{ipldv2 => shwap}/ipldv2_test.go (99%) rename share/{ipldv2/pb/ipldv2pb.pb.go => shwap/pb/shwap_pb.pb.go} (78%) rename share/{ipldv2/pb/ipldv2pb.proto => shwap/pb/shwap_pb.proto} (100%) rename share/{ipldv2 => shwap}/sample.go (96%) rename share/{ipldv2 => shwap}/sample_hasher.go (98%) rename share/{ipldv2 => shwap}/sample_hasher_test.go (98%) rename share/{ipldv2 => shwap}/sample_id.go (99%) rename share/{ipldv2 => shwap}/sample_id_test.go (98%) rename share/{ipldv2 => shwap}/sample_test.go (98%) diff --git a/share/ipldv2/axis.go b/share/shwap/axis.go similarity index 96% rename from share/ipldv2/axis.go rename to share/shwap/axis.go index 11b6adaa1a..dddad56ee1 100644 --- a/share/ipldv2/axis.go +++ b/share/shwap/axis.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "bytes" @@ -10,7 +10,7 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" + shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" ) // Axis represents an Axis of an EDS. @@ -95,7 +95,7 @@ func (s *Axis) MarshalBinary() ([]byte, error) { return nil, err } - return (&ipldv2pb.Axis{ + return (&shwappb.Axis{ AxisId: id, AxisHalf: s.AxisShares, }).Marshal() @@ -103,7 +103,7 @@ func (s *Axis) MarshalBinary() ([]byte, error) { // UnmarshalBinary unmarshal Axis from binary. 
func (s *Axis) UnmarshalBinary(data []byte) error { - proto := &ipldv2pb.Axis{} + proto := &shwappb.Axis{} if err := proto.Unmarshal(data); err != nil { return err } diff --git a/share/ipldv2/axis_hasher.go b/share/shwap/axis_hasher.go similarity index 98% rename from share/ipldv2/axis_hasher.go rename to share/shwap/axis_hasher.go index 633ed4f1f6..d6fe7fdd9f 100644 --- a/share/ipldv2/axis_hasher.go +++ b/share/shwap/axis_hasher.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "crypto/sha256" diff --git a/share/ipldv2/axis_hasher_test.go b/share/shwap/axis_hasher_test.go similarity index 98% rename from share/ipldv2/axis_hasher_test.go rename to share/shwap/axis_hasher_test.go index c8fc4a10b6..afb378a4b5 100644 --- a/share/ipldv2/axis_hasher_test.go +++ b/share/shwap/axis_hasher_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "testing" diff --git a/share/ipldv2/axis_id.go b/share/shwap/axis_id.go similarity index 99% rename from share/ipldv2/axis_id.go rename to share/shwap/axis_id.go index 400b40ebc3..619046ea5e 100644 --- a/share/ipldv2/axis_id.go +++ b/share/shwap/axis_id.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "encoding/binary" diff --git a/share/ipldv2/axis_id_test.go b/share/shwap/axis_id_test.go similarity index 98% rename from share/ipldv2/axis_id_test.go rename to share/shwap/axis_id_test.go index 34bfad58cd..f2b61516e2 100644 --- a/share/ipldv2/axis_id_test.go +++ b/share/shwap/axis_id_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "testing" diff --git a/share/ipldv2/axis_test.go b/share/shwap/axis_test.go similarity index 97% rename from share/ipldv2/axis_test.go rename to share/shwap/axis_test.go index 08626bf070..4095a4e804 100644 --- a/share/ipldv2/axis_test.go +++ b/share/shwap/axis_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "testing" diff --git a/share/ipldv2/blockstore.go b/share/shwap/blockstore.go similarity index 99% rename from share/ipldv2/blockstore.go rename to share/shwap/blockstore.go index 7ff00e81dd..e1bc3acf2a 100644 --- a/share/ipldv2/blockstore.go +++ b/share/shwap/blockstore.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "context" diff --git a/share/ipldv2/blockstore_test.go b/share/shwap/blockstore_test.go similarity index 98% rename from share/ipldv2/blockstore_test.go rename to share/shwap/blockstore_test.go index d186431e39..b0dcf85c2a 100644 --- a/share/ipldv2/blockstore_test.go +++ b/share/shwap/blockstore_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "context" diff --git a/share/ipldv2/data.go b/share/shwap/data.go similarity index 96% rename from share/ipldv2/data.go rename to share/shwap/data.go index 53767a06b4..6b29ace634 100644 --- a/share/ipldv2/data.go +++ b/share/shwap/data.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "fmt" @@ -11,7 +11,7 @@ import ( "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" + shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" ) type Data struct { @@ -105,7 +105,7 @@ func (s *Data) MarshalBinary() ([]byte, error) { proof.IsMaxNamespaceIgnored = s.DataProof.IsMaxNamespaceIDIgnored() proof.LeafHash = s.DataProof.LeafHash() - return (&ipldv2pb.Data{ + return (&shwappb.Data{ DataId: id, DataProof: proof, DataShares: s.DataShares, @@ -114,7 +114,7 @@ func (s *Data) MarshalBinary() ([]byte, error) { // UnmarshalBinary unmarshal Data from binary. 
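// Note the deliberate asymmetry in the encoding scheme: the IDs use the
// hand-rolled deterministic binary form required for CIDs, while the full
// containers (Axis, Sample, Data) travel as protobuf envelopes that embed
// those binary IDs as opaque bytes fields.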
func (s *Data) UnmarshalBinary(data []byte) error { - proto := &ipldv2pb.Data{} + proto := &shwappb.Data{} if err := proto.Unmarshal(data); err != nil { return err } diff --git a/share/ipldv2/data_hasher.go b/share/shwap/data_hasher.go similarity index 98% rename from share/ipldv2/data_hasher.go rename to share/shwap/data_hasher.go index 5903269e70..dd8db7b05e 100644 --- a/share/ipldv2/data_hasher.go +++ b/share/shwap/data_hasher.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "crypto/sha256" diff --git a/share/ipldv2/data_id.go b/share/shwap/data_id.go similarity index 99% rename from share/ipldv2/data_id.go rename to share/shwap/data_id.go index 151ac91deb..955560a07a 100644 --- a/share/ipldv2/data_id.go +++ b/share/shwap/data_id.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "fmt" diff --git a/share/ipldv2/data_id_test.go b/share/shwap/data_id_test.go similarity index 98% rename from share/ipldv2/data_id_test.go rename to share/shwap/data_id_test.go index a260839c70..d5dcf56140 100644 --- a/share/ipldv2/data_id_test.go +++ b/share/shwap/data_id_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "testing" diff --git a/share/ipldv2/data_test.go b/share/shwap/data_test.go similarity index 98% rename from share/ipldv2/data_test.go rename to share/shwap/data_test.go index 3eab90264d..65a9516716 100644 --- a/share/ipldv2/data_test.go +++ b/share/shwap/data_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "testing" diff --git a/share/ipldv2/getter.go b/share/shwap/getter.go similarity index 99% rename from share/ipldv2/getter.go rename to share/shwap/getter.go index e9c19318e6..948f9cdc5b 100644 --- a/share/ipldv2/getter.go +++ b/share/shwap/getter.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "context" diff --git a/share/ipldv2/getter_test.go b/share/shwap/getter_test.go similarity index 99% rename from share/ipldv2/getter_test.go rename to share/shwap/getter_test.go index 7a0fbdbb9a..445d9d2ef3 100644 --- a/share/ipldv2/getter_test.go +++ b/share/shwap/getter_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "bytes" diff --git a/share/ipldv2/ipldv2.go b/share/shwap/ipldv2.go similarity index 98% rename from share/ipldv2/ipldv2.go rename to share/shwap/ipldv2.go index 1b50803e17..a71c62a838 100644 --- a/share/ipldv2/ipldv2.go +++ b/share/shwap/ipldv2.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "crypto/sha256" @@ -50,7 +50,7 @@ func NewBlockService(b blockstore.Blockstore, ex exchange.Interface) blockservic return blockservice.New(b, ex, blockservice.WithAllowlist(defaultAllowlist)) } -var log = logger.Logger("ipldv2") +var log = logger.Logger("shwap") const ( // sampleCodec is a CID codec used for share sampling Bitswap requests over Namespaced diff --git a/share/ipldv2/ipldv2_test.go b/share/shwap/ipldv2_test.go similarity index 99% rename from share/ipldv2/ipldv2_test.go rename to share/shwap/ipldv2_test.go index f199148e81..c9cd2a13cd 100644 --- a/share/ipldv2/ipldv2_test.go +++ b/share/shwap/ipldv2_test.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "context" diff --git a/share/ipldv2/pb/ipldv2pb.pb.go b/share/shwap/pb/shwap_pb.pb.go similarity index 78% rename from share/ipldv2/pb/ipldv2pb.pb.go rename to share/shwap/pb/shwap_pb.pb.go index db5f876086..87905655e1 100644 --- a/share/ipldv2/pb/ipldv2pb.pb.go +++ b/share/shwap/pb/shwap_pb.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: share/ipldv2/pb/ipldv2pb.proto +// source: share/shwap/pb/shwap_pb.proto -package ipldv2pb +package shwap_pb import ( fmt "fmt" @@ -45,7 +45,7 @@ func (x AxisType) String() string { } func (AxisType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{0} + return fileDescriptor_fdfe0676a85dc852, []int{0} } type SampleType int32 @@ -70,7 +70,7 @@ func (x SampleType) String() string { } func (SampleType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{1} + return fileDescriptor_fdfe0676a85dc852, []int{1} } type Axis struct { @@ -82,7 +82,7 @@ func (m *Axis) Reset() { *m = Axis{} } func (m *Axis) String() string { return proto.CompactTextString(m) } func (*Axis) ProtoMessage() {} func (*Axis) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{0} + return fileDescriptor_fdfe0676a85dc852, []int{0} } func (m *Axis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ func (m *Sample) Reset() { *m = Sample{} } func (m *Sample) String() string { return proto.CompactTextString(m) } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{1} + return fileDescriptor_fdfe0676a85dc852, []int{1} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -203,7 +203,7 @@ func (m *Data) Reset() { *m = Data{} } func (m *Data) String() string { return proto.CompactTextString(m) } func (*Data) ProtoMessage() {} func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_cb41c3a4f982a271, []int{2} + return fileDescriptor_fdfe0676a85dc852, []int{2} } func (m *Data) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -261,31 +261,32 @@ func init() { proto.RegisterType((*Data)(nil), "Data") } -func init() { proto.RegisterFile("share/ipldv2/pb/ipldv2pb.proto", fileDescriptor_cb41c3a4f982a271) } +func init() { proto.RegisterFile("share/shwap/pb/shwap_pb.proto", fileDescriptor_fdfe0676a85dc852) } -var fileDescriptor_cb41c3a4f982a271 = []byte{ - // 336 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xbd, 0x4e, 0xf3, 0x40, - 0x10, 0xf4, 0xc5, 0x51, 0x7e, 0xd6, 0x56, 0x3e, 0xeb, 0x9a, 0xcf, 0x02, 0x74, 0x98, 0x54, 0x56, - 0x84, 0x1c, 0xc9, 0xb4, 0x34, 0xfc, 0x14, 0xa4, 0x8b, 0x1c, 0x7a, 0x74, 0x96, 0x1d, 0xc5, 0x92, - 0x91, 0x4f, 0x3e, 0x0b, 0x92, 0xb7, 0xe0, 0x39, 0x78, 0x12, 0xca, 0x94, 0x94, 0x28, 0x79, 0x11, - 0xb4, 0xeb, 0x23, 0xa1, 0xa0, 0x9b, 0x99, 0x9d, 0xdb, 0xf1, 0xac, 0x41, 0xe8, 0x95, 0xac, 0xf3, - 0x69, 0xa1, 0xca, 0xec, 0x25, 0x9e, 0xaa, 0xd4, 0x20, 0x95, 0x46, 0xaa, 0xae, 0x9a, 0xea, 0x64, - 0xa4, 0xd2, 0xa9, 0xaa, 0xab, 0x6a, 0xd9, 0xf2, 0xf1, 0x35, 0x74, 0x6f, 0xd6, 0x85, 0xe6, 0xff, - 0xa1, 0x2f, 0xd7, 0x85, 0x7e, 0x2a, 0x32, 0x9f, 0x05, 0x2c, 0x74, 0x93, 0x1e, 0xd2, 0x59, 0xc6, - 0x4f, 0x61, 0x48, 0x83, 0x95, 0x2c, 0x97, 0x7e, 0x27, 0xb0, 0x43, 0x37, 0x19, 0xa0, 0xf0, 0x20, - 0xcb, 0xe5, 0xf8, 0x9d, 0x41, 0x6f, 0x21, 0x9f, 0x55, 0x99, 0xa3, 0x4f, 0x13, 0x3a, 0xae, 0x18, - 0xb4, 0xc2, 0x2c, 0xe3, 0x97, 0xe0, 0x98, 0x61, 0xb3, 0x51, 0xb9, 0xdf, 0x09, 0x58, 0x38, 0x8a, - 0x9d, 0xa8, 0x7d, 0xfa, 0xb8, 0x51, 0x79, 0x02, 0xfa, 0x80, 0xf9, 0x05, 0xb8, 0xc6, 0x4d, 0x65, - 0x7c, 0x9b, 0xb6, 0x99, 0x0d, 0x0b, 0x94, 0x78, 0x7c, 0xb0, 0x50, 0x19, 0xbf, 0x1b, 0xb0, 0xd0, - 0x89, 0xff, 0x45, 0xa6, 0x5a, 0x1a, 0xcd, 0x11, 0xfc, 0xbc, 0x21, 0x32, 0x56, 0xd0, 0xbd, 0x97, - 0x8d, 0xc4, 0xaa, 0x99, 0x6c, 0xe4, 0xaf, 0xaa, 0x48, 
0x67, 0x19, 0x3f, 0x07, 0x87, 0x06, 0x94, - 0xaa, 0x4d, 0x59, 0x40, 0x89, 0x42, 0x35, 0x8f, 0x80, 0x98, 0xc9, 0xb4, 0xff, 0xce, 0x1c, 0xa2, - 0x85, 0xe0, 0xe4, 0x0c, 0x06, 0x78, 0x5c, 0x2a, 0xd5, 0x07, 0x3b, 0xa9, 0x5e, 0x3d, 0x0b, 0xc1, - 0x5d, 0x55, 0x7a, 0x6c, 0x12, 0x01, 0x1c, 0x0f, 0xc0, 0x47, 0x00, 0xf8, 0x75, 0xad, 0xe2, 0x59, - 0xdc, 0x03, 0x77, 0x2e, 0xeb, 0xa2, 0xd9, 0x18, 0x85, 0xdd, 0xfa, 0x1f, 0x3b, 0xc1, 0xb6, 0x3b, - 0xc1, 0xbe, 0x76, 0x82, 0xbd, 0xed, 0x85, 0xb5, 0xdd, 0x0b, 0xeb, 0x73, 0x2f, 0xac, 0xb4, 0x47, - 0xff, 0xf2, 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x91, 0x61, 0x65, 0x9c, 0xfd, 0x01, 0x00, 0x00, +var fileDescriptor_fdfe0676a85dc852 = []byte{ + // 337 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xbd, 0x6e, 0xf2, 0x40, + 0x10, 0xf4, 0x61, 0xc4, 0xcf, 0xda, 0xe2, 0xb3, 0xae, 0xf9, 0xac, 0xfc, 0x38, 0x0e, 0x95, 0x85, + 0x22, 0x23, 0x91, 0x36, 0x4d, 0x7e, 0x8a, 0xd0, 0x21, 0x93, 0x1e, 0x9d, 0x65, 0x23, 0x2c, 0x39, + 0xba, 0x93, 0xcf, 0x12, 0xf0, 0x16, 0x79, 0x8e, 0x3c, 0x49, 0x4a, 0xca, 0x94, 0x11, 0xbc, 0x48, + 0xb4, 0xeb, 0x0b, 0xa4, 0x48, 0x37, 0x33, 0x3b, 0xb7, 0xe3, 0x59, 0xc3, 0xa5, 0x5e, 0x89, 0x2a, + 0x1f, 0xeb, 0xd5, 0x5a, 0xa8, 0xb1, 0x4a, 0x1b, 0xb0, 0x50, 0x69, 0xac, 0x2a, 0x59, 0xcb, 0xb3, + 0x81, 0x4a, 0xc7, 0xaa, 0x92, 0x72, 0xd9, 0xf0, 0xe1, 0x1d, 0xb4, 0xef, 0x37, 0x85, 0xe6, 0xff, + 0xa1, 0x2b, 0x36, 0x85, 0x5e, 0x14, 0x99, 0xcf, 0x42, 0x16, 0xb9, 0x49, 0x07, 0xe9, 0x34, 0xe3, + 0xe7, 0xd0, 0xa7, 0xc1, 0x4a, 0x94, 0x4b, 0xbf, 0x15, 0xda, 0x91, 0x9b, 0xf4, 0x50, 0x78, 0x16, + 0xe5, 0x72, 0xf8, 0xce, 0xa0, 0x33, 0x17, 0xaf, 0xaa, 0xcc, 0xd1, 0xa7, 0x09, 0x9d, 0x56, 0xf4, + 0x1a, 0x61, 0x9a, 0xf1, 0x1b, 0x70, 0xcc, 0xb0, 0xde, 0xaa, 0xdc, 0x6f, 0x85, 0x2c, 0x1a, 0x4c, + 0x9c, 0xb8, 0x79, 0xfa, 0xb2, 0x55, 0x79, 0x02, 0xfa, 0x88, 0xf9, 0x35, 0xb8, 0xc6, 0x4d, 0x5d, + 0x7c, 0x9b, 0xb6, 0x99, 0x0d, 0x73, 0x94, 0xf8, 0xe4, 0x68, 0xa1, 0x32, 0x7e, 0x3b, 0x64, 0x91, + 0x33, 0xf9, 0x17, 0x9b, 0x6a, 0x69, 0x3c, 0x43, 0xf0, 0xf3, 0x86, 0xc8, 0x50, 0x41, 0xfb, 0x49, + 0xd4, 0x02, 0xab, 0x66, 0xa2, 0x16, 0xbf, 0xaa, 0x22, 0x9d, 0x66, 0xfc, 0x0a, 0x1c, 0x1a, 0x50, + 0xaa, 0x36, 0x65, 0x01, 0x25, 0x0a, 0xd5, 0x3c, 0x06, 0x62, 0x26, 0xd3, 0xfe, 0x3b, 0xb3, 0x8f, + 0x16, 0x82, 0xa3, 0x0b, 0xe8, 0xe1, 0x71, 0xa9, 0x54, 0x17, 0xec, 0x44, 0xae, 0x3d, 0x0b, 0xc1, + 0xa3, 0x2c, 0x3d, 0x36, 0x8a, 0x01, 0x4e, 0x07, 0xe0, 0x03, 0x00, 0xfc, 0xba, 0x46, 0xf1, 0x2c, + 0xee, 0x81, 0x3b, 0x13, 0x55, 0x51, 0x6f, 0x8d, 0xc2, 0x1e, 0xfc, 0x8f, 0x7d, 0xc0, 0x76, 0xfb, + 0x80, 0x7d, 0xed, 0x03, 0xf6, 0x76, 0x08, 0xac, 0xdd, 0x21, 0xb0, 0x3e, 0x0f, 0x81, 0x95, 0x76, + 0xe8, 0x5f, 0xde, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xad, 0x11, 0x6f, 0x50, 0xfc, 0x01, 0x00, + 0x00, } func (m *Axis) Marshal() (dAtA []byte, err error) { @@ -312,7 +313,7 @@ func (m *Axis) MarshalToSizedBuffer(dAtA []byte) (int, error) { for iNdEx := len(m.AxisHalf) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.AxisHalf[iNdEx]) copy(dAtA[i:], m.AxisHalf[iNdEx]) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisHalf[iNdEx]))) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.AxisHalf[iNdEx]))) i-- dAtA[i] = 0x12 } @@ -320,7 +321,7 @@ func (m *Axis) MarshalToSizedBuffer(dAtA []byte) (int, error) { if len(m.AxisId) > 0 { i -= len(m.AxisId) copy(dAtA[i:], m.AxisId) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.AxisId))) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.AxisId))) i-- dAtA[i] = 0xa } @@ -354,7 +355,7 @@ func (m *Sample) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { return 0, err } i -= size - i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + i = encodeVarintShwapPb(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 @@ -362,19 +363,19 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { if len(m.SampleShare) > 0 { i -= len(m.SampleShare) copy(dAtA[i:], m.SampleShare) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.SampleShare))) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.SampleShare))) i-- dAtA[i] = 0x1a } if m.SampleType != 0 { - i = encodeVarintIpldv2Pb(dAtA, i, uint64(m.SampleType)) + i = encodeVarintShwapPb(dAtA, i, uint64(m.SampleType)) i-- dAtA[i] = 0x10 } if len(m.SampleId) > 0 { i -= len(m.SampleId) copy(dAtA[i:], m.SampleId) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.SampleId))) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.SampleId))) i-- dAtA[i] = 0xa } @@ -408,7 +409,7 @@ func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 0, err } i -= size - i = encodeVarintIpldv2Pb(dAtA, i, uint64(size)) + i = encodeVarintShwapPb(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a @@ -417,7 +418,7 @@ func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { for iNdEx := len(m.DataShares) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DataShares[iNdEx]) copy(dAtA[i:], m.DataShares[iNdEx]) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataShares[iNdEx]))) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.DataShares[iNdEx]))) i-- dAtA[i] = 0x12 } @@ -425,15 +426,15 @@ func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { if len(m.DataId) > 0 { i -= len(m.DataId) copy(dAtA[i:], m.DataId) - i = encodeVarintIpldv2Pb(dAtA, i, uint64(len(m.DataId))) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.DataId))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func encodeVarintIpldv2Pb(dAtA []byte, offset int, v uint64) int { - offset -= sovIpldv2Pb(v) +func encodeVarintShwapPb(dAtA []byte, offset int, v uint64) int { + offset -= sovShwapPb(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -451,12 +452,12 @@ func (m *Axis) Size() (n int) { _ = l l = len(m.AxisId) if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } if len(m.AxisHalf) > 0 { for _, b := range m.AxisHalf { l = len(b) - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } } return n @@ -470,18 +471,18 @@ func (m *Sample) Size() (n int) { _ = l l = len(m.SampleId) if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } if m.SampleType != 0 { - n += 1 + sovIpldv2Pb(uint64(m.SampleType)) + n += 1 + sovShwapPb(uint64(m.SampleType)) } l = len(m.SampleShare) if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } if m.SampleProof != nil { l = m.SampleProof.Size() - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } return n } @@ -494,26 +495,26 @@ func (m *Data) Size() (n int) { _ = l l = len(m.DataId) if l > 0 { - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } if len(m.DataShares) > 0 { for _, b := range m.DataShares { l = len(b) - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } } if m.DataProof != nil { l = m.DataProof.Size() - n += 1 + l + sovIpldv2Pb(uint64(l)) + n += 1 + l + sovShwapPb(uint64(l)) } return n } -func sovIpldv2Pb(x uint64) (n int) { +func sovShwapPb(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } -func sozIpldv2Pb(x uint64) (n int) { - return sovIpldv2Pb(uint64((x << 1) ^ uint64((int64(x) >> 
63)))) +func sozShwapPb(x uint64) (n int) { + return sovShwapPb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *Axis) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -523,7 +524,7 @@ func (m *Axis) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -551,7 +552,7 @@ func (m *Axis) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -564,11 +565,11 @@ func (m *Axis) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -585,7 +586,7 @@ func (m *Axis) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -598,11 +599,11 @@ func (m *Axis) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -612,12 +613,12 @@ func (m *Axis) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + skippy, err := skipShwapPb(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -639,7 +640,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -667,7 +668,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -680,11 +681,11 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -701,7 +702,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { m.SampleType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -720,7 +721,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -733,11 +734,11 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return 
io.ErrUnexpectedEOF @@ -754,7 +755,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -767,11 +768,11 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -785,12 +786,12 @@ func (m *Sample) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + skippy, err := skipShwapPb(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -812,7 +813,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -840,7 +841,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -853,11 +854,11 @@ func (m *Data) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -874,7 +875,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -887,11 +888,11 @@ func (m *Data) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -906,7 +907,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowIpldv2Pb + return ErrIntOverflowShwapPb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -919,11 +920,11 @@ func (m *Data) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if postIndex > l { return io.ErrUnexpectedEOF @@ -937,12 +938,12 @@ func (m *Data) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipIpldv2Pb(dAtA[iNdEx:]) + skippy, err := skipShwapPb(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIpldv2Pb + return ErrInvalidLengthShwapPb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -956,7 +957,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { } return nil } -func skipIpldv2Pb(dAtA []byte) (n int, err error) { +func skipShwapPb(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 @@ -964,7 +965,7 @@ func 
skipIpldv2Pb(dAtA []byte) (n int, err error) { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowIpldv2Pb + return 0, ErrIntOverflowShwapPb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -981,7 +982,7 @@ func skipIpldv2Pb(dAtA []byte) (n int, err error) { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowIpldv2Pb + return 0, ErrIntOverflowShwapPb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -997,7 +998,7 @@ func skipIpldv2Pb(dAtA []byte) (n int, err error) { var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowIpldv2Pb + return 0, ErrIntOverflowShwapPb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -1010,14 +1011,14 @@ func skipIpldv2Pb(dAtA []byte) (n int, err error) { } } if length < 0 { - return 0, ErrInvalidLengthIpldv2Pb + return 0, ErrInvalidLengthShwapPb } iNdEx += length case 3: depth++ case 4: if depth == 0 { - return 0, ErrUnexpectedEndOfGroupIpldv2Pb + return 0, ErrUnexpectedEndOfGroupShwapPb } depth-- case 5: @@ -1026,7 +1027,7 @@ func skipIpldv2Pb(dAtA []byte) (n int, err error) { return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { - return 0, ErrInvalidLengthIpldv2Pb + return 0, ErrInvalidLengthShwapPb } if depth == 0 { return iNdEx, nil @@ -1036,7 +1037,7 @@ func skipIpldv2Pb(dAtA []byte) (n int, err error) { } var ( - ErrInvalidLengthIpldv2Pb = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowIpldv2Pb = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupIpldv2Pb = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthShwapPb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowShwapPb = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupShwapPb = fmt.Errorf("proto: unexpected end of group") ) diff --git a/share/ipldv2/pb/ipldv2pb.proto b/share/shwap/pb/shwap_pb.proto similarity index 100% rename from share/ipldv2/pb/ipldv2pb.proto rename to share/shwap/pb/shwap_pb.proto diff --git a/share/ipldv2/sample.go b/share/shwap/sample.go similarity index 96% rename from share/ipldv2/sample.go rename to share/shwap/sample.go index 5667f45993..de73a802ec 100644 --- a/share/ipldv2/sample.go +++ b/share/shwap/sample.go @@ -1,4 +1,4 @@ -package ipldv2 +package shwap import ( "errors" @@ -12,7 +12,7 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - ipldv2pb "github.com/celestiaorg/celestia-node/share/ipldv2/pb" + shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" ) // SampleType represents type of sample. @@ -140,9 +140,9 @@ func (s *Sample) MarshalBinary() ([]byte, error) { proof.IsMaxNamespaceIgnored = s.SampleProof.IsMaxNamespaceIDIgnored() proof.LeafHash = s.SampleProof.LeafHash() - return (&ipldv2pb.Sample{ + return (&shwappb.Sample{ SampleId: id, - SampleType: ipldv2pb.SampleType(s.Type), + SampleType: shwappb.SampleType(s.Type), SampleProof: proof, SampleShare: s.SampleShare, }).Marshal() @@ -150,7 +150,7 @@ func (s *Sample) MarshalBinary() ([]byte, error) { // UnmarshalBinary unmarshal Sample from binary. 
 func (s *Sample) UnmarshalBinary(data []byte) error {
-	proto := &ipldv2pb.Sample{}
+	proto := &shwappb.Sample{}
 	if err := proto.Unmarshal(data); err != nil {
 		return err
 	}
diff --git a/share/ipldv2/sample_hasher.go b/share/shwap/sample_hasher.go
similarity index 98%
rename from share/ipldv2/sample_hasher.go
rename to share/shwap/sample_hasher.go
index ddccbcbb26..7a427cce83 100644
--- a/share/ipldv2/sample_hasher.go
+++ b/share/shwap/sample_hasher.go
@@ -1,4 +1,4 @@
-package ipldv2
+package shwap
 
 import (
 	"crypto/sha256"
diff --git a/share/ipldv2/sample_hasher_test.go b/share/shwap/sample_hasher_test.go
similarity index 98%
rename from share/ipldv2/sample_hasher_test.go
rename to share/shwap/sample_hasher_test.go
index 00223049e8..ebf8da1d6c 100644
--- a/share/ipldv2/sample_hasher_test.go
+++ b/share/shwap/sample_hasher_test.go
@@ -1,4 +1,4 @@
-package ipldv2
+package shwap
 
 import (
 	"testing"
diff --git a/share/ipldv2/sample_id.go b/share/shwap/sample_id.go
similarity index 99%
rename from share/ipldv2/sample_id.go
rename to share/shwap/sample_id.go
index c8841b4072..9667002159 100644
--- a/share/ipldv2/sample_id.go
+++ b/share/shwap/sample_id.go
@@ -1,4 +1,4 @@
-package ipldv2
+package shwap
 
 import (
 	"encoding/binary"
diff --git a/share/ipldv2/sample_id_test.go b/share/shwap/sample_id_test.go
similarity index 98%
rename from share/ipldv2/sample_id_test.go
rename to share/shwap/sample_id_test.go
index 443dd3d5f1..a060dd6793 100644
--- a/share/ipldv2/sample_id_test.go
+++ b/share/shwap/sample_id_test.go
@@ -1,4 +1,4 @@
-package ipldv2
+package shwap
 
 import (
 	"testing"
diff --git a/share/ipldv2/sample_test.go b/share/shwap/sample_test.go
similarity index 98%
rename from share/ipldv2/sample_test.go
rename to share/shwap/sample_test.go
index 676bd952f1..dd666f9502 100644
--- a/share/ipldv2/sample_test.go
+++ b/share/shwap/sample_test.go
@@ -1,4 +1,4 @@
-package ipldv2
+package shwap
 
 import (
 	"testing"

From c96579a2241964be95f8a9c2b2e185ec968cfb35 Mon Sep 17 00:00:00 2001
From: Wondertan
Date: Wed, 6 Dec 2023 02:36:08 +0100
Subject: [PATCH 029/132] ensure only shares are cached in blockstore

---
 share/shwap/getter.go      | 67 ++++++++++++++++++++++----------------
 share/shwap/getter_test.go | 16 +++++----
 2 files changed, 49 insertions(+), 34 deletions(-)

diff --git a/share/shwap/getter.go b/share/shwap/getter.go
index 948f9cdc5b..b59eb27376 100644
--- a/share/shwap/getter.go
+++ b/share/shwap/getter.go
@@ -5,7 +5,9 @@ import (
 	"fmt"
 	"slices"
 
-	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/blockstore"
+	"github.com/ipfs/boxo/exchange"
+	block "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
 
 	"github.com/celestiaorg/celestia-app/pkg/wrapper"
@@ -16,32 +18,16 @@ import (
 )
 
 type Getter struct {
-	bget blockservice.BlockGetter
+	fetch  exchange.Interface
+	bstore blockstore.Blockstore
 }
 
-func NewGetter(bget blockservice.BlockGetter) *Getter {
-	return &Getter{bget: bget}
+func NewGetter(fetch exchange.Interface, bstore blockstore.Blockstore) *Getter {
+	return &Getter{fetch: fetch, bstore: bstore}
 }
 
-// GetShare
-// TODO: Deprecate this method
-func (g *Getter) GetShare(ctx context.Context, hdr *header.ExtendedHeader, row, col int) (share.Share, error) {
-	shrIdx := row*len(hdr.DAH.RowRoots) + col
-	cid := MustSampleCID(shrIdx, hdr.DAH, hdr.Height())
-	blk, err := g.bget.GetBlock(ctx, cid)
-	if err != nil {
-		return nil, fmt.Errorf("getting block from blockservice: %w", err)
-	}
-
-	smpl, err := SampleFromBlock(blk)
-	if err != nil {
-		return nil, fmt.Errorf("converting block to 
Sample: %w", err) - } - - return smpl.SampleShare, nil -} - -// GetShares +// GetShares fetches in the Block/EDS by their indexes. +// Automatically caches them on the Blockstore. // Guarantee that the returned shares are in the same order as shrIdxs. func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, shrIdxs ...int) ([]share.Share, error) { maxIdx := len(hdr.DAH.RowRoots) * len(hdr.DAH.ColumnRoots) @@ -53,8 +39,13 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, shrI cids[i] = MustSampleCID(shrIdx, hdr.DAH, hdr.Height()) } - smpls := make(map[int]*Sample, len(shrIdxs)) - blkCh := g.bget.GetBlocks(ctx, cids) + blkCh, err := g.fetch.GetBlocks(ctx, cids) + if err != nil { + return nil, fmt.Errorf("fetching blocks: %w", err) + } + + blks := make([]block.Block, 0, len(cids)) + smpls := make(map[int]*Sample, len(cids)) for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to smpl, err := SampleFromBlock(blk) if err != nil { @@ -64,15 +55,27 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, shrI shrIdx := int(smpl.SampleID.AxisIndex)*len(hdr.DAH.RowRoots) + int(smpl.SampleID.ShareIndex) smpls[shrIdx] = smpl + + blks = append(blks, blk) } - if len(smpls) != len(shrIdxs) { + if len(blks) != len(shrIdxs) { if ctx.Err() != nil { return nil, ctx.Err() } return nil, fmt.Errorf("not all shares were found") } + err = g.bstore.PutMany(ctx, blks) + if err != nil { + return nil, fmt.Errorf("storing shares: %w", err) + } + + err = g.fetch.NotifyNewBlocks(ctx, blks...) // tell bitswap that we stored the blks and can serve them now + if err != nil { + return nil, fmt.Errorf("notifying new shares: %w", err) + } + shrs := make([]share.Share, len(shrIdxs)) for i, shrIdx := range shrIdxs { shrs[i] = smpls[shrIdx].SampleShare @@ -99,7 +102,11 @@ func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2 return nil, err } - blkCh := g.bget.GetBlocks(ctx, cids) + blkCh, err := g.fetch.GetBlocks(ctx, cids) + if err != nil { + return nil, fmt.Errorf("fetching blocks: %w", err) + } + for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to axis, err := AxisFromBlock(blk) if err != nil { @@ -149,8 +156,12 @@ func (g *Getter) GetSharesByNamespace( return share.NamespacedShares{}, nil } + blkCh, err := g.fetch.GetBlocks(ctx, cids) + if err != nil { + return nil, fmt.Errorf("fetching blocks:%w", err) + } + datas := make([]*Data, 0, len(cids)) - blkCh := g.bget.GetBlocks(ctx, cids) for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to data, err := DataFromBlock(blk) if err != nil { diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index 445d9d2ef3..d993ef5bea 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -9,6 +9,9 @@ import ( "testing" "time" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/go-datastore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -29,9 +32,9 @@ func TestGetter(t *testing.T) { square, root := edstest.RandEDSWithNamespace(t, ns, 16) hdr := &header.ExtendedHeader{DAH: root} - b := edsBlockstore(square) - bserv := NewBlockService(b, nil) - get := NewGetter(bserv) + bstore := edsBlockstore(square) + exch := offline.Exchange(bstore) + get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) t.Run("GetShares", func(t *testing.T) { idxs := rand.Perm(int(square.Width() ^ 2))[:30] @@ -75,14 +78,15 @@ 
func TestGetter(t *testing.T) { }) t.Run("NamespaceInsideOfRoot", func(t *testing.T) { + // this test requires a different setup so we generate a new EDS square := edstest.RandEDS(t, 8) root, err := share.NewRoot(square) require.NoError(t, err) hdr := &header.ExtendedHeader{DAH: root} - b := edsBlockstore(square) - bserv := NewBlockService(b, nil) - get := NewGetter(bserv) + bstore := edsBlockstore(square) + exch := offline.Exchange(bstore) + get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) ns, err := addToNamespace(maxNs, -1) From bb034b3174bd030f9efb758cd3a4d61afd58c5f8 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Wed, 6 Dec 2023 03:09:10 +0100 Subject: [PATCH 030/132] add sessions --- share/shwap/getter.go | 22 +++++++++++++++++----- share/shwap/getter_test.go | 15 ++++++++++++--- 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/share/shwap/getter.go b/share/shwap/getter.go index b59eb27376..4c3e2ef741 100644 --- a/share/shwap/getter.go +++ b/share/shwap/getter.go @@ -18,11 +18,11 @@ import ( ) type Getter struct { - fetch exchange.Interface + fetch exchange.SessionExchange bstore blockstore.Blockstore } -func NewGetter(fetch exchange.Interface, bstore blockstore.Blockstore) *Getter { +func NewGetter(fetch exchange.SessionExchange, bstore blockstore.Blockstore) *Getter { return &Getter{fetch: fetch, bstore: bstore} } @@ -39,7 +39,11 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, shrI cids[i] = MustSampleCID(shrIdx, hdr.DAH, hdr.Height()) } - blkCh, err := g.fetch.GetBlocks(ctx, cids) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ses := g.fetch.NewSession(ctx) + + blkCh, err := ses.GetBlocks(ctx, cids) if err != nil { return nil, fmt.Errorf("fetching blocks: %w", err) } @@ -102,7 +106,11 @@ func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2 return nil, err } - blkCh, err := g.fetch.GetBlocks(ctx, cids) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ses := g.fetch.NewSession(ctx) + + blkCh, err := ses.GetBlocks(ctx, cids) if err != nil { return nil, fmt.Errorf("fetching blocks: %w", err) } @@ -156,7 +164,11 @@ func (g *Getter) GetSharesByNamespace( return share.NamespacedShares{}, nil } - blkCh, err := g.fetch.GetBlocks(ctx, cids) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ses := g.fetch.NewSession(ctx) + + blkCh, err := ses.GetBlocks(ctx, cids) if err != nil { return nil, fmt.Errorf("fetching blocks:%w", err) } diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index d993ef5bea..a51d6775e6 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/boxo/exchange/offline" "github.com/ipfs/go-datastore" "github.com/stretchr/testify/assert" @@ -33,7 +34,7 @@ func TestGetter(t *testing.T) { hdr := &header.ExtendedHeader{DAH: root} bstore := edsBlockstore(square) - exch := offline.Exchange(bstore) + exch := dummySessionExchange{offline.Exchange(bstore)} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) t.Run("GetShares", func(t *testing.T) { @@ -78,14 +79,14 @@ func TestGetter(t *testing.T) { }) t.Run("NamespaceInsideOfRoot", func(t *testing.T) { - // this test requires a different setup so we generate a new EDS + // this test requires a different setup, so we generate a new EDS 
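+	// The cache-through path exercised by these tests, in sketch form
+	// (collect is a hypothetical helper; errors handled as in GetShares):
+	//
+	//	blkCh, _ := ses.GetBlocks(ctx, cids)   // fetch via session
+	//	blks := collect(blkCh)                 // drain the channel
+	//	_ = bstore.PutMany(ctx, blks)          // persist locally
+	//	_ = exch.NotifyNewBlocks(ctx, blks...) // advertise via bitswap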
square := edstest.RandEDS(t, 8) root, err := share.NewRoot(square) require.NoError(t, err) hdr := &header.ExtendedHeader{DAH: root} bstore := edsBlockstore(square) - exch := offline.Exchange(bstore) + exch := &dummySessionExchange{offline.Exchange(bstore)} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) @@ -148,3 +149,11 @@ func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) return result, nil } + +type dummySessionExchange struct { + exchange.Interface +} + +func (d dummySessionExchange) NewSession(context.Context) exchange.Fetcher { + return d +} From e6d39dc3c0c16f348839656522c9324a77829c05 Mon Sep 17 00:00:00 2001 From: Vlad <13818348+walldiss@users.noreply.github.com> Date: Wed, 6 Dec 2023 20:32:31 +0800 Subject: [PATCH 031/132] create store v2 file interface (#2989) This is the foundation of new ~era~ sampling protocol and storage. --- share/store/eds_file.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 share/store/eds_file.go diff --git a/share/store/eds_file.go b/share/store/eds_file.go new file mode 100644 index 0000000000..fb9b23c82f --- /dev/null +++ b/share/store/eds_file.go @@ -0,0 +1,25 @@ +package store + +import ( + "context" + "io" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" +) + +type EdsFile interface { + io.Closer + // Size returns square size of the file. + Size() int + // Share returns share and corresponding proof for the given axis and share index in this axis. + Share(ctx context.Context, axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) + // AxisHalf returns shares for the first half of the axis of the given type and index. + AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) + // Data returns data for the given namespace and row index. + Data(ctx context.Context, namespace share.Namespace, rowIdx int) ([]share.NamespacedRow, error) + // EDS returns extended data square stored in the file. + EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) +} From 82150f296b1d4c8d6301609f04557d2b29491f21 Mon Sep 17 00:00:00 2001 From: Vlad <13818348+walldiss@users.noreply.github.com> Date: Wed, 6 Dec 2023 23:30:32 +0800 Subject: [PATCH 032/132] feat(store/mem_file): add in-memory eds file implementation (#2992) Adds in-memory file implementation --- share/eds/edstest/testing.go | 6 ++- share/getter.go | 9 ++-- share/getters/shrex_test.go | 3 +- share/sharetest/testing.go | 13 ++++- share/store/eds_file.go | 2 +- share/store/mem_file.go | 101 +++++++++++++++++++++++++++++++++++ share/store/mem_file_test.go | 69 ++++++++++++++++++++++++ 7 files changed, 194 insertions(+), 9 deletions(-) create mode 100644 share/store/mem_file.go create mode 100644 share/store/mem_file_test.go diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go index bf5e664f90..5c1c4aa7f1 100644 --- a/share/eds/edstest/testing.go +++ b/share/eds/edstest/testing.go @@ -34,12 +34,14 @@ func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { return eds } +// RandEDSWithNamespace generates EDS with given square size. Returned EDS will have namespacedAmount of +// shares with the given namespace. 
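+// For example (an illustrative call):
+//
+//	eds, root := RandEDSWithNamespace(t, ns, 10, 16)
+//
+// fills 10 of the 16*16 original shares with ns and leaves the rest
+// under random namespaces.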
func RandEDSWithNamespace(
 	t require.TestingT,
 	namespace share.Namespace,
-	size int,
+	namespacedAmount, size int,
 ) (*rsmt2d.ExtendedDataSquare, *share.Root) {
-	shares := sharetest.RandSharesWithNamespace(t, namespace, size*size)
+	shares := sharetest.RandSharesWithNamespace(t, namespace, namespacedAmount, size*size)
 	eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size)))
 	require.NoError(t, err, "failure to recompute the extended data square")
 	dah, err := share.NewRoot(eds)
diff --git a/share/getter.go b/share/getter.go
index 3fcc93de33..c75c2f5b3f 100644
--- a/share/getter.go
+++ b/share/getter.go
@@ -72,16 +72,19 @@ func (ns NamespacedShares) Verify(root *Root, namespace Namespace) error {
 	}
 
 	for i, row := range ns {
+		if row.Proof == nil && row.Shares == nil {
+			return fmt.Errorf("row verification failed: no proofs or shares")
+		}
 		// verify row data against row hash from original root
-		if !row.verify(originalRoots[i], namespace) {
+		if !row.Verify(originalRoots[i], namespace) {
 			return fmt.Errorf("row verification failed: row %d doesn't match original root: %s", i, root.String())
 		}
 	}
 	return nil
 }
 
-// verify validates the row using nmt inclusion proof.
-func (row *NamespacedRow) verify(rowRoot []byte, namespace Namespace) bool {
+// Verify validates the row using nmt inclusion proof.
+func (row *NamespacedRow) Verify(rowRoot []byte, namespace Namespace) bool {
 	// construct nmt leaves from shares by prepending namespace
 	leaves := make([][]byte, 0, len(row.Shares))
 	for _, shr := range row.Shares {
diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go
index b625bb4c10..b9a23faae3 100644
--- a/share/getters/shrex_test.go
+++ b/share/getters/shrex_test.go
@@ -61,8 +61,9 @@ func TestShrexGetter(t *testing.T) {
 	t.Cleanup(cancel)
 
 	// generate test data
+	size := 64
 	namespace := sharetest.RandV0Namespace()
-	randEDS, dah := edstest.RandEDSWithNamespace(t, namespace, 64)
+	randEDS, dah := edstest.RandEDSWithNamespace(t, namespace, size*size, size)
 	eh := headertest.RandExtendedHeaderWithRoot(t, dah)
 	require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS))
 	peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{
diff --git a/share/sharetest/testing.go b/share/sharetest/testing.go
index 3889260393..6564af9b06 100644
--- a/share/sharetest/testing.go
+++ b/share/sharetest/testing.go
@@ -38,17 +38,26 @@ func RandShares(t require.TestingT, total int) []share.Share {
 }
 
 // RandSharesWithNamespace is the same as RandShares, but sets the same namespace for all shares.
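+//
+// For example (illustrative):
+//
+//	shrs := RandSharesWithNamespace(t, ns, 8, 16)
+//
+// returns 16 shares of which exactly the first 8 carry ns.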
-func RandSharesWithNamespace(t require.TestingT, namespace share.Namespace, total int) []share.Share { +func RandSharesWithNamespace(t require.TestingT, namespace share.Namespace, namespacedAmount, total int) []share.Share { if total&(total-1) != 0 { t.Errorf("total must be power of 2: %d", total) t.FailNow() } + if namespacedAmount > total { + t.Errorf("withNamespace must be less than total: %d", total) + t.FailNow() + } + shares := make([]share.Share, total) rnd := rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec for i := range shares { shr := make([]byte, share.Size) - copy(share.GetNamespace(shr), namespace) + if i < namespacedAmount { + copy(share.GetNamespace(shr), namespace) + } else { + copy(share.GetNamespace(shr), RandV0Namespace()) + } _, err := rnd.Read(share.GetData(shr)) require.NoError(t, err) shares[i] = shr diff --git a/share/store/eds_file.go b/share/store/eds_file.go index fb9b23c82f..0bedd4a5ec 100644 --- a/share/store/eds_file.go +++ b/share/store/eds_file.go @@ -19,7 +19,7 @@ type EdsFile interface { // AxisHalf returns shares for the first half of the axis of the given type and index. AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) // Data returns data for the given namespace and row index. - Data(ctx context.Context, namespace share.Namespace, rowIdx int) ([]share.NamespacedRow, error) + Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) // EDS returns extended data square stored in the file. EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) } diff --git a/share/store/mem_file.go b/share/store/mem_file.go new file mode 100644 index 0000000000..f57f4d58a9 --- /dev/null +++ b/share/store/mem_file.go @@ -0,0 +1,101 @@ +package store + +import ( + "context" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +var _ EdsFile = (*MemFile)(nil) + +type MemFile struct { + Eds *rsmt2d.ExtendedDataSquare +} + +func (f *MemFile) Close() error { + return nil +} + +func (f *MemFile) Size() int { + return int(f.Eds.Width()) +} + +func (f *MemFile) Share( + _ context.Context, + axisType rsmt2d.Axis, + axisIdx, shrIdx int, +) (share.Share, nmt.Proof, error) { + shares := f.axis(axisType, axisIdx) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) + for _, shr := range shares { + err := tree.Push(shr) + if err != nil { + return nil, nmt.Proof{}, err + } + } + + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return nil, nmt.Proof{}, err + } + + return shares[shrIdx], proof, nil +} + +func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + return f.axis(axisType, axisIdx)[:f.Size()/2], nil +} + +func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + shares := f.axis(rsmt2d.Row, rowIdx) + bserv := ipld.NewMemBlockservice() + batchAdder := ipld.NewNmtNodeAdder(context.TODO(), bserv, ipld.MaxSizeBatchOption(len(shares))) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares)/2), uint(rowIdx), + nmt.NodeVisitor(batchAdder.Visit)) + for _, shr := range shares { + err := tree.Push(shr) + if err != nil { + return share.NamespacedRow{}, err + } + } + + root, err := tree.Root() + if err != nil { + return share.NamespacedRow{}, err + } + + err = 
batchAdder.Commit() + if err != nil { + return share.NamespacedRow{}, err + } + + cid := ipld.MustCidFromNamespacedSha256(root) + row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, cid, namespace, len(shares)) + if err != nil { + return share.NamespacedRow{}, err + } + return share.NamespacedRow{ + Shares: row, + Proof: proof, + }, nil +} + +func (f *MemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { + return f.Eds, nil +} + +func (f *MemFile) axis(axisType rsmt2d.Axis, axisIdx int) []share.Share { + switch axisType { + case rsmt2d.Row: + return f.Eds.Row(uint(axisIdx)) + case rsmt2d.Col: + return f.Eds.Col(uint(axisIdx)) + default: + panic("unknown axis") + } +} diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go new file mode 100644 index 0000000000..4d82db2446 --- /dev/null +++ b/share/store/mem_file_test.go @@ -0,0 +1,69 @@ +package store + +import ( + "context" + "crypto/sha256" + mrand "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestMemFileShare(t *testing.T) { + eds := edstest.RandEDS(t, 32) + root, err := share.NewRoot(eds) + require.NoError(t, err) + fl := &MemFile{Eds: eds} + + width := int(eds.Width()) + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + for i := 0; i < width*width; i++ { + axisIdx, shrIdx := i/width, i%width + if axisType == rsmt2d.Col { + axisIdx, shrIdx = shrIdx, axisIdx + } + + shr, prf, err := fl.Share(context.TODO(), axisType, axisIdx, shrIdx) + require.NoError(t, err) + + namespace := share.ParitySharesNamespace + if axisIdx < width/2 && shrIdx < width/2 { + namespace = share.GetNamespace(shr) + } + + axishash := root.RowRoots[axisIdx] + if axisType == rsmt2d.Col { + axishash = root.ColumnRoots[axisIdx] + } + + ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) + require.True(t, ok) + } + } +} + +func TestMemFileDate(t *testing.T) { + size := 32 + + // generate EDS with random data and some shares with the same namespace + namespace := sharetest.RandV0Namespace() + amount := mrand.Intn(size*size-1) + 1 + eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) + + file := &MemFile{Eds: eds} + + for i, root := range dah.RowRoots { + if !namespace.IsOutsideRange(root, root) { + nd, err := file.Data(context.Background(), namespace, i) + require.NoError(t, err) + ok := nd.Verify(root, namespace) + require.True(t, ok) + } + } +} From 4feea07b6e57829ef767772ffd842eac24925322 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 7 Dec 2023 21:01:04 +0800 Subject: [PATCH 033/132] add ods eds files --- share/store/eds_file.go | 201 ++++++++++++++++++++++++-- share/store/eds_file_test.go | 103 ++++++++++++++ share/store/file.go | 25 ++++ share/store/file_header.go | 73 ++++++++++ share/store/file_test.go | 92 ++++++++++++ share/store/mem_file.go | 24 ++-- share/store/mem_file_test.go | 104 +++++++------- share/store/ods_file.go | 265 +++++++++++++++++++++++++++++++++++ share/store/ods_file_test.go | 162 +++++++++++++++++++++ 9 files changed, 979 insertions(+), 70 deletions(-) create mode 100644 share/store/eds_file_test.go create mode 100644 share/store/file.go create mode 100644 share/store/file_header.go create mode 100644 share/store/file_test.go create mode 100644 share/store/ods_file.go create mode 100644 
share/store/ods_file_test.go diff --git a/share/store/eds_file.go b/share/store/eds_file.go index 0bedd4a5ec..d9483e5063 100644 --- a/share/store/eds_file.go +++ b/share/store/eds_file.go @@ -2,24 +2,199 @@ package store import ( "context" - "io" + "fmt" + "os" + "golang.org/x/exp/mmap" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" ) -type EdsFile interface { - io.Closer - // Size returns square size of the file. - Size() int - // Share returns share and corresponding proof for the given axis and share index in this axis. - Share(ctx context.Context, axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) - // AxisHalf returns shares for the first half of the axis of the given type and index. - AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) - // Data returns data for the given namespace and row index. - Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) - // EDS returns extended data square stored in the file. - EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) +var _ File = (*EdsFile)(nil) + +type EdsFile struct { + path string + hdr *Header + fl fileBackend +} + +// OpenEdsFile opens an existing file. File has to be closed after usage. +func OpenEdsFile(path string) (*EdsFile, error) { + f, err := mmap.Open(path) + if err != nil { + return nil, err + } + + h, err := ReadHeader(f) + if err != nil { + return nil, err + } + + // TODO(WWondertan): Validate header + return &EdsFile{ + path: path, + hdr: h, + fl: f, + }, nil +} + +func CreateEdsFile(path string, eds *rsmt2d.ExtendedDataSquare) (*EdsFile, error) { + f, err := os.Create(path) + if err != nil { + return nil, err + } + + h := &Header{ + shareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field + squareSize: uint16(eds.Width()), + version: FileV0, + } + + if _, err = h.WriteTo(f); err != nil { + return nil, err + } + + for i := uint(0); i < eds.Width(); i++ { + for j := uint(0); j < eds.Width(); j++ { + // TODO: Implemented buffered write through io.CopyBuffer + shr := eds.GetCell(i, j) + if _, err := f.Write(shr); err != nil { + return nil, err + } + } + } + + return &EdsFile{ + path: path, + fl: f, + hdr: h, + }, f.Sync() +} + +func (f *EdsFile) Size() int { + return f.hdr.SquareSize() +} + +func (f *EdsFile) Close() error { + return f.fl.Close() +} + +func (f *EdsFile) Header() *Header { + return f.hdr +} + +func (f *EdsFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + axis, err := f.axis(axisType, axisIdx) + if err != nil { + return nil, err + } + return axis[:f.Size()/2], nil +} + +func (f *EdsFile) axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + switch axisType { + case rsmt2d.Col: + return f.readCol(axisIdx) + case rsmt2d.Row: + return f.readRow(axisIdx) + } + return nil, fmt.Errorf("unknown axis") +} + +func (f *EdsFile) readRow(idx int) ([]share.Share, error) { + shrLn := int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) + + shrs := make([]share.Share, odsLn) + + pos := idx * odsLn + offset := pos*shrLn + HeaderSize + + axsData := make([]byte, odsLn*shrLn) + if _, err := f.fl.ReadAt(axsData, int64(offset)); err != nil { + return nil, err + } + + for i := range shrs { + shrs[i] = axsData[i*shrLn : (i+1)*shrLn] + } + return shrs, nil +} + +func (f *EdsFile) readCol(idx int) ([]share.Share, error) { + shrLn 
:= int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) + + shrs := make([]share.Share, odsLn) + + for i := 0; i < odsLn; i++ { + pos := idx + i*odsLn + offset := pos*shrLn + HeaderSize + + shr := make(share.Share, shrLn) + if _, err := f.fl.ReadAt(shr, int64(offset)); err != nil { + return nil, err + } + shrs[i] = shr + } + return shrs, nil +} + +func (f *EdsFile) Share( + _ context.Context, + axisType rsmt2d.Axis, + axisIdx, shrIdx int, +) (share.Share, nmt.Proof, error) { + shares, err := f.axis(axisType, axisIdx) + if err != nil { + return nil, nmt.Proof{}, err + } + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) + for _, shr := range shares { + err := tree.Push(shr) + if err != nil { + return nil, nmt.Proof{}, err + } + } + + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return nil, nmt.Proof{}, err + } + + return shares[shrIdx], proof, nil +} + +func (f *EdsFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + shares, err := f.axis(rsmt2d.Row, rowIdx) + if err != nil { + return share.NamespacedRow{}, err + } + return ndDateFromShares(shares, namespace, rowIdx) +} + +func (f *EdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { + shrLn := int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) + + buf := make([]byte, odsLn*odsLn*shrLn) + if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { + return nil, err + } + + shrs := make([][]byte, odsLn*odsLn) + for i := 0; i < odsLn; i++ { + for j := 0; j < odsLn; j++ { + pos := i*odsLn + j + shrs[pos] = buf[pos*shrLn : (pos+1)*shrLn] + } + } + + treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2)) + return rsmt2d.ImportExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn) } diff --git a/share/store/eds_file_test.go b/share/store/eds_file_test.go new file mode 100644 index 0000000000..659eab06b2 --- /dev/null +++ b/share/store/eds_file_test.go @@ -0,0 +1,103 @@ +package store + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestCreateEdsFile(t *testing.T) { + path := t.TempDir() + "/testfile" + edsIn := edstest.RandEDS(t, 8) + + _, err := CreateEdsFile(path, edsIn) + require.NoError(t, err) + + f, err := OpenEdsFile(path) + require.NoError(t, err) + edsOut, err := f.EDS(context.TODO()) + require.NoError(t, err) + assert.True(t, edsIn.Equals(edsOut)) +} + +func TestEdsFile(t *testing.T) { + size := 32 + createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { + path := t.TempDir() + "/testfile" + fl, err := CreateEdsFile(path, eds) + require.NoError(t, err) + return fl + } + + t.Run("Share", func(t *testing.T) { + testFileShare(t, createOdsFile, size) + }) + + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, createOdsFile, size) + }) + + t.Run("Data", func(t *testing.T) { + testFileDate(t, createOdsFile, size) + }) + + t.Run("EDS", func(t *testing.T) { + testFileEds(t, createOdsFile, size) + }) +} + +// BenchmarkAxisFromEdsFile/Size:32/Axis:row/squareHalf:first(original)-10 288624 3758 ns/op +// BenchmarkAxisFromEdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 313893 3729 ns/op +// BenchmarkAxisFromEdsFile/Size:32/Axis:col/squareHalf:first(original)-10 29406 41051 ns/op +// BenchmarkAxisFromEdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 29145 41047 ns/op +// 
BenchmarkAxisFromEdsFile/Size:64/Axis:row/squareHalf:first(original)-10 186302 6532 ns/op +// BenchmarkAxisFromEdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 186172 6383 ns/op +// BenchmarkAxisFromEdsFile/Size:64/Axis:col/squareHalf:first(original)-10 14451 82114 ns/op +// BenchmarkAxisFromEdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 14572 82047 ns/op +// BenchmarkAxisFromEdsFile/Size:128/Axis:row/squareHalf:first(original)-10 94576 11349 ns/op +// BenchmarkAxisFromEdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 103954 11276 ns/op +// BenchmarkAxisFromEdsFile/Size:128/Axis:col/squareHalf:first(original)-10 7072 165301 ns/op +// BenchmarkAxisFromEdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 6805 165173 ns/op +func BenchmarkAxisFromEdsFile(b *testing.B) { + minSize, maxSize := 32, 128 + dir := b.TempDir() + newFile := func(size int) File { + eds := edstest.RandEDS(b, size) + path := dir + "/testfile" + f, err := CreateEdsFile(path, eds) + require.NoError(b, err) + return f + } + benchGetAxisFromFile(b, newFile, minSize, maxSize) +} + +// BenchmarkShareFromEdsFile/Size:32/Axis:row/squareHalf:first(original)-10 17850 66716 ns/op +// BenchmarkShareFromEdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 18517 64462 ns/op +// BenchmarkShareFromEdsFile/Size:32/Axis:col/squareHalf:first(original)-10 10000 104241 ns/op +// BenchmarkShareFromEdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 10000 101964 ns/op +// BenchmarkShareFromEdsFile/Size:64/Axis:row/squareHalf:first(original)-10 8641 129674 ns/op +// BenchmarkShareFromEdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 9022 124899 ns/op +// BenchmarkShareFromEdsFile/Size:64/Axis:col/squareHalf:first(original)-10 5625 204934 ns/op +// BenchmarkShareFromEdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 5785 200634 ns/op +// BenchmarkShareFromEdsFile/Size:128/Axis:row/squareHalf:first(original)-10 4424 262753 ns/op +// BenchmarkShareFromEdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 4690 252676 ns/op +// BenchmarkShareFromEdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2834 415072 ns/op +// BenchmarkShareFromEdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 2934 426160 ns/op +func BenchmarkShareFromEdsFile(b *testing.B) { + minSize, maxSize := 32, 128 + dir := b.TempDir() + newFile := func(size int) File { + eds := edstest.RandEDS(b, size) + path := dir + "/testfile" + f, err := CreateEdsFile(path, eds) + require.NoError(b, err) + return f + } + benchGetShareFromFile(b, newFile, minSize, maxSize) +} diff --git a/share/store/file.go b/share/store/file.go new file mode 100644 index 0000000000..c3d5f4b6c5 --- /dev/null +++ b/share/store/file.go @@ -0,0 +1,25 @@ +package store + +import ( + "context" + "io" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" +) + +type File interface { + io.Closer + // Size returns square size of the file. + Size() int + // Share returns share and corresponding proof for the given axis and share index in this axis. + Share(ctx context.Context, axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) + // AxisHalf returns shares for the first half of the axis of the given type and index. + AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) + // Data returns data for the given namespace and row index. 
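+	// A single row proof lets callers verify the returned shares against
+	// the DAH row root without fetching the rest of the row; NMT proofs
+	// cover namespace absence the same way.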
+ Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) + // EDS returns extended data square stored in the file. + EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) +} diff --git a/share/store/file_header.go b/share/store/file_header.go new file mode 100644 index 0000000000..83c9d69190 --- /dev/null +++ b/share/store/file_header.go @@ -0,0 +1,73 @@ +package store + +import ( + "encoding/binary" + "io" +) + +const HeaderSize = 32 + +type Header struct { + version FileVersion + + // Taken directly from EDS + shareSize uint16 + squareSize uint16 +} + +type FileVersion uint8 + +const ( + FileV0 FileVersion = iota +) + +func (h *Header) Version() FileVersion { + return h.version +} + +func (h *Header) ShareSize() int { + return int(h.shareSize) +} + +func (h *Header) SquareSize() int { + return int(h.squareSize) +} + +func (h *Header) WriteTo(w io.Writer) (int64, error) { + buf := make([]byte, HeaderSize) + buf[0] = byte(h.version) + binary.LittleEndian.PutUint16(buf[1:3], h.shareSize) + binary.LittleEndian.PutUint16(buf[3:5], h.squareSize) + // TODO: Extensions + n, err := w.Write(buf) + return int64(n), err +} + +func (h *Header) ReadFrom(r io.Reader) (int64, error) { + buf := make([]byte, HeaderSize) + n, err := io.ReadFull(r, buf) + if err != nil { + return int64(n), err + } + + h.version = FileVersion(buf[0]) + h.shareSize = binary.LittleEndian.Uint16(buf[1:3]) + h.squareSize = binary.LittleEndian.Uint16(buf[3:5]) + + // TODO: Extensions + return int64(n), err +} + +func ReadHeader(r io.ReaderAt) (*Header, error) { + h := &Header{} + buf := make([]byte, HeaderSize) + _, err := r.ReadAt(buf, 0) + if err != nil { + return h, err + } + + h.version = FileVersion(buf[0]) + h.shareSize = binary.LittleEndian.Uint16(buf[1:3]) + h.squareSize = binary.LittleEndian.Uint16(buf[3:5]) + return h, nil +} diff --git a/share/store/file_test.go b/share/store/file_test.go new file mode 100644 index 0000000000..2d31496551 --- /dev/null +++ b/share/store/file_test.go @@ -0,0 +1,92 @@ +package store + +import ( + "context" + "crypto/sha256" + mrand "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +type createFile func(eds *rsmt2d.ExtendedDataSquare) File + +func testFileShare(t *testing.T, createFile createFile, size int) { + eds := edstest.RandEDS(t, size) + fl := createFile(eds) + + root, err := share.NewRoot(eds) + require.NoError(t, err) + + width := int(eds.Width()) + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + for i := 0; i < width*width; i++ { + axisIdx, shrIdx := i/width, i%width + if axisType == rsmt2d.Col { + axisIdx, shrIdx = shrIdx, axisIdx + } + + shr, prf, err := fl.Share(context.TODO(), axisType, axisIdx, shrIdx) + require.NoError(t, err) + + namespace := share.ParitySharesNamespace + if axisIdx < width/2 && shrIdx < width/2 { + namespace = share.GetNamespace(shr) + } + + axishash := root.RowRoots[axisIdx] + if axisType == rsmt2d.Col { + axishash = root.ColumnRoots[axisIdx] + } + + ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) + require.True(t, ok) + } + } +} + +func testFileDate(t *testing.T, createFile createFile, size int) { + // generate EDS with random data and some shares with the same namespace + namespace := sharetest.RandV0Namespace() + amount := 
mrand.Intn(size*size-1) + 1 + eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) + + f := createFile(eds) + + for i, root := range dah.RowRoots { + if !namespace.IsOutsideRange(root, root) { + nd, err := f.Data(context.Background(), namespace, i) + require.NoError(t, err) + ok := nd.Verify(root, namespace) + require.True(t, ok) + } + } +} + +func testFileAxisHalf(t *testing.T, createFile createFile, size int) { + eds := edstest.RandEDS(t, size) + fl := createFile(eds) + + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + for i := 0; i < size; i++ { + half, err := fl.AxisHalf(context.Background(), axisType, i) + require.NoError(t, err) + require.Equal(t, getAxis(eds, axisType, i)[:size], half) + } + } +} + +func testFileEds(t *testing.T, createFile createFile, size int) { + eds := edstest.RandEDS(t, size) + fl := createFile(eds) + + eds2, err := fl.EDS(context.Background()) + require.NoError(t, err) + require.True(t, eds.Equals(eds2)) +} diff --git a/share/store/mem_file.go b/share/store/mem_file.go index f57f4d58a9..36c17f002c 100644 --- a/share/store/mem_file.go +++ b/share/store/mem_file.go @@ -11,7 +11,7 @@ import ( "github.com/celestiaorg/celestia-node/share/ipld" ) -var _ EdsFile = (*MemFile)(nil) +var _ File = (*MemFile)(nil) type MemFile struct { Eds *rsmt2d.ExtendedDataSquare @@ -53,6 +53,18 @@ func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { shares := f.axis(rsmt2d.Row, rowIdx) + return ndDateFromShares(shares, namespace, rowIdx) +} + +func (f *MemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { + return f.Eds, nil +} + +func (f *MemFile) axis(axisType rsmt2d.Axis, axisIdx int) []share.Share { + return getAxis(f.Eds, axisType, axisIdx) +} + +func ndDateFromShares(shares []share.Share, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { bserv := ipld.NewMemBlockservice() batchAdder := ipld.NewNmtNodeAdder(context.TODO(), bserv, ipld.MaxSizeBatchOption(len(shares))) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares)/2), uint(rowIdx), @@ -85,16 +97,12 @@ func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) }, nil } -func (f *MemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { - return f.Eds, nil -} - -func (f *MemFile) axis(axisType rsmt2d.Axis, axisIdx int) []share.Share { +func getAxis(eds *rsmt2d.ExtendedDataSquare, axisType rsmt2d.Axis, axisIdx int) []share.Share { switch axisType { case rsmt2d.Row: - return f.Eds.Row(uint(axisIdx)) + return eds.Row(uint(axisIdx)) case rsmt2d.Col: - return f.Eds.Col(uint(axisIdx)) + return eds.Col(uint(axisIdx)) default: panic("unknown axis") } diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go index 4d82db2446..73c70483af 100644 --- a/share/store/mem_file_test.go +++ b/share/store/mem_file_test.go @@ -1,69 +1,75 @@ package store import ( - "context" - "crypto/sha256" - mrand "math/rand" "testing" - "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/sharetest" ) -func TestMemFileShare(t *testing.T) { - eds := edstest.RandEDS(t, 32) - root, err := share.NewRoot(eds) - require.NoError(t, err) - fl := &MemFile{Eds: eds} +func TestMemFile(t *testing.T) { + size := 32 + createMemFile := 
func(eds *rsmt2d.ExtendedDataSquare) File { + return &MemFile{Eds: eds} + } - width := int(eds.Width()) - for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { - for i := 0; i < width*width; i++ { - axisIdx, shrIdx := i/width, i%width - if axisType == rsmt2d.Col { - axisIdx, shrIdx = shrIdx, axisIdx - } + t.Run("Share", func(t *testing.T) { + testFileShare(t, createMemFile, size) + }) - shr, prf, err := fl.Share(context.TODO(), axisType, axisIdx, shrIdx) - require.NoError(t, err) + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, createMemFile, size) + }) - namespace := share.ParitySharesNamespace - if axisIdx < width/2 && shrIdx < width/2 { - namespace = share.GetNamespace(shr) - } + t.Run("Data", func(t *testing.T) { + testFileDate(t, createMemFile, size) + }) - axishash := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - axishash = root.ColumnRoots[axisIdx] - } + t.Run("EDS", func(t *testing.T) { + testFileEds(t, createMemFile, size) + }) +} - ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) - require.True(t, ok) - } +// BenchmarkAxisFromMemFile/Size:32/Axis:row/squareHalf:first(original)-10 269438 4743 ns/op +// BenchmarkAxisFromMemFile/Size:32/Axis:row/squareHalf:second(extended)-10 258612 4540 ns/op +// BenchmarkAxisFromMemFile/Size:32/Axis:col/squareHalf:first(original)-10 245673 4312 ns/op +// BenchmarkAxisFromMemFile/Size:32/Axis:col/squareHalf:second(extended)-10 274141 4541 ns/op +// BenchmarkAxisFromMemFile/Size:64/Axis:row/squareHalf:first(original)-10 132518 9809 ns/op +// BenchmarkAxisFromMemFile/Size:64/Axis:row/squareHalf:second(extended)-10 132085 9833 ns/op +// BenchmarkAxisFromMemFile/Size:64/Axis:col/squareHalf:first(original)-10 112770 9613 ns/op +// BenchmarkAxisFromMemFile/Size:64/Axis:col/squareHalf:second(extended)-10 114934 9927 ns/op +// BenchmarkAxisFromMemFile/Size:128/Axis:row/squareHalf:first(original)-10 68439 19694 ns/op +// BenchmarkAxisFromMemFile/Size:128/Axis:row/squareHalf:second(extended)-10 64341 20275 ns/op +// BenchmarkAxisFromMemFile/Size:128/Axis:col/squareHalf:first(original)-10 66495 20180 ns/op +// BenchmarkAxisFromMemFile/Size:128/Axis:col/squareHalf:second(extended)-10 61392 20912 ns/op +func BenchmarkAxisFromMemFile(b *testing.B) { + minSize, maxSize := 32, 128 + newFile := func(size int) File { + eds := edstest.RandEDS(b, size) + return &MemFile{Eds: eds} } + benchGetAxisFromFile(b, newFile, minSize, maxSize) } -func TestMemFileDate(t *testing.T) { - size := 32 - - // generate EDS with random data and some shares with the same namespace - namespace := sharetest.RandV0Namespace() - amount := mrand.Intn(size*size-1) + 1 - eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) - - file := &MemFile{Eds: eds} - - for i, root := range dah.RowRoots { - if !namespace.IsOutsideRange(root, root) { - nd, err := file.Data(context.Background(), namespace, i) - require.NoError(t, err) - ok := nd.Verify(root, namespace) - require.True(t, ok) - } +// BenchmarkShareFromMemFile/Size:32/Axis:row/squareHalf:first(original)-10 17586 66750 ns/op +// BenchmarkShareFromMemFile/Size:32/Axis:row/squareHalf:second(extended)-10 18468 68188 ns/op +// BenchmarkShareFromMemFile/Size:32/Axis:col/squareHalf:first(original)-10 17899 66697 ns/op +// BenchmarkShareFromMemFile/Size:32/Axis:col/squareHalf:second(extended)-10 18092 65383 ns/op +// BenchmarkShareFromMemFile/Size:64/Axis:row/squareHalf:first(original)-10 8922 135033 ns/op +// 
BenchmarkShareFromMemFile/Size:64/Axis:row/squareHalf:second(extended)-10 9652 130358 ns/op +// BenchmarkShareFromMemFile/Size:64/Axis:col/squareHalf:first(original)-10 8041 130971 ns/op +// BenchmarkShareFromMemFile/Size:64/Axis:col/squareHalf:second(extended)-10 8778 127361 ns/op +// BenchmarkShareFromMemFile/Size:128/Axis:row/squareHalf:first(original)-10 4464 260158 ns/op +// BenchmarkShareFromMemFile/Size:128/Axis:row/squareHalf:second(extended)-10 4464 248248 ns/op +// BenchmarkShareFromMemFile/Size:128/Axis:col/squareHalf:first(original)-10 4486 257392 ns/op +// BenchmarkShareFromMemFile/Size:128/Axis:col/squareHalf:second(extended)-10 4335 248022 ns/op +func BenchmarkShareFromMemFile(b *testing.B) { + minSize, maxSize := 32, 128 + newFile := func(size int) File { + eds := edstest.RandEDS(b, size) + return &MemFile{Eds: eds} } + + benchGetShareFromFile(b, newFile, minSize, maxSize) } diff --git a/share/store/ods_file.go b/share/store/ods_file.go new file mode 100644 index 0000000000..f1793d7187 --- /dev/null +++ b/share/store/ods_file.go @@ -0,0 +1,265 @@ +package store + +import ( + "context" + "fmt" + "io" + "os" + + "golang.org/x/exp/mmap" + "golang.org/x/sync/errgroup" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" +) + +var _ File = (*OdsFile)(nil) + +type OdsFile struct { + path string + hdr *Header + fl fileBackend +} + +type fileBackend interface { + io.ReaderAt + io.Closer +} + +// OpenOdsFile opens an existing file. File has to be closed after usage. +func OpenOdsFile(path string) (*OdsFile, error) { + f, err := mmap.Open(path) + if err != nil { + return nil, err + } + + h, err := ReadHeader(f) + if err != nil { + return nil, err + } + + // TODO(WWondertan): Validate header + return &OdsFile{ + path: path, + hdr: h, + fl: f, + }, nil +} + +func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare) (*OdsFile, error) { + f, err := os.Create(path) + if err != nil { + return nil, err + } + + h := &Header{ + shareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field + squareSize: uint16(eds.Width()), + version: FileV0, + } + + if _, err = h.WriteTo(f); err != nil { + return nil, err + } + + for i := uint(0); i < eds.Width()/2; i++ { + for j := uint(0); j < eds.Width()/2; j++ { + // TODO: Implemented buffered write through io.CopyBuffer + shr := eds.GetCell(i, j) + if _, err := f.Write(shr); err != nil { + return nil, err + } + } + } + + return &OdsFile{ + path: path, + fl: f, + hdr: h, + }, f.Sync() +} + +func (f *OdsFile) Size() int { + return f.hdr.SquareSize() +} + +func (f *OdsFile) Close() error { + return f.fl.Close() +} + +func (f *OdsFile) Header() *Header { + return f.hdr +} + +func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + // read axis from file if axis is in the first quadrant + if axisIdx < f.Size()/2 { + return f.odsAxisHalf(axisType, axisIdx) + } + + // compute axis if axis is outside the first quadrant + return computeAxisHalf(ctx, f, axisType, axisIdx) +} + +func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + switch axisType { + case rsmt2d.Col: + return f.readCol(axisIdx) + case rsmt2d.Row: + return f.readRow(axisIdx) + } + return nil, fmt.Errorf("unknown axis") +} + +func (f *OdsFile) readRow(idx int) ([]share.Share, error) { + shrLn := int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) / 2 + + shrs := 
make([]share.Share, odsLn) + + pos := idx * odsLn + offset := pos*shrLn + HeaderSize + + axsData := make([]byte, odsLn*shrLn) + if _, err := f.fl.ReadAt(axsData, int64(offset)); err != nil { + return nil, err + } + + for i := range shrs { + shrs[i] = axsData[i*shrLn : (i+1)*shrLn] + } + return shrs, nil +} + +func (f *OdsFile) readCol(idx int) ([]share.Share, error) { + shrLn := int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) / 2 + + shrs := make([]share.Share, odsLn) + + for i := 0; i < odsLn; i++ { + pos := idx + i*odsLn + offset := pos*shrLn + HeaderSize + + shr := make(share.Share, shrLn) + if _, err := f.fl.ReadAt(shr, int64(offset)); err != nil { + return nil, err + } + shrs[i] = shr + } + return shrs, nil +} + +func computeAxisHalf( + ctx context.Context, + f *OdsFile, + axisType rsmt2d.Axis, + axisIdx int, +) ([]share.Share, error) { + shares := make([]share.Share, f.Size()/2) + + // extend opposite half of the square while collecting shares for the first half of required axis + g, ctx := errgroup.WithContext(ctx) + opposite := oppositeAxis(axisType) + for i := 0; i < f.Size()/2; i++ { + i := i + g.Go(func() error { + ax, err := f.axis(ctx, opposite, i) + if err != nil { + return err + } + shares[i] = ax[axisIdx] + return nil + }) + } + + err := g.Wait() + return shares, err +} + +func (f *OdsFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + original, err := f.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return nil, err + } + + return extendShares(original) +} + +func extendShares(original []share.Share) ([]share.Share, error) { + parity, err := rsmt2d.NewLeoRSCodec().Encode(original) + if err != nil { + return nil, err + } + + shares := make([]share.Share, 0, len(original)+len(parity)) + shares = append(shares, original...) + shares = append(shares, parity...) 
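+	// ordering matters: an extended axis is the original half followed by
+	// the parity half, so indexes >= len(original) address parity shares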
+ + return shares, nil +} + +func (f *OdsFile) Share( + ctx context.Context, + axisType rsmt2d.Axis, + axisIdx, shrIdx int, +) (share.Share, nmt.Proof, error) { + shares, err := f.axis(ctx, axisType, axisIdx) + if err != nil { + return nil, nmt.Proof{}, err + } + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) + for _, shr := range shares { + err := tree.Push(shr) + if err != nil { + return nil, nmt.Proof{}, err + } + } + + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return nil, nmt.Proof{}, err + } + + return shares[shrIdx], proof, nil +} + +func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + shares, err := f.axis(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return share.NamespacedRow{}, err + } + return ndDateFromShares(shares, namespace, rowIdx) +} + +func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { + shrLn := int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) / 2 + + buf := make([]byte, odsLn*odsLn*shrLn) + if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { + return nil, err + } + + shrs := make([][]byte, odsLn*odsLn) + for i := 0; i < odsLn; i++ { + for j := 0; j < odsLn; j++ { + pos := i*odsLn + j + shrs[pos] = buf[pos*shrLn : (pos+1)*shrLn] + } + } + + treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2)) + return rsmt2d.ComputeExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn) +} + +func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis { + if axis == rsmt2d.Col { + return rsmt2d.Row + } + return rsmt2d.Col +} diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go new file mode 100644 index 0000000000..2541c447af --- /dev/null +++ b/share/store/ods_file_test.go @@ -0,0 +1,162 @@ +package store + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestCreateOdsFile(t *testing.T) { + path := t.TempDir() + "/testfile" + edsIn := edstest.RandEDS(t, 8) + + _, err := CreateOdsFile(path, edsIn) + require.NoError(t, err) + + f, err := OpenOdsFile(path) + require.NoError(t, err) + edsOut, err := f.EDS(context.TODO()) + require.NoError(t, err) + assert.True(t, edsIn.Equals(edsOut)) +} + +func TestOdsFile(t *testing.T) { + size := 32 + createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { + path := t.TempDir() + "/testfile" + fl, err := CreateOdsFile(path, eds) + require.NoError(t, err) + return fl + } + + t.Run("Share", func(t *testing.T) { + testFileShare(t, createOdsFile, size) + }) + + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, createOdsFile, size) + }) + + t.Run("Data", func(t *testing.T) { + testFileDate(t, createOdsFile, size) + }) + + t.Run("EDS", func(t *testing.T) { + testFileEds(t, createOdsFile, size) + }) +} + +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 435496 2488 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 814 1279260 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 57886 21029 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 2365 493366 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 272930 3932 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 235 4881303 ns/op +// 
BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 28566 41591 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 758 1605038 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 145546 7922 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 64 17827662 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14073 84737 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 127 11064373 ns/op +func BenchmarkAxisFromOdsFile(b *testing.B) { + minSize, maxSize := 32, 128 + dir := b.TempDir() + newFile := func(size int) File { + eds := edstest.RandEDS(b, size) + path := dir + "/testfile" + f, err := CreateOdsFile(path, eds) + require.NoError(b, err) + return f + } + benchGetAxisFromFile(b, newFile, minSize, maxSize) +} + +// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 10316 111701 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 778 1352715 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 8174 130810 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 1890 646434 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 4935 214392 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 235 5023812 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 4323 252924 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 567 1870541 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 2424 452331 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 66 21867956 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2100 542252 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 100 14112671 ns/op +func BenchmarkShareFromOdsFile(b *testing.B) { + minSize, maxSize := 32, 128 + dir := b.TempDir() + newFile := func(size int) File { + eds := edstest.RandEDS(b, size) + path := dir + "/testfile" + f, err := CreateOdsFile(path, eds) + require.NoError(b, err) + return f + } + + benchGetShareFromFile(b, newFile, minSize, maxSize) +} + +type squareHalf int + +func (q squareHalf) String() string { + switch q { + case 0: + return "first(original)" + case 1: + return "second(extended)" + } + return "unknown" +} + +func benchGetAxisFromFile(b *testing.B, newFile func(size int) File, minSize, maxSize int) { + for size := minSize; size <= maxSize; size *= 2 { + f := newFile(size) + + // loop over all possible axis types and quadrants + for _, axisType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for _, squareHalf := range []squareHalf{0, 1} { + name := fmt.Sprintf("Size:%v/Axis:%s/squareHalf:%s", size, axisType, squareHalf) + b.Run(name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := f.AxisHalf(context.TODO(), axisType, f.Size()/2*int(squareHalf)) + require.NoError(b, err) + } + }) + } + } + } +} + +func benchGetShareFromFile(b *testing.B, newFile func(size int) File, minSize, maxSize int) { + for size := minSize; size <= maxSize; size *= 2 { + f := newFile(size) + + // loop over all possible axis types and quadrants + for _, axisType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for _, squareHalf := range []squareHalf{0, 1} { + name := 
fmt.Sprintf("Size:%v/Axis:%s/squareHalf:%s", size, axisType, squareHalf) + b.Run(name, func(b *testing.B) { + idx := f.Size() - 1 + // warm up cache + _, _, err := f.Share(context.TODO(), axisType, f.Size()/2*int(squareHalf), idx) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err = f.Share(context.TODO(), axisType, f.Size()/2*int(squareHalf), idx) + require.NoError(b, err) + } + }) + } + } + } +} From e34394b8b815d46bb715974922f8efb6ec5595ee Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 8 Dec 2023 17:26:15 +0800 Subject: [PATCH 034/132] single read of ods for recomputed axes --- share/store/ods_file.go | 66 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 61 insertions(+), 5 deletions(-) diff --git a/share/store/ods_file.go b/share/store/ods_file.go index f1793d7187..5e79b7678f 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -100,8 +100,9 @@ func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx in return f.odsAxisHalf(axisType, axisIdx) } - // compute axis if axis is outside the first quadrant - return computeAxisHalf(ctx, f, axisType, axisIdx) + // compute axis if it is outside the first quadrant + ods := f.readOds(oppositeAxis(axisType)) + return computeAxisHalf(ctx, ods, axisType, axisIdx) } func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { @@ -114,6 +115,56 @@ func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, return nil, fmt.Errorf("unknown axis") } +type odsInMemFile struct { + File + axisType rsmt2d.Axis + shares [][]share.Share +} + +func (f *odsInMemFile) Size() int { + return len(f.shares) * 2 +} + +func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + if axisType != f.axisType { + return nil, fmt.Errorf("order of shares is not preserved") + } + return f.shares[axisIdx], nil +} + +func (f *OdsFile) readOds(axisType rsmt2d.Axis) *odsInMemFile { + shrLn := int(f.hdr.shareSize) + odsLn := int(f.hdr.squareSize) / 2 + + buf := make([]byte, odsLn*odsLn*shrLn) + if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { + return nil + } + + shrs := make([][]share.Share, odsLn) + for i := 0; i < odsLn; i++ { + for j := 0; j < odsLn; j++ { + pos := i*odsLn + j + if axisType == rsmt2d.Row { + if shrs[i] == nil { + shrs[i] = make([]share.Share, odsLn) + } + shrs[i][j] = buf[pos*shrLn : (pos+1)*shrLn] + } else { + if shrs[j] == nil { + shrs[j] = make([]share.Share, odsLn) + } + shrs[j][i] = buf[pos*shrLn : (pos+1)*shrLn] + } + } + } + + return &odsInMemFile{ + axisType: axisType, + shares: shrs, + } +} + func (f *OdsFile) readRow(idx int) ([]share.Share, error) { shrLn := int(f.hdr.shareSize) odsLn := int(f.hdr.squareSize) / 2 @@ -155,7 +206,7 @@ func (f *OdsFile) readCol(idx int) ([]share.Share, error) { func computeAxisHalf( ctx context.Context, - f *OdsFile, + f File, axisType rsmt2d.Axis, axisIdx int, ) ([]share.Share, error) { @@ -167,11 +218,16 @@ func computeAxisHalf( for i := 0; i < f.Size()/2; i++ { i := i g.Go(func() error { - ax, err := f.axis(ctx, opposite, i) + original, err := f.AxisHalf(ctx, opposite, i) + if err != nil { + return err + } + + axis, err := extendShares(original) if err != nil { return err } - shares[i] = ax[axisIdx] + shares[i] = axis[axisIdx] return nil }) } From 698ae46236a31230d59dc798b79cf7d36cb6e0b3 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 8 Dec 2023 19:15:13 +0800 Subject: [PATCH 035/132] reduce parity allocations --- 
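Instead of extending the opposite half and indexing into the full axis,
the parity share is now taken straight from the codec output. In sketch
form (a hypothetical helper with the same arithmetic as the
computeAxisHalf change below; the diff relies on rsmt2d's Encode
returning only the parity half):

	// parityShareAt encodes one original half and picks the single
	// parity share addressed by extended-axis index axisIdx >= width/2.
	func parityShareAt(codec rsmt2d.Codec, original [][]byte, axisIdx, width int) ([]byte, error) {
		parity, err := codec.Encode(original)
		if err != nil {
			return nil, err
		}
		return parity[axisIdx-width/2], nil
	}

This skips the extra original+parity slice that extendShares builds for
every recomputed row.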
share/store/ods_file.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/store/ods_file.go b/share/store/ods_file.go index 5e79b7678f..def65f94e2 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -223,11 +223,11 @@ func computeAxisHalf( return err } - axis, err := extendShares(original) + parity, err := rsmt2d.NewLeoRSCodec().Encode(original) if err != nil { return err } - shares[i] = axis[axisIdx] + shares[i] = parity[axisIdx-f.Size()/2] return nil }) } From 90672ec6916f2ab074b0dc87f27933531bfeb93d Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 12 Dec 2023 20:24:16 +0800 Subject: [PATCH 036/132] reuse allocated memory --- share/store/ods_file.go | 80 +++++++++++++++++++++++++----------- share/store/ods_file_test.go | 73 +++++++++++++++++++------------- 2 files changed, 100 insertions(+), 53 deletions(-) diff --git a/share/store/ods_file.go b/share/store/ods_file.go index def65f94e2..0c0d219427 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -5,8 +5,8 @@ import ( "fmt" "io" "os" + "sync" - "golang.org/x/exp/mmap" "golang.org/x/sync/errgroup" "github.com/celestiaorg/celestia-app/pkg/wrapper" @@ -21,7 +21,9 @@ var _ File = (*OdsFile)(nil) type OdsFile struct { path string hdr *Header - fl fileBackend + fl *os.File + + memPool memPool } type fileBackend interface { @@ -31,7 +33,7 @@ type fileBackend interface { // OpenOdsFile opens an existing file. File has to be closed after usage. func OpenOdsFile(path string) (*OdsFile, error) { - f, err := mmap.Open(path) + f, err := os.Open(path) if err != nil { return nil, err } @@ -49,7 +51,38 @@ func OpenOdsFile(path string) (*OdsFile, error) { }, nil } -func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare) (*OdsFile, error) { +type memPool struct { + codec rsmt2d.Codec + shares, ods *sync.Pool +} + +func newMemPool(codec rsmt2d.Codec, size int) memPool { + shares := &sync.Pool{ + New: func() interface{} { + shrs := make([][]share.Share, size) + for i := 0; i < size; i++ { + if shrs[i] == nil { + shrs[i] = make([]share.Share, size) + } + } + return shrs + }, + } + + ods := &sync.Pool{ + New: func() interface{} { + buf := make([]byte, size*share.Size) + return buf + }, + } + return memPool{ + shares: shares, + ods: ods, + codec: codec, + } +} + +func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare, memPool memPool) (*OdsFile, error) { f, err := os.Create(path) if err != nil { return nil, err @@ -76,9 +109,10 @@ func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare) (*OdsFile, error } return &OdsFile{ - path: path, - fl: f, - hdr: h, + path: path, + fl: f, + hdr: h, + memPool: memPool, }, f.Sync() } @@ -100,9 +134,10 @@ func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx in return f.odsAxisHalf(axisType, axisIdx) } - // compute axis if it is outside the first quadrant ods := f.readOds(oppositeAxis(axisType)) - return computeAxisHalf(ctx, ods, axisType, axisIdx) + defer f.memPool.shares.Put(ods.shares) + + return computeAxisHalf(ctx, ods, f.memPool.codec, axisType, axisIdx) } func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { @@ -136,25 +171,21 @@ func (f *OdsFile) readOds(axisType rsmt2d.Axis) *odsInMemFile { shrLn := int(f.hdr.shareSize) odsLn := int(f.hdr.squareSize) / 2 - buf := make([]byte, odsLn*odsLn*shrLn) - if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { - return nil - } + buf := f.memPool.ods.Get().([]byte) + defer f.memPool.ods.Put(buf) - shrs := make([][]share.Share, odsLn) 
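+	// Both the read buffer and the shares matrix are taken from sync.Pool,
+	// so repeated readOds calls reuse their O(n²) allocations instead of
+	// paying for them on every recomputed axis.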
+ shrs := f.memPool.shares.Get().([][]share.Share) for i := 0; i < odsLn; i++ { + pos := HeaderSize + odsLn*shrLn*i + if _, err := f.fl.ReadAt(buf, int64(pos)); err != nil { + return nil + } + for j := 0; j < odsLn; j++ { - pos := i*odsLn + j if axisType == rsmt2d.Row { - if shrs[i] == nil { - shrs[i] = make([]share.Share, odsLn) - } - shrs[i][j] = buf[pos*shrLn : (pos+1)*shrLn] + shrs[i][j] = buf[j*shrLn : (j+1)*shrLn] } else { - if shrs[j] == nil { - shrs[j] = make([]share.Share, odsLn) - } - shrs[j][i] = buf[pos*shrLn : (pos+1)*shrLn] + shrs[j][i] = buf[j*shrLn : (j+1)*shrLn] } } } @@ -207,6 +238,7 @@ func (f *OdsFile) readCol(idx int) ([]share.Share, error) { func computeAxisHalf( ctx context.Context, f File, + codec rsmt2d.Codec, axisType rsmt2d.Axis, axisIdx int, ) ([]share.Share, error) { @@ -223,7 +255,7 @@ func computeAxisHalf( return err } - parity, err := rsmt2d.NewLeoRSCodec().Encode(original) + parity, err := codec.Encode(original) if err != nil { return err } diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go index 2541c447af..55c63f72e6 100644 --- a/share/store/ods_file_test.go +++ b/share/store/ods_file_test.go @@ -16,8 +16,9 @@ import ( func TestCreateOdsFile(t *testing.T) { path := t.TempDir() + "/testfile" edsIn := edstest.RandEDS(t, 8) - - _, err := CreateOdsFile(path, edsIn) + codec := rsmt2d.NewLeoRSCodec() + mem := newMemPool(codec, int(edsIn.Width())) + _, err := CreateOdsFile(path, edsIn, mem) require.NoError(t, err) f, err := OpenOdsFile(path) @@ -29,9 +30,11 @@ func TestCreateOdsFile(t *testing.T) { func TestOdsFile(t *testing.T) { size := 32 + codec := rsmt2d.NewLeoRSCodec() + mem := newMemPool(codec, size) createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { path := t.TempDir() + "/testfile" - fl, err := CreateOdsFile(path, eds) + fl, err := CreateOdsFile(path, eds, mem) require.NoError(t, err) return fl } @@ -53,50 +56,62 @@ func TestOdsFile(t *testing.T) { }) } -// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 435496 2488 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 814 1279260 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 57886 21029 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 2365 493366 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 272930 3932 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 235 4881303 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 28566 41591 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 758 1605038 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 145546 7922 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 64 17827662 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14073 84737 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 127 11064373 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 429498 2464 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 5889 192904 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 56209 20926 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 5480 193249 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 287070 
4003 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 2212 506601 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 28990 41353 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 2358 511020 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 186265 6309 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 610 1814819 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14460 82613 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 640 1819996 ns/op func BenchmarkAxisFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() + codec := rsmt2d.NewLeoRSCodec() + mem := make(map[int]memPool) + for i := minSize; i <= maxSize; i *= 2 { + mem[i] = newMemPool(codec, i) + } + newFile := func(size int) File { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, eds) + f, err := CreateOdsFile(path, eds, mem[size]) require.NoError(b, err) return f } benchGetAxisFromFile(b, newFile, minSize, maxSize) } -// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 10316 111701 ns/op -// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 778 1352715 ns/op -// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 8174 130810 ns/op -// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 1890 646434 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 4935 214392 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 235 5023812 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 4323 252924 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 567 1870541 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 2424 452331 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 66 21867956 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2100 542252 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 100 14112671 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 10333 113351 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 3794 319437 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 7201 139066 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 3612 317520 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 5462 220543 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 1586 775291 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 4611 257328 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 1534 788619 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 2413 448675 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 517 2427473 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2200 528681 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 464 2385446 ns/op func BenchmarkShareFromOdsFile(b *testing.B) { minSize, maxSize := 32, 
128 dir := b.TempDir() + codec := rsmt2d.NewLeoRSCodec() + mem := make(map[int]memPool) + for i := minSize; i <= maxSize; i *= 2 { + mem[i] = newMemPool(codec, i) + } + newFile := func(size int) File { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, eds) + f, err := CreateOdsFile(path, eds, mem[size]) require.NoError(b, err) return f } From 0a9daec05e863789b5d5375c2bdb3f4616fd772e Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 12 Dec 2023 20:48:50 +0800 Subject: [PATCH 037/132] minor rafactoring --- share/store/ods_file.go | 97 ++++++++++++++++++++++-------------- share/store/ods_file_test.go | 22 +++----- 2 files changed, 66 insertions(+), 53 deletions(-) diff --git a/share/store/ods_file.go b/share/store/ods_file.go index 0c0d219427..b02dc7d742 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -51,38 +51,7 @@ func OpenOdsFile(path string) (*OdsFile, error) { }, nil } -type memPool struct { - codec rsmt2d.Codec - shares, ods *sync.Pool -} - -func newMemPool(codec rsmt2d.Codec, size int) memPool { - shares := &sync.Pool{ - New: func() interface{} { - shrs := make([][]share.Share, size) - for i := 0; i < size; i++ { - if shrs[i] == nil { - shrs[i] = make([]share.Share, size) - } - } - return shrs - }, - } - - ods := &sync.Pool{ - New: func() interface{} { - buf := make([]byte, size*share.Size) - return buf - }, - } - return memPool{ - shares: shares, - ods: ods, - codec: codec, - } -} - -func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare, memPool memPool) (*OdsFile, error) { +func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare, memPools memPools) (*OdsFile, error) { f, err := os.Create(path) if err != nil { return nil, err @@ -112,7 +81,7 @@ func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare, memPool memPool) path: path, fl: f, hdr: h, - memPool: memPool, + memPool: memPools.get(int(h.squareSize) / 2), }, f.Sync() } @@ -134,7 +103,10 @@ func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx in return f.odsAxisHalf(axisType, axisIdx) } - ods := f.readOds(oppositeAxis(axisType)) + ods, err := f.readOds(oppositeAxis(axisType)) + if err != nil { + return nil, err + } defer f.memPool.shares.Put(ods.shares) return computeAxisHalf(ctx, ods, f.memPool.codec, axisType, axisIdx) @@ -167,7 +139,7 @@ func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx return f.shares[axisIdx], nil } -func (f *OdsFile) readOds(axisType rsmt2d.Axis) *odsInMemFile { +func (f *OdsFile) readOds(axisType rsmt2d.Axis) (*odsInMemFile, error) { shrLn := int(f.hdr.shareSize) odsLn := int(f.hdr.squareSize) / 2 @@ -178,7 +150,7 @@ func (f *OdsFile) readOds(axisType rsmt2d.Axis) *odsInMemFile { for i := 0; i < odsLn; i++ { pos := HeaderSize + odsLn*shrLn*i if _, err := f.fl.ReadAt(buf, int64(pos)); err != nil { - return nil + return nil, err } for j := 0; j < odsLn; j++ { @@ -193,7 +165,7 @@ func (f *OdsFile) readOds(axisType rsmt2d.Axis) *odsInMemFile { return &odsInMemFile{ axisType: axisType, shares: shrs, - } + }, nil } func (f *OdsFile) readRow(idx int) ([]share.Share, error) { @@ -345,6 +317,57 @@ func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { return rsmt2d.ComputeExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn) } +type memPools struct { + pools map[int]memPool + codec rsmt2d.Codec +} + +type memPool struct { + codec rsmt2d.Codec + shares, ods *sync.Pool +} + +func newMemPools(codec rsmt2d.Codec) memPools { + return memPools{ + pools: 
make(map[int]memPool), + codec: codec, + } +} +func (m memPools) get(size int) memPool { + if pool, ok := m.pools[size]; ok { + return pool + } + pool := newMemPool(m.codec, size) + m.pools[size] = pool + return pool +} + +func newMemPool(codec rsmt2d.Codec, size int) memPool { + shares := &sync.Pool{ + New: func() interface{} { + shrs := make([][]share.Share, size) + for i := 0; i < size; i++ { + if shrs[i] == nil { + shrs[i] = make([]share.Share, size) + } + } + return shrs + }, + } + + ods := &sync.Pool{ + New: func() interface{} { + buf := make([]byte, size*share.Size) + return buf + }, + } + return memPool{ + shares: shares, + ods: ods, + codec: codec, + } +} + func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis { if axis == rsmt2d.Col { return rsmt2d.Row diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go index 55c63f72e6..e4ee3be5d8 100644 --- a/share/store/ods_file_test.go +++ b/share/store/ods_file_test.go @@ -16,8 +16,7 @@ import ( func TestCreateOdsFile(t *testing.T) { path := t.TempDir() + "/testfile" edsIn := edstest.RandEDS(t, 8) - codec := rsmt2d.NewLeoRSCodec() - mem := newMemPool(codec, int(edsIn.Width())) + mem := newMemPools(rsmt2d.NewLeoRSCodec()) _, err := CreateOdsFile(path, edsIn, mem) require.NoError(t, err) @@ -30,8 +29,7 @@ func TestCreateOdsFile(t *testing.T) { func TestOdsFile(t *testing.T) { size := 32 - codec := rsmt2d.NewLeoRSCodec() - mem := newMemPool(codec, size) + mem := newMemPools(rsmt2d.NewLeoRSCodec()) createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { path := t.TempDir() + "/testfile" fl, err := CreateOdsFile(path, eds, mem) @@ -71,16 +69,12 @@ func TestOdsFile(t *testing.T) { func BenchmarkAxisFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() - codec := rsmt2d.NewLeoRSCodec() - mem := make(map[int]memPool) - for i := minSize; i <= maxSize; i *= 2 { - mem[i] = newMemPool(codec, i) - } + mem := newMemPools(rsmt2d.NewLeoRSCodec()) newFile := func(size int) File { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, eds, mem[size]) + f, err := CreateOdsFile(path, eds, mem) require.NoError(b, err) return f } @@ -102,16 +96,12 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { func BenchmarkShareFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() - codec := rsmt2d.NewLeoRSCodec() - mem := make(map[int]memPool) - for i := minSize; i <= maxSize; i *= 2 { - mem[i] = newMemPool(codec, i) - } + mem := newMemPools(rsmt2d.NewLeoRSCodec()) newFile := func(size int) File { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, eds, mem[size]) + f, err := CreateOdsFile(path, eds, mem) require.NoError(b, err) return f } From fd824752854d72334fa7464f4507e9847106f453 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sun, 17 Dec 2023 14:52:34 +0800 Subject: [PATCH 038/132] allow Store implementation to choose proof axis for Share --- share/share.go | 28 +++++++++++++++++++++++++++ share/store/eds_file.go | 15 +++++++++++++-- share/store/mem_file.go | 22 +++++++++++++++------ share/store/mem_file_test.go | 37 +++++++++++++----------------------- 4 files changed, 70 insertions(+), 32 deletions(-) diff --git a/share/share.go b/share/share.go index 4079028d82..f578817a48 100644 --- a/share/share.go +++ b/share/share.go @@ -2,10 +2,13 @@ package share import ( "bytes" + "crypto/sha256" "encoding/hex" "fmt" "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" ) var ( @@ -40,6 
+43,31 @@ func GetData(s Share) []byte { return s[NamespaceSize:] } +// ShareWithProof contains data with corresponding Merkle Proof +type ShareWithProof struct { //nolint: revive + // Share is a full data including namespace + Share + // Proof is a Merkle Proof of current share + Proof *nmt.Proof + // Axis is a type of axis against which the share proof is computed + Axis rsmt2d.Axis +} + +// Validate validates inclusion of the share under the given root CID. +func (s *ShareWithProof) Validate(rootHash []byte, shrIdx, axisIdx, edsSize int) bool { + isParity := shrIdx >= edsSize/2 || axisIdx >= edsSize/2 + namespace := ParitySharesNamespace + if !isParity { + namespace = GetNamespace(s.Share) + } + return s.Proof.VerifyInclusion( + sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally + namespace.ToNMT(), + [][]byte{s.Share}, + rootHash, + ) +} + // DataHash is a representation of the Root hash. type DataHash []byte diff --git a/share/store/eds_file.go b/share/store/eds_file.go index 0bedd4a5ec..344c4e6893 100644 --- a/share/store/eds_file.go +++ b/share/store/eds_file.go @@ -4,7 +4,6 @@ import ( "context" "io" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -15,7 +14,7 @@ type EdsFile interface { // Size returns square size of the file. Size() int // Share returns share and corresponding proof for the given axis and share index in this axis. - Share(ctx context.Context, axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) + Share(ctx context.Context, x, y int, proofType ProofType) (*share.ShareWithProof, error) // AxisHalf returns shares for the first half of the axis of the given type and index. AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) // Data returns data for the given namespace and row index. @@ -23,3 +22,15 @@ type EdsFile interface { // EDS returns extended data square stored in the file. EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) } + +// ProofType represents type of proof that should be computed for the share. +type ProofType int + +const ( + // ProofTypeAny indicates that any proof could be computed for the share. + ProofTypeAny ProofType = iota + // ProofTypeRow indicates that only row inclusion proof should be computed for the share. + ProofTypeRow + // ProofTypeColumn indicates that only column inclusion proof should be computed for the share. 
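+	// As an illustration (not part of this patch), a caller verifying a share
+	// against a column root would do roughly the following, mirroring
+	// mem_file_test.go, where root is the DA header and edsWidth the square size:
+	//
+	//	shr, err := f.Share(ctx, x, y, ProofTypeColumn)
+	//	ok := shr.Validate(root.ColumnRoots[x], x, y, edsWidth)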
+ ProofTypeColumn +) diff --git a/share/store/mem_file.go b/share/store/mem_file.go index f57f4d58a9..afd90a8fcd 100644 --- a/share/store/mem_file.go +++ b/share/store/mem_file.go @@ -27,24 +27,34 @@ func (f *MemFile) Size() int { func (f *MemFile) Share( _ context.Context, - axisType rsmt2d.Axis, - axisIdx, shrIdx int, -) (share.Share, nmt.Proof, error) { + x, y int, + proofType ProofType, +) (*share.ShareWithProof, error) { + axisType := rsmt2d.Row + axisIdx, shrIdx := y, x + if proofType == ProofTypeColumn { + axisType = rsmt2d.Col + axisIdx, shrIdx = x, y + } shares := f.axis(axisType, axisIdx) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) for _, shr := range shares { err := tree.Push(shr) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } } proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } - return shares[shrIdx], proof, nil + return &share.ShareWithProof{ + Share: shares[shrIdx], + Proof: &proof, + Axis: axisType, + }, nil } func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go index 4d82db2446..4db89541a1 100644 --- a/share/store/mem_file_test.go +++ b/share/store/mem_file_test.go @@ -2,14 +2,11 @@ package store import ( "context" - "crypto/sha256" mrand "math/rand" "testing" "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/sharetest" @@ -22,28 +19,20 @@ func TestMemFileShare(t *testing.T) { fl := &MemFile{Eds: eds} width := int(eds.Width()) - for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { - for i := 0; i < width*width; i++ { - axisIdx, shrIdx := i/width, i%width - if axisType == rsmt2d.Col { - axisIdx, shrIdx = shrIdx, axisIdx - } - - shr, prf, err := fl.Share(context.TODO(), axisType, axisIdx, shrIdx) - require.NoError(t, err) - - namespace := share.ParitySharesNamespace - if axisIdx < width/2 && shrIdx < width/2 { - namespace = share.GetNamespace(shr) + for _, proofType := range []ProofType{ProofTypeAny, ProofTypeRow, ProofTypeColumn} { + for x := 0; x < width; x++ { + for y := 0; y < width; y++ { + shr, err := fl.Share(context.TODO(), x, y, proofType) + require.NoError(t, err) + + axishash := root.RowRoots[y] + if proofType == ProofTypeColumn { + axishash = root.ColumnRoots[x] + } + + ok := shr.Validate(axishash, x, y, width) + require.True(t, ok) } - - axishash := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - axishash = root.ColumnRoots[axisIdx] - } - - ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) - require.True(t, ok) } } } From 17e98216acde793cde9ababa024aac09176423ee Mon Sep 17 00:00:00 2001 From: Vlad Date: Sun, 17 Dec 2023 14:56:39 +0800 Subject: [PATCH 039/132] move shareWithProof outside to share pkg --- api/docgen/examples.go | 2 +- share/eds/byzantine/bad_encoding.go | 70 ++++++++++-- share/eds/byzantine/bad_encoding_test.go | 2 +- share/eds/byzantine/byzantine.go | 16 ++- share/eds/byzantine/share_proof.go | 134 ----------------------- share/eds/byzantine/share_proof_test.go | 83 -------------- share/ipld/get.go | 74 ++++++++++++- share/ipld/get_test.go | 84 ++++++++++++++ 8 files changed, 225 insertions(+), 240 deletions(-) delete mode 100644 share/eds/byzantine/share_proof.go delete mode 100644 
share/eds/byzantine/share_proof_test.go create mode 100644 share/ipld/get_test.go diff --git a/api/docgen/examples.go b/api/docgen/examples.go index b873e7e050..2b32a80186 100644 --- a/api/docgen/examples.go +++ b/api/docgen/examples.go @@ -62,7 +62,7 @@ var ExampleValues = map[reflect.Type]interface{}{ &byzantine.ErrByzantine{ Index: 0, Axis: rsmt2d.Axis(0), - Shares: []*byzantine.ShareWithProof{}, + Shares: []*share.ShareWithProof{}, }, ), reflect.TypeOf((*error)(nil)).Elem(): fmt.Errorf("error"), diff --git a/share/eds/byzantine/bad_encoding.go b/share/eds/byzantine/bad_encoding.go index fbb6b592ea..b9b6b999d7 100644 --- a/share/eds/byzantine/bad_encoding.go +++ b/share/eds/byzantine/bad_encoding.go @@ -5,16 +5,21 @@ import ( "errors" "fmt" + logging "github.com/ipfs/go-log/v2" + "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/nmt" + nmt_pb "github.com/celestiaorg/nmt/pb" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" pb "github.com/celestiaorg/celestia-node/share/eds/byzantine/pb" - "github.com/celestiaorg/celestia-node/share/ipld" ) +var log = logging.Logger("share/byzantine") + const ( version = "v0.1" @@ -27,7 +32,7 @@ type BadEncodingProof struct { // ShareWithProof contains all shares from row or col. // Shares that did not pass verification in rsmt2d will be nil. // For non-nil shares MerkleProofs are computed. - Shares []*ShareWithProof + Shares []*share.ShareWithProof // Index represents the row/col index where ErrByzantineRow/ErrByzantineColl occurred. Index uint32 // Axis represents the axis that verification failed on. @@ -70,7 +75,7 @@ func (p *BadEncodingProof) Height() uint64 { func (p *BadEncodingProof) MarshalBinary() ([]byte, error) { shares := make([]*pb.Share, 0, len(p.Shares)) for _, share := range p.Shares { - shares = append(shares, share.ShareWithProofToProto()) + shares = append(shares, ShareWithProofToProto(share)) } badEncodingFraudProof := pb.BadEncoding{ @@ -89,10 +94,11 @@ func (p *BadEncodingProof) UnmarshalBinary(data []byte) error { if err := in.Unmarshal(data); err != nil { return err } + axisType := rsmt2d.Axis(in.Axis) befp := &BadEncodingProof{ headerHash: in.HeaderHash, BlockHeight: in.Height, - Shares: ProtoToShare(in.Shares), + Shares: ProtoToShare(in.Shares, axisType), Index: in.Index, Axis: rsmt2d.Axis(in.Axis), } @@ -190,13 +196,11 @@ func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { continue } // validate inclusion of the share into one of the DAHeader roots - if ok := shr.Validate(ipld.MustCidFromNamespacedSha256(merkleRoots[index])); !ok { + if ok := shr.Validate(merkleRoots[index], index, int(p.Index), int(odsWidth)*2); !ok { log.Debugf("%s: %s at index %d", invalidProofPrefix, errIncorrectShare, index) return errIncorrectShare } - // NMTree commits the additional namespace while rsmt2d does not know about, so we trim it - // this is ugliness from NMTWrapper that we have to embrace ¯\_(ツ)_/¯ - shares[index] = share.GetData(shr.Share) + shares[index] = shr.Share } codec := share.DefaultRSMT2DCodec() @@ -208,7 +212,7 @@ func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { log.Debugw("failed to decode shares at height", "height", hdr.Height(), "err", err, ) - return nil + return fmt.Errorf("failed to decode shares: %w", err) } rebuiltExtendedShares, err := codec.Encode(rebuiltShares[0:odsWidth]) @@ -216,7 +220,7 @@ func (p *BadEncodingProof) Validate(hdr 
*header.ExtendedHeader) error { log.Debugw("failed to encode shares at height", "height", hdr.Height(), "err", err, ) - return nil + return fmt.Errorf("failed to encode shares: %w", err) } copy(rebuiltShares[odsWidth:], rebuiltExtendedShares) @@ -227,7 +231,7 @@ func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { log.Debugw("failed to build a tree from the reconstructed shares at height", "height", hdr.Height(), "err", err, ) - return nil + return fmt.Errorf("failed to build a tree from the reconstructed shares: %w", err) } } @@ -236,7 +240,7 @@ func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { log.Debugw("failed to build a tree root at height", "height", hdr.Height(), "err", err, ) - return nil + return fmt.Errorf("failed to build a tree root: %w", err) } // root is a merkle root of the row/col where ErrByzantine occurred @@ -252,3 +256,45 @@ func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { } return nil } + +func ShareWithProofToProto(s *share.ShareWithProof) *pb.Share { + if s == nil { + return &pb.Share{} + } + + return &pb.Share{ + Data: s.Share, + Proof: &nmt_pb.Proof{ + Start: int64(s.Proof.Start()), + End: int64(s.Proof.End()), + Nodes: s.Proof.Nodes(), + LeafHash: s.Proof.LeafHash(), + IsMaxNamespaceIgnored: s.Proof.IsMaxNamespaceIDIgnored(), + }, + } +} + +func ProtoToShare(protoShares []*pb.Share, axisType rsmt2d.Axis) []*share.ShareWithProof { + shares := make([]*share.ShareWithProof, len(protoShares)) + for i, sh := range protoShares { + if sh.Proof == nil { + continue + } + proof := ProtoToProof(sh.Proof) + shares[i] = &share.ShareWithProof{ + Share: sh.Data, + Proof: &proof, + Axis: axisType, + } + } + return shares +} + +func ProtoToProof(protoProof *nmt_pb.Proof) nmt.Proof { + return nmt.NewInclusionProof( + int(protoProof.Start), + int(protoProof.End), + protoProof.Nodes, + protoProof.IsMaxNamespaceIgnored, + ) +} diff --git a/share/eds/byzantine/bad_encoding_test.go b/share/eds/byzantine/bad_encoding_test.go index e7032107ca..cb9d449a48 100644 --- a/share/eds/byzantine/bad_encoding_test.go +++ b/share/eds/byzantine/bad_encoding_test.go @@ -170,7 +170,7 @@ func TestIncorrectBadEncodingFraudProof(t *testing.T) { rowShares := eds.Row(row) rowRoot := dah.RowRoots[row] - shareProofs, err := GetProofsForShares(ctx, bServ, ipld.MustCidFromNamespacedSha256(rowRoot), rowShares) + shareProofs, err := ipld.GetSharesWithProofs(ctx, bServ, rowRoot, rowShares, rsmt2d.Row) require.NoError(t, err) // create a fake error for data that was encoded correctly diff --git a/share/eds/byzantine/byzantine.go b/share/eds/byzantine/byzantine.go index dfdf681f04..3987d8ef71 100644 --- a/share/eds/byzantine/byzantine.go +++ b/share/eds/byzantine/byzantine.go @@ -10,6 +10,7 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -20,7 +21,7 @@ import ( // Merkle Proof for each share. 
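// Note that proofs are collected from the opposite axis: when rsmt2d flags
// row r as badly encoded, each share (x, r) is proven under column root x,
// and vice versa, so no inclusion proof depends on the axis that is claimed
// to be corrupt. See the roots and axisType selection in NewErrByzantine below.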
type ErrByzantine struct { Index uint32 - Shares []*ShareWithProof + Shares []*share.ShareWithProof Axis rsmt2d.Axis } @@ -41,8 +42,12 @@ func NewErrByzantine( dah.ColumnRoots, dah.RowRoots, }[errByz.Axis] + axisType := rsmt2d.Row + if errByz.Axis == rsmt2d.Row { + axisType = rsmt2d.Col + } - sharesWithProof := make([]*ShareWithProof, len(errByz.Shares)) + sharesWithProof := make([]*share.ShareWithProof, len(errByz.Shares)) sharesAmount := 0 errGr, ctx := errgroup.WithContext(ctx) @@ -60,12 +65,13 @@ func NewErrByzantine( index := index errGr.Go(func() error { - share, err := getProofsAt( + sh, err := ipld.GetShareWithProof( ctx, bGetter, - ipld.MustCidFromNamespacedSha256(roots[index]), + roots[index], int(errByz.Index), len(errByz.Shares), + axisType, ) - sharesWithProof[index] = share + sharesWithProof[index] = sh return err }) } diff --git a/share/eds/byzantine/share_proof.go b/share/eds/byzantine/share_proof.go deleted file mode 100644 index 98b58ebbec..0000000000 --- a/share/eds/byzantine/share_proof.go +++ /dev/null @@ -1,134 +0,0 @@ -package byzantine - -import ( - "context" - "crypto/sha256" - - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - - "github.com/celestiaorg/nmt" - nmt_pb "github.com/celestiaorg/nmt/pb" - - "github.com/celestiaorg/celestia-node/share" - pb "github.com/celestiaorg/celestia-node/share/eds/byzantine/pb" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -var log = logging.Logger("share/byzantine") - -// ShareWithProof contains data with corresponding Merkle Proof -type ShareWithProof struct { - // Share is a full data including namespace - share.Share - // Proof is a Merkle Proof of current share - Proof *nmt.Proof -} - -// NewShareWithProof takes the given leaf and its path, starting from the tree root, -// and computes the nmt.Proof for it. -func NewShareWithProof(index int, share share.Share, pathToLeaf []cid.Cid) *ShareWithProof { - rangeProofs := make([][]byte, 0, len(pathToLeaf)) - for i := len(pathToLeaf) - 1; i >= 0; i-- { - node := ipld.NamespacedSha256FromCID(pathToLeaf[i]) - rangeProofs = append(rangeProofs, node) - } - - proof := nmt.NewInclusionProof(index, index+1, rangeProofs, true) - return &ShareWithProof{ - share, - &proof, - } -} - -// Validate validates inclusion of the share under the given root CID. -func (s *ShareWithProof) Validate(root cid.Cid) bool { - return s.Proof.VerifyInclusion( - sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally - share.GetNamespace(s.Share).ToNMT(), - [][]byte{share.GetData(s.Share)}, - ipld.NamespacedSha256FromCID(root), - ) -} - -func (s *ShareWithProof) ShareWithProofToProto() *pb.Share { - if s == nil { - return &pb.Share{} - } - - return &pb.Share{ - Data: s.Share, - Proof: &nmt_pb.Proof{ - Start: int64(s.Proof.Start()), - End: int64(s.Proof.End()), - Nodes: s.Proof.Nodes(), - LeafHash: s.Proof.LeafHash(), - IsMaxNamespaceIgnored: s.Proof.IsMaxNamespaceIDIgnored(), - }, - } -} - -// GetProofsForShares fetches Merkle proofs for the given shares -// and returns the result as an array of ShareWithProof. 
-func GetProofsForShares( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - shares [][]byte, -) ([]*ShareWithProof, error) { - proofs := make([]*ShareWithProof, len(shares)) - for index, share := range shares { - if share != nil { - proof, err := getProofsAt(ctx, bGetter, root, index, len(shares)) - if err != nil { - return nil, err - } - proofs[index] = proof - } - } - return proofs, nil -} - -func getProofsAt( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - index, - total int, -) (*ShareWithProof, error) { - proof := make([]cid.Cid, 0) - // TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same - // tree. Add options that will control what data will be fetched. - node, err := ipld.GetLeaf(ctx, bGetter, root, index, total) - if err != nil { - return nil, err - } - - proof, err = ipld.GetProof(ctx, bGetter, root, proof, index, total) - if err != nil { - return nil, err - } - return NewShareWithProof(index, node.RawData(), proof), nil -} - -func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof { - shares := make([]*ShareWithProof, len(protoShares)) - for i, share := range protoShares { - if share.Proof == nil { - continue - } - proof := ProtoToProof(share.Proof) - shares[i] = &ShareWithProof{share.Data, &proof} - } - return shares -} - -func ProtoToProof(protoProof *nmt_pb.Proof) nmt.Proof { - return nmt.NewInclusionProof( - int(protoProof.Start), - int(protoProof.End), - protoProof.Nodes, - protoProof.IsMaxNamespaceIgnored, - ) -} diff --git a/share/eds/byzantine/share_proof_test.go b/share/eds/byzantine/share_proof_test.go deleted file mode 100644 index a9021d806d..0000000000 --- a/share/eds/byzantine/share_proof_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package byzantine - -import ( - "context" - "strconv" - "testing" - "time" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/pkg/da" - - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" -) - -func TestGetProof(t *testing.T) { - const width = 4 - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - bServ := ipld.NewMemBlockservice() - - shares := sharetest.RandShares(t, width*width) - in, err := ipld.AddShares(ctx, shares, bServ) - require.NoError(t, err) - - dah, err := da.NewDataAvailabilityHeader(in) - require.NoError(t, err) - var tests = []struct { - roots [][]byte - }{ - {dah.RowRoots}, - {dah.ColumnRoots}, - } - - for i, tt := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - for _, root := range tt.roots { - rootCid := ipld.MustCidFromNamespacedSha256(root) - for index := 0; uint(index) < in.Width(); index++ { - proof := make([]cid.Cid, 0) - proof, err = ipld.GetProof(ctx, bServ, rootCid, proof, index, int(in.Width())) - require.NoError(t, err) - node, err := ipld.GetLeaf(ctx, bServ, rootCid, index, int(in.Width())) - require.NoError(t, err) - inclusion := NewShareWithProof(index, node.RawData(), proof) - require.True(t, inclusion.Validate(rootCid)) - } - } - }) - } -} - -func TestGetProofs(t *testing.T) { - const width = 4 - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - bServ := ipld.NewMemBlockservice() - - shares := sharetest.RandShares(t, width*width) - in, err := ipld.AddShares(ctx, shares, bServ) - require.NoError(t, err) - - dah, err := da.NewDataAvailabilityHeader(in) - require.NoError(t, err) - for _, root := 
range dah.ColumnRoots {
-		rootCid := ipld.MustCidFromNamespacedSha256(root)
-		data := make([][]byte, 0, in.Width())
-		for index := 0; uint(index) < in.Width(); index++ {
-			node, err := ipld.GetLeaf(ctx, bServ, rootCid, index, int(in.Width()))
-			require.NoError(t, err)
-			data = append(data, node.RawData()[9:])
-		}
-
-		proves, err := GetProofsForShares(ctx, bServ, rootCid, data)
-		require.NoError(t, err)
-		for _, proof := range proves {
-			require.True(t, proof.Validate(rootCid))
-		}
-	}
-}
diff --git a/share/ipld/get.go b/share/ipld/get.go
index adf2ffa8c5..09de5c3d23 100644
--- a/share/ipld/get.go
+++ b/share/ipld/get.go
@@ -11,6 +11,9 @@ import (
 	"github.com/ipfs/go-cid"
 	ipld "github.com/ipfs/go-ipld-format"
 
+	"github.com/celestiaorg/nmt"
+	"github.com/celestiaorg/rsmt2d"
+
 	"github.com/celestiaorg/celestia-node/share"
 )
 
@@ -157,9 +160,72 @@ func GetLeaves(ctx context.Context,
 	wg.Wait()
 }
 
-// GetProof fetches and returns the leaf's Merkle Proof.
+// GetSharesWithProofs fetches Merkle proofs for the given shares
+// and returns the result as an array of ShareWithProof.
+func GetSharesWithProofs(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	rootHash []byte,
+	shares [][]byte,
+	axisType rsmt2d.Axis,
+) ([]*share.ShareWithProof, error) {
+	proofs := make([]*share.ShareWithProof, len(shares))
+	for index, share := range shares {
+		if share != nil {
+			proof, err := GetShareWithProof(ctx, bGetter, rootHash, index, len(shares), axisType)
+			if err != nil {
+				return nil, err
+			}
+			proofs[index] = proof
+		}
+	}
+	return proofs, nil
+}
+
+// GetShareWithProof fetches a Merkle proof for the given share.
+func GetShareWithProof(
+	ctx context.Context,
+	bGetter blockservice.BlockGetter,
+	rootHash []byte,
+	index,
+	total int,
+	axisType rsmt2d.Axis,
+) (*share.ShareWithProof, error) {
+	rootCid := MustCidFromNamespacedSha256(rootHash)
+	proof := make([]cid.Cid, 0)
+	// TODO(@vgonkivs): Combine GetLeafData and getProofNodes in one function as they are traversing the same
+	// tree. Add options that will control what data will be fetched.
+	leaf, err := GetLeaf(ctx, bGetter, rootCid, index, total)
+	if err != nil {
+		return nil, err
+	}
+
+	nodes, err := getProofNodes(ctx, bGetter, rootCid, proof, index, total)
+	if err != nil {
+		return nil, err
+	}
+
+	return &share.ShareWithProof{
+		Share: share.GetData(leaf.RawData()),
+		Proof: buildProof(nodes, index),
+		Axis:  axisType,
+	}, nil
+}
+
+func buildProof(proofNodes []cid.Cid, sharePos int) *nmt.Proof {
+	rangeProofs := make([][]byte, 0, len(proofNodes))
+	for i := len(proofNodes) - 1; i >= 0; i-- {
+		node := NamespacedSha256FromCID(proofNodes[i])
+		rangeProofs = append(rangeProofs, node)
+	}
+
+	proof := nmt.NewInclusionProof(sharePos, sharePos+1, rangeProofs, true)
+	return &proof
+}
+
+// getProofNodes fetches and returns the leaf's Merkle Proof.
// It walks down the IPLD NMT tree until it reaches the leaf and returns collected proof -func GetProof( +func getProofNodes( ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, @@ -186,7 +252,7 @@ func GetProof( proof = append(proof, lnks[1].Cid) } else { root, leaf = lnks[1].Cid, leaf-total // otherwise go down the second - proof, err = GetProof(ctx, bGetter, root, proof, leaf, total) + proof, err = getProofNodes(ctx, bGetter, root, proof, leaf, total) if err != nil { return nil, err } @@ -194,7 +260,7 @@ func GetProof( } // recursively walk down through selected children - return GetProof(ctx, bGetter, root, proof, leaf, total) + return getProofNodes(ctx, bGetter, root, proof, leaf, total) } // chanGroup implements an atomic wait group, closing a jobs chan diff --git a/share/ipld/get_test.go b/share/ipld/get_test.go new file mode 100644 index 0000000000..db9237d95f --- /dev/null +++ b/share/ipld/get_test.go @@ -0,0 +1,84 @@ +package ipld + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestGetProof(t *testing.T) { + const width = 4 + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + bServ := NewMemBlockservice() + + shares := sharetest.RandShares(t, width*width) + in, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) + var tests = []struct { + roots [][]byte + axisType rsmt2d.Axis + }{ + { + roots: dah.RowRoots, + axisType: rsmt2d.Row, + }, + { + roots: dah.ColumnRoots, + axisType: rsmt2d.Col, + }, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + for axisIdx, root := range tt.roots { + for shrIdx := 0; uint(shrIdx) < in.Width(); shrIdx++ { + share, err := GetShareWithProof(ctx, bServ, root, shrIdx, int(in.Width()), tt.axisType) + require.NoError(t, err) + require.True(t, share.Validate(root, shrIdx, axisIdx, int(in.Width()))) + } + } + }) + } +} + +func TestGetSharesProofs(t *testing.T) { + const width = 4 + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + bServ := NewMemBlockservice() + + shares := sharetest.RandShares(t, width*width) + in, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + + dah, err := da.NewDataAvailabilityHeader(in) + require.NoError(t, err) + for axisIdx, root := range dah.ColumnRoots { + rootCid := MustCidFromNamespacedSha256(root) + data := make([][]byte, 0, in.Width()) + for index := 0; uint(index) < in.Width(); index++ { + node, err := GetLeaf(ctx, bServ, rootCid, index, int(in.Width())) + require.NoError(t, err) + data = append(data, node.RawData()[9:]) + } + + proves, err := GetSharesWithProofs(ctx, bServ, root, data, rsmt2d.Col) + require.NoError(t, err) + for i, proof := range proves { + require.True(t, proof.Validate(root, i, axisIdx, int(in.Width()))) + } + } +} From 8b29af3f32c84e6ff02b45811104c070344e0121 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sun, 17 Dec 2023 14:52:34 +0800 Subject: [PATCH 040/132] allow Store implementation to choose proof axis for Share --- share/share.go | 28 +++++++++++++++++++++++++++ share/store/eds_file.go | 15 +++++++++++++-- share/store/mem_file.go | 21 ++++++++++++++------ share/store/mem_file_test.go | 37 +++++++++++++----------------------- 4 files changed, 69 insertions(+), 32 
deletions(-) diff --git a/share/share.go b/share/share.go index 4079028d82..f578817a48 100644 --- a/share/share.go +++ b/share/share.go @@ -2,10 +2,13 @@ package share import ( "bytes" + "crypto/sha256" "encoding/hex" "fmt" "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" ) var ( @@ -40,6 +43,31 @@ func GetData(s Share) []byte { return s[NamespaceSize:] } +// ShareWithProof contains data with corresponding Merkle Proof +type ShareWithProof struct { //nolint: revive + // Share is a full data including namespace + Share + // Proof is a Merkle Proof of current share + Proof *nmt.Proof + // Axis is a type of axis against which the share proof is computed + Axis rsmt2d.Axis +} + +// Validate validates inclusion of the share under the given root CID. +func (s *ShareWithProof) Validate(rootHash []byte, shrIdx, axisIdx, edsSize int) bool { + isParity := shrIdx >= edsSize/2 || axisIdx >= edsSize/2 + namespace := ParitySharesNamespace + if !isParity { + namespace = GetNamespace(s.Share) + } + return s.Proof.VerifyInclusion( + sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally + namespace.ToNMT(), + [][]byte{s.Share}, + rootHash, + ) +} + // DataHash is a representation of the Root hash. type DataHash []byte diff --git a/share/store/eds_file.go b/share/store/eds_file.go index 0bedd4a5ec..03420eece1 100644 --- a/share/store/eds_file.go +++ b/share/store/eds_file.go @@ -4,7 +4,6 @@ import ( "context" "io" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -15,7 +14,7 @@ type EdsFile interface { // Size returns square size of the file. Size() int // Share returns share and corresponding proof for the given axis and share index in this axis. - Share(ctx context.Context, axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) + Share(ctx context.Context, x, y int, proofType ProofAxis) (*share.ShareWithProof, error) // AxisHalf returns shares for the first half of the axis of the given type and index. AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) // Data returns data for the given namespace and row index. @@ -23,3 +22,15 @@ type EdsFile interface { // EDS returns extended data square stored in the file. EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) } + +// ProofAxis represents axis for which proof should be computed. +type ProofAxis int + +const ( + // ProofAxisAny indicates that any proof could be computed for the share. + ProofAxisAny ProofAxis = iota + // ProofAxisRow indicates that only row inclusion proof should be computed for the share. + ProofAxisRow + // ProofAxisCol indicates that only column inclusion proof should be computed for the share. 
+ ProofAxisCol +) diff --git a/share/store/mem_file.go b/share/store/mem_file.go index f57f4d58a9..1f0a267119 100644 --- a/share/store/mem_file.go +++ b/share/store/mem_file.go @@ -27,24 +27,33 @@ func (f *MemFile) Size() int { func (f *MemFile) Share( _ context.Context, - axisType rsmt2d.Axis, - axisIdx, shrIdx int, -) (share.Share, nmt.Proof, error) { + x, y int, + proofType ProofAxis, +) (*share.ShareWithProof, error) { + axisType, axisIdx, shrIdx := rsmt2d.Row, y, x + if proofType == ProofAxisCol { + axisType, axisIdx, shrIdx = rsmt2d.Col, x, y + } + shares := f.axis(axisType, axisIdx) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) for _, shr := range shares { err := tree.Push(shr) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } } proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } - return shares[shrIdx], proof, nil + return &share.ShareWithProof{ + Share: shares[shrIdx], + Proof: &proof, + Axis: axisType, + }, nil } func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go index 4d82db2446..066dc9c1b1 100644 --- a/share/store/mem_file_test.go +++ b/share/store/mem_file_test.go @@ -2,14 +2,11 @@ package store import ( "context" - "crypto/sha256" mrand "math/rand" "testing" "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/sharetest" @@ -22,28 +19,20 @@ func TestMemFileShare(t *testing.T) { fl := &MemFile{Eds: eds} width := int(eds.Width()) - for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { - for i := 0; i < width*width; i++ { - axisIdx, shrIdx := i/width, i%width - if axisType == rsmt2d.Col { - axisIdx, shrIdx = shrIdx, axisIdx - } - - shr, prf, err := fl.Share(context.TODO(), axisType, axisIdx, shrIdx) - require.NoError(t, err) - - namespace := share.ParitySharesNamespace - if axisIdx < width/2 && shrIdx < width/2 { - namespace = share.GetNamespace(shr) + for _, proofType := range []ProofAxis{ProofAxisAny, ProofAxisRow, ProofAxisCol} { + for x := 0; x < width; x++ { + for y := 0; y < width; y++ { + shr, err := fl.Share(context.TODO(), x, y, proofType) + require.NoError(t, err) + + axishash := root.RowRoots[y] + if proofType == ProofAxisCol { + axishash = root.ColumnRoots[x] + } + + ok := shr.Validate(axishash, x, y, width) + require.True(t, ok) } - - axishash := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - axishash = root.ColumnRoots[axisIdx] - } - - ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) - require.True(t, ok) } } } From eae53591403e3dfa77b84f1346bb3e2d41f5ecc9 Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 19 Dec 2023 02:10:29 +0800 Subject: [PATCH 041/132] remove option to select proof axis from store interface --- share/store/eds_file.go | 14 +------------- share/store/mem_file.go | 7 ++----- share/store/mem_file_test.go | 22 ++++++++-------------- 3 files changed, 11 insertions(+), 32 deletions(-) diff --git a/share/store/eds_file.go b/share/store/eds_file.go index 03420eece1..5d1eefb758 100644 --- a/share/store/eds_file.go +++ b/share/store/eds_file.go @@ -14,7 +14,7 @@ type EdsFile interface { // Size returns square size of the file. 
Size() int // Share returns share and corresponding proof for the given axis and share index in this axis. - Share(ctx context.Context, x, y int, proofType ProofAxis) (*share.ShareWithProof, error) + Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) // AxisHalf returns shares for the first half of the axis of the given type and index. AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) // Data returns data for the given namespace and row index. @@ -22,15 +22,3 @@ type EdsFile interface { // EDS returns extended data square stored in the file. EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) } - -// ProofAxis represents axis for which proof should be computed. -type ProofAxis int - -const ( - // ProofAxisAny indicates that any proof could be computed for the share. - ProofAxisAny ProofAxis = iota - // ProofAxisRow indicates that only row inclusion proof should be computed for the share. - ProofAxisRow - // ProofAxisCol indicates that only column inclusion proof should be computed for the share. - ProofAxisCol -) diff --git a/share/store/mem_file.go b/share/store/mem_file.go index 1f0a267119..24f9cfd110 100644 --- a/share/store/mem_file.go +++ b/share/store/mem_file.go @@ -28,12 +28,9 @@ func (f *MemFile) Size() int { func (f *MemFile) Share( _ context.Context, x, y int, - proofType ProofAxis, ) (*share.ShareWithProof, error) { - axisType, axisIdx, shrIdx := rsmt2d.Row, y, x - if proofType == ProofAxisCol { - axisType, axisIdx, shrIdx = rsmt2d.Col, x, y - } + axisType := rsmt2d.Row + axisIdx, shrIdx := y, x shares := f.axis(axisType, axisIdx) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go index 066dc9c1b1..9df1616afd 100644 --- a/share/store/mem_file_test.go +++ b/share/store/mem_file_test.go @@ -19,20 +19,14 @@ func TestMemFileShare(t *testing.T) { fl := &MemFile{Eds: eds} width := int(eds.Width()) - for _, proofType := range []ProofAxis{ProofAxisAny, ProofAxisRow, ProofAxisCol} { - for x := 0; x < width; x++ { - for y := 0; y < width; y++ { - shr, err := fl.Share(context.TODO(), x, y, proofType) - require.NoError(t, err) - - axishash := root.RowRoots[y] - if proofType == ProofAxisCol { - axishash = root.ColumnRoots[x] - } - - ok := shr.Validate(axishash, x, y, width) - require.True(t, ok) - } + for x := 0; x < width; x++ { + for y := 0; y < width; y++ { + shr, err := fl.Share(context.TODO(), x, y) + require.NoError(t, err) + + axishash := root.RowRoots[y] + ok := shr.Validate(axishash, x, y, width) + require.True(t, ok) } } } From daf4bcc86dac49291e96bea0daf6a2c5b43e72f1 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 21 Dec 2023 15:49:31 +0800 Subject: [PATCH 042/132] fix shadow subslicing by data copy --- share/store/ods_file.go | 28 ++++++++-------- share/store/ods_file_test.go | 65 +++++++++++++++++++++++------------- 2 files changed, 55 insertions(+), 38 deletions(-) diff --git a/share/store/ods_file.go b/share/store/ods_file.go index b02dc7d742..1e458079dc 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -136,6 +136,9 @@ func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx if axisType != f.axisType { return nil, fmt.Errorf("order of shares is not preserved") } + if axisIdx >= f.Size()/2 { + return nil, fmt.Errorf("index is out of ods bounds") + } return f.shares[axisIdx], nil } @@ -155,9 +158,9 @@ func (f *OdsFile) readOds(axisType rsmt2d.Axis) (*odsInMemFile, 
error) { for j := 0; j < odsLn; j++ { if axisType == rsmt2d.Row { - shrs[i][j] = buf[j*shrLn : (j+1)*shrLn] + copy(shrs[i][j], buf[j*shrLn:(j+1)*shrLn]) } else { - shrs[j][i] = buf[j*shrLn : (j+1)*shrLn] + copy(shrs[j][i], buf[j*shrLn:(j+1)*shrLn]) } } } @@ -297,20 +300,14 @@ func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx in } func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { - shrLn := int(f.hdr.shareSize) - odsLn := int(f.hdr.squareSize) / 2 - - buf := make([]byte, odsLn*odsLn*shrLn) - if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { + ods, err := f.readOds(rsmt2d.Row) + if err != nil { return nil, err } - shrs := make([][]byte, odsLn*odsLn) - for i := 0; i < odsLn; i++ { - for j := 0; j < odsLn; j++ { - pos := i*odsLn + j - shrs[pos] = buf[pos*shrLn : (pos+1)*shrLn] - } + shrs := make([]share.Share, 0, len(ods.shares)*len(ods.shares)) + for _, row := range ods.shares { + shrs = append(shrs, row...) } treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2)) @@ -346,9 +343,12 @@ func newMemPool(codec rsmt2d.Codec, size int) memPool { shares := &sync.Pool{ New: func() interface{} { shrs := make([][]share.Share, size) - for i := 0; i < size; i++ { + for i := range shrs { if shrs[i] == nil { shrs[i] = make([]share.Share, size) + for j := range shrs[i] { + shrs[i][j] = make(share.Share, share.Size) + } } } return shrs diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go index e4ee3be5d8..802c09c173 100644 --- a/share/store/ods_file_test.go +++ b/share/store/ods_file_test.go @@ -54,18 +54,35 @@ func TestOdsFile(t *testing.T) { }) } -// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 429498 2464 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 5889 192904 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 56209 20926 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 5480 193249 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 287070 4003 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 2212 506601 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 28990 41353 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 2358 511020 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 186265 6309 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 610 1814819 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14460 82613 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 640 1819996 ns/op +func TestReadOdsFile(t *testing.T) { + eds := edstest.RandEDS(t, 8) + mem := newMemPools(rsmt2d.NewLeoRSCodec()) + path := t.TempDir() + "/testfile" + f, err := CreateOdsFile(path, eds, mem) + require.NoError(t, err) + + ods, err := f.readOds(rsmt2d.Row) + require.NoError(t, err) + for i, row := range ods.shares { + original, err := f.readRow(i) + require.NoError(t, err) + require.True(t, len(original) == len(row)) + require.Equal(t, original, row) + } +} + +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 418206 2545 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 4968 227265 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 57007 20707 ns/op +// 
BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 5016 214184 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 308559 3786 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 1624 713999 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 28724 41421 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 1686 629314 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 183322 6360 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 428 2616150 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14338 83598 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 488 2213146 ns/op func BenchmarkAxisFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() @@ -81,18 +98,18 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { benchGetAxisFromFile(b, newFile, minSize, maxSize) } -// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 10333 113351 ns/op -// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 3794 319437 ns/op -// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 7201 139066 ns/op -// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 3612 317520 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 5462 220543 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 1586 775291 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 4611 257328 ns/op -// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 1534 788619 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 2413 448675 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 517 2427473 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2200 528681 ns/op -// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 464 2385446 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 10339 111328 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 3392 359180 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 8925 131352 ns/op +// BenchmarkShareFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 3447 346218 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 5503 215833 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 1231 1001053 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 4711 250001 ns/op +// BenchmarkShareFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 1315 910079 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 2364 435748 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 358 3330620 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2114 514642 ns/op +// BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 373 3068104 ns/op func BenchmarkShareFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() From 4d71dad7e418724c8c9a7d51e1b73370cb8dbca0 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 
22 Dec 2023 21:38:36 +0700 Subject: [PATCH 043/132] allow reconstructSome using direct reedsolomon --- share/eds/edstest/testing.go | 4 +- share/store/codec.go | 32 ++++++++++++++++ share/store/ods_file.go | 72 +++++++++++++++++++++++------------- share/store/ods_file_test.go | 12 +++--- 4 files changed, 86 insertions(+), 34 deletions(-) create mode 100644 share/store/codec.go diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go index 5c1c4aa7f1..450f706a39 100644 --- a/share/eds/edstest/testing.go +++ b/share/eds/edstest/testing.go @@ -34,8 +34,8 @@ func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { return eds } -// RandEDSWithNamespace generates EDS with given square size. Returned EDS will have namespacedAmount of -// shares with the given namespace. +// RandEDSWithNamespace generates EDS with given square size. Returned EDS will have +// namespacedAmount of shares with the given namespace. func RandEDSWithNamespace( t require.TestingT, namespace share.Namespace, diff --git a/share/store/codec.go b/share/store/codec.go new file mode 100644 index 0000000000..af9dd5af8e --- /dev/null +++ b/share/store/codec.go @@ -0,0 +1,32 @@ +package store + +import ( + "sync" + + "github.com/klauspost/reedsolomon" +) + +type Codec interface { + Encoder(len int) (reedsolomon.Encoder, error) +} + +type codec struct { + encCache sync.Map +} + +func NewCodec() Codec { + return &codec{} +} + +func (l *codec) Encoder(len int) (reedsolomon.Encoder, error) { + enc, ok := l.encCache.Load(len) + if !ok { + var err error + enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(true)) + if err != nil { + return nil, err + } + l.encCache.Store(len, enc) + } + return enc.(reedsolomon.Encoder), nil +} diff --git a/share/store/ods_file.go b/share/store/ods_file.go index 1e458079dc..6ab7870a9a 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -107,7 +107,7 @@ func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx in if err != nil { return nil, err } - defer f.memPool.shares.Put(ods.shares) + defer f.memPool.ods.Put(ods.square) return computeAxisHalf(ctx, ods, f.memPool.codec, axisType, axisIdx) } @@ -125,11 +125,11 @@ func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, type odsInMemFile struct { File axisType rsmt2d.Axis - shares [][]share.Share + square [][]share.Share } func (f *odsInMemFile) Size() int { - return len(f.shares) * 2 + return len(f.square) * 2 } func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { @@ -139,17 +139,17 @@ func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx if axisIdx >= f.Size()/2 { return nil, fmt.Errorf("index is out of ods bounds") } - return f.shares[axisIdx], nil + return f.square[axisIdx], nil } func (f *OdsFile) readOds(axisType rsmt2d.Axis) (*odsInMemFile, error) { shrLn := int(f.hdr.shareSize) odsLn := int(f.hdr.squareSize) / 2 - buf := f.memPool.ods.Get().([]byte) - defer f.memPool.ods.Put(buf) + buf := f.memPool.halfAxis.Get().([]byte) + defer f.memPool.halfAxis.Put(buf) - shrs := f.memPool.shares.Get().([][]share.Share) + ods := f.memPool.ods.Get().([][]share.Share) for i := 0; i < odsLn; i++ { pos := HeaderSize + odsLn*shrLn*i if _, err := f.fl.ReadAt(buf, int64(pos)); err != nil { @@ -158,16 +158,16 @@ func (f *OdsFile) readOds(axisType rsmt2d.Axis) (*odsInMemFile, error) { for j := 0; j < odsLn; j++ { if axisType == rsmt2d.Row { - copy(shrs[i][j], 
buf[j*shrLn:(j+1)*shrLn]) + copy(ods[i][j], buf[j*shrLn:(j+1)*shrLn]) } else { - copy(shrs[j][i], buf[j*shrLn:(j+1)*shrLn]) + copy(ods[j][i], buf[j*shrLn:(j+1)*shrLn]) } } } return &odsInMemFile{ axisType: axisType, - shares: shrs, + square: ods, }, nil } @@ -213,7 +213,7 @@ func (f *OdsFile) readCol(idx int) ([]share.Share, error) { func computeAxisHalf( ctx context.Context, f File, - codec rsmt2d.Codec, + codec Codec, axisType rsmt2d.Axis, axisIdx int, ) ([]share.Share, error) { @@ -230,11 +230,31 @@ func computeAxisHalf( return err } - parity, err := codec.Encode(original) + enc, err := codec.Encoder(f.Size()) if err != nil { - return err + return fmt.Errorf("encoder: %w", err) + } + + shards := make([][]byte, f.Size()) + copy(shards, original) + for j := len(original); j < len(shards); j++ { + shards[j] = make([]byte, len(original[0])) + } + + //target := make([]bool, f.Size()) + //target[axisIdx] = true + // + //err = enc.ReconstructSome(shards, target) + //if err != nil { + // return fmt.Errorf("reconstruct some: %w", err) + //} + + err = enc.Encode(shards) + if err != nil { + return fmt.Errorf("encode: %w", err) } - shares[i] = parity[axisIdx-f.Size()/2] + + shares[i] = shards[axisIdx] return nil }) } @@ -305,8 +325,8 @@ func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { return nil, err } - shrs := make([]share.Share, 0, len(ods.shares)*len(ods.shares)) - for _, row := range ods.shares { + shrs := make([]share.Share, 0, len(ods.square)*len(ods.square)) + for _, row := range ods.square { shrs = append(shrs, row...) } @@ -316,15 +336,15 @@ func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { type memPools struct { pools map[int]memPool - codec rsmt2d.Codec + codec Codec } type memPool struct { - codec rsmt2d.Codec - shares, ods *sync.Pool + codec Codec + ods, halfAxis *sync.Pool } -func newMemPools(codec rsmt2d.Codec) memPools { +func newMemPools(codec Codec) memPools { return memPools{ pools: make(map[int]memPool), codec: codec, @@ -339,8 +359,8 @@ func (m memPools) get(size int) memPool { return pool } -func newMemPool(codec rsmt2d.Codec, size int) memPool { - shares := &sync.Pool{ +func newMemPool(codec Codec, size int) memPool { + ods := &sync.Pool{ New: func() interface{} { shrs := make([][]share.Share, size) for i := range shrs { @@ -355,16 +375,16 @@ func newMemPool(codec rsmt2d.Codec, size int) memPool { }, } - ods := &sync.Pool{ + halfAxis := &sync.Pool{ New: func() interface{} { buf := make([]byte, size*share.Size) return buf }, } return memPool{ - shares: shares, - ods: ods, - codec: codec, + halfAxis: halfAxis, + ods: ods, + codec: codec, } } diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go index 802c09c173..00668f0b4b 100644 --- a/share/store/ods_file_test.go +++ b/share/store/ods_file_test.go @@ -16,7 +16,7 @@ import ( func TestCreateOdsFile(t *testing.T) { path := t.TempDir() + "/testfile" edsIn := edstest.RandEDS(t, 8) - mem := newMemPools(rsmt2d.NewLeoRSCodec()) + mem := newMemPools(NewCodec()) _, err := CreateOdsFile(path, edsIn, mem) require.NoError(t, err) @@ -29,7 +29,7 @@ func TestCreateOdsFile(t *testing.T) { func TestOdsFile(t *testing.T) { size := 32 - mem := newMemPools(rsmt2d.NewLeoRSCodec()) + mem := newMemPools(NewCodec()) createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { path := t.TempDir() + "/testfile" fl, err := CreateOdsFile(path, eds, mem) @@ -56,14 +56,14 @@ func TestOdsFile(t *testing.T) { func TestReadOdsFile(t *testing.T) { eds := edstest.RandEDS(t, 8) - mem 
:= newMemPools(rsmt2d.NewLeoRSCodec()) + mem := newMemPools(NewCodec()) path := t.TempDir() + "/testfile" f, err := CreateOdsFile(path, eds, mem) require.NoError(t, err) ods, err := f.readOds(rsmt2d.Row) require.NoError(t, err) - for i, row := range ods.shares { + for i, row := range ods.square { original, err := f.readRow(i) require.NoError(t, err) require.True(t, len(original) == len(row)) require.Equal(t, original, row) } } @@ -86,7 +86,7 @@ func TestReadOdsFile(t *testing.T) { func BenchmarkAxisFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() - mem := newMemPools(rsmt2d.NewLeoRSCodec()) + mem := newMemPools(NewCodec()) newFile := func(size int) File { eds := edstest.RandEDS(b, size) @@ -113,7 +113,7 @@ func BenchmarkShareFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() - mem := newMemPools(rsmt2d.NewLeoRSCodec()) + mem := newMemPools(NewCodec()) newFile := func(size int) File { eds := edstest.RandEDS(b, size) From fc082f4f759b7390b0eca94c136dc2fa902af541 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Thu, 28 Dec 2023 03:22:10 +0100 Subject: [PATCH 044/132] protocol updates: * Axis -> Row * Requesting Columns is no longer allowed * Delete AxisType from SampleID * Now the server decides which Row or Column commitment to choose, not the client * New SampleProofType field on Sample * Server's decision on the Sample proof response * Delete AxisHash from Row(Axis) * We break compatibility with Bitswap stateless Content Verification, and verification now has to be done with the DAH --- go.mod | 2 - go.sum | 8 +- share/eds/file.go | 17 +- share/eds/file_test.go | 2 +- share/eds/ods_file.go | 16 +- share/shwap/axis.go | 158 ------------- share/shwap/axis_hasher.go | 53 ----- share/shwap/axis_id.go | 128 ----------- share/shwap/axis_id_test.go | 39 ---- share/shwap/axis_test.go | 37 ---- share/shwap/blockstore.go | 36 +-- share/shwap/blockstore_test.go | 31 ++- share/shwap/data.go | 33 ++- share/shwap/data_hasher.go | 33 +-- share/shwap/data_hasher_test.go | 45 ++++ share/shwap/data_id.go | 80 +++---- share/shwap/data_id_test.go | 17 +- share/shwap/data_test.go | 15 +- share/shwap/getter.go | 194 +++++++++------- share/shwap/getter_test.go | 74 ++++++- share/shwap/pb/shwap_pb.pb.go | 209 ++++++++---------- share/shwap/pb/shwap_pb.proto | 19 +- share/shwap/row.go | 140 ++++++++++++ share/shwap/row_hasher.go | 54 +++++ ...axis_hasher_test.go => row_hasher_test.go} | 23 +- share/shwap/row_id.go | 116 ++++++++++ share/shwap/row_id_test.go | 36 +++ share/shwap/row_test.go | 35 +++ share/shwap/sample.go | 103 +++++---- share/shwap/sample_hasher.go | 35 +-- share/shwap/sample_hasher_test.go | 17 +- share/shwap/sample_id.go | 72 +++--- share/shwap/sample_id_test.go | 23 +- share/shwap/sample_test.go | 14 +- share/shwap/{ipldv2.go => shwap.go} | 83 +++---- share/shwap/{ipldv2_test.go => shwap_test.go} | 193 ++++++++-------- 36 files changed, 1135 insertions(+), 1055 deletions(-) delete mode 100644 share/shwap/axis.go delete mode 100644 share/shwap/axis_hasher.go delete mode 100644 share/shwap/axis_id.go delete mode 100644 share/shwap/axis_id_test.go delete mode 100644 share/shwap/axis_test.go create mode 100644 share/shwap/data_hasher_test.go create mode 100644 share/shwap/row.go create mode 100644 share/shwap/row_hasher.go rename share/shwap/{axis_hasher_test.go => row_hasher_test.go} (54%) create mode 100644 share/shwap/row_id.go create mode 100644 share/shwap/row_id_test.go create mode 100644 share/shwap/row_test.go rename share/shwap/{ipldv2.go =>
shwap.go} (57%) rename share/shwap/{ipldv2_test.go => shwap_test.go} (55%) diff --git a/go.mod b/go.mod index 9a58a2961f..c648c01889 100644 --- a/go.mod +++ b/go.mod @@ -351,5 +351,3 @@ replace ( github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 ) - -replace github.com/celestiaorg/nmt => github.com/Wondertan/nmt v0.0.0-20231019214331-d200d40bdad6 diff --git a/go.sum b/go.sum index 9094e56efb..d45b9f0c06 100644 --- a/go.sum +++ b/go.sum @@ -258,8 +258,6 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Wondertan/nmt v0.0.0-20231019214331-d200d40bdad6 h1:9VcKz1VmJOa3aYRDc+hxAhpSGkG9BSvc1Mowq4366VU= -github.com/Wondertan/nmt v0.0.0-20231019214331-d200d40bdad6/go.mod h1:jXKMLje7T3YTvX4CfM0c38oHjcwKqCSkklymyMMt9Cw= github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM= @@ -378,6 +376,8 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas= +github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= @@ -2258,8 +2258,8 @@ github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYms github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= diff --git a/share/eds/file.go b/share/eds/file.go index 9d3a5f2b31..56cc218654 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -4,6 +4,7 @@ import ( 
"context" "fmt" "io" + "math/rand" "os" "golang.org/x/exp/mmap" @@ -19,7 +20,7 @@ import ( type File interface { io.Closer Size() int - ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) + ShareWithProof(xisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) @@ -182,28 +183,32 @@ func (f *LazyFile) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, e return fullAxis[:len(fullAxis)/2], nil } -func (f *LazyFile) ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) { +func (f *LazyFile) ShareWithProof(axisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) { // TODO: Cache the axis as well as computed tree + axisType := rsmt2d.Row + if rand.Int()/2 == 0 { + axisType = rsmt2d.Col + } sqrLn := int(f.hdr.squareSize) shrs, err := f.Axis(axisType, axisIdx) if err != nil { - return nil, nmt.Proof{}, err + return nil, nmt.Proof{}, axisType, err } tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) for _, shr := range shrs { err = tree.Push(shr) if err != nil { - return nil, nmt.Proof{}, err + return nil, nmt.Proof{}, axisType, err } } proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, nmt.Proof{}, err + return nil, nmt.Proof{}, axisType, err } - return shrs[shrIdx], proof, nil + return shrs[shrIdx], proof, axisType, nil } func (f *LazyFile) Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { diff --git a/share/eds/file_test.go b/share/eds/file_test.go index 028e88d24b..73a3f33855 100644 --- a/share/eds/file_test.go +++ b/share/eds/file_test.go @@ -58,7 +58,7 @@ func TestFile(t *testing.T) { axisIdx, shrIdx = shrIdx, axisIdx } - shr, prf, err := fl.ShareWithProof(axisType, axisIdx, shrIdx) + shr, prf, _, err := fl.ShareWithProof(axisIdx, shrIdx) require.NoError(t, err) namespace := share.ParitySharesNamespace diff --git a/share/eds/ods_file.go b/share/eds/ods_file.go index 646044a218..22a4adf7dd 100644 --- a/share/eds/ods_file.go +++ b/share/eds/ods_file.go @@ -1,6 +1,8 @@ package eds import ( + "math/rand" + "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -20,11 +22,15 @@ func (f *MemFile) Size() int { return int(f.Eds.Width()) } -func (f *MemFile) ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) { +func (f *MemFile) ShareWithProof(axisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) { sqrLn := f.Size() + axisType := rsmt2d.Row + if rand.Int()/2 == 0 { + axisType = rsmt2d.Col + } shrs, err := f.Axis(axisType, axisIdx) if err != nil { - return nil, nmt.Proof{}, err + return nil, nmt.Proof{}, axisType, err } // TODO(@Wondartan): this must access cached NMT on EDS instead of computing a new one @@ -32,16 +38,16 @@ func (f *MemFile) ShareWithProof(axisType rsmt2d.Axis, axisIdx, shrIdx int) (sha for _, shr := range shrs { err = tree.Push(shr) if err != nil { - return nil, nmt.Proof{}, err + return nil, nmt.Proof{}, axisType, err } } proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, nmt.Proof{}, err + return nil, nmt.Proof{}, axisType, err } - return shrs[shrIdx], proof, nil + return shrs[shrIdx], proof, axisType, nil } func (f *MemFile) Axis(axisType rsmt2d.Axis, 
axisIdx int) ([]share.Share, error) { diff --git a/share/shwap/axis.go b/share/shwap/axis.go deleted file mode 100644 index dddad56ee1..0000000000 --- a/share/shwap/axis.go +++ /dev/null @@ -1,158 +0,0 @@ -package shwap - -import ( - "bytes" - "fmt" - - blocks "github.com/ipfs/go-block-format" - - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" -) - -// Axis represents an Axis of an EDS. -type Axis struct { - AxisID - - // AxisShares is the original half of the axis. - AxisShares []share.Share -} - -// NewAxis constructs a new Axis. -func NewAxis(id AxisID, axisHalf []share.Share) *Axis { - return &Axis{ - AxisID: id, - AxisShares: axisHalf, - } -} - -// NewAxisFromEDS samples the EDS and constructs a new Axis. -func NewAxisFromEDS( - axisType rsmt2d.Axis, - axisIdx int, - square *rsmt2d.ExtendedDataSquare, - height uint64, -) (*Axis, error) { - sqrLn := int(square.Width()) - - // TODO(@Wondertan): Should be an rsmt2d method - var axisHalf [][]byte - switch axisType { - case rsmt2d.Row: - axisHalf = square.Row(uint(axisIdx))[:sqrLn/2] - case rsmt2d.Col: - axisHalf = square.Col(uint(axisIdx))[:sqrLn/2] - default: - panic("invalid axis") - } - - root, err := share.NewRoot(square) - if err != nil { - return nil, fmt.Errorf("while computing root: %w", err) - } - - id := NewAxisID(axisType, uint16(axisIdx), root, height) - return NewAxis(id, axisHalf), nil -} - -// AxisFromBlock converts blocks.Block into Axis. -func AxisFromBlock(blk blocks.Block) (*Axis, error) { - if err := validateCID(blk.Cid()); err != nil { - return nil, err - } - - s := &Axis{} - err := s.UnmarshalBinary(blk.RawData()) - if err != nil { - return nil, fmt.Errorf("while unmarshalling Axis: %w", err) - } - - return s, nil -} - -// IPLDBlock converts Axis to an IPLD block for Bitswap compatibility. -func (s *Axis) IPLDBlock() (blocks.Block, error) { - cid, err := s.AxisID.Cid() - if err != nil { - return nil, err - } - - data, err := s.MarshalBinary() - if err != nil { - return nil, err - } - - return blocks.NewBlockWithCid(data, cid) -} - -// MarshalBinary marshals Axis to binary. -func (s *Axis) MarshalBinary() ([]byte, error) { - id, err := s.AxisID.MarshalBinary() - if err != nil { - return nil, err - } - - return (&shwappb.Axis{ - AxisId: id, - AxisHalf: s.AxisShares, - }).Marshal() -} - -// UnmarshalBinary unmarshal Axis from binary. -func (s *Axis) UnmarshalBinary(data []byte) error { - proto := &shwappb.Axis{} - if err := proto.Unmarshal(data); err != nil { - return err - } - - err := s.AxisID.UnmarshalBinary(proto.AxisId) - if err != nil { - return err - } - - s.AxisShares = proto.AxisHalf - return nil -} - -// Validate validates Axis's fields and proof of axis inclusion. -func (s *Axis) Validate() error { - if err := s.AxisID.Validate(); err != nil { - return err - } - - sqrLn := len(s.AxisShares) * 2 - if s.AxisID.AxisIndex > uint16(sqrLn) { - return fmt.Errorf("axis index exceeds square size: %d > %d", s.AxisID.AxisIndex, sqrLn) - } - - // TODO(@Wondertan): This computations are quite expensive and likely to be used further, - // so we need to find a way to cache them and pass to the caller on the Bitswap side - parity, err := share.DefaultRSMT2DCodec().Encode(s.AxisShares) - if err != nil { - return fmt.Errorf("while decoding erasure coded half: %w", err) - } - s.AxisShares = append(s.AxisShares, parity...) 
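// For orientation: the validation being deleted here works by recomputation
// rather than by range proof. The received original half is re-extended with
// the default RSMT2D codec, every share (original plus recomputed parity) is
// pushed into a fresh erasured-namespaced Merkle tree, and the hash of the
// resulting root must equal the committed AxisID.AxisHash.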
- - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(s.AxisShares)/2), uint(s.AxisID.AxisIndex)) - for _, shr := range s.AxisShares { - err := tree.Push(shr) - if err != nil { - return fmt.Errorf("while pushing shares to NMT: %w", err) - } - } - - root, err := tree.Root() - if err != nil { - return fmt.Errorf("while computing NMT root: %w", err) - } - - hashedRoot := hashBytes(root) - if !bytes.Equal(s.AxisID.AxisHash, hashedRoot) { - return fmt.Errorf("invalid axis hash: %X != %X", root, s.AxisID.AxisHash) - } - - return nil -} diff --git a/share/shwap/axis_hasher.go b/share/shwap/axis_hasher.go deleted file mode 100644 index d6fe7fdd9f..0000000000 --- a/share/shwap/axis_hasher.go +++ /dev/null @@ -1,53 +0,0 @@ -package shwap - -import ( - "crypto/sha256" - "fmt" -) - -// AxisHasher implements hash.Hash interface for Samples. -type AxisHasher struct { - sample Axis -} - -// Write expects a marshaled ShareSample to validate. -func (sh *AxisHasher) Write(data []byte) (int, error) { - if err := sh.sample.UnmarshalBinary(data); err != nil { - err = fmt.Errorf("while unmarshaling Axis: %w", err) - log.Error(err) - return 0, err - } - - if err := sh.sample.Validate(); err != nil { - err = fmt.Errorf("while validating Axis: %w", err) - log.Error(err) - return 0, err - } - - return len(data), nil -} - -// Sum returns the "multihash" of the ShareSampleID. -func (sh *AxisHasher) Sum([]byte) []byte { - sum, err := sh.sample.AxisID.MarshalBinary() - if err != nil { - err = fmt.Errorf("while marshaling AxisID: %w", err) - log.Error(err) - } - return sum -} - -// Reset resets the Hash to its initial state. -func (sh *AxisHasher) Reset() { - sh.sample = Axis{} -} - -// Size returns the number of bytes Sum will return. -func (sh *AxisHasher) Size() int { - return AxisIDSize -} - -// BlockSize returns the hash's underlying block size. -func (sh *AxisHasher) BlockSize() int { - return sha256.BlockSize -} diff --git a/share/shwap/axis_id.go b/share/shwap/axis_id.go deleted file mode 100644 index 619046ea5e..0000000000 --- a/share/shwap/axis_id.go +++ /dev/null @@ -1,128 +0,0 @@ -package shwap - -import ( - "encoding/binary" - "fmt" - - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" -) - -// AxisIDSize is the size of the AxisID in bytes -const AxisIDSize = 43 - -// AxisID is an unique identifier of a Axis. -type AxisID struct { - // AxisType is Col or Row axis of the sample in the data square - AxisType rsmt2d.Axis - // AxisIndex is the index of the axis(row, col) in the data square - AxisIndex uint16 - // AxisHash is the sha256 hash of a Col or Row root taken from DAH of the data square - AxisHash []byte - // Height of the block. - // Needed to identify block's data square in the whole chain - Height uint64 -} - -// NewAxisID constructs a new AxisID. -func NewAxisID(axisType rsmt2d.Axis, axisIdx uint16, root *share.Root, height uint64) AxisID { - dahroot := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - dahroot = root.ColumnRoots[axisIdx] - } - axisHash := hashBytes(dahroot) - - return AxisID{ - AxisType: axisType, - AxisIndex: axisIdx, - AxisHash: axisHash, - Height: height, - } -} - -// AxisIDFromCID coverts CID to AxisID. 
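// The CID scheme below survives the Axis -> Row rename with the same shape:
// an ID's deterministic bytes become the multihash digest of a CIDv1 (Cid()
// wraps the marshaled ID with mh.Encode and cid.NewCidV1), and decoding
// strips the multihash prefix again via cid.Hash()[mhPrefixSize:]. The "hash"
// inside a CID is therefore literally the request coordinates, which is what
// lets the custom hashers validate responses without extra request state.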
-func AxisIDFromCID(cid cid.Cid) (id AxisID, err error) { - if err = validateCID(cid); err != nil { - return id, err - } - - err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) - if err != nil { - return id, fmt.Errorf("while unmarhalling AxisID: %w", err) - } - - return id, nil -} - -// Cid returns sample ID encoded as CID. -func (s AxisID) Cid() (cid.Cid, error) { - data, err := s.MarshalBinary() - if err != nil { - return cid.Undef, err - } - - buf, err := mh.Encode(data, axisMultihashCode) - if err != nil { - return cid.Undef, err - } - - return cid.NewCidV1(axisCodec, buf), nil -} - -// MarshalTo encodes AxisID into given byte slice. -// NOTE: Proto is avoided because -// * Its size is not deterministic which is required for IPLD. -// * No support for uint16 -func (s AxisID) MarshalTo(data []byte) (int, error) { - data = append(data, byte(s.AxisType)) - data = binary.LittleEndian.AppendUint16(data, s.AxisIndex) - data = append(data, s.AxisHash...) - binary.LittleEndian.AppendUint64(data, s.Height) - return AxisIDSize, nil -} - -// UnmarshalFrom decodes AxisID from given byte slice. -func (s *AxisID) UnmarshalFrom(data []byte) (int, error) { - s.AxisType = rsmt2d.Axis(data[0]) - s.AxisIndex = binary.LittleEndian.Uint16(data[1:]) - s.AxisHash = append(s.AxisHash, data[3:hashSize+3]...) - s.Height = binary.LittleEndian.Uint64(data[hashSize+3:]) - return AxisIDSize, nil -} - -// MarshalBinary encodes AxisID into binary form. -func (s AxisID) MarshalBinary() ([]byte, error) { - data := make([]byte, 0, AxisIDSize) - n, err := s.MarshalTo(data) - return data[:n], err -} - -// UnmarshalBinary decodes AxisID from binary form. -func (s *AxisID) UnmarshalBinary(data []byte) error { - if len(data) != AxisIDSize { - return fmt.Errorf("invalid data length: %d != %d", len(data), AxisIDSize) - } - _, err := s.UnmarshalFrom(data) - return err -} - -// Validate validates fields of AxisID. 
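// Note that this Validate covers only field sanity (non-zero Height,
// hashSize-length AxisHash, Row or Col AxisType); with AxisID deleted, the
// replacement IDs are instead checked against the DAH through the
// Verify(root *share.Root) methods introduced elsewhere in this patch.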
-func (s AxisID) Validate() error { - if s.Height == 0 { - return fmt.Errorf("zero Height") - } - - if len(s.AxisHash) != hashSize { - return fmt.Errorf("invalid AxisHash size: %d != %d", len(s.AxisHash), hashSize) - } - - if s.AxisType != rsmt2d.Col && s.AxisType != rsmt2d.Row { - return fmt.Errorf("invalid AxisType: %d", s.AxisType) - } - - return nil -} diff --git a/share/shwap/axis_id_test.go b/share/shwap/axis_id_test.go deleted file mode 100644 index f2b61516e2..0000000000 --- a/share/shwap/axis_id_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package shwap - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/edstest" -) - -func TestAxisID(t *testing.T) { - square := edstest.RandEDS(t, 2) - root, err := share.NewRoot(square) - require.NoError(t, err) - - sid := NewAxisID(rsmt2d.Row, 2, root, 1) - - id, err := sid.Cid() - require.NoError(t, err) - - assert.EqualValues(t, axisCodec, id.Prefix().Codec) - assert.EqualValues(t, axisMultihashCode, id.Prefix().MhType) - assert.EqualValues(t, AxisIDSize, id.Prefix().MhLength) - - data, err := sid.MarshalBinary() - require.NoError(t, err) - - sidOut := AxisID{} - err = sidOut.UnmarshalBinary(data) - require.NoError(t, err) - assert.EqualValues(t, sid, sidOut) - - err = sidOut.Validate() - require.NoError(t, err) -} diff --git a/share/shwap/axis_test.go b/share/shwap/axis_test.go deleted file mode 100644 index 4095a4e804..0000000000 --- a/share/shwap/axis_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package shwap - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share/eds/edstest" -) - -func TestAxis(t *testing.T) { - square := edstest.RandEDS(t, 8) - - axis, err := NewAxisFromEDS(rsmt2d.Row, 1, square, 2) - require.NoError(t, err) - - data, err := axis.MarshalBinary() - require.NoError(t, err) - - blk, err := axis.IPLDBlock() - require.NoError(t, err) - - cid, err := axis.AxisID.Cid() - require.NoError(t, err) - assert.EqualValues(t, blk.Cid(), cid) - - axisOut := &Axis{} - err = axisOut.UnmarshalBinary(data) - require.NoError(t, err) - assert.EqualValues(t, axis, axisOut) - - err = axisOut.Validate() - require.NoError(t, err) -} diff --git a/share/shwap/blockstore.go b/share/shwap/blockstore.go index e1bc3acf2a..80c0c2087b 100644 --- a/share/shwap/blockstore.go +++ b/share/shwap/blockstore.go @@ -8,6 +8,8 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share/eds" ) @@ -42,15 +44,15 @@ func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) } return blk, nil - case axisCodec: - id, err := AxisIDFromCID(cid) + case rowCodec: + id, err := RowIDFromCID(cid) if err != nil { - err = fmt.Errorf("while converting CID to AxisID: %w", err) + err = fmt.Errorf("while converting CID to RowID: %w", err) log.Error(err) return nil, err } - blk, err := b.getAxisBlock(id) + blk, err := b.getRowBlock(id) if err != nil { log.Error(err) return nil, err @@ -83,12 +85,12 @@ func (b Blockstore[F]) getSampleBlock(id SampleID) (blocks.Block, error) { return nil, fmt.Errorf("while getting ODS file from FS: %w", err) } - shr, prf, err := f.ShareWithProof(id.AxisType, int(id.AxisIndex), int(id.ShareIndex)) + shr, prf, proofType, err := 
f.ShareWithProof(int(id.RowIndex), int(id.ShareIndex)) if err != nil { return nil, fmt.Errorf("while getting share with proof: %w", err) } - s := NewSample(id, shr, prf, f.Size()) + s := NewSample(id, shr, prf, proofType) blk, err := s.IPLDBlock() if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) @@ -102,18 +104,18 @@ func (b Blockstore[F]) getSampleBlock(id SampleID) (blocks.Block, error) { return blk, nil } -func (b Blockstore[F]) getAxisBlock(id AxisID) (blocks.Block, error) { +func (b Blockstore[F]) getRowBlock(id RowID) (blocks.Block, error) { f, err := b.fs.File(id.Height) if err != nil { return nil, fmt.Errorf("while getting EDS file from FS: %w", err) } - axisHalf, err := f.AxisHalf(id.AxisType, int(id.AxisIndex)) + axisHalf, err := f.AxisHalf(rsmt2d.Row, int(id.RowIndex)) if err != nil { - return nil, fmt.Errorf("while getting axis half: %w", err) + return nil, fmt.Errorf("while getting AxisHalf: %w", err) } - s := NewAxis(id, axisHalf) + s := NewRow(id, axisHalf) blk, err := s.IPLDBlock() if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) @@ -133,7 +135,7 @@ func (b Blockstore[F]) getDataBlock(id DataID) (blocks.Block, error) { return nil, fmt.Errorf("while getting ODS file from FS: %w", err) } - data, prf, err := f.Data(id.DataNamespace, int(id.AxisIndex)) + data, prf, err := f.Data(id.Namespace(), int(id.RowIndex)) if err != nil { return nil, fmt.Errorf("while getting Data: %w", err) } @@ -166,7 +168,7 @@ func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { } func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { - var id AxisID + var id RowID switch cid.Type() { case sampleCodec: sid, err := SampleIDFromCID(cid) @@ -176,12 +178,12 @@ func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { return false, err } - id = sid.AxisID - case axisCodec: + id = sid.RowID + case rowCodec: var err error - id, err = AxisIDFromCID(cid) + id, err = RowIDFromCID(cid) if err != nil { - err = fmt.Errorf("while converting CID to AxisID: %w", err) + err = fmt.Errorf("while converting CID to RowID: %w", err) log.Error(err) return false, err } @@ -193,7 +195,7 @@ func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { return false, err } - id = did.AxisID + id = did.RowID default: return false, fmt.Errorf("unsupported codec") } diff --git a/share/shwap/blockstore_test.go b/share/shwap/blockstore_test.go index b0dcf85c2a..d3da72e808 100644 --- a/share/shwap/blockstore_test.go +++ b/share/shwap/blockstore_test.go @@ -15,7 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" ) -// TODO(@Wondertan): Add axis and data code +// TODO(@Wondertan): Add row and data code func TestBlockstoreGetShareSample(t *testing.T) { ctx := context.Background() @@ -26,22 +26,19 @@ func TestBlockstoreGetShareSample(t *testing.T) { b := edsBlockstore(sqr) width := int(sqr.Width()) - for _, axisType := range axisTypes { - for i := 0; i < width*width; i++ { - id := NewSampleID(axisType, i, root, 1) - cid, err := id.Cid() - require.NoError(t, err) - - blk, err := b.Get(ctx, cid) - require.NoError(t, err) - - sample, err := SampleFromBlock(blk) - require.NoError(t, err) - - err = sample.Validate() - require.NoError(t, err) - assert.EqualValues(t, id, sample.SampleID) - } + for i := 0; i < width*width; i++ { + id, err := NewSampleID(1, i, root) + require.NoError(t, err) + + blk, err := b.Get(ctx, id.Cid()) + require.NoError(t, err) + + sample, err := 
SampleFromBlock(blk) + require.NoError(t, err) + + err = sample.Verify(root) + require.NoError(t, err) + assert.EqualValues(t, id, sample.SampleID) } } diff --git a/share/shwap/data.go b/share/shwap/data.go index 6b29ace634..69cb13af5f 100644 --- a/share/shwap/data.go +++ b/share/shwap/data.go @@ -17,8 +17,8 @@ import ( type Data struct { DataID - DataProof nmt.Proof DataShares []share.Share + DataProof nmt.Proof } // NewData constructs a new Data. @@ -47,6 +47,11 @@ func NewDataFromEDS( continue } + id, err := NewDataID(height, uint16(rowIdx), namespace, root) + if err != nil { + return nil, err + } + shrs := square.Row(uint(rowIdx)) // TDOD(@Wondertan): This will likely be removed nd, proof, err := eds.NDFromShares(shrs, namespace, rowIdx) @@ -54,7 +59,6 @@ func NewDataFromEDS( return nil, err } - id := NewDataID(rowIdx, root, height, namespace) datas = append(datas, NewData(id, nd, proof)) } @@ -78,17 +82,12 @@ func DataFromBlock(blk blocks.Block) (*Data, error) { // IPLDBlock converts Data to an IPLD block for Bitswap compatibility. func (s *Data) IPLDBlock() (blocks.Block, error) { - cid, err := s.DataID.Cid() - if err != nil { - return nil, err - } - data, err := s.MarshalBinary() if err != nil { return nil, err } - return blocks.NewBlockWithCid(data, cid) + return blocks.NewBlockWithCid(data, s.Cid()) } // MarshalBinary marshals Data to binary. @@ -107,8 +106,8 @@ func (s *Data) MarshalBinary() ([]byte, error) { return (&shwappb.Data{ DataId: id, - DataProof: proof, DataShares: s.DataShares, + DataProof: proof, }).Marshal() } @@ -124,19 +123,19 @@ func (s *Data) UnmarshalBinary(data []byte) error { return err } - s.DataProof = nmt.ProtoToProof(*proto.DataProof) s.DataShares = proto.DataShares + s.DataProof = nmt.ProtoToProof(*proto.DataProof) return nil } -// Validate performs basic validation of Data. -func (s *Data) Validate() error { - if err := s.DataID.Validate(); err != nil { +// Verify validates Data's fields and verifies Data inclusion. +func (s *Data) Verify(root *share.Root) error { + if err := s.DataID.Verify(root); err != nil { return err } - if len(s.DataShares) == 0 { - return fmt.Errorf("empty DataShares") + if len(s.DataShares) == 0 && s.DataProof.IsEmptyProof() { + return fmt.Errorf("empty Data") } shrs := make([][]byte, 0, len(s.DataShares)) @@ -144,8 +143,8 @@ func (s *Data) Validate() error { shrs = append(shrs, append(share.GetNamespace(shr), shr...)) } - s.DataProof.WithHashedProof(hasher()) - if !s.DataProof.VerifyNamespace(hasher(), s.DataNamespace.ToNMT(), shrs, s.AxisHash) { + rowRoot := root.RowRoots[s.RowIndex] + if !s.DataProof.VerifyNamespace(hashFn(), s.Namespace().ToNMT(), shrs, rowRoot) { return fmt.Errorf("invalid DataProof") } diff --git a/share/shwap/data_hasher.go b/share/shwap/data_hasher.go index dd8db7b05e..79c5e523df 100644 --- a/share/shwap/data_hasher.go +++ b/share/shwap/data_hasher.go @@ -7,47 +7,48 @@ import ( // DataHasher implements hash.Hash interface for Data. type DataHasher struct { - data Data + data []byte } // Write expects a marshaled Data to validate. 
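// The rewrite below changes what the "hash" is: Write verifies the serialized
// Data through the registered verifier and retains the raw bytes, while Sum
// slices the DataID straight out of that retained message at a fixed offset
// (pbOffset, presumably the protobuf field tag plus length prefix) instead of
// re-marshaling the ID. Bitswap's multihash of a response thus reproduces the
// DataID carried in the requested CID, tying the response to the request.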
-func (sh *DataHasher) Write(data []byte) (int, error) { - if err := sh.data.UnmarshalBinary(data); err != nil { - err = fmt.Errorf("while unmarshaling Data: %w", err) +func (h *DataHasher) Write(data []byte) (int, error) { + var d Data + if err := d.UnmarshalBinary(data); err != nil { + err = fmt.Errorf("unmarshaling Data: %w", err) log.Error(err) return 0, err } - if err := sh.data.Validate(); err != nil { - err = fmt.Errorf("while validating Data: %w", err) + if err := dataVerifiers.Verify(d.DataID, d); err != nil { + err = fmt.Errorf("verifying Data: %w", err) log.Error(err) return 0, err } + h.data = data return len(data), nil } // Sum returns the "multihash" of the DataID. -func (sh *DataHasher) Sum([]byte) []byte { - sum, err := sh.data.DataID.MarshalBinary() - if err != nil { - err = fmt.Errorf("while marshaling DataID: %w", err) - log.Error(err) +func (h *DataHasher) Sum([]byte) []byte { + if h.data == nil { + return nil } - return sum + const pbOffset = 2 + return h.data[pbOffset : DataIDSize+pbOffset] } // Reset resets the Hash to its initial state. -func (sh *DataHasher) Reset() { - sh.data = Data{} +func (h *DataHasher) Reset() { + h.data = nil } // Size returns the number of bytes Sum will return. -func (sh *DataHasher) Size() int { +func (h *DataHasher) Size() int { return DataIDSize } // BlockSize returns the hash's underlying block size. -func (sh *DataHasher) BlockSize() int { +func (h *DataHasher) BlockSize() int { return sha256.BlockSize } diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go new file mode 100644 index 0000000000..ac6ae0db83 --- /dev/null +++ b/share/shwap/data_hasher_test.go @@ -0,0 +1,45 @@ +package shwap + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestDataHasher(t *testing.T) { + hasher := &DataHasher{} + + _, err := hasher.Write([]byte("hello")) + assert.Error(t, err) + + namespace := sharetest.RandV0Namespace() + square, root := edstest.RandEDSWithNamespace(t, namespace, 8) + + datas, err := NewDataFromEDS(square, 1, namespace) + require.NoError(t, err) + data := datas[0] + + dataVerifiers.Add(data.DataID, func(data Data) error { + return data.Verify(root) + }) + + dat, err := data.MarshalBinary() + require.NoError(t, err) + + n, err := hasher.Write(dat) + require.NoError(t, err) + assert.EqualValues(t, len(dat), n) + + digest := hasher.Sum(nil) + id, err := data.DataID.MarshalBinary() + require.NoError(t, err) + assert.EqualValues(t, id, digest) + + hasher.Reset() + digest = hasher.Sum(nil) + assert.NotEqualValues(t, digest, id) +} diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index 955560a07a..9bc329ef58 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -6,36 +6,31 @@ import ( "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" ) -// DataIDSize is the size of the DataID in bytes -// We cut 1 byte from AxisIDSize because we don't need AxisType -// as its value is always Row. -const DataIDSize = AxisIDSize - 1 + share.NamespaceSize +// DataIDSize is the size of the DataID in bytes. +const DataIDSize = RowIDSize + share.NamespaceSize -// DataID is an unique identifier of a namespaced Data inside EDS Axis. +// DataID is an unique identifier of a namespaced Data inside EDS Row. 
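// Wire layout, per MarshalBinary further down: the RowID bytes followed by
// the raw namespace bytes, so DataIDSize = RowIDSize + share.NamespaceSize
// and the encoding stays deterministic. Protobuf is deliberately avoided for
// ID serialization for the reasons restated above MarshalBinary: its size is
// not deterministic and it has no uint16 support.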
type DataID struct { - AxisID + RowID - // DataNamespace is the namespace of the data. - DataNamespace share.Namespace + // DataNamespace is the namespace of the data + // It's string formatted to keep DataID comparable + DataNamespace string } // NewDataID constructs a new DataID. -func NewDataID(axisIdx int, root *share.Root, height uint64, namespace share.Namespace) DataID { - axisHash := hashBytes(root.RowRoots[axisIdx]) - return DataID{ - AxisID: AxisID{ - AxisType: rsmt2d.Row, - AxisIndex: uint16(axisIdx), - AxisHash: axisHash, - Height: height, +func NewDataID(height uint64, rowIdx uint16, namespace share.Namespace, root *share.Root) (DataID, error) { + did := DataID{ + RowID: RowID{ + RowIndex: rowIdx, + Height: height, }, - DataNamespace: namespace, + DataNamespace: string(namespace), } + return did, did.Verify(root) } // DataIDFromCID coverts CID to DataID. @@ -46,26 +41,31 @@ func DataIDFromCID(cid cid.Cid) (id DataID, err error) { err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) if err != nil { - return id, fmt.Errorf("while unmarhalling DataID: %w", err) + return id, fmt.Errorf("unmarshalling DataID: %w", err) } return id, nil } +// Namespace returns the namespace of the DataID. +func (s DataID) Namespace() share.Namespace { + return share.Namespace(s.DataNamespace) +} + -// Cid returns sample ID encoded as CID. -func (s DataID) Cid() (cid.Cid, error) { +// Cid returns DataID encoded as CID. +func (s DataID) Cid() cid.Cid { // avoid using proto serialization for CID as it's not deterministic data, err := s.MarshalBinary() if err != nil { - return cid.Undef, err + panic(fmt.Errorf("marshaling DataID: %w", err)) } buf, err := mh.Encode(data, dataMultihashCode) if err != nil { - return cid.Undef, err + panic(fmt.Errorf("encoding DataID as CID: %w", err)) } - return cid.NewCidV1(dataCodec, buf), nil + return cid.NewCidV1(dataCodec, buf) } // MarshalBinary encodes DataID into binary form. @@ -73,12 +73,12 @@ func (s DataID) Cid() (cid.Cid, error) { // * Its size is not deterministic which is required for IPLD. // * No support for uint16 func (s DataID) MarshalBinary() ([]byte, error) { - data := make([]byte, 0, DataIDSize+1) - n, err := s.AxisID.MarshalTo(data) + data := make([]byte, 0, DataIDSize) + n, err := s.RowID.MarshalTo(data) if err != nil { return nil, err } - data = data[1:n] // cut the first byte with AxisType + data = data[:n] data = append(data, s.DataNamespace...) return data, nil } @@ -86,23 +86,29 @@ func (s DataID) MarshalBinary() ([]byte, error) { // UnmarshalBinary decodes DataID from binary form. func (s *DataID) UnmarshalBinary(data []byte) error { if len(data) != DataIDSize { - return fmt.Errorf("invalid DataID data length: %d != %d", len(data), DataIDSize) + return fmt.Errorf("invalid DataID data length: %d != %d", len(data), DataIDSize) } - n, err := s.AxisID.UnmarshalFrom(append([]byte{byte(rsmt2d.Row)}, data...)) + n, err := s.RowID.UnmarshalFrom(data) if err != nil { return err } + + ns := share.Namespace(data[n:]) + if err = ns.ValidateForData(); err != nil { + return err + } + + s.DataNamespace = string(ns) return nil } - -// Validate validates fields of DataID. -func (s DataID) Validate() error { - if err := s.AxisID.Validate(); err != nil { - return fmt.Errorf("while validating AxisID: %w", err) +// Verify verifies DataID fields.
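// Construction and verification are now coupled: NewDataID above calls
// Verify(root) before returning, so an invalid data namespace (and,
// presumably, an out-of-range row index via RowID.Verify) is rejected at
// construction time. A minimal usage sketch, assuming a header hdr exposing
// Height() and its DAH:
//
//	did, err := NewDataID(hdr.Height(), 0, ns, hdr.DAH)
//	if err != nil { /* bad namespace or row index */ }
//	c := did.Cid() // deterministic; panics only on marshaling bugs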
+func (s DataID) Verify(root *share.Root) error { + if err := s.RowID.Verify(root); err != nil { + return fmt.Errorf("validating RowID: %w", err) } - if err := s.DataNamespace.ValidateForData(); err != nil { - return fmt.Errorf("while validating DataNamespace: %w", err) + if err := s.Namespace().ValidateForData(); err != nil { + return fmt.Errorf("validating DataNamespace: %w", err) } return nil diff --git a/share/shwap/data_id_test.go b/share/shwap/data_id_test.go index d5dcf56140..1068d4d56f 100644 --- a/share/shwap/data_id_test.go +++ b/share/shwap/data_id_test.go @@ -10,27 +10,26 @@ import ( "github.com/celestiaorg/celestia-node/share/sharetest" ) -// TODO: Add test that AxisType is not serialized func TestDataID(t *testing.T) { ns := sharetest.RandV0Namespace() _, root := edstest.RandEDSWithNamespace(t, ns, 4) - sid := NewDataID(2, root, 1, ns) - id, err := sid.Cid() + id, err := NewDataID(1, 1, ns, root) require.NoError(t, err) - assert.EqualValues(t, dataCodec, id.Prefix().Codec) - assert.EqualValues(t, dataMultihashCode, id.Prefix().MhType) - assert.EqualValues(t, DataIDSize, id.Prefix().MhLength) + cid := id.Cid() + assert.EqualValues(t, dataCodec, cid.Prefix().Codec) + assert.EqualValues(t, dataMultihashCode, cid.Prefix().MhType) + assert.EqualValues(t, DataIDSize, cid.Prefix().MhLength) - data, err := sid.MarshalBinary() + data, err := id.MarshalBinary() require.NoError(t, err) sidOut := DataID{} err = sidOut.UnmarshalBinary(data) require.NoError(t, err) - assert.EqualValues(t, sid, sidOut) + assert.EqualValues(t, id, sidOut) - err = sidOut.Validate() + err = sidOut.Verify(root) require.NoError(t, err) } diff --git a/share/shwap/data_test.go b/share/shwap/data_test.go index 65a9516716..43e44d547c 100644 --- a/share/shwap/data_test.go +++ b/share/shwap/data_test.go @@ -12,7 +12,7 @@ import ( func TestData(t *testing.T) { namespace := sharetest.RandV0Namespace() - square, _ := edstest.RandEDSWithNamespace(t, namespace, 8) + square, root := edstest.RandEDSWithNamespace(t, namespace, 8) nds, err := NewDataFromEDS(square, 1, namespace) require.NoError(t, err) @@ -23,16 +23,13 @@ func TestData(t *testing.T) { blk, err := nd.IPLDBlock() require.NoError(t, err) + assert.EqualValues(t, blk.Cid(), nd.Cid()) - cid, err := nd.DataID.Cid() + dataOut := &Data{} + err = dataOut.UnmarshalBinary(data) require.NoError(t, err) - assert.EqualValues(t, blk.Cid(), cid) + assert.EqualValues(t, nd, dataOut) - ndOut := &Data{} - err = ndOut.UnmarshalBinary(data) - require.NoError(t, err) - assert.EqualValues(t, nd, ndOut) - - err = ndOut.Validate() + err = dataOut.Verify(root) require.NoError(t, err) } diff --git a/share/shwap/getter.go b/share/shwap/getter.go index 4c3e2ef741..9845f618f1 100644 --- a/share/shwap/getter.go +++ b/share/shwap/getter.go @@ -3,7 +3,7 @@ package shwap import ( "context" "fmt" - "slices" + "sync" "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/exchange" @@ -17,6 +17,7 @@ import ( "github.com/celestiaorg/celestia-node/share" ) +// TODO: GetRow method type Getter struct { fetch exchange.SessionExchange bstore blockstore.Blockstore @@ -26,75 +27,94 @@ func NewGetter(fetch exchange.SessionExchange, bstore blockstore.Blockstore) *Ge return &Getter{fetch: fetch, bstore: bstore} } +// TODO: Make GetSamples so it provides proofs to users. // GetShares fetches in the Block/EDS by their indexes. // Automatically caches them on the Blockstore. // Guarantee that the returned shares are in the same order as shrIdxs. 
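// Shape of the reworked flow below: build a SampleID per requested index,
// register a verifier for each (so the Bitswap hasher checks DAH inclusion
// before a block is accepted), and only then start fetching; blocks are
// persisted and announced via NotifyNewBlocks once all samples arrive, and
// results are reordered to match the request. A sketch of the ordering
// guarantee, assuming a constructed Getter and header:
//
//	shrs, _ := getter.GetShares(ctx, hdr, 3, 1)
//	// shrs[0] is the share at sample index 3, shrs[1] the one at index 1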
-func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, shrIdxs ...int) ([]share.Share, error) { - maxIdx := len(hdr.DAH.RowRoots) * len(hdr.DAH.ColumnRoots) - cids := make([]cid.Cid, len(shrIdxs)) - for i, shrIdx := range shrIdxs { - if shrIdx < 0 || shrIdx >= maxIdx { - return nil, fmt.Errorf("share index %d is out of bounds", shrIdx) +func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smplIdxs ...int) ([]share.Share, error) { + sids := make([]SampleID, len(smplIdxs)) + for i, shrIdx := range smplIdxs { + sid, err := NewSampleID(hdr.Height(), shrIdx, hdr.DAH) + if err != nil { + return nil, err + } + + sids[i] = sid + } + + smplsMu := sync.Mutex{} + smpls := make(map[int]Sample, len(smplIdxs)) + verifyFn := func(s Sample) error { + err := s.Verify(hdr.DAH) + if err != nil { + return err } - cids[i] = MustSampleCID(shrIdx, hdr.DAH, hdr.Height()) + + smplIdx := int(s.SampleID.RowIndex)*len(hdr.DAH.RowRoots) + int(s.SampleID.ShareIndex) + smplsMu.Lock() + smpls[smplIdx] = s + smplsMu.Unlock() + return nil + } + + cids := make([]cid.Cid, len(smplIdxs)) + for i, sid := range sids { + sampleVerifiers.Add(sid, verifyFn) + cids[i] = sid.Cid() } ctx, cancel := context.WithCancel(ctx) defer cancel() ses := g.fetch.NewSession(ctx) - + // must start getting only after verifiers are registered blkCh, err := ses.GetBlocks(ctx, cids) if err != nil { return nil, fmt.Errorf("fetching blocks: %w", err) } - - blks := make([]block.Block, 0, len(cids)) - smpls := make(map[int]*Sample, len(cids)) - for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to - smpl, err := SampleFromBlock(blk) - if err != nil { - // NOTE: Should never error in fact, as Hasher already validated the block - return nil, fmt.Errorf("converting block to Sample: %w", err) - } - - shrIdx := int(smpl.SampleID.AxisIndex)*len(hdr.DAH.RowRoots) + int(smpl.SampleID.ShareIndex) - smpls[shrIdx] = smpl - + // GetBlocks handles ctx and closes blkCh, so we don't have to + blks := make([]block.Block, 0, len(smplIdxs)) + for blk := range blkCh { blks = append(blks, blk) } - - if len(blks) != len(shrIdxs) { + // only persist when all samples received + if len(blks) != len(smplIdxs) { if ctx.Err() != nil { return nil, ctx.Err() } return nil, fmt.Errorf("not all shares were found") } - + // ensure we persist samples/blks and make them available for Bitswap err = g.bstore.PutMany(ctx, blks) if err != nil { return nil, fmt.Errorf("storing shares: %w", err) } - - err = g.fetch.NotifyNewBlocks(ctx, blks...) // tell bitswap that we stored the blks and can serve them now + // tell bitswap that we stored the blks and can serve them now + err = g.fetch.NotifyNewBlocks(ctx, blks...) 
if err != nil { return nil, fmt.Errorf("notifying new shares: %w", err) } - shrs := make([]share.Share, len(shrIdxs)) - for i, shrIdx := range shrIdxs { - shrs[i] = smpls[shrIdx].SampleShare + // ensure we return shares in the requested order + shrs := make([]share.Share, len(smplIdxs)) + for i, smplIdx := range smplIdxs { + shrs[i] = smpls[smplIdx].SampleShare } return shrs, nil } // GetEDS -// TODO(@Wondertan): Consider requesting randomized rows and cols instead of ODS only +// TODO(@Wondertan): Consider requesting randomized rows instead of ODS only func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { sqrLn := len(hdr.DAH.RowRoots) - cids := make([]cid.Cid, sqrLn/2) + rids := make([]RowID, sqrLn/2) for i := 0; i < sqrLn/2; i++ { - cids[i] = MustAxisCID(rsmt2d.Row, i, hdr.DAH, hdr.Height()) + rid, err := NewRowID(hdr.Height(), uint16(i), hdr.DAH) + if err != nil { + return nil, err + } + + rids[i] = rid } square, err := rsmt2d.NewExtendedDataSquare( @@ -106,32 +126,42 @@ func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2 return nil, err } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - ses := g.fetch.NewSession(ctx) - - blkCh, err := ses.GetBlocks(ctx, cids) - if err != nil { - return nil, fmt.Errorf("fetching blocks: %w", err) - } - - for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to - axis, err := AxisFromBlock(blk) + verifyFn := func(row Row) error { + err := row.Verify(hdr.DAH) if err != nil { - // NOTE: Should never error in fact, as Hasher already validated the block - return nil, fmt.Errorf("converting block to Axis: %w", err) + return err } - for shrIdx, shr := range axis.AxisShares { - err = square.SetCell(uint(axis.AxisIndex), uint(shrIdx), shr) + for shrIdx, shr := range row.RowShares { + err = square.SetCell(uint(row.RowIndex), uint(shrIdx), shr) // no synchronization needed if err != nil { panic(err) // this should never happen and if it is... 
something is really wrong } } + + return nil + } + + cids := make([]cid.Cid, sqrLn/2) + for i, rid := range rids { + rowVerifiers.Add(rid, verifyFn) + cids[i] = rid.Cid() + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ses := g.fetch.NewSession(ctx) + // must start getting only after verifiers are registered + blkCh, err := ses.GetBlocks(ctx, cids) + if err != nil { + return nil, fmt.Errorf("fetching blocks: %w", err) + } + // GetBlocks handles ctx by closing blkCh, so we don't have to + for range blkCh { //nolint:revive // it complains on empty block, but the code is functional + // we handle writes in verifyFn so just wait for as many results as possible } - // TODO(@Wondertan): Figure out a way to avoid recompute of what has been already computed - // during verification in AxisHasher + // and try to repair err = square.Repair(hdr.DAH.RowRoots, hdr.DAH.ColumnRoots) if err != nil { if ctx.Err() != nil { @@ -152,53 +182,63 @@ func (g *Getter) GetSharesByNamespace( return nil, err } - var cids []cid.Cid //nolint:prealloc// we don't know how many rows with needed namespace there are + var dids []DataID //nolint:prealloc// we don't know how many rows with needed namespace there are for rowIdx, rowRoot := range hdr.DAH.RowRoots { if ns.IsOutsideRange(rowRoot, rowRoot) { continue } - cids = append(cids, MustDataCID(rowIdx, hdr.DAH, hdr.Height(), ns)) + did, err := NewDataID(hdr.Height(), uint16(rowIdx), ns, hdr.DAH) + if err != nil { + return nil, err + } + + dids = append(dids, did) } - if len(cids) == 0 { + if len(dids) == 0 { return share.NamespacedShares{}, nil } + datas := make([]Data, len(dids)) + verifyFn := func(d Data) error { + err := d.Verify(hdr.DAH) + if err != nil { + return err + } + + nsStartIdx := dids[0].RowIndex + idx := d.RowIndex - nsStartIdx + datas[idx] = d + return nil + } + + cids := make([]cid.Cid, len(dids)) + for i, did := range dids { + dataVerifiers.Add(did, verifyFn) + cids[i] = did.Cid() + } + ctx, cancel := context.WithCancel(ctx) defer cancel() ses := g.fetch.NewSession(ctx) - + // must start getting only after verifiers are registered blkCh, err := ses.GetBlocks(ctx, cids) if err != nil { return nil, fmt.Errorf("fetching blocks:%w", err) } - - datas := make([]*Data, 0, len(cids)) - for blk := range blkCh { // NOTE: GetBlocks handles ctx, so we don't have to - data, err := DataFromBlock(blk) - if err != nil { - // NOTE: Should never error in fact, as Hasher already validated the block - return nil, fmt.Errorf("converting block to Data: %w", err) - } - - datas = append(datas, data) + // GetBlocks handles ctx by closing blkCh, so we don't have to + for range blkCh { //nolint:revive // it complains on empty block, but the code is functional + // we handle writes in verifyFn so just wait for as many results as possible } - slices.SortFunc(datas, func(a, b *Data) int { - if a.DataID.AxisIndex < b.DataID.AxisIndex { - return -1 - } - return 1 - }) - - nShrs := make(share.NamespacedShares, len(datas)) - for i, row := range datas { - nShrs[i] = share.NamespacedRow{ + nShrs := make([]share.NamespacedRow, 0, len(datas)) + for _, row := range datas { + proof := row.DataProof + nShrs = append(nShrs, share.NamespacedRow{ Shares: row.DataShares, - Proof: &row.DataProof, - } + Proof: &proof, + }) } - // NOTE: We don't need to call Verify here as Bitswap already did it for us internal. 
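// The NOTE removed above still holds in spirit: every Data accepted into
// datas was already verified against the DAH by the verifier registered in
// dataVerifiers, so no re-verification is needed before returning.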
return nShrs, nil } diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index a51d6775e6..b1e479b057 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -11,8 +11,10 @@ import ( "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/exchange" - "github.com/ipfs/boxo/exchange/offline" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + format "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,15 +32,15 @@ func TestGetter(t *testing.T) { defer cancel() ns := sharetest.RandV0Namespace() - square, root := edstest.RandEDSWithNamespace(t, ns, 16) - hdr := &header.ExtendedHeader{DAH: root} + square, root := edstest.RandEDSWithNamespace(t, ns, 4) + hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} bstore := edsBlockstore(square) - exch := dummySessionExchange{offline.Exchange(bstore)} + exch := dummySessionExchange{bstore} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) t.Run("GetShares", func(t *testing.T) { - idxs := rand.Perm(int(square.Width() ^ 2))[:30] + idxs := rand.Perm(int(square.Width() ^ 2))[:10] shrs, err := get.GetShares(ctx, hdr, idxs...) assert.NoError(t, err) @@ -83,10 +85,10 @@ func TestGetter(t *testing.T) { square := edstest.RandEDS(t, 8) root, err := share.NewRoot(square) require.NoError(t, err) - hdr := &header.ExtendedHeader{DAH: root} + hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} bstore := edsBlockstore(square) - exch := &dummySessionExchange{offline.Exchange(bstore)} + exch := &dummySessionExchange{bstore} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) @@ -110,7 +112,7 @@ func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) if val == 0 { return namespace, nil } - // Convert the input integer to a byte slice and add it to result slice + // Convert the input integer to a byte slice and Add it to result slice result := make([]byte, len(namespace)) if val > 0 { binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(val)) @@ -151,9 +153,59 @@ func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) } type dummySessionExchange struct { - exchange.Interface + blockstore.Blockstore } -func (d dummySessionExchange) NewSession(context.Context) exchange.Fetcher { - return d +func (e dummySessionExchange) NewSession(context.Context) exchange.Fetcher { + return e +} + +func (e dummySessionExchange) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { + blk, err := e.Get(ctx, k) + if format.IsNotFound(err) { + return nil, fmt.Errorf("block was not found locally (offline): %w", err) + } + rbcid, err := k.Prefix().Sum(blk.RawData()) + if err != nil { + return nil, err + } + + if !rbcid.Equals(k) { + return nil, blockstore.ErrHashMismatch + } + return blk, err +} + +func (e dummySessionExchange) NotifyNewBlocks(context.Context, ...blocks.Block) error { + return nil +} + +func (e dummySessionExchange) GetBlocks(ctx context.Context, ks []cid.Cid) (<-chan blocks.Block, error) { + out := make(chan blocks.Block) + go func() { + defer close(out) + for _, k := range ks { + hit, err := e.GetBlock(ctx, k) + if err != nil { + select { + case <-ctx.Done(): + return + default: + continue + } + } + select { + case out <- hit: + case <-ctx.Done(): + return + } + } + 
}() + return out, nil +} + +func (e dummySessionExchange) Close() error { + // NB: exchange doesn't own the blockstore's underlying datastore, so it is + // not responsible for closing it. + return nil } diff --git a/share/shwap/pb/shwap_pb.pb.go b/share/shwap/pb/shwap_pb.pb.go index 87905655e1..bb72bed41a 100644 --- a/share/shwap/pb/shwap_pb.pb.go +++ b/share/shwap/pb/shwap_pb.pb.go @@ -23,73 +23,48 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type AxisType int32 +type SampleProofType int32 const ( - AxisType_Row AxisType = 0 - AxisType_Col AxisType = 1 + SampleProofType_RowSampleProofType SampleProofType = 0 + SampleProofType_ColSampleProofType SampleProofType = 1 ) -var AxisType_name = map[int32]string{ - 0: "Row", - 1: "Col", +var SampleProofType_name = map[int32]string{ + 0: "RowSampleProofType", + 1: "ColSampleProofType", } -var AxisType_value = map[string]int32{ - "Row": 0, - "Col": 1, +var SampleProofType_value = map[string]int32{ + "RowSampleProofType": 0, + "ColSampleProofType": 1, } -func (x AxisType) String() string { - return proto.EnumName(AxisType_name, int32(x)) +func (x SampleProofType) String() string { + return proto.EnumName(SampleProofType_name, int32(x)) } -func (AxisType) EnumDescriptor() ([]byte, []int) { +func (SampleProofType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_fdfe0676a85dc852, []int{0} } -type SampleType int32 - -const ( - SampleType_DataSample SampleType = 0 - SampleType_ParitySample SampleType = 1 -) - -var SampleType_name = map[int32]string{ - 0: "DataSample", - 1: "ParitySample", -} - -var SampleType_value = map[string]int32{ - "DataSample": 0, - "ParitySample": 1, -} - -func (x SampleType) String() string { - return proto.EnumName(SampleType_name, int32(x)) +type Row struct { + RowId []byte `protobuf:"bytes,1,opt,name=row_id,json=rowId,proto3" json:"row_id,omitempty"` + RowHalf [][]byte `protobuf:"bytes,2,rep,name=row_half,json=rowHalf,proto3" json:"row_half,omitempty"` } -func (SampleType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_fdfe0676a85dc852, []int{1} -} - -type Axis struct { - AxisId []byte `protobuf:"bytes,1,opt,name=axis_id,json=axisId,proto3" json:"axis_id,omitempty"` - AxisHalf [][]byte `protobuf:"bytes,2,rep,name=axis_half,json=axisHalf,proto3" json:"axis_half,omitempty"` -} - -func (m *Axis) Reset() { *m = Axis{} } -func (m *Axis) String() string { return proto.CompactTextString(m) } -func (*Axis) ProtoMessage() {} -func (*Axis) Descriptor() ([]byte, []int) { +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { return fileDescriptor_fdfe0676a85dc852, []int{0} } -func (m *Axis) XXX_Unmarshal(b []byte) error { +func (m *Row) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Axis) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Axis.Marshal(b, m, deterministic) + return xxx_messageInfo_Row.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -99,37 +74,37 @@ func (m *Axis) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Axis) XXX_Merge(src proto.Message) { - xxx_messageInfo_Axis.Merge(m, src) +func (m *Row) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Row.Merge(m, src) } -func (m *Axis) XXX_Size() int { +func (m *Row) XXX_Size() int { return m.Size() } -func (m *Axis) XXX_DiscardUnknown() { - xxx_messageInfo_Axis.DiscardUnknown(m) +func (m *Row) XXX_DiscardUnknown() { + xxx_messageInfo_Row.DiscardUnknown(m) } -var xxx_messageInfo_Axis proto.InternalMessageInfo +var xxx_messageInfo_Row proto.InternalMessageInfo -func (m *Axis) GetAxisId() []byte { +func (m *Row) GetRowId() []byte { if m != nil { - return m.AxisId + return m.RowId } return nil } -func (m *Axis) GetAxisHalf() [][]byte { +func (m *Row) GetRowHalf() [][]byte { if m != nil { - return m.AxisHalf + return m.RowHalf } return nil } type Sample struct { - SampleId []byte `protobuf:"bytes,1,opt,name=sample_id,json=sampleId,proto3" json:"sample_id,omitempty"` - SampleType SampleType `protobuf:"varint,2,opt,name=sample_type,json=sampleType,proto3,enum=SampleType" json:"sample_type,omitempty"` - SampleShare []byte `protobuf:"bytes,3,opt,name=sample_share,json=sampleShare,proto3" json:"sample_share,omitempty"` - SampleProof *pb.Proof `protobuf:"bytes,4,opt,name=sample_proof,json=sampleProof,proto3" json:"sample_proof,omitempty"` + SampleId []byte `protobuf:"bytes,1,opt,name=sample_id,json=sampleId,proto3" json:"sample_id,omitempty"` + SampleType SampleProofType `protobuf:"varint,2,opt,name=sample_type,json=sampleType,proto3,enum=SampleProofType" json:"sample_type,omitempty"` + SampleShare []byte `protobuf:"bytes,3,opt,name=sample_share,json=sampleShare,proto3" json:"sample_share,omitempty"` + SampleProof *pb.Proof `protobuf:"bytes,4,opt,name=sample_proof,json=sampleProof,proto3" json:"sample_proof,omitempty"` } func (m *Sample) Reset() { *m = Sample{} } @@ -172,11 +147,11 @@ func (m *Sample) GetSampleId() []byte { return nil } -func (m *Sample) GetSampleType() SampleType { +func (m *Sample) GetSampleType() SampleProofType { if m != nil { return m.SampleType } - return SampleType_DataSample + return SampleProofType_RowSampleProofType } func (m *Sample) GetSampleShare() []byte { @@ -254,9 +229,8 @@ func (m *Data) GetDataProof() *pb.Proof { } func init() { - proto.RegisterEnum("AxisType", AxisType_name, AxisType_value) - proto.RegisterEnum("SampleType", SampleType_name, SampleType_value) - proto.RegisterType((*Axis)(nil), "Axis") + proto.RegisterEnum("SampleProofType", SampleProofType_name, SampleProofType_value) + proto.RegisterType((*Row)(nil), "Row") proto.RegisterType((*Sample)(nil), "Sample") proto.RegisterType((*Data)(nil), "Data") } @@ -264,32 +238,31 @@ func init() { func init() { proto.RegisterFile("share/shwap/pb/shwap_pb.proto", fileDescriptor_fdfe0676a85dc852) } var fileDescriptor_fdfe0676a85dc852 = []byte{ - // 337 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xbd, 0x6e, 0xf2, 0x40, - 0x10, 0xf4, 0x61, 0xc4, 0xcf, 0xda, 0xe2, 0xb3, 0xae, 0xf9, 0xac, 0xfc, 0x38, 0x0e, 0x95, 0x85, - 0x22, 0x23, 0x91, 0x36, 0x4d, 0x7e, 0x8a, 0xd0, 0x21, 0x93, 0x1e, 0x9d, 0x65, 0x23, 0x2c, 0x39, - 0xba, 0x93, 0xcf, 0x12, 0xf0, 0x16, 0x79, 0x8e, 0x3c, 0x49, 0x4a, 0xca, 0x94, 0x11, 0xbc, 0x48, - 0xb4, 0xeb, 0x0b, 0xa4, 0x48, 0x37, 0x33, 0x3b, 0xb7, 0xe3, 0x59, 0xc3, 0xa5, 0x5e, 0x89, 0x2a, - 0x1f, 0xeb, 0xd5, 0x5a, 0xa8, 0xb1, 0x4a, 0x1b, 0xb0, 0x50, 0x69, 0xac, 0x2a, 0x59, 0xcb, 0xb3, - 0x81, 0x4a, 0xc7, 0xaa, 0x92, 0x72, 0xd9, 0xf0, 0xe1, 0x1d, 0xb4, 0xef, 0x37, 0x85, 0xe6, 0xff, - 0xa1, 0x2b, 0x36, 0x85, 0x5e, 0x14, 0x99, 0xcf, 0x42, 0x16, 0xb9, 0x49, 0x07, 0xe9, 0x34, 0xe3, - 0xe7, 0xd0, 0xa7, 0xc1, 0x4a, 0x94, 
0x4b, 0xbf, 0x15, 0xda, 0x91, 0x9b, 0xf4, 0x50, 0x78, 0x16, - 0xe5, 0x72, 0xf8, 0xce, 0xa0, 0x33, 0x17, 0xaf, 0xaa, 0xcc, 0xd1, 0xa7, 0x09, 0x9d, 0x56, 0xf4, - 0x1a, 0x61, 0x9a, 0xf1, 0x1b, 0x70, 0xcc, 0xb0, 0xde, 0xaa, 0xdc, 0x6f, 0x85, 0x2c, 0x1a, 0x4c, - 0x9c, 0xb8, 0x79, 0xfa, 0xb2, 0x55, 0x79, 0x02, 0xfa, 0x88, 0xf9, 0x35, 0xb8, 0xc6, 0x4d, 0x5d, - 0x7c, 0x9b, 0xb6, 0x99, 0x0d, 0x73, 0x94, 0xf8, 0xe4, 0x68, 0xa1, 0x32, 0x7e, 0x3b, 0x64, 0x91, - 0x33, 0xf9, 0x17, 0x9b, 0x6a, 0x69, 0x3c, 0x43, 0xf0, 0xf3, 0x86, 0xc8, 0x50, 0x41, 0xfb, 0x49, - 0xd4, 0x02, 0xab, 0x66, 0xa2, 0x16, 0xbf, 0xaa, 0x22, 0x9d, 0x66, 0xfc, 0x0a, 0x1c, 0x1a, 0x50, - 0xaa, 0x36, 0x65, 0x01, 0x25, 0x0a, 0xd5, 0x3c, 0x06, 0x62, 0x26, 0xd3, 0xfe, 0x3b, 0xb3, 0x8f, - 0x16, 0x82, 0xa3, 0x0b, 0xe8, 0xe1, 0x71, 0xa9, 0x54, 0x17, 0xec, 0x44, 0xae, 0x3d, 0x0b, 0xc1, - 0xa3, 0x2c, 0x3d, 0x36, 0x8a, 0x01, 0x4e, 0x07, 0xe0, 0x03, 0x00, 0xfc, 0xba, 0x46, 0xf1, 0x2c, - 0xee, 0x81, 0x3b, 0x13, 0x55, 0x51, 0x6f, 0x8d, 0xc2, 0x1e, 0xfc, 0x8f, 0x7d, 0xc0, 0x76, 0xfb, - 0x80, 0x7d, 0xed, 0x03, 0xf6, 0x76, 0x08, 0xac, 0xdd, 0x21, 0xb0, 0x3e, 0x0f, 0x81, 0x95, 0x76, - 0xe8, 0x5f, 0xde, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xad, 0x11, 0x6f, 0x50, 0xfc, 0x01, 0x00, - 0x00, -} - -func (m *Axis) Marshal() (dAtA []byte, err error) { + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4d, 0x4f, 0xc2, 0x40, + 0x14, 0xec, 0x52, 0x2c, 0xf0, 0x20, 0x40, 0x36, 0x51, 0x57, 0x8d, 0x6b, 0xe5, 0xd4, 0x78, 0x28, + 0x11, 0x0f, 0x9e, 0xfd, 0x38, 0xc8, 0xcd, 0x14, 0xef, 0x64, 0x9b, 0x96, 0x60, 0x52, 0xb3, 0x9b, + 0x6d, 0x93, 0x0d, 0xff, 0xc2, 0x7f, 0xe2, 0xdf, 0xf0, 0xc8, 0xd1, 0xa3, 0x81, 0x3f, 0x62, 0xf6, + 0x75, 0x95, 0x84, 0x78, 0x9b, 0x99, 0x7d, 0xf3, 0xe6, 0x4d, 0x16, 0xce, 0xcb, 0xa5, 0xd0, 0xf9, + 0xb8, 0x5c, 0x1a, 0xa1, 0xc6, 0x2a, 0xad, 0xc1, 0x5c, 0xa5, 0xb1, 0xd2, 0xb2, 0x92, 0xa7, 0x7d, + 0x95, 0x8e, 0x95, 0x96, 0x72, 0x51, 0xf3, 0xd1, 0x2d, 0xf8, 0x89, 0x34, 0xf4, 0x10, 0x02, 0x2d, + 0xcd, 0xfc, 0x35, 0x63, 0x24, 0x24, 0x51, 0x2f, 0x39, 0xd0, 0xd2, 0x4c, 0x33, 0x7a, 0x02, 0x6d, + 0x2b, 0x2f, 0x45, 0xb1, 0x60, 0x8d, 0xd0, 0x8f, 0x7a, 0x49, 0x4b, 0x4b, 0xf3, 0x24, 0x8a, 0xc5, + 0xe8, 0x83, 0x40, 0x30, 0x13, 0x6f, 0xaa, 0xc8, 0xe9, 0x19, 0x74, 0x4a, 0x44, 0x3b, 0x7f, 0xbb, + 0x16, 0xa6, 0x19, 0xbd, 0x86, 0xae, 0x7b, 0xac, 0x56, 0x2a, 0x67, 0x8d, 0x90, 0x44, 0xfd, 0xc9, + 0x30, 0xae, 0xad, 0xcf, 0xf6, 0x92, 0x97, 0x95, 0xca, 0x13, 0xa8, 0x87, 0x2c, 0xa6, 0x97, 0xd0, + 0x73, 0x16, 0xec, 0xc2, 0x7c, 0x5c, 0xe9, 0xd6, 0xcc, 0xac, 0x44, 0x27, 0x7f, 0x23, 0x58, 0x86, + 0x35, 0x43, 0x12, 0x75, 0x27, 0x83, 0xd8, 0x55, 0x4b, 0x63, 0xdc, 0xfc, 0xeb, 0x41, 0x32, 0x52, + 0xd0, 0x7c, 0x14, 0x95, 0xa0, 0xc7, 0xd0, 0xca, 0x44, 0x25, 0x76, 0xc7, 0x06, 0x96, 0x4e, 0x33, + 0x7a, 0x01, 0x5d, 0x7c, 0xc0, 0xd4, 0xd2, 0x15, 0x06, 0x2b, 0x61, 0x68, 0x49, 0x63, 0x40, 0xe6, + 0x32, 0xfd, 0xff, 0x33, 0x3b, 0x76, 0x04, 0xe1, 0xd5, 0x1d, 0x0c, 0xf6, 0x7a, 0xd2, 0x23, 0xa0, + 0x89, 0x34, 0x7b, 0xea, 0xd0, 0xb3, 0xfa, 0x83, 0x2c, 0xf6, 0x75, 0x72, 0xcf, 0x3e, 0x37, 0x9c, + 0xac, 0x37, 0x9c, 0x7c, 0x6f, 0x38, 0x79, 0xdf, 0x72, 0x6f, 0xbd, 0xe5, 0xde, 0xd7, 0x96, 0x7b, + 0x69, 0x80, 0x1f, 0x78, 0xf3, 0x13, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x0e, 0x39, 0xc1, 0xf1, 0x01, + 0x00, 0x00, +} + +func (m *Row) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -299,29 +272,29 @@ func (m *Axis) Marshal() (dAtA []byte, err error) { return 
dAtA[:n], nil } -func (m *Axis) MarshalTo(dAtA []byte) (int, error) { +func (m *Row) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Axis) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Row) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.AxisHalf) > 0 { - for iNdEx := len(m.AxisHalf) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AxisHalf[iNdEx]) - copy(dAtA[i:], m.AxisHalf[iNdEx]) - i = encodeVarintShwapPb(dAtA, i, uint64(len(m.AxisHalf[iNdEx]))) + if len(m.RowHalf) > 0 { + for iNdEx := len(m.RowHalf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RowHalf[iNdEx]) + copy(dAtA[i:], m.RowHalf[iNdEx]) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.RowHalf[iNdEx]))) i-- dAtA[i] = 0x12 } } - if len(m.AxisId) > 0 { - i -= len(m.AxisId) - copy(dAtA[i:], m.AxisId) - i = encodeVarintShwapPb(dAtA, i, uint64(len(m.AxisId))) + if len(m.RowId) > 0 { + i -= len(m.RowId) + copy(dAtA[i:], m.RowId) + i = encodeVarintShwapPb(dAtA, i, uint64(len(m.RowId))) i-- dAtA[i] = 0xa } @@ -444,18 +417,18 @@ func encodeVarintShwapPb(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *Axis) Size() (n int) { +func (m *Row) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.AxisId) + l = len(m.RowId) if l > 0 { n += 1 + l + sovShwapPb(uint64(l)) } - if len(m.AxisHalf) > 0 { - for _, b := range m.AxisHalf { + if len(m.RowHalf) > 0 { + for _, b := range m.RowHalf { l = len(b) n += 1 + l + sovShwapPb(uint64(l)) } @@ -516,7 +489,7 @@ func sovShwapPb(x uint64) (n int) { func sozShwapPb(x uint64) (n int) { return sovShwapPb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *Axis) Unmarshal(dAtA []byte) error { +func (m *Row) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -539,15 +512,15 @@ func (m *Axis) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Axis: wiretype end group for non-group") + return fmt.Errorf("proto: Row: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Axis: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Row: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AxisId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -574,14 +547,14 @@ func (m *Axis) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AxisId = append(m.AxisId[:0], dAtA[iNdEx:postIndex]...) - if m.AxisId == nil { - m.AxisId = []byte{} + m.RowId = append(m.RowId[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RowId == nil { + m.RowId = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AxisHalf", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowHalf", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -608,8 +581,8 @@ func (m *Axis) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AxisHalf = append(m.AxisHalf, make([]byte, postIndex-iNdEx)) - copy(m.AxisHalf[len(m.AxisHalf)-1], dAtA[iNdEx:postIndex]) + m.RowHalf = append(m.RowHalf, make([]byte, postIndex-iNdEx)) + copy(m.RowHalf[len(m.RowHalf)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -709,7 +682,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SampleType |= SampleType(b&0x7F) << shift + m.SampleType |= SampleProofType(b&0x7F) << shift if b < 0x80 { break } diff --git a/share/shwap/pb/shwap_pb.proto b/share/shwap/pb/shwap_pb.proto index e6f19f252a..2cc68d2333 100644 --- a/share/shwap/pb/shwap_pb.proto +++ b/share/shwap/pb/shwap_pb.proto @@ -2,24 +2,19 @@ syntax = "proto3"; import "pb/proof.proto"; // celestiaorg/nmt/pb/proof.proto -enum AxisType { - Row = 0; - Col = 1; +message Row { + bytes row_id = 1; + repeated bytes row_half = 2; } -message Axis { - bytes axis_id = 1; - repeated bytes axis_half = 2; -} - -enum SampleType { - DataSample = 0; - ParitySample = 1; +enum SampleProofType { + RowSampleProofType = 0; + ColSampleProofType = 1; } message Sample { bytes sample_id = 1; - SampleType sample_type = 2; + SampleProofType sample_type = 2; bytes sample_share = 3; proof.pb.Proof sample_proof = 4; } diff --git a/share/shwap/row.go b/share/shwap/row.go new file mode 100644 index 0000000000..f2785870c4 --- /dev/null +++ b/share/shwap/row.go @@ -0,0 +1,140 @@ +package shwap + +import ( + "bytes" + "fmt" + + blocks "github.com/ipfs/go-block-format" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +// Row represents a Row of an EDS. +type Row struct { + RowID + + // RowShares is the original non erasure-coded half of the Row. + RowShares []share.Share +} + +// NewRow constructs a new Row. +func NewRow(id RowID, axisHalf []share.Share) *Row { + return &Row{ + RowID: id, + RowShares: axisHalf, + } +} + +// NewRowFromEDS constructs a new Row from the given EDS. +func NewRowFromEDS( + height uint64, + rowIdx int, + square *rsmt2d.ExtendedDataSquare, +) (*Row, error) { + sqrLn := int(square.Width()) + axisHalf := square.Row(uint(rowIdx))[:sqrLn/2] + + root, err := share.NewRoot(square) + if err != nil { + return nil, err + } + + id, err := NewRowID(height, uint16(rowIdx), root) + if err != nil { + return nil, err + } + + return NewRow(id, axisHalf), nil +} + +// RowFromBlock converts blocks.Block into Row. +func RowFromBlock(blk blocks.Block) (*Row, error) { + if err := validateCID(blk.Cid()); err != nil { + return nil, err + } + + s := &Row{} + err := s.UnmarshalBinary(blk.RawData()) + if err != nil { + return nil, fmt.Errorf("while unmarshalling Row: %w", err) + } + + return s, nil +} + +// IPLDBlock converts Row to an IPLD block for Bitswap compatibility. +func (r *Row) IPLDBlock() (blocks.Block, error) { + data, err := r.MarshalBinary() + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, r.Cid()) +} + +// MarshalBinary marshals Row to binary. 
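+// The wire format is the protobuf Row message above: row_id bytes followed by
+// the original (non-parity) half of the row. Illustrative roundtrip only;
+// square and height are assumed here, not defined in this file:
+//
+//	row, _ := NewRowFromEDS(1, 0, square)
+//	bin, _ := row.MarshalBinary()
+//	out := &Row{}
+//	_ = out.UnmarshalBinary(bin) // out now carries row's RowID and RowShares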
+func (r *Row) MarshalBinary() ([]byte, error) {
+	id, err := r.RowID.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	return (&shwappb.Row{
+		RowId:   id,
+		RowHalf: r.RowShares,
+	}).Marshal()
+}
+
+// UnmarshalBinary unmarshals Row from binary.
+func (r *Row) UnmarshalBinary(data []byte) error {
+	proto := &shwappb.Row{}
+	if err := proto.Unmarshal(data); err != nil {
+		return err
+	}
+
+	err := r.RowID.UnmarshalBinary(proto.RowId)
+	if err != nil {
+		return err
+	}
+
+	r.RowShares = proto.RowHalf
+	return nil
+}
+
+// Verify validates Row's fields and verifies Row inclusion.
+func (r *Row) Verify(root *share.Root) error {
+	if err := r.RowID.Verify(root); err != nil {
+		return err
+	}
+
+	encoded, err := share.DefaultRSMT2DCodec().Encode(r.RowShares)
+	if err != nil {
+		return fmt.Errorf("while extending erasure coded half: %w", err)
+	}
+	// TODO: encoded already contains all the shares initially [-len(RowShares):]
+	r.RowShares = append(r.RowShares, encoded...)
+
+	sqrLn := uint64(len(r.RowShares) / 2)
+	tree := wrapper.NewErasuredNamespacedMerkleTree(sqrLn, uint(r.RowID.RowIndex))
+	for _, shr := range r.RowShares {
+		err := tree.Push(shr)
+		if err != nil {
+			return fmt.Errorf("while pushing shares to NMT: %w", err)
+		}
+	}
+
+	rowRoot, err := tree.Root()
+	if err != nil {
+		return fmt.Errorf("while computing NMT root: %w", err)
+	}
+
+	if !bytes.Equal(root.RowRoots[r.RowIndex], rowRoot) {
+		return fmt.Errorf("invalid RowHash: %X != %X", rowRoot, root.RowRoots[r.RowIndex])
+	}
+
+	return nil
+}
diff --git a/share/shwap/row_hasher.go b/share/shwap/row_hasher.go
new file mode 100644
index 0000000000..0da51bc0e4
--- /dev/null
+++ b/share/shwap/row_hasher.go
@@ -0,0 +1,54 @@
+package shwap
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// RowHasher implements hash.Hash interface for Row.
+type RowHasher struct {
+	data []byte
+}
+
+// Write expects a marshaled Row to validate.
+func (h *RowHasher) Write(data []byte) (int, error) {
+	var row Row
+	if err := row.UnmarshalBinary(data); err != nil {
+		err = fmt.Errorf("unmarshaling Row: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	if err := rowVerifiers.Verify(row.RowID, row); err != nil {
+		err = fmt.Errorf("verifying Row: %w", err)
+		log.Error(err)
+		return 0, err
+	}
+
+	h.data = data
+	return len(data), nil
+}
+
+// Sum returns the "multihash" of the RowID.
+func (h *RowHasher) Sum([]byte) []byte {
+	if h.data == nil {
+		return nil
+	}
+	// the first two bytes of the serialized Row are the protobuf tag and
+	// length prefix of the row_id field; the RowID bytes follow directly
+	const pbOffset = 2
+	return h.data[pbOffset : RowIDSize+pbOffset]
+}
+
+// Reset resets the Hash to its initial state.
+func (h *RowHasher) Reset() {
+	h.data = nil
+}
+
+// Size returns the number of bytes Sum will return.
+func (h *RowHasher) Size() int {
+	return RowIDSize
+}
+
+// BlockSize returns the hash's underlying block size.
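+// The value is nominal: RowHasher never hashes data block-by-block and
+// reports sha256.BlockSize only to satisfy the hash.Hash interface.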
+func (h *RowHasher) BlockSize() int {
+	return sha256.BlockSize
+}
diff --git a/share/shwap/axis_hasher_test.go b/share/shwap/row_hasher_test.go
similarity index 54%
rename from share/shwap/axis_hasher_test.go
rename to share/shwap/row_hasher_test.go
index afb378a4b5..1db8bf5038 100644
--- a/share/shwap/axis_hasher_test.go
+++ b/share/shwap/row_hasher_test.go
@@ -6,23 +6,28 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/celestiaorg/rsmt2d"
-
+	"github.com/celestiaorg/celestia-node/share"
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
 )
 
-func TestAxisHasher(t *testing.T) {
-	hasher := &AxisHasher{}
+func TestRowHasher(t *testing.T) {
+	hasher := &RowHasher{}
 	_, err := hasher.Write([]byte("hello"))
 	assert.Error(t, err)
 
 	square := edstest.RandEDS(t, 2)
+	root, err := share.NewRoot(square)
+	require.NoError(t, err)
 
-	sample, err := NewAxisFromEDS(rsmt2d.Row, 2, square, 1)
+	row, err := NewRowFromEDS(2, 1, square)
 	require.NoError(t, err)
 
-	data, err := sample.MarshalBinary()
+	rowVerifiers.Add(row.RowID, func(row Row) error {
+		return row.Verify(root)
+	})
+
+	data, err := row.MarshalBinary()
 	require.NoError(t, err)
 
 	n, err := hasher.Write(data)
@@ -30,11 +35,11 @@ func TestAxisHasher(t *testing.T) {
 	assert.EqualValues(t, len(data), n)
 
 	digest := hasher.Sum(nil)
-	sid, err := sample.AxisID.MarshalBinary()
+	id, err := row.RowID.MarshalBinary()
 	require.NoError(t, err)
-	assert.EqualValues(t, sid, digest)
+	assert.EqualValues(t, id, digest)
 
 	hasher.Reset()
 	digest = hasher.Sum(nil)
-	assert.NotEqualValues(t, digest, sid)
+	assert.NotEqualValues(t, digest, id)
 }
diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go
new file mode 100644
index 0000000000..a77c8bf36d
--- /dev/null
+++ b/share/shwap/row_id.go
@@ -0,0 +1,116 @@
+package shwap
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// TODO:
+//  * Remove RowHash
+//  * Change validation
+//  * Remove IDs from responses
+
+// RowIDSize is the size of the RowID in bytes
+const RowIDSize = 10
+
+// RowID is a unique identifier of a Row.
+type RowID struct {
+	// Height of the block.
+	// Needed to identify block's data square in the whole chain
+	Height uint64
+	// RowIndex is the index of the row in the data square
+	RowIndex uint16
+}
+
+// NewRowID constructs a new RowID.
+func NewRowID(height uint64, rowIdx uint16, root *share.Root) (RowID, error) {
+	rid := RowID{
+		RowIndex: rowIdx,
+		Height:   height,
+	}
+	return rid, rid.Verify(root)
+}
+
+// RowIDFromCID converts CID to RowID.
+func RowIDFromCID(cid cid.Cid) (id RowID, err error) {
+	if err = validateCID(cid); err != nil {
+		return id, err
+	}
+
+	err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:])
+	if err != nil {
+		return id, fmt.Errorf("while unmarshaling RowID: %w", err)
+	}
+
+	return id, nil
+}
+
+// Cid returns RowID encoded as CID.
+func (rid RowID) Cid() cid.Cid {
+	data, err := rid.MarshalBinary()
+	if err != nil {
+		panic(fmt.Errorf("marshaling RowID: %w", err))
+	}
+
+	buf, err := mh.Encode(data, rowMultihashCode)
+	if err != nil {
+		panic(fmt.Errorf("encoding RowID as CID: %w", err))
+	}
+
+	return cid.NewCidV1(rowCodec, buf)
+}
+
+// MarshalTo encodes RowID into given byte slice.
+// NOTE: Proto is avoided because
+// * Its size is not deterministic which is required for IPLD.
+// * No support for uint16
+func (rid RowID) MarshalTo(data []byte) (int, error) {
+	// appends into data's backing array; callers pass a zero-length slice
+	// with at least RowIDSize capacity (see MarshalBinary)
+	data = binary.LittleEndian.AppendUint64(data, rid.Height)
+	data = binary.LittleEndian.AppendUint16(data, rid.RowIndex)
+	return RowIDSize, nil
+}
+
+// UnmarshalFrom decodes RowID from given byte slice.
+func (rid *RowID) UnmarshalFrom(data []byte) (int, error) {
+	// fields are read in the order MarshalTo wrote them: Height, then RowIndex
+	rid.Height = binary.LittleEndian.Uint64(data)
+	rid.RowIndex = binary.LittleEndian.Uint16(data[8:])
+	return RowIDSize, nil
+}
+
+// MarshalBinary encodes RowID into binary form.
+func (rid RowID) MarshalBinary() ([]byte, error) {
+	data := make([]byte, 0, RowIDSize)
+	n, err := rid.MarshalTo(data)
+	return data[:n], err
+}
+
+// UnmarshalBinary decodes RowID from binary form.
+func (rid *RowID) UnmarshalBinary(data []byte) error {
+	if len(data) != RowIDSize {
+		return fmt.Errorf("invalid RowID data length: %d != %d", len(data), RowIDSize)
+	}
+	_, err := rid.UnmarshalFrom(data)
+	return err
+}
+
+// Verify verifies RowID fields.
+func (rid RowID) Verify(root *share.Root) error {
+	if root == nil {
+		return fmt.Errorf("nil Root")
+	}
+	if rid.Height == 0 {
+		return fmt.Errorf("zero Height")
+	}
+
+	sqrLn := len(root.RowRoots)
+	if int(rid.RowIndex) >= sqrLn {
+		return fmt.Errorf("RowIndex exceeds square size: %d >= %d", rid.RowIndex, sqrLn)
+	}
+
+	return nil
+}
diff --git a/share/shwap/row_id_test.go b/share/shwap/row_id_test.go
new file mode 100644
index 0000000000..a34aab14c9
--- /dev/null
+++ b/share/shwap/row_id_test.go
@@ -0,0 +1,36 @@
+package shwap
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+func TestRowID(t *testing.T) {
+	square := edstest.RandEDS(t, 2)
+	root, err := share.NewRoot(square)
+	require.NoError(t, err)
+
+	id, err := NewRowID(2, 1, root)
+	require.NoError(t, err)
+
+	cid := id.Cid()
+	assert.EqualValues(t, rowCodec, cid.Prefix().Codec)
+	assert.EqualValues(t, rowMultihashCode, cid.Prefix().MhType)
+	assert.EqualValues(t, RowIDSize, cid.Prefix().MhLength)
+
+	data, err := id.MarshalBinary()
+	require.NoError(t, err)
+
+	idOut := RowID{}
+	err = idOut.UnmarshalBinary(data)
+	require.NoError(t, err)
+	assert.EqualValues(t, id, idOut)
+
+	err = idOut.Verify(root)
+	require.NoError(t, err)
+}
diff --git a/share/shwap/row_test.go b/share/shwap/row_test.go
new file mode 100644
index 0000000000..69c3c2334c
--- /dev/null
+++ b/share/shwap/row_test.go
@@ -0,0 +1,35 @@
+package shwap
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+func TestRow(t *testing.T) {
+	square := edstest.RandEDS(t, 8)
+	root, err := share.NewRoot(square)
+	require.NoError(t, err)
+
+	row, err := NewRowFromEDS(1, 2, square)
+	require.NoError(t, err)
+
+	data, err := row.MarshalBinary()
+	require.NoError(t, err)
+
+	blk, err := row.IPLDBlock()
+	require.NoError(t, err)
+	assert.EqualValues(t, blk.Cid(), row.Cid())
+
+	rowOut := &Row{}
+	err = rowOut.UnmarshalBinary(data)
+	require.NoError(t, err)
+	assert.EqualValues(t, row, rowOut)
+
+	err = rowOut.Verify(root)
+	require.NoError(t, err)
+}
diff --git a/share/shwap/sample.go b/share/shwap/sample.go
index de73a802ec..5330ecfc5b 100644
--- a/share/shwap/sample.go
+++ b/share/shwap/sample.go
@@ -15,22 +15,22 @@ import (
 	shwappb 
"github.com/celestiaorg/celestia-node/share/shwap/pb" ) -// SampleType represents type of sample. -type SampleType uint8 +// SampleProofType is either row or column proven Sample. +type SampleProofType = rsmt2d.Axis const ( - // DataSample is a sample of a data share. - DataSample SampleType = iota - // ParitySample is a sample of a parity share. - ParitySample + // RowProofType is a sample proven via row root of the square. + RowProofType = rsmt2d.Row + // ColProofType is a sample proven via column root of the square. + ColProofType = rsmt2d.Col ) // Sample represents a sample of an NMT in EDS. type Sample struct { SampleID - // Type of the Sample - Type SampleType + // SampleProofType of the Sample + SampleProofType SampleProofType // SampleProof of SampleShare inclusion in the NMT SampleProof nmt.Proof // SampleShare is a share being sampled @@ -38,48 +38,48 @@ type Sample struct { } // NewSample constructs a new Sample. -func NewSample(id SampleID, shr share.Share, proof nmt.Proof, sqrLn int) *Sample { - tp := ParitySample - if int(id.AxisIndex) < sqrLn/2 && int(id.ShareIndex) < sqrLn/2 { - tp = DataSample - } - +func NewSample(id SampleID, shr share.Share, proof nmt.Proof, proofTp SampleProofType) *Sample { return &Sample{ - SampleID: id, - Type: tp, - SampleProof: proof, - SampleShare: shr, + SampleID: id, + SampleProofType: proofTp, + SampleProof: proof, + SampleShare: shr, } } -// NewSampleFromEDS samples the EDS and constructs a new Sample. +// NewSampleFromEDS samples the EDS and constructs a new row-proven Sample. func NewSampleFromEDS( - axisType rsmt2d.Axis, - idx int, + proofType SampleProofType, + smplIdx int, square *rsmt2d.ExtendedDataSquare, height uint64, ) (*Sample, error) { + root, err := share.NewRoot(square) + if err != nil { + return nil, err + } + + id, err := NewSampleID(height, smplIdx, root) + if err != nil { + return nil, err + } + sqrLn := int(square.Width()) - axisIdx, shrIdx := idx/sqrLn, idx%sqrLn + rowIdx, shrIdx := uint16(smplIdx/sqrLn), uint16(smplIdx%sqrLn) // TODO(@Wondertan): Should be an rsmt2d method var shrs [][]byte - switch axisType { + switch proofType { case rsmt2d.Row: - shrs = square.Row(uint(axisIdx)) + shrs = square.Row(uint(rowIdx)) case rsmt2d.Col: - axisIdx, shrIdx = shrIdx, axisIdx - shrs = square.Col(uint(axisIdx)) + rowIdx, shrIdx = shrIdx, rowIdx + shrs = square.Col(uint(rowIdx)) default: panic("invalid axis") } - root, err := share.NewRoot(square) - if err != nil { - return nil, fmt.Errorf("while computing root: %w", err) - } - - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(rowIdx)) for _, shr := range shrs { err := tree.Push(shr) if err != nil { @@ -87,13 +87,12 @@ func NewSampleFromEDS( } } - prf, err := tree.ProveRange(shrIdx, shrIdx+1) + prf, err := tree.ProveRange(int(shrIdx), int(shrIdx+1)) if err != nil { return nil, fmt.Errorf("while proving range share over NMT: %w", err) } - id := NewSampleID(axisType, idx, root, height) - return NewSample(id, shrs[shrIdx], prf, len(root.RowRoots)), nil + return NewSample(id, shrs[shrIdx], prf, proofType), nil } // SampleFromBlock converts blocks.Block into Sample. @@ -113,17 +112,12 @@ func SampleFromBlock(blk blocks.Block) (*Sample, error) { // IPLDBlock converts Sample to an IPLD block for Bitswap compatibility. 
func (s *Sample) IPLDBlock() (blocks.Block, error) { - cid, err := s.SampleID.Cid() - if err != nil { - return nil, err - } - data, err := s.MarshalBinary() if err != nil { return nil, err } - return blocks.NewBlockWithCid(data, cid) + return blocks.NewBlockWithCid(data, s.Cid()) } // MarshalBinary marshals Sample to binary. @@ -142,7 +136,7 @@ func (s *Sample) MarshalBinary() ([]byte, error) { return (&shwappb.Sample{ SampleId: id, - SampleType: shwappb.SampleType(s.Type), + SampleType: shwappb.SampleProofType(s.SampleProofType), SampleProof: proof, SampleShare: s.SampleShare, }).Marshal() @@ -160,30 +154,35 @@ func (s *Sample) UnmarshalBinary(data []byte) error { return err } - s.Type = SampleType(proto.SampleType) + s.SampleProofType = SampleProofType(proto.SampleType) s.SampleProof = nmt.ProtoToProof(*proto.SampleProof) s.SampleShare = proto.SampleShare return nil } -// Validate validates Sample's fields and proof of SampleShare inclusion in the NMT. -func (s *Sample) Validate() error { - if err := s.SampleID.Validate(); err != nil { +// Verify validates Sample's fields and verifies SampleShare inclusion. +func (s *Sample) Verify(root *share.Root) error { + if err := s.SampleID.Verify(root); err != nil { return err } - if s.Type != DataSample && s.Type != ParitySample { - return fmt.Errorf("invalid SampleType: %d", s.Type) + if s.SampleProofType != RowProofType && s.SampleProofType != ColProofType { + return fmt.Errorf("invalid SampleProofType: %d", s.SampleProofType) } + sqrLn := len(root.RowRoots) namespace := share.ParitySharesNamespace - if s.Type == DataSample { + if int(s.RowIndex) < sqrLn/2 && int(s.ShareIndex) < sqrLn/2 { namespace = share.GetNamespace(s.SampleShare) } - s.SampleProof.WithHashedProof(hasher()) - if !s.SampleProof.VerifyInclusion(hasher(), namespace.ToNMT(), [][]byte{s.SampleShare}, s.AxisHash) { - return errors.New("invalid ") + rootHash := root.RowRoots[s.RowIndex] + if s.SampleProofType == ColProofType { + rootHash = root.ColumnRoots[s.ShareIndex] + } + + if !s.SampleProof.VerifyInclusion(hashFn(), namespace.ToNMT(), [][]byte{s.SampleShare}, rootHash) { + return errors.New("invalid Sample") } return nil diff --git a/share/shwap/sample_hasher.go b/share/shwap/sample_hasher.go index 7a427cce83..30867dc04c 100644 --- a/share/shwap/sample_hasher.go +++ b/share/shwap/sample_hasher.go @@ -5,49 +5,50 @@ import ( "fmt" ) -// SampleHasher implements hash.Hash interface for Samples. +// SampleHasher implements hash.Hash interface for Sample. type SampleHasher struct { - sample Sample + data []byte } // Write expects a marshaled Sample to validate. -func (sh *SampleHasher) Write(data []byte) (int, error) { - if err := sh.sample.UnmarshalBinary(data); err != nil { - err = fmt.Errorf("while unmarshaling Sample: %w", err) +func (h *SampleHasher) Write(data []byte) (int, error) { + var s Sample + if err := s.UnmarshalBinary(data); err != nil { + err = fmt.Errorf("unmarshaling Sample: %w", err) log.Error(err) return 0, err } - if err := sh.sample.Validate(); err != nil { - err = fmt.Errorf("while validating Sample: %w", err) + if err := sampleVerifiers.Verify(s.SampleID, s); err != nil { + err = fmt.Errorf("verifying Sample: %w", err) log.Error(err) return 0, err } + h.data = data return len(data), nil } // Sum returns the "multihash" of the SampleID. 
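 // The ID bytes are sliced directly out of the marshaled Sample retained by
 // the last successful Write: the sample_id field sits behind a 2-byte
 // protobuf tag and length prefix, hence the pbOffset below.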
-func (sh *SampleHasher) Sum([]byte) []byte { - sum, err := sh.sample.SampleID.MarshalBinary() - if err != nil { - err = fmt.Errorf("while marshaling SampleID: %w", err) - log.Error(err) +func (h *SampleHasher) Sum([]byte) []byte { + if h.data == nil { + return nil } - return sum + const pbOffset = 2 + return h.data[pbOffset : SampleIDSize+pbOffset] } // Reset resets the Hash to its initial state. -func (sh *SampleHasher) Reset() { - sh.sample = Sample{} +func (h *SampleHasher) Reset() { + h.data = nil } // Size returns the number of bytes Sum will return. -func (sh *SampleHasher) Size() int { +func (h *SampleHasher) Size() int { return SampleIDSize } // BlockSize returns the hash's underlying block size. -func (sh *SampleHasher) BlockSize() int { +func (h *SampleHasher) BlockSize() int { return sha256.BlockSize } diff --git a/share/shwap/sample_hasher_test.go b/share/shwap/sample_hasher_test.go index ebf8da1d6c..a6e46b1301 100644 --- a/share/shwap/sample_hasher_test.go +++ b/share/shwap/sample_hasher_test.go @@ -6,8 +6,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) @@ -18,10 +17,16 @@ func TestSampleHasher(t *testing.T) { assert.Error(t, err) square := edstest.RandEDS(t, 2) + root, err := share.NewRoot(square) + require.NoError(t, err) - sample, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1) + sample, err := NewSampleFromEDS(RowProofType, 10, square, 1) require.NoError(t, err) + sampleVerifiers.Add(sample.SampleID, func(s Sample) error { + return s.Verify(root) + }) + data, err := sample.MarshalBinary() require.NoError(t, err) @@ -30,11 +35,11 @@ func TestSampleHasher(t *testing.T) { assert.EqualValues(t, len(data), n) digest := hasher.Sum(nil) - sid, err := sample.SampleID.MarshalBinary() + id, err := sample.SampleID.MarshalBinary() require.NoError(t, err) - assert.EqualValues(t, sid, digest) + assert.EqualValues(t, id, digest) hasher.Reset() digest = hasher.Sum(nil) - assert.NotEqualValues(t, digest, sid) + assert.NotEqualValues(t, digest, id) } diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 9667002159..5561335cff 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -7,42 +7,32 @@ import ( "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" ) // SampleIDSize is the size of the SampleID in bytes -const SampleIDSize = AxisIDSize + 2 +const SampleIDSize = RowIDSize + 2 // SampleID is an unique identifier of a Sample. type SampleID struct { - AxisID + RowID - // ShareIndex is the index of the sampled share in the axis + // ShareIndex is the index of the sampled share in the Row ShareIndex uint16 } // NewSampleID constructs a new SampleID. 
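 // smplIdx is a flat share index over the square: rowIdx = smplIdx / width
 // and shrIdx = smplIdx % width, where width is len(root.RowRoots).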
-func NewSampleID(axisType rsmt2d.Axis, idx int, root *share.Root, height uint64) SampleID { +func NewSampleID(height uint64, smplIdx int, root *share.Root) (SampleID, error) { sqrLn := len(root.RowRoots) - axisIdx, shrIdx := idx/sqrLn, idx%sqrLn - dahroot := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - axisIdx, shrIdx = shrIdx, axisIdx - dahroot = root.ColumnRoots[axisIdx] - } - axisHash := hashBytes(dahroot) - - return SampleID{ - AxisID: AxisID{ - AxisType: axisType, - AxisIndex: uint16(axisIdx), - AxisHash: axisHash, - Height: height, + rowIdx, shrIdx := uint16(smplIdx/sqrLn), uint16(smplIdx%sqrLn) + sid := SampleID{ + RowID: RowID{ + RowIndex: rowIdx, + Height: height, }, - ShareIndex: uint16(shrIdx), + ShareIndex: shrIdx, } + return sid, sid.Verify(root) } // SampleIDFromCID coverts CID to SampleID. @@ -53,57 +43,63 @@ func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) { err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) if err != nil { - return id, fmt.Errorf("while unmarhalling SampleID: %w", err) + return id, fmt.Errorf("while unmarhaling SampleID: %w", err) } return id, nil } -// Cid returns sample ID encoded as CID. -func (s SampleID) Cid() (cid.Cid, error) { +// Cid returns SampleID encoded as CID. +func (sid SampleID) Cid() cid.Cid { // avoid using proto serialization for CID as it's not deterministic - data, err := s.MarshalBinary() + data, err := sid.MarshalBinary() if err != nil { - return cid.Undef, err + panic(fmt.Errorf("marshaling SampleID: %w", err)) } buf, err := mh.Encode(data, sampleMultihashCode) if err != nil { - return cid.Undef, err + panic(fmt.Errorf("encoding SampleID as CID: %w", err)) } - return cid.NewCidV1(sampleCodec, buf), nil + return cid.NewCidV1(sampleCodec, buf) } // MarshalBinary encodes SampleID into binary form. // NOTE: Proto is avoided because // * Its size is not deterministic which is required for IPLD. // * No support for uint16 -func (s SampleID) MarshalBinary() ([]byte, error) { +func (sid SampleID) MarshalBinary() ([]byte, error) { data := make([]byte, 0, SampleIDSize) - n, err := s.AxisID.MarshalTo(data) + n, err := sid.RowID.MarshalTo(data) if err != nil { return nil, err } data = data[:n] - data = binary.LittleEndian.AppendUint16(data, s.ShareIndex) + data = binary.LittleEndian.AppendUint16(data, sid.ShareIndex) return data, nil } // UnmarshalBinary decodes SampleID from binary form. -func (s *SampleID) UnmarshalBinary(data []byte) error { +func (sid *SampleID) UnmarshalBinary(data []byte) error { if len(data) != SampleIDSize { - return fmt.Errorf("invalid data length: %d != %d", len(data), SampleIDSize) + return fmt.Errorf("invalid SampleID data length: %d != %d", len(data), SampleIDSize) } - n, err := s.AxisID.UnmarshalFrom(data) + n, err := sid.RowID.UnmarshalFrom(data) if err != nil { return err } - s.ShareIndex = binary.LittleEndian.Uint16(data[n:]) + data = data[n:] + sid.ShareIndex = binary.LittleEndian.Uint16(data) return nil } -// Validate validates fields of SampleID. -func (s SampleID) Validate() error { - return s.AxisID.Validate() +// Verify verifies SampleID fields. 
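+// ShareIndex is checked against the number of column roots, while Height and
+// RowIndex validation is delegated to the embedded RowID.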
+func (sid SampleID) Verify(root *share.Root) error { + sqrLn := len(root.ColumnRoots) + if int(sid.ShareIndex) >= sqrLn { + return fmt.Errorf("ShareIndex exceeds square size: %d >= %d", sid.ShareIndex, sqrLn) + } + + return sid.RowID.Verify(root) } diff --git a/share/shwap/sample_id_test.go b/share/shwap/sample_id_test.go index a060dd6793..7c19c5d923 100644 --- a/share/shwap/sample_id_test.go +++ b/share/shwap/sample_id_test.go @@ -6,8 +6,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) @@ -17,23 +15,22 @@ func TestSampleID(t *testing.T) { root, err := share.NewRoot(square) require.NoError(t, err) - sid := NewSampleID(rsmt2d.Row, 2, root, 1) - - id, err := sid.Cid() + id, err := NewSampleID(1, 1, root) require.NoError(t, err) - assert.EqualValues(t, sampleCodec, id.Prefix().Codec) - assert.EqualValues(t, sampleMultihashCode, id.Prefix().MhType) - assert.EqualValues(t, SampleIDSize, id.Prefix().MhLength) + cid := id.Cid() + assert.EqualValues(t, sampleCodec, cid.Prefix().Codec) + assert.EqualValues(t, sampleMultihashCode, cid.Prefix().MhType) + assert.EqualValues(t, SampleIDSize, cid.Prefix().MhLength) - data, err := sid.MarshalBinary() + data, err := id.MarshalBinary() require.NoError(t, err) - sidOut := SampleID{} - err = sidOut.UnmarshalBinary(data) + idOut := SampleID{} + err = idOut.UnmarshalBinary(data) require.NoError(t, err) - assert.EqualValues(t, sid, sidOut) + assert.EqualValues(t, id, idOut) - err = sidOut.Validate() + err = idOut.Verify(root) require.NoError(t, err) } diff --git a/share/shwap/sample_test.go b/share/shwap/sample_test.go index dd666f9502..d1679a8ec3 100644 --- a/share/shwap/sample_test.go +++ b/share/shwap/sample_test.go @@ -6,15 +6,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) func TestSample(t *testing.T) { square := edstest.RandEDS(t, 8) + root, err := share.NewRoot(square) + require.NoError(t, err) - sample, err := NewSampleFromEDS(rsmt2d.Row, 2, square, 1) + sample, err := NewSampleFromEDS(RowProofType, 1, square, 1) require.NoError(t, err) data, err := sample.MarshalBinary() @@ -22,16 +23,13 @@ func TestSample(t *testing.T) { blk, err := sample.IPLDBlock() require.NoError(t, err) - - cid, err := sample.SampleID.Cid() - require.NoError(t, err) - assert.EqualValues(t, blk.Cid(), cid) + assert.EqualValues(t, blk.Cid(), sample.Cid()) sampleOut := &Sample{} err = sampleOut.UnmarshalBinary(data) require.NoError(t, err) assert.EqualValues(t, sample, sampleOut) - err = sampleOut.Validate() + err = sampleOut.Verify(root) require.NoError(t, err) } diff --git a/share/shwap/ipldv2.go b/share/shwap/shwap.go similarity index 57% rename from share/shwap/ipldv2.go rename to share/shwap/shwap.go index a71c62a838..66fb819349 100644 --- a/share/shwap/ipldv2.go +++ b/share/shwap/shwap.go @@ -4,6 +4,7 @@ import ( "crypto/sha256" "fmt" "hash" + "sync" "github.com/ipfs/boxo/blockservice" "github.com/ipfs/boxo/blockstore" @@ -11,40 +12,8 @@ import ( "github.com/ipfs/go-cid" logger "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" ) -// MustSampleCID constructs a sample CID or panics. 
-func MustSampleCID(axisIdx int, root *share.Root, height uint64) cid.Cid { - axisTp := rsmt2d.Row // TODO: Randomize axis type - cid, err := NewSampleID(axisTp, axisIdx, root, height).Cid() - if err != nil { - panic("failed to create sample CID") - } - return cid -} - -// MustAxisCID constructs an axis CID or panics. -func MustAxisCID(axisTp rsmt2d.Axis, axisIdx int, root *share.Root, height uint64) cid.Cid { - cid, err := NewAxisID(axisTp, uint16(axisIdx), root, height).Cid() - if err != nil { - panic("failed to create axis CID") - } - return cid -} - -// MustDataCID constructs a data CID or panics. -func MustDataCID(axisIdx int, root *share.Root, height uint64, namespace share.Namespace) cid.Cid { - cid, err := NewDataID(axisIdx, root, height, namespace).Cid() - if err != nil { - panic("failed to create data CID") - } - return cid -} - // NewBlockService creates a new blockservice.BlockService with allowlist supporting the protocol. func NewBlockService(b blockstore.Blockstore, ex exchange.Interface) blockservice.BlockService { return blockservice.New(b, ex, blockservice.WithAllowlist(defaultAllowlist)) @@ -60,12 +29,12 @@ const ( // sampleMultihashCode is the multihash code for share sampling multihash function. sampleMultihashCode = 0x7801 - // axisCodec is a CID codec used for axis Bitswap requests over Namespaced Merkle + // rowCodec is a CID codec used for row Bitswap requests over Namespaced Merkle // Tree. - axisCodec = 0x7810 + rowCodec = 0x7810 - // axisMultihashCode is the multihash code for custom axis sampling multihash function. - axisMultihashCode = 0x7811 + // rowMultihashCode is the multihash code for custom axis sampling multihash function. + rowMultihashCode = 0x7811 // dataCodec is a CID codec used for data Bitswap requests over Namespaced Merkle Tree. dataCodec = 0x7820 @@ -78,8 +47,7 @@ const ( ) var ( - hashSize = sha256.Size - hasher = sha256.New + hashFn = sha256.New ) func init() { @@ -87,14 +55,37 @@ func init() { mh.Register(sampleMultihashCode, func() hash.Hash { return &SampleHasher{} }) - mh.Register(axisMultihashCode, func() hash.Hash { - return &AxisHasher{} + mh.Register(rowMultihashCode, func() hash.Hash { + return &RowHasher{} }) mh.Register(dataMultihashCode, func() hash.Hash { return &DataHasher{} }) } +var ( + rowVerifiers verifiers[RowID, Row] + sampleVerifiers verifiers[SampleID, Sample] + dataVerifiers verifiers[DataID, Data] +) + +type verifiers[ID comparable, V any] struct { + mp sync.Map +} + +func (v *verifiers[ID, V]) Add(id ID, f func(V) error) { + v.mp.Store(id, f) +} + +func (v *verifiers[ID, V]) Verify(id ID, val V) error { + f, ok := v.mp.LoadAndDelete(id) + if !ok { + return fmt.Errorf("no verifier") + } + + return f.(func(V) error)(val) +} + // defaultAllowlist keeps default list of hashes allowed in the network. 
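 // Bitswap hashes every received block to validate it against the requested
 // CID, so allowing only the home-baked multihash codes above routes all
 // verification through the custom hashers and rejects any other block type.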
var defaultAllowlist allowlist @@ -103,7 +94,7 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we disable all codes except home-baked code switch code { - case sampleMultihashCode, axisMultihashCode, dataMultihashCode: + case sampleMultihashCode, rowMultihashCode, dataMultihashCode: return true } return false @@ -118,20 +109,14 @@ func validateCID(cid cid.Cid) error { switch prefix.Codec { default: return fmt.Errorf("unsupported codec %d", prefix.Codec) - case sampleCodec, axisCodec, dataCodec: + case sampleCodec, rowCodec, dataCodec: } switch prefix.MhLength { default: return fmt.Errorf("unsupported multihash length %d", prefix.MhLength) - case SampleIDSize, AxisIDSize, DataIDSize: + case SampleIDSize, RowIDSize, DataIDSize: } return nil } - -func hashBytes(preimage []byte) []byte { - hsh := hasher() - hsh.Write(preimage) - return hsh.Sum(nil) -} diff --git a/share/shwap/ipldv2_test.go b/share/shwap/shwap_test.go similarity index 55% rename from share/shwap/ipldv2_test.go rename to share/shwap/shwap_test.go index c9cd2a13cd..3ddc4b3a73 100644 --- a/share/shwap/ipldv2_test.go +++ b/share/shwap/shwap_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/boxo/bitswap" "github.com/ipfs/boxo/bitswap/network" - "github.com/ipfs/boxo/blockservice" "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/boxo/routing/offline" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -18,70 +18,73 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/rsmt2d" - + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/sharetest" ) -var axisTypes = []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} - // TestSampleRoundtripGetBlock tests full protocol round trip of: // EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. 
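 //
 // The invariant exercised throughout these tests: a verifier is registered
 // under the Sample's ID before the CID is requested, since verification is
 // triggered by Bitswap's multihash check on arrival. Sketch (illustrative):
 //
 //	smpl, _ := NewSampleFromEDS(RowProofType, 0, square, 1)
 //	sampleVerifiers.Add(smpl.SampleID, func(s Sample) error {
 //		return s.Verify(root)
 //	})
 //	blk, _ := client.GetBlock(ctx, smpl.Cid())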
func TestSampleRoundtripGetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - sqr := edstest.RandEDS(t, 8) - b := edsBlockstore(sqr) + square := edstest.RandEDS(t, 8) + root, err := share.NewRoot(square) + require.NoError(t, err) + + b := edsBlockstore(square) client := remoteClient(ctx, t, b) - width := int(sqr.Width()) - for _, axisType := range axisTypes { - for i := 0; i < width*width; i++ { - smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) - require.NoError(t, err) + width := int(square.Width()) + for i := 0; i < width*width; i++ { + smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col + require.NoError(t, err) - cid, err := smpl.SampleID.Cid() - require.NoError(t, err) + sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { + return sample.Verify(root) + }) - blkOut, err := client.GetBlock(ctx, cid) - require.NoError(t, err) - assert.EqualValues(t, cid, blkOut.Cid()) + cid := smpl.Cid() + blkOut, err := client.GetBlock(ctx, cid) + require.NoError(t, err) + assert.EqualValues(t, cid, blkOut.Cid()) - smpl, err = SampleFromBlock(blkOut) - assert.NoError(t, err) + smpl, err = SampleFromBlock(blkOut) + assert.NoError(t, err) - err = smpl.Validate() // bitswap already performed validation and this is only for testing - assert.NoError(t, err) - } + err = smpl.Verify(root) + assert.NoError(t, err) } } +// TODO: Debug why is it flaky func TestSampleRoundtripGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*100) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - sqr := edstest.RandEDS(t, 8) - b := edsBlockstore(sqr) + square := edstest.RandEDS(t, 8) + root, err := share.NewRoot(square) + require.NoError(t, err) + b := edsBlockstore(square) client := remoteClient(ctx, t, b) set := cid.NewSet() - width := int(sqr.Width()) - for _, axisType := range axisTypes { - for i := 0; i < width*width; i++ { - smpl, err := NewSampleFromEDS(axisType, i, sqr, 1) - require.NoError(t, err) - - cid, err := smpl.SampleID.Cid() - require.NoError(t, err) + width := int(square.Width()) + for i := 0; i < width*width; i++ { + smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col + require.NoError(t, err) + set.Add(smpl.Cid()) - set.Add(cid) - } + sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { + return sample.Verify(root) + }) } - blks := client.GetBlocks(ctx, set.Keys()) - err := set.ForEach(func(c cid.Cid) error { + blks, err := client.GetBlocks(ctx, set.Keys()) + require.NoError(t, err) + + err = set.ForEach(func(c cid.Cid) error { select { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) @@ -89,7 +92,7 @@ func TestSampleRoundtripGetBlocks(t *testing.T) { smpl, err := SampleFromBlock(blk) assert.NoError(t, err) - err = smpl.Validate() // bitswap already performed validation and this is only for testing + err = smpl.Verify(root) // bitswap already performed validation and this is only for testing assert.NoError(t, err) case <-ctx.Done(): return ctx.Err() @@ -99,68 +102,72 @@ func TestSampleRoundtripGetBlocks(t *testing.T) { assert.NoError(t, err) } -func TestAxisRoundtripGetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10000) +func TestRowRoundtripGetBlock(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - sqr := 
edstest.RandEDS(t, 16) - b := edsBlockstore(sqr) + square := edstest.RandEDS(t, 16) + root, err := share.NewRoot(square) + require.NoError(t, err) + b := edsBlockstore(square) client := remoteClient(ctx, t, b) - width := int(sqr.Width()) - for _, axisType := range axisTypes { - for i := 0; i < width; i++ { - smpl, err := NewAxisFromEDS(axisType, i, sqr, 1) - require.NoError(t, err) + width := int(square.Width()) + for i := 0; i < width; i++ { + row, err := NewRowFromEDS(1, i, square) + require.NoError(t, err) - cid, err := smpl.AxisID.Cid() - require.NoError(t, err) + rowVerifiers.Add(row.RowID, func(row Row) error { + return row.Verify(root) + }) - blkOut, err := client.GetBlock(ctx, cid) - require.NoError(t, err) - assert.EqualValues(t, cid, blkOut.Cid()) + cid := row.Cid() + blkOut, err := client.GetBlock(ctx, cid) + require.NoError(t, err) + assert.EqualValues(t, cid, blkOut.Cid()) - smpl, err = AxisFromBlock(blkOut) - assert.NoError(t, err) + row, err = RowFromBlock(blkOut) + assert.NoError(t, err) - err = smpl.Validate() // bitswap already performed validation and this is only for testing - assert.NoError(t, err) - } + err = row.Verify(root) + assert.NoError(t, err) } } -func TestAxisRoundtripGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) +func TestRowRoundtripGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - sqr := edstest.RandEDS(t, 16) - b := edsBlockstore(sqr) + square := edstest.RandEDS(t, 16) + root, err := share.NewRoot(square) + require.NoError(t, err) + b := edsBlockstore(square) client := remoteClient(ctx, t, b) set := cid.NewSet() - width := int(sqr.Width()) - for _, axisType := range axisTypes { - for i := 0; i < width; i++ { - smpl, err := NewAxisFromEDS(axisType, i, sqr, 1) - require.NoError(t, err) - - cid, err := smpl.AxisID.Cid() - require.NoError(t, err) + width := int(square.Width()) + for i := 0; i < width; i++ { + row, err := NewRowFromEDS(1, i, square) + require.NoError(t, err) + set.Add(row.Cid()) - set.Add(cid) - } + rowVerifiers.Add(row.RowID, func(row Row) error { + return row.Verify(root) + }) } - blks := client.GetBlocks(ctx, set.Keys()) - err := set.ForEach(func(c cid.Cid) error { + blks, err := client.GetBlocks(ctx, set.Keys()) + require.NoError(t, err) + + err = set.ForEach(func(c cid.Cid) error { select { case blk := <-blks: assert.True(t, set.Has(blk.Cid())) - smpl, err := AxisFromBlock(blk) + row, err := RowFromBlock(blk) assert.NoError(t, err) - err = smpl.Validate() // bitswap already performed validation and this is only for testing + err = row.Verify(root) assert.NoError(t, err) case <-ctx.Done(): return ctx.Err() @@ -171,11 +178,11 @@ func TestAxisRoundtripGetBlocks(t *testing.T) { } func TestDataRoundtripGetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() namespace := sharetest.RandV0Namespace() - sqr, _ := edstest.RandEDSWithNamespace(t, namespace, 16) + sqr, root := edstest.RandEDSWithNamespace(t, namespace, 16) b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) @@ -183,9 +190,11 @@ func TestDataRoundtripGetBlock(t *testing.T) { require.NoError(t, err) for _, nd := range nds { - cid, err := nd.DataID.Cid() - require.NoError(t, err) + dataVerifiers.Add(nd.DataID, func(data Data) error { + return data.Verify(root) + }) + cid := nd.Cid() blkOut, err := client.GetBlock(ctx, cid) 
require.NoError(t, err) assert.EqualValues(t, cid, blkOut.Cid()) @@ -193,17 +202,17 @@ func TestDataRoundtripGetBlock(t *testing.T) { ndOut, err := DataFromBlock(blkOut) assert.NoError(t, err) - err = ndOut.Validate() // bitswap already performed validation and this is only for testing + err = ndOut.Verify(root) assert.NoError(t, err) } } func TestDataRoundtripGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() namespace := sharetest.RandV0Namespace() - sqr, _ := edstest.RandEDSWithNamespace(t, namespace, 16) + sqr, root := edstest.RandEDSWithNamespace(t, namespace, 16) b := edsBlockstore(sqr) client := remoteClient(ctx, t, b) @@ -212,12 +221,16 @@ func TestDataRoundtripGetBlocks(t *testing.T) { set := cid.NewSet() for _, nd := range nds { - cid, err := nd.DataID.Cid() - require.NoError(t, err) - set.Add(cid) + set.Add(nd.Cid()) + + dataVerifiers.Add(nd.DataID, func(data Data) error { + return data.Verify(root) + }) } - blks := client.GetBlocks(ctx, set.Keys()) + blks, err := client.GetBlocks(ctx, set.Keys()) + require.NoError(t, err) + err = set.ForEach(func(c cid.Cid) error { select { case blk := <-blks: @@ -226,7 +239,7 @@ func TestDataRoundtripGetBlocks(t *testing.T) { smpl, err := DataFromBlock(blk) assert.NoError(t, err) - err = smpl.Validate() // bitswap already performed validation and this is only for testing + err = smpl.Verify(root) assert.NoError(t, err) case <-ctx.Done(): return ctx.Err() @@ -236,7 +249,7 @@ func TestDataRoundtripGetBlocks(t *testing.T) { assert.NoError(t, err) } -func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) blockservice.BlockService { +func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.Fetcher { net, err := mocknet.FullMeshLinked(2) require.NoError(t, err) @@ -261,5 +274,5 @@ func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstor err = net.ConnectAllButSelf() require.NoError(t, err) - return NewBlockService(bstoreClient, bitswapClient) + return bitswapClient } From c8e19260a3a070ab564999b6f7b8ad51fb68c889 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 5 Jan 2024 15:36:25 +0700 Subject: [PATCH 045/132] Switch implementation to reconstructSome --- share/store/codec.go | 2 +- share/store/ods_file.go | 42 +++++++++++++++++++++--------------- share/store/ods_file_test.go | 15 +++++++++++++ 3 files changed, 41 insertions(+), 18 deletions(-) diff --git a/share/store/codec.go b/share/store/codec.go index af9dd5af8e..4fc1a1ef1d 100644 --- a/share/store/codec.go +++ b/share/store/codec.go @@ -22,7 +22,7 @@ func (l *codec) Encoder(len int) (reedsolomon.Encoder, error) { enc, ok := l.encCache.Load(len) if !ok { var err error - enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(true)) + enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(false)) if err != nil { return nil, err } diff --git a/share/store/ods_file.go b/share/store/ods_file.go index 6ab7870a9a..5dbf63f775 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -237,21 +237,21 @@ func computeAxisHalf( shards := make([][]byte, f.Size()) copy(shards, original) - for j := len(original); j < len(shards); j++ { - shards[j] = make([]byte, len(original[0])) - } + //for j := len(original); j < len(shards); j++ { + // shards[j] = make([]byte, len(original[0])) + //} - //target := make([]bool, f.Size()) - //target[axisIdx] = true - // - 
//err = enc.ReconstructSome(shards, target) + //err = enc.Encode(shards) //if err != nil { - // return fmt.Errorf("reconstruct some: %w", err) + // return fmt.Errorf("encode: %w", err) //} - err = enc.Encode(shards) + target := make([]bool, f.Size()) + target[axisIdx] = true + + err = enc.ReconstructSome(shards, target) if err != nil { - return fmt.Errorf("encode: %w", err) + return fmt.Errorf("reconstruct some: %w", err) } shares[i] = shards[axisIdx] @@ -269,18 +269,26 @@ func (f *OdsFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ( return nil, err } - return extendShares(original) + return extendShares(f.memPool.codec, original) } -func extendShares(original []share.Share) ([]share.Share, error) { - parity, err := rsmt2d.NewLeoRSCodec().Encode(original) +func extendShares(codec Codec, original []share.Share) ([]share.Share, error) { + sqLen := len(original) * 2 + enc, err := codec.Encoder(sqLen) if err != nil { - return nil, err + return nil, fmt.Errorf("encoder: %w", err) } - shares := make([]share.Share, 0, len(original)+len(parity)) - shares = append(shares, original...) - shares = append(shares, parity...) + shares := make([]share.Share, sqLen) + copy(shares, original) + for j := len(original); j < len(shares); j++ { + shares[j] = make([]byte, len(original[0])) + } + + err = enc.Encode(shares) + if err != nil { + return nil, err + } return shares, nil } diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go index 00668f0b4b..4286d311c1 100644 --- a/share/store/ods_file_test.go +++ b/share/store/ods_file_test.go @@ -71,6 +71,7 @@ func TestReadOdsFile(t *testing.T) { } } +// Leopard full encode // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 418206 2545 ns/op // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 4968 227265 ns/op // BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 57007 20707 ns/op @@ -83,6 +84,20 @@ func TestReadOdsFile(t *testing.T) { // BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 428 2616150 ns/op // BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14338 83598 ns/op // BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 488 2213146 ns/op + +// ReconstructSome, default codec +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 455848 2588 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 9015 203950 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 52734 21178 ns/op +// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 8830 127452 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 303834 4763 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 2940 426246 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 27758 42842 ns/op +// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 3385 353868 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 172086 6455 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 672 1550386 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14202 84316 ns/op +// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 978 1230980 ns/op func BenchmarkAxisFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() 
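The switch above trades a full Encode of the parity half for ReconstructSome, which recomputes only the shards flagged true in the target slice; the benchmark comments in ods_file_test.go show the saving growing with square size (roughly 2.6ms down to 1.6ms for an extended row at size 128). A minimal standalone sketch of the call pattern, assuming the klauspost/reedsolomon API used in this series; the shard size and square width here are made-up values:

package main

import (
	"crypto/rand"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const size = 32 // extended width: size/2 original shards + size/2 parity shards

	enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false))
	if err != nil {
		panic(err)
	}

	// Only the first half is populated; nil entries mark shards as missing,
	// so there is no need to preallocate empty parity shards up front.
	shards := make([][]byte, size)
	for i := 0; i < size/2; i++ {
		shards[i] = make([]byte, 512)
		if _, err := rand.Read(shards[i]); err != nil {
			panic(err)
		}
	}

	// Request exactly one shard. Because the target index lies in the parity
	// half, the target slice must span all shards, not just the data half.
	target := make([]bool, size)
	target[size-2] = true

	if err := enc.ReconstructSome(shards, target); err != nil {
		panic(err)
	}
	// shards[size-2] is now filled; all other parity slots remain nil.
}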
From ec71e1297f6ccc4382800f79da0cbf5d449e6d31 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 5 Jan 2024 15:55:48 +0700 Subject: [PATCH 046/132] add codec benchmark --- share/store/codec_test.go | 68 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 share/store/codec_test.go diff --git a/share/store/codec_test.go b/share/store/codec_test.go new file mode 100644 index 0000000000..b223fa9e22 --- /dev/null +++ b/share/store/codec_test.go @@ -0,0 +1,68 @@ +package store + +import ( + "testing" + + "github.com/klauspost/reedsolomon" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func BenchmarkCodec(b *testing.B) { + size := 128 + + shards := make([][]byte, size) + original := sharetest.RandShares(b, size/2) + copy(shards, original) + + // BenchmarkLeoCodec/Leopard-10 81866 14611 ns/op + b.Run("Leopard", func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(true)) + require.NoError(b, err) + + // fill with parity empty shares + for j := len(original); j < len(shards); j++ { + shards[j] = make([]byte, len(original[0])) + } + + for i := 0; i < b.N; i++ { + err = enc.Encode(shards) + require.NoError(b, err) + } + }) + + // BenchmarkLeoCodec/Leopard-10 81646 14641 ns/op + b.Run("default", func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) + require.NoError(b, err) + + // fill with parity empty shares + for j := len(original); j < len(shards); j++ { + shards[j] = make([]byte, len(original[0])) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.Encode(shards) + require.NoError(b, err) + } + }) + + // BenchmarkLeoCodec/default,_reconstructSome-10 407635 2728 ns/op + b.Run("default, reconstructSome", func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) + require.NoError(b, err) + + targets := make([]bool, size) + target := size - 2 + targets[target] = true + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.ReconstructSome(shards, targets) + require.NoError(b, err) + shards[target] = nil + } + }) +} From 74ab0b9d1dc63a4918202c35779df087b4063cfd Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 5 Jan 2024 16:23:28 +0700 Subject: [PATCH 047/132] add different sizes to codec benchmark --- share/store/codec_test.go | 99 ++++++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 42 deletions(-) diff --git a/share/store/codec_test.go b/share/store/codec_test.go index b223fa9e22..a13fba156d 100644 --- a/share/store/codec_test.go +++ b/share/store/codec_test.go @@ -1,6 +1,7 @@ package store import ( + "fmt" "testing" "github.com/klauspost/reedsolomon" @@ -10,59 +11,73 @@ import ( ) func BenchmarkCodec(b *testing.B) { - size := 128 + minSize, maxSize := 32, 128 - shards := make([][]byte, size) - original := sharetest.RandShares(b, size/2) - copy(shards, original) + for size := minSize; size <= maxSize; size *= 2 { + // BenchmarkCodec/Leopard/size:32-10 409194 2793 ns/op + // BenchmarkCodec/Leopard/size:64-10 190969 6170 ns/op + // BenchmarkCodec/Leopard/size:128-10 82821 14287 ns/op + b.Run(fmt.Sprintf("Leopard/size:%v", size), func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(true)) + require.NoError(b, err) - // BenchmarkLeoCodec/Leopard-10 81866 14611 ns/op - b.Run("Leopard", func(b *testing.B) { - enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(true)) - require.NoError(b, err) + 
shards := newShards(b, size, true) - // fill with parity empty shares - for j := len(original); j < len(shards); j++ { - shards[j] = make([]byte, len(original[0])) - } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.Encode(shards) + require.NoError(b, err) + } + }) - for i := 0; i < b.N; i++ { - err = enc.Encode(shards) + // BenchmarkCodec/default/size:32-10 222153 5364 ns/op + // BenchmarkCodec/default/size:64-10 58831 20349 ns/op + // BenchmarkCodec/default/size:128-10 14940 80471 ns/op + b.Run(fmt.Sprintf("default/size:%v", size), func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) require.NoError(b, err) - } - }) - // BenchmarkLeoCodec/Leopard-10 81646 14641 ns/op - b.Run("default", func(b *testing.B) { - enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) - require.NoError(b, err) + shards := newShards(b, size, true) - // fill with parity empty shares - for j := len(original); j < len(shards); j++ { - shards[j] = make([]byte, len(original[0])) - } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.Encode(shards) + require.NoError(b, err) + } + }) - b.ResetTimer() - for i := 0; i < b.N; i++ { - err = enc.Encode(shards) + // BenchmarkCodec/default-reconstructSome/size:32-10 1263585 954.4 ns/op + // BenchmarkCodec/default-reconstructSome/size:64-10 762273 1554 ns/op + // BenchmarkCodec/default-reconstructSome/size:128-10 429268 2974 ns/op + b.Run(fmt.Sprintf("default-reconstructSome/size:%v", size), func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) require.NoError(b, err) - } - }) - // BenchmarkLeoCodec/default,_reconstructSome-10 407635 2728 ns/op - b.Run("default, reconstructSome", func(b *testing.B) { - enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) - require.NoError(b, err) + shards := newShards(b, size, false) + targets := make([]bool, size) + target := size - 2 + targets[target] = true - targets := make([]bool, size) - target := size - 2 - targets[target] = true + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.ReconstructSome(shards, targets) + require.NoError(b, err) + shards[target] = nil + } + }) + } +} - b.ResetTimer() - for i := 0; i < b.N; i++ { - err = enc.ReconstructSome(shards, targets) - require.NoError(b, err) - shards[target] = nil +func newShards(b require.TestingT, size int, fillParity bool) [][]byte { + shards := make([][]byte, size) + original := sharetest.RandShares(b, size/2) + copy(shards, original) + + if fillParity { + // fill with parity empty shares + for j := len(original); j < len(shards); j++ { + shards[j] = make([]byte, len(original[0])) } - }) + } + return shards } From ed9b593e74869e47a6b7d97ce3cf5fbf16740039 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sat, 13 Jan 2024 13:44:24 +0700 Subject: [PATCH 048/132] update EDSFile implementations to support new Share interface --- core/exchange.go | 4 +- core/listener.go | 2 +- go.mod | 6 +- go.sum | 8 ++ header/headertest/fraud/testing.go | 2 +- nodebuilder/store_test.go | 2 +- share/availability/full/availability.go | 2 +- share/eds/eds.go | 2 +- share/eds/file.go | 19 +-- share/eds/utils.go | 14 +- share/ipld/get_shares.go | 5 +- share/ipld/nmt_adder.go | 17 ++- share/ipld/utils.go | 8 +- share/store/cache_file.go | 166 +++++++++--------------- share/store/cache_file_test.go | 34 +++++ share/store/codec.go | 2 +- share/store/eds_file_test.go | 103 --------------- share/store/file.go | 25 ---- share/store/file_closer.go | 67 
++++++++++ share/store/file_test.go | 102 ++++++++++++--- share/store/mem_file.go | 43 +++--- share/store/mem_file_test.go | 57 +++----- share/store/ods_file.go | 39 +++--- share/store/ods_file_test.go | 68 +--------- 24 files changed, 361 insertions(+), 436 deletions(-) create mode 100644 share/store/cache_file_test.go delete mode 100644 share/store/eds_file_test.go delete mode 100644 share/store/file.go create mode 100644 share/store/file_closer.go diff --git a/core/exchange.go b/core/exchange.go index 79f3d6337a..2a7429ed60 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -132,7 +132,7 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende } // extend block data - adder := ipld.NewProofsAdder(int(block.Data.SquareSize)) + adder := ipld.NewProofsAdder(int(block.Data.SquareSize), false) defer adder.Purge() eds, err := extendBlock(block.Data, block.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) @@ -177,7 +177,7 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 log.Debugw("fetched signed block from core", "height", b.Header.Height) // extend block data - adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) + adder := ipld.NewProofsAdder(int(b.Data.SquareSize), false) defer adder.Purge() eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) diff --git a/core/listener.go b/core/listener.go index 8447733506..93a28ac33d 100644 --- a/core/listener.go +++ b/core/listener.go @@ -193,7 +193,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS attribute.Int64("height", b.Header.Height), ) // extend block data - adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) + adder := ipld.NewProofsAdder(int(b.Data.SquareSize), false) defer adder.Purge() eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) diff --git a/go.mod b/go.mod index b4441c6f6b..ec4079a554 100644 --- a/go.mod +++ b/go.mod @@ -231,8 +231,8 @@ require ( github.com/jmhodges/levigo v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.2 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect - github.com/klauspost/reedsolomon v1.11.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/reedsolomon v1.12.1-0.20240110152930-bb8917fa442f github.com/koron/go-ssdp v0.0.4 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -325,7 +325,7 @@ require ( golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/go.sum b/go.sum index 962e46067b..52cd0fce20 100644 --- a/go.sum +++ b/go.sum @@ -1346,10 +1346,16 @@ github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8t github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= 
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= +github.com/klauspost/reedsolomon v1.12.0 h1:I5FEp3xSwVCcEh3F5A7dofEfhXdF/bWhQWPH+XwBFno= +github.com/klauspost/reedsolomon v1.12.0/go.mod h1:EPLZJeh4l27pUGC3aXOjheaoh1I9yut7xTURiW3LQ9Y= +github.com/klauspost/reedsolomon v1.12.1-0.20240110152930-bb8917fa442f h1:QEQvCKqgPSTRn9UIT65LSKY+7LCcGyiH6tIh6vCeHEw= +github.com/klauspost/reedsolomon v1.12.1-0.20240110152930-bb8917fa442f/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -2860,6 +2866,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go index e2ff13a4e0..68d38f63ad 100644 --- a/header/headertest/fraud/testing.go +++ b/header/headertest/fraud/testing.go @@ -58,7 +58,7 @@ func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header hdr := *h if h.Height == f.height { - adder := ipld.NewProofsAdder(odsSize) + adder := ipld.NewProofsAdder(odsSize, false) square := edstest.RandByzantineEDS(f.t, odsSize, nmt.NodeVisitor(adder.VisitFn())) dah, err := da.NewDataAvailabilityHeader(square) require.NoError(f.t, err) diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index bd179c1258..43450d0a34 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -79,7 +79,7 @@ func BenchmarkStore(b *testing.B) { b.StopTimer() b.ResetTimer() for i := 0; i < b.N; i++ { - adder := ipld.NewProofsAdder(size * 2) + adder := ipld.NewProofsAdder(size*2, false) shares := sharetest.RandShares(b, size*size) eds, err := rsmt2d.ComputeExtendedDataSquare( shares, diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index 4ea211cb1e..ff26404d45 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -77,7 +77,7 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header return nil } - adder := ipld.NewProofsAdder(len(dah.RowRoots)) + adder := ipld.NewProofsAdder(len(dah.RowRoots), false) ctx = ipld.CtxWithProofsAdder(ctx, adder) defer 
adder.Purge() diff --git a/share/eds/eds.go b/share/eds/eds.go index e0433a1b6b..64e12e162b 100644 --- a/share/eds/eds.go +++ b/share/eds/eds.go @@ -122,7 +122,7 @@ func getProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare) (map[cid.Cid // this adder ignores leaves, so that they are not added to the store we iterate through in // writeProofs - adder := ipld.NewProofsAdder(odsWidth * 2) + adder := ipld.NewProofsAdder(odsWidth*2, false) defer adder.Purge() eds, err := rsmt2d.ImportExtendedDataSquare( diff --git a/share/eds/file.go b/share/eds/file.go index 56cc218654..e3870db9bc 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -17,15 +17,16 @@ import ( "github.com/celestiaorg/celestia-node/share/ipld" ) -type File interface { - io.Closer - Size() int - ShareWithProof(xisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) - Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) - AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) - Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) - EDS() (*rsmt2d.ExtendedDataSquare, error) -} +// TODO: remove +//type File interface { +// io.Closer +// Size() int +// ShareWithProof(xisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) +// Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) +// AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) +// Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) +// EDS() (*rsmt2d.ExtendedDataSquare, error) +//} type FileConfig struct { Version FileVersion diff --git a/share/eds/utils.go b/share/eds/utils.go index b897dd14b5..0dda9c0da3 100644 --- a/share/eds/utils.go +++ b/share/eds/utils.go @@ -121,24 +121,24 @@ func CollectSharesByNamespace( utils.SetStatusAndEnd(span, err) }() - rootCIDs := ipld.FilterRootByNamespace(root, namespace) - if len(rootCIDs) == 0 { + rows := ipld.FilterRootByNamespace(root, namespace) + if len(rows) == 0 { return []share.NamespacedRow{}, nil } errGroup, ctx := errgroup.WithContext(ctx) - shares = make([]share.NamespacedRow, len(rootCIDs)) - for i, rootCID := range rootCIDs { + shares = make([]share.NamespacedRow, len(rows)) + for i, row := range rows { // shadow loop variables, to ensure correct values are captured - i, rootCID := i, rootCID + i, row := i, row errGroup.Go(func() error { - row, proof, err := ipld.GetSharesByNamespace(ctx, bg, rootCID, namespace, len(root.RowRoots)) + row, proof, err := ipld.GetSharesByNamespace(ctx, bg, row, namespace, len(root.RowRoots)) shares[i] = share.NamespacedRow{ Shares: row, Proof: proof, } if err != nil { - return fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), rootCID, err) + return fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), row, err) } return nil }) diff --git a/share/ipld/get_shares.go b/share/ipld/get_shares.go index 98db7012b5..2883e62761 100644 --- a/share/ipld/get_shares.go +++ b/share/ipld/get_shares.go @@ -44,12 +44,13 @@ func GetShares(ctx context.Context, bg blockservice.BlockGetter, root cid.Cid, s func GetSharesByNamespace( ctx context.Context, bGetter blockservice.BlockGetter, - root cid.Cid, + root []byte, namespace share.Namespace, maxShares int, ) ([]share.Share, *nmt.Proof, error) { + rootCid := MustCidFromNamespacedSha256(root) data := NewNamespaceData(maxShares, namespace, WithLeaves(), WithProofs()) - err := data.CollectLeavesByNamespace(ctx, bGetter, root) + err := data.CollectLeavesByNamespace(ctx, bGetter, 
rootCid) if err != nil { return nil, nil, err } diff --git a/share/ipld/nmt_adder.go b/share/ipld/nmt_adder.go index 7ce52859b2..f5065df224 100644 --- a/share/ipld/nmt_adder.go +++ b/share/ipld/nmt_adder.go @@ -103,13 +103,15 @@ func BatchSize(squareSize int) int { // ProofsAdder is used to collect proof nodes, while traversing merkle tree type ProofsAdder struct { - lock sync.RWMutex - proofs map[cid.Cid][]byte + lock sync.RWMutex + collectShares bool + proofs map[cid.Cid][]byte } // NewProofsAdder creates new instance of ProofsAdder. -func NewProofsAdder(squareSize int) *ProofsAdder { +func NewProofsAdder(squareSize int, collectShares bool) *ProofsAdder { return &ProofsAdder{ + collectShares: collectShares, // preallocate map to fit all inner nodes for given square size proofs: make(map[cid.Cid][]byte, innerNodesAmount(squareSize)), } @@ -156,7 +158,7 @@ func (a *ProofsAdder) VisitFn() nmt.NodeVisitorFn { if len(a.proofs) > 0 { return nil } - return a.visitInnerNodes + return a.visitNodes } // Purge removed proofs from ProofsAdder allowing GC to collect the memory @@ -171,10 +173,13 @@ func (a *ProofsAdder) Purge() { a.proofs = nil } -func (a *ProofsAdder) visitInnerNodes(hash []byte, children ...[]byte) { +func (a *ProofsAdder) visitNodes(hash []byte, children ...[]byte) { switch len(children) { case 1: - break + if a.collectShares { + id := MustCidFromNamespacedSha256(hash) + a.addProof(id, children[0]) + } case 2: id := MustCidFromNamespacedSha256(hash) a.addProof(id, append(children[0], children[1]...)) diff --git a/share/ipld/utils.go b/share/ipld/utils.go index d3e987e7f3..4c4da4b290 100644 --- a/share/ipld/utils.go +++ b/share/ipld/utils.go @@ -1,17 +1,15 @@ package ipld import ( - "github.com/ipfs/go-cid" - "github.com/celestiaorg/celestia-node/share" ) // FilterRootByNamespace returns the row roots from the given share.Root that contain the namespace. 
-func FilterRootByNamespace(root *share.Root, namespace share.Namespace) []cid.Cid { - rowRootCIDs := make([]cid.Cid, 0, len(root.RowRoots)) +func FilterRootByNamespace(root *share.Root, namespace share.Namespace) [][]byte { + rowRootCIDs := make([][]byte, 0, len(root.RowRoots)) for _, row := range root.RowRoots { if !namespace.IsOutsideRange(row, row) { - rowRootCIDs = append(rowRootCIDs, MustCidFromNamespacedSha256(row)) + rowRootCIDs = append(rowRootCIDs, row) } } return rowRootCIDs diff --git a/share/store/cache_file.go b/share/store/cache_file.go index 97a54447a4..4893fea415 100644 --- a/share/store/cache_file.go +++ b/share/store/cache_file.go @@ -14,76 +14,70 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/byzantine" "github.com/celestiaorg/celestia-node/share/ipld" ) +var _ EdsFile = (*CacheFile)(nil) + // TODO: allow concurrency safety fpr CacheFile methods type CacheFile struct { - File + EdsFile - codec rsmt2d.Codec + codec Codec axisCache []map[int]inMemoryAxis // disableCache disables caching of rows for testing purposes disableCache bool } type inMemoryAxis struct { + root []byte shares []share.Share proofs blockservice.BlockGetter } -func NewCacheFile(f File, codec rsmt2d.Codec) *CacheFile { +func NewCacheFile(f EdsFile, codec Codec) *CacheFile { return &CacheFile{ - File: f, + EdsFile: f, codec: codec, axisCache: []map[int]inMemoryAxis{make(map[int]inMemoryAxis), make(map[int]inMemoryAxis)}, } } -func (f *CacheFile) ShareWithProof( - ctx context.Context, - idx int, - axis rsmt2d.Axis, - axisRoot []byte, -) (*byzantine.ShareWithProof, error) { - sqrLn := f.Size() - axsIdx, shrIdx := idx/sqrLn, idx%sqrLn - if axis == rsmt2d.Col { - axsIdx, shrIdx = shrIdx, axsIdx +func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { + axisType, axisIdx, shrIdx := rsmt2d.Row, y, x + if x < f.Size()/2 && y >= f.Size()/2 { + axisType, axisIdx, shrIdx = rsmt2d.Col, x, y } - ax, err := f.axisWithProofs(axsIdx, axis) + ax, err := f.axisWithProofs(ctx, axisType, axisIdx) if err != nil { return nil, err } - // TODO(@walldiss): add proper calc to prealloc size for proofs - proof := make([]cid.Cid, 0, 16) - rootCid := ipld.MustCidFromNamespacedSha256(axisRoot) - proofs, err := ipld.GetProof(ctx, ax.proofs, rootCid, proof, shrIdx, sqrLn) + share, err := ipld.GetShareWithProof(ctx, ax.proofs, ax.root, shrIdx, f.Size(), axisType) if err != nil { - return nil, fmt.Errorf("bulding proof from cache: %w", err) + return nil, fmt.Errorf("building proof from cache: %w", err) } - return byzantine.NewShareWithProof(shrIdx, ax.shares[shrIdx], proofs), nil + return share, nil } -func (f *CacheFile) axisWithProofs(idx int, axis rsmt2d.Axis) (inMemoryAxis, error) { - ax := f.axisCache[axis][idx] +func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, error) { + // return axis from cache if possible + ax := f.axisCache[axisType][axisIdx] if ax.proofs != nil { return ax, nil } // build proofs from shares and cache them - shrs, err := f.Axis(idx, axis) + shrs, err := f.axis(ctx, axisType, axisIdx) if err != nil { return inMemoryAxis{}, err } // calculate proofs - adder := ipld.NewProofsAdder(f.Size(), ipld.CollectShares) - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(idx), + adder := ipld.NewProofsAdder(f.Size(), true) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx), 
nmt.NodeVisitor(adder.VisitFn())) for _, shr := range shrs { err = tree.Push(shr) @@ -93,79 +87,78 @@ func (f *CacheFile) axisWithProofs(idx int, axis rsmt2d.Axis) (inMemoryAxis, err } // build the tree - if _, err := tree.Root(); err != nil { + root, err := tree.Root() + if err != nil { return inMemoryAxis{}, err } - ax = f.axisCache[axis][idx] + ax = f.axisCache[axisType][axisIdx] + ax.root = root + ax.shares = shrs ax.proofs = newRowProofsGetter(adder.Proofs()) if !f.disableCache { - f.axisCache[axis][idx] = ax + f.axisCache[axisType][axisIdx] = ax } return ax, nil } -func (f *CacheFile) Axis(idx int, axis rsmt2d.Axis) ([]share.Share, error) { +func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { // return axis from cache if possible - ax, ok := f.axisCache[axis][idx] + ax, ok := f.axisCache[axisType][axisIdx] if ok { - return ax.shares, nil - } - - // recompute axis from half - original, err := f.AxisHalf(idx, axis) - if err != nil { - return nil, err + return ax.shares[:f.Size()/2], nil } - parity, err := f.codec.Encode(original) + // read axis from file if axis is in the first quadrant + half, err := f.EdsFile.AxisHalf(ctx, axisType, axisIdx) if err != nil { - return nil, err + return nil, fmt.Errorf("reading axis from inner file: %w", err) } - shares := make([]share.Share, 0, len(original)+len(parity)) - shares = append(shares, original...) - shares = append(shares, parity...) - - // cache axis shares if !f.disableCache { - f.axisCache[axis][idx] = inMemoryAxis{ - shares: shares, + axis, err := extendShares(f.codec, half) + if err != nil { + return nil, fmt.Errorf("extending shares: %w", err) + } + f.axisCache[axisType][axisIdx] = inMemoryAxis{ + shares: axis, } } - return shares, nil + + return half, nil } -func (f *CacheFile) AxisHalf(idx int, axis rsmt2d.Axis) ([]share.Share, error) { - // return axis from cache if possible - ax, ok := f.axisCache[axis][idx] - if ok { - return ax.shares[:f.Size()/2], nil +func (f *CacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + original, err := f.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return nil, err } - // read axis from file if axis is in the first quadrant - if idx < f.Size()/2 { - return f.File.AxisHalf(idx, axis) + return extendShares(f.codec, original) +} + +func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + ax, err := f.axisWithProofs(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return share.NamespacedRow{}, err } - shares := make([]share.Share, 0, f.Size()/2) - // extend opposite half of the square while collecting shares for the first half of required axis - //TODO: parallelize this - for i := 0; i < f.Size()/2; i++ { - ax, err := f.Axis(i, oppositeAxis(axis)) - if err != nil { - return nil, err - } - shares = append(shares, ax[idx]) + row, proof, err := ipld.GetSharesByNamespace(ctx, ax.proofs, ax.root, namespace, f.Size()) + if err != nil { + return share.NamespacedRow{}, fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), row, err) } - return shares, nil + + return share.NamespacedRow{ + Shares: row, + Proof: proof, + }, nil } -func (f *CacheFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { +func (f *CacheFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) { shares := make([][]byte, 0, f.Size()*f.Size()) for i := 0; i < f.Size(); i++ { - ax, err := f.Axis(i, rsmt2d.Row) + ax, err := f.axis(ctx, 
rsmt2d.Row, i) if err != nil { return nil, err } @@ -182,34 +175,6 @@ func (f *CacheFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { return eds, nil } -func (f *CacheFile) SharesByNamespace( - ctx context.Context, - root *share.Root, - namespace share.Namespace, -) (share.NamespacedShares, error) { - // collect all shares with proofs within the namespace - var shares []share.NamespacedRow - for i, row := range root.RowRoots { - if !namespace.IsOutsideRange(row, row) { - ax, err := f.axisWithProofs(i, rsmt2d.Row) - if err != nil { - return nil, err - } - - rowCid := ipld.MustCidFromNamespacedSha256(row) - row, proof, err := ipld.GetSharesByNamespace(ctx, ax.proofs, rowCid, namespace, f.Size()) - if err != nil { - return nil, fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), row, err) - } - shares = append(shares, share.NamespacedRow{ - Shares: row, - Proof: proof, - }) - } - } - return shares, nil -} - // rowProofsGetter implements blockservice.BlockGetter interface type rowProofsGetter struct { proofs map[cid.Cid]blocks.Block @@ -235,10 +200,3 @@ func (r rowProofsGetter) GetBlock(_ context.Context, c cid.Cid) (blocks.Block, e func (r rowProofsGetter) GetBlocks(_ context.Context, _ []cid.Cid) <-chan blocks.Block { panic("not implemented") } - -func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis { - if axis == rsmt2d.Col { - return rsmt2d.Row - } - return rsmt2d.Col -} diff --git a/share/store/cache_file_test.go b/share/store/cache_file_test.go new file mode 100644 index 0000000000..b58ab8bd7e --- /dev/null +++ b/share/store/cache_file_test.go @@ -0,0 +1,34 @@ +package store + +import ( + "github.com/celestiaorg/rsmt2d" + "github.com/stretchr/testify/require" + "testing" +) + +func TestCacheFile(t *testing.T) { + size := 8 + mem := newMemPools(NewCodec()) + newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { + path := t.TempDir() + "/testfile" + fl, err := CreateOdsFile(path, eds, mem) + require.NoError(t, err) + return NewCacheFile(fl, mem.codec) + } + + t.Run("Share", func(t *testing.T) { + testFileShare(t, newFile, size) + }) + + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, newFile, size) + }) + + t.Run("Data", func(t *testing.T) { + testFileData(t, newFile, size) + }) + + t.Run("EDS", func(t *testing.T) { + testFileEds(t, newFile, size) + }) +} diff --git a/share/store/codec.go b/share/store/codec.go index 4fc1a1ef1d..af9dd5af8e 100644 --- a/share/store/codec.go +++ b/share/store/codec.go @@ -22,7 +22,7 @@ func (l *codec) Encoder(len int) (reedsolomon.Encoder, error) { enc, ok := l.encCache.Load(len) if !ok { var err error - enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(false)) + enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(true)) if err != nil { return nil, err } diff --git a/share/store/eds_file_test.go b/share/store/eds_file_test.go deleted file mode 100644 index 659eab06b2..0000000000 --- a/share/store/eds_file_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package store - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share/eds/edstest" -) - -func TestCreateEdsFile(t *testing.T) { - path := t.TempDir() + "/testfile" - edsIn := edstest.RandEDS(t, 8) - - _, err := CreateEdsFile(path, edsIn) - require.NoError(t, err) - - f, err := OpenEdsFile(path) - require.NoError(t, err) - edsOut, err := f.EDS(context.TODO()) - require.NoError(t, err) - assert.True(t, 
edsIn.Equals(edsOut)) -} - -func TestEdsFile(t *testing.T) { - size := 32 - createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { - path := t.TempDir() + "/testfile" - fl, err := CreateEdsFile(path, eds) - require.NoError(t, err) - return fl - } - - t.Run("Share", func(t *testing.T) { - testFileShare(t, createOdsFile, size) - }) - - t.Run("AxisHalf", func(t *testing.T) { - testFileAxisHalf(t, createOdsFile, size) - }) - - t.Run("Data", func(t *testing.T) { - testFileDate(t, createOdsFile, size) - }) - - t.Run("EDS", func(t *testing.T) { - testFileEds(t, createOdsFile, size) - }) -} - -// BenchmarkAxisFromEdsFile/Size:32/Axis:row/squareHalf:first(original)-10 288624 3758 ns/op -// BenchmarkAxisFromEdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 313893 3729 ns/op -// BenchmarkAxisFromEdsFile/Size:32/Axis:col/squareHalf:first(original)-10 29406 41051 ns/op -// BenchmarkAxisFromEdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 29145 41047 ns/op -// BenchmarkAxisFromEdsFile/Size:64/Axis:row/squareHalf:first(original)-10 186302 6532 ns/op -// BenchmarkAxisFromEdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 186172 6383 ns/op -// BenchmarkAxisFromEdsFile/Size:64/Axis:col/squareHalf:first(original)-10 14451 82114 ns/op -// BenchmarkAxisFromEdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 14572 82047 ns/op -// BenchmarkAxisFromEdsFile/Size:128/Axis:row/squareHalf:first(original)-10 94576 11349 ns/op -// BenchmarkAxisFromEdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 103954 11276 ns/op -// BenchmarkAxisFromEdsFile/Size:128/Axis:col/squareHalf:first(original)-10 7072 165301 ns/op -// BenchmarkAxisFromEdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 6805 165173 ns/op -func BenchmarkAxisFromEdsFile(b *testing.B) { - minSize, maxSize := 32, 128 - dir := b.TempDir() - newFile := func(size int) File { - eds := edstest.RandEDS(b, size) - path := dir + "/testfile" - f, err := CreateEdsFile(path, eds) - require.NoError(b, err) - return f - } - benchGetAxisFromFile(b, newFile, minSize, maxSize) -} - -// BenchmarkShareFromEdsFile/Size:32/Axis:row/squareHalf:first(original)-10 17850 66716 ns/op -// BenchmarkShareFromEdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 18517 64462 ns/op -// BenchmarkShareFromEdsFile/Size:32/Axis:col/squareHalf:first(original)-10 10000 104241 ns/op -// BenchmarkShareFromEdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 10000 101964 ns/op -// BenchmarkShareFromEdsFile/Size:64/Axis:row/squareHalf:first(original)-10 8641 129674 ns/op -// BenchmarkShareFromEdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 9022 124899 ns/op -// BenchmarkShareFromEdsFile/Size:64/Axis:col/squareHalf:first(original)-10 5625 204934 ns/op -// BenchmarkShareFromEdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 5785 200634 ns/op -// BenchmarkShareFromEdsFile/Size:128/Axis:row/squareHalf:first(original)-10 4424 262753 ns/op -// BenchmarkShareFromEdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 4690 252676 ns/op -// BenchmarkShareFromEdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2834 415072 ns/op -// BenchmarkShareFromEdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 2934 426160 ns/op -func BenchmarkShareFromEdsFile(b *testing.B) { - minSize, maxSize := 32, 128 - dir := b.TempDir() - newFile := func(size int) File { - eds := edstest.RandEDS(b, size) - path := dir + "/testfile" - f, err := CreateEdsFile(path, eds) - require.NoError(b, err) - return f - } - benchGetShareFromFile(b, newFile, minSize, maxSize) -} 
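The benchmarks in the file deleted above record the cost asymmetry behind much of this series: a row read stays around 3.8µs at size 32 while a column read costs about 41µs. A rough sketch of why, assuming shares are flattened row-major on disk behind an io.ReaderAt, as the numbers suggest; readRow, readCol, and headerSize are hypothetical names for illustration:

package sketch

import "io"

// readRow fetches a whole row with a single contiguous ReadAt.
func readRow(r io.ReaderAt, rowIdx, sqrLn, shrLn, headerSize int) ([][]byte, error) {
	buf := make([]byte, sqrLn*shrLn)
	if _, err := r.ReadAt(buf, int64(headerSize+rowIdx*sqrLn*shrLn)); err != nil {
		return nil, err
	}
	row := make([][]byte, sqrLn)
	for i := range row {
		row[i] = buf[i*shrLn : (i+1)*shrLn]
	}
	return row, nil
}

// readCol pays one strided ReadAt per share, one share every sqrLn shares,
// which is where the order-of-magnitude gap in the benchmarks comes from.
func readCol(r io.ReaderAt, colIdx, sqrLn, shrLn, headerSize int) ([][]byte, error) {
	col := make([][]byte, sqrLn)
	for i := range col {
		col[i] = make([]byte, shrLn)
		off := int64(headerSize + (i*sqrLn+colIdx)*shrLn)
		if _, err := r.ReadAt(col[i], off); err != nil {
			return nil, err
		}
	}
	return col, nil
}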
diff --git a/share/store/file.go b/share/store/file.go deleted file mode 100644 index c3d5f4b6c5..0000000000 --- a/share/store/file.go +++ /dev/null @@ -1,25 +0,0 @@ -package store - -import ( - "context" - "io" - - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" -) - -type File interface { - io.Closer - // Size returns square size of the file. - Size() int - // Share returns share and corresponding proof for the given axis and share index in this axis. - Share(ctx context.Context, axisType rsmt2d.Axis, axisIdx, shrIdx int) (share.Share, nmt.Proof, error) - // AxisHalf returns shares for the first half of the axis of the given type and index. - AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) - // Data returns data for the given namespace and row index. - Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) - // EDS returns extended data square stored in the file. - EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) -} diff --git a/share/store/file_closer.go b/share/store/file_closer.go new file mode 100644 index 0000000000..fdc850854b --- /dev/null +++ b/share/store/file_closer.go @@ -0,0 +1,67 @@ +package store + +import ( + "context" + "errors" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/rsmt2d" + "sync/atomic" +) + +var _ EdsFile = (*closeOnceFile)(nil) + +var errFileClosed = errors.New("file closed") + +type closeOnceFile struct { + f EdsFile + closed atomic.Bool +} + +func CloseOnceFile(f EdsFile) EdsFile { + return &closeOnceFile{f: f} +} + +func (c *closeOnceFile) Close() error { + if !c.closed.Swap(true) { + err := c.f.Close() + // release reference to the file + c.f = nil + return err + } + return nil +} + +func (c *closeOnceFile) Size() int { + if c.closed.Load() { + return 0 + } + return c.f.Size() +} + +func (c *closeOnceFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { + if c.closed.Load() { + return nil, errFileClosed + } + return c.f.Share(ctx, x, y) +} + +func (c *closeOnceFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + if c.closed.Load() { + return nil, errFileClosed + } + return c.f.AxisHalf(ctx, axisType, axisIdx) +} + +func (c *closeOnceFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + if c.closed.Load() { + return share.NamespacedRow{}, errFileClosed + } + return c.f.Data(ctx, namespace, rowIdx) +} + +func (c *closeOnceFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) { + if c.closed.Load() { + return nil, errFileClosed + } + return c.f.EDS(ctx) +} diff --git a/share/store/file_test.go b/share/store/file_test.go index 2d31496551..c31f5c16e0 100644 --- a/share/store/file_test.go +++ b/share/store/file_test.go @@ -2,8 +2,9 @@ package store import ( "context" - "crypto/sha256" + "fmt" mrand "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -15,7 +16,7 @@ import ( "github.com/celestiaorg/celestia-node/share/sharetest" ) -type createFile func(eds *rsmt2d.ExtendedDataSquare) File +type createFile func(eds *rsmt2d.ExtendedDataSquare) EdsFile func testFileShare(t *testing.T, createFile createFile, size int) { eds := edstest.RandEDS(t, size) @@ -25,33 +26,27 @@ func testFileShare(t *testing.T, createFile createFile, size int) { require.NoError(t, err) width := int(eds.Width()) - for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, 
rsmt2d.Row} { - for i := 0; i < width*width; i++ { - axisIdx, shrIdx := i/width, i%width - if axisType == rsmt2d.Col { - axisIdx, shrIdx = shrIdx, axisIdx - } - - shr, prf, err := fl.Share(context.TODO(), axisType, axisIdx, shrIdx) + for x := 0; x < width; x++ { + for y := 0; y < width; y++ { + shr, err := fl.Share(context.TODO(), x, y) require.NoError(t, err) - namespace := share.ParitySharesNamespace - if axisIdx < width/2 && shrIdx < width/2 { - namespace = share.GetNamespace(shr) + var axishash []byte + if shr.Axis == rsmt2d.Row { + require.Equal(t, getAxis(eds, shr.Axis, y)[x], shr.Share) + axishash = root.RowRoots[y] + } else { + require.Equal(t, getAxis(eds, shr.Axis, x)[y], shr.Share) + axishash = root.ColumnRoots[x] } - axishash := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - axishash = root.ColumnRoots[axisIdx] - } - - ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) + ok := shr.Validate(axishash, x, y, width) require.True(t, ok) } } } -func testFileDate(t *testing.T, createFile createFile, size int) { +func testFileData(t *testing.T, createFile createFile, size int) { // generate EDS with random data and some shares with the same namespace namespace := sharetest.RandV0Namespace() amount := mrand.Intn(size*size-1) + 1 @@ -90,3 +85,70 @@ func testFileEds(t *testing.T, createFile createFile, size int) { require.NoError(t, err) require.True(t, eds.Equals(eds2)) } + +func benchGetAxisFromFile(b *testing.B, newFile func(size int) EdsFile, minSize, maxSize int) { + for size := minSize; size <= maxSize; size *= 2 { + f := newFile(size) + + // loop over all possible axis types and quadrants + for _, axisType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for _, squareHalf := range []int{0, 1} { + name := fmt.Sprintf("Size:%v/Axis:%s/squareHalf:%s", size, axisType, strconv.Itoa(squareHalf)) + b.Run(name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := f.AxisHalf(context.TODO(), axisType, f.Size()/2*(squareHalf)) + require.NoError(b, err) + } + }) + } + } + } +} + +func benchGetShareFromFile(b *testing.B, newFile func(size int) EdsFile, minSize, maxSize int) { + for size := minSize; size <= maxSize; size *= 2 { + f := newFile(size) + + // loop over all possible axis types and quadrants + for _, q := range quadrants { + name := fmt.Sprintf("Size:%v/quadrant:%s", size, q) + b.Run(name, func(b *testing.B) { + x, y := q.coordinates(f.Size()) + // warm up cache + _, err := f.Share(context.TODO(), x, y) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := f.Share(context.TODO(), x, y) + require.NoError(b, err) + } + }) + } + + } +} + +type quadrant int + +var ( + quadrants = []quadrant{1, 2, 3, 4} +) + +func (q quadrant) String() string { + return strconv.Itoa(int(q)) +} + +func (q quadrant) coordinates(edsSize int) (x, y int) { + x = edsSize/2*(int(q-1)%2) + 1 + y = edsSize/2*(int(q-1)/2) + 1 + return +} + +func TestQuandrant(t *testing.T) { + for _, q := range quadrants { + x, y := q.coordinates(4) + fmt.Println(x, y) + } +} diff --git a/share/store/mem_file.go b/share/store/mem_file.go index 24f9cfd110..e9cc64ec68 100644 --- a/share/store/mem_file.go +++ b/share/store/mem_file.go @@ -32,7 +32,7 @@ func (f *MemFile) Share( axisType := rsmt2d.Row axisIdx, shrIdx := y, x - shares := f.axis(axisType, axisIdx) + shares := getAxis(f.Eds, axisType, axisIdx) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) for _, shr := range shares { err := tree.Push(shr) 
@@ -54,11 +54,30 @@ func (f *MemFile) Share( } func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - return f.axis(axisType, axisIdx)[:f.Size()/2], nil + return getAxis(f.Eds, axisType, axisIdx)[:f.Size()/2], nil } func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { - shares := f.axis(rsmt2d.Row, rowIdx) + shares := getAxis(f.Eds, rsmt2d.Row, rowIdx) + return ndDataFromShares(shares, namespace, rowIdx) +} + +func (f *MemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { + return f.Eds, nil +} + +func getAxis(eds *rsmt2d.ExtendedDataSquare, axisType rsmt2d.Axis, axisIdx int) []share.Share { + switch axisType { + case rsmt2d.Row: + return eds.Row(uint(axisIdx)) + case rsmt2d.Col: + return eds.Col(uint(axisIdx)) + default: + panic("unknown axis") + } +} + +func ndDataFromShares(shares []share.Share, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { bserv := ipld.NewMemBlockservice() batchAdder := ipld.NewNmtNodeAdder(context.TODO(), bserv, ipld.MaxSizeBatchOption(len(shares))) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares)/2), uint(rowIdx), @@ -80,8 +99,7 @@ func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) return share.NamespacedRow{}, err } - cid := ipld.MustCidFromNamespacedSha256(root) - row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, cid, namespace, len(shares)) + row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, root, namespace, len(shares)) if err != nil { return share.NamespacedRow{}, err } @@ -90,18 +108,3 @@ func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) Proof: proof, }, nil } - -func (f *MemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { - return f.Eds, nil -} - -func (f *MemFile) axis(axisType rsmt2d.Axis, axisIdx int) []share.Share { - switch axisType { - case rsmt2d.Row: - return f.Eds.Row(uint(axisIdx)) - case rsmt2d.Col: - return f.Eds.Col(uint(axisIdx)) - default: - panic("unknown axis") - } -} diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go index 9df1616afd..d502a9f7e2 100644 --- a/share/store/mem_file_test.go +++ b/share/store/mem_file_test.go @@ -1,52 +1,29 @@ package store import ( - "context" - mrand "math/rand" + "github.com/celestiaorg/rsmt2d" "testing" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/sharetest" ) -func TestMemFileShare(t *testing.T) { - eds := edstest.RandEDS(t, 32) - root, err := share.NewRoot(eds) - require.NoError(t, err) - fl := &MemFile{Eds: eds} - - width := int(eds.Width()) - for x := 0; x < width; x++ { - for y := 0; y < width; y++ { - shr, err := fl.Share(context.TODO(), x, y) - require.NoError(t, err) - - axishash := root.RowRoots[y] - ok := shr.Validate(axishash, x, y, width) - require.True(t, ok) - } +func TestMemFile(t *testing.T) { + size := 8 + newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { + return &MemFile{Eds: eds} } -} -func TestMemFileDate(t *testing.T) { - size := 32 + t.Run("Share", func(t *testing.T) { + testFileShare(t, newFile, size) + }) - // generate EDS with random data and some shares with the same namespace - namespace := sharetest.RandV0Namespace() - amount := mrand.Intn(size*size-1) + 1 - eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, 
size) + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, newFile, size) + }) - file := &MemFile{Eds: eds} + t.Run("Data", func(t *testing.T) { + testFileData(t, newFile, size) + }) - for i, root := range dah.RowRoots { - if !namespace.IsOutsideRange(root, root) { - nd, err := file.Data(context.Background(), namespace, i) - require.NoError(t, err) - ok := nd.Verify(root, namespace) - require.True(t, ok) - } - } + t.Run("EDS", func(t *testing.T) { + testFileEds(t, newFile, size) + }) } diff --git a/share/store/ods_file.go b/share/store/ods_file.go index 5dbf63f775..7f9d53eb15 100644 --- a/share/store/ods_file.go +++ b/share/store/ods_file.go @@ -3,20 +3,18 @@ package store import ( "context" "fmt" - "io" "os" "sync" "golang.org/x/sync/errgroup" "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" ) -var _ File = (*OdsFile)(nil) +var _ EdsFile = (*OdsFile)(nil) type OdsFile struct { path string @@ -26,11 +24,6 @@ type OdsFile struct { memPool memPool } -type fileBackend interface { - io.ReaderAt - io.Closer -} - // OpenOdsFile opens an existing file. File has to be closed after usage. func OpenOdsFile(path string) (*OdsFile, error) { f, err := os.Open(path) @@ -123,7 +116,7 @@ func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, } type odsInMemFile struct { - File + EdsFile axisType rsmt2d.Axis square [][]share.Share } @@ -212,7 +205,7 @@ func (f *OdsFile) readCol(idx int) ([]share.Share, error) { func computeAxisHalf( ctx context.Context, - f File, + f EdsFile, codec Codec, axisType rsmt2d.Axis, axisIdx int, @@ -287,36 +280,40 @@ func extendShares(codec Codec, original []share.Share) ([]share.Share, error) { err = enc.Encode(shares) if err != nil { - return nil, err + return nil, fmt.Errorf("encoder: %w", err) } return shares, nil } -func (f *OdsFile) Share( - ctx context.Context, - axisType rsmt2d.Axis, - axisIdx, shrIdx int, -) (share.Share, nmt.Proof, error) { +func (f *OdsFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { + axisType, axisIdx, shrIdx := rsmt2d.Row, y, x + if x < f.Size()/2 && y >= f.Size()/2 { + axisType, axisIdx, shrIdx = rsmt2d.Col, x, y + } shares, err := f.axis(ctx, axisType, axisIdx) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) for _, shr := range shares { err := tree.Push(shr) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } } proof, err := tree.ProveRange(shrIdx, shrIdx+1) if err != nil { - return nil, nmt.Proof{}, err + return nil, err } - return shares[shrIdx], proof, nil + return &share.ShareWithProof{ + Share: shares[shrIdx], + Proof: &proof, + Axis: axisType, + }, nil } func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { @@ -324,7 +321,7 @@ func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx in if err != nil { return share.NamespacedRow{}, err } - return ndDateFromShares(shares, namespace, rowIdx) + return ndDataFromShares(shares, namespace, rowIdx) } func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { diff --git a/share/store/ods_file_test.go b/share/store/ods_file_test.go index 4286d311c1..ed21f32857 100644 --- a/share/store/ods_file_test.go +++ b/share/store/ods_file_test.go @@ -2,7 +2,6 @@ package store import ( "context" - "fmt" "testing" 
"github.com/stretchr/testify/assert" @@ -30,7 +29,7 @@ func TestCreateOdsFile(t *testing.T) { func TestOdsFile(t *testing.T) { size := 32 mem := newMemPools(NewCodec()) - createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) File { + createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { path := t.TempDir() + "/testfile" fl, err := CreateOdsFile(path, eds, mem) require.NoError(t, err) @@ -46,7 +45,7 @@ func TestOdsFile(t *testing.T) { }) t.Run("Data", func(t *testing.T) { - testFileDate(t, createOdsFile, size) + testFileData(t, createOdsFile, size) }) t.Run("EDS", func(t *testing.T) { @@ -103,7 +102,7 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { dir := b.TempDir() mem := newMemPools(NewCodec()) - newFile := func(size int) File { + newFile := func(size int) EdsFile { eds := edstest.RandEDS(b, size) path := dir + "/testfile" f, err := CreateOdsFile(path, eds, mem) @@ -126,11 +125,11 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { // BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2114 514642 ns/op // BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 373 3068104 ns/op func BenchmarkShareFromOdsFile(b *testing.B) { - minSize, maxSize := 32, 128 + minSize, maxSize := 128, 128 dir := b.TempDir() mem := newMemPools(NewCodec()) - newFile := func(size int) File { + newFile := func(size int) EdsFile { eds := edstest.RandEDS(b, size) path := dir + "/testfile" f, err := CreateOdsFile(path, eds, mem) @@ -140,60 +139,3 @@ func BenchmarkShareFromOdsFile(b *testing.B) { benchGetShareFromFile(b, newFile, minSize, maxSize) } - -type squareHalf int - -func (q squareHalf) String() string { - switch q { - case 0: - return "first(original)" - case 1: - return "second(extended)" - } - return "unknown" -} - -func benchGetAxisFromFile(b *testing.B, newFile func(size int) File, minSize, maxSize int) { - for size := minSize; size <= maxSize; size *= 2 { - f := newFile(size) - - // loop over all possible axis types and quadrants - for _, axisType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { - for _, squareHalf := range []squareHalf{0, 1} { - name := fmt.Sprintf("Size:%v/Axis:%s/squareHalf:%s", size, axisType, squareHalf) - b.Run(name, func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := f.AxisHalf(context.TODO(), axisType, f.Size()/2*int(squareHalf)) - require.NoError(b, err) - } - }) - } - } - } -} - -func benchGetShareFromFile(b *testing.B, newFile func(size int) File, minSize, maxSize int) { - for size := minSize; size <= maxSize; size *= 2 { - f := newFile(size) - - // loop over all possible axis types and quadrants - for _, axisType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { - for _, squareHalf := range []squareHalf{0, 1} { - name := fmt.Sprintf("Size:%v/Axis:%s/squareHalf:%s", size, axisType, squareHalf) - b.Run(name, func(b *testing.B) { - idx := f.Size() - 1 - // warm up cache - _, _, err := f.Share(context.TODO(), axisType, f.Size()/2*int(squareHalf), idx) - require.NoError(b, err) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, err = f.Share(context.TODO(), axisType, f.Size()/2*int(squareHalf), idx) - require.NoError(b, err) - } - }) - } - } - } -} From f5c118327149de03690eb4fe98d2cf66503675c0 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Jan 2024 01:16:50 +0700 Subject: [PATCH 049/132] - update file interface - add file streaming - add height and datahash to header - add thread safety to ods file - extract mem pool --- share/store/{ => file}/cache_file.go | 10 +- share/store/{ => 
file}/cache_file_test.go | 7 +- share/store/{ => file}/codec.go | 20 +- share/store/{ => file}/codec_test.go | 2 +- share/store/{ => file}/eds_file.go | 8 +- share/store/{ => file}/file_closer.go | 26 +- share/store/file/file_header.go | 77 ++++++ share/store/{ => file}/file_test.go | 2 +- share/store/file/in_mem_ods_file.go | 217 +++++++++++++++++ share/store/{ => file}/mem_file.go | 4 +- share/store/file/mem_file_test.go | 24 ++ share/store/file/mempool.go | 75 ++++++ share/store/{ => file}/ods_file.go | 274 +++++++--------------- share/store/{ => file}/ods_file_test.go | 21 +- share/store/file_header.go | 73 ------ share/store/mem_file_test.go | 29 --- 16 files changed, 541 insertions(+), 328 deletions(-) rename share/store/{ => file}/cache_file.go (96%) rename share/store/{ => file}/cache_file_test.go (81%) rename share/store/{ => file}/codec.go (57%) rename share/store/{ => file}/codec_test.go (99%) rename share/store/{ => file}/eds_file.go (79%) rename share/store/{ => file}/file_closer.go (76%) create mode 100644 share/store/file/file_header.go rename share/store/{ => file}/file_test.go (99%) create mode 100644 share/store/file/in_mem_ods_file.go rename share/store/{ => file}/mem_file.go (98%) create mode 100644 share/store/file/mem_file_test.go create mode 100644 share/store/file/mempool.go rename share/store/{ => file}/ods_file.go (50%) rename share/store/{ => file}/ods_file_test.go (92%) delete mode 100644 share/store/file_header.go delete mode 100644 share/store/mem_file_test.go diff --git a/share/store/cache_file.go b/share/store/file/cache_file.go similarity index 96% rename from share/store/cache_file.go rename to share/store/file/cache_file.go index 4893fea415..f2e26a707f 100644 --- a/share/store/cache_file.go +++ b/share/store/file/cache_file.go @@ -1,4 +1,4 @@ -package store +package file import ( "context" @@ -23,7 +23,6 @@ var _ EdsFile = (*CacheFile)(nil) type CacheFile struct { EdsFile - codec Codec axisCache []map[int]inMemoryAxis // disableCache disables caching of rows for testing purposes disableCache bool @@ -35,10 +34,9 @@ type inMemoryAxis struct { proofs blockservice.BlockGetter } -func NewCacheFile(f EdsFile, codec Codec) *CacheFile { +func NewCacheFile(f EdsFile) *CacheFile { return &CacheFile{ EdsFile: f, - codec: codec, axisCache: []map[int]inMemoryAxis{make(map[int]inMemoryAxis), make(map[int]inMemoryAxis)}, } } @@ -117,7 +115,7 @@ func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx } if !f.disableCache { - axis, err := extendShares(f.codec, half) + axis, err := extendShares(codec, half) if err != nil { return nil, fmt.Errorf("extending shares: %w", err) } @@ -135,7 +133,7 @@ func (f *CacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) return nil, err } - return extendShares(f.codec, original) + return extendShares(codec, original) } func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { diff --git a/share/store/cache_file_test.go b/share/store/file/cache_file_test.go similarity index 81% rename from share/store/cache_file_test.go rename to share/store/file/cache_file_test.go index b58ab8bd7e..c5a74f689c 100644 --- a/share/store/cache_file_test.go +++ b/share/store/file/cache_file_test.go @@ -1,4 +1,4 @@ -package store +package file import ( "github.com/celestiaorg/rsmt2d" @@ -8,12 +8,11 @@ import ( func TestCacheFile(t *testing.T) { size := 8 - mem := newMemPools(NewCodec()) newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { path := 
t.TempDir() + "/testfile" - fl, err := CreateOdsFile(path, eds, mem) + fl, err := CreateOdsFile(path, 1, []byte{}, eds) require.NoError(t, err) - return NewCacheFile(fl, mem.codec) + return NewCacheFile(fl) } t.Run("Share", func(t *testing.T) { diff --git a/share/store/codec.go b/share/store/file/codec.go similarity index 57% rename from share/store/codec.go rename to share/store/file/codec.go index af9dd5af8e..a27280be11 100644 --- a/share/store/codec.go +++ b/share/store/file/codec.go @@ -1,4 +1,4 @@ -package store +package file import ( "sync" @@ -6,27 +6,33 @@ import ( "github.com/klauspost/reedsolomon" ) +var codec Codec + +func init() { + codec = NewCodec() +} + type Codec interface { Encoder(len int) (reedsolomon.Encoder, error) } -type codec struct { - encCache sync.Map +type codecCache struct { + cache sync.Map } func NewCodec() Codec { - return &codec{} + return &codecCache{} } -func (l *codec) Encoder(len int) (reedsolomon.Encoder, error) { - enc, ok := l.encCache.Load(len) +func (l *codecCache) Encoder(len int) (reedsolomon.Encoder, error) { + enc, ok := l.cache.Load(len) if !ok { var err error enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(true)) if err != nil { return nil, err } - l.encCache.Store(len, enc) + l.cache.Store(len, enc) } return enc.(reedsolomon.Encoder), nil } diff --git a/share/store/codec_test.go b/share/store/file/codec_test.go similarity index 99% rename from share/store/codec_test.go rename to share/store/file/codec_test.go index a13fba156d..2a214165c8 100644 --- a/share/store/codec_test.go +++ b/share/store/file/codec_test.go @@ -1,4 +1,4 @@ -package store +package file import ( "fmt" diff --git a/share/store/eds_file.go b/share/store/file/eds_file.go similarity index 79% rename from share/store/eds_file.go rename to share/store/file/eds_file.go index 5d1eefb758..fa2fcc798f 100644 --- a/share/store/eds_file.go +++ b/share/store/file/eds_file.go @@ -1,4 +1,4 @@ -package store +package file import ( "context" @@ -11,8 +11,14 @@ import ( type EdsFile interface { io.Closer + // Reader returns binary reader for the file. + Reader() (io.Reader, error) // Size returns square size of the file. Size() int + // Height returns height of the file. + Height() uint64 + // DataHash returns data hash of the file. + DataHash() share.DataHash // Share returns share and corresponding proof for the given axis and share index in this axis. Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) // AxisHalf returns shares for the first half of the axis of the given type and index. 
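A usage sketch for orientation, not part of the patch: it exercises the EdsFile interface exactly as declared above; the helper name and the io.Writer destination are illustrative assumptions.

package file

import (
	"context"
	"fmt"
	"io"
)

// sampleAndStream samples a single share together with its proof and then
// streams the serialized file bytes, closing the file when done.
func sampleAndStream(ctx context.Context, f EdsFile, w io.Writer) error {
	defer f.Close()

	// Share returns the share at (x, y) with its inclusion proof;
	// verification against the data root is left to the caller.
	if _, err := f.Share(ctx, 0, f.Size()-1); err != nil {
		return fmt.Errorf("sampling share at height %d: %w", f.Height(), err)
	}

	// Reader exposes the file's binary form, e.g. for handing it to a peer.
	r, err := f.Reader()
	if err != nil {
		return fmt.Errorf("opening reader: %w", err)
	}
	_, err = io.Copy(w, r)
	return err
}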
diff --git a/share/store/file_closer.go b/share/store/file/file_closer.go similarity index 76% rename from share/store/file_closer.go rename to share/store/file/file_closer.go index fdc850854b..13cb71a40a 100644 --- a/share/store/file_closer.go +++ b/share/store/file/file_closer.go @@ -1,10 +1,11 @@ -package store +package file import ( "context" "errors" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/rsmt2d" + "io" "sync/atomic" ) @@ -17,7 +18,7 @@ type closeOnceFile struct { closed atomic.Bool } -func CloseOnceFile(f EdsFile) EdsFile { +func CloseOnceFile(f EdsFile) *closeOnceFile { return &closeOnceFile{f: f} } @@ -31,6 +32,13 @@ func (c *closeOnceFile) Close() error { return nil } +func (c *closeOnceFile) Reader() (io.Reader, error) { + if c.closed.Load() { + return nil, errFileClosed + } + return c.f.Reader() +} + func (c *closeOnceFile) Size() int { if c.closed.Load() { return 0 @@ -38,6 +46,20 @@ func (c *closeOnceFile) Size() int { return c.f.Size() } +func (c *closeOnceFile) Height() uint64 { + if c.closed.Load() { + return 0 + } + return c.f.Height() +} + +func (c *closeOnceFile) DataHash() share.DataHash { + if c.closed.Load() { + return nil + } + return c.f.DataHash() +} + func (c *closeOnceFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { if c.closed.Load() { return nil, errFileClosed diff --git a/share/store/file/file_header.go b/share/store/file/file_header.go new file mode 100644 index 0000000000..1222b0a4f6 --- /dev/null +++ b/share/store/file/file_header.go @@ -0,0 +1,77 @@ +package file + +import ( + "bytes" + "encoding/binary" + "github.com/celestiaorg/celestia-node/share" + "io" +) + +const HeaderSize = 64 + +type Header struct { + version FileVersion + + // Taken directly from EDS + shareSize uint16 + squareSize uint16 + + height uint64 + datahash share.DataHash +} + +type FileVersion uint8 + +const ( + FileV0 FileVersion = iota +) + +func (h *Header) Version() FileVersion { + return h.version +} + +func (h *Header) ShareSize() int { + return int(h.shareSize) +} + +func (h *Header) SquareSize() int { + return int(h.squareSize) +} + +func (h *Header) Height() uint64 { + return h.height +} + +func (h *Header) DataHash() share.DataHash { + return h.datahash +} + +func (h *Header) WriteTo(w io.Writer) (int64, error) { + buf := make([]byte, HeaderSize) + buf[0] = byte(h.version) + binary.LittleEndian.PutUint16(buf[1:3], h.shareSize) + binary.LittleEndian.PutUint16(buf[3:5], h.squareSize) + binary.LittleEndian.PutUint64(buf[5:13], h.height) + copy(buf[13:45], h.datahash) + _, err := io.Copy(w, bytes.NewBuffer(buf)) + return HeaderSize, err +} + +func ReadHeader(r io.Reader) (*Header, error) { + buf := make([]byte, HeaderSize) + _, err := io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + h := &Header{ + version: FileVersion(buf[0]), + shareSize: binary.LittleEndian.Uint16(buf[1:3]), + squareSize: binary.LittleEndian.Uint16(buf[3:5]), + height: binary.LittleEndian.Uint64(buf[5:13]), + datahash: make([]byte, 32), + } + + copy(h.datahash, buf[13:45]) + return h, err +} diff --git a/share/store/file_test.go b/share/store/file/file_test.go similarity index 99% rename from share/store/file_test.go rename to share/store/file/file_test.go index c31f5c16e0..166c80bba9 100644 --- a/share/store/file_test.go +++ b/share/store/file/file_test.go @@ -1,4 +1,4 @@ -package store +package file import ( "context" diff --git a/share/store/file/in_mem_ods_file.go b/share/store/file/in_mem_ods_file.go new file mode 100644 index 
0000000000..7a69334243
--- /dev/null
+++ b/share/store/file/in_mem_ods_file.go
@@ -0,0 +1,217 @@
+package file
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/celestiaorg/celestia-app/pkg/wrapper"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/rsmt2d"
+	"golang.org/x/sync/errgroup"
+	"io"
+)
+
+type odsInMemFile struct {
+	inner  *OdsFile
+	square [][]share.Share
+}
+
+func ReadEds(ctx context.Context, r io.Reader, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) {
+	h, err := ReadHeader(r)
+	if err != nil {
+		return nil, err
+	}
+
+	ods, err := readOdsInMem(h, r)
+	if err != nil {
+		return nil, err
+	}
+
+	eds, err := ods.EDS(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("computing EDS: %w", err)
+	}
+
+	newDah, err := share.NewRoot(eds)
+	if err != nil {
+		return nil, err
+	}
+	if !bytes.Equal(newDah.Hash(), root) {
+		return nil, fmt.Errorf(
+			"share: content integrity mismatch: imported root %s doesn't match expected root %s",
+			newDah.Hash(),
+			root,
+		)
+	}
+	return eds, nil
+}
+
+func readOdsInMem(hdr *Header, reader io.Reader) (*odsInMemFile, error) {
+	shrLn := int(hdr.shareSize)
+	odsLn := int(hdr.squareSize) / 2
+
+	ods := memPools.get(odsLn).getOds()
+	buf := memPools.get(odsLn).getHalfAxis()
+	defer memPools.get(odsLn).putHalfAxis(buf)
+
+	for i := 0; i < odsLn; i++ {
+		// ReadFull is required here: a plain Read may return fewer bytes than a full row
+		if _, err := io.ReadFull(reader, buf); err != nil {
+			return nil, err
+		}
+
+		for j := 0; j < odsLn; j++ {
+			copy(ods[i][j], buf[j*shrLn:(j+1)*shrLn])
+		}
+	}
+
+	return &odsInMemFile{square: ods}, nil
+}
+
+func (f *odsInMemFile) Size() int {
+	// square is set once on construction, so no locking is needed
+	return len(f.square) * 2
+}
+
+func (f *odsInMemFile) Close() error {
+	if f == nil {
+		return nil
+	}
+	f.inner.lock.RLock()
+	defer f.inner.lock.RUnlock()
+	memPools.get(f.Size() / 2).putOds(f.square)
+	return nil
+}
+
+func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
+	if f == nil {
+		return nil, fmt.Errorf("ods file not cached")
+	}
+	f.inner.lock.RLock()
+	defer f.inner.lock.RUnlock()
+
+	if axisIdx >= f.Size()/2 {
+		return nil, fmt.Errorf("index is out of ods bounds")
+	}
+	if axisType == rsmt2d.Row {
+		return f.square[axisIdx], nil
+	}
+
+	// TODO: this is not efficient, but it is better than reading from file
+	shrs := make([]share.Share, f.Size()/2)
+	for i := 0; i < f.Size()/2; i++ {
+		shrs[i] = f.square[i][axisIdx]
+	}
+	return shrs, nil
+}
+
+func (f *odsInMemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) {
+	shrs := make([]share.Share, 0, f.Size()*f.Size())
+	for _, row := range f.square {
+		shrs = append(shrs, row...)
+	}
+
+	treeFn := wrapper.NewConstructor(uint64(f.Size() / 2))
+	return rsmt2d.ComputeExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn)
+}
+
+func (f *odsInMemFile) Reader() (io.Reader, error) {
+	if f == nil {
+		return nil, fmt.Errorf("ods file not cached")
+	}
+	f.inner.lock.RLock()
+	defer f.inner.lock.RUnlock()
+
+	odsLn := f.Size() / 2
+	odsR := &bufferedODSReader{
+		f: f,
+		// total is the amount of shares in the ODS, so the reader knows when to stop
+		total: odsLn * odsLn,
+		buf:   bytes.NewBuffer(make([]byte, 0, int(f.inner.hdr.shareSize))),
+	}
+
+	// write header to the buffer
+	_, err := f.inner.hdr.WriteTo(odsR.buf)
+	if err != nil {
+		return nil, fmt.Errorf("writing header: %w", err)
+	}
+
+	return odsR, nil
+}
+
+func (f *odsInMemFile) computeAxisHalf(
+	ctx context.Context,
+	axisType rsmt2d.Axis,
+	axisIdx int,
+) ([]share.Share, error) {
+	shares := make([]share.Share, f.Size()/2)
+
+	// extend opposite half of the square while collecting shares for the first half of required axis
+	g, ctx := errgroup.WithContext(ctx)
+	opposite := oppositeAxis(axisType)
+	for i := 0; i < f.Size()/2; i++ {
+		i := i
+		g.Go(func() error {
+			original, err := f.AxisHalf(ctx, opposite, i)
+			if err != nil {
+				return err
+			}
+
+			enc, err := codec.Encoder(f.Size())
+			if err != nil {
+				return fmt.Errorf("encoder: %w", err)
+			}
+
+			shards := make([][]byte, f.Size())
+			copy(shards, original)
+			//for j := len(original); j < len(shards); j++ {
+			//	shards[j] = make([]byte, len(original[0]))
+			//}
+
+			//err = enc.Encode(shards)
+			//if err != nil {
+			//	return fmt.Errorf("encode: %w", err)
+			//}
+
+			target := make([]bool, f.Size())
+			target[axisIdx] = true
+
+			err = enc.ReconstructSome(shards, target)
+			if err != nil {
+				return fmt.Errorf("reconstruct some: %w", err)
+			}
+
+			shares[i] = shards[axisIdx]
+			return nil
+		})
+	}
+
+	err := g.Wait()
+	return shares, err
+}
+
+func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis {
+	if axis == rsmt2d.Col {
+		return rsmt2d.Row
+	}
+	return rsmt2d.Col
+}
+
+// bufferedODSReader reads shares from odsInMemFile into the buffer.
+// It exposes the buffer to be read via the io.Reader interface implementation.
+type bufferedODSReader struct {
+	f *odsInMemFile
+	// current is the amount of shares of the in-memory ODS that have been read so far. When current
+	// reaches total, bufferedODSReader will prevent further reads by returning io.EOF
+	current, total int
+	buf            *bytes.Buffer
+}
+
+func (r *bufferedODSReader) Read(p []byte) (n int, err error) {
+	// read shares into the buffer until it has sufficient data to fill the provided container or the
+	// full ods is read
+	odsLn := r.f.Size() / 2
+	for r.current < r.total && r.buf.Len() < len(p) {
+		x, y := r.current%odsLn, r.current/odsLn
+		r.buf.Write(r.f.square[y][x])
+		r.current++
+	}
+
+	// read buffer to slice
+	return r.buf.Read(p)
+}
diff --git a/share/store/mem_file.go b/share/store/file/mem_file.go
similarity index 98%
rename from share/store/mem_file.go
rename to share/store/file/mem_file.go
index e9cc64ec68..ddf6e10610 100644
--- a/share/store/mem_file.go
+++ b/share/store/file/mem_file.go
@@ -1,4 +1,4 @@
-package store
+package file

 import (
 	"context"
@@ -11,7 +11,7 @@ import (
 	"github.com/celestiaorg/celestia-node/share/ipld"
 )

-var _ EdsFile = (*MemFile)(nil)
+//var _ EdsFile = (*MemFile)(nil)

 type MemFile struct {
 	Eds *rsmt2d.ExtendedDataSquare
diff --git a/share/store/file/mem_file_test.go b/share/store/file/mem_file_test.go
new file mode 100644
index 0000000000..c2bb545142
--- /dev/null
+++ b/share/store/file/mem_file_test.go
@@ -0,0 +1,24 @@
+package file
+
+//func TestMemFile(t *testing.T) {
+//	size := 8
+//	newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile {
+//		return &MemFile{Eds: eds}
+//	}
+//
+//	t.Run("Share", func(t *testing.T) {
+//		testFileShare(t, newFile, size)
+//	})
+//
+//	t.Run("AxisHalf", func(t *testing.T) {
+//		testFileAxisHalf(t, newFile, size)
+//	})
+//
+//	t.Run("Data", func(t *testing.T) {
+//		testFileData(t, newFile, size)
+//	})
+//
+//	t.Run("EDS", func(t *testing.T) {
+//		testFileEds(t, newFile, size)
+//	})
+//}
diff --git a/share/store/file/mempool.go b/share/store/file/mempool.go
new file mode 100644
index 0000000000..2b743f80d9
--- /dev/null
+++ b/share/store/file/mempool.go
@@ -0,0 +1,75 @@
+package file
+
+import (
+	"github.com/celestiaorg/celestia-node/share"
+	"sync"
+)
+
+// TODO: need better name
+var memPools poolsMap
+
+func init() {
+	memPools = make(map[int]*memPool)
+}
+
+type poolsMap map[int]*memPool
+
+type memPool struct {
+	ods      *sync.Pool
+	halfAxis *sync.Pool
+}
+
+// TODO: test me
+func (m poolsMap) get(size int) *memPool {
+	pool, ok := m[size]
+	if !ok {
+		pool = &memPool{
+			ods:      newOdsPool(size),
+			halfAxis: newHalfAxisPool(size),
+		}
+		m[size] = pool
+	}
+	return pool
+}
+
+func (m *memPool) putOds(ods [][]share.Share) {
+	m.ods.Put(ods)
+}
+
+func (m *memPool) getOds() [][]share.Share {
+	return m.ods.Get().([][]share.Share)
+}
+
+func (m *memPool) putHalfAxis(buf []byte) {
+	m.halfAxis.Put(buf)
+}
+
+func (m *memPool) getHalfAxis() []byte {
+	return m.halfAxis.Get().([]byte)
+}
+
+func newOdsPool(size int) *sync.Pool {
+	return &sync.Pool{
+		New: func() interface{} {
+			shrs := make([][]share.Share, size)
+			for i := range shrs {
+				if shrs[i] == nil {
+					shrs[i] = make([]share.Share, size)
+					for j := range shrs[i] {
+						shrs[i][j] = make(share.Share, share.Size)
+					}
+				}
+			}
+			return shrs
+		},
+	}
+}
+
+func newHalfAxisPool(size int) *sync.Pool {
+	return &sync.Pool{
+		New: func() interface{} {
+			buf := make([]byte, size*share.Size)
+			return buf
+		},
+	}
+}
diff --git a/share/store/ods_file.go b/share/store/file/ods_file.go
similarity index 50%
rename from share/store/ods_file.go
rename to share/store/file/ods_file.go
index 7f9d53eb15..5539c60bba 100644
--- a/share/store/ods_file.go
+++ b/share/store/file/ods_file.go
@@ -1,13 +1,12 @@
-package store
+package file

 import (
 	"context"
 	"fmt"
+	"io"
 	"os"
 	"sync"

-	"golang.org/x/sync/errgroup"
-
 	"github.com/celestiaorg/celestia-app/pkg/wrapper"
 	"github.com/celestiaorg/rsmt2d"

@@ -21,7 +20,8 @@ type OdsFile struct {
 	hdr  *Header
 	fl   *os.File

-	memPool memPool
+	lock sync.RWMutex
+	ods  *odsInMemFile
 }

 // OpenOdsFile opens an existing file. File has to be closed after usage.
@@ -44,38 +44,53 @@ func OpenOdsFile(path string) (*OdsFile, error) {
 	}, nil
 }

-func CreateOdsFile(path string, eds *rsmt2d.ExtendedDataSquare, memPools memPools) (*OdsFile, error) {
+func CreateOdsFile(
+	path string,
+	height uint64,
+	datahash share.DataHash,
+	eds *rsmt2d.ExtendedDataSquare) (*OdsFile, error) {
 	f, err := os.Create(path)
 	if err != nil {
 		return nil, err
 	}

 	h := &Header{
+		version:    FileV0,
 		shareSize:  uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field
 		squareSize: uint16(eds.Width()),
-		version:    FileV0,
+		height:     height,
+		datahash:   datahash,
 	}

-	if _, err = h.WriteTo(f); err != nil {
-		return nil, err
+	err = writeOdsFile(f, h, eds)
+	if err != nil {
+		return nil, fmt.Errorf("writing ODS file: %w", err)
+	}
+
+	// TODO: fill odsInMemFile with data from eds
+	return &OdsFile{
+		path: path,
+		fl:   f,
+		hdr:  h,
+	}, f.Sync()
+}
+
+func writeOdsFile(w io.Writer, h *Header, eds *rsmt2d.ExtendedDataSquare) error {
+	_, err := h.WriteTo(w)
+	if err != nil {
+		return err
 	}

 	for i := uint(0); i < eds.Width()/2; i++ {
 		for j := uint(0); j < eds.Width()/2; j++ {
 			// TODO: Implemented buffered write through io.CopyBuffer
 			shr := eds.GetCell(i, j)
-			if _, err := f.Write(shr); err != nil {
-				return nil, err
+			if _, err := w.Write(shr); err != nil {
+				return err
 			}
 		}
 	}
-
-	return &OdsFile{
-		path:    path,
-		fl:      f,
-		hdr:     h,
-		memPool: memPools.get(int(h.squareSize) / 2),
-	}, f.Sync()
+	return nil
 }

 func (f *OdsFile) Size() int {
@@ -83,11 +98,26 @@ func (f *OdsFile) Size() int {
 }

 func (f *OdsFile) Close() error {
+	if err := f.ods.Close(); err != nil {
+		return err
+	}
 	return f.fl.Close()
 }

-func (f *OdsFile) Header() *Header {
-	return f.hdr
+func (f *OdsFile) Height() uint64 {
+	return f.hdr.Height()
+}
+
+func (f *OdsFile) DataHash() share.DataHash {
+	return f.hdr.DataHash()
+}
+
+func (f *OdsFile) Reader() (io.Reader, error) {
+	err := f.readOds()
+	if err != nil {
+		return nil, fmt.Errorf("reading ods: %w", err)
+	}
+	return f.ods.Reader()
 }

 func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
@@ -96,16 +126,22 @@ func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx in
 		return f.odsAxisHalf(axisType, axisIdx)
 	}

-	ods, err := f.readOds(oppositeAxis(axisType))
+	err := f.readOds()
 	if err != nil {
 		return nil, err
 	}
-	defer f.memPool.ods.Put(ods.square)

-	return computeAxisHalf(ctx, ods, f.memPool.codec, axisType, axisIdx)
+	return f.ods.computeAxisHalf(ctx, axisType, axisIdx)
 }

 func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	shrs, err := f.ods.AxisHalf(context.Background(), axisType, axisIdx)
+	if err == nil {
+		return shrs, nil
+	}
+
 	switch axisType {
 	case rsmt2d.Col:
 		return f.readCol(axisIdx)
@@ -115,56 +151,32 @@ func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share,
 	return nil, fmt.Errorf("unknown axis")
 }

-type odsInMemFile struct {
-	EdsFile
-	axisType rsmt2d.Axis
-	square   [][]share.Share
-}
-
-func (f *odsInMemFile) Size() int {
-	return len(f.square) * 2
-}
-
-func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
-	if axisType != f.axisType {
-		return nil, fmt.Errorf("order of shares is not preserved")
-	}
-	if axisIdx >= f.Size()/2 {
-		return nil, fmt.Errorf("index is out of ods bounds")
+func (f *OdsFile) readOds() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	if f.ods != nil {
+		return nil
 	}
-	return f.square[axisIdx], nil
-}
-
-func (f *OdsFile) readOds(axisType rsmt2d.Axis) (*odsInMemFile, error) {
-	shrLn := int(f.hdr.shareSize)
-	odsLn := int(f.hdr.squareSize) / 2
-
-	buf := f.memPool.halfAxis.Get().([]byte)
-	defer f.memPool.halfAxis.Put(buf)

-	ods := f.memPool.ods.Get().([][]share.Share)
-	for i := 0; i < odsLn; i++ {
-		pos := HeaderSize + odsLn*shrLn*i
-		if _, err := f.fl.ReadAt(buf, int64(pos)); err != nil {
-			return nil, err
-		}
-
-		for j := 0; j < odsLn; j++ {
-			if axisType == rsmt2d.Row {
-				copy(ods[i][j], buf[j*shrLn:(j+1)*shrLn])
-			} else {
-				copy(ods[j][i], buf[j*shrLn:(j+1)*shrLn])
-			}
-		}
+	// seek past the header to the beginning of the ODS data
+	_, err := f.fl.Seek(HeaderSize, io.SeekStart)
+	if err != nil {
+		return fmt.Errorf("discarding header: %w", err)
 	}

-	return &odsInMemFile{
-		axisType: axisType,
-		square:   ods,
-	}, nil
+	ods, err := readOdsInMem(f.hdr, f.fl)
+	if err != nil {
+		return fmt.Errorf("reading ods: %w", err)
+	}
+	// wire the in-memory ODS to its parent file, so it can use the file's lock and header
+	ods.inner = f
+	f.ods = ods
+	return nil
 }

 func (f *OdsFile) readRow(idx int) ([]share.Share, error) {
+	if idx >= f.Size()/2 {
+		return nil, fmt.Errorf("index is out of ods bounds")
+	}
+
 	shrLn := int(f.hdr.shareSize)
 	odsLn := int(f.hdr.squareSize) / 2

@@ -185,6 +197,10 @@ func (f *OdsFile) readRow(idx int) ([]share.Share, error) {
 }

 func (f *OdsFile) readCol(idx int) ([]share.Share, error) {
+	if idx >= f.Size()/2 {
+		return nil, fmt.Errorf("index is out of ods bounds")
+	}
+
 	shrLn := int(f.hdr.shareSize)
 	odsLn := int(f.hdr.squareSize) / 2

@@ -203,66 +219,13 @@ func (f *OdsFile) readCol(idx int) ([]share.Share, error) {
 	return shrs, nil
 }

-func computeAxisHalf(
-	ctx context.Context,
-	f EdsFile,
-	codec Codec,
-	axisType rsmt2d.Axis,
-	axisIdx int,
-) ([]share.Share, error) {
-	shares := make([]share.Share, f.Size()/2)
-
-	// extend opposite half of the square while collecting shares for the first half of required axis
-	g, ctx := errgroup.WithContext(ctx)
-	opposite := oppositeAxis(axisType)
-	for i := 0; i < f.Size()/2; i++ {
-		i := i
-		g.Go(func() error {
-			original, err := f.AxisHalf(ctx, opposite, i)
-			if err != nil {
-				return err
-			}
-
-			enc, err := codec.Encoder(f.Size())
-			if err != nil {
-				return fmt.Errorf("encoder: %w", err)
-			}
-
-			shards := make([][]byte, f.Size())
-			copy(shards, original)
-			//for j := len(original); j < len(shards); j++ {
-			//	shards[j] = make([]byte, len(original[0]))
-			//}
-
-			//err = enc.Encode(shards)
-			//if err != nil {
-			//	return fmt.Errorf("encode: %w", err)
-			//}
-
-			target := make([]bool, f.Size())
-			target[axisIdx] = true
-
-			err = enc.ReconstructSome(shards, target)
-			if err != nil {
-				return fmt.Errorf("reconstruct some: %w", err)
-			}
-
-			shares[i] = shards[axisIdx]
-			return nil
-		})
-	}
-
-	err := g.Wait()
-	return shares, err
-}
-
 func (f *OdsFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
 	original, err := f.AxisHalf(ctx, axisType, axisIdx)
 	if err != nil {
 		return nil, err
 	}

-	return extendShares(f.memPool.codec, original)
+	return extendShares(codec, original)
 }

 func extendShares(codec Codec, original []share.Share) ([]share.Share, error) {
@@ -324,78 +287,11 @@ func (f *OdsFile)
Data(ctx context.Context, namespace share.Namespace, rowIdx in return ndDataFromShares(shares, namespace, rowIdx) } -func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { - ods, err := f.readOds(rsmt2d.Row) +func (f *OdsFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) { + err := f.readOds() if err != nil { return nil, err } - shrs := make([]share.Share, 0, len(ods.square)*len(ods.square)) - for _, row := range ods.square { - shrs = append(shrs, row...) - } - - treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2)) - return rsmt2d.ComputeExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn) -} - -type memPools struct { - pools map[int]memPool - codec Codec -} - -type memPool struct { - codec Codec - ods, halfAxis *sync.Pool -} - -func newMemPools(codec Codec) memPools { - return memPools{ - pools: make(map[int]memPool), - codec: codec, - } -} -func (m memPools) get(size int) memPool { - if pool, ok := m.pools[size]; ok { - return pool - } - pool := newMemPool(m.codec, size) - m.pools[size] = pool - return pool -} - -func newMemPool(codec Codec, size int) memPool { - ods := &sync.Pool{ - New: func() interface{} { - shrs := make([][]share.Share, size) - for i := range shrs { - if shrs[i] == nil { - shrs[i] = make([]share.Share, size) - for j := range shrs[i] { - shrs[i][j] = make(share.Share, share.Size) - } - } - } - return shrs - }, - } - - halfAxis := &sync.Pool{ - New: func() interface{} { - buf := make([]byte, size*share.Size) - return buf - }, - } - return memPool{ - halfAxis: halfAxis, - ods: ods, - codec: codec, - } -} - -func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis { - if axis == rsmt2d.Col { - return rsmt2d.Row - } - return rsmt2d.Col + return f.ods.EDS(ctx) } diff --git a/share/store/ods_file_test.go b/share/store/file/ods_file_test.go similarity index 92% rename from share/store/ods_file_test.go rename to share/store/file/ods_file_test.go index ed21f32857..8c4ff5b0ba 100644 --- a/share/store/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -1,4 +1,4 @@ -package store +package file import ( "context" @@ -15,8 +15,7 @@ import ( func TestCreateOdsFile(t *testing.T) { path := t.TempDir() + "/testfile" edsIn := edstest.RandEDS(t, 8) - mem := newMemPools(NewCodec()) - _, err := CreateOdsFile(path, edsIn, mem) + _, err := CreateOdsFile(path, 1, []byte{}, edsIn) require.NoError(t, err) f, err := OpenOdsFile(path) @@ -28,10 +27,9 @@ func TestCreateOdsFile(t *testing.T) { func TestOdsFile(t *testing.T) { size := 32 - mem := newMemPools(NewCodec()) createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { path := t.TempDir() + "/testfile" - fl, err := CreateOdsFile(path, eds, mem) + fl, err := CreateOdsFile(path, 1, []byte{}, eds) require.NoError(t, err) return fl } @@ -55,14 +53,13 @@ func TestOdsFile(t *testing.T) { func TestReadOdsFile(t *testing.T) { eds := edstest.RandEDS(t, 8) - mem := newMemPools(NewCodec()) path := t.TempDir() + "/testfile" - f, err := CreateOdsFile(path, eds, mem) + f, err := CreateOdsFile(path, 1, []byte{}, eds) require.NoError(t, err) - ods, err := f.readOds(rsmt2d.Row) + err = f.readOds() require.NoError(t, err) - for i, row := range ods.square { + for i, row := range f.ods.square { original, err := f.readRow(i) require.NoError(t, err) require.True(t, len(original) == len(row)) @@ -100,12 +97,11 @@ func TestReadOdsFile(t *testing.T) { func BenchmarkAxisFromOdsFile(b *testing.B) { minSize, maxSize := 32, 128 dir := b.TempDir() - mem := newMemPools(NewCodec()) newFile := func(size int) 
EdsFile { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, eds, mem) + f, err := CreateOdsFile(path, 1, []byte{}, eds) require.NoError(b, err) return f } @@ -127,12 +123,11 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { func BenchmarkShareFromOdsFile(b *testing.B) { minSize, maxSize := 128, 128 dir := b.TempDir() - mem := newMemPools(NewCodec()) newFile := func(size int) EdsFile { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, eds, mem) + f, err := CreateOdsFile(path, 1, []byte{}, eds) require.NoError(b, err) return f } diff --git a/share/store/file_header.go b/share/store/file_header.go deleted file mode 100644 index 83c9d69190..0000000000 --- a/share/store/file_header.go +++ /dev/null @@ -1,73 +0,0 @@ -package store - -import ( - "encoding/binary" - "io" -) - -const HeaderSize = 32 - -type Header struct { - version FileVersion - - // Taken directly from EDS - shareSize uint16 - squareSize uint16 -} - -type FileVersion uint8 - -const ( - FileV0 FileVersion = iota -) - -func (h *Header) Version() FileVersion { - return h.version -} - -func (h *Header) ShareSize() int { - return int(h.shareSize) -} - -func (h *Header) SquareSize() int { - return int(h.squareSize) -} - -func (h *Header) WriteTo(w io.Writer) (int64, error) { - buf := make([]byte, HeaderSize) - buf[0] = byte(h.version) - binary.LittleEndian.PutUint16(buf[1:3], h.shareSize) - binary.LittleEndian.PutUint16(buf[3:5], h.squareSize) - // TODO: Extensions - n, err := w.Write(buf) - return int64(n), err -} - -func (h *Header) ReadFrom(r io.Reader) (int64, error) { - buf := make([]byte, HeaderSize) - n, err := io.ReadFull(r, buf) - if err != nil { - return int64(n), err - } - - h.version = FileVersion(buf[0]) - h.shareSize = binary.LittleEndian.Uint16(buf[1:3]) - h.squareSize = binary.LittleEndian.Uint16(buf[3:5]) - - // TODO: Extensions - return int64(n), err -} - -func ReadHeader(r io.ReaderAt) (*Header, error) { - h := &Header{} - buf := make([]byte, HeaderSize) - _, err := r.ReadAt(buf, 0) - if err != nil { - return h, err - } - - h.version = FileVersion(buf[0]) - h.shareSize = binary.LittleEndian.Uint16(buf[1:3]) - h.squareSize = binary.LittleEndian.Uint16(buf[3:5]) - return h, nil -} diff --git a/share/store/mem_file_test.go b/share/store/mem_file_test.go deleted file mode 100644 index d502a9f7e2..0000000000 --- a/share/store/mem_file_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package store - -import ( - "github.com/celestiaorg/rsmt2d" - "testing" -) - -func TestMemFile(t *testing.T) { - size := 8 - newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { - return &MemFile{Eds: eds} - } - - t.Run("Share", func(t *testing.T) { - testFileShare(t, newFile, size) - }) - - t.Run("AxisHalf", func(t *testing.T) { - testFileAxisHalf(t, newFile, size) - }) - - t.Run("Data", func(t *testing.T) { - testFileData(t, newFile, size) - }) - - t.Run("EDS", func(t *testing.T) { - testFileEds(t, newFile, size) - }) -} From 1d21a223bf38e482e26edc56d50326845a381429 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Jan 2024 01:20:18 +0700 Subject: [PATCH 050/132] - add store --- share/store/store.go | 223 +++++++++++++++++++++++++++++++++++ share/store/store_options.go | 32 +++++ share/store/striplock.go | 54 +++++++++ 3 files changed, 309 insertions(+) create mode 100644 share/store/store.go create mode 100644 share/store/store_options.go create mode 100644 share/store/striplock.go diff --git a/share/store/store.go b/share/store/store.go new file mode 100644 index 
0000000000..6527a89702
--- /dev/null
+++ b/share/store/store.go
@@ -0,0 +1,223 @@
+package store
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/store/cache"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	"github.com/celestiaorg/rsmt2d"
+	logging "github.com/ipfs/go-log/v2"
+	"go.opentelemetry.io/otel"
+	"os"
+)
+
+var (
+	log    = logging.Logger("share/eds")
+	tracer = otel.Tracer("share/eds")
+)
+
+const (
+	blocksPath  = "/blocks/"
+	heightsPath = "/heights/"
+)
+
+var ErrNotFound = errors.New("eds not found in store")
+
+// Store maintains a top-level index of all stored EDS files, enabling granular and efficient
+// random access to every share and/or Merkle proof. It provides a custom blockstore interface
+// implementation to achieve access. The main use-case is randomized sampling over the whole
+// chain of EDS block data and getting data by namespace.
+type Store struct {
+	cancel context.CancelFunc
+
+	basepath string
+
+	// cache is used to cache recent blocks and blocks that are accessed frequently
+	cache *cache.DoubleCache
+
+	//TODO: maintain in-memory missing files index / bloom-filter to fast return for not stored files.
+
+	// stripedLocks is used to synchronize parallel operations
+	stripLock *striplock
+
+	//metrics *metrics
+}
+
+// NewStore creates a new EDS Store under the given basepath.
+func NewStore(params *Parameters, basePath string) (*Store, error) {
+	if err := params.Validate(); err != nil {
+		return nil, err
+	}
+
+	//TODO: acquire DirectoryLock store lockGuard
+
+	recentBlocksCache, err := cache.NewFileCache("recent", params.RecentBlocksCacheSize)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create recent blocks cache: %w", err)
+	}
+
+	blockstoreCache, err := cache.NewFileCache("blockstore", params.BlockstoreCacheSize)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create blockstore cache: %w", err)
+	}
+
+	dcache := cache.NewDoubleCache(recentBlocksCache, blockstoreCache)
+
+	store := &Store{
+		basepath:  basePath,
+		cache:     dcache,
+		stripLock: newStripLock(1024),
+		//metrics: newMetrics(),
+	}
+	return store, nil
+}
+
+func (s *Store) Put(
+	ctx context.Context,
+	datahash share.DataHash,
+	height uint64,
+	square *rsmt2d.ExtendedDataSquare,
+) (file.EdsFile, error) {
+	lock := s.stripLock.byDatahashAndHeight(datahash, height)
+	lock.lock()
+	defer lock.unlock()
+
+	path := s.basepath + blocksPath + datahash.String()
+	odsFile, err := file.CreateOdsFile(path, height, datahash, square)
+	if err != nil {
+		return nil, fmt.Errorf("writing ODS file: %w", err)
+	}
+
+	// create hard link with height as name
+	err = os.Link(path, s.basepath+heightsPath+fmt.Sprintf("%d", height))
+	if err != nil {
+		return nil, fmt.Errorf("creating hard link: %w", err)
+	}
+
+	// put in recent cache
+	f, err := s.cache.First().GetOrLoad(ctx, cache.Key{Height: height}, wrapWithCache(odsFile))
+	if err != nil {
+		return nil, fmt.Errorf("putting in cache: %w", err)
+	}
+	return f, nil
+}
+
+func (s *Store) GetByHash(_ context.Context, datahash share.DataHash) (file.EdsFile, error) {
+	lock := s.stripLock.byDatahash(datahash)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	f, err := s.cache.Get(cache.Key{Datahash: datahash})
+	if err == nil {
+		return f, nil
+	}
+
+	path := s.basepath + blocksPath + datahash.String()
+	odsFile, err := file.OpenOdsFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("opening ODS file: %w", err)
+	}
+	return odsFile, nil
+}
+
+func (s *Store) GetByHeight(_ context.Context, height uint64) (file.EdsFile, error) {
+	lock := s.stripLock.byHeight(height)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	f, err := s.cache.Get(cache.Key{Height: height})
+	if err == nil {
+		return f, nil
+	}
+
+	path := s.basepath + heightsPath + fmt.Sprintf("%d", height)
+	odsFile, err := file.OpenOdsFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("opening ODS file: %w", err)
+	}
+	return odsFile, nil
+}
+
+func (s *Store) HasByHash(_ context.Context, datahash share.DataHash) (bool, error) {
+	lock := s.stripLock.byDatahash(datahash)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	_, err := s.cache.Get(cache.Key{Datahash: datahash})
+	if err == nil {
+		return true, nil
+	}
+
+	// a stat is enough here: opening the file would leak it, and a missing file is not an error
+	path := s.basepath + blocksPath + datahash.String()
+	_, err = os.Stat(path)
+	if errors.Is(err, os.ErrNotExist) {
+		return false, nil
+	}
+	if err != nil {
+		return false, fmt.Errorf("checking ODS file: %w", err)
+	}
+	return true, nil
+}
+
+func (s *Store) HasByHeight(_ context.Context, height uint64) (bool, error) {
+	lock := s.stripLock.byHeight(height)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	_, err := s.cache.Get(cache.Key{Height: height})
+	if err == nil {
+		return true, nil
+	}
+
+	// same as HasByHash: stat instead of open, and treat a missing file as "not stored"
+	path := s.basepath + heightsPath + fmt.Sprintf("%d", height)
+	_, err = os.Stat(path)
+	if errors.Is(err, os.ErrNotExist) {
+		return false, nil
+	}
+	if err != nil {
+		return false, fmt.Errorf("checking ODS file: %w", err)
+	}
+	return true, nil
+}
+
+func wrapWithCache(f file.EdsFile) cache.OpenFileFn {
+	return func(ctx context.Context) (cache.Key, file.EdsFile, error) {
+		f := file.NewCacheFile(f)
+		key := cache.Key{
+			Datahash: f.DataHash(),
+			Height:   f.Height(),
+		}
+		return key, f, nil
+	}
+}
+
+func (s *Store) openFileByHeight(height uint64) cache.OpenFileFn {
+	return func(ctx context.Context) (cache.Key, file.EdsFile, error) {
+		path := s.basepath + heightsPath + fmt.Sprintf("%d", height)
+		f, err := file.OpenOdsFile(path)
+		if err != nil {
+			return cache.Key{}, nil, fmt.Errorf("opening ODS file: %w", err)
+		}
+		key := cache.Key{
+			Datahash: f.DataHash(),
+			Height:   height,
+		}
+		return key, file.NewCacheFile(f), nil
+	}
+}
+
+func (s *Store) openFileByDatahash(datahash share.DataHash) cache.OpenFileFn {
+	return func(ctx context.Context) (cache.Key, file.EdsFile, error) {
+		path := s.basepath + blocksPath + datahash.String()
+		f, err := file.OpenOdsFile(path)
+		if err != nil {
+			return cache.Key{}, nil, fmt.Errorf("opening ODS file: %w", err)
+		}
+		key := cache.Key{
+			Datahash: f.DataHash(),
+			Height:   f.Height(),
+		}
+		return key, file.NewCacheFile(f), nil
+	}
+}
+
+func (s *Store) Close() error {
+	// TODO: close the caches and release the directory lock
+	panic("implement me")
+}
diff --git a/share/store/store_options.go b/share/store/store_options.go
new file mode 100644
index 0000000000..cc7dfbd597
--- /dev/null
+++ b/share/store/store_options.go
@@ -0,0 +1,32 @@
+package store
+
+import (
+	"fmt"
+)
+
+type Parameters struct {
+	// RecentBlocksCacheSize is the size of the cache for recent blocks.
+	RecentBlocksCacheSize int
+
+	// BlockstoreCacheSize is the size of the cache for blockstore requested accessors.
+	BlockstoreCacheSize int
+}
+
+// DefaultParameters returns the default configuration values for the EDS store parameters.
+func DefaultParameters() *Parameters { + return &Parameters{ + RecentBlocksCacheSize: 10, + BlockstoreCacheSize: 128, + } +} + +func (p *Parameters) Validate() error { + if p.RecentBlocksCacheSize < 1 { + return fmt.Errorf("eds: recent blocks cache size must be positive") + } + + if p.BlockstoreCacheSize < 1 { + return fmt.Errorf("eds: blockstore cache size must be positive") + } + return nil +} diff --git a/share/store/striplock.go b/share/store/striplock.go new file mode 100644 index 0000000000..20669a97b9 --- /dev/null +++ b/share/store/striplock.go @@ -0,0 +1,54 @@ +package store + +import ( + "encoding/binary" + "github.com/celestiaorg/celestia-node/share" + "sync" +) + +// TODO: move to utils +type striplock struct { + heights []*sync.RWMutex + datahashes []*sync.RWMutex +} + +type multiLock struct { + mu []*sync.RWMutex +} + +func newStripLock(size int) *striplock { + heights := make([]*sync.RWMutex, size) + datahashes := make([]*sync.RWMutex, size) + for i := 0; i < size; i++ { + heights[i] = &sync.RWMutex{} + datahashes[i] = &sync.RWMutex{} + } + return &striplock{heights, datahashes} +} + +func (l *striplock) byHeight(height uint64) *sync.RWMutex { + lkIdx := height % uint64(len(l.heights)) + return l.heights[lkIdx] +} + +func (l *striplock) byDatahash(datahash share.DataHash) *sync.RWMutex { + key := binary.LittleEndian.Uint16(datahash[len(datahash)-3:]) + lkIdx := key % uint16(len(l.datahashes)) + return l.datahashes[lkIdx] +} + +func (l *striplock) byDatahashAndHeight(datahash share.DataHash, height uint64) *multiLock { + return &multiLock{[]*sync.RWMutex{l.byDatahash(datahash), l.byHeight(height)}} +} + +func (m *multiLock) lock() { + for _, lk := range m.mu { + lk.Lock() + } +} + +func (m *multiLock) unlock() { + for _, lk := range m.mu { + lk.Unlock() + } +} From 127291712f05248af0d8c9ae5233366559147ba6 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Jan 2024 01:21:00 +0700 Subject: [PATCH 051/132] add cache --- share/store/cache/accessor_cache.go | 265 ++++++++++++++++++++++++++++ share/store/cache/cache.go | 48 +++++ share/store/cache/doublecache.go | 50 ++++++ share/store/cache/metrics.go | 69 ++++++++ share/store/cache/noop.go | 71 ++++++++ 5 files changed, 503 insertions(+) create mode 100644 share/store/cache/accessor_cache.go create mode 100644 share/store/cache/cache.go create mode 100644 share/store/cache/doublecache.go create mode 100644 share/store/cache/metrics.go create mode 100644 share/store/cache/noop.go diff --git a/share/store/cache/accessor_cache.go b/share/store/cache/accessor_cache.go new file mode 100644 index 0000000000..05ec00b64c --- /dev/null +++ b/share/store/cache/accessor_cache.go @@ -0,0 +1,265 @@ +package cache + +import ( + "context" + "errors" + "fmt" + "github.com/celestiaorg/celestia-node/share/store/file" + "sync" + "sync/atomic" + "time" + + lru "github.com/hashicorp/golang-lru/v2" +) + +const defaultCloseTimeout = time.Minute + +var _ Cache = (*FileCache)(nil) + +// FileCache implements the Cache interface using an LRU cache backend. +type FileCache struct { + idxLock sync.RWMutex + // The name is a prefix that will be used for cache metrics if they are enabled. + name string + // HeightsIdx is a map of Height to Datahash. It is used to find the Datahash for a given Height. + HeightsIdx map[uint64]string + // stripedLocks prevents simultaneous RW access to the file cache for an accessor. Instead + // of using only one lock or one lock per key, we stripe the keys across 256 locks. 
256 is
+	// chosen because 0-255 is the range of values of the last byte of the key.
+	stripedLocks [256]sync.Mutex
+	// Caches the file for a given key for file read affinity, i.e., further reads will likely
+	// be from the same file. Maps (Datahash -> accessor).
+	cache *lru.Cache[string, *accessor]
+
+	metrics *metrics
+}
+
+// accessor is the value stored in Cache. It implements the file.EdsFile interface. It is
+// reference counted, so that it can be removed from the cache only when all references are released.
+type accessor struct {
+	lock sync.RWMutex
+	file.EdsFile
+
+	Height   uint64
+	done     chan struct{}
+	refs     atomic.Int32
+	isClosed bool
+}
+
+func (s *accessor) addRef() error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	if s.isClosed {
+		// item is already closed and soon will be removed after all refs are released
+		return errCacheMiss
+	}
+	if s.refs.Add(1) == 1 {
+		// there were no refs previously and done channel was closed, reopen it by recreating
+		s.done = make(chan struct{})
+	}
+	return nil
+}
+
+func (s *accessor) removeRef() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	if s.refs.Add(-1) <= 0 {
+		close(s.done)
+	}
+}
+
+func (s *accessor) close() error {
+	s.lock.Lock()
+	if s.isClosed {
+		s.lock.Unlock()
+		// accessor will be closed by another goroutine
+		return nil
+	}
+	s.isClosed = true
+	done := s.done
+	s.lock.Unlock()
+
+	select {
+	case <-done:
+	case <-time.After(defaultCloseTimeout):
+		return fmt.Errorf("closing file, some readers didn't close the file within timeout,"+
+			" amount left: %v", s.refs.Load())
+	}
+	if err := s.EdsFile.Close(); err != nil {
+		return fmt.Errorf("closing accessor: %w", err)
+	}
+	return nil
+}
+
+func NewFileCache(name string, cacheSize int) (*FileCache, error) {
+	bc := &FileCache{
+		name: name,
+		// the index must be initialized here, otherwise writes to it panic on the nil map
+		HeightsIdx: make(map[uint64]string),
+	}
+	// Instantiate the file Cache.
+	bslru, err := lru.NewWithEvict[string, *accessor](cacheSize, bc.evictFn())
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate accessor cache: %w", err)
+	}
+	bc.cache = bslru
+	return bc, nil
+}
+
+// evictFn will be invoked when an item is evicted from the cache.
+func (bc *FileCache) evictFn() func(string, *accessor) {
+	return func(_ string, fa *accessor) {
+		bc.idxLock.Lock()
+		defer bc.idxLock.Unlock()
+		delete(bc.HeightsIdx, fa.Height)
+		// we can release accessor from cache early, while it is being closed in parallel routine
+		go func() {
+			err := fa.close()
+			if err != nil {
+				bc.metrics.observeEvicted(true)
+				log.Errorf("couldn't close accessor after cache eviction: %s", err)
+				return
+			}
+			bc.metrics.observeEvicted(false)
+		}()
+	}
+}
+
+// Get retrieves the accessor for a given key from the Cache. If the Accessor is not in
+// the Cache, it returns an errCacheMiss.
+func (bc *FileCache) Get(key Key) (file.EdsFile, error) {
+	lk := &bc.stripedLocks[keyToStriped(key)]
+	lk.Lock()
+	defer lk.Unlock()
+
+	accessor, err := bc.get(key)
+	if err != nil {
+		bc.metrics.observeGet(false)
+		return nil, err
+	}
+	bc.metrics.observeGet(true)
+	return newRefCloser(accessor)
+}
+
+func (bc *FileCache) get(key Key) (*accessor, error) {
+	hashStr := key.Datahash.String()
+	if hashStr == "" {
+		var ok bool
+		bc.idxLock.RLock()
+		hashStr, ok = bc.HeightsIdx[key.Height]
+		bc.idxLock.RUnlock()
+		if !ok {
+			return nil, errCacheMiss
+		}
+	}
+	abs, ok := bc.cache.Get(hashStr)
+	if !ok {
+		return nil, errCacheMiss
+	}
+	return abs, nil
+}
+
+// GetOrLoad attempts to get an item from the cache, and if not found, invokes
+// the provided loader function to load it.
+func (bc *FileCache) GetOrLoad(ctx context.Context, key Key, loader OpenFileFn) (file.EdsFile, error) {
+	if !key.isComplete() {
+		return nil, errors.New("key is not complete")
+	}
+
+	lk := &bc.stripedLocks[keyToStriped(key)]
+	lk.Lock()
+	defer lk.Unlock()
+
+	abs, err := bc.get(key)
+	if err == nil {
+		// return accessor, only if it is not closed yet
+		accessorWithRef, err := newRefCloser(abs)
+		if err == nil {
+			bc.metrics.observeGet(true)
+			return accessorWithRef, nil
+		}
+	}
+
+	// accessor not found in cache or closed, so load new one using loader
+	key, f, err := loader(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load accessor: %w", err)
+	}
+
+	fa := &accessor{EdsFile: f, Height: key.Height}
+	// Create a new accessor first to increment the reference count in it, so it cannot get evicted
+	// from the inner lru cache before it is used.
+	rc, err := newRefCloser(fa)
+	if err != nil {
+		return nil, err
+	}
+	return rc, bc.add(key, fa)
+}
+
+func (bc *FileCache) add(key Key, fa *accessor) error {
+	keyStr := key.Datahash.String()
+	bc.idxLock.Lock()
+	bc.HeightsIdx[key.Height] = keyStr
+	bc.idxLock.Unlock()
+	// Add may synchronously evict another entry, and evictFn takes idxLock, so the lock must not
+	// be held here.
+	bc.cache.Add(keyStr, fa)
+	return nil
+}
+
+// Remove removes the Accessor for a given key from the cache.
+func (bc *FileCache) Remove(key Key) error {
+	lk := &bc.stripedLocks[keyToStriped(key)]
+	lk.Lock()
+	accessor, err := bc.get(key)
+	lk.Unlock()
+	if errors.Is(err, errCacheMiss) {
+		// item is not in cache
+		return nil
+	}
+	if err = accessor.close(); err != nil {
+		return err
+	}
+	// The cache will call evictFn on removal, where accessor close will be called.
+	bc.cache.Remove(key.Datahash.String())
+	return nil
+}
+
+// EnableMetrics enables metrics for the cache.
+func (bc *FileCache) EnableMetrics() error {
+	var err error
+	bc.metrics, err = newMetrics(bc)
+	return err
+}
+
+// refCloser manages references to accessor from provided reader and removes the ref when
+// Close is called
+type refCloser struct {
+	*accessor
+	closeFn func()
+}
+
+// newRefCloser creates new refCloser
+func newRefCloser(abs *accessor) (*refCloser, error) {
+	if err := abs.addRef(); err != nil {
+		return nil, err
+	}
+
+	var closeOnce sync.Once
+	return &refCloser{
+		accessor: abs,
+		closeFn: func() {
+			closeOnce.Do(abs.removeRef)
+		},
+	}, nil
+}
+
+func (c *refCloser) Close() error {
+	c.closeFn()
+	return nil
+}
+
+// keyToStriped returns the index of the lock to use for a given key. We use the last
+// character of the hex-encoded Datahash as the pseudo-random index.
+func keyToStriped(sk Key) byte {
+	str := sk.Datahash.String()
+	return str[len(str)-1]
+}
diff --git a/share/store/cache/cache.go b/share/store/cache/cache.go
new file mode 100644
index 0000000000..4c12ff1eaf
--- /dev/null
+++ b/share/store/cache/cache.go
@@ -0,0 +1,48 @@
+package cache
+
+import (
+	"context"
+	"errors"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	logging "github.com/ipfs/go-log/v2"
+	"go.opentelemetry.io/otel"
+)
+
+var (
+	log   = logging.Logger("share/eds/cache")
+	meter = otel.Meter("eds_store_cache")
+)
+
+var (
+	errCacheMiss = errors.New("accessor not found in blockstore cache")
+)
+
+// Key is a unique identifier for an item in the Cache. Either Datahash or Height should be set.
+type Key struct {
+	Datahash share.DataHash
+	Height   uint64
+}
+
+// isComplete reports whether both the Datahash and Height of the Key are set.
+func (k Key) isComplete() bool {
+	return k.Datahash != nil && k.Height != 0
+}
+
+type OpenFileFn func(context.Context) (Key, file.EdsFile, error)
+
+// Cache is an interface that defines the basic Cache operations.
+type Cache interface {
+	// Get returns the EDS file for the given key.
+	Get(Key) (file.EdsFile, error)
+
+	// GetOrLoad attempts to get an item from the Cache and, if not found, invokes
+	// the provided loader function to load it into the Cache.
+	GetOrLoad(context.Context, Key, OpenFileFn) (file.EdsFile, error)
+
+	// Remove removes an item from Cache.
+	Remove(Key) error
+
+	// EnableMetrics enables metrics in Cache
+	EnableMetrics() error
+}
diff --git a/share/store/cache/doublecache.go b/share/store/cache/doublecache.go
new file mode 100644
index 0000000000..39107d86cf
--- /dev/null
+++ b/share/store/cache/doublecache.go
@@ -0,0 +1,50 @@
+package cache
+
+import (
+	"errors"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+)
+
+// DoubleCache represents a Cache that looks into multiple caches one by one.
+type DoubleCache struct {
+	first, second Cache
+}
+
+// NewDoubleCache creates a new DoubleCache with the provided caches.
+func NewDoubleCache(first, second Cache) *DoubleCache {
+	return &DoubleCache{
+		first:  first,
+		second: second,
+	}
+}
+
+// Get looks into the caches one by one and returns the first item found.
+func (mc *DoubleCache) Get(key Key) (file.EdsFile, error) {
+	accessor, err := mc.first.Get(key)
+	if err == nil {
+		return accessor, nil
+	}
+	return mc.second.Get(key)
+}
+
+// Remove removes an item from all underlying caches
+func (mc *DoubleCache) Remove(key Key) error {
+	err1 := mc.first.Remove(key)
+	err2 := mc.second.Remove(key)
+	return errors.Join(err1, err2)
+}
+
+func (mc *DoubleCache) First() Cache {
+	return mc.first
+}
+
+func (mc *DoubleCache) Second() Cache {
+	return mc.second
+}
+
+func (mc *DoubleCache) EnableMetrics() error {
+	if err := mc.first.EnableMetrics(); err != nil {
+		return err
+	}
+	return mc.second.EnableMetrics()
+}
diff --git a/share/store/cache/metrics.go b/share/store/cache/metrics.go
new file mode 100644
index 0000000000..6e41357688
--- /dev/null
+++ b/share/store/cache/metrics.go
@@ -0,0 +1,69 @@
+package cache
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+)
+
+const (
+	cacheFoundKey = "found"
+	failedKey     = "failed"
+)
+
+type metrics struct {
+	getCounter     metric.Int64Counter
+	evictedCounter metric.Int64Counter
+}
+
+func newMetrics(bc *FileCache) (*metrics, error) {
+	metricsPrefix := "eds_blockstore_cache_" + bc.name
+
+	evictedCounter, err := meter.Int64Counter(metricsPrefix+"_evicted_counter",
+		metric.WithDescription("eds blockstore cache evicted event counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	getCounter, err := meter.Int64Counter(metricsPrefix+"_get_counter",
+		metric.WithDescription("eds blockstore cache get event counter"))
+	if err != nil {
+		return nil, err
+	}
+
+	cacheSize, err := meter.Int64ObservableGauge(metricsPrefix+"_size",
+		metric.WithDescription("total amount of items in blockstore cache"),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	callback := func(ctx context.Context, observer metric.Observer) error {
+		observer.ObserveInt64(cacheSize, int64(bc.cache.Len()))
+		return nil
+	}
+	_, err = meter.RegisterCallback(callback, cacheSize)
+
+	return &metrics{
+		getCounter:     getCounter,
+		evictedCounter: evictedCounter,
+	}, err
+}
+
+func (m *metrics) observeEvicted(failed bool) {
+	if m == nil {
+		return
+	}
+	m.evictedCounter.Add(context.Background(), 1,
+		metric.WithAttributes(
+			attribute.Bool(failedKey, failed)))
+}
+
+func (m *metrics) observeGet(found bool) {
+	if m == nil {
+		return
+	}
+	m.getCounter.Add(context.Background(), 1, metric.WithAttributes(
+		attribute.Bool(cacheFoundKey, found)))
+}
diff --git a/share/store/cache/noop.go b/share/store/cache/noop.go
new file mode 100644
index 0000000000..1977e98dc0
--- /dev/null
+++ b/share/store/cache/noop.go
@@ -0,0 +1,71 @@
+package cache
+
+import (
+	"context"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	"github.com/celestiaorg/rsmt2d"
+	"io"
+)
+
+var _ Cache = (*NoopCache)(nil)
+
+// NoopCache implements noop version of Cache interface
+type NoopCache struct{}
+
+func (n NoopCache) Get(Key) (file.EdsFile, error) {
+	return nil, errCacheMiss
+}
+
+func (n NoopCache) GetOrLoad(context.Context, Key, OpenFileFn) (file.EdsFile, error) {
+	return NoopFile{}, nil
+}
+
+func (n NoopCache) Remove(Key) error {
+	return nil
+}
+
+func (n NoopCache) EnableMetrics() error {
+	return nil
+}
+
+var _ file.EdsFile = (*NoopFile)(nil)
+
+// NoopFile implements noop version of file.EdsFile interface
+type NoopFile struct{}
+
+func (n NoopFile) Close() error {
+	return nil
+}
+
+func (n NoopFile) Reader() (io.Reader, error) {
+	return nil, nil
+}
+
+func (n NoopFile) Size() int {
+	return 0
+}
+
+func (n NoopFile) Height() uint64 {
+	return 0
+}
+
+func (n NoopFile) DataHash() share.DataHash {
+	return nil
+}
+
+func (n NoopFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) {
+	return nil, nil
+}
+
+func (n NoopFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
+	return nil, nil
+}
+
+func (n NoopFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) {
+	return share.NamespacedRow{}, nil
+}
+
+func (n NoopFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) {
+	return nil, nil
+}

From 41903628e93afa8075b628378cb9c41b432dd317 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Mon, 15 Jan 2024 01:23:37 +0700
Subject: [PATCH 052/132] add shwap multiplexer

---
 share/shwap/data_id.go   | 28 +++++++++++++++++++++++++
 share/shwap/handler.go   | 45 ++++++++++++++++++++++++++++++++++++++++
 share/shwap/row_id.go    | 30 +++++++++++++++++++++++++++
 share/shwap/sample_id.go | 30 +++++++++++++++++++++++++++
 4 files changed, 133 insertions(+)
 create mode 100644 share/shwap/handler.go

diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go
index 9bc329ef58..1bbacbd31b 100644
--- a/share/shwap/data_id.go
+++ b/share/shwap/data_id.go
@@ -1,7 +1,10 @@
 package shwap

 import (
+	"context"
 	"fmt"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	blocks "github.com/ipfs/go-block-format"

 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
@@ -14,6 +17,7 @@ const DataIDSize = RowIDSize + share.NamespaceSize

 // DataID is an unique identifier of a namespaced Data inside EDS Row.
 type DataID struct {
+	// TODO(@walldiss): why embed instead of just having a field?
	RowID

 	// DataNamespace is the namespace of the data
@@ -113,3 +117,27 @@ func (s DataID) Verify(root *share.Root) error {

 	return nil
 }
+
+func (s DataID) GetHeight() uint64 {
+	return s.RowID.GetHeight()
+}
+
+func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
+	data, err := f.Data(ctx, s.Namespace(), int(s.RowIndex))
+	if err != nil {
+		return nil, fmt.Errorf("while getting Data: %w", err)
+	}
+
+	d := NewData(s, data.Shares, *data.Proof)
+	blk, err := d.IPLDBlock()
+	if err != nil {
+		return nil, fmt.Errorf("while converting Data to IPLD block: %w", err)
+	}
+
+	err = f.Close()
+	if err != nil {
+		return nil, fmt.Errorf("while closing ODS file: %w", err)
+	}
+
+	return blk, nil
+}
diff --git a/share/shwap/handler.go b/share/shwap/handler.go
new file mode 100644
index 0000000000..a296f4d2d1
--- /dev/null
+++ b/share/shwap/handler.go
@@ -0,0 +1,45 @@
+package shwap
+
+import (
+	"context"
+	"fmt"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	blocks "github.com/ipfs/go-block-format"
+	"github.com/ipfs/go-cid"
+)
+
+// BlockBuilder is an interface for building blocks from files.
+type BlockBuilder interface {
+	// TODO(@walldiss): don't like this name, but it collides with field name in RowID
+	GetHeight() uint64
+	BlockFromFile(ctx context.Context, file file.EdsFile) (blocks.Block, error)
+}
+
+// BlockBuilderFromCID returns a BlockBuilder from a CID. It acts as a multiplexer for
+// the different block types.
+func BlockBuilderFromCID(cid cid.Cid) (BlockBuilder, error) {
+	switch cid.Type() {
+	case sampleCodec:
+		h, err := SampleIDFromCID(cid)
+		if err != nil {
+			return nil, fmt.Errorf("while converting CID to SampleID: %w", err)
+		}
+
+		return h, nil
+	case rowCodec:
+		rid, err := RowIDFromCID(cid)
+		if err != nil {
+			return nil, fmt.Errorf("while converting CID to RowID: %w", err)
+		}
+		return rid, nil
+	case dataCodec:
+		did, err := DataIDFromCID(cid)
+		if err != nil {
+			return nil, fmt.Errorf("while converting CID to DataID: %w", err)
+		}
+		return did, nil
+	default:
+		return nil, fmt.Errorf("unsupported codec")
+	}
+}
diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go
index a77c8bf36d..792e075b5b 100644
--- a/share/shwap/row_id.go
+++ b/share/shwap/row_id.go
@@ -1,8 +1,12 @@
 package shwap

 import (
+	"context"
 	"encoding/binary"
 	"fmt"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	"github.com/celestiaorg/rsmt2d"
+	blocks "github.com/ipfs/go-block-format"

 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
@@ -10,6 +14,8 @@ import (
 	"github.com/celestiaorg/celestia-node/share"
 )

+//TODO(@walldiss): maybe move into separate subpkg?
+
 // TODO:
 //  * Remove RowHash
 //  * Change validation
@@ -114,3 +120,27 @@ func (rid RowID) Verify(root *share.Root) error {

 	return nil
 }
+
+func (rid RowID) GetHeight() uint64 {
+	return rid.Height
+}
+
+func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
+	axisHalf, err := f.AxisHalf(ctx, rsmt2d.Row, int(rid.RowIndex))
+	if err != nil {
+		return nil, fmt.Errorf("while getting AxisHalf: %w", err)
+	}
+
+	s := NewRow(rid, axisHalf)
+	blk, err := s.IPLDBlock()
+	if err != nil {
+		return nil, fmt.Errorf("while converting to IPLD block: %w", err)
+	}
+
+	err = f.Close()
+	if err != nil {
+		return nil, fmt.Errorf("while closing EDS file: %w", err)
+	}
+
+	return blk, nil
+}
diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go
index 5561335cff..83056c66eb 100644
--- a/share/shwap/sample_id.go
+++ b/share/shwap/sample_id.go
@@ -1,8 +1,11 @@
 package shwap

 import (
+	"context"
 	"encoding/binary"
 	"fmt"
+	"github.com/celestiaorg/celestia-node/share/store/file"
+	blocks "github.com/ipfs/go-block-format"

 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
@@ -10,11 +13,14 @@ import (
 	"github.com/celestiaorg/celestia-node/share"
 )

+//TODO(@walldiss): maybe move into separate subpkg?
+
 // SampleIDSize is the size of the SampleID in bytes
 const SampleIDSize = RowIDSize + 2

 // SampleID is an unique identifier of a Sample.
 type SampleID struct {
+	// TODO(@walldiss): why embed instead of just having a field?
 	RowID

 	// ShareIndex is the index of the sampled share in the Row
@@ -103,3 +109,27 @@ func (sid SampleID) Verify(root *share.Root) error {

 	return sid.RowID.Verify(root)
 }
+
+func (sid SampleID) GetHeight() uint64 {
+	return sid.RowID.Height
+}
+
+func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
+	shr, err := f.Share(ctx, int(sid.RowID.RowIndex), int(sid.ShareIndex))
+	if err != nil {
+		return nil, fmt.Errorf("while getting share with proof: %w", err)
+	}
+
+	s := NewSample(sid, shr.Share, *shr.Proof, shr.Axis)
+	blk, err := s.IPLDBlock()
+	if err != nil {
+		return nil, fmt.Errorf("while converting to IPLD block: %w", err)
+	}
+
+	err = f.Close()
+	if err != nil {
+		return nil, fmt.Errorf("while closing ODS file: %w", err)
+	}
+
+	return blk, nil
+}

From 5c6aa8d6ab73c83baecca9e792e7e7fff2f8281a Mon Sep 17 00:00:00 2001
From: Vlad
Date: Mon, 15 Jan 2024 01:24:24 +0700
Subject: [PATCH 053/132] add blockstore

---
 share/shwap/blockstore.go                 | 236 ----------------------
 share/store/blockstore.go                 | 152 ++++++++++++++
 share/{shwap => store}/blockstore_test.go |  11 +-
 3 files changed, 158 insertions(+), 241 deletions(-)
 delete mode 100644 share/shwap/blockstore.go
 create mode 100644 share/store/blockstore.go
 rename share/{shwap => store}/blockstore_test.go (79%)

diff --git a/share/shwap/blockstore.go b/share/shwap/blockstore.go
deleted file mode 100644
index 80c0c2087b..0000000000
--- a/share/shwap/blockstore.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package shwap
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/ipfs/boxo/blockstore"
-	blocks "github.com/ipfs/go-block-format"
-	"github.com/ipfs/go-cid"
-
-	"github.com/celestiaorg/rsmt2d"
-
-	"github.com/celestiaorg/celestia-node/share/eds"
-)
-
-// fileStore is a mocking friendly local interface over eds.FileStore
-// TODO(@Wondertan): Consider making an actual interface of eds pkg
-type fileStore[F eds.File] interface {
-	File(height uint64) (F, error)
-}
-
-type Blockstore[F eds.File] struct {
-	fs fileStore[F]
-}
-
-func NewBlockstore[F eds.File](fs
fileStore[F]) blockstore.Blockstore { - return &Blockstore[F]{fs} -} - -func (b Blockstore[F]) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { - switch cid.Type() { - case sampleCodec: - id, err := SampleIDFromCID(cid) - if err != nil { - err = fmt.Errorf("while converting CID to SampleId: %w", err) - log.Error(err) - return nil, err - } - - blk, err := b.getSampleBlock(id) - if err != nil { - log.Error(err) - return nil, err - } - - return blk, nil - case rowCodec: - id, err := RowIDFromCID(cid) - if err != nil { - err = fmt.Errorf("while converting CID to RowID: %w", err) - log.Error(err) - return nil, err - } - - blk, err := b.getRowBlock(id) - if err != nil { - log.Error(err) - return nil, err - } - - return blk, nil - case dataCodec: - id, err := DataIDFromCID(cid) - if err != nil { - err = fmt.Errorf("while converting CID to DataID: %w", err) - log.Error(err) - return nil, err - } - - blk, err := b.getDataBlock(id) - if err != nil { - log.Error(err) - return nil, err - } - - return blk, nil - default: - return nil, fmt.Errorf("unsupported codec") - } -} - -func (b Blockstore[F]) getSampleBlock(id SampleID) (blocks.Block, error) { - f, err := b.fs.File(id.Height) - if err != nil { - return nil, fmt.Errorf("while getting ODS file from FS: %w", err) - } - - shr, prf, proofType, err := f.ShareWithProof(int(id.RowIndex), int(id.ShareIndex)) - if err != nil { - return nil, fmt.Errorf("while getting share with proof: %w", err) - } - - s := NewSample(id, shr, prf, proofType) - blk, err := s.IPLDBlock() - if err != nil { - return nil, fmt.Errorf("while coverting to IPLD block: %w", err) - } - - err = f.Close() - if err != nil { - return nil, fmt.Errorf("while closing ODS file: %w", err) - } - - return blk, nil -} - -func (b Blockstore[F]) getRowBlock(id RowID) (blocks.Block, error) { - f, err := b.fs.File(id.Height) - if err != nil { - return nil, fmt.Errorf("while getting EDS file from FS: %w", err) - } - - axisHalf, err := f.AxisHalf(rsmt2d.Row, int(id.RowIndex)) - if err != nil { - return nil, fmt.Errorf("while getting AxisHalf: %w", err) - } - - s := NewRow(id, axisHalf) - blk, err := s.IPLDBlock() - if err != nil { - return nil, fmt.Errorf("while coverting to IPLD block: %w", err) - } - - err = f.Close() - if err != nil { - return nil, fmt.Errorf("while closing EDS file: %w", err) - } - - return blk, nil -} - -func (b Blockstore[F]) getDataBlock(id DataID) (blocks.Block, error) { - f, err := b.fs.File(id.Height) - if err != nil { - return nil, fmt.Errorf("while getting ODS file from FS: %w", err) - } - - data, prf, err := f.Data(id.Namespace(), int(id.RowIndex)) - if err != nil { - return nil, fmt.Errorf("while getting Data: %w", err) - } - - s := NewData(id, data, prf) - blk, err := s.IPLDBlock() - if err != nil { - return nil, fmt.Errorf("while coverting Data to IPLD block: %w", err) - } - - err = f.Close() - if err != nil { - return nil, fmt.Errorf("while closing ODS file: %w", err) - } - - return blk, nil -} - -func (b Blockstore[F]) GetSize(ctx context.Context, cid cid.Cid) (int, error) { - // TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and - // allocating Sample's block.Block. 
- // NOTE:Bitswap uses GetSize also to determine if we have content stored or not - // so simply returning constant size is not an option - blk, err := b.Get(ctx, cid) - if err != nil { - return 0, err - } - - return len(blk.RawData()), nil -} - -func (b Blockstore[F]) Has(_ context.Context, cid cid.Cid) (bool, error) { - var id RowID - switch cid.Type() { - case sampleCodec: - sid, err := SampleIDFromCID(cid) - if err != nil { - err = fmt.Errorf("while converting CID to SampleID: %w", err) - log.Error(err) - return false, err - } - - id = sid.RowID - case rowCodec: - var err error - id, err = RowIDFromCID(cid) - if err != nil { - err = fmt.Errorf("while converting CID to RowID: %w", err) - log.Error(err) - return false, err - } - case dataCodec: - did, err := DataIDFromCID(cid) - if err != nil { - err = fmt.Errorf("while converting CID to DataID: %w", err) - log.Error(err) - return false, err - } - - id = did.RowID - default: - return false, fmt.Errorf("unsupported codec") - } - - f, err := b.fs.File(id.Height) - if err != nil { - err = fmt.Errorf("while getting ODS file from FS: %w", err) - log.Error(err) - return false, err - } - - err = f.Close() - if err != nil { - err = fmt.Errorf("while closing ODS file: %w", err) - log.Error(err) - return false, err - } - // existence of the file confirms existence of the share - return true, nil -} - -func (b Blockstore[F]) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - return nil, fmt.Errorf("AllKeysChan is unsupported") -} - -func (b Blockstore[F]) DeleteBlock(context.Context, cid.Cid) error { - return fmt.Errorf("writes are not supported") -} - -func (b Blockstore[F]) Put(context.Context, blocks.Block) error { - return fmt.Errorf("writes are not supported") -} - -func (b Blockstore[F]) PutMany(context.Context, []blocks.Block) error { - return fmt.Errorf("writes are not supported") -} - -func (b Blockstore[F]) HashOnRead(bool) {} diff --git a/share/store/blockstore.go b/share/store/blockstore.go new file mode 100644 index 0000000000..48aa0cb15a --- /dev/null +++ b/share/store/blockstore.go @@ -0,0 +1,152 @@ +package store + +import ( + "context" + "errors" + "fmt" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/store/cache" + + bstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/datastore/dshelp" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + ipld "github.com/ipfs/go-ipld-format" +) + +var _ bstore.Blockstore = (*Blockstore)(nil) + +var ( + blockstoreCacheKey = datastore.NewKey("bs-cache") + errUnsupportedOperation = errors.New("unsupported operation") +) + +// Blockstore implements the bstore.Blockstore interface on an EDSStore. +// It is used to provide a custom blockstore interface implementation to achieve access to the +// underlying EDSStore. The main use-case is randomized sampling over the whole chain of EDS block +// data and getting data by namespace. 
+type Blockstore struct { + store *Store + ds datastore.Batching +} + +func NewBlockstore(store *Store, ds datastore.Batching) *Blockstore { + return &Blockstore{ + store: store, + ds: namespace.Wrap(ds, blockstoreCacheKey), + } +} + +func (bs *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + h, err := shwap.BlockBuilderFromCID(cid) + if err != nil { + return false, fmt.Errorf("while getting height from CID: %w", err) + } + + // check cache first + height := h.GetHeight() + _, err = bs.store.cache.Get(cache.Key{Height: height}) + if err == nil { + return true, nil + } + + _, err = bs.store.GetByHeight(ctx, height) + if err == nil { + return true, nil + } + if !errors.Is(err, ErrNotFound) { + return false, fmt.Errorf("failed to get file: %w", err) + } + + // key wasn't found in top level blockstore, but could be in datastore while being reconstructed + dsHas, dsErr := bs.ds.Has(ctx, dshelp.MultihashToDsKey(cid.Hash())) + if dsErr != nil { + return false, nil + } + return dsHas, nil +} + +func (bs *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + h, err := shwap.BlockBuilderFromCID(cid) + if err != nil { + return nil, fmt.Errorf("while getting height from CID: %w", err) + } + + height := h.GetHeight() + f, err := bs.store.cache.Second().GetOrLoad(ctx, cache.Key{Height: height}, bs.store.openFileByHeight(height)) + if err == nil { + return h.BlockFromFile(ctx, f) + } + + if errors.Is(err, ErrNotFound) { + k := dshelp.MultihashToDsKey(cid.Hash()) + blockData, err := bs.ds.Get(ctx, k) + if err == nil { + return blocks.NewBlockWithCid(blockData, cid) + } + // nmt's GetNode expects an ipld.ErrNotFound when a cid is not found. + return nil, ipld.ErrNotFound{Cid: cid} + } + + log.Debugf("failed to get blockstore for cid %s: %s", cid, err) + return nil, err +} + +func (bs *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + // TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and + // allocating Sample's block.Block. + // NOTE:Bitswap uses GetSize also to determine if we have content stored or not + // so simply returning constant size is not an option + blk, err := bs.Get(ctx, cid) + if err != nil { + return 0, err + } + + return len(blk.RawData()), nil +} + +func (bs *Blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + k := dshelp.MultihashToDsKey(cid.Hash()) + return bs.ds.Delete(ctx, k) +} + +func (bs *Blockstore) Put(ctx context.Context, blk blocks.Block) error { + k := dshelp.MultihashToDsKey(blk.Cid().Hash()) + // note: we leave duplicate resolution to the underlying datastore + return bs.ds.Put(ctx, k, blk.RawData()) +} + +func (bs *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + if len(blocks) == 1 { + // performance fast-path + return bs.Put(ctx, blocks[0]) + } + + t, err := bs.ds.Batch(ctx) + if err != nil { + return err + } + for _, b := range blocks { + k := dshelp.MultihashToDsKey(b.Cid().Hash()) + err = t.Put(ctx, k, b.RawData()) + if err != nil { + return err + } + } + return t.Commit(ctx) +} + +// AllKeysChan is a noop on the EDS blockstore because the keys are not stored in a single CAR file. +func (bs *Blockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { + err := fmt.Errorf("AllKeysChan is: %w", errUnsupportedOperation) + log.Warn(err) + return nil, err +} + +// HashOnRead is a noop on the EDS blockstore but an error cannot be returned due to the method +// signature from the blockstore interface. 
+func (bs *Blockstore) HashOnRead(bool) { + log.Warn("HashOnRead is a noop on the EDS blockstore") +} diff --git a/share/shwap/blockstore_test.go b/share/store/blockstore_test.go similarity index 79% rename from share/shwap/blockstore_test.go rename to share/store/blockstore_test.go index d3da72e808..da618212da 100644 --- a/share/shwap/blockstore_test.go +++ b/share/store/blockstore_test.go @@ -1,10 +1,11 @@ -package shwap +package store import ( "context" + "github.com/celestiaorg/celestia-node/share/shwap" "testing" - "github.com/ipfs/boxo/blockstore" + boxobs "github.com/ipfs/boxo/blockstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,13 +28,13 @@ func TestBlockstoreGetShareSample(t *testing.T) { width := int(sqr.Width()) for i := 0; i < width*width; i++ { - id, err := NewSampleID(1, i, root) + id, err := shwap.NewSampleID(1, i, root) require.NoError(t, err) blk, err := b.Get(ctx, id.Cid()) require.NoError(t, err) - sample, err := SampleFromBlock(blk) + sample, err := shwap.SampleFromBlock(blk) require.NoError(t, err) err = sample.Verify(root) @@ -48,6 +49,6 @@ func (m *edsFileAndFS) File(uint64) (*eds.MemFile, error) { return (*eds.MemFile)(m), nil } -func edsBlockstore(sqr *rsmt2d.ExtendedDataSquare) blockstore.Blockstore { +func edsBlockstore(sqr *rsmt2d.ExtendedDataSquare) boxobs.Blockstore { return NewBlockstore[*eds.MemFile]((*edsFileAndFS)(&eds.MemFile{Eds: sqr})) } From e332f0ab014a11f349aff1ac226109b1128abc99 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Jan 2024 01:26:13 +0700 Subject: [PATCH 054/132] add support for store in shrex --- libs/utils/logcloser.go | 12 ++++++++++++ share/eds/file.go | 3 +-- share/p2p/shrexeds/client.go | 4 ++-- share/p2p/shrexeds/server.go | 31 +++++++++++++++---------------- share/p2p/shrexnd/client.go | 3 +++ share/p2p/shrexnd/pb/share.pb.go | 3 +++ share/p2p/shrexnd/pb/share.proto | 4 +++- share/p2p/shrexnd/server.go | 28 +++++++++++++++++++--------- share/utils.go | 14 ++++++++++++++ 9 files changed, 72 insertions(+), 30 deletions(-) create mode 100644 libs/utils/logcloser.go create mode 100644 share/utils.go diff --git a/libs/utils/logcloser.go b/libs/utils/logcloser.go new file mode 100644 index 0000000000..73b3820a08 --- /dev/null +++ b/libs/utils/logcloser.go @@ -0,0 +1,12 @@ +package utils + +import ( + "github.com/ipfs/go-log/v2" + "io" +) + +func CloseAndLog(log log.StandardLogger, name string, closer io.Closer) { + if err := closer.Close(); err != nil { + log.Warnf("closing %s: %s", name, err) + } +} diff --git a/share/eds/file.go b/share/eds/file.go index e3870db9bc..b38b0c6e05 100644 --- a/share/eds/file.go +++ b/share/eds/file.go @@ -276,8 +276,7 @@ func NDFromShares(shrs []share.Share, namespace share.Namespace, axisIdx int) ([ return nil, nmt.Proof{}, err } - cid := ipld.MustCidFromNamespacedSha256(root) - row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, cid, namespace, len(shrs)) + row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, root, namespace, len(shrs)) if err != nil { return nil, nmt.Proof{}, err } diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index 7602bb5fb0..956dc1243d 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/share/store/file" "io" "net" "time" @@ -17,7 +18,6 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - 
"github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/p2p" pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" ) @@ -129,7 +129,7 @@ func (c *Client) doRequest( // reset stream deadlines to original values, since read deadline was changed during status read c.setStreamDeadlines(ctx, stream) // use header and ODS bytes to construct EDS and verify it against dataHash - eds, err := eds.ReadEDS(ctx, stream, dataHash) + eds, err := file.ReadEds(ctx, stream, dataHash) if err != nil { return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) } diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go index 11b99a3438..a6e1e002d8 100644 --- a/share/p2p/shrexeds/server.go +++ b/share/p2p/shrexeds/server.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share/store" + "github.com/celestiaorg/celestia-node/share/store/file" "io" "time" @@ -28,7 +31,7 @@ type Server struct { host host.Host protocolID protocol.ID - store *eds.Store + store *store.Store params *Parameters middleware *p2p.Middleware @@ -36,7 +39,7 @@ type Server struct { } // NewServer creates a new ShrEx/EDS server. -func NewServer(params *Parameters, host host.Host, store *eds.Store) (*Server, error) { +func NewServer(params *Parameters, host host.Host, store *store.Store) (*Server, error) { if err := params.Validate(); err != nil { return nil, fmt.Errorf("shrex-eds: server creation failed: %w", err) } @@ -99,15 +102,11 @@ func (s *Server) handleStream(stream network.Stream) { // determine whether the EDS is available in our store // we do not close the reader, so that other requests will not need to re-open the file. // closing is handled by the LRU cache. 
-	edsReader, err := s.store.GetCAR(ctx, hash)
+	file, err := s.store.GetByHash(ctx, hash)
 	var status p2p_pb.Status
 	switch {
 	case err == nil:
-		defer func() {
-			if err := edsReader.Close(); err != nil {
-				log.Warnw("closing car reader", "err", err)
-			}
-		}()
+		defer utils.CloseAndLog(logger, "file", file)
 		status = p2p_pb.Status_OK
 	case errors.Is(err, eds.ErrNotFound):
 		logger.Warnw("server: request hash not found")
@@ -135,7 +134,7 @@
 	}
 
 	// start streaming the ODS to the client
-	err = s.writeODS(logger, edsReader, stream)
+	err = s.writeODS(logger, file, stream)
 	if err != nil {
 		logger.Warnw("server: writing ods to stream", "err", err)
 		stream.Reset() //nolint:errcheck
@@ -179,18 +178,18 @@ func (s *Server) writeStatus(logger *zap.SugaredLogger, status p2p_pb.Status, st
 	return err
 }
 
-func (s *Server) writeODS(logger *zap.SugaredLogger, edsReader io.Reader, stream network.Stream) error {
-	err := stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout))
+func (s *Server) writeODS(logger *zap.SugaredLogger, file file.EdsFile, stream network.Stream) error {
+	reader, err := file.Reader()
 	if err != nil {
-		logger.Debugw("server: set read deadline", "err", err)
+		return fmt.Errorf("getting ODS reader: %w", err)
 	}
-
-	odsReader, err := eds.ODSReader(edsReader)
+	err = stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout))
 	if err != nil {
-		return fmt.Errorf("creating ODS reader: %w", err)
+		logger.Debugw("server: set read deadline", "err", err)
 	}
+
 	buf := make([]byte, s.params.BufferSize)
-	_, err = io.CopyBuffer(stream, odsReader, buf)
+	_, err = io.CopyBuffer(stream, reader, buf)
 	if err != nil {
 		return fmt.Errorf("writing ODS bytes: %w", err)
 	}
diff --git a/share/p2p/shrexnd/client.go b/share/p2p/shrexnd/client.go
index 86c5150095..3ae3cc0134 100644
--- a/share/p2p/shrexnd/client.go
+++ b/share/p2p/shrexnd/client.go
@@ -93,9 +93,12 @@ func (c *Client) doRequest(
 
 	c.setStreamDeadlines(ctx, stream)
 
+	from, to := share.RowRangeForNamespace(root, namespace)
 	req := &pb.GetSharesByNamespaceRequest{
 		RootHash:  root.Hash(),
 		Namespace: namespace,
+		FromRow:   uint32(from),
+		ToRow:     uint32(to),
 	}
 
 	_, err = serde.Write(stream, req)
diff --git a/share/p2p/shrexnd/pb/share.pb.go b/share/p2p/shrexnd/pb/share.pb.go
index 7e3c11416f..80c12c6465 100644
--- a/share/p2p/shrexnd/pb/share.pb.go
+++ b/share/p2p/shrexnd/pb/share.pb.go
@@ -54,9 +54,12 @@ func (StatusCode) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_ed9f13149b0de397, []int{0}
 }
 
+// FIXME(@walldiss): Needs to be regenerated
 type GetSharesByNamespaceRequest struct {
 	RootHash  []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"`
 	Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	FromRow   uint32
+	ToRow     uint32
 }
 
 func (m *GetSharesByNamespaceRequest) Reset()         { *m = GetSharesByNamespaceRequest{} }
diff --git a/share/p2p/shrexnd/pb/share.proto b/share/p2p/shrexnd/pb/share.proto
index a5bdbfa071..026965f6c3 100644
--- a/share/p2p/shrexnd/pb/share.proto
+++ b/share/p2p/shrexnd/pb/share.proto
@@ -1,11 +1,13 @@
 syntax = "proto3";
 package share.p2p.shrex.nd;
 
 import "pb/proof.proto";
 
 message GetSharesByNamespaceRequest{
     bytes root_hash = 1;
     bytes namespace = 2;
+    uint32 fromRow = 3;
+    uint32 toRow = 4;
 }
 
 message GetSharesByNamespaceStatusResponse{
diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go
index 33e61ff472..accdc25f56 100644
--- 
a/share/p2p/shrexnd/server.go +++ b/share/p2p/shrexnd/server.go @@ -5,6 +5,8 @@ import ( "crypto/sha256" "errors" "fmt" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share/store" "time" "github.com/libp2p/go-libp2p/core/host" @@ -30,7 +32,7 @@ type Server struct { protocolID protocol.ID handler network.StreamHandler - store *eds.Store + store *store.Store params *Parameters middleware *p2p.Middleware @@ -38,7 +40,7 @@ type Server struct { } // NewServer creates new Server -func NewServer(params *Parameters, host host.Host, store *eds.Store) (*Server, error) { +func NewServer(params *Parameters, host host.Host, store *store.Store) (*Server, error) { if err := params.Validate(); err != nil { return nil, fmt.Errorf("shrex-nd: server creation failed: %w", err) } @@ -114,7 +116,7 @@ func (srv *Server) handleNamespacedData(ctx context.Context, stream network.Stre ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) defer cancel() - shares, status, err := srv.getNamespaceData(ctx, req.RootHash, req.Namespace) + shares, status, err := srv.getNamespaceData(ctx, req.RootHash, req.Namespace, int(req.FromRow), int(req.ToRow)) if err != nil { // server should respond with status regardless if there was an error getting data sendErr := srv.respondStatus(ctx, logger, stream, status) @@ -172,21 +174,29 @@ func (srv *Server) readRequest( } func (srv *Server) getNamespaceData(ctx context.Context, - hash share.DataHash, namespace share.Namespace) (share.NamespacedShares, pb.StatusCode, error) { - dah, err := srv.store.GetDAH(ctx, hash) + hash share.DataHash, + namespace share.Namespace, + fromRow, toRow int, +) (share.NamespacedShares, pb.StatusCode, error) { + file, err := srv.store.GetByHash(ctx, hash) if err != nil { if errors.Is(err, eds.ErrNotFound) { return nil, pb.StatusCode_NOT_FOUND, nil } return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving DAH: %w", err) } + defer utils.CloseAndLog(log, "file", file) - shares, err := eds.RetrieveNamespaceFromStore(ctx, srv.store, dah, namespace) - if err != nil { - return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving shares: %w", err) + namespacedRows := make(share.NamespacedShares, 0, toRow-fromRow+1) + for rowIdx := fromRow; rowIdx <= toRow; rowIdx++ { + data, err := file.Data(ctx, namespace, rowIdx) + if err != nil { + return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving data: %w", err) + } + namespacedRows = append(namespacedRows, data) } - return shares, pb.StatusCode_OK, nil + return namespacedRows, pb.StatusCode_OK, nil } func (srv *Server) respondStatus( diff --git a/share/utils.go b/share/utils.go new file mode 100644 index 0000000000..9fe87d4110 --- /dev/null +++ b/share/utils.go @@ -0,0 +1,14 @@ +package share + +// TODO(@walldiss): refactor this into proper package once we have a better idea of what it should look like +func RowRangeForNamespace(root *Root, namespace Namespace) (from, to int) { + for i, row := range root.RowRoots { + if !namespace.IsOutsideRange(row, row) { + if from == 0 { + from = i + } + to = i + } + } + return from, to +} From 4ab0fbf0fbf9e0a39734431ca821e468b2a87806 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Jan 2024 01:26:29 +0700 Subject: [PATCH 055/132] add support for store in store getter --- share/getters/store.go | 60 +++++++++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 21 deletions(-) diff --git a/share/getters/store.go b/share/getters/store.go index d66a057c56..e5543c42b4 100644 --- a/share/getters/store.go 
+++ b/share/getters/store.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/share/store" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -14,7 +15,6 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/ipld" ) var _ share.Getter = (*StoreGetter)(nil) @@ -22,11 +22,11 @@ var _ share.Getter = (*StoreGetter)(nil) // StoreGetter is a share.Getter that retrieves shares from an eds.Store. No results are saved to // the eds.Store after retrieval. type StoreGetter struct { - store *eds.Store + store *store.Store } // NewStoreGetter creates a new share.Getter that retrieves shares from an eds.Store. -func NewStoreGetter(store *eds.Store) *StoreGetter { +func NewStoreGetter(store *store.Store) *StoreGetter { return &StoreGetter{ store: store, } @@ -51,25 +51,19 @@ func (sg *StoreGetter) GetShare(ctx context.Context, header *header.ExtendedHead span.RecordError(err) return nil, err } - root, leaf := ipld.Translate(dah, row, col) - bs, err := sg.store.CARBlockstore(ctx, dah.Hash()) + + file, err := sg.store.GetByHash(ctx, dah.Hash()) if errors.Is(err, eds.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound } if err != nil { - return nil, fmt.Errorf("getter/store: failed to retrieve blockstore: %w", err) + return nil, fmt.Errorf("getter/store: failed to retrieve file: %w", err) } - defer func() { - if err := bs.Close(); err != nil { - log.Warnw("closing blockstore", "err", err) - } - }() + defer utils.CloseAndLog(log, "file", file) - // wrap the read-only CAR blockstore in a getter - blockGetter := eds.NewBlockGetter(bs) - s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) - if errors.Is(err, ipld.ErrNodeNotFound) { + sh, err := file.Share(ctx, col, row) + if errors.Is(err, store.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound } @@ -77,7 +71,7 @@ func (sg *StoreGetter) GetShare(ctx context.Context, header *header.ExtendedHead return nil, fmt.Errorf("getter/store: failed to retrieve share: %w", err) } - return s, nil + return sh.Share, nil } // GetEDS gets the EDS identified by the given root from the EDS store. 
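
One convention repeated across these rewritten getter methods: a miss at any layer (the store lookup by hash or the file-level read) must surface as share.ErrNotFound, because callers of share.Getter match on that sentinel rather than on store-internal errors. A minimal sketch of the mapping with a hypothetical helper name; the hunks above and below inline the same logic:

    package sketch

    import (
    	"errors"

    	"github.com/celestiaorg/celestia-node/share"
    	"github.com/celestiaorg/celestia-node/share/eds"
    	"github.com/celestiaorg/celestia-node/share/store"
    )

    // asGetterErr converts store-level misses into the error the
    // share.Getter interface contract promises to its callers.
    func asGetterErr(err error) error {
    	if errors.Is(err, eds.ErrNotFound) || errors.Is(err, store.ErrNotFound) {
    		return share.ErrNotFound
    	}
    	return err
    }
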
@@ -89,15 +83,21 @@ func (sg *StoreGetter) GetEDS(
 		utils.SetStatusAndEnd(span, err)
 	}()
 
-	data, err = sg.store.Get(ctx, header.DAH.Hash())
+	file, err := sg.store.GetByHash(ctx, header.DAH.Hash())
 	if errors.Is(err, eds.ErrNotFound) {
 		// convert error to satisfy getter interface contract
 		err = share.ErrNotFound
 	}
+	if err != nil {
+		return nil, fmt.Errorf("getter/store: failed to retrieve file: %w", err)
+	}
+	defer utils.CloseAndLog(log, "file", file)
+
+	eds, err := file.EDS(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("getter/store: failed to retrieve eds: %w", err)
 	}
-	return data, nil
+	return eds, nil
 }
 
 // GetSharesByNamespace gets all EDS shares in the given namespace from the EDS store through the
@@ -114,9 +114,27 @@ func (sg *StoreGetter) GetSharesByNamespace(
 		utils.SetStatusAndEnd(span, err)
 	}()
 
-	ns, err := eds.RetrieveNamespaceFromStore(ctx, sg.store, header.DAH, namespace)
+	file, err := sg.store.GetByHash(ctx, header.DAH.Hash())
+	if errors.Is(err, eds.ErrNotFound) {
+		// convert error to satisfy getter interface contract
+		err = share.ErrNotFound
+	}
 	if err != nil {
-		return nil, fmt.Errorf("getter/store: %w", err)
+		return nil, fmt.Errorf("getter/store: failed to retrieve file: %w", err)
+	}
+	defer utils.CloseAndLog(log, "file", file)
+
+	// get all shares in the namespace
+	from, to := share.RowRangeForNamespace(header.DAH, namespace)
+
+	shares = make(share.NamespacedShares, 0, to-from+1)
+	for row := from; row <= to; row++ {
+		data, err := file.Data(ctx, namespace, row)
+		if err != nil {
+			return nil, fmt.Errorf("getter/store: failed to retrieve namespaced data: %w", err)
+		}
+		shares = append(shares, data)
 	}
-	return ns, nil
+
+	return shares, nil
 }
From 2d5299a4ace1d9f12a6ce27f4a6183c925202e0e Mon Sep 17 00:00:00 2001
From: Vlad
Date: Mon, 15 Jan 2024 01:26:45 +0700
Subject: [PATCH 056/132] add support for store in availability

---
 share/availability/full/availability.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go
index ff26404d45..bea2d61236 100644
--- a/share/availability/full/availability.go
+++ b/share/availability/full/availability.go
@@ -4,13 +4,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/celestiaorg/celestia-node/share/store"
 
 	"github.com/filecoin-project/dagstore"
 	logging "github.com/ipfs/go-log/v2"
 
 	"github.com/celestiaorg/celestia-node/header"
 	"github.com/celestiaorg/celestia-node/share"
-	"github.com/celestiaorg/celestia-node/share/eds"
 	"github.com/celestiaorg/celestia-node/share/eds/byzantine"
 	"github.com/celestiaorg/celestia-node/share/ipld"
 	"github.com/celestiaorg/celestia-node/share/p2p/discovery"
@@ -22,7 +22,7 @@ var log = logging.Logger("share/full")
 // recovery technique. It is considered "full" because it is required
 // to download enough shares to fully reconstruct the data square.
 type ShareAvailability struct {
-	store  *eds.Store
+	store  *store.Store
 	getter share.Getter
 	disc   *discovery.Discovery
 
@@ -31,7 +31,7 @@ type ShareAvailability struct {
 
 // NewShareAvailability creates a new full ShareAvailability.
 func NewShareAvailability(
-	store *eds.Store,
+	store *store.Store,
 	getter share.Getter,
 	disc *discovery.Discovery,
 ) *ShareAvailability {
@@ -73,7 +73,7 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header
 	}
 
 	// a hack to avoid loading the whole EDS in mem if we store it already.
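
The GetSharesByNamespace rewrite in the store getter above leans on share.RowRangeForNamespace from the previous patch: shares are namespace-ordered within the square, so the rows containing a namespace form one contiguous [from, to] range and a per-row Data call is sufficient. A condensed sketch of that loop, assuming the EdsFile interface used throughout this series; note the helper also returns (0, 0) when the namespace is absent, so callers are expected to have checked inclusion against the root first:

    package sketch

    import (
    	"context"
    	"fmt"

    	"github.com/celestiaorg/celestia-node/share"
    	"github.com/celestiaorg/celestia-node/share/store/file"
    )

    // namespacedRows collects the namespace's shares row by row, each with
    // its NMT inclusion proof, in the order the rows appear in the square.
    func namespacedRows(
    	ctx context.Context,
    	f file.EdsFile,
    	root *share.Root,
    	ns share.Namespace,
    ) (share.NamespacedShares, error) {
    	from, to := share.RowRangeForNamespace(root, ns)

    	rows := make(share.NamespacedShares, 0, to-from+1)
    	for row := from; row <= to; row++ {
    		// one namespaced row plus its proof per iteration
    		data, err := f.Data(ctx, ns, row)
    		if err != nil {
    			return nil, fmt.Errorf("row %d: %w", row, err)
    		}
    		rows = append(rows, data)
    	}
    	return rows, nil
    }
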
- if ok, _ := fa.store.Has(ctx, dah.Hash()); ok { + if ok, _ := fa.store.HasByHash(ctx, dah.Hash()); ok { return nil } @@ -94,7 +94,7 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header return err } - err = fa.store.Put(ctx, dah.Hash(), eds) + _, err = fa.store.Put(ctx, dah.Hash(), header.Height(), eds) if err != nil && !errors.Is(err, dagstore.ErrShardExists) { return fmt.Errorf("full availability: failed to store eds: %w", err) } From 85ba38d09ad00b560849c489e2cb25712ff46dbc Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Jan 2024 01:35:22 +0700 Subject: [PATCH 057/132] update mem file --- share/store/file/mem_file.go | 26 ++++++++++++++-- share/store/file/mem_file_test.go | 49 +++++++++++++++++-------------- 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/share/store/file/mem_file.go b/share/store/file/mem_file.go index ddf6e10610..1ce362049d 100644 --- a/share/store/file/mem_file.go +++ b/share/store/file/mem_file.go @@ -1,7 +1,10 @@ package file import ( + "bytes" "context" + "github.com/celestiaorg/celestia-app/pkg/da" + "io" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" @@ -11,16 +14,35 @@ import ( "github.com/celestiaorg/celestia-node/share/ipld" ) -//var _ EdsFile = (*MemFile)(nil) +var _ EdsFile = (*MemFile)(nil) type MemFile struct { - Eds *rsmt2d.ExtendedDataSquare + height uint64 + Eds *rsmt2d.ExtendedDataSquare } func (f *MemFile) Close() error { return nil } +func (f *MemFile) Reader() (io.Reader, error) { + bs, err := f.Eds.MarshalJSON() + if err != nil { + return nil, err + } + + return bytes.NewReader(bs), nil +} + +func (f *MemFile) Height() uint64 { + return f.height +} + +func (f *MemFile) DataHash() share.DataHash { + dah, _ := da.NewDataAvailabilityHeader(f.Eds) + return dah.Hash() +} + func (f *MemFile) Size() int { return int(f.Eds.Width()) } diff --git a/share/store/file/mem_file_test.go b/share/store/file/mem_file_test.go index c2bb545142..5fb53ff41f 100644 --- a/share/store/file/mem_file_test.go +++ b/share/store/file/mem_file_test.go @@ -1,24 +1,29 @@ package file -//func TestMemFile(t *testing.T) { -// size := 8 -// newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { -// return &MemFile{Eds: eds} -// } -// -// t.Run("Share", func(t *testing.T) { -// testFileShare(t, newFile, size) -// }) -// -// t.Run("AxisHalf", func(t *testing.T) { -// testFileAxisHalf(t, newFile, size) -// }) -// -// t.Run("Data", func(t *testing.T) { -// testFileData(t, newFile, size) -// }) -// -// t.Run("EDS", func(t *testing.T) { -// testFileEds(t, newFile, size) -// }) -//} +import ( + "github.com/celestiaorg/rsmt2d" + "testing" +) + +func TestMemFile(t *testing.T) { + size := 8 + newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { + return &MemFile{Eds: eds} + } + + t.Run("Share", func(t *testing.T) { + testFileShare(t, newFile, size) + }) + + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, newFile, size) + }) + + t.Run("Data", func(t *testing.T) { + testFileData(t, newFile, size) + }) + + t.Run("EDS", func(t *testing.T) { + testFileEds(t, newFile, size) + }) +} From 8f0315b4af28b72285a2f5ed40115e592b56a822 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 31 Jan 2024 22:26:27 +0500 Subject: [PATCH 058/132] lots of changes --- core/eds.go | 19 - core/exchange.go | 10 +- core/listener.go | 8 +- libs/utils/logcloser.go | 3 +- nodebuilder/core/module.go | 6 +- nodebuilder/node.go | 2 +- nodebuilder/p2p/bitswap.go | 8 +- nodebuilder/share/config.go | 6 +- 
nodebuilder/share/constructors.go | 29 +- nodebuilder/share/module.go | 26 +- nodebuilder/share/opts.go | 4 +- share/availability/full/availability.go | 2 +- share/availability/full/testing.go | 103 ++- share/availability/light/availability.go | 5 - share/availability/light/testing.go | 99 ++- share/eds/adapters.go | 66 -- share/eds/adapters_test.go | 148 ---- share/eds/blockstore.go | 168 ----- share/eds/blockstore_test.go | 81 --- share/eds/cache/accessor_cache.go | 262 ------- share/eds/cache/cache.go | 49 -- share/eds/cache/doublecache.go | 51 -- share/eds/cache/metrics.go | 69 -- share/eds/cache/noop.go | 50 -- share/eds/eds.go | 274 -------- share/eds/eds_test.go | 283 -------- share/eds/file.go | 284 -------- share/eds/file_features.go | 20 - share/eds/file_header.go | 74 -- share/eds/file_store.go | 12 - share/eds/file_test.go | 85 --- share/eds/inverted_index.go | 103 --- share/eds/inverted_index_test.go | 55 -- share/eds/metrics.go | 292 -------- share/eds/ods.go | 98 --- share/eds/ods_file.go | 84 --- share/eds/ods_test.go | 110 --- share/eds/store.go | 644 ------------------ share/eds/store_test.go | 539 --------------- share/eds/utils.go | 152 ----- share/getters/ipld.go | 165 ----- share/getters/shrex.go | 17 +- share/getters/shwap.go | 259 +++++++ share/getters/store.go | 9 +- share/ipld/blockserv.go | 2 +- share/ipld/corrupted_data_test.go | 69 +- share/ipld/get_shares_test.go | 10 +- share/ipld/namespace_data.go | 2 + share/p2p/shrexeds/client.go | 19 +- share/p2p/shrexeds/params.go | 2 +- .../shrexeds/pb/extended_data_square.pb.go | 70 +- .../shrexeds/pb/extended_data_square.proto | 2 +- share/p2p/shrexeds/server.go | 22 +- share/p2p/shrexnd/client.go | 6 +- share/p2p/shrexnd/params.go | 2 +- share/p2p/shrexnd/pb/share.pb.go | 163 +++-- share/p2p/shrexnd/pb/share.proto | 4 +- share/p2p/shrexnd/server.go | 28 +- share/shwap/data.go | 35 +- share/shwap/data_hasher_test.go | 3 +- share/shwap/data_id.go | 4 +- share/shwap/getter.go | 14 + share/shwap/getter_test.go | 38 +- share/shwap/handler.go | 4 +- share/shwap/row_id.go | 11 +- share/shwap/sample_id.go | 6 +- share/store/blockstore.go | 10 +- share/store/blockstore_test.go | 115 +++- share/store/cache/accessor_cache.go | 219 +++--- .../cache/accessor_cache_test.go | 26 +- share/store/cache/cache.go | 26 +- share/store/cache/doublecache.go | 5 +- share/store/cache/noop.go | 16 +- share/store/file/cache_file_test.go | 6 +- share/store/file/file_closer.go | 6 +- share/store/file/file_header.go | 3 +- share/store/file/mem_file.go | 6 +- share/store/file/mem_file_test.go | 3 +- share/store/file/mempool.go | 9 +- share/store/file/ods_file.go | 19 +- share/store/file/ods_file_test.go | 33 +- .../file/{in_mem_ods_file.go => square.go} | 116 ++-- share/store/metrics.go | 132 ++++ share/store/store.go | 213 ++++-- share/store/striplock.go | 3 +- 85 files changed, 1313 insertions(+), 5002 deletions(-) delete mode 100644 share/eds/adapters.go delete mode 100644 share/eds/adapters_test.go delete mode 100644 share/eds/blockstore.go delete mode 100644 share/eds/blockstore_test.go delete mode 100644 share/eds/cache/accessor_cache.go delete mode 100644 share/eds/cache/cache.go delete mode 100644 share/eds/cache/doublecache.go delete mode 100644 share/eds/cache/metrics.go delete mode 100644 share/eds/cache/noop.go delete mode 100644 share/eds/eds.go delete mode 100644 share/eds/eds_test.go delete mode 100644 share/eds/file.go delete mode 100644 share/eds/file_features.go delete mode 100644 share/eds/file_header.go delete mode 100644 
share/eds/file_store.go delete mode 100644 share/eds/file_test.go delete mode 100644 share/eds/inverted_index.go delete mode 100644 share/eds/inverted_index_test.go delete mode 100644 share/eds/metrics.go delete mode 100644 share/eds/ods.go delete mode 100644 share/eds/ods_file.go delete mode 100644 share/eds/ods_test.go delete mode 100644 share/eds/store.go delete mode 100644 share/eds/store_test.go delete mode 100644 share/eds/utils.go delete mode 100644 share/getters/ipld.go create mode 100644 share/getters/shwap.go rename share/{eds => store}/cache/accessor_cache_test.go (93%) rename share/store/file/{in_mem_ods_file.go => square.go} (58%) create mode 100644 share/store/metrics.go diff --git a/core/eds.go b/core/eds.go index eb93c249ba..a6fb8cb883 100644 --- a/core/eds.go +++ b/core/eds.go @@ -1,11 +1,8 @@ package core import ( - "context" - "errors" "fmt" - "github.com/filecoin-project/dagstore" "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/app" @@ -15,9 +12,6 @@ import ( "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" ) // extendBlock extends the given block data, returning the resulting @@ -49,16 +43,3 @@ func extendShares(s [][]byte, options ...nmt.Option) (*rsmt2d.ExtendedDataSquare wrapper.NewConstructor(uint64(squareSize), options...)) } - -// storeEDS will only store extended block if it is not empty and doesn't already exist. -func storeEDS(ctx context.Context, hash share.DataHash, eds *rsmt2d.ExtendedDataSquare, store *eds.Store) error { - if eds == nil { - return nil - } - err := store.Put(ctx, hash, eds) - if errors.Is(err, dagstore.ErrShardExists) { - // block with given root already exists, return nil - return nil - } - return err -} diff --git a/core/exchange.go b/core/exchange.go index 2a7429ed60..a12ae6e13f 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -12,15 +12,15 @@ import ( "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/store" ) const concurrencyLimit = 4 type Exchange struct { fetcher *BlockFetcher - store *eds.Store + store *store.Store construct header.ConstructFn metrics *exchangeMetrics @@ -28,7 +28,7 @@ type Exchange struct { func NewExchange( fetcher *BlockFetcher, - store *eds.Store, + store *store.Store, construct header.ConstructFn, opts ...Option, ) (*Exchange, error) { @@ -151,7 +151,7 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende } ctx = ipld.CtxWithProofsAdder(ctx, adder) - err = storeEDS(ctx, eh.DAH.Hash(), eds, ce.store) + _, err = ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for height %d: %w", &block.Height, err) } @@ -191,7 +191,7 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 } ctx = ipld.CtxWithProofsAdder(ctx, adder) - err = storeEDS(ctx, eh.DAH.Hash(), eds, ce.store) + _, err = ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for block height %d: %w", b.Header.Height, err) } diff --git a/core/listener.go b/core/listener.go index 93a28ac33d..fefea40ba9 100644 --- a/core/listener.go +++ b/core/listener.go @@ -15,9 +15,9 @@ import ( "github.com/celestiaorg/nmt" 
"github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/store" ) var ( @@ -36,7 +36,7 @@ type Listener struct { fetcher *BlockFetcher construct header.ConstructFn - store *eds.Store + store *store.Store headerBroadcaster libhead.Broadcaster[*header.ExtendedHeader] hashBroadcaster shrexsub.BroadcastFn @@ -53,7 +53,7 @@ func NewListener( fetcher *BlockFetcher, hashBroadcaster shrexsub.BroadcastFn, construct header.ConstructFn, - store *eds.Store, + store *store.Store, blocktime time.Duration, opts ...Option, ) (*Listener, error) { @@ -209,7 +209,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS // attempt to store block data if not empty ctx = ipld.CtxWithProofsAdder(ctx, adder) - err = storeEDS(ctx, b.Header.DataHash.Bytes(), eds, cl.store) + _, err = cl.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return fmt.Errorf("storing EDS: %w", err) } diff --git a/libs/utils/logcloser.go b/libs/utils/logcloser.go index 73b3820a08..9027a50059 100644 --- a/libs/utils/logcloser.go +++ b/libs/utils/logcloser.go @@ -1,8 +1,9 @@ package utils import ( - "github.com/ipfs/go-log/v2" "io" + + "github.com/ipfs/go-log/v2" ) func CloseAndLog(log log.StandardLogger, name string, closer io.Closer) { diff --git a/nodebuilder/core/module.go b/nodebuilder/core/module.go index fec7c14b1b..1ec803f965 100644 --- a/nodebuilder/core/module.go +++ b/nodebuilder/core/module.go @@ -12,8 +12,8 @@ import ( "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/store" ) // ConstructModule collects all the components and services related to managing the relationship @@ -38,7 +38,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fxutil.ProvideAs( func( fetcher *core.BlockFetcher, - store *eds.Store, + store *store.Store, construct header.ConstructFn, ) (*core.Exchange, error) { var opts []core.Option @@ -55,7 +55,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fetcher *core.BlockFetcher, pubsub *shrexsub.PubSub, construct header.ConstructFn, - store *eds.Store, + store *store.Store, ) (*core.Listener, error) { var opts []core.Option if MetricsEnabled { diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 3e6950a6ae..f0f5eda7ae 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -59,7 +59,7 @@ type Node struct { Host host.Host ConnGater *conngater.BasicConnectionGater Routing routing.PeerRouting - DataExchange exchange.Interface + DataExchange exchange.SessionExchange BlockService blockservice.BlockService // p2p protocols PubSub *pubsub.PubSub diff --git a/nodebuilder/p2p/bitswap.go b/nodebuilder/p2p/bitswap.go index 014435071a..0ea33cf683 100644 --- a/nodebuilder/p2p/bitswap.go +++ b/nodebuilder/p2p/bitswap.go @@ -16,7 +16,7 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "go.uber.org/fx" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/store" ) const ( @@ -29,7 +29,7 @@ const ( ) // dataExchange provides a constructor for IPFS block's DataExchange over BitSwap. 
-func dataExchange(params bitSwapParams) exchange.Interface { +func dataExchange(params bitSwapParams) exchange.SessionExchange { prefix := protocolID(params.Net) net := network.NewFromIpfsHost(params.Host, &routinghelpers.Null{}, network.Prefix(prefix)) srvr := server.New( @@ -76,10 +76,10 @@ func blockstoreFromDatastore(ctx context.Context, ds datastore.Batching) (blocks ) } -func blockstoreFromEDSStore(ctx context.Context, store *eds.Store) (blockstore.Blockstore, error) { +func blockstoreFromEDSStore(ctx context.Context, s *store.Store, ds datastore.Batching) (blockstore.Blockstore, error) { return blockstore.CachedBlockstore( ctx, - store.Blockstore(), + store.NewBlockstore(s, ds), blockstore.CacheOpts{ HasTwoQueueCacheSize: defaultARCCacheSize, }, diff --git a/nodebuilder/share/config.go b/nodebuilder/share/config.go index 1d984b6dca..d08048ebc9 100644 --- a/nodebuilder/share/config.go +++ b/nodebuilder/share/config.go @@ -5,17 +5,17 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/share/availability/light" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/store" ) // TODO: some params are pointers and other are not, Let's fix this. type Config struct { // EDSStoreParams sets eds store configuration parameters - EDSStoreParams *eds.Parameters + EDSStoreParams *store.Parameters UseShareExchange bool // ShrExEDSParams sets shrexeds client and server configuration parameters @@ -31,7 +31,7 @@ type Config struct { func DefaultConfig(tp node.Type) Config { cfg := Config{ - EDSStoreParams: eds.DefaultParameters(), + EDSStoreParams: store.DefaultParameters(), Discovery: discovery.DefaultParameters(), ShrExEDSParams: shrexeds.DefaultParameters(), ShrExNDParams: shrexnd.DefaultParameters(), diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index aa2ac5bec1..816232bf9f 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -2,22 +2,18 @@ package share import ( "context" - "errors" - "github.com/filecoin-project/dagstore" "github.com/ipfs/boxo/blockservice" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing" - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" "github.com/celestiaorg/celestia-node/share/ipld" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" + "github.com/celestiaorg/celestia-node/share/shwap" ) const ( @@ -46,21 +42,6 @@ func newModule(getter share.Getter, avail share.Availability) Module { return &module{getter, avail} } -// ensureEmptyCARExists adds an empty EDS to the provided EDS store. 
-func ensureEmptyCARExists(ctx context.Context, store *eds.Store) error { - emptyEDS := share.EmptyExtendedDataSquare() - emptyDAH, err := da.NewDataAvailabilityHeader(emptyEDS) - if err != nil { - return err - } - - err = store.Put(ctx, emptyDAH.Hash(), emptyEDS) - if errors.Is(err, dagstore.ErrShardExists) { - return nil - } - return err -} - // ensureEmptyEDSInBS checks if the given DAG contains an empty block data square. // If it does not, it stores an empty block. This optimization exists to prevent // redundant storing of empty block data so that it is only stored once and returned @@ -72,14 +53,14 @@ func ensureEmptyEDSInBS(ctx context.Context, bServ blockservice.BlockService) er func lightGetter( shrexGetter *getters.ShrexGetter, - ipldGetter *getters.IPLDGetter, + shwapGetter *shwap.Getter, cfg Config, ) share.Getter { var cascade []share.Getter if cfg.UseShareExchange { cascade = append(cascade, shrexGetter) } - cascade = append(cascade, ipldGetter) + cascade = append(cascade, shwapGetter) return getters.NewCascadeGetter(cascade) } @@ -103,7 +84,7 @@ func bridgeGetter( func fullGetter( storeGetter *getters.StoreGetter, shrexGetter *getters.ShrexGetter, - ipldGetter *getters.IPLDGetter, + shwapGetter *shwap.Getter, cfg Config, ) share.Getter { var cascade []share.Getter @@ -111,6 +92,6 @@ func fullGetter( if cfg.UseShareExchange { cascade = append(cascade, shrexGetter) } - cascade = append(cascade, ipldGetter) + cascade = append(cascade, shwapGetter) return getters.NewCascadeGetter(cascade) } diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index 3fa55b2d35..b58d833c41 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -3,7 +3,6 @@ package share import ( "context" - "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/p2p/net/conngater" "go.uber.org/fx" @@ -17,13 +16,14 @@ import ( "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/availability/full" "github.com/celestiaorg/celestia-node/share/availability/light" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/store" ) func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option { @@ -83,7 +83,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fx.Provide(getters.NewStoreGetter), fx.Invoke(func(edsSrv *shrexeds.Server, ndSrc *shrexnd.Server) {}), fx.Provide(fx.Annotate( - func(host host.Host, store *eds.Store, network modp2p.Network) (*shrexeds.Server, error) { + func(host host.Host, store *store.Store, network modp2p.Network) (*shrexeds.Server, error) { cfg.ShrExEDSParams.WithNetworkID(network.String()) return shrexeds.NewServer(cfg.ShrExEDSParams, host, store) }, @@ -97,7 +97,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fx.Provide(fx.Annotate( func( host host.Host, - store *eds.Store, + store *store.Store, network modp2p.Network, ) (*shrexnd.Server, error) { cfg.ShrExNDParams.WithNetworkID(network.String()) @@ -111,19 +111,9 @@ func ConstructModule(tp node.Type, cfg 
*Config, options ...fx.Option) fx.Option }), )), fx.Provide(fx.Annotate( - func(path node.StorePath, ds datastore.Batching) (*eds.Store, error) { - return eds.NewStore(cfg.EDSStoreParams, string(path), ds) + func(path node.StorePath) (*store.Store, error) { + return store.NewStore(cfg.EDSStoreParams, string(path)) }, - fx.OnStart(func(ctx context.Context, store *eds.Store) error { - err := store.Start(ctx) - if err != nil { - return err - } - return ensureEmptyCARExists(ctx, store) - }), - fx.OnStop(func(ctx context.Context, store *eds.Store) error { - return store.Stop(ctx) - }), )), fx.Provide(fx.Annotate( full.NewShareAvailability, @@ -188,7 +178,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option baseComponents, bridgeAndFullComponents, shrexGetterComponents, - fx.Provide(getters.NewIPLDGetter), + fx.Provide(shwap.NewGetter), fx.Provide(fullGetter), ) case node.Light: @@ -203,7 +193,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option peerManagerWithShrexPools, shrexGetterComponents, fx.Invoke(ensureEmptyEDSInBS), - fx.Provide(getters.NewIPLDGetter), + fx.Provide(shwap.NewGetter), fx.Provide(lightGetter), // shrexsub broadcaster stub for daser fx.Provide(func() shrexsub.BroadcastFn { diff --git a/nodebuilder/share/opts.go b/nodebuilder/share/opts.go index e236847f41..d0d381d5f4 100644 --- a/nodebuilder/share/opts.go +++ b/nodebuilder/share/opts.go @@ -1,12 +1,12 @@ package share import ( - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/store" ) // WithPeerManagerMetrics is a utility function to turn on peer manager metrics and that is @@ -43,6 +43,6 @@ func WithShrexGetterMetrics(sg *getters.ShrexGetter) error { return sg.WithMetrics() } -func WithStoreMetrics(s *eds.Store) error { +func WithStoreMetrics(s *store.Store) error { return s.WithMetrics() } diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index bea2d61236..cf62a9a065 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/share/store" "github.com/filecoin-project/dagstore" logging "github.com/ipfs/go-log/v2" @@ -14,6 +13,7 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/byzantine" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/store" ) var log = logging.Logger("share/full") diff --git a/share/availability/full/testing.go b/share/availability/full/testing.go index 46e97581f2..0dfb167e01 100644 --- a/share/availability/full/testing.go +++ b/share/availability/full/testing.go @@ -1,64 +1,47 @@ package full -import ( - "context" - "testing" - "time" - - "github.com/ipfs/go-datastore" - routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" - "github.com/libp2p/go-libp2p/p2p/discovery/routing" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/eds" - 
"github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/p2p/discovery" -) - +// FIXME: rework testing pkg // GetterWithRandSquare provides a share.Getter filled with 'n' NMT // trees of 'n' random shares, essentially storing a whole square. -func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *share.Root) { - bServ := ipld.NewMemBlockservice() - getter := getters.NewIPLDGetter(bServ) - return getter, availability_test.RandFillBS(t, n, bServ) -} - -// RandNode creates a Full Node filled with a random block of the given size. -func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) { - nd := Node(dn) - return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) -} - -// Node creates a new empty Full Node. -func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { - nd := dn.NewTestNode() - nd.Getter = getters.NewIPLDGetter(nd.BlockService) - nd.Availability = TestAvailability(dn.T, nd.Getter) - return nd -} - -func TestAvailability(t *testing.T, getter share.Getter) *ShareAvailability { - params := discovery.DefaultParameters() - params.AdvertiseInterval = time.Second - params.PeersLimit = 10 - disc, err := discovery.NewDiscovery( - params, - nil, - routing.NewRoutingDiscovery(routinghelpers.Null{}), - "full", - ) - require.NoError(t, err) - store, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), datastore.NewMapDatastore()) - require.NoError(t, err) - err = store.Start(context.Background()) - require.NoError(t, err) - - t.Cleanup(func() { - err = store.Stop(context.Background()) - require.NoError(t, err) - }) - return NewShareAvailability(store, getter, disc) -} +//func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *share.Root) { +// bServ := ipld.NewMemBlockservice() +// getter := getters.NewIPLDGetter(bServ) +// return getter, availability_test.RandFillBS(t, n, bServ) +//} +// +//// RandNode creates a Full Node filled with a random block of the given size. +//func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) { +// nd := Node(dn) +// return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) +//} +// +//// Node creates a new empty Full Node. 
+//func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { +// nd := dn.NewTestNode() +// nd.Getter = getters.NewIPLDGetter(nd.BlockService) +// nd.Availability = TestAvailability(dn.T, nd.Getter) +// return nd +//} +// +//func TestAvailability(t *testing.T, getter share.Getter) *ShareAvailability { +// params := discovery.DefaultParameters() +// params.AdvertiseInterval = time.Second +// params.PeersLimit = 10 +// disc, err := discovery.NewDiscovery( +// params, +// nil, +// routing.NewRoutingDiscovery(routinghelpers.Null{}), +// "full", +// ) +// require.NoError(t, err) +// store, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), datastore.NewMapDatastore()) +// require.NoError(t, err) +// err = store.Start(context.Background()) +// require.NoError(t, err) +// +// t.Cleanup(func() { +// err = store.Stop(context.Background()) +// require.NoError(t, err) +// }) +// return NewShareAvailability(store, getter, disc) +//} diff --git a/share/availability/light/availability.go b/share/availability/light/availability.go index 1d35542344..4b6ec28778 100644 --- a/share/availability/light/availability.go +++ b/share/availability/light/availability.go @@ -13,7 +13,6 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/getters" ) var ( @@ -90,10 +89,6 @@ func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header return err } - // indicate to the share.Getter that a blockservice session should be created. This - // functionality is optional and must be supported by the used share.Getter. - ctx = getters.WithSession(ctx) - log.Debugw("starting sampling session", "root", dah.String()) errs := make(chan error, len(samples)) for _, s := range samples { diff --git a/share/availability/light/testing.go b/share/availability/light/testing.go index 9efc9ff14a..52bb3a89d5 100644 --- a/share/availability/light/testing.go +++ b/share/availability/light/testing.go @@ -1,60 +1,47 @@ package light -import ( - "testing" - - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/go-datastore" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" -) - +// FIXME: rework testing pkg // GetterWithRandSquare provides a share.Getter filled with 'n' NMT trees of 'n' random shares, // essentially storing a whole square. -func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *header.ExtendedHeader) { - bServ := ipld.NewMemBlockservice() - getter := getters.NewIPLDGetter(bServ) - root := availability_test.RandFillBS(t, n, bServ) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - - return getter, eh -} - -// EmptyGetter provides an unfilled share.Getter with corresponding blockservice.BlockService than -// can be filled by the test. -func EmptyGetter() (share.Getter, blockservice.BlockService) { - bServ := ipld.NewMemBlockservice() - getter := getters.NewIPLDGetter(bServ) - return getter, bServ -} - -// RandNode creates a Light Node filled with a random block of the given size. 
-func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) {
-	nd := Node(dn)
-	return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService)
-}
-
-// Node creates a new empty Light Node.
-func Node(dn *availability_test.TestDagNet) *availability_test.TestNode {
-	nd := dn.NewTestNode()
-	nd.Getter = getters.NewIPLDGetter(nd.BlockService)
-	nd.Availability = TestAvailability(nd.Getter)
-	return nd
-}
-
-func TestAvailability(getter share.Getter) *ShareAvailability {
-	ds := datastore.NewMapDatastore()
-	return NewShareAvailability(getter, ds)
-}
-
-func SubNetNode(sn *availability_test.SubNet) *availability_test.TestNode {
-	nd := Node(sn.TestDagNet)
-	sn.AddNode(nd)
-	return nd
-}
+//func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *header.ExtendedHeader) {
+//	bServ := ipld.NewMemBlockservice()
+//	getter := getters.NewIPLDGetter(bServ)
+//	root := availability_test.RandFillBS(t, n, bServ)
+//	eh := headertest.RandExtendedHeader(t)
+//	eh.DAH = root
+//
+//	return getter, eh
+//}
+//
+//// EmptyGetter provides an unfilled share.Getter with a corresponding blockservice.BlockService that
+//// can be filled by the test.
+//func EmptyGetter() (share.Getter, blockservice.BlockService) {
+//	bServ := ipld.NewMemBlockservice()
+//	getter := getters.NewIPLDGetter(bServ)
+//	return getter, bServ
+//}
+//
+//// RandNode creates a Light Node filled with a random block of the given size.
+//func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) {
+//	nd := Node(dn)
+//	return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService)
+//}
+//
+//// Node creates a new empty Light Node.
+//func Node(dn *availability_test.TestDagNet) *availability_test.TestNode {
+//	nd := dn.NewTestNode()
+//	nd.Getter = getters.NewIPLDGetter(nd.BlockService)
+//	nd.Availability = TestAvailability(nd.Getter)
+//	return nd
+//}
+//
+//func TestAvailability(getter share.Getter) *ShareAvailability {
+//	ds := datastore.NewMapDatastore()
+//	return NewShareAvailability(getter, ds)
+//}
+//
+//func SubNetNode(sn *availability_test.SubNet) *availability_test.TestNode {
+//	nd := Node(sn.TestDagNet)
+//	sn.AddNode(nd)
+//	return nd
+//}
diff --git a/share/eds/adapters.go b/share/eds/adapters.go
deleted file mode 100644
index 8bf2340d91..0000000000
--- a/share/eds/adapters.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package eds
-
-import (
-	"context"
-	"sync"
-
-	"github.com/filecoin-project/dagstore"
-	"github.com/ipfs/boxo/blockservice"
-	blocks "github.com/ipfs/go-block-format"
-	"github.com/ipfs/go-cid"
-)
-
-var _ blockservice.BlockGetter = (*BlockGetter)(nil)
-
-// NewBlockGetter creates new blockservice.BlockGetter adapter from dagstore.ReadBlockstore
-func NewBlockGetter(store dagstore.ReadBlockstore) *BlockGetter {
-	return &BlockGetter{store: store}
-}
-
-// BlockGetter is an adapter for dagstore.ReadBlockstore to implement blockservice.BlockGetter
-// interface.
-type BlockGetter struct {
-	store dagstore.ReadBlockstore
-}
-
-// GetBlock gets the requested block by the given CID.
-func (bg *BlockGetter) GetBlock(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
-	return bg.store.Get(ctx, cid)
-}
-
-// GetBlocks does a batch request for the given cids, returning blocks as
-// they are found, in no particular order.
-//
-// It implements blockservice.BlockGetter interface, that requires:
-// It may not be able to find all requested blocks (or the context may
-// be canceled).
In that case, it will close the channel early. It is up -// to the consumer to detect this situation and keep track which blocks -// it has received and which it hasn't. -func (bg *BlockGetter) GetBlocks(ctx context.Context, cids []cid.Cid) <-chan blocks.Block { - bCh := make(chan blocks.Block) - - go func() { - var wg sync.WaitGroup - wg.Add(len(cids)) - for _, c := range cids { - go func(cid cid.Cid) { - defer wg.Done() - block, err := bg.store.Get(ctx, cid) - if err != nil { - log.Debugw("getblocks: error getting block by cid", "cid", cid, "error", err) - return - } - - select { - case bCh <- block: - case <-ctx.Done(): - return - } - }(c) - } - wg.Wait() - close(bCh) - }() - - return bCh -} diff --git a/share/eds/adapters_test.go b/share/eds/adapters_test.go deleted file mode 100644 index 70165b81c8..0000000000 --- a/share/eds/adapters_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package eds - -import ( - "context" - "errors" - mrand "math/rand" - "sort" - "testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share/ipld" -) - -func TestBlockGetter_GetBlocks(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - cids := randCIDs(t, 32) - // sort cids in asc order - sort.Slice(cids, func(i, j int) bool { - return cids[i].String() < cids[j].String() - }) - - bg := &BlockGetter{store: rbsMock{}} - blocksCh := bg.GetBlocks(context.Background(), cids) - - // collect blocks from channel - blocks := make([]blocks.Block, 0, len(cids)) - for block := range blocksCh { - blocks = append(blocks, block) - } - - // sort blocks in cid asc order - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Cid().String() < blocks[j].Cid().String() - }) - - // validate results - require.Equal(t, len(cids), len(blocks)) - for i, block := range blocks { - require.Equal(t, cids[i].String(), block.Cid().String()) - } - }) - t.Run("retrieval error", func(t *testing.T) { - cids := randCIDs(t, 32) - - // split cids into failed and succeeded - failedLen := mrand.Intn(len(cids)-1) + 1 - failed := make(map[cid.Cid]struct{}, failedLen) - succeeded := make([]cid.Cid, 0, len(cids)-failedLen) - for i, cid := range cids { - if i < failedLen { - failed[cid] = struct{}{} - continue - } - succeeded = append(succeeded, cid) - } - - // sort succeeded cids in asc order - sort.Slice(succeeded, func(i, j int) bool { - return succeeded[i].String() < succeeded[j].String() - }) - - bg := &BlockGetter{store: rbsMock{failed: failed}} - blocksCh := bg.GetBlocks(context.Background(), cids) - - // collect blocks from channel - blocks := make([]blocks.Block, 0, len(cids)) - for block := range blocksCh { - blocks = append(blocks, block) - } - - // sort blocks in cid asc order - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Cid().String() < blocks[j].Cid().String() - }) - - // validate results - require.Equal(t, len(succeeded), len(blocks)) - for i, block := range blocks { - require.Equal(t, succeeded[i].String(), block.Cid().String()) - } - }) - t.Run("retrieval timeout", func(t *testing.T) { - cids := randCIDs(t, 128) - - bg := &BlockGetter{ - store: rbsMock{}, - } - - // cancel the context before any blocks are collected - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - blocksCh := bg.GetBlocks(ctx, cids) - - // pretend nobody is reading from blocksCh after context is canceled - time.Sleep(50 * time.Millisecond) - - // blocksCh should be closed indicating GetBlocks exited - 
select { - case _, ok := <-blocksCh: - require.False(t, ok) - default: - t.Error("channel is not closed on canceled context") - } - }) -} - -// rbsMock is a dagstore.ReadBlockstore mock -type rbsMock struct { - failed map[cid.Cid]struct{} -} - -func (r rbsMock) Has(context.Context, cid.Cid) (bool, error) { - panic("implement me") -} - -func (r rbsMock) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { - // return error for failed items - if _, ok := r.failed[cid]; ok { - return nil, errors.New("not found") - } - - return blocks.NewBlockWithCid(nil, cid) -} - -func (r rbsMock) GetSize(context.Context, cid.Cid) (int, error) { - panic("implement me") -} - -func (r rbsMock) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - panic("implement me") -} - -func (r rbsMock) HashOnRead(bool) { - panic("implement me") -} - -func randCIDs(t *testing.T, n int) []cid.Cid { - cids := make([]cid.Cid, n) - for i := range cids { - cids[i] = ipld.RandNamespacedCID(t) - } - return cids -} diff --git a/share/eds/blockstore.go b/share/eds/blockstore.go deleted file mode 100644 index e44601870e..0000000000 --- a/share/eds/blockstore.go +++ /dev/null @@ -1,168 +0,0 @@ -package eds - -import ( - "context" - "errors" - "fmt" - - bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/boxo/datastore/dshelp" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - ipld "github.com/ipfs/go-ipld-format" -) - -var _ bstore.Blockstore = (*blockstore)(nil) - -var ( - blockstoreCacheKey = datastore.NewKey("bs-cache") - errUnsupportedOperation = errors.New("unsupported operation") -) - -// blockstore implements the store.Blockstore interface on an EDSStore. -// The lru cache approach is heavily inspired by the existing implementation upstream. -// We simplified the design to not support multiple shards per key, call GetSize directly on the -// underlying RO blockstore, and do not throw errors on Put/PutMany. Also, we do not abstract away -// the blockstore operations. -// -// The intuition here is that each CAR file is its own blockstore, so we need this top level -// implementation to allow for the blockstore operations to be routed to the underlying stores. 
-type blockstore struct { - store *Store - ds datastore.Batching -} - -func newBlockstore(store *Store, ds datastore.Batching) *blockstore { - return &blockstore{ - store: store, - ds: namespace.Wrap(ds, blockstoreCacheKey), - } -} - -func (bs *blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { - keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) - if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - // key wasn't found in top level blockstore, but could be in datastore while being reconstructed - dsHas, dsErr := bs.ds.Has(ctx, dshelp.MultihashToDsKey(cid.Hash())) - if dsErr != nil { - return false, nil - } - return dsHas, nil - } - if err != nil { - return false, err - } - - return len(keys) > 0, nil -} - -func (bs *blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) - if err == nil { - defer closeAndLog("blockstore", blockstr) - return blockstr.Get(ctx, cid) - } - - if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - k := dshelp.MultihashToDsKey(cid.Hash()) - blockData, err := bs.ds.Get(ctx, k) - if err == nil { - return blocks.NewBlockWithCid(blockData, cid) - } - // nmt's GetNode expects an ipld.ErrNotFound when a cid is not found. - return nil, ipld.ErrNotFound{Cid: cid} - } - - log.Debugf("failed to get blockstore for cid %s: %s", cid, err) - return nil, err -} - -func (bs *blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { - blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) - if err == nil { - defer closeAndLog("blockstore", blockstr) - return blockstr.GetSize(ctx, cid) - } - - if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - k := dshelp.MultihashToDsKey(cid.Hash()) - size, err := bs.ds.GetSize(ctx, k) - if err == nil { - return size, nil - } - // nmt's GetSize expects an ipld.ErrNotFound when a cid is not found. - return 0, ipld.ErrNotFound{Cid: cid} - } - - log.Debugf("failed to get size for cid %s: %s", cid, err) - return 0, err -} - -func (bs *blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { - k := dshelp.MultihashToDsKey(cid.Hash()) - return bs.ds.Delete(ctx, k) -} - -func (bs *blockstore) Put(ctx context.Context, blk blocks.Block) error { - k := dshelp.MultihashToDsKey(blk.Cid().Hash()) - // note: we leave duplicate resolution to the underlying datastore - return bs.ds.Put(ctx, k, blk.RawData()) -} - -func (bs *blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { - if len(blocks) == 1 { - // performance fast-path - return bs.Put(ctx, blocks[0]) - } - - t, err := bs.ds.Batch(ctx) - if err != nil { - return err - } - for _, b := range blocks { - k := dshelp.MultihashToDsKey(b.Cid().Hash()) - err = t.Put(ctx, k, b.RawData()) - if err != nil { - return err - } - } - return t.Commit(ctx) -} - -// AllKeysChan is a noop on the EDS blockstore because the keys are not stored in a single CAR file. -func (bs *blockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - return nil, errUnsupportedOperation -} - -// HashOnRead is a noop on the EDS blockstore but an error cannot be returned due to the method -// signature from the blockstore interface. -func (bs *blockstore) HashOnRead(bool) { - log.Warnf("HashOnRead is a noop on the EDS blockstore") -} - -// getReadOnlyBlockstore finds the underlying blockstore of the shard that contains the given CID. 
-func (bs *blockstore) getReadOnlyBlockstore(ctx context.Context, cid cid.Cid) (*BlockstoreCloser, error) { - keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) - if errors.Is(err, datastore.ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - return nil, ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("failed to find shards containing multihash: %w", err) - } - - // check if either cache contains an accessor - shardKey := keys[0] - accessor, err := bs.store.cache.Load().Get(shardKey) - if err == nil { - return blockstoreCloser(accessor) - } - - // load accessor to the blockstore cache and use it as blockstoreCloser - accessor, err = bs.store.cache.Load().Second().GetOrLoad(ctx, shardKey, bs.store.getAccessor) - if err != nil { - return nil, fmt.Errorf("failed to get accessor for shard %s: %w", shardKey, err) - } - return blockstoreCloser(accessor) -} diff --git a/share/eds/blockstore_test.go b/share/eds/blockstore_test.go deleted file mode 100644 index d9dbf7ed30..0000000000 --- a/share/eds/blockstore_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package eds - -import ( - "context" - "io" - "testing" - - "github.com/filecoin-project/dagstore" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - ipld2 "github.com/celestiaorg/celestia-node/share/ipld" -) - -// TestBlockstore_Operations tests Has, Get, and GetSize on the top level eds.Store blockstore. -// It verifies that these operations are valid and successful on all blocks stored in a CAR file. -func TestBlockstore_Operations(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - r, err := edsStore.GetCAR(ctx, dah.Hash()) - require.NoError(t, err) - carReader, err := car.NewCarReader(r) - require.NoError(t, err) - - topLevelBS := edsStore.Blockstore() - carBS, err := edsStore.CARBlockstore(ctx, dah.Hash()) - require.NoError(t, err) - defer func() { - require.NoError(t, carBS.Close()) - }() - - root, err := edsStore.GetDAH(ctx, dah.Hash()) - require.NoError(t, err) - require.True(t, dah.Equals(root)) - - blockstores := []dagstore.ReadBlockstore{topLevelBS, carBS} - - for { - next, err := carReader.Next() - if err != nil { - require.ErrorIs(t, err, io.EOF) - break - } - blockCid := next.Cid() - randomCid := ipld2.RandNamespacedCID(t) - - for _, bs := range blockstores { - // test GetSize - has, err := bs.Has(ctx, blockCid) - require.NoError(t, err, "blockstore.Has could not find root CID") - require.True(t, has) - - // test GetSize - block, err := bs.Get(ctx, blockCid) - assert.NoError(t, err, "blockstore.Get could not get a leaf CID") - assert.Equal(t, block.Cid(), blockCid) - assert.Equal(t, block.RawData(), next.RawData()) - - // test Get (cid not found) - _, err = bs.Get(ctx, randomCid) - require.ErrorAs(t, err, &ipld.ErrNotFound{Cid: randomCid}) - - // test GetSize - size, err := bs.GetSize(ctx, blockCid) - assert.NotZerof(t, size, "blocksize.GetSize reported a root block from blockstore was empty") - assert.NoError(t, err) - } - } -} diff --git a/share/eds/cache/accessor_cache.go b/share/eds/cache/accessor_cache.go deleted file mode 100644 index 6f937818f8..0000000000 --- a/share/eds/cache/accessor_cache.go +++ /dev/null @@ -1,262 +0,0 @@ -package cache - 
-import ( - "context" - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - "time" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - lru "github.com/hashicorp/golang-lru/v2" -) - -const defaultCloseTimeout = time.Minute - -var _ Cache = (*AccessorCache)(nil) - -// AccessorCache implements the Cache interface using an LRU cache backend. -type AccessorCache struct { - // The name is a prefix that will be used for cache metrics if they are enabled. - name string - // stripedLocks prevents simultaneous RW access to the blockstore cache for a shard. Instead - // of using only one lock or one lock per key, we stripe the shard keys across 256 locks. 256 is - // chosen because it 0-255 is the range of values we get looking at the last byte of the key. - stripedLocks [256]sync.Mutex - // Caches the blockstore for a given shard for shard read affinity, i.e., further reads will likely - // be from the same shard. Maps (shard key -> blockstore). - cache *lru.Cache[shard.Key, *accessorWithBlockstore] - - metrics *metrics -} - -// accessorWithBlockstore is the value that we store in the blockstore Cache. It implements the -// Accessor interface. -type accessorWithBlockstore struct { - sync.RWMutex - shardAccessor Accessor - // The blockstore is stored separately because each access to the blockstore over the shard - // accessor reopens the underlying CAR. - bs dagstore.ReadBlockstore - - done chan struct{} - refs atomic.Int32 - isClosed bool -} - -// Blockstore implements the Blockstore of the Accessor interface. It creates the blockstore on the -// first request and reuses the created instance for all subsequent requests. -func (s *accessorWithBlockstore) Blockstore() (dagstore.ReadBlockstore, error) { - s.Lock() - defer s.Unlock() - var err error - if s.bs == nil { - s.bs, err = s.shardAccessor.Blockstore() - } - return s.bs, err -} - -// Reader returns a new copy of the reader to read data. -func (s *accessorWithBlockstore) Reader() io.Reader { - return s.shardAccessor.Reader() -} - -func (s *accessorWithBlockstore) addRef() error { - s.Lock() - defer s.Unlock() - if s.isClosed { - // item is already closed and soon will be removed after all refs are released - return errCacheMiss - } - if s.refs.Add(1) == 1 { - // there were no refs previously and done channel was closed, reopen it by recreating - s.done = make(chan struct{}) - } - return nil -} - -func (s *accessorWithBlockstore) removeRef() { - s.Lock() - defer s.Unlock() - if s.refs.Add(-1) <= 0 { - close(s.done) - } -} - -func (s *accessorWithBlockstore) close() error { - s.Lock() - if s.isClosed { - s.Unlock() - // accessor will be closed by another goroutine - return nil - } - s.isClosed = true - done := s.done - s.Unlock() - - select { - case <-done: - case <-time.After(defaultCloseTimeout): - return fmt.Errorf("closing accessor, some readers didn't close the accessor within timeout,"+ - " amount left: %v", s.refs.Load()) - } - if err := s.shardAccessor.Close(); err != nil { - return fmt.Errorf("closing accessor: %w", err) - } - return nil -} - -func NewAccessorCache(name string, cacheSize int) (*AccessorCache, error) { - bc := &AccessorCache{ - name: name, - } - // Instantiate the blockstore Cache. - bslru, err := lru.NewWithEvict[shard.Key, *accessorWithBlockstore](cacheSize, bc.evictFn()) - if err != nil { - return nil, fmt.Errorf("failed to instantiate blockstore cache: %w", err) - } - bc.cache = bslru - return bc, nil -} - -// evictFn will be invoked when an item is evicted from the cache. 
-func (bc *AccessorCache) evictFn() func(shard.Key, *accessorWithBlockstore) { - return func(_ shard.Key, abs *accessorWithBlockstore) { - // we can release accessor from cache early, while it is being closed in parallel routine - go func() { - err := abs.close() - if err != nil { - bc.metrics.observeEvicted(true) - log.Errorf("couldn't close accessor after cache eviction: %s", err) - return - } - bc.metrics.observeEvicted(false) - }() - } -} - -// Get retrieves the Accessor for a given shard key from the Cache. If the Accessor is not in -// the Cache, it returns an errCacheMiss. -func (bc *AccessorCache) Get(key shard.Key) (Accessor, error) { - lk := &bc.stripedLocks[shardKeyToStriped(key)] - lk.Lock() - defer lk.Unlock() - - accessor, err := bc.get(key) - if err != nil { - bc.metrics.observeGet(false) - return nil, err - } - bc.metrics.observeGet(true) - return newRefCloser(accessor) -} - -func (bc *AccessorCache) get(key shard.Key) (*accessorWithBlockstore, error) { - abs, ok := bc.cache.Get(key) - if !ok { - return nil, errCacheMiss - } - return abs, nil -} - -// GetOrLoad attempts to get an item from the cache, and if not found, invokes -// the provided loader function to load it. -func (bc *AccessorCache) GetOrLoad( - ctx context.Context, - key shard.Key, - loader func(context.Context, shard.Key) (Accessor, error), -) (Accessor, error) { - lk := &bc.stripedLocks[shardKeyToStriped(key)] - lk.Lock() - defer lk.Unlock() - - abs, err := bc.get(key) - if err == nil { - // return accessor, only of it is not closed yet - accessorWithRef, err := newRefCloser(abs) - if err == nil { - bc.metrics.observeGet(true) - return accessorWithRef, nil - } - } - - // accessor not found in cache, so load new one using loader - accessor, err := loader(ctx, key) - if err != nil { - return nil, fmt.Errorf("unable to load accessor: %w", err) - } - - abs = &accessorWithBlockstore{ - shardAccessor: accessor, - } - - // Create a new accessor first to increment the reference count in it, so it cannot get evicted - // from the inner lru cache before it is used. - accessorWithRef, err := newRefCloser(abs) - if err != nil { - return nil, err - } - bc.cache.Add(key, abs) - return accessorWithRef, nil -} - -// Remove removes the Accessor for a given key from the cache. -func (bc *AccessorCache) Remove(key shard.Key) error { - lk := &bc.stripedLocks[shardKeyToStriped(key)] - lk.Lock() - accessor, err := bc.get(key) - lk.Unlock() - if errors.Is(err, errCacheMiss) { - // item is not in cache - return nil - } - if err = accessor.close(); err != nil { - return err - } - // The cache will call evictFn on removal, where accessor close will be called. - bc.cache.Remove(key) - return nil -} - -// EnableMetrics enables metrics for the cache. -func (bc *AccessorCache) EnableMetrics() error { - var err error - bc.metrics, err = newMetrics(bc) - return err -} - -// refCloser manages references to accessor from provided reader and removes the ref, when the -// Close is called -type refCloser struct { - *accessorWithBlockstore - closeFn func() -} - -// newRefCloser creates new refCloser -func newRefCloser(abs *accessorWithBlockstore) (*refCloser, error) { - if err := abs.addRef(); err != nil { - return nil, err - } - - var closeOnce sync.Once - return &refCloser{ - accessorWithBlockstore: abs, - closeFn: func() { - closeOnce.Do(abs.removeRef) - }, - }, nil -} - -func (c *refCloser) Close() error { - c.closeFn() - return nil -} - -// shardKeyToStriped returns the index of the lock to use for a given shard key. 
We use the last -// byte of the shard key as the pseudo-random index. -func shardKeyToStriped(sk shard.Key) byte { - return sk.String()[len(sk.String())-1] -} diff --git a/share/eds/cache/cache.go b/share/eds/cache/cache.go deleted file mode 100644 index 13e207d7c0..0000000000 --- a/share/eds/cache/cache.go +++ /dev/null @@ -1,49 +0,0 @@ -package cache - -import ( - "context" - "errors" - "io" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - logging "github.com/ipfs/go-log/v2" - "go.opentelemetry.io/otel" -) - -var ( - log = logging.Logger("share/eds/cache") - meter = otel.Meter("eds_store_cache") -) - -var ( - errCacheMiss = errors.New("accessor not found in blockstore cache") -) - -// Cache is an interface that defines the basic Cache operations. -type Cache interface { - // Get retrieves an item from the Cache. - Get(shard.Key) (Accessor, error) - - // GetOrLoad attempts to get an item from the Cache and, if not found, invokes - // the provided loader function to load it into the Cache. - GetOrLoad( - ctx context.Context, - key shard.Key, - loader func(context.Context, shard.Key) (Accessor, error), - ) (Accessor, error) - - // Remove removes an item from Cache. - Remove(shard.Key) error - - // EnableMetrics enables metrics in Cache - EnableMetrics() error -} - -// Accessor is a interface type returned by cache, that allows to read raw data by reader or create -// readblockstore -type Accessor interface { - Blockstore() (dagstore.ReadBlockstore, error) - Reader() io.Reader - io.Closer -} diff --git a/share/eds/cache/doublecache.go b/share/eds/cache/doublecache.go deleted file mode 100644 index a63eadee9e..0000000000 --- a/share/eds/cache/doublecache.go +++ /dev/null @@ -1,51 +0,0 @@ -package cache - -import ( - "errors" - - "github.com/filecoin-project/dagstore/shard" -) - -// DoubleCache represents a Cache that looks into multiple caches one by one. -type DoubleCache struct { - first, second Cache -} - -// NewDoubleCache creates a new DoubleCache with the provided caches. -func NewDoubleCache(first, second Cache) *DoubleCache { - return &DoubleCache{ - first: first, - second: second, - } -} - -// Get looks for an item in all the caches one by one and returns the Cache found item. 
-func (mc *DoubleCache) Get(key shard.Key) (Accessor, error) { - ac, err := mc.first.Get(key) - if err == nil { - return ac, nil - } - return mc.second.Get(key) -} - -// Remove removes an item from all underlying caches -func (mc *DoubleCache) Remove(key shard.Key) error { - err1 := mc.first.Remove(key) - err2 := mc.second.Remove(key) - return errors.Join(err1, err2) -} - -func (mc *DoubleCache) First() Cache { - return mc.first -} - -func (mc *DoubleCache) Second() Cache { - return mc.second -} - -func (mc *DoubleCache) EnableMetrics() error { - if err := mc.first.EnableMetrics(); err != nil { - return err - } - return mc.second.EnableMetrics() -} diff --git a/share/eds/cache/metrics.go b/share/eds/cache/metrics.go deleted file mode 100644 index b2e3bec8d8..0000000000 --- a/share/eds/cache/metrics.go +++ /dev/null @@ -1,69 +0,0 @@ -package cache - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" -) - -const ( - cacheFoundKey = "found" - failedKey = "failed" -) - -type metrics struct { - getCounter metric.Int64Counter - evictedCounter metric.Int64Counter -} - -func newMetrics(bc *AccessorCache) (*metrics, error) { - metricsPrefix := "eds_blockstore_cache_" + bc.name - - evictedCounter, err := meter.Int64Counter(metricsPrefix+"_evicted_counter", - metric.WithDescription("eds blockstore cache evicted event counter")) - if err != nil { - return nil, err - } - - getCounter, err := meter.Int64Counter(metricsPrefix+"_get_counter", - metric.WithDescription("eds blockstore cache evicted event counter")) - if err != nil { - return nil, err - } - - cacheSize, err := meter.Int64ObservableGauge(metricsPrefix+"_size", - metric.WithDescription("total amount of items in blockstore cache"), - ) - if err != nil { - return nil, err - } - - callback := func(ctx context.Context, observer metric.Observer) error { - observer.ObserveInt64(cacheSize, int64(bc.cache.Len())) - return nil - } - _, err = meter.RegisterCallback(callback, cacheSize) - - return &metrics{ - getCounter: getCounter, - evictedCounter: evictedCounter, - }, err -} - -func (m *metrics) observeEvicted(failed bool) { - if m == nil { - return - } - m.evictedCounter.Add(context.Background(), 1, - metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeGet(found bool) { - if m == nil { - return - } - m.getCounter.Add(context.Background(), 1, metric.WithAttributes( - attribute.Bool(cacheFoundKey, found))) -} diff --git a/share/eds/cache/noop.go b/share/eds/cache/noop.go deleted file mode 100644 index 0a1a39ec7e..0000000000 --- a/share/eds/cache/noop.go +++ /dev/null @@ -1,50 +0,0 @@ -package cache - -import ( - "context" - "io" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" -) - -var _ Cache = (*NoopCache)(nil) - -// NoopCache implements noop version of Cache interface -type NoopCache struct{} - -func (n NoopCache) Get(shard.Key) (Accessor, error) { - return nil, errCacheMiss -} - -func (n NoopCache) GetOrLoad( - context.Context, shard.Key, - func(context.Context, shard.Key) (Accessor, error), -) (Accessor, error) { - return NoopAccessor{}, nil -} - -func (n NoopCache) Remove(shard.Key) error { - return nil -} - -func (n NoopCache) EnableMetrics() error { - return nil -} - -var _ Accessor = (*NoopAccessor)(nil) - -// NoopAccessor implements noop version of Accessor interface -type NoopAccessor struct{} - -func (n NoopAccessor) Blockstore() (dagstore.ReadBlockstore, error) { - return nil, nil -} - -func (n NoopAccessor) 
Reader() io.Reader { - return nil -} - -func (n NoopAccessor) Close() error { - return nil -} diff --git a/share/eds/eds.go b/share/eds/eds.go deleted file mode 100644 index 64e12e162b..0000000000 --- a/share/eds/eds.go +++ /dev/null @@ -1,274 +0,0 @@ -package eds - -import ( - "bytes" - "context" - "crypto/sha256" - "errors" - "fmt" - "io" - "math" - - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/ipld/go-car/util" - - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -var ErrEmptySquare = errors.New("share: importing empty data") - -// WriteEDS writes the entire EDS into the given io.Writer as CARv1 file. -// This includes all shares in quadrant order, followed by all inner nodes of the NMT tree. -// Order: [ Carv1Header | Q1 | Q2 | Q3 | Q4 | inner nodes ] -// For more information about the header: https://ipld.io/specs/transport/car/carv1/#header -func WriteEDS(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) (err error) { - ctx, span := tracer.Start(ctx, "write-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - // Creates and writes Carv1Header. Roots are the eds Row + Col roots - err = writeHeader(eds, w) - if err != nil { - return fmt.Errorf("share: writing carv1 header: %w", err) - } - // Iterates over shares in quadrant order via eds.GetCell - err = writeQuadrants(eds, w) - if err != nil { - return fmt.Errorf("share: writing shares: %w", err) - } - - // Iterates over proofs and writes them to the CAR - err = writeProofs(ctx, eds, w) - if err != nil { - return fmt.Errorf("share: writing proofs: %w", err) - } - return nil -} - -// writeHeader creates a CarV1 header using the EDS's Row and Column roots as the list of DAG roots. -func writeHeader(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { - rootCids, err := rootsToCids(eds) - if err != nil { - return fmt.Errorf("getting root cids: %w", err) - } - - return car.WriteHeader(&car.CarHeader{ - Roots: rootCids, - Version: 1, - }, w) -} - -// writeQuadrants reorders the shares to quadrant order and writes them to the CARv1 file. -func writeQuadrants(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { - hasher := nmt.NewNmtHasher(sha256.New(), share.NamespaceSize, ipld.NMTIgnoreMaxNamespace) - shares := quadrantOrder(eds) - for _, share := range shares { - leaf, err := hasher.HashLeaf(share) - if err != nil { - return fmt.Errorf("hashing share: %w", err) - } - cid, err := ipld.CidFromNamespacedSha256(leaf) - if err != nil { - return fmt.Errorf("getting cid from share: %w", err) - } - err = util.LdWrite(w, cid.Bytes(), share) - if err != nil { - return fmt.Errorf("writing share to file: %w", err) - } - } - return nil -} - -// writeProofs iterates over the in-memory blockstore's keys and writes all inner nodes to the -// CARv1 file. 
-func writeProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { - // check if proofs are collected by ipld.ProofsAdder in previous reconstructions of eds - proofs, err := getProofs(ctx, eds) - if err != nil { - return fmt.Errorf("recomputing proofs: %w", err) - } - - for id, proof := range proofs { - err := util.LdWrite(w, id.Bytes(), proof) - if err != nil { - return fmt.Errorf("writing proof to the car: %w", err) - } - } - return nil -} - -func getProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare) (map[cid.Cid][]byte, error) { - // check if there are proofs collected by ipld.ProofsAdder in previous reconstruction of eds - if adder := ipld.ProofsAdderFromCtx(ctx); adder != nil { - defer adder.Purge() - return adder.Proofs(), nil - } - - // recompute proofs from eds - shares := eds.Flattened() - shareCount := len(shares) - if shareCount == 0 { - return nil, ErrEmptySquare - } - odsWidth := int(math.Sqrt(float64(shareCount)) / 2) - - // this adder ignores leaves, so that they are not added to the store we iterate through in - // writeProofs - adder := ipld.NewProofsAdder(odsWidth*2, false) - defer adder.Purge() - - eds, err := rsmt2d.ImportExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(odsWidth), - nmt.NodeVisitor(adder.VisitFn())), - ) - if err != nil { - return nil, fmt.Errorf("recomputing data square: %w", err) - } - // compute roots - if _, err = eds.RowRoots(); err != nil { - return nil, fmt.Errorf("computing row roots: %w", err) - } - - return adder.Proofs(), nil -} - -// quadrantOrder reorders the shares in the EDS to quadrant row-by-row order, prepending the -// respective namespace to the shares. -// e.g. [ Q1 R1 | Q1 R2 | Q1 R3 | Q1 R4 | Q2 R1 | Q2 R2 .... ] -func quadrantOrder(eds *rsmt2d.ExtendedDataSquare) [][]byte { - size := eds.Width() * eds.Width() - shares := make([][]byte, size) - - quadrantWidth := int(eds.Width() / 2) - quadrantSize := quadrantWidth * quadrantWidth - for i := 0; i < quadrantWidth; i++ { - for j := 0; j < quadrantWidth; j++ { - cells := getQuadrantCells(eds, uint(i), uint(j)) - innerOffset := i*quadrantWidth + j - for quadrant := 0; quadrant < 4; quadrant++ { - shares[(quadrant*quadrantSize)+innerOffset] = prependNamespace(quadrant, cells[quadrant]) - } - } - } - return shares -} - -// getQuadrantCells returns the cell of each EDS quadrant with the passed inner-quadrant coordinates -func getQuadrantCells(eds *rsmt2d.ExtendedDataSquare, i, j uint) [][]byte { - cells := make([][]byte, 4) - quadrantWidth := eds.Width() / 2 - cells[0] = eds.GetCell(i, j) - cells[1] = eds.GetCell(i, j+quadrantWidth) - cells[2] = eds.GetCell(i+quadrantWidth, j) - cells[3] = eds.GetCell(i+quadrantWidth, j+quadrantWidth) - return cells -} - -// prependNamespace adds the namespace to the passed share if in the first quadrant, -// otherwise it adds the ParitySharesNamespace to the beginning. -func prependNamespace(quadrant int, shr share.Share) []byte { - namespacedShare := make([]byte, 0, share.NamespaceSize+share.Size) - switch quadrant { - case 0: - return append(append(namespacedShare, share.GetNamespace(shr)...), shr...) - case 1, 2, 3: - return append(append(namespacedShare, share.ParitySharesNamespace...), shr...) - default: - panic("invalid quadrant") - } -} - -// rootsToCids converts the EDS's Row and Column roots to CIDs. 
-func rootsToCids(eds *rsmt2d.ExtendedDataSquare) ([]cid.Cid, error) { - rowRoots, err := eds.RowRoots() - if err != nil { - return nil, err - } - colRoots, err := eds.ColRoots() - if err != nil { - return nil, err - } - - roots := make([][]byte, 0, len(rowRoots)+len(colRoots)) - roots = append(roots, rowRoots...) - roots = append(roots, colRoots...) - rootCids := make([]cid.Cid, len(roots)) - for i, r := range roots { - rootCids[i], err = ipld.CidFromNamespacedSha256(r) - if err != nil { - return nil, fmt.Errorf("getting cid from root: %w", err) - } - } - return rootCids, nil -} - -// ReadEDS reads the first EDS quadrant (1/4) from an io.Reader CAR file. -// Only the first quadrant will be read, which represents the original data. -// The returned EDS is guaranteed to be full and valid against the DataRoot, otherwise ReadEDS -// errors. -func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { - _, span := tracer.Start(ctx, "read-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - carReader, err := car.NewCarReader(r) - if err != nil { - return nil, fmt.Errorf("share: reading car file: %w", err) - } - - // car header includes both row and col roots in header - odsWidth := len(carReader.Header.Roots) / 4 - odsSquareSize := odsWidth * odsWidth - shares := make([][]byte, odsSquareSize) - // the first quadrant is stored directly after the header, - // so we can just read the first odsSquareSize blocks - for i := 0; i < odsSquareSize; i++ { - block, err := carReader.Next() - if err != nil { - return nil, fmt.Errorf("share: reading next car entry: %w", err) - } - // the stored first quadrant shares are wrapped with the namespace twice. - // we cut it off here, because it is added again while importing to the tree below - shares[i] = share.GetData(block.RawData()) - } - - // use proofs adder if provided, to cache collected proofs while recomputing the eds - var opts []nmt.Option - visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() - if visitor != nil { - opts = append(opts, nmt.NodeVisitor(visitor)) - } - - eds, err = rsmt2d.ComputeExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(odsWidth), opts...), - ) - if err != nil { - return nil, fmt.Errorf("share: computing eds: %w", err) - } - - newDah, err := share.NewRoot(eds) - if err != nil { - return nil, err - } - if !bytes.Equal(newDah.Hash(), root) { - return nil, fmt.Errorf( - "share: content integrity mismatch: imported root %s doesn't match expected root %s", - newDah.Hash(), - root, - ) - } - return eds, nil -} diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go deleted file mode 100644 index ffb05343b9..0000000000 --- a/share/eds/eds_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package eds - -import ( - "bytes" - "context" - "embed" - "encoding/json" - "fmt" - "os" - "testing" - - bstore "github.com/ipfs/boxo/blockstore" - ds "github.com/ipfs/go-datastore" - carv1 "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/rand" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/edstest" -) - -//go:embed "testdata/example-root.json" -var exampleRoot string - -//go:embed "testdata/example.car" -var f embed.FS - -func TestQuadrantOrder(t *testing.T) { - testCases 
:= []struct { - name string - squareSize int - }{ - {"smol", 2}, - {"still smol", 8}, - {"default mainnet", appconsts.DefaultGovMaxSquareSize}, - {"max", share.MaxSquareSize}, - } - - testShareSize := 64 - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - shares := make([][]byte, tc.squareSize*tc.squareSize) - - for i := 0; i < tc.squareSize*tc.squareSize; i++ { - shares[i] = rand.Bytes(testShareSize) - } - - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), rsmt2d.NewDefaultTree) - require.NoError(t, err) - - res := quadrantOrder(eds) - for _, s := range res { - require.Len(t, s, testShareSize+share.NamespaceSize) - } - - for q := 0; q < 4; q++ { - for i := 0; i < tc.squareSize; i++ { - for j := 0; j < tc.squareSize; j++ { - resIndex := q*tc.squareSize*tc.squareSize + i*tc.squareSize + j - edsRow := q/2*tc.squareSize + i - edsCol := (q%2)*tc.squareSize + j - - assert.Equal(t, res[resIndex], prependNamespace(q, eds.Row(uint(edsRow))[edsCol])) - } - } - } - }) - } -} - -func TestWriteEDS(t *testing.T) { - writeRandomEDS(t) -} - -func TestWriteEDSHeaderRoots(t *testing.T) { - eds := writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - reader, err := carv1.NewCarReader(f) - require.NoError(t, err, "error creating car reader") - roots, err := rootsToCids(eds) - require.NoError(t, err, "error converting roots to cids") - require.Equal(t, roots, reader.Header.Roots) -} - -func TestWriteEDSStartsWithLeaves(t *testing.T) { - eds := writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - reader, err := carv1.NewCarReader(f) - require.NoError(t, err, "error creating car reader") - block, err := reader.Next() - require.NoError(t, err, "error getting first block") - - require.Equal(t, share.GetData(block.RawData()), eds.GetCell(0, 0)) -} - -func TestWriteEDSIncludesRoots(t *testing.T) { - writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - bs := bstore.NewBlockstore(ds.NewMapDatastore()) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - loaded, err := carv1.LoadCar(ctx, bs, f) - require.NoError(t, err, "error loading car file") - for _, root := range loaded.Roots { - ok, err := bs.Has(context.Background(), root) - require.NoError(t, err, "error checking if blockstore has root") - require.True(t, ok, "blockstore does not have root") - } -} - -func TestWriteEDSInQuadrantOrder(t *testing.T) { - eds := writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - reader, err := carv1.NewCarReader(f) - require.NoError(t, err, "error creating car reader") - - shares := quadrantOrder(eds) - for i := 0; i < len(shares); i++ { - block, err := reader.Next() - require.NoError(t, err, "error getting block") - require.Equal(t, block.RawData(), shares[i]) - } -} - -func TestReadWriteRoundtrip(t *testing.T) { - eds := writeRandomEDS(t) - dah, err := share.NewRoot(eds) - require.NoError(t, err) - f := openWrittenEDS(t) - defer f.Close() - - loaded, err := ReadEDS(context.Background(), f, dah.Hash()) - require.NoError(t, err, "error reading EDS from file") - - rowRoots, err := eds.RowRoots() - require.NoError(t, err) - loadedRowRoots, err := loaded.RowRoots() - require.NoError(t, err) - require.Equal(t, rowRoots, loadedRowRoots) - - colRoots, err := eds.ColRoots() - require.NoError(t, err) - loadedColRoots, err := loaded.ColRoots() - require.NoError(t, err) - require.Equal(t, colRoots, loadedColRoots) -} - -func TestReadEDS(t *testing.T) { - f, err := f.Open("testdata/example.car") - 
require.NoError(t, err, "error opening file") - - var dah da.DataAvailabilityHeader - err = json.Unmarshal([]byte(exampleRoot), &dah) - require.NoError(t, err, "error unmarshaling example root") - - loaded, err := ReadEDS(context.Background(), f, dah.Hash()) - require.NoError(t, err, "error reading EDS from file") - rowRoots, err := loaded.RowRoots() - require.NoError(t, err) - require.Equal(t, dah.RowRoots, rowRoots) - colRoots, err := loaded.ColRoots() - require.NoError(t, err) - require.Equal(t, dah.ColumnRoots, colRoots) -} - -func TestReadEDSContentIntegrityMismatch(t *testing.T) { - writeRandomEDS(t) - dah, err := da.NewDataAvailabilityHeader(edstest.RandEDS(t, 4)) - require.NoError(t, err) - f := openWrittenEDS(t) - defer f.Close() - - _, err = ReadEDS(context.Background(), f, dah.Hash()) - require.ErrorContains(t, err, "share: content integrity mismatch: imported root") -} - -// BenchmarkReadWriteEDS benchmarks the time it takes to write and read an EDS from disk. The -// benchmark is run with a 4x4 ODS to a 64x64 ODS - a higher value can be used, but it will run for -// much longer. -func BenchmarkReadWriteEDS(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - b.Cleanup(cancel) - for originalDataWidth := 4; originalDataWidth <= 64; originalDataWidth *= 2 { - eds := edstest.RandEDS(b, originalDataWidth) - dah, err := share.NewRoot(eds) - require.NoError(b, err) - b.Run(fmt.Sprintf("Writing %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - f := new(bytes.Buffer) - err := WriteEDS(ctx, eds, f) - require.NoError(b, err) - } - }) - b.Run(fmt.Sprintf("Reading %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - b.StopTimer() - f := new(bytes.Buffer) - _ = WriteEDS(ctx, eds, f) - b.StartTimer() - _, err := ReadEDS(ctx, f, dah.Hash()) - require.NoError(b, err) - } - }) - } -} - -func writeRandomEDS(t *testing.T) *rsmt2d.ExtendedDataSquare { - t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - tmpDir := t.TempDir() - err := os.Chdir(tmpDir) - require.NoError(t, err, "error changing to the temporary test directory") - f, err := os.OpenFile("test.car", os.O_WRONLY|os.O_CREATE, 0600) - require.NoError(t, err, "error opening file") - - eds := edstest.RandEDS(t, 4) - err = WriteEDS(ctx, eds, f) - require.NoError(t, err, "error writing EDS to file") - f.Close() - return eds -} - -func openWrittenEDS(t *testing.T) *os.File { - t.Helper() - f, err := os.OpenFile("test.car", os.O_RDONLY, 0600) - require.NoError(t, err, "error opening file") - return f -} - -/* -use this function as needed to create new test data. 
- -example: - - func Test_CreateData(t *testing.T) { - createTestData(t, "celestia-node/share/eds/testdata") - } -*/ -func createTestData(t *testing.T, testDir string) { //nolint:unused - t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - err := os.Chdir(testDir) - require.NoError(t, err, "changing to the directory") - os.RemoveAll("example.car") - require.NoError(t, err, "removing old file") - f, err := os.OpenFile("example.car", os.O_WRONLY|os.O_CREATE, 0600) - require.NoError(t, err, "opening file") - - eds := edstest.RandEDS(t, 4) - err = WriteEDS(ctx, eds, f) - require.NoError(t, err, "writing EDS to file") - f.Close() - dah, err := share.NewRoot(eds) - require.NoError(t, err) - - header, err := json.MarshalIndent(dah, "", "") - require.NoError(t, err, "marshaling example root") - os.RemoveAll("example-root.json") - require.NoError(t, err, "removing old file") - f, err = os.OpenFile("example-root.json", os.O_WRONLY|os.O_CREATE, 0600) - require.NoError(t, err, "opening file") - _, err = f.Write(header) - require.NoError(t, err, "writing example root to file") - f.Close() -} diff --git a/share/eds/file.go b/share/eds/file.go deleted file mode 100644 index b38b0c6e05..0000000000 --- a/share/eds/file.go +++ /dev/null @@ -1,284 +0,0 @@ -package eds - -import ( - "context" - "fmt" - "io" - "math/rand" - "os" - - "golang.org/x/exp/mmap" - - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -// TODO: remove -//type File interface { -// io.Closer -// Size() int -// ShareWithProof(xisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) -// Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) -// AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) -// Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) -// EDS() (*rsmt2d.ExtendedDataSquare, error) -//} - -type FileConfig struct { - Version FileVersion - Compression FileCompression - Mode FileMode - - // extensions map[string]string - // TODO: Add codec -} - -// LazyFile -// * immutable -// * versionable -// TODO: -// - Cache Rows and Cols -// - Avoid storing constant shares, like padding -type LazyFile struct { - path string - hdr *Header - fl fileBackend -} - -type fileBackend interface { - io.ReaderAt - io.Closer -} - -func OpenFile(path string) (*LazyFile, error) { - f, err := mmap.Open(path) - if err != nil { - return nil, err - } - - h, err := ReadHeaderAt(f, 0) - if err != nil { - return nil, err - } - - // TODO(WWondertan): Validate header - return &LazyFile{ - path: path, - hdr: h, - fl: f, - }, nil -} - -func CreateFile(path string, eds *rsmt2d.ExtendedDataSquare, cfgs ...FileConfig) (*LazyFile, error) { - f, err := os.Create(path) - if err != nil { - return nil, err - } - - cfg := FileConfig{} - if cfgs != nil { - cfg = cfgs[0] - } - - h := &Header{ - shareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field - squareSize: uint32(eds.Width()), - cfg: cfg, - } - - if _, err = h.WriteTo(f); err != nil { - return nil, err - } - - width := eds.Width() - if cfg.Mode == ODSMode { - width /= 2 - } - for i := uint(0); i < width; i++ { - for j := uint(0); j < width; j++ { - // TODO: Buffer and write as single? 
- shr := eds.GetCell(i, j) - if _, err := f.Write(shr); err != nil { - return nil, err - } - } - } - - return &LazyFile{ - path: path, - fl: f, - hdr: h, - }, f.Sync() -} - -func (f *LazyFile) Size() int { - return f.hdr.SquareSize() -} - -func (f *LazyFile) Close() error { - return f.fl.Close() -} - -func (f *LazyFile) Header() *Header { - return f.hdr -} - -func (f *LazyFile) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - shrLn := int(f.hdr.shareSize) - sqrLn := int(f.hdr.squareSize) - if f.Header().Config().Mode == ODSMode { - sqrLn /= 2 - } - - shrs := make([]share.Share, sqrLn) - switch axisType { - case rsmt2d.Col: - for i := 0; i < sqrLn; i++ { - pos := axisIdx + i*sqrLn - offset := pos*shrLn + HeaderSize - - shr := make(share.Share, shrLn) - if _, err := f.fl.ReadAt(shr, int64(offset)); err != nil { - return nil, err - } - shrs[i] = shr - } - case rsmt2d.Row: - pos := axisIdx * sqrLn - offset := pos*shrLn + HeaderSize - - axsData := make([]byte, sqrLn*shrLn) - if _, err := f.fl.ReadAt(axsData, int64(offset)); err != nil { - return nil, err - } - - for i := range shrs { - shrs[i] = axsData[i*shrLn : (i+1)*shrLn] - } - default: - return nil, fmt.Errorf("unknown axis") - } - - if f.Header().Config().Mode == ODSMode { - parity, err := share.DefaultRSMT2DCodec().Decode(shrs) - if err != nil { - return nil, err - } - - return append(shrs, parity...), nil - } - return shrs, nil -} - -func (f *LazyFile) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - // TODO(@Wondertan): this has to read directly from the file, avoiding recompute - fullAxis, err := f.Axis(axisType, axisIdx) - if err != nil { - return nil, err - } - - return fullAxis[:len(fullAxis)/2], nil -} - -func (f *LazyFile) ShareWithProof(axisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) { - // TODO: Cache the axis as well as computed tree - axisType := rsmt2d.Row - if rand.Int()/2 == 0 { - axisType = rsmt2d.Col - } - sqrLn := int(f.hdr.squareSize) - shrs, err := f.Axis(axisType, axisIdx) - if err != nil { - return nil, nmt.Proof{}, axisType, err - } - - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) - for _, shr := range shrs { - err = tree.Push(shr) - if err != nil { - return nil, nmt.Proof{}, axisType, err - } - } - - proof, err := tree.ProveRange(shrIdx, shrIdx+1) - if err != nil { - return nil, nmt.Proof{}, axisType, err - } - - return shrs[shrIdx], proof, axisType, nil -} - -func (f *LazyFile) Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { - shrs, err := f.Axis(rsmt2d.Row, axisIdx) - if err != nil { - return nil, nmt.Proof{}, err - } - - return NDFromShares(shrs, namespace, axisIdx) -} - -func (f *LazyFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { - shrLn := int(f.hdr.shareSize) - sqrLn := int(f.hdr.squareSize) - if f.Header().Config().Mode == ODSMode { - sqrLn /= 2 - } - - buf := make([]byte, sqrLn*sqrLn*shrLn) - if _, err := f.fl.ReadAt(buf, HeaderSize); err != nil { - return nil, err - } - - shrs := make([][]byte, sqrLn*sqrLn) - for i := 0; i < sqrLn; i++ { - for j := 0; j < sqrLn; j++ { - pos := i*sqrLn + j - shrs[pos] = buf[pos*shrLn : (pos+1)*shrLn] - } - } - - codec := share.DefaultRSMT2DCodec() - treeFn := wrapper.NewConstructor(uint64(f.hdr.squareSize / 2)) - - switch f.Header().Config().Mode { - case EDSMode: - return rsmt2d.ImportExtendedDataSquare(shrs, codec, treeFn) - case ODSMode: - return rsmt2d.ComputeExtendedDataSquare(shrs, codec, treeFn) - default: - return nil, fmt.Errorf("invalid 
mode type") // TODO(@Wondertan): Do fields validation right after read - } -} - -func NDFromShares(shrs []share.Share, namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { - bserv := ipld.NewMemBlockservice() - batchAdder := ipld.NewNmtNodeAdder(context.TODO(), bserv, ipld.MaxSizeBatchOption(len(shrs))) - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shrs)/2), uint(axisIdx), - nmt.NodeVisitor(batchAdder.Visit)) - for _, shr := range shrs { - err := tree.Push(shr) - if err != nil { - return nil, nmt.Proof{}, err - } - } - - root, err := tree.Root() - if err != nil { - return nil, nmt.Proof{}, err - } - - err = batchAdder.Commit() - if err != nil { - return nil, nmt.Proof{}, err - } - - row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, root, namespace, len(shrs)) - if err != nil { - return nil, nmt.Proof{}, err - } - return row, *proof, nil -} diff --git a/share/eds/file_features.go b/share/eds/file_features.go deleted file mode 100644 index 1374938548..0000000000 --- a/share/eds/file_features.go +++ /dev/null @@ -1,20 +0,0 @@ -package eds - -type FileMode uint8 - -const ( - EDSMode FileMode = iota - ODSMode -) - -type FileVersion uint8 - -const ( - FileV0 FileVersion = iota -) - -type FileCompression uint8 - -const ( - NoCompression FileCompression = iota -) diff --git a/share/eds/file_header.go b/share/eds/file_header.go deleted file mode 100644 index 2b6c767e07..0000000000 --- a/share/eds/file_header.go +++ /dev/null @@ -1,74 +0,0 @@ -package eds - -import ( - "encoding/binary" - "io" -) - -const HeaderSize = 32 - -type Header struct { - // User set features - cfg FileConfig - - // Taken directly from EDS - shareSize uint16 - squareSize uint32 -} - -func (h *Header) Config() FileConfig { - return h.cfg -} - -func (h *Header) ShareSize() int { - return int(h.shareSize) -} - -func (h *Header) SquareSize() int { - return int(h.squareSize) -} - -func (h *Header) WriteTo(w io.Writer) (int64, error) { - buf := make([]byte, HeaderSize) - buf[0] = byte(h.Config().Version) - buf[1] = byte(h.Config().Compression) - buf[2] = byte(h.Config().Mode) - binary.LittleEndian.PutUint16(buf[2:4], h.shareSize) - binary.LittleEndian.PutUint32(buf[4:12], h.squareSize) - // TODO: Extensions - n, err := w.Write(buf) - return int64(n), err -} - -func (h *Header) ReadFrom(r io.Reader) (int64, error) { - buf := make([]byte, HeaderSize) - n, err := io.ReadFull(r, buf) - if err != nil { - return int64(n), err - } - - h.cfg.Version = FileVersion(buf[0]) - h.cfg.Compression = FileCompression(buf[1]) - h.cfg.Mode = FileMode(buf[2]) - h.shareSize = binary.LittleEndian.Uint16(buf[2:4]) - h.squareSize = binary.LittleEndian.Uint32(buf[4:12]) - - // TODO: Extensions - return int64(n), err -} - -func ReadHeaderAt(r io.ReaderAt, offset int64) (*Header, error) { - h := &Header{} - buf := make([]byte, HeaderSize) - _, err := r.ReadAt(buf, offset) - if err != nil { - return h, err - } - - h.cfg.Version = FileVersion(buf[0]) - h.cfg.Compression = FileCompression(buf[1]) - h.cfg.Mode = FileMode(buf[2]) - h.shareSize = binary.LittleEndian.Uint16(buf[2:4]) - h.squareSize = binary.LittleEndian.Uint32(buf[4:12]) - return h, nil -} diff --git a/share/eds/file_store.go b/share/eds/file_store.go deleted file mode 100644 index d580bdc43d..0000000000 --- a/share/eds/file_store.go +++ /dev/null @@ -1,12 +0,0 @@ -package eds - -import "github.com/celestiaorg/celestia-node/share" - -type FileStore struct { - baspath string -} - -func (fs *FileStore) File(hash share.DataHash) (File, error) { 
- // TODO(@Wondertan): Caching - return OpenFile(fs.baspath + "/" + hash.String()) -} diff --git a/share/eds/file_test.go b/share/eds/file_test.go deleted file mode 100644 index 73a3f33855..0000000000 --- a/share/eds/file_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package eds - -import ( - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/edstest" -) - -func TestCreateFile(t *testing.T) { - path := t.TempDir() + "/testfile" - edsIn := edstest.RandEDS(t, 8) - - for _, mode := range []FileMode{EDSMode, ODSMode} { - f, err := CreateFile(path, edsIn, FileConfig{Mode: mode}) - require.NoError(t, err) - edsOut, err := f.EDS() - require.NoError(t, err) - assert.True(t, edsIn.Equals(edsOut)) - } -} - -func TestFile(t *testing.T) { - path := t.TempDir() + "/testfile" - eds := edstest.RandEDS(t, 8) - root, err := share.NewRoot(eds) - require.NoError(t, err) - - // TODO(@Wondartan): Test in multiple modes - fl, err := CreateFile(path, eds) - require.NoError(t, err) - err = fl.Close() - require.NoError(t, err) - - fl, err = OpenFile(path) - require.NoError(t, err) - - axisTypes := []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} - for _, axisType := range axisTypes { - for i := 0; i < int(eds.Width()); i++ { - row, err := fl.Axis(axisType, i) - require.NoError(t, err) - assert.EqualValues(t, getAxis(axisType, i, eds), row) - } - } - - width := int(eds.Width()) - for _, axisType := range axisTypes { - for i := 0; i < width*width; i++ { - axisIdx, shrIdx := i/width, i%width - if axisType == rsmt2d.Col { - axisIdx, shrIdx = shrIdx, axisIdx - } - - shr, prf, _, err := fl.ShareWithProof(axisIdx, shrIdx) - require.NoError(t, err) - - namespace := share.ParitySharesNamespace - if axisIdx < width/2 && shrIdx < width/2 { - namespace = share.GetNamespace(shr) - } - - axishash := root.RowRoots[axisIdx] - if axisType == rsmt2d.Col { - axishash = root.ColumnRoots[axisIdx] - } - - ok := prf.VerifyInclusion(sha256.New(), namespace.ToNMT(), [][]byte{shr}, axishash) - assert.True(t, ok) - } - } - - out, err := fl.EDS() - require.NoError(t, err) - assert.True(t, eds.Equals(out)) - - err = fl.Close() - require.NoError(t, err) -} diff --git a/share/eds/inverted_index.go b/share/eds/inverted_index.go deleted file mode 100644 index dc33c70447..0000000000 --- a/share/eds/inverted_index.go +++ /dev/null @@ -1,103 +0,0 @@ -package eds - -import ( - "context" - "errors" - "fmt" - "runtime" - - "github.com/dgraph-io/badger/v4/options" - "github.com/filecoin-project/dagstore/index" - "github.com/filecoin-project/dagstore/shard" - ds "github.com/ipfs/go-datastore" - "github.com/multiformats/go-multihash" - - dsbadger "github.com/celestiaorg/go-ds-badger4" -) - -const invertedIndexPath = "/inverted_index/" - -// ErrNotFoundInIndex is returned instead of ErrNotFound if the multihash doesn't exist in the index -var ErrNotFoundInIndex = fmt.Errorf("does not exist in index") - -// simpleInvertedIndex is an inverted index that only stores a single shard key per multihash. Its -// implementation is modified from the default upstream implementation in dagstore/index. -type simpleInvertedIndex struct { - ds ds.Batching -} - -// newSimpleInvertedIndex returns a new inverted index that only stores a single shard key per -// multihash. 
This is because we use badger as a storage backend, so updates are expensive, and we -// don't care which shard is used to serve a cid. -func newSimpleInvertedIndex(storePath string) (*simpleInvertedIndex, error) { - opts := dsbadger.DefaultOptions // this should be copied - // turn off value log GC as we don't use value log - opts.GcInterval = 0 - // use minimum amount of NumLevelZeroTables to trigger L0 compaction faster - opts.NumLevelZeroTables = 1 - // MaxLevels = 8 will allow the db to grow to ~11.1 TiB - opts.MaxLevels = 8 - // inverted index stores unique hash keys, so we don't need to detect conflicts - opts.DetectConflicts = false - // we don't need compression for inverted index as it just hashes - opts.Compression = options.None - compactors := runtime.NumCPU() - if compactors < 2 { - compactors = 2 - } - if compactors > opts.MaxLevels { // ensure there is no more compactors than db table levels - compactors = opts.MaxLevels - } - opts.NumCompactors = compactors - - ds, err := dsbadger.NewDatastore(storePath+invertedIndexPath, &opts) - if err != nil { - return nil, fmt.Errorf("can't open Badger Datastore: %w", err) - } - - return &simpleInvertedIndex{ds: ds}, nil -} - -func (s *simpleInvertedIndex) AddMultihashesForShard( - ctx context.Context, - mhIter index.MultihashIterator, - sk shard.Key, -) error { - // in the original implementation, a mutex is used here to prevent unnecessary updates to the - // key. The amount of extra data produced by this is negligible, and the performance benefits - // from removing the lock are significant (indexing is a hot path during sync). - batch, err := s.ds.Batch(ctx) - if err != nil { - return fmt.Errorf("failed to create ds batch: %w", err) - } - - err = mhIter.ForEach(func(mh multihash.Multihash) error { - key := ds.NewKey(string(mh)) - if err := batch.Put(ctx, key, []byte(sk.String())); err != nil { - return fmt.Errorf("failed to put mh=%s, err=%w", mh, err) - } - return nil - }) - if err != nil { - return fmt.Errorf("failed to add index entry: %w", err) - } - - if err := batch.Commit(ctx); err != nil { - return fmt.Errorf("failed to commit batch: %w", err) - } - return nil -} - -func (s *simpleInvertedIndex) GetShardsForMultihash(ctx context.Context, mh multihash.Multihash) ([]shard.Key, error) { - key := ds.NewKey(string(mh)) - sbz, err := s.ds.Get(ctx, key) - if err != nil { - return nil, errors.Join(ErrNotFoundInIndex, err) - } - - return []shard.Key{shard.KeyFromString(string(sbz))}, nil -} - -func (s *simpleInvertedIndex) close() error { - return s.ds.Close() -} diff --git a/share/eds/inverted_index_test.go b/share/eds/inverted_index_test.go deleted file mode 100644 index e83c2be267..0000000000 --- a/share/eds/inverted_index_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package eds - -import ( - "context" - "testing" - - "github.com/filecoin-project/dagstore/shard" - "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/require" -) - -type mockIterator struct { - mhs []multihash.Multihash -} - -func (m *mockIterator) ForEach(f func(mh multihash.Multihash) error) error { - for _, mh := range m.mhs { - if err := f(mh); err != nil { - return err - } - } - return nil -} - -// TestMultihashesForShard ensures that the inverted index correctly stores a single shard key per -// duplicate multihash -func TestMultihashesForShard(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mhs := []multihash.Multihash{ - multihash.Multihash("mh1"), - multihash.Multihash("mh2"), - 
multihash.Multihash("mh3"), - } - - mi := &mockIterator{mhs: mhs} - path := t.TempDir() - invertedIndex, err := newSimpleInvertedIndex(path) - require.NoError(t, err) - - // 1. Add all 3 multihashes to shard1 - err = invertedIndex.AddMultihashesForShard(ctx, mi, shard.KeyFromString("shard1")) - require.NoError(t, err) - shardKeys, err := invertedIndex.GetShardsForMultihash(ctx, mhs[0]) - require.NoError(t, err) - require.Equal(t, []shard.Key{shard.KeyFromString("shard1")}, shardKeys) - - // 2. Add mh1 to shard2, and ensure that mh1 no longer points to shard1 - err = invertedIndex.AddMultihashesForShard(ctx, &mockIterator{mhs: mhs[:1]}, shard.KeyFromString("shard2")) - require.NoError(t, err) - shardKeys, err = invertedIndex.GetShardsForMultihash(ctx, mhs[0]) - require.NoError(t, err) - require.Equal(t, []shard.Key{shard.KeyFromString("shard2")}, shardKeys) -} diff --git a/share/eds/metrics.go b/share/eds/metrics.go deleted file mode 100644 index cbebf8321a..0000000000 --- a/share/eds/metrics.go +++ /dev/null @@ -1,292 +0,0 @@ -package eds - -import ( - "context" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" -) - -const ( - failedKey = "failed" - sizeKey = "eds_size" - - putResultKey = "result" - putOK putResult = "ok" - putExists putResult = "exists" - putFailed putResult = "failed" - - opNameKey = "op" - longOpResultKey = "result" - longOpUnresolved longOpResult = "unresolved" - longOpOK longOpResult = "ok" - longOpFailed longOpResult = "failed" - - dagstoreShardStatusKey = "shard_status" -) - -var ( - meter = otel.Meter("eds_store") -) - -type putResult string - -type longOpResult string - -type metrics struct { - putTime metric.Float64Histogram - getCARTime metric.Float64Histogram - getCARBlockstoreTime metric.Float64Histogram - getDAHTime metric.Float64Histogram - removeTime metric.Float64Histogram - getTime metric.Float64Histogram - hasTime metric.Float64Histogram - listTime metric.Float64Histogram - - shardFailureCount metric.Int64Counter - - longOpTime metric.Float64Histogram - gcTime metric.Float64Histogram -} - -func (s *Store) WithMetrics() error { - putTime, err := meter.Float64Histogram("eds_store_put_time_histogram", - metric.WithDescription("eds store put time histogram(s)")) - if err != nil { - return err - } - - getCARTime, err := meter.Float64Histogram("eds_store_get_car_time_histogram", - metric.WithDescription("eds store get car time histogram(s)")) - if err != nil { - return err - } - - getCARBlockstoreTime, err := meter.Float64Histogram("eds_store_get_car_blockstore_time_histogram", - metric.WithDescription("eds store get car blockstore time histogram(s)")) - if err != nil { - return err - } - - getDAHTime, err := meter.Float64Histogram("eds_store_get_dah_time_histogram", - metric.WithDescription("eds store get dah time histogram(s)")) - if err != nil { - return err - } - - removeTime, err := meter.Float64Histogram("eds_store_remove_time_histogram", - metric.WithDescription("eds store remove time histogram(s)")) - if err != nil { - return err - } - - getTime, err := meter.Float64Histogram("eds_store_get_time_histogram", - metric.WithDescription("eds store get time histogram(s)")) - if err != nil { - return err - } - - hasTime, err := meter.Float64Histogram("eds_store_has_time_histogram", - metric.WithDescription("eds store has time histogram(s)")) - if err != nil { - return err - } - - listTime, err := meter.Float64Histogram("eds_store_list_time_histogram", - metric.WithDescription("eds store list 
time histogram(s)")) - if err != nil { - return err - } - - shardFailureCount, err := meter.Int64Counter("eds_store_shard_failure_counter", - metric.WithDescription("eds store OpShardFail counter")) - if err != nil { - return err - } - - longOpTime, err := meter.Float64Histogram("eds_store_long_operation_time_histogram", - metric.WithDescription("eds store long operation time histogram(s)")) - if err != nil { - return err - } - - gcTime, err := meter.Float64Histogram("eds_store_gc_time", - metric.WithDescription("dagstore gc time histogram(s)")) - if err != nil { - return err - } - - dagStoreShards, err := meter.Int64ObservableGauge("eds_store_dagstore_shards", - metric.WithDescription("dagstore amount of shards by status")) - if err != nil { - return err - } - - if err = s.cache.Load().EnableMetrics(); err != nil { - return err - } - - callback := func(ctx context.Context, observer metric.Observer) error { - stats := s.dgstr.Stats() - for status, amount := range stats { - observer.ObserveInt64(dagStoreShards, int64(amount), - metric.WithAttributes( - attribute.String(dagstoreShardStatusKey, status.String()), - )) - } - return nil - } - - if _, err := meter.RegisterCallback(callback, dagStoreShards); err != nil { - return err - } - - s.metrics = &metrics{ - putTime: putTime, - getCARTime: getCARTime, - getCARBlockstoreTime: getCARBlockstoreTime, - getDAHTime: getDAHTime, - removeTime: removeTime, - getTime: getTime, - hasTime: hasTime, - listTime: listTime, - shardFailureCount: shardFailureCount, - longOpTime: longOpTime, - gcTime: gcTime, - } - return nil -} - -func (m *metrics) observeGCtime(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - m.gcTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeShardFailure(ctx context.Context, shardKey string) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.shardFailureCount.Add(ctx, 1, metric.WithAttributes(attribute.String("shard_key", shardKey))) -} - -func (m *metrics) observePut(ctx context.Context, dur time.Duration, result putResult, size uint) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.putTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.String(putResultKey, string(result)), - attribute.Int(sizeKey, int(size)))) -} - -func (m *metrics) observeLongOp(ctx context.Context, opName string, dur time.Duration, result longOpResult) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.longOpTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.String(opNameKey, opName), - attribute.String(longOpResultKey, string(result)))) -} - -func (m *metrics) observeGetCAR(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.getCARTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeCARBlockstore(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.getCARBlockstoreTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeGetDAH(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if 
ctx.Err() != nil { - ctx = context.Background() - } - - m.getDAHTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeRemove(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.removeTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.getTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.hasTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeList(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.listTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} diff --git a/share/eds/ods.go b/share/eds/ods.go deleted file mode 100644 index aa1219d41a..0000000000 --- a/share/eds/ods.go +++ /dev/null @@ -1,98 +0,0 @@ -package eds - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/ipld/go-car" - "github.com/ipld/go-car/util" -) - -// bufferedODSReader will read odsSquareSize amount of leaves from reader into the buffer. -// It exposes the buffer to be read by io.Reader interface implementation -type bufferedODSReader struct { - carReader *bufio.Reader - // current is the amount of CARv1 encoded leaves that have been read from reader. 
When current - // reaches odsSquareSize, bufferedODSReader will prevent further reads by returning io.EOF - current, odsSquareSize int - buf *bytes.Buffer -} - -// ODSReader reads CARv1 encoded data from io.ReadCloser and limits the reader to the CAR header -// and first quadrant (ODS) -func ODSReader(carReader io.Reader) (io.Reader, error) { - if carReader == nil { - return nil, errors.New("eds: can't create ODSReader over nil reader") - } - - odsR := &bufferedODSReader{ - carReader: bufio.NewReader(carReader), - buf: new(bytes.Buffer), - } - - // first LdRead reads the full CAR header to determine amount of shares in the ODS - data, err := util.LdRead(odsR.carReader) - if err != nil { - return nil, fmt.Errorf("reading header: %v", err) - } - - var header car.CarHeader - err = cbor.DecodeInto(data, &header) - if err != nil { - return nil, fmt.Errorf("invalid header: %w", err) - } - - // car header contains both row roots and col roots which is why - // we divide by 4 to get the ODSWidth - odsWidth := len(header.Roots) / 4 - odsR.odsSquareSize = odsWidth * odsWidth - - // NewCarReader will expect to read the header first, so write it first - return odsR, util.LdWrite(odsR.buf, data) -} - -func (r *bufferedODSReader) Read(p []byte) (n int, err error) { - // read leafs to the buffer until it has sufficient data to fill provided container or full ods is - // read - for r.current < r.odsSquareSize && r.buf.Len() < len(p) { - if err := r.readLeaf(); err != nil { - return 0, err - } - - r.current++ - } - - // read buffer to slice - return r.buf.Read(p) -} - -// readLeaf reads one leaf from reader into bufferedODSReader buffer -func (r *bufferedODSReader) readLeaf() error { - if _, err := r.carReader.Peek(1); err != nil { // no more blocks, likely clean io.EOF - return err - } - - l, err := binary.ReadUvarint(r.carReader) - if err != nil { - if err == io.EOF { - return io.ErrUnexpectedEOF // don't silently pretend this is a clean EOF - } - return err - } - - if l > uint64(util.MaxAllowedSectionSize) { // Don't OOM - return fmt.Errorf("malformed car; header `length`: %v is bigger than %v", l, util.MaxAllowedSectionSize) - } - - buf := make([]byte, 8) - n := binary.PutUvarint(buf, l) - r.buf.Write(buf[:n]) - - _, err = r.buf.ReadFrom(io.LimitReader(r.carReader, int64(l))) - return err -} diff --git a/share/eds/ods_file.go b/share/eds/ods_file.go deleted file mode 100644 index 22a4adf7dd..0000000000 --- a/share/eds/ods_file.go +++ /dev/null @@ -1,84 +0,0 @@ -package eds - -import ( - "math/rand" - - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" -) - -type MemFile struct { - Eds *rsmt2d.ExtendedDataSquare -} - -func (f *MemFile) Close() error { - return nil -} - -func (f *MemFile) Size() int { - return int(f.Eds.Width()) -} - -func (f *MemFile) ShareWithProof(axisIdx, shrIdx int) (share.Share, nmt.Proof, rsmt2d.Axis, error) { - sqrLn := f.Size() - axisType := rsmt2d.Row - if rand.Int()/2 == 0 { - axisType = rsmt2d.Col - } - shrs, err := f.Axis(axisType, axisIdx) - if err != nil { - return nil, nmt.Proof{}, axisType, err - } - - // TODO(@Wondartan): this must access cached NMT on EDS instead of computing a new one - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(sqrLn/2), uint(axisIdx)) - for _, shr := range shrs { - err = tree.Push(shr) - if err != nil { - return nil, nmt.Proof{}, axisType, err - } - } - - proof, err := tree.ProveRange(shrIdx, shrIdx+1) - if err != nil { - 
return nil, nmt.Proof{}, axisType, err - } - - return shrs[shrIdx], proof, axisType, nil -} - -func (f *MemFile) Axis(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - return getAxis(axisType, axisIdx, f.Eds), nil -} - -func (f *MemFile) AxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - return getAxis(axisType, axisIdx, f.Eds)[:f.Size()/2], nil -} - -func (f *MemFile) Data(namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { - shrs, err := f.Axis(rsmt2d.Row, axisIdx) - if err != nil { - return nil, nmt.Proof{}, err - } - - return NDFromShares(shrs, namespace, axisIdx) -} - -func (f *MemFile) EDS() (*rsmt2d.ExtendedDataSquare, error) { - return f.Eds, nil -} - -// TODO(@Wondertan): Should be a method on eds -func getAxis(axisType rsmt2d.Axis, axisIdx int, eds *rsmt2d.ExtendedDataSquare) [][]byte { - switch axisType { - case rsmt2d.Row: - return eds.Row(uint(axisIdx)) - case rsmt2d.Col: - return eds.Col(uint(axisIdx)) - default: - panic("unknown axis") - } -} diff --git a/share/eds/ods_test.go b/share/eds/ods_test.go deleted file mode 100644 index 0f7c69e708..0000000000 --- a/share/eds/ods_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package eds - -import ( - "context" - "io" - "testing" - - "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" -) - -// TestODSReader ensures that the reader returned from ODSReader is capable of reading the CAR -// header and ODS. -func TestODSReader(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // launch eds store - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // generate random eds data and put it into the store - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // get CAR reader from store - r, err := edsStore.GetCAR(ctx, dah.Hash()) - assert.NoError(t, err) - defer func() { - require.NoError(t, r.Close()) - }() - - // create ODSReader wrapper based on car reader to limit reads to ODS only - odsR, err := ODSReader(r) - assert.NoError(t, err) - - // create CAR reader from ODSReader - carReader, err := car.NewCarReader(odsR) - assert.NoError(t, err) - - // validate ODS could be obtained from reader - for i := 0; i < 4; i++ { - for j := 0; j < 4; j++ { - // pick share from original eds - original := eds.GetCell(uint(i), uint(j)) - - // read block from odsReader based reader - block, err := carReader.Next() - assert.NoError(t, err) - - // check that original data from eds is same as data from reader - assert.Equal(t, original, share.GetData(block.RawData())) - } - } - - // Make sure no excess data is available to get from reader - _, err = carReader.Next() - assert.Error(t, io.EOF, err) -} - -// TestODSReaderReconstruction ensures that the reader returned from ODSReader provides sufficient -// data for EDS reconstruction -func TestODSReaderReconstruction(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // launch eds store - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // generate random eds data and put it into the store - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // get CAR reader from store - r, err := edsStore.GetCAR(ctx, dah.Hash()) - assert.NoError(t, err) - defer func() 
{ - require.NoError(t, r.Close()) - }() - - // create ODSReader wrapper based on car reader to limit reads to ODS only - odsR, err := ODSReader(r) - assert.NoError(t, err) - - // reconstruct EDS from ODSReader - loaded, err := ReadEDS(ctx, odsR, dah.Hash()) - assert.NoError(t, err) - - rowRoots, err := eds.RowRoots() - require.NoError(t, err) - loadedRowRoots, err := loaded.RowRoots() - require.NoError(t, err) - require.Equal(t, rowRoots, loadedRowRoots) - - colRoots, err := eds.ColRoots() - require.NoError(t, err) - loadedColRoots, err := loaded.ColRoots() - require.NoError(t, err) - require.Equal(t, colRoots, loadedColRoots) -} diff --git a/share/eds/store.go b/share/eds/store.go deleted file mode 100644 index 816065909e..0000000000 --- a/share/eds/store.go +++ /dev/null @@ -1,644 +0,0 @@ -package eds - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/index" - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/dagstore/shard" - bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-datastore" - carv1 "github.com/ipld/go-car" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/cache" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -const ( - blocksPath = "/blocks/" - indexPath = "/index/" - transientsPath = "/transients/" -) - -var ErrNotFound = errors.New("eds not found in store") - -// Store maintains (via DAGStore) a top-level index enabling granular and efficient random access to -// every share and/or Merkle proof over every registered CARv1 file. The EDSStore provides a custom -// blockstore interface implementation to achieve access. The main use-case is randomized sampling -// over the whole chain of EDS block data and getting data by namespace. -type Store struct { - cancel context.CancelFunc - - dgstr *dagstore.DAGStore - mounts *mount.Registry - - bs *blockstore - cache atomic.Pointer[cache.DoubleCache] - - carIdx index.FullIndexRepo - invertedIdx *simpleInvertedIndex - - basepath string - gcInterval time.Duration - // lastGCResult is only stored on the store for testing purposes. - lastGCResult atomic.Pointer[dagstore.GCResult] - - // stripedLocks is used to synchronize parallel operations - stripedLocks [256]sync.Mutex - shardFailures chan dagstore.ShardResult - - metrics *metrics -} - -// NewStore creates a new EDS Store under the given basepath and datastore. 
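
NewStore, defined next, wires the dagstore, both index repositories and the
double cache together. A minimal caller-side sketch of that API, assuming an
in-memory datastore and an illustrative base path (the same calls the tests
and benchmarks further down exercise):

package main

import (
	"context"
	"log"

	"github.com/ipfs/go-datastore"
	ds_sync "github.com/ipfs/go-datastore/sync"

	"github.com/celestiaorg/celestia-node/share/eds"
)

func main() {
	ctx := context.Background()

	// An in-memory datastore is enough for a sketch; nodes pass a
	// persistent (e.g. badger-backed) datastore instead.
	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())

	store, err := eds.NewStore(eds.DefaultParameters(), "/tmp/eds-store", ds)
	if err != nil {
		log.Fatal(err)
	}
	if err := store.Start(ctx); err != nil { // spawns the GC loop and failure watcher
		log.Fatal(err)
	}
	defer func() {
		if err := store.Stop(ctx); err != nil {
			log.Printf("stopping eds store: %v", err)
		}
	}()
}
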
-func NewStore(params *Parameters, basePath string, ds datastore.Batching) (*Store, error) { - if err := params.Validate(); err != nil { - return nil, err - } - - err := setupPath(basePath) - if err != nil { - return nil, fmt.Errorf("failed to setup eds.Store directories: %w", err) - } - - r := mount.NewRegistry() - err = r.Register("fs", &inMemoryOnceMount{}) - if err != nil { - return nil, fmt.Errorf("failed to register memory mount on the registry: %w", err) - } - if err != nil { - return nil, fmt.Errorf("failed to register FS mount on the registry: %w", err) - } - - fsRepo, err := index.NewFSRepo(basePath + indexPath) - if err != nil { - return nil, fmt.Errorf("failed to create index repository: %w", err) - } - - invertedIdx, err := newSimpleInvertedIndex(basePath) - if err != nil { - return nil, fmt.Errorf("failed to create index: %w", err) - } - - failureChan := make(chan dagstore.ShardResult) - dagStore, err := dagstore.NewDAGStore( - dagstore.Config{ - TransientsDir: basePath + transientsPath, - IndexRepo: fsRepo, - Datastore: ds, - MountRegistry: r, - TopLevelIndex: invertedIdx, - FailureCh: failureChan, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create DAGStore: %w", err) - } - - recentBlocksCache, err := cache.NewAccessorCache("recent", params.RecentBlocksCacheSize) - if err != nil { - return nil, fmt.Errorf("failed to create recent blocks cache: %w", err) - } - - blockstoreCache, err := cache.NewAccessorCache("blockstore", params.BlockstoreCacheSize) - if err != nil { - return nil, fmt.Errorf("failed to create blockstore cache: %w", err) - } - - store := &Store{ - basepath: basePath, - dgstr: dagStore, - carIdx: fsRepo, - invertedIdx: invertedIdx, - gcInterval: params.GCInterval, - mounts: r, - shardFailures: failureChan, - } - store.bs = newBlockstore(store, ds) - store.cache.Store(cache.NewDoubleCache(recentBlocksCache, blockstoreCache)) - return store, nil -} - -func (s *Store) Start(ctx context.Context) error { - err := s.dgstr.Start(ctx) - if err != nil { - return err - } - // start Store only if DagStore succeeds - runCtx, cancel := context.WithCancel(context.Background()) - s.cancel = cancel - // initialize empty gc result to avoid panic on access - s.lastGCResult.Store(&dagstore.GCResult{ - Shards: make(map[shard.Key]error), - }) - - if s.gcInterval != 0 { - go s.gc(runCtx) - } - - go s.watchForFailures(runCtx) - return nil -} - -// Stop stops the underlying DAGStore. -func (s *Store) Stop(context.Context) error { - defer s.cancel() - if err := s.invertedIdx.close(); err != nil { - return err - } - return s.dgstr.Close() -} - -// gc periodically removes all inactive or errored shards. 
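
gc, defined next, runs on the classic ticker-plus-context loop. A generic,
self-contained rendering of that shape (runEvery is a hypothetical name, not
part of this patch), with two quirks of the original called out in comments:

package main

import (
	"context"
	"fmt"
	"time"
)

// runEvery invokes task every interval until the context is cancelled
// or the task fails.
func runEvery(ctx context.Context, interval time.Duration, task func(context.Context) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // the gc loop below omits this and keeps its ticker alive
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := task(ctx); err != nil {
				// the gc loop below also returns on error, which silently
				// disables all future GC runs until the node restarts
				return
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	runEvery(ctx, 200*time.Millisecond, func(context.Context) error {
		fmt.Println("tick")
		return nil
	})
}
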
-func (s *Store) gc(ctx context.Context) { - ticker := time.NewTicker(s.gcInterval) - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - tnow := time.Now() - res, err := s.dgstr.GC(ctx) - s.metrics.observeGCtime(ctx, time.Since(tnow), err != nil) - if err != nil { - log.Errorf("garbage collecting dagstore: %v", err) - return - } - s.lastGCResult.Store(res) - } - } -} - -func (s *Store) watchForFailures(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case res := <-s.shardFailures: - log.Errorw("removing shard after failure", "key", res.Key, "err", res.Error) - s.metrics.observeShardFailure(ctx, res.Key.String()) - k := share.MustDataHashFromString(res.Key.String()) - err := s.Remove(ctx, k) - if err != nil { - log.Errorw("failed to remove shard after failure", "key", res.Key, "err", err) - } - } - } -} - -// Put stores the given data square with DataRoot's hash as a key. -// -// The square is verified on the Exchange level, and Put only stores the square, trusting it. -// The resulting file stores all the shares and NMT Merkle Proofs of the EDS. -// Additionally, the file gets indexed s.t. store.Blockstore can access them. -func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) error { - ctx, span := tracer.Start(ctx, "store/put", trace.WithAttributes( - attribute.Int("width", int(square.Width())), - )) - - tnow := time.Now() - err := s.put(ctx, root, square) - result := putOK - switch { - case errors.Is(err, dagstore.ErrShardExists): - result = putExists - case err != nil: - result = putFailed - } - utils.SetStatusAndEnd(span, err) - s.metrics.observePut(ctx, time.Since(tnow), result, square.Width()) - return err -} - -func (s *Store) put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) (err error) { - lk := &s.stripedLocks[root[len(root)-1]] - lk.Lock() - defer lk.Unlock() - - // if root already exists, short-circuit - if has, _ := s.Has(ctx, root); has { - return dagstore.ErrShardExists - } - - key := root.String() - f, err := os.OpenFile(s.basepath+blocksPath+key, os.O_CREATE|os.O_WRONLY, 0600) - if err != nil { - return err - } - defer closeAndLog("car file", f) - - // save encoded eds into buffer - mount := &inMemoryOnceMount{ - // TODO: buffer could be pre-allocated with capacity calculated based on eds size. - buf: bytes.NewBuffer(nil), - FileMount: mount.FileMount{Path: s.basepath + blocksPath + key}, - } - err = WriteEDS(ctx, square, mount) - if err != nil { - return fmt.Errorf("failed to write EDS to file: %w", err) - } - - // write whole buffered mount data in one go to optimize i/o - if _, err = mount.WriteTo(f); err != nil { - return fmt.Errorf("failed to write EDS to file: %w", err) - } - - ch := make(chan dagstore.ShardResult, 1) - err = s.dgstr.RegisterShard(ctx, shard.KeyFromString(key), mount, ch, dagstore.RegisterOpts{}) - if err != nil { - return fmt.Errorf("failed to initiate shard registration: %w", err) - } - - var result dagstore.ShardResult - select { - case result = <-ch: - case <-ctx.Done(): - // if the context finished before the result was received, track the result in a separate goroutine - go trackLateResult("put", ch, s.metrics, time.Minute*5) - return ctx.Err() - } - - if result.Error != nil { - return fmt.Errorf("failed to register shard: %w", result.Error) - } - - // the accessor returned in the result will be nil, so the shard needs to be acquired first to - // become available in the cache. 
It might take some time, and the result should not affect the put - // operation, so do it in a goroutine - // TODO: Ideally, only recent blocks should be put in the cache, but there is no way right now to - // check such a condition. - go func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ac, err := s.cache.Load().First().GetOrLoad(ctx, result.Key, s.getAccessor) - if err != nil { - log.Warnw("unable to put accessor to recent blocks accessors cache", "err", err) - return - } - - // need to close returned accessor to remove the reader reference - if err := ac.Close(); err != nil { - log.Warnw("unable to close accessor after loading", "err", err) - } - }() - - return nil -} - -// waitForResult waits for a result from the res channel for a maximum duration specified by -// maxWait. If the result is not received within the specified duration, it logs an error -// indicating that the parent context has expired and the shard registration is stuck. If a result -// is received, it checks for any error and logs appropriate messages. -func trackLateResult(opName string, res <-chan dagstore.ShardResult, metrics *metrics, maxWait time.Duration) { - tnow := time.Now() - select { - case <-time.After(maxWait): - metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpUnresolved) - log.Errorf("parent context is expired, while register shard is stuck for more than %v sec", time.Since(tnow)) - return - case result := <-res: - // don't observe if result was received right after launch of the func - if time.Since(tnow) < time.Second { - return - } - if result.Error != nil { - metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpFailed) - log.Errorf("failed to register shard after context expired: %v ago, err: %s", time.Since(tnow), result.Error) - return - } - metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpOK) - log.Warnf("parent context expired, but register shard finished with no error,"+ - " after context expired: %v ago", time.Since(tnow)) - return - } -} - -// GetCAR takes a DataRoot and returns a buffered reader to the respective EDS serialized as a -// CARv1 file. -// The Reader strictly reads the CAR header and first quadrant (1/4) of the EDS, omitting all the -// NMT Merkle proofs. Integrity of the store data is not verified. -// -// The shard is cached in the Store, so subsequent calls to GetCAR with the same root will use the -// same reader. The cache is responsible for closing the underlying reader. -func (s *Store) GetCAR(ctx context.Context, root share.DataHash) (io.ReadCloser, error) { - ctx, span := tracer.Start(ctx, "store/get-car") - tnow := time.Now() - r, err := s.getCAR(ctx, root) - s.metrics.observeGetCAR(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return r, err -} - -func (s *Store) getCAR(ctx context.Context, root share.DataHash) (io.ReadCloser, error) { - key := shard.KeyFromString(root.String()) - accessor, err := s.cache.Load().Get(key) - if err == nil { - return newReadCloser(accessor), nil - } - // If the accessor is not found in the cache, create a new one from dagstore. We don't put the - // accessor in the cache here because getCAR is used by shrex-eds. There is a lower probability, - // compared to other cache put triggers, that the same block will be requested again soon. 
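
getCAR above and carBlockstore below share the same two-step lookup. A
condensed sketch of that shape; it would have to live inside package eds,
since it touches unexported fields, and cachedOrFresh is a hypothetical name:

// cachedOrFresh condenses the lookup order shared by getCAR and
// carBlockstore: consult the double cache first, then fall back to
// acquiring the shard from the dagstore on a miss, without caching it.
func cachedOrFresh(ctx context.Context, s *Store, key shard.Key) (cache.Accessor, error) {
	if ac, err := s.cache.Load().Get(key); err == nil {
		return ac, nil // hit: the cache owns the accessor and closes it on eviction
	}
	return s.getAccessor(ctx, key)
}
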
- shardAccessor, err := s.getAccessor(ctx, key) - if err != nil { - return nil, fmt.Errorf("failed to get accessor: %w", err) - } - - return newReadCloser(shardAccessor), nil -} - -// Blockstore returns an IPFS blockstore providing access to individual shares/nodes of all EDS -// registered on the Store. NOTE: The blockstore does not store whole Celestia Blocks but IPFS -// blocks. We represent `shares` and NMT Merkle proofs as IPFS blocks and IPLD nodes so Bitswap can -// access those. -func (s *Store) Blockstore() bstore.Blockstore { - return s.bs -} - -// CARBlockstore returns an IPFS Blockstore providing access to individual shares/nodes of a -// specific EDS identified by DataHash and registered on the Store. NOTE: The Blockstore does not -// store whole Celestia Blocks but IPFS blocks. We represent `shares` and NMT Merkle proofs as IPFS -// blocks and IPLD nodes so Bitswap can access those. -func (s *Store) CARBlockstore( - ctx context.Context, - root share.DataHash, -) (*BlockstoreCloser, error) { - ctx, span := tracer.Start(ctx, "store/car-blockstore") - tnow := time.Now() - cbs, err := s.carBlockstore(ctx, root) - s.metrics.observeCARBlockstore(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return cbs, err -} - -func (s *Store) carBlockstore( - ctx context.Context, - root share.DataHash, -) (*BlockstoreCloser, error) { - key := shard.KeyFromString(root.String()) - accessor, err := s.cache.Load().Get(key) - if err == nil { - return blockstoreCloser(accessor) - } - - // if the accessor is not found in the cache, create a new one from dagstore - sa, err := s.getAccessor(ctx, key) - if err != nil { - return nil, fmt.Errorf("failed to get accessor: %w", err) - } - return blockstoreCloser(sa) -} - -// GetDAH returns the DataAvailabilityHeader for the EDS identified by DataHash. -func (s *Store) GetDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { - ctx, span := tracer.Start(ctx, "store/car-dah") - tnow := time.Now() - r, err := s.getDAH(ctx, root) - s.metrics.observeGetDAH(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return r, err -} - -func (s *Store) getDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { - r, err := s.getCAR(ctx, root) - if err != nil { - return nil, fmt.Errorf("eds/store: failed to get CAR file: %w", err) - } - defer closeAndLog("car reader", r) - - carHeader, err := carv1.ReadHeader(bufio.NewReader(r)) - if err != nil { - return nil, fmt.Errorf("eds/store: failed to read car header: %w", err) - } - - dah := dahFromCARHeader(carHeader) - if !bytes.Equal(dah.Hash(), root) { - return nil, fmt.Errorf("eds/store: content integrity mismatch from CAR for root %x", root) - } - return dah, nil -} - -// dahFromCARHeader returns the DataAvailabilityHeader stored in the CIDs of a CARv1 header. 
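
The split in dahFromCARHeader below relies on a fixed root layout: row roots
occupy the first half of the CAR header, column roots the second. In
miniature, with a worked size (an EDS of width 4 carries 8 roots, which is
also why ODSReader earlier derives the ODS width as len(Roots)/4):

// splitRoots mirrors the layout assumption: rows first, columns second.
// With 8 roots (EDS width 4): rows = roots[0:4], cols = roots[4:8].
func splitRoots(roots [][]byte) (rows, cols [][]byte) {
	half := len(roots) / 2
	return roots[:half], roots[half:]
}
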
-func dahFromCARHeader(carHeader *carv1.CarHeader) *share.Root { - rootCount := len(carHeader.Roots) - rootBytes := make([][]byte, 0, rootCount) - for _, root := range carHeader.Roots { - rootBytes = append(rootBytes, ipld.NamespacedSha256FromCID(root)) - } - return &share.Root{ - RowRoots: rootBytes[:rootCount/2], - ColumnRoots: rootBytes[rootCount/2:], - } -} - -func (s *Store) getAccessor(ctx context.Context, key shard.Key) (cache.Accessor, error) { - ch := make(chan dagstore.ShardResult, 1) - err := s.dgstr.AcquireShard(ctx, key, ch, dagstore.AcquireOpts{}) - if err != nil { - if errors.Is(err, dagstore.ErrShardUnknown) { - return nil, ErrNotFound - } - return nil, fmt.Errorf("failed to initialize shard acquisition: %w", err) - } - - select { - case res := <-ch: - if res.Error != nil { - return nil, fmt.Errorf("failed to acquire shard: %w", res.Error) - } - return res.Accessor, nil - case <-ctx.Done(): - go trackLateResult("get_shard", ch, s.metrics, time.Minute) - return nil, ctx.Err() - } -} - -// Remove removes EDS from Store by the given share.Root hash and cleans up all -// the indexing. -func (s *Store) Remove(ctx context.Context, root share.DataHash) error { - ctx, span := tracer.Start(ctx, "store/remove") - tnow := time.Now() - err := s.remove(ctx, root) - s.metrics.observeRemove(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return err -} - -func (s *Store) remove(ctx context.Context, root share.DataHash) (err error) { - key := shard.KeyFromString(root.String()) - // remove open links to accessor from cache - if err := s.cache.Load().Remove(key); err != nil { - log.Warnw("remove accessor from cache", "err", err) - } - ch := make(chan dagstore.ShardResult, 1) - err = s.dgstr.DestroyShard(ctx, key, ch, dagstore.DestroyOpts{}) - if err != nil { - return fmt.Errorf("failed to initiate shard destruction: %w", err) - } - - select { - case result := <-ch: - if result.Error != nil { - return fmt.Errorf("failed to destroy shard: %w", result.Error) - } - case <-ctx.Done(): - go trackLateResult("remove", ch, s.metrics, time.Minute) - return ctx.Err() - } - - dropped, err := s.carIdx.DropFullIndex(key) - if !dropped { - log.Warnf("failed to drop index for %s", key) - } - if err != nil { - return fmt.Errorf("failed to drop index for %s: %w", key, err) - } - - err = os.Remove(s.basepath + blocksPath + root.String()) - if err != nil { - return fmt.Errorf("failed to remove CAR file: %w", err) - } - return nil -} - -// Get reads EDS out of Store by given DataRoot. -// -// It reads only one quadrant(1/4) of the EDS and verifies the integrity of the stored data by -// recomputing it. -func (s *Store) Get(ctx context.Context, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { - ctx, span := tracer.Start(ctx, "store/get") - tnow := time.Now() - eds, err := s.get(ctx, root) - s.metrics.observeGet(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return eds, err -} - -func (s *Store) get(ctx context.Context, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "store/get") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - r, err := s.getCAR(ctx, root) - if err != nil { - return nil, fmt.Errorf("failed to get CAR file: %w", err) - } - defer closeAndLog("car reader", r) - - eds, err = ReadEDS(ctx, r, root) - if err != nil { - return nil, fmt.Errorf("failed to read EDS from CAR file: %w", err) - } - return eds, nil -} - -// Has checks if EDS exists by the given share.Root hash. 
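
Because put short-circuits through Has and surfaces dagstore.ErrShardExists
for duplicates, callers usually treat that error as success, as the parallel
put test below does. A caller-side sketch, with store, square and dah assumed
to be in scope:

if err := store.Put(ctx, dah.Hash(), square); err != nil &&
	!errors.Is(err, dagstore.ErrShardExists) {
	return fmt.Errorf("storing EDS: %w", err)
}
has, err := store.Has(ctx, dah.Hash())
if err != nil {
	return err
}
fmt.Println("stored:", has) // true once the shard is registered
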
-func (s *Store) Has(ctx context.Context, root share.DataHash) (has bool, err error) { - ctx, span := tracer.Start(ctx, "store/has") - tnow := time.Now() - eds, err := s.has(ctx, root) - s.metrics.observeHas(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return eds, err -} - -func (s *Store) has(_ context.Context, root share.DataHash) (bool, error) { - key := root.String() - info, err := s.dgstr.GetShardInfo(shard.KeyFromString(key)) - switch err { - case nil: - return true, info.Error - case dagstore.ErrShardUnknown: - return false, info.Error - default: - return false, err - } -} - -// List lists all the registered EDSes. -func (s *Store) List() ([]share.DataHash, error) { - ctx, span := tracer.Start(context.Background(), "store/list") - tnow := time.Now() - hashes, err := s.list() - s.metrics.observeList(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return hashes, err -} - -func (s *Store) list() ([]share.DataHash, error) { - shards := s.dgstr.AllShardsInfo() - hashes := make([]share.DataHash, 0, len(shards)) - for shrd := range shards { - hash := share.MustDataHashFromString(shrd.String()) - hashes = append(hashes, hash) - } - return hashes, nil -} - -func setupPath(basepath string) error { - err := os.MkdirAll(basepath+blocksPath, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create blocks directory: %w", err) - } - err = os.MkdirAll(basepath+transientsPath, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create transients directory: %w", err) - } - err = os.MkdirAll(basepath+indexPath, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create index directory: %w", err) - } - return nil -} - -// inMemoryOnceMount is used to allow reading once from buffer before using main mount.Reader -type inMemoryOnceMount struct { - buf *bytes.Buffer - - readOnce atomic.Bool - mount.FileMount -} - -func (m *inMemoryOnceMount) Fetch(ctx context.Context) (mount.Reader, error) { - if m.buf != nil && !m.readOnce.Swap(true) { - reader := &inMemoryReader{Reader: bytes.NewReader(m.buf.Bytes())} - // release memory for gc, otherwise buffer will stick forever - m.buf = nil - return reader, nil - } - return m.FileMount.Fetch(ctx) -} - -func (m *inMemoryOnceMount) Write(b []byte) (int, error) { - return m.buf.Write(b) -} - -func (m *inMemoryOnceMount) WriteTo(w io.Writer) (int64, error) { - return io.Copy(w, bytes.NewReader(m.buf.Bytes())) -} - -// inMemoryReader extends bytes.Reader to implement mount.Reader interface -type inMemoryReader struct { - *bytes.Reader -} - -// Close allows inMemoryReader to satisfy mount.Reader interface -func (r *inMemoryReader) Close() error { - return nil -} diff --git a/share/eds/store_test.go b/share/eds/store_test.go deleted file mode 100644 index 09357347d0..0000000000 --- a/share/eds/store_test.go +++ /dev/null @@ -1,539 +0,0 @@ -package eds - -import ( - "context" - "io" - "os" - "sync" - "testing" - "time" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/pkg/da" - dsbadger "github.com/celestiaorg/go-ds-badger4" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/cache" - 
"github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -func TestEDSStore(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // PutRegistersShard tests if Put registers the shard on the underlying DAGStore - t.Run("PutRegistersShard", func(t *testing.T) { - eds, dah := randomEDS(t) - - // shard hasn't been registered yet - has, err := edsStore.Has(ctx, dah.Hash()) - assert.False(t, has) - assert.NoError(t, err) - - err = edsStore.Put(ctx, dah.Hash(), eds) - assert.NoError(t, err) - - _, err = edsStore.dgstr.GetShardInfo(shard.KeyFromString(dah.String())) - assert.NoError(t, err) - }) - - // PutIndexesEDS ensures that Putting an EDS indexes it into the car index - t.Run("PutIndexesEDS", func(t *testing.T) { - eds, dah := randomEDS(t) - - stat, _ := edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) - assert.False(t, stat.Exists) - - err = edsStore.Put(ctx, dah.Hash(), eds) - assert.NoError(t, err) - - stat, err = edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) - assert.True(t, stat.Exists) - assert.NoError(t, err) - }) - - // GetCAR ensures that the reader returned from GetCAR is capable of reading the CAR header and - // ODS. - t.Run("GetCAR", func(t *testing.T) { - eds, dah := randomEDS(t) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - r, err := edsStore.GetCAR(ctx, dah.Hash()) - assert.NoError(t, err) - defer func() { - require.NoError(t, r.Close()) - }() - carReader, err := car.NewCarReader(r) - assert.NoError(t, err) - - for i := 0; i < 4; i++ { - for j := 0; j < 4; j++ { - original := eds.GetCell(uint(i), uint(j)) - block, err := carReader.Next() - assert.NoError(t, err) - assert.Equal(t, original, share.GetData(block.RawData())) - } - } - }) - - t.Run("item not exist", func(t *testing.T) { - root := share.DataHash{1} - _, err := edsStore.GetCAR(ctx, root) - assert.ErrorIs(t, err, ErrNotFound) - - _, err = edsStore.GetDAH(ctx, root) - assert.ErrorIs(t, err, ErrNotFound) - - _, err = edsStore.CARBlockstore(ctx, root) - assert.ErrorIs(t, err, ErrNotFound) - }) - - t.Run("Remove", func(t *testing.T) { - eds, dah := randomEDS(t) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // assert that file now exists - _, err = os.Stat(edsStore.basepath + blocksPath + dah.String()) - assert.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - err = edsStore.Remove(ctx, dah.Hash()) - assert.NoError(t, err) - - // shard should no longer be registered on the dagstore - _, err = edsStore.dgstr.GetShardInfo(shard.KeyFromString(dah.String())) - assert.Error(t, err, "shard not found") - - // shard should have been dropped from the index, which also removes the file under /index/ - indexStat, err := edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) - assert.NoError(t, err) - assert.False(t, indexStat.Exists) - - // file no longer exists - _, err = os.Stat(edsStore.basepath + blocksPath + dah.String()) - assert.ErrorContains(t, err, "no such file or directory") - }) - - t.Run("Remove after OpShardFail", func(t *testing.T) { - eds, dah := randomEDS(t) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // assert that shard now exists - ok, err := edsStore.Has(ctx, dah.Hash()) - 
assert.NoError(t, err) - assert.True(t, ok) - - // assert that file now exists - path := edsStore.basepath + blocksPath + dah.String() - _, err = os.Stat(path) - assert.NoError(t, err) - - err = os.Remove(path) - assert.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // remove non-failed accessor from cache - err = edsStore.cache.Load().Remove(shard.KeyFromString(dah.String())) - assert.NoError(t, err) - - _, err = edsStore.GetCAR(ctx, dah.Hash()) - assert.Error(t, err) - - ticker := time.NewTicker(time.Millisecond * 100) - defer ticker.Stop() - for { - select { - case <-ticker.C: - has, err := edsStore.Has(ctx, dah.Hash()) - if err == nil && !has { - // shard no longer exists after OpShardFail was detected from GetCAR call - return - } - case <-ctx.Done(): - t.Fatal("timeout waiting for shard to be removed") - } - } - }) - - t.Run("Has", func(t *testing.T) { - eds, dah := randomEDS(t) - - ok, err := edsStore.Has(ctx, dah.Hash()) - assert.NoError(t, err) - assert.False(t, ok) - - err = edsStore.Put(ctx, dah.Hash(), eds) - assert.NoError(t, err) - - ok, err = edsStore.Has(ctx, dah.Hash()) - assert.NoError(t, err) - assert.True(t, ok) - }) - - t.Run("RecentBlocksCache", func(t *testing.T) { - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // check, that the key is in the cache after put - shardKey := shard.KeyFromString(dah.String()) - _, err = edsStore.cache.Load().Get(shardKey) - assert.NoError(t, err) - }) - - t.Run("List", func(t *testing.T) { - const amount = 10 - hashes := make([]share.DataHash, 0, amount) - for range make([]byte, amount) { - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - hashes = append(hashes, dah.Hash()) - } - - hashesOut, err := edsStore.List() - require.NoError(t, err) - for _, hash := range hashes { - assert.Contains(t, hashesOut, hash) - } - }) - - t.Run("Parallel put", func(t *testing.T) { - const amount = 20 - eds, dah := randomEDS(t) - - wg := sync.WaitGroup{} - for i := 1; i < amount; i++ { - wg.Add(1) - go func() { - defer wg.Done() - err := edsStore.Put(ctx, dah.Hash(), eds) - if err != nil { - require.ErrorIs(t, err, dagstore.ErrShardExists) - } - }() - } - wg.Wait() - - eds, err := edsStore.Get(ctx, dah.Hash()) - require.NoError(t, err) - newDah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(t, err) - require.Equal(t, dah.Hash(), newDah.Hash()) - }) -} - -// TestEDSStore_GC verifies that unused transient shards are collected by the GC periodically. 
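
The GC test below reaches into gcInterval directly; from outside the package
the same knobs are set through Parameters before the store is constructed. A
sketch using only fields this patch itself references (values illustrative):

params := eds.DefaultParameters()
params.GCInterval = time.Hour       // 0 disables the GC goroutine entirely
params.RecentBlocksCacheSize = 10   // accessors kept hot right after Put
params.BlockstoreCacheSize = 128    // accessors kept hot for blockstore reads
store, err := eds.NewStore(params, basePath, ds)
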
-func TestEDSStore_GC(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - edsStore.gcInterval = time.Second - require.NoError(t, err) - - // kicks off the gc goroutine - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds, dah := randomEDS(t) - shardKey := shard.KeyFromString(dah.String()) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // remove links to the shard from cache - time.Sleep(time.Millisecond * 100) - key := shard.KeyFromString(share.DataHash(dah.Hash()).String()) - err = edsStore.cache.Load().Remove(key) - require.NoError(t, err) - - // doesn't exist yet - assert.NotContains(t, edsStore.lastGCResult.Load().Shards, shardKey) - - // wait for gc to run, retry three times - for i := 0; i < 3; i++ { - time.Sleep(edsStore.gcInterval) - if _, ok := edsStore.lastGCResult.Load().Shards[shardKey]; ok { - break - } - } - assert.Contains(t, edsStore.lastGCResult.Load().Shards, shardKey) - - // assert nil in this context means there was no error re-acquiring the shard during GC - assert.Nil(t, edsStore.lastGCResult.Load().Shards[shardKey]) -} - -func Test_BlockstoreCache(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // store eds to the store with noopCache to allow clean cache after put - swap := edsStore.cache.Load() - edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})) - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // get any key from saved eds - bs, err := edsStore.carBlockstore(ctx, dah.Hash()) - require.NoError(t, err) - defer func() { - require.NoError(t, bs.Close()) - }() - keys, err := bs.AllKeysChan(ctx) - require.NoError(t, err) - var key cid.Cid - select { - case key = <-keys: - case <-ctx.Done(): - t.Fatal("context timeout") - } - - // swap back original cache - edsStore.cache.Store(swap) - - // key shouldn't be in cache yet, check for returned errCacheMiss - shardKey := shard.KeyFromString(dah.String()) - _, err = edsStore.cache.Load().Get(shardKey) - require.Error(t, err) - - // now get it from blockstore, to trigger storing to cache - _, err = edsStore.Blockstore().Get(ctx, key) - require.NoError(t, err) - - // should be no errCacheMiss anymore - _, err = edsStore.cache.Load().Get(shardKey) - require.NoError(t, err) -} - -// Test_CachedAccessor verifies that the reader represented by a cached accessor can be read from -// multiple times, without exhausting the underlying reader. 
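
Test_BlockstoreCache above isolates the cache path by swapping the live cache
for no-ops; the same lines are reusable in any test where caching would mask
the behaviour under inspection:

// Disable both cache layers, restoring the originals afterwards.
swap := edsStore.cache.Load()
edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{}))
defer edsStore.cache.Store(swap)
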
-func Test_CachedAccessor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // accessor should be in cache - _, err = edsStore.cache.Load().Get(shard.KeyFromString(dah.String())) - require.NoError(t, err) - - // first read from cached accessor - carReader, err := edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - firstBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - // second read from cached accessor - carReader, err = edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - secondBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - require.Equal(t, firstBlock, secondBlock) -} - -// Test_CachedAccessor verifies that the reader represented by a accessor obtained directly from -// dagstore can be read from multiple times, without exhausting the underlying reader. -func Test_NotCachedAccessor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - // replace cache with noopCache to - edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})) - - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // accessor should not be in cache - _, err = edsStore.cache.Load().Get(shard.KeyFromString(dah.String())) - require.Error(t, err) - - // first read from direct accessor (not from cache) - carReader, err := edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - firstBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - // second read from direct accessor (not from cache) - carReader, err = edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - secondBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - require.Equal(t, firstBlock, secondBlock) -} - -func BenchmarkStore(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - b.Cleanup(cancel) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := NewStore(DefaultParameters(), b.TempDir(), ds) - require.NoError(b, err) - err = edsStore.Start(ctx) - require.NoError(b, err) - - // BenchmarkStore/bench_put_128-10 10 3231859283 ns/op (~3sec) - b.Run("bench put 128", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // pause the timer for initializing test data - b.StopTimer() - eds := edstest.RandEDS(b, 128) - dah, err := share.NewRoot(eds) - require.NoError(b, err) - b.StartTimer() - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(b, err) - } - }) - - // BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) - b.Run("bench read 128", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // pause the timer for initializing test data - b.StopTimer() - eds := edstest.RandEDS(b, 128) - dah, err := 
share.NewRoot(eds) - require.NoError(b, err) - _ = edsStore.Put(ctx, dah.Hash(), eds) - b.StartTimer() - - _, err = edsStore.Get(ctx, dah.Hash()) - require.NoError(b, err) - } - }) -} - -// BenchmarkCacheEviction benchmarks the time it takes to load a block to the cache, when the -// cache size is set to 1. This forces cache eviction on every read. -// BenchmarkCacheEviction-10/128 384 3533586 ns/op (~3ms) -func BenchmarkCacheEviction(b *testing.B) { - const ( - blocks = 4 - size = 128 - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - b.Cleanup(cancel) - - dir := b.TempDir() - ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions) - require.NoError(b, err) - - newStore := func(params *Parameters) *Store { - edsStore, err := NewStore(params, dir, ds) - require.NoError(b, err) - err = edsStore.Start(ctx) - require.NoError(b, err) - return edsStore - } - edsStore := newStore(DefaultParameters()) - - // generate EDSs and store them - cids := make([]cid.Cid, blocks) - for i := range cids { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(b, err) - - // store cids for read loop later - cids[i] = ipld.MustCidFromNamespacedSha256(dah.RowRoots[0]) - } - - // restart store to clear cache - require.NoError(b, edsStore.Stop(ctx)) - - // set BlockstoreCacheSize to 1 to force eviction on every read - params := DefaultParameters() - params.BlockstoreCacheSize = 1 - bstore := newStore(params).Blockstore() - - // start benchmark - b.ResetTimer() - for i := 0; i < b.N; i++ { - h := cids[i%blocks] - // every read will trigger eviction - _, err := bstore.Get(ctx, h) - require.NoError(b, err) - } -} - -func newStore(t *testing.T) (*Store, error) { - t.Helper() - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - return NewStore(DefaultParameters(), t.TempDir(), ds) -} - -func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) { - eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) - require.NoError(t, err) - - return eds, dah -} diff --git a/share/eds/utils.go b/share/eds/utils.go deleted file mode 100644 index 0dda9c0da3..0000000000 --- a/share/eds/utils.go +++ /dev/null @@ -1,152 +0,0 @@ -package eds - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/filecoin-project/dagstore" - "github.com/ipfs/boxo/blockservice" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "golang.org/x/sync/errgroup" - - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/cache" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -// readCloser is a helper struct, that combines io.Reader and io.Closer -type readCloser struct { - io.Reader - io.Closer -} - -// BlockstoreCloser represents a blockstore that can also be closed. It combines the functionality -// of a dagstore.ReadBlockstore with that of an io.Closer. 
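
readCloser above and BlockstoreCloser below both get their method sets for
free through struct embedding. The idiom generalises to any interface pair;
a hypothetical example, not part of this patch:

// readSeekCloser satisfies io.ReadSeeker and io.Closer at once: Go promotes
// the embedded fields' methods, so no forwarding boilerplate is needed.
type readSeekCloser struct {
	io.ReadSeeker
	io.Closer
}
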
-type BlockstoreCloser struct { - dagstore.ReadBlockstore - io.Closer -} - -func newReadCloser(ac cache.Accessor) io.ReadCloser { - return readCloser{ - ac.Reader(), - ac, - } -} - -// blockstoreCloser constructs new BlockstoreCloser from cache.Accessor -func blockstoreCloser(ac cache.Accessor) (*BlockstoreCloser, error) { - bs, err := ac.Blockstore() - if err != nil { - return nil, fmt.Errorf("eds/store: failed to get blockstore: %w", err) - } - return &BlockstoreCloser{ - ReadBlockstore: bs, - Closer: ac, - }, nil -} - -func closeAndLog(name string, closer io.Closer) { - if err := closer.Close(); err != nil { - log.Warnw("closing "+name, "err", err) - } -} - -// RetrieveNamespaceFromStore gets all EDS shares in the given namespace from -// the EDS store through the corresponding CAR-level blockstore. It is extracted -// from the store getter to make it available for reuse in the shrexnd server. -func RetrieveNamespaceFromStore( - ctx context.Context, - store *Store, - dah *share.Root, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - if err = namespace.ValidateForData(); err != nil { - return nil, err - } - - bs, err := store.CARBlockstore(ctx, dah.Hash()) - if errors.Is(err, ErrNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("failed to retrieve blockstore from eds store: %w", err) - } - defer func() { - if err := bs.Close(); err != nil { - log.Warnw("closing blockstore", "err", err) - } - }() - - // wrap the read-only CAR blockstore in a getter - blockGetter := NewBlockGetter(bs) - shares, err = CollectSharesByNamespace(ctx, blockGetter, dah, namespace) - if errors.Is(err, ipld.ErrNodeNotFound) { - // IPLD node not found after the index pointed to this shard and the CAR - // blockstore has been opened successfully is a strong indicator of - // corruption. We remove the block on bridges and fulls and return - // share.ErrNotFound to ensure the data is retrieved by the next getter. - // Note that this recovery is manual and will only be restored by an RPC - // call to SharesAvailable that fetches the same datahash that was - // removed. - err = store.Remove(ctx, dah.Hash()) - if err != nil { - log.Errorf("failed to remove CAR from store after detected corruption: %w", err) - } - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("failed to retrieve shares by namespace from store: %w", err) - } - - return shares, nil -} - -// CollectSharesByNamespace collects NamespaceShares within the given namespace from share.Root. 
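[Editor's note] CollectSharesByNamespace below fans out one goroutine per matching row with errgroup, writing each result into its own pre-sized slot so no extra locking is needed. A standalone sketch of that fan-out pattern; fetchRow is a stand-in for the per-row work:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func fetchRow(ctx context.Context, i int) (string, error) {
	return fmt.Sprintf("row-%d", i), ctx.Err()
}

func main() {
	errGroup, ctx := errgroup.WithContext(context.Background())
	// pre-sized slice: each goroutine writes only its own index, so no mutex
	results := make([]string, 4)
	for i := range results {
		i := i // shadow the loop variable (pre-Go 1.22 capture semantics)
		errGroup.Go(func() error {
			row, err := fetchRow(ctx, i)
			if err != nil {
				return err
			}
			results[i] = row
			return nil
		})
	}
	// Wait returns the first error and cancels ctx for the remaining goroutines
	if err := errGroup.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results)
}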
-func CollectSharesByNamespace( - ctx context.Context, - bg blockservice.BlockGetter, - root *share.Root, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - ctx, span := tracer.Start(ctx, "collect-shares-by-namespace", trace.WithAttributes( - attribute.String("namespace", namespace.String()), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - rows := ipld.FilterRootByNamespace(root, namespace) - if len(rows) == 0 { - return []share.NamespacedRow{}, nil - } - - errGroup, ctx := errgroup.WithContext(ctx) - shares = make([]share.NamespacedRow, len(rows)) - for i, row := range rows { - // shadow loop variables, to ensure correct values are captured - i, row := i, row - errGroup.Go(func() error { - row, proof, err := ipld.GetSharesByNamespace(ctx, bg, row, namespace, len(root.RowRoots)) - shares[i] = share.NamespacedRow{ - Shares: row, - Proof: proof, - } - if err != nil { - return fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), row, err) - } - return nil - }) - } - - if err := errGroup.Wait(); err != nil { - return nil, err - } - - return shares, nil -} diff --git a/share/getters/ipld.go b/share/getters/ipld.go deleted file mode 100644 index e9c930248d..0000000000 --- a/share/getters/ipld.go +++ /dev/null @@ -1,165 +0,0 @@ -package getters - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - - "github.com/ipfs/boxo/blockservice" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -var _ share.Getter = (*IPLDGetter)(nil) - -// IPLDGetter is a share.Getter that retrieves shares from the bitswap network. Result caching is -// handled by the provided blockservice. A blockservice session will be created for retrieval if the -// passed context is wrapped with WithSession. -type IPLDGetter struct { - rtrv *eds.Retriever - bServ blockservice.BlockService -} - -// NewIPLDGetter creates a new share.Getter that retrieves shares from the bitswap network. -func NewIPLDGetter(bServ blockservice.BlockService) *IPLDGetter { - return &IPLDGetter{ - rtrv: eds.NewRetriever(bServ), - bServ: bServ, - } -} - -// GetShare gets a single share at the given EDS coordinates from the bitswap network. -func (ig *IPLDGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { - var err error - ctx, span := tracer.Start(ctx, "ipld/get-share", trace.WithAttributes( - attribute.Int("row", row), - attribute.Int("col", col), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - dah := header.DAH - upperBound := len(dah.RowRoots) - if row >= upperBound || col >= upperBound { - err := share.ErrOutOfBounds - span.RecordError(err) - return nil, err - } - root, leaf := ipld.Translate(dah, row, col) - - // wrap the blockservice in a session if it has been signaled in the context. 
- blockGetter := getGetter(ctx, ig.bServ) - s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/ipld: failed to retrieve share: %w", err) - } - - return s, nil -} - -func (ig *IPLDGetter) GetEDS( - ctx context.Context, - header *header.ExtendedHeader, -) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "ipld/get-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - // rtrv.Retrieve calls shares.GetShares until enough shares are retrieved to reconstruct the EDS - eds, err = ig.rtrv.Retrieve(ctx, header.DAH) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - var errByz *byzantine.ErrByzantine - if errors.As(err, &errByz) { - return nil, err - } - if err != nil { - return nil, fmt.Errorf("getter/ipld: failed to retrieve eds: %w", err) - } - return eds, nil -} - -func (ig *IPLDGetter) GetSharesByNamespace( - ctx context.Context, - header *header.ExtendedHeader, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - ctx, span := tracer.Start(ctx, "ipld/get-shares-by-namespace", trace.WithAttributes( - attribute.String("namespace", namespace.String()), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - if err = namespace.ValidateForData(); err != nil { - return nil, err - } - - // wrap the blockservice in a session if it has been signaled in the context. - blockGetter := getGetter(ctx, ig.bServ) - shares, err = eds.CollectSharesByNamespace(ctx, blockGetter, header.DAH, namespace) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/ipld: failed to retrieve shares by namespace: %w", err) - } - return shares, nil -} - -var sessionKey = &session{} - -// session is a struct that can optionally be passed by context to the share.Getter methods using -// WithSession to indicate that a blockservice session should be created. -type session struct { - sync.Mutex - atomic.Pointer[blockservice.Session] - ctx context.Context -} - -// WithSession stores an empty session in the context, indicating that a blockservice session should -// be created. 
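[Editor's note] getGetter below implements lazy, race-safe session creation: a lock-free atomic load on the hot path, then a mutex-guarded re-check before constructing the session exactly once. A generic sketch of that double-checked pattern; lazyValue and mk are illustrative names, not part of the patch:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyValue builds its payload at most once, on first use.
type lazyValue struct {
	sync.Mutex
	atomic.Pointer[string]
}

func (l *lazyValue) get(mk func() *string) *string {
	if v := l.Load(); v != nil {
		return v // fast path: already built, no locking
	}
	l.Lock()
	defer l.Unlock()
	if v := l.Load(); v != nil {
		return v // another goroutine built it while we waited for the lock
	}
	v := mk()
	l.Store(v)
	return v
}

func main() {
	var l lazyValue
	s := "session"
	fmt.Println(*l.get(func() *string { return &s }))
}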
-func WithSession(ctx context.Context) context.Context { - return context.WithValue(ctx, sessionKey, &session{ctx: ctx}) -} - -func getGetter(ctx context.Context, service blockservice.BlockService) blockservice.BlockGetter { - s, ok := ctx.Value(sessionKey).(*session) - if !ok { - return service - } - - val := s.Load() - if val != nil { - return val - } - - s.Lock() - defer s.Unlock() - val = s.Load() - if val == nil { - val = blockservice.NewSession(s.ctx, service) - s.Store(val) - } - return val -} diff --git a/share/getters/shrex.go b/share/getters/shrex.go index 0586826e22..3b6983e2cd 100644 --- a/share/getters/shrex.go +++ b/share/getters/shrex.go @@ -135,9 +135,8 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader utils.SetStatusAndEnd(span, err) }() - dah := header.DAH // short circuit if the data root is empty - if dah.Equals(share.EmptyRoot()) { + if header.DAH.Equals(share.EmptyRoot()) { return share.EmptyExtendedDataSquare(), nil } for { @@ -147,10 +146,11 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader } attempt++ start := time.Now() - peer, setStatus, getErr := sg.peerManager.Peer(ctx, dah.Hash()) + peer, setStatus, getErr := sg.peerManager.Peer(ctx, header.DAH.Hash()) if getErr != nil { log.Debugw("eds: couldn't find peer", - "hash", dah.String(), + "hash", header.DAH.String(), + "height", header.Height(), "err", getErr, "finished (s)", time.Since(start)) sg.metrics.recordEDSAttempt(ctx, attempt, false) @@ -159,7 +159,7 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - eds, getErr := sg.edsClient.RequestEDS(reqCtx, dah.Hash(), peer) + eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.Height(), header.DAH.Hash(), peer) cancel() switch { case getErr == nil: @@ -182,7 +182,7 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader err = errors.Join(err, getErr) } log.Debugw("eds: request failed", - "hash", dah.String(), + "height", header.Height(), "peer", peer.String(), "attempt", attempt, "err", getErr, @@ -227,6 +227,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( if getErr != nil { log.Debugw("nd: couldn't find peer", "hash", dah.String(), + "height", header.Height(), "namespace", namespace.String(), "err", getErr, "finished (s)", time.Since(start)) @@ -236,7 +237,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - nd, getErr := sg.ndClient.RequestND(reqCtx, dah, namespace, peer) + nd, getErr := sg.ndClient.RequestND(reqCtx, header.Height(), dah, namespace, peer) cancel() switch { case getErr == nil: @@ -265,7 +266,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( err = errors.Join(err, getErr) } log.Debugw("nd: request failed", - "hash", dah.String(), + "height", header.Height(), "namespace", namespace.String(), "peer", peer.String(), "attempt", attempt, diff --git a/share/getters/shwap.go b/share/getters/shwap.go new file mode 100644 index 0000000000..b3dfc31b05 --- /dev/null +++ b/share/getters/shwap.go @@ -0,0 +1,259 @@ +package getters + +import ( + "context" + "fmt" + "sync" + + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + 
+ "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +// TODO: GetRow method +type Getter struct { + fetch exchange.SessionExchange + bstore blockstore.Blockstore +} + +func NewGetter(fetch exchange.SessionExchange, bstore blockstore.Blockstore) *Getter { + return &Getter{fetch: fetch, bstore: bstore} +} + +func (g *Getter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + shrIdx := row*len(header.DAH.RowRoots) + col + shrs, err := g.GetShares(ctx, header, shrIdx) + if err != nil { + return nil, fmt.Errorf("getting shares: %w", err) + } + + if len(shrs) != 1 { + return nil, fmt.Errorf("expected 1 share, got %d", len(shrs)) + } + + return shrs[0], nil +} + +// TODO: Make GetSamples so it provides proofs to users. +// GetShares fetches in the Block/EDS by their indexes. +// Automatically caches them on the Blockstore. +// Guarantee that the returned shares are in the same order as shrIdxs. +func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smplIdxs ...int) ([]share.Share, error) { + sids := make([]shwap.SampleID, len(smplIdxs)) + for i, shrIdx := range smplIdxs { + sid, err := shwap.NewSampleID(hdr.Height(), shrIdx, hdr.DAH) + if err != nil { + return nil, err + } + + sids[i] = sid + } + + smplsMu := sync.Mutex{} + smpls := make(map[int]shwap.Sample, len(smplIdxs)) + verifyFn := func(s shwap.Sample) error { + err := s.Verify(hdr.DAH) + if err != nil { + return err + } + + smplIdx := int(s.SampleID.RowIndex)*len(hdr.DAH.RowRoots) + int(s.SampleID.ShareIndex) + smplsMu.Lock() + smpls[smplIdx] = s + smplsMu.Unlock() + return nil + } + + cids := make([]cid.Cid, len(smplIdxs)) + for i, sid := range sids { + sampleVerifiers.Add(sid, verifyFn) + cids[i] = sid.Cid() + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ses := g.fetch.NewSession(ctx) + // must start getting only after verifiers are registered + blkCh, err := ses.GetBlocks(ctx, cids) + if err != nil { + return nil, fmt.Errorf("fetching blocks: %w", err) + } + // GetBlocks handles ctx and closes blkCh, so we don't have to + blks := make([]block.Block, 0, len(smplIdxs)) + for blk := range blkCh { + blks = append(blks, blk) + } + // only persist when all samples received + if len(blks) != len(smplIdxs) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + return nil, fmt.Errorf("not all shares were found") + } + // ensure we persist samples/blks and make them available for Bitswap + err = g.bstore.PutMany(ctx, blks) + if err != nil { + return nil, fmt.Errorf("storing shares: %w", err) + } + // tell bitswap that we stored the blks and can serve them now + err = g.fetch.NotifyNewBlocks(ctx, blks...) 
+	if err != nil {
+		return nil, fmt.Errorf("notifying new shares: %w", err)
+	}
+
+	// ensure we return shares in the requested order
+	shrs := make([]share.Share, len(smplIdxs))
+	for i, smplIdx := range smplIdxs {
+		shrs[i] = smpls[smplIdx].SampleShare
+	}
+
+	return shrs, nil
+}
+
+// GetEDS
+// TODO(@Wondertan): Consider requesting randomized rows instead of ODS only
+func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) {
+	sqrLn := len(hdr.DAH.RowRoots)
+	rids := make([]shwap.RowID, sqrLn/2)
+	for i := 0; i < sqrLn/2; i++ {
+		rid, err := shwap.NewRowID(hdr.Height(), uint16(i), hdr.DAH)
+		if err != nil {
+			return nil, err
+		}
+
+		rids[i] = rid
+	}
+
+	square, err := rsmt2d.NewExtendedDataSquare(
+		share.DefaultRSMT2DCodec(),
+		wrapper.NewConstructor(uint64(sqrLn/2)), uint(sqrLn),
+		share.Size,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	verifyFn := func(row shwap.Row) error {
+		err := row.Verify(hdr.DAH)
+		if err != nil {
+			return err
+		}
+
+		for shrIdx, shr := range row.RowShares {
+			err = square.SetCell(uint(row.RowIndex), uint(shrIdx), shr) // no synchronization needed
+			if err != nil {
+				panic(err) // this should never happen and if it is... something is really wrong
+			}
+		}
+
+		return nil
+	}
+
+	cids := make([]cid.Cid, sqrLn/2)
+	for i, rid := range rids {
+		rowVerifiers.Add(rid, verifyFn)
+		cids[i] = rid.Cid()
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	ses := g.fetch.NewSession(ctx)
+	// must start getting only after verifiers are registered
+	blkCh, err := ses.GetBlocks(ctx, cids)
+	if err != nil {
+		return nil, fmt.Errorf("fetching blocks: %w", err)
+	}
+	// GetBlocks handles ctx by closing blkCh, so we don't have to
+	for range blkCh { //nolint:revive // it complains on empty block, but the code is functional
+		// we handle writes in verifyFn so just wait for as many results as possible
+	}
+
+	// and try to repair
+	err = square.Repair(hdr.DAH.RowRoots, hdr.DAH.ColumnRoots)
+	if err != nil {
+		if ctx.Err() != nil {
+			return nil, ctx.Err()
+		}
+		return nil, fmt.Errorf("repairing EDS: %w", err)
+	}
+
+	return square, nil
+}
+
+func (g *Getter) GetSharesByNamespace(
+	ctx context.Context,
+	hdr *header.ExtendedHeader,
+	ns share.Namespace,
+) (share.NamespacedShares, error) {
+	if err := ns.ValidateForData(); err != nil {
+		return nil, err
+	}
+
+	var dids []shwap.DataID //nolint:prealloc // we don't know how many rows with needed namespace there are
+	for rowIdx, rowRoot := range hdr.DAH.RowRoots {
+		if ns.IsOutsideRange(rowRoot, rowRoot) {
+			continue
+		}
+
+		did, err := shwap.NewDataID(hdr.Height(), uint16(rowIdx), ns, hdr.DAH)
+		if err != nil {
+			return nil, err
+		}
+
+		dids = append(dids, did)
+	}
+	if len(dids) == 0 {
+		return share.NamespacedShares{}, nil
+	}
+
+	datas := make([]shwap.Data, len(dids))
+	verifyFn := func(d shwap.Data) error {
+		err := d.Verify(hdr.DAH)
+		if err != nil {
+			return err
+		}
+
+		nsStartIdx := dids[0].RowIndex
+		idx := d.RowIndex - nsStartIdx
+		datas[idx] = d
+		return nil
+	}
+
+	cids := make([]cid.Cid, len(dids))
+	for i, did := range dids {
+		dataVerifiers.Add(did, verifyFn)
+		cids[i] = did.Cid()
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	ses := g.fetch.NewSession(ctx)
+	// must start getting only after verifiers are registered
+	blkCh, err := ses.GetBlocks(ctx, cids)
+	if err != nil {
+		return nil, fmt.Errorf("fetching blocks: %w", err)
+	}
+	// GetBlocks handles ctx by closing blkCh, so we don't have to
+	for range blkCh { //nolint:revive
// it complains on empty block, but the code is functional + // we handle writes in verifyFn so just wait for as many results as possible + } + + nShrs := make([]share.NamespacedRow, 0, len(datas)) + for _, row := range datas { + proof := row.DataProof + nShrs = append(nShrs, share.NamespacedRow{ + Shares: row.DataShares, + Proof: &proof, + }) + } + + return nShrs, nil +} diff --git a/share/getters/store.go b/share/getters/store.go index e5543c42b4..0c72fbedaf 100644 --- a/share/getters/store.go +++ b/share/getters/store.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/share/store" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -14,7 +13,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/store" ) var _ share.Getter = (*StoreGetter)(nil) @@ -53,7 +52,7 @@ func (sg *StoreGetter) GetShare(ctx context.Context, header *header.ExtendedHead } file, err := sg.store.GetByHash(ctx, dah.Hash()) - if errors.Is(err, eds.ErrNotFound) { + if errors.Is(err, store.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound } @@ -84,7 +83,7 @@ func (sg *StoreGetter) GetEDS( }() file, err := sg.store.GetByHash(ctx, header.DAH.Hash()) - if errors.Is(err, eds.ErrNotFound) { + if errors.Is(err, store.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound } @@ -115,7 +114,7 @@ func (sg *StoreGetter) GetSharesByNamespace( }() file, err := sg.store.GetByHash(ctx, header.DAH.Hash()) - if errors.Is(err, eds.ErrNotFound) { + if errors.Is(err, store.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound } diff --git a/share/ipld/blockserv.go b/share/ipld/blockserv.go index 2ed2a21c77..b7a9bf84e9 100644 --- a/share/ipld/blockserv.go +++ b/share/ipld/blockserv.go @@ -9,7 +9,7 @@ import ( ) // NewBlockservice constructs Blockservice for fetching NMTrees. -func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService { +func NewBlockservice(bs blockstore.Blockstore, exchange exchange.SessionExchange) blockservice.BlockService { return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist)) } diff --git a/share/ipld/corrupted_data_test.go b/share/ipld/corrupted_data_test.go index d1d6e6b4d5..50bf46e4d0 100644 --- a/share/ipld/corrupted_data_test.go +++ b/share/ipld/corrupted_data_test.go @@ -1,17 +1,7 @@ package ipld_test import ( - "context" - "testing" "time" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/availability/full" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/getters" ) // sharesAvailableTimeout is an arbitrarily picked interval of time in which a TestNode is expected @@ -20,32 +10,33 @@ const sharesAvailableTimeout = 2 * time.Second // TestNamespaceHasher_CorruptedData is an integration test that verifies that the NamespaceHasher // of a recipient of corrupted data will not panic, and will throw away the corrupted data. 
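[Editor's note] Stepping back to the shwap Getter earlier in this patch: GetEDS fetches only the original (ODS) rows and leaves the rest to Reed-Solomon recovery. A condensed sketch of that repair flow, reusing the same rsmt2d calls the patch itself makes; repairFromHalf is an illustrative helper, not part of the patch:

import (
	"github.com/celestiaorg/celestia-app/pkg/wrapper"
	"github.com/celestiaorg/rsmt2d"

	"github.com/celestiaorg/celestia-node/header"
	"github.com/celestiaorg/celestia-node/share"
)

// repairFromHalf fills only the fetched cells of an empty extended square and
// asks rsmt2d to recompute the rest, verifying against the DAH commitments.
func repairFromHalf(hdr *header.ExtendedHeader, rows [][]share.Share) (*rsmt2d.ExtendedDataSquare, error) {
	sqrLn := len(hdr.DAH.RowRoots)
	square, err := rsmt2d.NewExtendedDataSquare(
		share.DefaultRSMT2DCodec(),
		wrapper.NewConstructor(uint64(sqrLn/2)), uint(sqrLn),
		share.Size,
	)
	if err != nil {
		return nil, err
	}
	for rowIdx, row := range rows { // rows 0..sqrLn/2-1, i.e. the ODS half
		for colIdx, shr := range row {
			if err := square.SetCell(uint(rowIdx), uint(colIdx), shr); err != nil {
				return nil, err
			}
		}
	}
	// recompute the missing quadrants; fails if shares don't match the roots
	if err := square.Repair(hdr.DAH.RowRoots, hdr.DAH.ColumnRoots); err != nil {
		return nil, err
	}
	return square, nil
}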
-func TestNamespaceHasher_CorruptedData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - net := availability_test.NewTestDAGNet(ctx, t) - - requestor := full.Node(net) - provider, mockBS := availability_test.MockNode(t, net) - provider.Availability = full.TestAvailability(t, getters.NewIPLDGetter(provider.BlockService)) - net.ConnectAll() - - // before the provider starts attacking, we should be able to retrieve successfully. We pass a size - // 16 block, but this is not important to the test and any valid block size behaves the same. - root := availability_test.RandFillBS(t, 16, provider.BlockService) - - eh := headertest.RandExtendedHeaderWithRoot(t, root) - getCtx, cancelGet := context.WithTimeout(ctx, sharesAvailableTimeout) - t.Cleanup(cancelGet) - err := requestor.SharesAvailable(getCtx, eh) - require.NoError(t, err) - - // clear the storage of the requester so that it must retrieve again, then start attacking - // we reinitialize the node to clear the eds store - requestor = full.Node(net) - mockBS.Attacking = true - getCtx, cancelGet = context.WithTimeout(ctx, sharesAvailableTimeout) - t.Cleanup(cancelGet) - err = requestor.SharesAvailable(getCtx, eh) - require.ErrorIs(t, err, share.ErrNotAvailable) -} +//FIXME: fix this test +//func TestNamespaceHasher_CorruptedData(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// t.Cleanup(cancel) +// net := availability_test.NewTestDAGNet(ctx, t) +// +// requestor := full.Node(net) +// provider, mockBS := availability_test.MockNode(t, net) +// provider.Availability = full.TestAvailability(t, getters.NewIPLDGetter(provider.BlockService)) +// net.ConnectAll() +// +// // before the provider starts attacking, we should be able to retrieve successfully. We pass a size +// // 16 block, but this is not important to the test and any valid block size behaves the same. 
+// root := availability_test.RandFillBS(t, 16, provider.BlockService) +// +// eh := headertest.RandExtendedHeaderWithRoot(t, root) +// getCtx, cancelGet := context.WithTimeout(ctx, sharesAvailableTimeout) +// t.Cleanup(cancelGet) +// err := requestor.SharesAvailable(getCtx, eh) +// require.NoError(t, err) +// +// // clear the storage of the requester so that it must retrieve again, then start attacking +// // we reinitialize the node to clear the eds store +// requestor = full.Node(net) +// mockBS.Attacking = true +// getCtx, cancelGet = context.WithTimeout(ctx, sharesAvailableTimeout) +// t.Cleanup(cancelGet) +// err = requestor.SharesAvailable(getCtx, eh) +// require.ErrorIs(t, err, share.ErrNotAvailable) +//} diff --git a/share/ipld/get_shares_test.go b/share/ipld/get_shares_test.go index 580efcb69b..3bcdcf2753 100644 --- a/share/ipld/get_shares_test.go +++ b/share/ipld/get_shares_test.go @@ -175,8 +175,7 @@ func TestGetSharesByNamespace(t *testing.T) { rowRoots, err := eds.RowRoots() require.NoError(t, err) for _, row := range rowRoots { - rcid := MustCidFromNamespacedSha256(row) - rowShares, _, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + rowShares, _, err := GetSharesByNamespace(ctx, bServ, row, namespace, len(rowRoots)) if errors.Is(err, ErrNamespaceOutsideRange) { continue } @@ -364,8 +363,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { rowRoots, err := eds.RowRoots() require.NoError(t, err) for _, row := range rowRoots { - rcid := MustCidFromNamespacedSha256(row) - rowShares, proof, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + rowShares, proof, err := GetSharesByNamespace(ctx, bServ, row, namespace, len(rowRoots)) if namespace.IsOutsideRange(row, row) { require.ErrorIs(t, err, ErrNamespaceOutsideRange) continue @@ -387,7 +385,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { sha256.New(), namespace.ToNMT(), leaves, - NamespacedSha256FromCID(rcid)) + row) require.True(t, verified) // verify inclusion @@ -395,7 +393,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { sha256.New(), namespace.ToNMT(), rowShares, - NamespacedSha256FromCID(rcid)) + row) require.True(t, verified) } } diff --git a/share/ipld/namespace_data.go b/share/ipld/namespace_data.go index 5a6fd2abb4..0c7d73b984 100644 --- a/share/ipld/namespace_data.go +++ b/share/ipld/namespace_data.go @@ -2,6 +2,7 @@ package ipld import ( "context" + "encoding/hex" "errors" "fmt" "sync" @@ -79,6 +80,7 @@ func (n *NamespaceData) validate(rootCid cid.Cid) error { root := NamespacedSha256FromCID(rootCid) if n.namespace.IsOutsideRange(root, root) { + fmt.Println("look", n.namespace.String(), hex.EncodeToString(root)) return ErrNamespaceOutsideRange } return nil diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index 956dc1243d..4c5cc0ba56 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/share/store/file" "io" "net" "time" @@ -20,6 +19,7 @@ import ( "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/p2p" pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" + "github.com/celestiaorg/celestia-node/share/store/file" ) // Client is responsible for requesting EDSs for blocksync over the ShrEx/EDS protocol. 
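[Editor's note] The ShrEx/EDS hunks below (and the regenerated protobuf) replace the 32-byte data-hash request key with a block height. On the wire that shrinks a small-height request to two bytes: field 1 with varint wire type. A stdlib-only sketch of that encoding (protobuf varints are unsigned LEB128, which binary.PutUvarint produces):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// EDSRequest{Height: 42}: tag byte 0x08 = field number 1<<3 | wire type 0
	// (varint), followed by the varint-encoded value.
	buf := []byte{0x08}
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], 42)
	buf = append(buf, tmp[:n]...)
	fmt.Printf("% x\n", buf) // 08 2a — the old hash-keyed request took 34 bytes
}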
@@ -47,14 +47,18 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) { // RequestEDS requests the ODS from the given peers and returns the EDS upon success. func (c *Client) RequestEDS( ctx context.Context, + height uint64, dataHash share.DataHash, peer peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { - eds, err := c.doRequest(ctx, dataHash, peer) + eds, err := c.doRequest(ctx, height, dataHash, peer) if err == nil { return eds, nil } - log.Debugw("client: eds request to peer failed", "peer", peer.String(), "hash", dataHash.String(), "error", err) + log.Debugw("client: eds request to peer failed", + "height", height, + "peer", peer.String(), + "error", err) if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) return nil, err @@ -71,7 +75,7 @@ func (c *Client) RequestEDS( if err != p2p.ErrNotFound { log.Warnw("client: eds request to peer failed", "peer", peer.String(), - "hash", dataHash.String(), + "height", height, "err", err) } @@ -80,6 +84,7 @@ func (c *Client) RequestEDS( func (c *Client) doRequest( ctx context.Context, + height uint64, dataHash share.DataHash, to peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { @@ -93,10 +98,12 @@ func (c *Client) doRequest( c.setStreamDeadlines(ctx, stream) - req := &pb.EDSRequest{Hash: dataHash} + req := &pb.EDSRequest{Height: height} // request ODS - log.Debugw("client: requesting ods", "hash", dataHash.String(), "peer", to.String()) + log.Debugw("client: requesting ods", + "height", height, + "peer", to.String()) _, err = serde.Write(stream, req) if err != nil { stream.Reset() //nolint:errcheck diff --git a/share/p2p/shrexeds/params.go b/share/p2p/shrexeds/params.go index 795cb313ed..d2adad2930 100644 --- a/share/p2p/shrexeds/params.go +++ b/share/p2p/shrexeds/params.go @@ -8,7 +8,7 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p" ) -const protocolString = "/shrex/eds/v0.0.1" +const protocolString = "/shrex/eds/v0.0.2" var log = logging.Logger("shrex/eds") diff --git a/share/p2p/shrexeds/pb/extended_data_square.pb.go b/share/p2p/shrexeds/pb/extended_data_square.pb.go index ed1a96ae3b..12b592afdc 100644 --- a/share/p2p/shrexeds/pb/extended_data_square.pb.go +++ b/share/p2p/shrexeds/pb/extended_data_square.pb.go @@ -54,7 +54,7 @@ func (Status) EnumDescriptor() ([]byte, []int) { } type EDSRequest struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } func (m *EDSRequest) Reset() { *m = EDSRequest{} } @@ -90,11 +90,11 @@ func (m *EDSRequest) XXX_DiscardUnknown() { var xxx_messageInfo_EDSRequest proto.InternalMessageInfo -func (m *EDSRequest) GetHash() []byte { +func (m *EDSRequest) GetHeight() uint64 { if m != nil { - return m.Hash + return m.Height } - return nil + return 0 } type EDSResponse struct { @@ -152,22 +152,22 @@ func init() { } var fileDescriptor_49d42aa96098056e = []byte{ - // 227 bytes of a gzipped FileDescriptorProto + // 229 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x28, 0xce, 0x48, 0x2c, 0x4a, 0xd5, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, 0xad, 0x48, 0x4d, 0x29, 0xd6, 0x2f, 0x48, 0xd2, 0x4f, 0xad, 0x28, 0x49, 0xcd, 0x4b, 0x49, 0x4d, 0x89, 0x4f, 0x49, 0x2c, 0x49, 0x8c, - 0x2f, 0x2e, 0x2c, 0x4d, 0x2c, 0x4a, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x52, 0xe0, 0xe2, - 0x72, 0x75, 0x09, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 
0x12, 0xe2, 0x62, 0xc9, 0x48, - 0x2c, 0xce, 0x90, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xb8, 0xc1, - 0x2a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe4, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, - 0x8b, 0xc1, 0x8a, 0xf8, 0x8c, 0xd8, 0xf5, 0x82, 0xc1, 0xdc, 0x20, 0xa8, 0xb0, 0x96, 0x15, 0x17, - 0x1b, 0x44, 0x44, 0x88, 0x9b, 0x8b, 0xdd, 0xd3, 0x2f, 0xcc, 0xd1, 0xc7, 0xd3, 0x45, 0x80, 0x41, - 0x88, 0x8d, 0x8b, 0xc9, 0xdf, 0x5b, 0x80, 0x51, 0x88, 0x97, 0x8b, 0xd3, 0xcf, 0x3f, 0x24, 0xde, - 0xcd, 0x3f, 0xd4, 0xcf, 0x45, 0x80, 0x49, 0x88, 0x87, 0x8b, 0xc3, 0xd3, 0x2f, 0xc4, 0x35, 0xc8, - 0xcf, 0xd1, 0x47, 0x80, 0xd9, 0x49, 0xe2, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, - 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, - 0x92, 0xd8, 0xc0, 0xce, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x1d, 0xd4, 0xa7, 0xe2, - 0x00, 0x00, 0x00, + 0x2f, 0x2e, 0x2c, 0x4d, 0x2c, 0x4a, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x52, 0xe1, 0xe2, + 0x72, 0x75, 0x09, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, 0xe3, 0x62, 0xcb, 0x48, + 0xcd, 0x4c, 0xcf, 0x28, 0x91, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x09, 0x82, 0xf2, 0x94, 0xf4, 0xb8, + 0xb8, 0xc1, 0xaa, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe4, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, + 0x4b, 0x4a, 0x8b, 0xc1, 0xca, 0xf8, 0x8c, 0xd8, 0xf5, 0x82, 0xc1, 0xdc, 0x20, 0xa8, 0xb0, 0x96, + 0x15, 0x17, 0x1b, 0x44, 0x44, 0x88, 0x9b, 0x8b, 0xdd, 0xd3, 0x2f, 0xcc, 0xd1, 0xc7, 0xd3, 0x45, + 0x80, 0x41, 0x88, 0x8d, 0x8b, 0xc9, 0xdf, 0x5b, 0x80, 0x51, 0x88, 0x97, 0x8b, 0xd3, 0xcf, 0x3f, + 0x24, 0xde, 0xcd, 0x3f, 0xd4, 0xcf, 0x45, 0x80, 0x49, 0x88, 0x87, 0x8b, 0xc3, 0xd3, 0x2f, 0xc4, + 0x35, 0xc8, 0xcf, 0xd1, 0x47, 0x80, 0xd9, 0x49, 0xe2, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, + 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, + 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x4e, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x6e, 0x10, + 0xf2, 0xe6, 0x00, 0x00, 0x00, } func (m *EDSRequest) Marshal() (dAtA []byte, err error) { @@ -190,12 +190,10 @@ func (m *EDSRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintExtendedDataSquare(dAtA, i, uint64(len(m.Hash))) + if m.Height != 0 { + i = encodeVarintExtendedDataSquare(dAtA, i, uint64(m.Height)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -245,9 +243,8 @@ func (m *EDSRequest) Size() (n int) { } var l int _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovExtendedDataSquare(uint64(l)) + if m.Height != 0 { + n += 1 + sovExtendedDataSquare(uint64(m.Height)) } return n } @@ -300,10 +297,10 @@ func (m *EDSRequest) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var byteLen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExtendedDataSquare @@ -313,26 +310,11 @@ func (m *EDSRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthExtendedDataSquare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthExtendedDataSquare - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipExtendedDataSquare(dAtA[iNdEx:]) diff --git a/share/p2p/shrexeds/pb/extended_data_square.proto b/share/p2p/shrexeds/pb/extended_data_square.proto index 63750962e9..636d01697a 100644 --- a/share/p2p/shrexeds/pb/extended_data_square.proto +++ b/share/p2p/shrexeds/pb/extended_data_square.proto @@ -1,7 +1,7 @@ syntax = "proto3"; message EDSRequest { - bytes hash = 1; // identifies the requested EDS. + uint64 height = 1; // identifies the requested EDS. } enum Status { diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go index a6e1e002d8..3bc6fef50e 100644 --- a/share/p2p/shrexeds/server.go +++ b/share/p2p/shrexeds/server.go @@ -4,9 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share/store" - "github.com/celestiaorg/celestia-node/share/store/file" "io" "time" @@ -17,10 +14,11 @@ import ( "github.com/celestiaorg/go-libp2p-messenger/serde" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share/p2p" p2p_pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" + "github.com/celestiaorg/celestia-node/share/store" + "github.com/celestiaorg/celestia-node/share/store/file" ) // Server is responsible for serving ODSs for blocksync over the ShrEx/EDS protocol. @@ -86,15 +84,7 @@ func (s *Server) handleStream(stream network.Stream) { return } - // ensure the requested dataHash is a valid root - hash := share.DataHash(req.Hash) - err = hash.Validate() - if err != nil { - logger.Warnw("server: invalid request", "err", err) - stream.Reset() //nolint:errcheck - return - } - logger = logger.With("hash", hash.String()) + logger = logger.With("height", req.Height) ctx, cancel := context.WithTimeout(s.ctx, s.params.HandleRequestTimeout) defer cancel() @@ -102,13 +92,13 @@ func (s *Server) handleStream(stream network.Stream) { // determine whether the EDS is available in our store // we do not close the reader, so that other requests will not need to re-open the file. // closing is handled by the LRU cache. - file, err := s.store.GetByHash(ctx, hash) + file, err := s.store.GetByHeight(ctx, req.Height) var status p2p_pb.Status switch { case err == nil: defer utils.CloseAndLog(logger, "file", file) status = p2p_pb.Status_OK - case errors.Is(err, eds.ErrNotFound): + case errors.Is(err, store.ErrNotFound): logger.Warnw("server: request hash not found") s.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) status = p2p_pb.Status_NOT_FOUND diff --git a/share/p2p/shrexnd/client.go b/share/p2p/shrexnd/client.go index 3ae3cc0134..731128c938 100644 --- a/share/p2p/shrexnd/client.go +++ b/share/p2p/shrexnd/client.go @@ -48,6 +48,7 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) { // Returns NamespacedShares with unverified inclusion proofs against the share.Root. 
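[Editor's note] Since RequestND hands back rows with unverified proofs, callers are expected to check each proof against its row root before trusting the data. A sketch using the same nmt verification call exercised in the tests earlier in this patch; verifyRows and the fromRow pairing are illustrative, not part of the patch:

import (
	"crypto/sha256"
	"fmt"

	"github.com/celestiaorg/celestia-node/share"
)

// verifyRows checks every returned row against its NMT row root.
// fromRow anchors the response rows to their position in the DAH.
func verifyRows(rows share.NamespacedShares, rowRoots [][]byte, ns share.Namespace, fromRow int) error {
	for i, row := range rows {
		root := rowRoots[fromRow+i]
		if !row.Proof.VerifyNamespace(sha256.New(), ns.ToNMT(), row.Shares, root) {
			return fmt.Errorf("row %d: namespace inclusion proof failed", fromRow+i)
		}
	}
	return nil
}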
func (c *Client) RequestND( ctx context.Context, + height uint64, root *share.Root, namespace share.Namespace, peer peer.ID, @@ -56,7 +57,7 @@ func (c *Client) RequestND( return nil, err } - shares, err := c.doRequest(ctx, root, namespace, peer) + shares, err := c.doRequest(ctx, height, root, namespace, peer) if err == nil { return shares, nil } @@ -81,6 +82,7 @@ func (c *Client) RequestND( func (c *Client) doRequest( ctx context.Context, + height uint64, root *share.Root, namespace share.Namespace, peerID peer.ID, @@ -95,7 +97,7 @@ func (c *Client) doRequest( from, to := share.RowRangeForNamespace(root, namespace) req := &pb.GetSharesByNamespaceRequest{ - RootHash: root.Hash(), + Height: height, Namespace: namespace, FromRow: uint32(from), ToRow: uint32(to), diff --git a/share/p2p/shrexnd/params.go b/share/p2p/shrexnd/params.go index 8489627a07..921999372f 100644 --- a/share/p2p/shrexnd/params.go +++ b/share/p2p/shrexnd/params.go @@ -8,7 +8,7 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p" ) -const protocolString = "/shrex/nd/v0.0.3" +const protocolString = "/shrex/nd/v0.0.4" var log = logging.Logger("shrex/nd") diff --git a/share/p2p/shrexnd/pb/share.pb.go b/share/p2p/shrexnd/pb/share.pb.go index 80c12c6465..ea510234e2 100644 --- a/share/p2p/shrexnd/pb/share.pb.go +++ b/share/p2p/shrexnd/pb/share.pb.go @@ -54,12 +54,11 @@ func (StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor_ed9f13149b0de397, []int{0} } -// FIXME(@walldiss): Needs to be regenerated type GetSharesByNamespaceRequest struct { - RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - FromRow uint32 - ToRow uint32 + FromRow uint32 `protobuf:"varint,3,opt,name=fromRow,proto3" json:"fromRow,omitempty"` + ToRow uint32 `protobuf:"varint,4,opt,name=toRow,proto3" json:"toRow,omitempty"` } func (m *GetSharesByNamespaceRequest) Reset() { *m = GetSharesByNamespaceRequest{} } @@ -95,11 +94,11 @@ func (m *GetSharesByNamespaceRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetSharesByNamespaceRequest proto.InternalMessageInfo -func (m *GetSharesByNamespaceRequest) GetRootHash() []byte { +func (m *GetSharesByNamespaceRequest) GetHeight() uint64 { if m != nil { - return m.RootHash + return m.Height } - return nil + return 0 } func (m *GetSharesByNamespaceRequest) GetNamespace() []byte { @@ -109,6 +108,20 @@ func (m *GetSharesByNamespaceRequest) GetNamespace() []byte { return nil } +func (m *GetSharesByNamespaceRequest) GetFromRow() uint32 { + if m != nil { + return m.FromRow + } + return 0 +} + +func (m *GetSharesByNamespaceRequest) GetToRow() uint32 { + if m != nil { + return m.ToRow + } + return 0 +} + type GetSharesByNamespaceStatusResponse struct { Status StatusCode `protobuf:"varint,1,opt,name=status,proto3,enum=share.p2p.shrex.nd.StatusCode" json:"status,omitempty"` } @@ -215,28 +228,30 @@ func init() { func init() { proto.RegisterFile("share/p2p/shrexnd/pb/share.proto", fileDescriptor_ed9f13149b0de397) } var fileDescriptor_ed9f13149b0de397 = []byte{ - // 326 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0x93, 0x96, 0x37, 0x6f, 0x3b, 0xad, 0x35, 0x2c, 0x22, 0xc5, 0xca, 0x52, 0x02, 0x42, - 0xf1, 0xb0, 0x81, 0x08, 0x1e, 0x85, 0xd6, 0xfa, 0xa7, 0x58, 0x52, 0xd9, 0xb6, 0xe2, 
0x41, 0x28, - 0x1b, 0xbb, 0x92, 0x8b, 0xd9, 0x35, 0xbb, 0x45, 0xfd, 0x16, 0x7e, 0x2c, 0x8f, 0x3d, 0x7a, 0x94, - 0xf6, 0x8b, 0x48, 0xb6, 0xd1, 0x1c, 0xf4, 0xb6, 0xf3, 0xcc, 0x33, 0xbf, 0x7d, 0x66, 0xa0, 0xad, - 0x62, 0x96, 0x72, 0x5f, 0x06, 0xd2, 0x57, 0x71, 0xca, 0x5f, 0x92, 0xb9, 0x2f, 0x23, 0xdf, 0x88, - 0x44, 0xa6, 0x42, 0x0b, 0x84, 0xf2, 0x22, 0x90, 0xc4, 0x38, 0x48, 0x32, 0xdf, 0x6b, 0xc8, 0xc8, - 0x97, 0xa9, 0x10, 0x0f, 0x1b, 0x8f, 0x77, 0x0b, 0xad, 0x0b, 0xae, 0xc7, 0x99, 0x51, 0xf5, 0x5e, - 0x43, 0xf6, 0xc8, 0x95, 0x64, 0xf7, 0x9c, 0xf2, 0xa7, 0x05, 0x57, 0x1a, 0xb5, 0xa0, 0x9a, 0x0a, - 0xa1, 0x67, 0x31, 0x53, 0x71, 0xd3, 0x6e, 0xdb, 0x9d, 0x3a, 0xad, 0x64, 0xc2, 0x25, 0x53, 0x31, - 0xda, 0x87, 0x6a, 0xf2, 0x3d, 0xd0, 0x2c, 0x99, 0x66, 0x21, 0x78, 0x77, 0xe0, 0xfd, 0x45, 0x1e, - 0x6b, 0xa6, 0x17, 0x8a, 0x72, 0x25, 0x45, 0xa2, 0x38, 0x3a, 0x06, 0x47, 0x19, 0xc5, 0xd0, 0x1b, - 0x01, 0x26, 0xbf, 0x43, 0x93, 0xcd, 0xcc, 0xa9, 0x98, 0x73, 0x9a, 0xbb, 0xbd, 0x29, 0xec, 0x14, - 0x61, 0xc5, 0xf3, 0x0f, 0x6f, 0x17, 0x1c, 0x03, 0xc8, 0x78, 0xe5, 0x4e, 0x9d, 0xe6, 0x15, 0x3a, - 0x80, 0x7f, 0x66, 0x6d, 0x93, 0xb3, 0x16, 0x6c, 0x93, 0xfc, 0x08, 0x11, 0xb9, 0xce, 0x1e, 0x74, - 0xd3, 0x3d, 0x3c, 0x01, 0x28, 0x3e, 0x43, 0x35, 0xf8, 0x3f, 0x08, 0x6f, 0xba, 0xc3, 0x41, 0xdf, - 0xb5, 0x90, 0x03, 0xa5, 0xd1, 0x95, 0x6b, 0xa3, 0x2d, 0xa8, 0x86, 0xa3, 0xc9, 0xec, 0x7c, 0x34, - 0x0d, 0xfb, 0x6e, 0x09, 0xd5, 0xa1, 0x32, 0x08, 0x27, 0x67, 0x34, 0xec, 0x0e, 0xdd, 0x72, 0xaf, - 0xf9, 0xbe, 0xc2, 0xf6, 0x72, 0x85, 0xed, 0xcf, 0x15, 0xb6, 0xdf, 0xd6, 0xd8, 0x5a, 0xae, 0xb1, - 0xf5, 0xb1, 0xc6, 0x56, 0xe4, 0x98, 0x7b, 0x1f, 0x7d, 0x05, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x53, - 0xb4, 0x86, 0xb7, 0x01, 0x00, 0x00, + // 353 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xe3, 0x40, + 0x18, 0xc6, 0x33, 0xfd, 0x93, 0x6e, 0xdf, 0xfe, 0xd9, 0x30, 0x94, 0x12, 0x76, 0x97, 0x10, 0x02, + 0x0b, 0x61, 0x0f, 0x09, 0x64, 0xc1, 0xa3, 0xd0, 0x5a, 0x95, 0x62, 0x49, 0x65, 0xda, 0x7a, 0x12, + 0x24, 0xb1, 0x53, 0xe3, 0xa1, 0x99, 0x31, 0x93, 0x52, 0x3d, 0xfb, 0x05, 0xfc, 0x58, 0x1e, 0x7b, + 0xf4, 0x28, 0xed, 0x17, 0x91, 0x4c, 0x52, 0x7b, 0xd0, 0x5b, 0x9e, 0x27, 0xbf, 0x79, 0xe6, 0x7d, + 0xde, 0x01, 0x53, 0x44, 0x41, 0x42, 0x5d, 0xee, 0x71, 0x57, 0x44, 0x09, 0x7d, 0x8c, 0xe7, 0x2e, + 0x0f, 0x5d, 0x69, 0x3a, 0x3c, 0x61, 0x29, 0xc3, 0xb8, 0x10, 0x1e, 0x77, 0x24, 0xe1, 0xc4, 0xf3, + 0x5f, 0x6d, 0x1e, 0xba, 0x3c, 0x61, 0x6c, 0x91, 0x33, 0xd6, 0x33, 0x82, 0xdf, 0xe7, 0x34, 0x9d, + 0x64, 0xa4, 0xe8, 0x3f, 0xf9, 0xc1, 0x92, 0x0a, 0x1e, 0xdc, 0x52, 0x42, 0x1f, 0x56, 0x54, 0xa4, + 0xb8, 0x0b, 0x6a, 0x44, 0xef, 0xef, 0xa2, 0x54, 0x47, 0x26, 0xb2, 0x2b, 0xa4, 0x50, 0xf8, 0x0f, + 0xd4, 0xe3, 0x3d, 0xab, 0x97, 0x4c, 0x64, 0x37, 0xc9, 0xc1, 0xc0, 0x3a, 0xd4, 0x16, 0x09, 0x5b, + 0x12, 0xb6, 0xd6, 0xcb, 0x26, 0xb2, 0x5b, 0x64, 0x2f, 0x71, 0x07, 0xaa, 0x29, 0xcb, 0xfc, 0x8a, + 0xf4, 0x73, 0x61, 0x5d, 0x83, 0xf5, 0xdd, 0x10, 0x93, 0x34, 0x48, 0x57, 0x82, 0x50, 0xc1, 0x59, + 0x2c, 0x28, 0x3e, 0x02, 0x55, 0x48, 0x47, 0xce, 0xd2, 0xf6, 0x0c, 0xe7, 0x6b, 0x41, 0x27, 0x3f, + 0x73, 0xc2, 0xe6, 0x94, 0x14, 0xb4, 0x35, 0x83, 0xce, 0xa1, 0x17, 0x5b, 0x7f, 0xe6, 0x75, 0x41, + 0x95, 0x01, 0x59, 0x5e, 0xd9, 0x6e, 0x92, 0x42, 0xe1, 0xbf, 0x50, 0x95, 0x2b, 0x92, 0xbd, 0x1a, + 0xde, 0x4f, 0xa7, 0x58, 0x58, 0xe8, 0x5c, 0x66, 0x1f, 0x24, 0xff, 0xfb, 0xef, 0x18, 0xe0, 0x70, + 0x19, 0x6e, 0x40, 0x6d, 0xe8, 0x5f, 0xf5, 0x46, 0xc3, 0x81, 0xa6, 0x60, 0x15, 0x4a, 0xe3, 0x0b, + 0x0d, 0xe1, 0x16, 0xd4, 
0xfd, 0xf1, 0xf4, 0xe6, 0x6c, 0x3c, 0xf3, 0x07, 0x5a, 0x09, 0x37, 0xe1, + 0xc7, 0xd0, 0x9f, 0x9e, 0x12, 0xbf, 0x37, 0xd2, 0xca, 0x7d, 0xfd, 0x75, 0x6b, 0xa0, 0xcd, 0xd6, + 0x40, 0xef, 0x5b, 0x03, 0xbd, 0xec, 0x0c, 0x65, 0xb3, 0x33, 0x94, 0xb7, 0x9d, 0xa1, 0x84, 0xaa, + 0x7c, 0x9b, 0xff, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x43, 0xb4, 0x09, 0x76, 0xe3, 0x01, 0x00, + 0x00, } func (m *GetSharesByNamespaceRequest) Marshal() (dAtA []byte, err error) { @@ -259,6 +274,16 @@ func (m *GetSharesByNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l + if m.ToRow != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.ToRow)) + i-- + dAtA[i] = 0x20 + } + if m.FromRow != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.FromRow)) + i-- + dAtA[i] = 0x18 + } if len(m.Namespace) > 0 { i -= len(m.Namespace) copy(dAtA[i:], m.Namespace) @@ -266,12 +291,10 @@ func (m *GetSharesByNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, er i-- dAtA[i] = 0x12 } - if len(m.RootHash) > 0 { - i -= len(m.RootHash) - copy(dAtA[i:], m.RootHash) - i = encodeVarintShare(dAtA, i, uint64(len(m.RootHash))) + if m.Height != 0 { + i = encodeVarintShare(dAtA, i, uint64(m.Height)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -365,14 +388,19 @@ func (m *GetSharesByNamespaceRequest) Size() (n int) { } var l int _ = l - l = len(m.RootHash) - if l > 0 { - n += 1 + l + sovShare(uint64(l)) + if m.Height != 0 { + n += 1 + sovShare(uint64(m.Height)) } l = len(m.Namespace) if l > 0 { n += 1 + l + sovShare(uint64(l)) } + if m.FromRow != 0 { + n += 1 + sovShare(uint64(m.FromRow)) + } + if m.ToRow != 0 { + n += 1 + sovShare(uint64(m.ToRow)) + } return n } @@ -443,10 +471,10 @@ func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var byteLen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowShare @@ -456,26 +484,11 @@ func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.RootHash == nil { - m.RootHash = []byte{} - } - iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) @@ -510,6 +523,44 @@ func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { m.Namespace = []byte{} } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromRow", wireType) + } + m.FromRow = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FromRow |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ToRow", wireType) + } + m.ToRow = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShare + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ToRow |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipShare(dAtA[iNdEx:]) diff --git a/share/p2p/shrexnd/pb/share.proto b/share/p2p/shrexnd/pb/share.proto index 026965f6c3..a1c9c17a6d 100644 --- a/share/p2p/shrexnd/pb/share.proto +++ b/share/p2p/shrexnd/pb/share.proto @@ -1,10 +1,10 @@ syntax = "proto3"; package share.p2p.shrex.nd; -import "pb/proof"; +import "pb/proof.proto"; message GetSharesByNamespaceRequest{ - bytes root_hash = 1; + uint64 height = 1; bytes namespace = 2; uint32 fromRow = 3; uint32 toRow = 4; diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go index accdc25f56..5d99041e7a 100644 --- a/share/p2p/shrexnd/server.go +++ b/share/p2p/shrexnd/server.go @@ -2,11 +2,8 @@ package shrexnd import ( "context" - "crypto/sha256" "errors" "fmt" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share/store" "time" "github.com/libp2p/go-libp2p/core/host" @@ -17,10 +14,11 @@ import ( "github.com/celestiaorg/go-libp2p-messenger/serde" nmt_pb "github.com/celestiaorg/nmt/pb" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/p2p" pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" + "github.com/celestiaorg/celestia-node/share/store" ) // Server implements server side of shrex/nd protocol to serve namespaced share to remote @@ -110,13 +108,15 @@ func (srv *Server) handleNamespacedData(ctx context.Context, stream network.Stre return err } - logger = logger.With("namespace", share.Namespace(req.Namespace).String(), - "hash", share.DataHash(req.RootHash).String()) + logger = logger.With( + "namespace", share.Namespace(req.Namespace).String(), + "height", req.Height, + ) ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) defer cancel() - shares, status, err := srv.getNamespaceData(ctx, req.RootHash, req.Namespace, int(req.FromRow), int(req.ToRow)) + shares, status, err := srv.getNamespaceData(ctx, req.Height, req.Namespace, int(req.FromRow), int(req.ToRow)) if err != nil { // server should respond with status regardless if there was an error getting data sendErr := srv.respondStatus(ctx, logger, stream, status) @@ -174,13 +174,13 @@ func (srv *Server) readRequest( } func (srv *Server) getNamespaceData(ctx context.Context, - hash share.DataHash, + height uint64, namespace share.Namespace, fromRow, toRow int, ) (share.NamespacedShares, 
pb.StatusCode, error) { - file, err := srv.store.GetByHash(ctx, hash) + file, err := srv.store.GetByHeight(ctx, height) if err != nil { - if errors.Is(err, eds.ErrNotFound) { + if errors.Is(err, store.ErrNotFound) { return nil, pb.StatusCode_NOT_FOUND, nil } return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving DAH: %w", err) @@ -254,11 +254,5 @@ func (srv *Server) observeStatus(ctx context.Context, status pb.StatusCode) { // validateRequest checks correctness of the request func validateRequest(req pb.GetSharesByNamespaceRequest) error { - if err := share.Namespace(req.Namespace).ValidateForData(); err != nil { - return err - } - if len(req.RootHash) != sha256.Size { - return fmt.Errorf("incorrect root hash length: %v", len(req.RootHash)) - } - return nil + return share.Namespace(req.Namespace).ValidateForData() } diff --git a/share/shwap/data.go b/share/shwap/data.go index 69cb13af5f..126566245a 100644 --- a/share/shwap/data.go +++ b/share/shwap/data.go @@ -1,16 +1,18 @@ package shwap import ( + "context" "fmt" blocks "github.com/ipfs/go-block-format" + "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" nmtpb "github.com/celestiaorg/nmt/pb" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/ipld" shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" ) @@ -54,7 +56,7 @@ func NewDataFromEDS( shrs := square.Row(uint(rowIdx)) // TDOD(@Wondertan): This will likely be removed - nd, proof, err := eds.NDFromShares(shrs, namespace, rowIdx) + nd, proof, err := ndFromShares(shrs, namespace, rowIdx) if err != nil { return nil, err } @@ -65,6 +67,35 @@ func NewDataFromEDS( return datas, nil } +func ndFromShares(shrs []share.Share, namespace share.Namespace, axisIdx int) ([]share.Share, nmt.Proof, error) { + bserv := ipld.NewMemBlockservice() + batchAdder := ipld.NewNmtNodeAdder(context.TODO(), bserv, ipld.MaxSizeBatchOption(len(shrs))) + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shrs)/2), uint(axisIdx), + nmt.NodeVisitor(batchAdder.Visit)) + for _, shr := range shrs { + err := tree.Push(shr) + if err != nil { + return nil, nmt.Proof{}, err + } + } + + root, err := tree.Root() + if err != nil { + return nil, nmt.Proof{}, err + } + + err = batchAdder.Commit() + if err != nil { + return nil, nmt.Proof{}, err + } + + row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, root, namespace, len(shrs)) + if err != nil { + return nil, nmt.Proof{}, err + } + return row, *proof, nil +} + // DataFromBlock converts blocks.Block into Data. 
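[Editor's note] ndFromShares above rebuilds the row commitment before extracting the namespaced range: push every share of the extended row in order, then take the root. A stripped-down sketch of that core step; rowRoot is an illustrative helper, not part of the patch:

import (
	"github.com/celestiaorg/celestia-app/pkg/wrapper"

	"github.com/celestiaorg/celestia-node/share"
)

// rowRoot recomputes the NMT root of one extended row. len(shrs)/2 is the
// original (pre-erasure) width; rowIdx is the row's position in the square.
func rowRoot(shrs []share.Share, rowIdx int) ([]byte, error) {
	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shrs)/2), uint(rowIdx))
	for _, shr := range shrs {
		if err := tree.Push(shr); err != nil {
			return nil, err
		}
	}
	return tree.Root()
}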
func DataFromBlock(blk blocks.Block) (*Data, error) { if err := validateCID(blk.Cid()); err != nil { diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go index ac6ae0db83..f94f1aa6e6 100644 --- a/share/shwap/data_hasher_test.go +++ b/share/shwap/data_hasher_test.go @@ -16,8 +16,9 @@ func TestDataHasher(t *testing.T) { _, err := hasher.Write([]byte("hello")) assert.Error(t, err) + size := 8 namespace := sharetest.RandV0Namespace() - square, root := edstest.RandEDSWithNamespace(t, namespace, 8) + square, root := edstest.RandEDSWithNamespace(t, namespace, size*size, size) datas, err := NewDataFromEDS(square, 1, namespace) require.NoError(t, err) diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index 1bbacbd31b..5a12b04734 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -3,13 +3,13 @@ package shwap import ( "context" "fmt" - "github.com/celestiaorg/celestia-node/share/store/file" - blocks "github.com/ipfs/go-block-format" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/store/file" ) // DataIDSize is the size of the DataID in bytes. diff --git a/share/shwap/getter.go b/share/shwap/getter.go index 9845f618f1..69ea90c655 100644 --- a/share/shwap/getter.go +++ b/share/shwap/getter.go @@ -27,6 +27,20 @@ func NewGetter(fetch exchange.SessionExchange, bstore blockstore.Blockstore) *Ge return &Getter{fetch: fetch, bstore: bstore} } +func (g *Getter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + shrIdx := row*len(header.DAH.RowRoots) + col + shrs, err := g.GetShares(ctx, header, shrIdx) + if err != nil { + return nil, fmt.Errorf("getting shares: %w", err) + } + + if len(shrs) != 1 { + return nil, fmt.Errorf("expected 1 share, got %d", len(shrs)) + } + + return shrs[0], nil +} + // TODO: Make GetSamples so it provides proofs to users. // GetShares fetches in the Block/EDS by their indexes. // Automatically caches them on the Blockstore. 
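[Editor's note] GetShare in the hunk above flattens (row, col) into a single sample index over the extended square: idx = row*width + col, with width = len(DAH.RowRoots). The inverse mapping, useful when reading results back, is just division and remainder:

// rowCol inverts the row-major flattening used by GetShare/GetShares,
// e.g. for an 8×8 extended square, rowCol(10, 8) == (1, 2).
func rowCol(idx, width int) (row, col int) {
	return idx / width, idx % width
}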
diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index b1e479b057..3625f17471 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -14,29 +14,33 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" format "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/store/cache" ) func TestGetter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + size := 8 ns := sharetest.RandV0Namespace() - square, root := edstest.RandEDSWithNamespace(t, ns, 4) + square, root := edstest.RandEDSWithNamespace(t, ns, size*size, size) hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} bstore := edsBlockstore(square) - exch := dummySessionExchange{bstore} + exch := DummySessionExchange{bstore} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) t.Run("GetShares", func(t *testing.T) { @@ -88,7 +92,7 @@ func TestGetter(t *testing.T) { hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} bstore := edsBlockstore(square) - exch := &dummySessionExchange{bstore} + exch := &DummySessionExchange{bstore} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) @@ -152,15 +156,15 @@ func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) return result, nil } -type dummySessionExchange struct { +type DummySessionExchange struct { blockstore.Blockstore } -func (e dummySessionExchange) NewSession(context.Context) exchange.Fetcher { +func (e DummySessionExchange) NewSession(context.Context) exchange.Fetcher { return e } -func (e dummySessionExchange) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { +func (e DummySessionExchange) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { blk, err := e.Get(ctx, k) if format.IsNotFound(err) { return nil, fmt.Errorf("block was not found locally (offline): %w", err) @@ -176,11 +180,11 @@ func (e dummySessionExchange) GetBlock(ctx context.Context, k cid.Cid) (blocks.B return blk, err } -func (e dummySessionExchange) NotifyNewBlocks(context.Context, ...blocks.Block) error { +func (e DummySessionExchange) NotifyNewBlocks(context.Context, ...blocks.Block) error { return nil } -func (e dummySessionExchange) GetBlocks(ctx context.Context, ks []cid.Cid) (<-chan blocks.Block, error) { +func (e DummySessionExchange) GetBlocks(ctx context.Context, ks []cid.Cid) (<-chan blocks.Block, error) { out := make(chan blocks.Block) go func() { defer close(out) @@ -204,8 +208,24 @@ func (e dummySessionExchange) GetBlocks(ctx context.Context, ks []cid.Cid) (<-ch return out, nil } -func (e dummySessionExchange) Close() error { +func (e DummySessionExchange) Close() error { // NB: exchange doesn't own the blockstore's underlying datastore, so it is // not responsible for closing it. 
 	return nil
 }
+
+func edsBlockstore(ctx context.Context, t *testing.T, sqr *rsmt2d.ExtendedDataSquare) blockstore.Blockstore {
+	edsStore, err := NewStore(DefaultParameters(), t.TempDir())
+	require.NoError(t, err)
+
+	// disable caching, so reads always hit the underlying store
+	edsStore.cache = cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})
+
+	// persist the given square under height 1 to match the test headers
+	dah, err := share.NewRoot(sqr)
+	require.NoError(t, err)
+	f, err := edsStore.Put(ctx, dah.Hash(), 1, sqr)
+	require.NoError(t, err)
+	require.NoError(t, f.Close())
+	return NewBlockstore(edsStore, ds_sync.MutexWrap(datastore.NewMapDatastore()))
+}
diff --git a/share/shwap/handler.go b/share/shwap/handler.go
index a296f4d2d1..220555191f 100644
--- a/share/shwap/handler.go
+++ b/share/shwap/handler.go
@@ -3,9 +3,11 @@ package shwap
 import (
 	"context"
 	"fmt"
-	"github.com/celestiaorg/celestia-node/share/store/file"
+
 	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
+
+	"github.com/celestiaorg/celestia-node/share/store/file"
 )
 
 // BlockBuilder is an interface for building blocks from files.
diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go
index 792e075b5b..1c58dbcf18 100644
--- a/share/shwap/row_id.go
+++ b/share/shwap/row_id.go
@@ -4,14 +4,15 @@ import (
 	"context"
 	"encoding/binary"
 	"fmt"
-	"github.com/celestiaorg/celestia-node/share/store/file"
-	"github.com/celestiaorg/rsmt2d"
-	blocks "github.com/ipfs/go-block-format"
+
+	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
 
+	"github.com/celestiaorg/rsmt2d"
+
 	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/store/file"
 )
 
 //TODO(@walldiss): maybe move into separate subpkg?
@@ -83,8 +84,8 @@ func (rid RowID) MarshalTo(data []byte) (int, error) {
 
 // UnmarshalFrom decodes RowID from given byte slice.
 func (rid *RowID) UnmarshalFrom(data []byte) (int, error) {
-	rid.Height = binary.LittleEndian.Uint64(data[2:])
-	rid.RowIndex = binary.LittleEndian.Uint16(data)
+	rid.Height = binary.LittleEndian.Uint64(data)
+	rid.RowIndex = binary.LittleEndian.Uint16(data[8:])
 	return RowIDSize, nil
 }
 
diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go
index 83056c66eb..28ff9efa76 100644
--- a/share/shwap/sample_id.go
+++ b/share/shwap/sample_id.go
@@ -4,13 +4,13 @@ import (
 	"context"
 	"encoding/binary"
 	"fmt"
-	"github.com/celestiaorg/celestia-node/share/store/file"
-	blocks "github.com/ipfs/go-block-format"
+
+	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
 
 	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/store/file"
 )
 
 //TODO(@walldiss): maybe move into separate subpkg?
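The row_id.go fix above makes UnmarshalFrom read Height from the first 8 bytes and RowIndex from the following 2, i.e. the order MarshalTo is expected to write them in. A round-trip sketch of the corrected layout, assuming RowIDSize is 10 (an 8-byte little-endian Height followed by a 2-byte RowIndex):

    // roundTripRowID illustrates the corrected wire layout; 10 is the assumed
    // RowIDSize (8 bytes of Height, then 2 bytes of RowIndex).
    func roundTripRowID(rid RowID) (uint64, uint16) {
        buf := make([]byte, 10)
        binary.LittleEndian.PutUint64(buf[:8], rid.Height)   // Height first...
        binary.LittleEndian.PutUint16(buf[8:], rid.RowIndex) // ...then RowIndex
        // decoding mirrors the fixed UnmarshalFrom
        return binary.LittleEndian.Uint64(buf), binary.LittleEndian.Uint16(buf[8:])
    }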
@@ -115,7 +115,7 @@ func (sid SampleID) GetHeight() uint64 {
 }
 
 func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
-	shr, err := f.Share(ctx, int(sid.RowID.RowIndex), int(sid.ShareIndex))
+	shr, err := f.Share(ctx, int(sid.ShareIndex), int(sid.RowID.RowIndex))
 	if err != nil {
 		return nil, fmt.Errorf("while getting share with proof: %w", err)
 	}
diff --git a/share/store/blockstore.go b/share/store/blockstore.go
index 48aa0cb15a..1136f11f48 100644
--- a/share/store/blockstore.go
+++ b/share/store/blockstore.go
@@ -4,8 +4,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/celestiaorg/celestia-node/share/shwap"
-	"github.com/celestiaorg/celestia-node/share/store/cache"
 
 	bstore "github.com/ipfs/boxo/blockstore"
 	"github.com/ipfs/boxo/datastore/dshelp"
@@ -14,6 +12,8 @@ import (
 	"github.com/ipfs/go-datastore"
 	"github.com/ipfs/go-datastore/namespace"
 	ipld "github.com/ipfs/go-ipld-format"
+
+	"github.com/celestiaorg/celestia-node/share/shwap"
 )
 
 var _ bstore.Blockstore = (*Blockstore)(nil)
@@ -47,12 +47,12 @@ func (bs *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
 
 	// check cache first
 	height := h.GetHeight()
-	_, err = bs.store.cache.Get(cache.Key{Height: height})
+	_, err = bs.store.cache.Get(height)
 	if err == nil {
 		return true, nil
 	}
 
-	_, err = bs.store.GetByHeight(ctx, height)
+	_, err = bs.store.HasByHeight(ctx, height)
 	if err == nil {
 		return true, nil
 	}
@@ -75,7 +75,7 @@ func (bs *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error
 	}
 
 	height := h.GetHeight()
-	f, err := bs.store.cache.Second().GetOrLoad(ctx, cache.Key{Height: height}, bs.store.openFileByHeight(height))
+	f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.store.openFileByHeight(height))
 	if err == nil {
 		return h.BlockFromFile(ctx, f)
 	}
diff --git a/share/store/blockstore_test.go b/share/store/blockstore_test.go
index da618212da..2940860fc6 100644
--- a/share/store/blockstore_test.go
+++ b/share/store/blockstore_test.go
@@ -2,53 +2,108 @@ package store
 
 import (
 	"context"
-	"github.com/celestiaorg/celestia-node/share/shwap"
+	mrand "math/rand"
 	"testing"
+	"time"
 
-	boxobs "github.com/ipfs/boxo/blockstore"
-	"github.com/stretchr/testify/assert"
+	ds "github.com/ipfs/go-datastore"
+	ds_sync "github.com/ipfs/go-datastore/sync"
 	"github.com/stretchr/testify/require"
 
-	"github.com/celestiaorg/rsmt2d"
-
-	"github.com/celestiaorg/celestia-node/share"
-	"github.com/celestiaorg/celestia-node/share/eds"
 	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+	"github.com/celestiaorg/celestia-node/share/sharetest"
+	"github.com/celestiaorg/celestia-node/share/shwap"
+	"github.com/celestiaorg/celestia-node/share/store/cache"
 )
 
-// TODO(@Wondertan): Add row and data code
+//TODO:
+// - add caching tests
+// - add reconstruction tests
 func TestBlockstoreGetShareSample(t *testing.T) {
-	ctx := context.Background()
-	sqr := edstest.RandEDS(t, 4)
-	root, err := share.NewRoot(sqr)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	t.Cleanup(cancel)
+
+	edsStore, err := NewStore(DefaultParameters(), t.TempDir())
 	require.NoError(t, err)
 
-	b := edsBlockstore(sqr)
+	// disable cache
+	edsStore.cache = cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})
 
-	width := int(sqr.Width())
-	for i := 0; i < width*width; i++ {
-		id, err := shwap.NewSampleID(1, i, root)
-		require.NoError(t, err)
+	height := uint64(100)
+	eds, dah := randomEDS(t)
 
-		blk, err := b.Get(ctx, id.Cid())
-		require.NoError(t, err)
+	f, err := edsStore.Put(ctx, dah.Hash(),
height, eds) + require.NoError(t, err) + require.NoError(t, f.Close()) - sample, err := shwap.SampleFromBlock(blk) - require.NoError(t, err) + bs := NewBlockstore(edsStore, ds_sync.MutexWrap(ds.NewMapDatastore())) + + t.Run("Sample", func(t *testing.T) { + width := int(eds.Width()) + for i := 0; i < width*width; i++ { + id, err := shwap.NewSampleID(height, i, dah) + require.NoError(t, err) + blk, err := bs.Get(ctx, id.Cid()) + require.NoError(t, err) + + sample, err := shwap.SampleFromBlock(blk) + require.NoError(t, err) + + err = sample.Verify(dah) + require.NoError(t, err) + require.EqualValues(t, id, sample.SampleID) + } + }) - err = sample.Verify(root) + t.Run("Row", func(t *testing.T) { + width := int(eds.Width()) + for i := 0; i < width; i++ { + rowId, err := shwap.NewRowID(height, uint16(i), dah) + require.NoError(t, err) + + blk, err := bs.Get(ctx, rowId.Cid()) + require.NoError(t, err) + + row, err := shwap.RowFromBlock(blk) + require.NoError(t, err) + + err = row.Verify(dah) + require.NoError(t, err) + + require.EqualValues(t, rowId, row.RowID) + } + }) + + t.Run("NamespaceData", func(t *testing.T) { + size := 8 + namespace := sharetest.RandV0Namespace() + amount := mrand.Intn(size*size-1) + 1 + eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) + + height := uint64(42) + f, err := edsStore.Put(ctx, dah.Hash(), height, eds) require.NoError(t, err) - assert.EqualValues(t, id, sample.SampleID) - } -} + require.NoError(t, f.Close()) -type edsFileAndFS eds.MemFile + for i, row := range dah.RowRoots { + if namespace.IsOutsideRange(row, row) { + continue + } -func (m *edsFileAndFS) File(uint64) (*eds.MemFile, error) { - return (*eds.MemFile)(m), nil -} + dataID, err := shwap.NewDataID(height, uint16(i), namespace, dah) + require.NoError(t, err) + + blk, err := bs.Get(ctx, dataID.Cid()) + require.NoError(t, err) + + nd, err := shwap.DataFromBlock(blk) + require.NoError(t, err) + + err = nd.Verify(dah) + require.NoError(t, err) -func edsBlockstore(sqr *rsmt2d.ExtendedDataSquare) boxobs.Blockstore { - return NewBlockstore[*eds.MemFile]((*edsFileAndFS)(&eds.MemFile{Eds: sqr})) + require.EqualValues(t, dataID, nd.DataID) + } + }) } diff --git a/share/store/cache/accessor_cache.go b/share/store/cache/accessor_cache.go index 05ec00b64c..9bb7992e35 100644 --- a/share/store/cache/accessor_cache.go +++ b/share/store/cache/accessor_cache.go @@ -2,14 +2,14 @@ package cache import ( "context" - "errors" "fmt" - "github.com/celestiaorg/celestia-node/share/store/file" "sync" "sync/atomic" "time" lru "github.com/hashicorp/golang-lru/v2" + + "github.com/celestiaorg/celestia-node/share/store/file" ) const defaultCloseTimeout = time.Minute @@ -18,18 +18,15 @@ var _ Cache = (*FileCache)(nil) // FileCache implements the Cache interface using an LRU cache backend. type FileCache struct { - idxLock sync.RWMutex // The name is a prefix that will be used for cache metrics if they are enabled. name string - // HeightsIdx is a map of Height to Datahash. It is used to find the Datahash for a given Height. - HeightsIdx map[uint64]string // stripedLocks prevents simultaneous RW access to the file cache for an accessor. Instead // of using only one lock or one lock per key, we stripe the keys across 256 locks. 256 is // chosen because it 0-255 is the range of values we get looking at the last byte of the key. - stripedLocks [256]sync.Mutex + stripedLocks [256]*sync.RWMutex // Caches the file for a given key for file read affinity, i.e., further reads will likely // be from the same file. 
Maps (Datahash -> accessor). - cache *lru.Cache[string, *accessor] + cache *lru.Cache[key, *accessor] metrics *metrics } @@ -37,66 +34,26 @@ type FileCache struct { // accessor is the value stored in Cache. It implements the file.EdsFile interface. It has a // reference counted so that it can be removed from the cache only when all references are released. type accessor struct { - lock sync.RWMutex + lock sync.Mutex file.EdsFile - Height uint64 + height uint64 done chan struct{} refs atomic.Int32 isClosed bool } -func (s *accessor) addRef() error { - s.lock.Lock() - defer s.lock.Unlock() - if s.isClosed { - // item is already closed and soon will be removed after all refs are released - return errCacheMiss - } - if s.refs.Add(1) == 1 { - // there were no refs previously and done channel was closed, reopen it by recreating - s.done = make(chan struct{}) - } - return nil -} - -func (s *accessor) removeRef() { - s.lock.Lock() - defer s.lock.Unlock() - if s.refs.Add(-1) <= 0 { - close(s.done) - } -} - -func (s *accessor) close() error { - s.lock.Lock() - if s.isClosed { - s.lock.Unlock() - // accessor will be closed by another goroutine - return nil - } - s.isClosed = true - done := s.done - s.lock.Unlock() - - select { - case <-done: - case <-time.After(defaultCloseTimeout): - return fmt.Errorf("closing file, some readers didn't close the file within timeout,"+ - " amount left: %v", s.refs.Load()) - } - if err := s.EdsFile.Close(); err != nil { - return fmt.Errorf("closing accessor: %w", err) - } - return nil -} - func NewFileCache(name string, cacheSize int) (*FileCache, error) { bc := &FileCache{ - name: name, + name: name, + stripedLocks: [256]*sync.RWMutex{}, + } + + for i := range bc.stripedLocks { + bc.stripedLocks[i] = &sync.RWMutex{} } // Instantiate the file Cache. - bslru, err := lru.NewWithEvict[string, *accessor](cacheSize, bc.evictFn()) + bslru, err := lru.NewWithEvict[key, *accessor](cacheSize, bc.evictFn()) if err != nil { return nil, fmt.Errorf("failed to instantiate accessor cache: %w", err) } @@ -105,14 +62,11 @@ func NewFileCache(name string, cacheSize int) (*FileCache, error) { } // evictFn will be invoked when an item is evicted from the cache. -func (bc *FileCache) evictFn() func(string, *accessor) { - return func(_ string, fa *accessor) { - bc.idxLock.Lock() - defer bc.idxLock.Unlock() - delete(bc.HeightsIdx, fa.Height) +func (bc *FileCache) evictFn() func(key, *accessor) { + return func(_ key, ac *accessor) { // we can release accessor from cache early, while it is being closed in parallel routine go func() { - err := fa.close() + err := ac.close() if err != nil { bc.metrics.observeEvicted(true) log.Errorf("couldn't close accessor after cache eviction: %s", err) @@ -124,54 +78,33 @@ func (bc *FileCache) evictFn() func(string, *accessor) { } // Get retrieves the accessor for a given key from the Cache. If the Accessor is not in -// the Cache, it returns an errCacheMiss. -func (bc *FileCache) Get(key Key) (file.EdsFile, error) { - lk := &bc.stripedLocks[keyToStriped(key)] - lk.Lock() - defer lk.Unlock() +// the Cache, it returns an ErrCacheMiss. 
+func (bc *FileCache) Get(key key) (file.EdsFile, error) { + lk := bc.getLock(key) + lk.RLock() + defer lk.RUnlock() - accessor, err := bc.get(key) - if err != nil { + ac, ok := bc.cache.Get(key) + if !ok { bc.metrics.observeGet(false) - return nil, err + return nil, ErrCacheMiss } - bc.metrics.observeGet(true) - return newRefCloser(accessor) -} -func (bc *FileCache) get(key Key) (*accessor, error) { - hashStr := key.Datahash.String() - if hashStr == "" { - var ok bool - bc.idxLock.RLock() - hashStr, ok = bc.HeightsIdx[key.Height] - bc.idxLock.RUnlock() - if !ok { - return nil, errCacheMiss - } - } - abs, ok := bc.cache.Get(hashStr) - if !ok { - return nil, errCacheMiss - } - return abs, nil + bc.metrics.observeGet(true) + return newRefCloser(ac) } // GetOrLoad attempts to get an item from the cache, and if not found, invokes // the provided loader function to load it. -func (bc *FileCache) GetOrLoad(ctx context.Context, key Key, loader OpenFileFn) (file.EdsFile, error) { - if !key.isComplete() { - return nil, errors.New("key is not complete") - } - - lk := &bc.stripedLocks[keyToStriped(key)] +func (bc *FileCache) GetOrLoad(ctx context.Context, key key, loader OpenFileFn) (file.EdsFile, error) { + lk := bc.getLock(key) lk.Lock() defer lk.Unlock() - abs, err := bc.get(key) - if err == nil { + ac, ok := bc.cache.Get(key) + if ok { // return accessor, only if it is not closed yet - accessorWithRef, err := newRefCloser(abs) + accessorWithRef, err := newRefCloser(ac) if err == nil { bc.metrics.observeGet(true) return accessorWithRef, nil @@ -179,47 +112,39 @@ func (bc *FileCache) GetOrLoad(ctx context.Context, key Key, loader OpenFileFn) } // accessor not found in cache or closed, so load new one using loader - key, file, err := loader(ctx) + f, err := loader(ctx) if err != nil { return nil, fmt.Errorf("unable to load accessor: %w", err) } - fa := &accessor{EdsFile: file} + // wrap file with close once and axis cache + cacheFile := file.CloseOnceFile(file.NewCacheFile(f)) + ac = &accessor{EdsFile: cacheFile} // Create a new accessor first to increment the reference count in it, so it cannot get evicted // from the inner lru cache before it is used. - rc, err := newRefCloser(fa) + rc, err := newRefCloser(ac) if err != nil { return nil, err } - return rc, bc.add(key, fa) -} - -func (bc *FileCache) add(key Key, fa *accessor) error { - keyStr := key.Datahash.String() - bc.idxLock.Lock() - defer bc.idxLock.Unlock() - // Create a new accessor first to increment the reference count in it, so it cannot get evicted - // from the inner lru cache before it is used. - bc.cache.Add(keyStr, fa) - bc.HeightsIdx[key.Height] = keyStr - return nil + bc.cache.Add(key, ac) + return rc, nil } // Remove removes the Accessor for a given key from the cache. -func (bc *FileCache) Remove(key Key) error { - lk := &bc.stripedLocks[keyToStriped(key)] - lk.Lock() - accessor, err := bc.get(key) - lk.Unlock() - if errors.Is(err, errCacheMiss) { +func (bc *FileCache) Remove(key key) error { + lk := bc.getLock(key) + lk.RLock() + ac, ok := bc.cache.Get(key) + lk.RUnlock() + if !ok { // item is not in cache return nil } - if err = accessor.close(); err != nil { + if err := ac.close(); err != nil { return err } // The cache will call evictFn on removal, where accessor close will be called. 
- bc.cache.Remove(key.Datahash.String()) + bc.cache.Remove(key) return nil } @@ -230,6 +155,51 @@ func (bc *FileCache) EnableMetrics() error { return err } +func (s *accessor) addRef() error { + s.lock.Lock() + defer s.lock.Unlock() + if s.isClosed { + // item is already closed and soon will be removed after all refs are released + return ErrCacheMiss + } + if s.refs.Add(1) == 1 { + // there were no refs previously and done channel was closed, reopen it by recreating + s.done = make(chan struct{}) + } + return nil +} + +func (s *accessor) removeRef() { + s.lock.Lock() + defer s.lock.Unlock() + if s.refs.Add(-1) <= 0 { + close(s.done) + } +} + +func (s *accessor) close() error { + s.lock.Lock() + if s.isClosed { + s.lock.Unlock() + // accessor will be closed by another goroutine + return nil + } + s.isClosed = true + done := s.done + s.lock.Unlock() + + select { + case <-done: + case <-time.After(defaultCloseTimeout): + return fmt.Errorf("closing file, some readers didn't close the file within timeout,"+ + " amount left: %v", s.refs.Load()) + } + if err := s.EdsFile.Close(); err != nil { + return fmt.Errorf("closing accessor: %w", err) + } + return nil +} + // refCloser manages references to accessor from provided reader and removes the ref, when the // Close is called type refCloser struct { @@ -257,9 +227,6 @@ func (c *refCloser) Close() error { return nil } -// keyToStriped returns the index of the lock to use for a given key. We use the last -// byte of the Datahash as the pseudo-random index. -func keyToStriped(sk Key) byte { - str := sk.Datahash.String() - return str[len(str)-1] +func (bc *FileCache) getLock(k key) *sync.RWMutex { + return bc.stripedLocks[byte(k%256)] } diff --git a/share/eds/cache/accessor_cache_test.go b/share/store/cache/accessor_cache_test.go similarity index 93% rename from share/eds/cache/accessor_cache_test.go rename to share/store/cache/accessor_cache_test.go index 347b251a88..e910cc0017 100644 --- a/share/eds/cache/accessor_cache_test.go +++ b/share/store/cache/accessor_cache_test.go @@ -20,7 +20,7 @@ func TestAccessorCache(t *testing.T) { t.Run("add / get item from cache", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -48,7 +48,7 @@ func TestAccessorCache(t *testing.T) { t.Run("get blockstore from accessor", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -79,7 +79,7 @@ func TestAccessorCache(t *testing.T) { t.Run("remove an item", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -100,13 +100,13 @@ func TestAccessorCache(t *testing.T) { // check if item exists _, err = cache.Get(key) - require.ErrorIs(t, err, errCacheMiss) + require.ErrorIs(t, err, ErrCacheMiss) }) t.Run("successive reads should read the same data", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -133,7 +133,7 @@ 
func TestAccessorCache(t *testing.T) { t.Run("removed by eviction", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -160,13 +160,13 @@ func TestAccessorCache(t *testing.T) { // check if item evicted _, err = cache.Get(key) - require.ErrorIs(t, err, errCacheMiss) + require.ErrorIs(t, err, ErrCacheMiss) }) t.Run("close on accessor is not closing underlying accessor", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -193,7 +193,7 @@ func TestAccessorCache(t *testing.T) { t.Run("close on accessor should wait all readers to finish", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -226,9 +226,9 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) mock.checkClosed(t, false) - // reads for item that is being evicted should result in errCacheMiss + // reads for item that is being evicted should result in ErrCacheMiss _, err = cache.Get(key) - require.ErrorIs(t, err, errCacheMiss) + require.ErrorIs(t, err, ErrCacheMiss) // close second reader and wait for accessor to be closed err = accessor2.Close() @@ -247,7 +247,7 @@ func TestAccessorCache(t *testing.T) { t.Run("slow reader should not block eviction", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - cache, err := NewAccessorCache("test", 1) + cache, err := NewFileCache("test", 1) require.NoError(t, err) // add accessor to the cache @@ -268,7 +268,7 @@ func TestAccessorCache(t *testing.T) { // first accessor should be evicted from cache _, err = cache.Get(key1) - require.ErrorIs(t, err, errCacheMiss) + require.ErrorIs(t, err, ErrCacheMiss) // first accessor should not be closed before all refs are released by Close() is calls. mock1.checkClosed(t, false) diff --git a/share/store/cache/cache.go b/share/store/cache/cache.go index 4c12ff1eaf..e4f79d1e0d 100644 --- a/share/store/cache/cache.go +++ b/share/store/cache/cache.go @@ -3,10 +3,11 @@ package cache import ( "context" "errors" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/store/file" + logging "github.com/ipfs/go-log/v2" "go.opentelemetry.io/otel" + + "github.com/celestiaorg/celestia-node/share/store/file" ) var ( @@ -15,33 +16,24 @@ var ( ) var ( - errCacheMiss = errors.New("accessor not found in blockstore cache") + ErrCacheMiss = errors.New("accessor not found in blockstore cache") ) -// Key is a unique identifier for an item in the Cache. Either Datahash or Height should be set. -type Key struct { - Datahash share.DataHash - Height uint64 -} - -// A Key is considered complete if it has both Datahash and Height set. -func (k Key) isComplete() bool { - return k.Datahash != nil && k.Height != 0 -} +type OpenFileFn func(context.Context) (file.EdsFile, error) -type OpenFileFn func(context.Context) (Key, file.EdsFile, error) +type key = uint64 // Cache is an interface that defines the basic Cache operations. type Cache interface { // Get returns the EDS file for the given key. 
- Get(Key) (file.EdsFile, error) + Get(key) (file.EdsFile, error) // GetOrLoad attempts to get an item from the Cache and, if not found, invokes // the provided loader function to load it into the Cache. - GetOrLoad(context.Context, Key, OpenFileFn) (file.EdsFile, error) + GetOrLoad(context.Context, key, OpenFileFn) (file.EdsFile, error) // Remove removes an item from Cache. - Remove(Key) error + Remove(key) error // EnableMetrics enables metrics in Cache EnableMetrics() error diff --git a/share/store/cache/doublecache.go b/share/store/cache/doublecache.go index 39107d86cf..567a189252 100644 --- a/share/store/cache/doublecache.go +++ b/share/store/cache/doublecache.go @@ -2,6 +2,7 @@ package cache import ( "errors" + "github.com/celestiaorg/celestia-node/share/store/file" ) @@ -19,7 +20,7 @@ func NewDoubleCache(first, second Cache) *DoubleCache { } // Get looks for an item in all the caches one by one and returns the Cache found item. -func (mc *DoubleCache) Get(key Key) (file.EdsFile, error) { +func (mc *DoubleCache) Get(key key) (file.EdsFile, error) { accessor, err := mc.first.Get(key) if err == nil { return accessor, nil @@ -28,7 +29,7 @@ func (mc *DoubleCache) Get(key Key) (file.EdsFile, error) { } // Remove removes an item from all underlying caches -func (mc *DoubleCache) Remove(key Key) error { +func (mc *DoubleCache) Remove(key key) error { err1 := mc.first.Remove(key) err2 := mc.second.Remove(key) return errors.Join(err1, err2) diff --git a/share/store/cache/noop.go b/share/store/cache/noop.go index 1977e98dc0..166710ad80 100644 --- a/share/store/cache/noop.go +++ b/share/store/cache/noop.go @@ -2,10 +2,12 @@ package cache import ( "context" + "io" + + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/store/file" - "github.com/celestiaorg/rsmt2d" - "io" ) var _ Cache = (*NoopCache)(nil) @@ -13,15 +15,15 @@ var _ Cache = (*NoopCache)(nil) // NoopCache implements noop version of Cache interface type NoopCache struct{} -func (n NoopCache) Get(Key) (file.EdsFile, error) { - return nil, errCacheMiss +func (n NoopCache) Get(key) (file.EdsFile, error) { + return nil, ErrCacheMiss } -func (n NoopCache) GetOrLoad(context.Context, Key, OpenFileFn) (file.EdsFile, error) { - return NoopFile{}, nil +func (n NoopCache) GetOrLoad(ctx context.Context, _ key, loader OpenFileFn) (file.EdsFile, error) { + return loader(ctx) } -func (n NoopCache) Remove(Key) error { +func (n NoopCache) Remove(key) error { return nil } diff --git a/share/store/file/cache_file_test.go b/share/store/file/cache_file_test.go index c5a74f689c..a569655b48 100644 --- a/share/store/file/cache_file_test.go +++ b/share/store/file/cache_file_test.go @@ -1,9 +1,11 @@ package file import ( - "github.com/celestiaorg/rsmt2d" - "github.com/stretchr/testify/require" "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" ) func TestCacheFile(t *testing.T) { diff --git a/share/store/file/file_closer.go b/share/store/file/file_closer.go index 13cb71a40a..dd459474f4 100644 --- a/share/store/file/file_closer.go +++ b/share/store/file/file_closer.go @@ -3,10 +3,12 @@ package file import ( "context" "errors" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/rsmt2d" "io" "sync/atomic" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" ) var _ EdsFile = (*closeOnceFile)(nil) diff --git a/share/store/file/file_header.go b/share/store/file/file_header.go index 1222b0a4f6..8430b07128 
100644 --- a/share/store/file/file_header.go +++ b/share/store/file/file_header.go @@ -3,8 +3,9 @@ package file import ( "bytes" "encoding/binary" - "github.com/celestiaorg/celestia-node/share" "io" + + "github.com/celestiaorg/celestia-node/share" ) const HeaderSize = 64 diff --git a/share/store/file/mem_file.go b/share/store/file/mem_file.go index 1ce362049d..97f9e06379 100644 --- a/share/store/file/mem_file.go +++ b/share/store/file/mem_file.go @@ -3,9 +3,11 @@ package file import ( "bytes" "context" - "github.com/celestiaorg/celestia-app/pkg/da" + "encoding/hex" + "fmt" "io" + "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -116,11 +118,13 @@ func ndDataFromShares(shares []share.Share, namespace share.Namespace, rowIdx in return share.NamespacedRow{}, err } + fmt.Println("roooot", rowIdx, hex.EncodeToString(root)) err = batchAdder.Commit() if err != nil { return share.NamespacedRow{}, err } + fmt.Println("lookup", rowIdx) row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, root, namespace, len(shares)) if err != nil { return share.NamespacedRow{}, err diff --git a/share/store/file/mem_file_test.go b/share/store/file/mem_file_test.go index 5fb53ff41f..f02c02a883 100644 --- a/share/store/file/mem_file_test.go +++ b/share/store/file/mem_file_test.go @@ -1,8 +1,9 @@ package file import ( - "github.com/celestiaorg/rsmt2d" "testing" + + "github.com/celestiaorg/rsmt2d" ) func TestMemFile(t *testing.T) { diff --git a/share/store/file/mempool.go b/share/store/file/mempool.go index 2b743f80d9..d290e0cdbc 100644 --- a/share/store/file/mempool.go +++ b/share/store/file/mempool.go @@ -1,8 +1,9 @@ package file import ( - "github.com/celestiaorg/celestia-node/share" "sync" + + "github.com/celestiaorg/celestia-node/share" ) // TODO: need better name @@ -32,11 +33,11 @@ func (m poolsMap) get(size int) *memPool { return pool } -func (m *memPool) putOds(ods [][]share.Share) { - m.ods.Put(ods) +func (m *memPool) putSquare(s [][]share.Share) { + m.ods.Put(s) } -func (m *memPool) getOds() [][]share.Share { +func (m *memPool) square() [][]share.Share { return m.ods.Get().([][]share.Share) } diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go index 5539c60bba..0289c92ba9 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -21,7 +21,7 @@ type OdsFile struct { fl *os.File lock sync.RWMutex - ods *odsInMemFile + ods square } // OpenOdsFile opens an existing file. File has to be closed after usage. 
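The mempool.go rename above (putSquare/square instead of putOds/getOds) matches the new square type that OdsFile now embeds. The pooling pattern behind it can be reduced to the following sketch, assuming the pool pre-allocates ODS-sized [][]share.Share grids; squarePool, newSquare and odsSize are hypothetical names, not the patch's API:

    const odsSize = 64 // illustrative ODS width

    // squarePool hands out pre-allocated odsSize x odsSize grids of empty
    // shares, so readers like readShares can copy into them without
    // allocating per share.
    var squarePool = sync.Pool{
        New: func() any { return newSquare(odsSize) },
    }

    func newSquare(size int) [][]share.Share {
        sq := make([][]share.Share, size)
        for i := range sq {
            sq[i] = make([]share.Share, size)
            for j := range sq[i] {
                sq[i][j] = make(share.Share, share.Size)
            }
        }
        return sq
    }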
@@ -36,7 +36,6 @@ func OpenOdsFile(path string) (*OdsFile, error) { return nil, err } - // TODO(WWondertan): Validate header return &OdsFile{ path: path, hdr: h, @@ -51,7 +50,7 @@ func CreateOdsFile( eds *rsmt2d.ExtendedDataSquare) (*OdsFile, error) { f, err := os.Create(path) if err != nil { - return nil, err + return nil, fmt.Errorf("file create: %w", err) } h := &Header{ @@ -67,7 +66,7 @@ func CreateOdsFile( return nil, fmt.Errorf("writing ODS file: %w", err) } - // TODO: fill odsInMemFile with data from eds + // TODO: fill ods field with data from eds return &OdsFile{ path: path, fl: f, @@ -98,7 +97,7 @@ func (f *OdsFile) Size() int { } func (f *OdsFile) Close() error { - if err := f.ods.Сlose(); err != nil { + if err := f.ods.close(); err != nil { return err } return f.fl.Close() @@ -117,7 +116,7 @@ func (f *OdsFile) Reader() (io.Reader, error) { if err != nil { return nil, fmt.Errorf("reading ods: %w", err) } - return f.ods.Reader() + return f.ods.Reader(f.hdr) } func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { @@ -137,7 +136,7 @@ func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx in func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { f.lock.RLock() defer f.lock.RUnlock() - shrs, err := f.ods.AxisHalf(context.Background(), axisType, axisIdx) + shrs, err := f.ods.axisHalf(context.Background(), axisType, axisIdx) if err == nil { return shrs, nil } @@ -164,11 +163,11 @@ func (f *OdsFile) readOds() error { return fmt.Errorf("discarding header: %w", err) } - ods, err := readOdsInMem(f.hdr, f.fl) + square, err := readShares(f.hdr, f.fl) if err != nil { return fmt.Errorf("reading ods: %w", err) } - f.ods = ods + f.ods = square return nil } @@ -293,5 +292,5 @@ func (f *OdsFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) { return nil, err } - return f.ods.EDS(ctx) + return f.ods.eds() } diff --git a/share/store/file/ods_file_test.go b/share/store/file/ods_file_test.go index 8c4ff5b0ba..0e0f63d711 100644 --- a/share/store/file/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -3,10 +3,12 @@ package file import ( "context" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share/eds/edstest" @@ -59,7 +61,7 @@ func TestReadOdsFile(t *testing.T) { err = f.readOds() require.NoError(t, err) - for i, row := range f.ods.square { + for i, row := range f.ods { original, err := f.readRow(i) require.NoError(t, err) require.True(t, len(original) == len(row)) @@ -67,6 +69,35 @@ func TestReadOdsFile(t *testing.T) { } } +func TestFileStreaming(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + eds := edstest.RandEDS(t, 8) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + + path := t.TempDir() + "/testfile" + f, err := CreateOdsFile(path, 1, []byte{}, eds) + require.NoError(t, err) + + reader, err := f.Reader() + require.NoError(t, err) + + streamed, err := ReadEds(ctx, reader, dah.Hash()) + require.NoError(t, err) + require.True(t, eds.Equals(streamed)) + + // verify that the reader represented by file can be read from + // multiple times, without exhausting the underlying reader. 
+ reader2, err := f.Reader() + require.NoError(t, err) + + streamed2, err := ReadEds(ctx, reader2, dah.Hash()) + require.NoError(t, err) + require.True(t, eds.Equals(streamed2)) +} + // Leopard full encode // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 418206 2545 ns/op // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 4968 227265 ns/op diff --git a/share/store/file/in_mem_ods_file.go b/share/store/file/square.go similarity index 58% rename from share/store/file/in_mem_ods_file.go rename to share/store/file/square.go index 7a69334243..be9f091590 100644 --- a/share/store/file/in_mem_ods_file.go +++ b/share/store/file/square.go @@ -4,30 +4,30 @@ import ( "bytes" "context" "fmt" + "io" + + "golang.org/x/sync/errgroup" + "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/rsmt2d" - "golang.org/x/sync/errgroup" - "io" + + "github.com/celestiaorg/celestia-node/share" ) -type odsInMemFile struct { - inner *OdsFile - square [][]share.Share -} +type square [][]share.Share -func ReadEds(ctx context.Context, r io.Reader, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { +func ReadEds(_ context.Context, r io.Reader, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { h, err := ReadHeader(r) if err != nil { - return nil, err + return nil, fmt.Errorf("reading header: %w", err) } - ods, err := readOdsInMem(h, r) + square, err := readShares(h, r) if err != nil { - return nil, err + return nil, fmt.Errorf("reading shares: %w", err) } - eds, err := ods.EDS(ctx) + eds, err := square.eds() if err != nil { return nil, fmt.Errorf("computing EDS: %w", err) } @@ -39,18 +39,18 @@ func ReadEds(ctx context.Context, r io.Reader, root share.DataHash) (*rsmt2d.Ext if !bytes.Equal(newDah.Hash(), root) { return nil, fmt.Errorf( "share: content integrity mismatch: imported root %s doesn't match expected root %s", - newDah.Hash(), + share.DataHash(newDah.Hash()), root, ) } return eds, nil } -func readOdsInMem(hdr *Header, reader io.Reader) (*odsInMemFile, error) { +func readShares(hdr *Header, reader io.Reader) (square, error) { shrLn := int(hdr.shareSize) odsLn := int(hdr.squareSize) / 2 - ods := memPools.get(odsLn).getOds() + square := memPools.get(odsLn).square() buf := memPools.get(odsLn).getHalfAxis() defer memPools.get(odsLn).putHalfAxis(buf) @@ -60,74 +60,69 @@ func readOdsInMem(hdr *Header, reader io.Reader) (*odsInMemFile, error) { } for j := 0; j < odsLn; j++ { - copy(ods[i][j], buf[j*shrLn:(j+1)*shrLn]) + copy(square[i][j], buf[j*shrLn:(j+1)*shrLn]) } } - return &odsInMemFile{square: ods}, nil + return square, nil } -func (f *odsInMemFile) Size() int { - f.inner.lock.RLock() - defer f.inner.lock.RUnlock() - return len(f.square) * 2 +func (s square) size() int { + return len(s) } -func (f *odsInMemFile) Сlose() error { - f.inner.lock.RLock() - defer f.inner.lock.RUnlock() - if f != nil { - memPools.get(f.Size() / 2).putOds(f.square) +func (s square) close() error { + if s != nil { + memPools.get(s.size()).putSquare(s) } return nil } -func (f *odsInMemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - f.inner.lock.RLock() - defer f.inner.lock.RUnlock() - if f == nil { - return nil, fmt.Errorf("ods file not cached") +func (s square) axisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + if s == nil { + return nil, fmt.Errorf("ods file not in mem") } - if axisIdx >= f.Size()/2 { + if axisIdx >= 
s.size() {
 		return nil, fmt.Errorf("index is out of ods bounds")
 	}
+
+	// square stores rows directly in the top-level slice, so a row can be returned by index
 	if axisType == rsmt2d.Row {
-		return f.square[axisIdx], nil
+		return s[axisIdx], nil
 	}
 
-	// TODO: this is not efficient, but it is better than reading from file
-	shrs := make([]share.Share, f.Size()/2)
-	for i := 0; i < f.Size()/2; i++ {
-		shrs[i] = f.square[i][axisIdx]
+	// construct the half column from the row-ordered square
+	col := make([]share.Share, s.size())
+	for i := 0; i < s.size(); i++ {
+		col[i] = s[i][axisIdx]
 	}
-	return shrs, nil
+	return col, nil
 }
 
-func (f *odsInMemFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) {
-	shrs := make([]share.Share, 0, f.Size()*f.Size())
-	for _, row := range f.square {
+func (s square) eds() (*rsmt2d.ExtendedDataSquare, error) {
+	shrs := make([]share.Share, 0, 4*s.size()*s.size())
+	for _, row := range s {
 		shrs = append(shrs, row...)
 	}
 
-	treeFn := wrapper.NewConstructor(uint64(f.Size() / 2))
+	treeFn := wrapper.NewConstructor(uint64(s.size()))
 	return rsmt2d.ComputeExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn)
 }
 
-func (f *odsInMemFile) Reader() (io.Reader, error) {
-	f.inner.lock.RLock()
-	defer f.inner.lock.RUnlock()
-	if f == nil {
+func (s square) Reader(hdr *Header) (io.Reader, error) {
+	if s == nil {
 		return nil, fmt.Errorf("ods file not cached")
 	}
 
 	odsR := &bufferedODSReader{
-		f:   f,
-		buf: bytes.NewBuffer(make([]byte, int(f.inner.hdr.shareSize))),
+		square: s,
+		total:  s.size() * s.size(),
+		buf:    bytes.NewBuffer(make([]byte, 0, int(hdr.shareSize))),
 	}
 
 	// write header to the buffer
-	_, err := f.inner.hdr.WriteTo(odsR.buf)
+	_, err := hdr.WriteTo(odsR.buf)
 	if err != nil {
 		return nil, fmt.Errorf("writing header: %w", err)
 	}
@@ -135,30 +130,30 @@
 	return odsR, nil
 }
 
-func (f *odsInMemFile) computeAxisHalf(
+func (s square) computeAxisHalf(
 	ctx context.Context,
 	axisType rsmt2d.Axis,
 	axisIdx int,
 ) ([]share.Share, error) {
-	shares := make([]share.Share, f.Size()/2)
+	shares := make([]share.Share, s.size())
 
 	// extend opposite half of the square while collecting shares for the first half of required axis
 	g, ctx := errgroup.WithContext(ctx)
 	opposite := oppositeAxis(axisType)
-	for i := 0; i < f.Size()/2; i++ {
+	for i := 0; i < s.size(); i++ {
 		i := i
 		g.Go(func() error {
-			original, err := f.AxisHalf(ctx, opposite, i)
+			original, err := s.axisHalf(ctx, opposite, i)
 			if err != nil {
 				return err
 			}
 
-			enc, err := codec.Encoder(f.Size())
+			enc, err := codec.Encoder(s.size() * 2)
 			if err != nil {
 				return fmt.Errorf("encoder: %w", err)
 			}
 
-			shards := make([][]byte, f.Size())
+			shards := make([][]byte, s.size()*2)
 			copy(shards, original)
 			//for j := len(original); j < len(shards); j++ {
 			//	shards[j] = make([]byte, len(original[0]))
@@ -169,7 +164,7 @@ func (f *odsInMemFile) computeAxisHalf(
 			//	return fmt.Errorf("encode: %w", err)
 			//}
 
-			target := make([]bool, f.Size())
+			target := make([]bool, s.size()*2)
 			target[axisIdx] = true
 
 			err = enc.ReconstructSome(shards, target)
@@ -193,10 +188,10 @@ func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis {
 	return rsmt2d.Col
 }
 
-// bufferedODSReader will reads shares from odsInMemFile into the buffer.
+// bufferedODSReader reads shares from the in-memory square into the buffer.
 // It exposes the buffer to be read by io.Reader interface implementation
 type bufferedODSReader struct {
-	f *odsInMemFile
+	square square
 	// current is the amount of shares stored in ods file that have been read from reader. When current
 	// reaches total, bufferedODSReader will prevent further reads by returning io.EOF
 	current, total int
@@ -207,11 +202,10 @@ func (r *bufferedODSReader) Read(p []byte) (n int, err error) {
 	// read shares to the buffer until it has sufficient data to fill provided container or full ods is
 	// read
 	for r.current < r.total && r.buf.Len() < len(p) {
-		x, y := r.current%r.f.Size(), r.current/r.f.Size()
-		r.buf.Write(r.f.square[y][x])
+		x, y := r.current%(r.square.size()), r.current/(r.square.size())
+		r.buf.Write(r.square[y][x])
 		r.current++
 	}
-
 	// read buffer to slice
 	return r.buf.Read(p)
 }
diff --git a/share/store/metrics.go b/share/store/metrics.go
new file mode 100644
index 0000000000..c4f0840f33
--- /dev/null
+++ b/share/store/metrics.go
@@ -0,0 +1,132 @@
+package store
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+)
+
+const (
+	failedKey = "failed"
+	sizeKey   = "eds_size"
+)
+
+var (
+	meter = otel.Meter("store")
+)
+
+type metrics struct {
+	put       metric.Float64Histogram
+	putExists metric.Int64Counter
+	get       metric.Float64Histogram
+	has       metric.Float64Histogram
+	remove    metric.Float64Histogram
+}
+
+func (s *Store) WithMetrics() error {
+	put, err := meter.Float64Histogram("eds_store_put_time_histogram",
+		metric.WithDescription("eds store put time histogram(s)"))
+	if err != nil {
+		return err
+	}
+
+	putExists, err := meter.Int64Counter("eds_store_put_exists_counter",
+		metric.WithDescription("eds store put file exists"))
+	if err != nil {
+		return err
+	}
+
+	get, err := meter.Float64Histogram("eds_store_get_time_histogram",
+		metric.WithDescription("eds store get time histogram(s)"))
+	if err != nil {
+		return err
+	}
+
+	has, err := meter.Float64Histogram("eds_store_has_time_histogram",
+		metric.WithDescription("eds store has time histogram(s)"))
+	if err != nil {
+		return err
+	}
+
+	remove, err := meter.Float64Histogram("eds_store_remove_time_histogram",
+		metric.WithDescription("eds store remove time histogram(s)"))
+	if err != nil {
+		return err
+	}
+
+	if err = s.cache.EnableMetrics(); err != nil {
+		return err
+	}
+
+	s.metrics = &metrics{
+		put:       put,
+		putExists: putExists,
+		get:       get,
+		has:       has,
+		remove:    remove,
+	}
+	return nil
+}
+
+func (m *metrics) observePut(ctx context.Context, dur time.Duration, size uint, failed bool) {
+	if m == nil {
+		return
+	}
+	if ctx.Err() != nil {
+		ctx = context.Background()
+	}
+
+	m.put.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed),
+		attribute.Int(sizeKey, int(size))))
+}
+
+func (m *metrics) observePutExist(ctx context.Context) {
+	if m == nil {
+		return
+	}
+	if ctx.Err() != nil {
+		ctx = context.Background()
+	}
+
+	m.putExists.Add(ctx, 1)
+}
+
+func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	if ctx.Err() != nil {
+		ctx = context.Background()
+	}
+
+	m.get.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
+
+func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	if ctx.Err() != nil {
+		ctx = context.Background()
+	}
+
+	m.has.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
+
+func (m *metrics) observeRemove(ctx context.Context, dur time.Duration, failed bool) {
+	if m == nil {
+		return
+	}
+	if ctx.Err() != nil {
+		ctx = context.Background()
+	}
+
+	m.remove.Record(ctx, dur.Seconds(), metric.WithAttributes(
+		attribute.Bool(failedKey, failed)))
+}
diff --git a/share/store/store.go b/share/store/store.go
index 6527a89702..c0493391c5 100644
--- a/share/store/store.go
+++ b/share/store/store.go
@@ -4,13 +4,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"os"
+	"time"
+
+	logging "github.com/ipfs/go-log/v2"
+	"go.opentelemetry.io/otel"
+
+	"github.com/celestiaorg/rsmt2d"
+
 	"github.com/celestiaorg/celestia-node/share"
 	"github.com/celestiaorg/celestia-node/share/store/cache"
 	"github.com/celestiaorg/celestia-node/share/store/file"
-	"github.com/celestiaorg/rsmt2d"
-	logging "github.com/ipfs/go-log/v2"
-	"go.opentelemetry.io/otel"
-	"os"
 )
 
 var (
@@ -18,9 +22,12 @@ var (
 	tracer = otel.Tracer("share/eds")
 )
 
+// TODO(@walldiss): persist store stats like amount of files, file types, avg file size etc in a file
const (
-	blocksPath  = "/blocks/"
+	hashsPath   = "/blocks/"
 	heightsPath = "/heights/"
+
+	defaultDirPerm = 0755
 )
 
 var ErrNotFound = errors.New("eds not found in store")
@@ -42,7 +49,7 @@ type Store struct {
 	// stripedLocks is used to synchronize parallel operations
 	stripLock *striplock
 
-	//metrics *metrics
+	metrics *metrics
 }
 
 // NewStore creates a new EDS Store under the given basepath and datastore.
@@ -53,6 +60,16 @@ func NewStore(params *Parameters, basePath string) (*Store, error) {
 
 	//TODO: acquire DirectoryLock store lockGuard
 
+	// ensure blocks folder
+	if err := ensureFolder(basePath + hashsPath); err != nil {
+		return nil, fmt.Errorf("ensure blocks folder: %w", err)
+	}
+
+	// ensure heights folder
+	if err := ensureFolder(basePath + heightsPath); err != nil {
+		return nil, fmt.Errorf("ensure heights folder: %w", err)
+	}
+
 	recentBlocksCache, err := cache.NewFileCache("recent", params.RecentBlocksCacheSize)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create recent blocks cache: %w", err)
@@ -63,11 +80,9 @@ func NewStore(params *Parameters, basePath string) (*Store, error) {
 		return nil, fmt.Errorf("failed to create blockstore cache: %w", err)
 	}
 
-	dcache := cache.NewDoubleCache(recentBlocksCache, blockstoreCache)
-
 	store := &Store{
 		basepath:  basePath,
-		cache:     dcache,
+		cache:     cache.NewDoubleCache(recentBlocksCache, blockstoreCache),
 		stripLock: newStripLock(1024),
 		//metrics: newMetrics(),
 	}
@@ -80,54 +95,77 @@ func (s *Store) Put(
 	height uint64,
 	square *rsmt2d.ExtendedDataSquare,
 ) (file.EdsFile, error) {
+	tNow := time.Now()
 	lock := s.stripLock.byDatahashAndHeight(datahash, height)
 	lock.lock()
 	defer lock.unlock()
 
-	path := s.basepath + blocksPath + datahash.String()
-	odsFile, err := file.CreateOdsFile(path, height, datahash, square)
+	// short circuit if file exists
+	if has, _ := s.hasByHeight(height); has {
+		s.metrics.observePutExist(ctx)
+		return s.getByHeight(height)
+	}
+
+	path := s.basepath + hashsPath + datahash.String()
+	file, err := file.CreateOdsFile(path, height, datahash, square)
 	if err != nil {
-		return nil, fmt.Errorf("writing ODS file: %w", err)
+		s.metrics.observePut(ctx, time.Since(tNow), square.Width(), true)
+		return nil, fmt.Errorf("creating ODS file: %w", err)
 	}
 
 	// create hard link with height as name
 	err = os.Link(path, s.basepath+heightsPath+fmt.Sprintf("%d", height))
 	if err != nil {
+		s.metrics.observePut(ctx, time.Since(tNow), square.Width(), true)
 		return nil, fmt.Errorf("creating hard link: %w", err)
 	}
 
+	s.metrics.observePut(ctx, time.Since(tNow), square.Width(), false)
+
 	// put in recent cache
-	f, err := s.cache.First().GetOrLoad(ctx, cache.Key{Height: height}, wrapWithCache(odsFile))
+	f, err := s.cache.First().GetOrLoad(ctx, height,
edsLoader(file)) if err != nil { return nil, fmt.Errorf("putting in cache: %w", err) } return f, nil } -func (s *Store) GetByHash(_ context.Context, datahash share.DataHash) (file.EdsFile, error) { +func (s *Store) GetByHash(ctx context.Context, datahash share.DataHash) (file.EdsFile, error) { lock := s.stripLock.byDatahash(datahash) lock.RLock() defer lock.RUnlock() - f, err := s.cache.Get(cache.Key{Datahash: datahash}) - if err == nil { - return f, nil - } + tNow := time.Now() + f, err := s.getByHash(datahash) + s.metrics.observeGet(ctx, time.Since(tNow), err != nil) + return f, err +} - path := s.basepath + blocksPath + datahash.String() +func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { + path := s.basepath + hashsPath + datahash.String() odsFile, err := file.OpenOdsFile(path) if err != nil { + if os.IsNotExist(err) { + return nil, ErrNotFound + } return nil, fmt.Errorf("opening ODS file: %w", err) } return odsFile, nil } -func (s *Store) GetByHeight(_ context.Context, height uint64) (file.EdsFile, error) { +func (s *Store) GetByHeight(ctx context.Context, height uint64) (file.EdsFile, error) { lock := s.stripLock.byHeight(height) lock.RLock() defer lock.RUnlock() - f, err := s.cache.Get(cache.Key{Height: height}) + tNow := time.Now() + f, err := s.getByHeight(height) + s.metrics.observeGet(ctx, time.Since(tNow), err != nil) + return f, err +} + +func (s *Store) getByHeight(height uint64) (file.EdsFile, error) { + f, err := s.cache.Get(height) if err == nil { return f, nil } @@ -135,89 +173,132 @@ func (s *Store) GetByHeight(_ context.Context, height uint64) (file.EdsFile, err path := s.basepath + heightsPath + fmt.Sprintf("%d", height) odsFile, err := file.OpenOdsFile(path) if err != nil { + if os.IsNotExist(err) { + return nil, ErrNotFound + } return nil, fmt.Errorf("opening ODS file: %w", err) } return odsFile, nil } -func (s *Store) HasByHash(_ context.Context, datahash share.DataHash) (bool, error) { +func (s *Store) HasByHash(ctx context.Context, datahash share.DataHash) (bool, error) { lock := s.stripLock.byDatahash(datahash) lock.RLock() defer lock.RUnlock() - _, err := s.cache.Get(cache.Key{Datahash: datahash}) - if err == nil { - return true, nil - } + tNow := time.Now() + exist, err := s.hasByHash(datahash) + s.metrics.observeHas(ctx, time.Since(tNow), err != nil) + return exist, err +} - path := s.basepath + blocksPath + datahash.String() - _, err = file.OpenOdsFile(path) - if err != nil { - return false, fmt.Errorf("opening ODS file: %w", err) - } - return true, nil +func (s *Store) hasByHash(datahash share.DataHash) (bool, error) { + path := s.basepath + hashsPath + datahash.String() + return pathExists(path) } -func (s *Store) HasByHeight(_ context.Context, height uint64) (bool, error) { +func (s *Store) HasByHeight(ctx context.Context, height uint64) (bool, error) { lock := s.stripLock.byHeight(height) lock.RLock() defer lock.RUnlock() - _, err := s.cache.Get(cache.Key{Height: height}) + tNow := time.Now() + exist, err := s.hasByHeight(height) + s.metrics.observeHas(ctx, time.Since(tNow), err != nil) + return exist, err +} + +func (s *Store) hasByHeight(height uint64) (bool, error) { + _, err := s.cache.Get(height) if err == nil { return true, nil } path := s.basepath + heightsPath + fmt.Sprintf("%d", height) - _, err = file.OpenOdsFile(path) - if err != nil { - return false, fmt.Errorf("opening ODS file: %w", err) + return pathExists(path) +} + +func (s *Store) Remove(ctx context.Context, height uint64) error { + lock := 
s.stripLock.byHeight(height)
+	lock.Lock()
+	defer lock.Unlock()
+
+	tNow := time.Now()
+	err := s.remove(height)
+	s.metrics.observeRemove(ctx, time.Since(tNow), err != nil)
+	return err
+}
+
+func (s *Store) remove(height uint64) error {
+	// short circuit if the file does not exist; surface any other failure
+	f, err := s.getByHeight(height)
+	if err != nil {
+		if errors.Is(err, ErrNotFound) {
+			return nil
+		}
+		return fmt.Errorf("getting by height: %w", err)
 	}
-	return true, nil
+
+	hashStr := f.DataHash().String()
+	if err = f.Close(); err != nil {
+		return fmt.Errorf("closing file on removal: %w", err)
+	}
+
+	if err = s.cache.Remove(height); err != nil {
+		return fmt.Errorf("removing from cache: %w", err)
+	}
+
+	heightPath := s.basepath + heightsPath + fmt.Sprintf("%d", height)
+	if err = os.Remove(heightPath); err != nil {
+		return fmt.Errorf("removing by height: %w", err)
+	}
+
+	hashPath := s.basepath + hashsPath + hashStr
+	if err = os.Remove(hashPath); err != nil {
+		return fmt.Errorf("removing by hash: %w", err)
+	}
+	return nil
 }
 
-func wrapWithCache(f file.EdsFile) cache.OpenFileFn {
-	return func(ctx context.Context) (cache.Key, file.EdsFile, error) {
-		f := file.NewCacheFile(f)
-		key := cache.Key{
-			Datahash: f.DataHash(),
-			Height:   f.Height(),
-		}
-		return key, f, nil
+func edsLoader(f file.EdsFile) cache.OpenFileFn {
+	return func(ctx context.Context) (file.EdsFile, error) {
+		return f, nil
 	}
 }
 
 func (s *Store) openFileByHeight(height uint64) cache.OpenFileFn {
-	return func(ctx context.Context) (cache.Key, file.EdsFile, error) {
+	return func(ctx context.Context) (file.EdsFile, error) {
 		path := s.basepath + heightsPath + fmt.Sprintf("%d", height)
 		f, err := file.OpenOdsFile(path)
 		if err != nil {
-			return cache.Key{}, nil, fmt.Errorf("opening ODS file: %w", err)
-		}
-		key := cache.Key{
-			Datahash: f.DataHash(),
-			Height:   height,
+			return nil, fmt.Errorf("opening ODS file: %w", err)
 		}
-		return key, file.NewCacheFile(f), nil
+		return f, nil
 	}
 }
 
-func (s *Store) openFileByDatahash(datahash share.DataHash) cache.OpenFileFn {
-	return func(ctx context.Context) (cache.Key, file.EdsFile, error) {
-		path := s.basepath + blocksPath + datahash.String()
-		f, err := file.OpenOdsFile(path)
+func ensureFolder(path string) error {
+	info, err := os.Stat(path)
+	if os.IsNotExist(err) {
+		err = os.Mkdir(path, defaultDirPerm)
 		if err != nil {
-			return cache.Key{}, nil, fmt.Errorf("opening ODS file: %w", err)
+			return fmt.Errorf("creating dir: %w", err)
 		}
-		key := cache.Key{
-			Datahash: f.DataHash(),
-			Height:   f.Height(),
-		}
-		return key, file.NewCacheFile(f), nil
+		return nil
 	}
+	if err != nil {
+		return fmt.Errorf("checking dir: %w", err)
+	}
+	if !info.IsDir() {
+		return errors.New("expected dir, got a file")
+	}
+	return nil
 }
 
-func (s *Store) Close() error {
-	panic("implement me")
-	return nil
+func pathExists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
 }
diff --git a/share/store/striplock.go b/share/store/striplock.go
index 20669a97b9..9d17dc5881 100644
--- a/share/store/striplock.go
+++ b/share/store/striplock.go
@@ -2,8 +2,9 @@ package store
 
 import (
 	"encoding/binary"
-	"github.com/celestiaorg/celestia-node/share"
 	"sync"
+
+	"github.com/celestiaorg/celestia-node/share"
 )
 
 // TODO: move to utils

From 2f5563e940f329078b92e3a2f2d6f826d22acd41 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Mon, 5 Feb 2024 15:54:01 +0500
Subject: [PATCH 059/132] add store tests and benchmarks

---
 share/getters/shwap.go    | 259 --------------------------------------
share/store/store.go | 8 ++ share/store/store_test.go | 250 ++++++++++++++++++++++++++++++++++++ 3 files changed, 258 insertions(+), 259 deletions(-) delete mode 100644 share/getters/shwap.go create mode 100644 share/store/store_test.go diff --git a/share/getters/shwap.go b/share/getters/shwap.go deleted file mode 100644 index b3dfc31b05..0000000000 --- a/share/getters/shwap.go +++ /dev/null @@ -1,259 +0,0 @@ -package getters - -import ( - "context" - "fmt" - "sync" - - "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/boxo/exchange" - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/shwap" -) - -// TODO: GetRow method -type Getter struct { - fetch exchange.SessionExchange - bstore blockstore.Blockstore -} - -func NewGetter(fetch exchange.SessionExchange, bstore blockstore.Blockstore) *Getter { - return &Getter{fetch: fetch, bstore: bstore} -} - -func (g *Getter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { - shrIdx := row*len(header.DAH.RowRoots) + col - shrs, err := g.GetShares(ctx, header, shrIdx) - if err != nil { - return nil, fmt.Errorf("getting shares: %w", err) - } - - if len(shrs) != 1 { - return nil, fmt.Errorf("expected 1 share, got %d", len(shrs)) - } - - return shrs[0], nil -} - -// TODO: Make GetSamples so it provides proofs to users. -// GetShares fetches in the Block/EDS by their indexes. -// Automatically caches them on the Blockstore. -// Guarantee that the returned shares are in the same order as shrIdxs. -func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smplIdxs ...int) ([]share.Share, error) { - sids := make([]shwap.SampleID, len(smplIdxs)) - for i, shrIdx := range smplIdxs { - sid, err := shwap.NewSampleID(hdr.Height(), shrIdx, hdr.DAH) - if err != nil { - return nil, err - } - - sids[i] = sid - } - - smplsMu := sync.Mutex{} - smpls := make(map[int]shwap.Sample, len(smplIdxs)) - verifyFn := func(s shwap.Sample) error { - err := s.Verify(hdr.DAH) - if err != nil { - return err - } - - smplIdx := int(s.SampleID.RowIndex)*len(hdr.DAH.RowRoots) + int(s.SampleID.ShareIndex) - smplsMu.Lock() - smpls[smplIdx] = s - smplsMu.Unlock() - return nil - } - - cids := make([]cid.Cid, len(smplIdxs)) - for i, sid := range sids { - sampleVerifiers.Add(sid, verifyFn) - cids[i] = sid.Cid() - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - ses := g.fetch.NewSession(ctx) - // must start getting only after verifiers are registered - blkCh, err := ses.GetBlocks(ctx, cids) - if err != nil { - return nil, fmt.Errorf("fetching blocks: %w", err) - } - // GetBlocks handles ctx and closes blkCh, so we don't have to - blks := make([]block.Block, 0, len(smplIdxs)) - for blk := range blkCh { - blks = append(blks, blk) - } - // only persist when all samples received - if len(blks) != len(smplIdxs) { - if ctx.Err() != nil { - return nil, ctx.Err() - } - return nil, fmt.Errorf("not all shares were found") - } - // ensure we persist samples/blks and make them available for Bitswap - err = g.bstore.PutMany(ctx, blks) - if err != nil { - return nil, fmt.Errorf("storing shares: %w", err) - } - // tell bitswap that we stored the blks and can serve them now - err = g.fetch.NotifyNewBlocks(ctx, blks...) 
- if err != nil { - return nil, fmt.Errorf("notifying new shares: %w", err) - } - - // ensure we return shares in the requested order - shrs := make([]share.Share, len(smplIdxs)) - for i, smplIdx := range smplIdxs { - shrs[i] = smpls[smplIdx].SampleShare - } - - return shrs, nil -} - -// GetEDS -// TODO(@Wondertan): Consider requesting randomized rows instead of ODS only -func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { - sqrLn := len(hdr.DAH.RowRoots) - rids := make([]shwap.RowID, sqrLn/2) - for i := 0; i < sqrLn/2; i++ { - rid, err := shwap.NewRowID(hdr.Height(), uint16(i), hdr.DAH) - if err != nil { - return nil, err - } - - rids[i] = rid - } - - square, err := rsmt2d.NewExtendedDataSquare( - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(sqrLn/2)), uint(sqrLn), - share.Size, - ) - if err != nil { - return nil, err - } - - verifyFn := func(row shwap.Row) error { - err := row.Verify(hdr.DAH) - if err != nil { - return err - } - - for shrIdx, shr := range row.RowShares { - err = square.SetCell(uint(row.RowIndex), uint(shrIdx), shr) // no synchronization needed - if err != nil { - panic(err) // this should never happen and if it is... something is really wrong - } - } - - return nil - } - - cids := make([]cid.Cid, sqrLn/2) - for i, rid := range rids { - rowVerifiers.Add(rid, verifyFn) - cids[i] = rid.Cid() - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - ses := g.fetch.NewSession(ctx) - // must start getting only after verifiers are registered - blkCh, err := ses.GetBlocks(ctx, cids) - if err != nil { - return nil, fmt.Errorf("fetching blocks: %w", err) - } - // GetBlocks handles ctx by closing blkCh, so we don't have to - for range blkCh { //nolint:revive // it complains on empty block, but the code is functional - // we handle writes in verifyFn so just wait for as many results as possible - } - - // and try to repair - err = square.Repair(hdr.DAH.RowRoots, hdr.DAH.ColumnRoots) - if err != nil { - if ctx.Err() != nil { - return nil, ctx.Err() - } - return nil, fmt.Errorf("repairing EDS: %w", err) - } - - return square, nil -} - -func (g *Getter) GetSharesByNamespace( - ctx context.Context, - hdr *header.ExtendedHeader, - ns share.Namespace, -) (share.NamespacedShares, error) { - if err := ns.ValidateForData(); err != nil { - return nil, err - } - - var dids []shwap.DataID //nolint:prealloc// we don't know how many rows with needed namespace there are - for rowIdx, rowRoot := range hdr.DAH.RowRoots { - if ns.IsOutsideRange(rowRoot, rowRoot) { - continue - } - - did, err := shwap.NewDataID(hdr.Height(), uint16(rowIdx), ns, hdr.DAH) - if err != nil { - return nil, err - } - - dids = append(dids, did) - } - if len(dids) == 0 { - return share.NamespacedShares{}, nil - } - - datas := make([]shwap.Data, len(dids)) - verifyFn := func(d shwap.Data) error { - err := d.Verify(hdr.DAH) - if err != nil { - return err - } - - nsStartIdx := dids[0].RowIndex - idx := d.RowIndex - nsStartIdx - datas[idx] = d - return nil - } - - cids := make([]cid.Cid, len(dids)) - for i, did := range dids { - dataVerifiers.Add(did, verifyFn) - cids[i] = did.Cid() - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - ses := g.fetch.NewSession(ctx) - // must start getting only after verifiers are registered - blkCh, err := ses.GetBlocks(ctx, cids) - if err != nil { - return nil, fmt.Errorf("fetching blocks:%w", err) - } - // GetBlocks handles ctx by closing blkCh, so we don't have to - for range blkCh { //nolint:revive 
// it complains on empty block, but the code is functional
-		// we handle writes in verifyFn so just wait for as many results as possible
-	}
-
-	nShrs := make([]share.NamespacedRow, 0, len(datas))
-	for _, row := range datas {
-		proof := row.DataProof
-		nShrs = append(nShrs, share.NamespacedRow{
-			Shares: row.DataShares,
-			Proof:  &proof,
-		})
-	}
-
-	return nShrs, nil
-}
diff --git a/share/store/store.go b/share/store/store.go
index c0493391c5..5b317dc033 100644
--- a/share/store/store.go
+++ b/share/store/store.go
@@ -106,6 +106,14 @@ func (s *Store) Put(
 		return s.getByHeight(height)
 	}
 
+	if has, _ := s.hasByHash(datahash); has {
+		log.Errorw("put: file already exists by hash, but not by height",
+			"height", height,
+			"hash", datahash.String())
+		s.metrics.observePutExist(ctx)
+		return s.getByHash(datahash)
+	}
+
 	path := s.basepath + hashsPath + datahash.String()
 	file, err := file.CreateOdsFile(path, height, datahash, square)
 	if err != nil {
diff --git a/share/store/store_test.go b/share/store/store_test.go
new file mode 100644
index 0000000000..f278103f94
--- /dev/null
+++ b/share/store/store_test.go
@@ -0,0 +1,250 @@
+package store
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/libs/rand"
+	"go.uber.org/atomic"
+
+	"github.com/celestiaorg/rsmt2d"
+
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+	"github.com/celestiaorg/celestia-node/share/store/cache"
+)
+
+//TODO: add benchmarks for store
+
+func TestEDSStore(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	t.Cleanup(cancel)
+
+	edsStore, err := NewStore(DefaultParameters(), t.TempDir())
+	require.NoError(t, err)
+
+	// disable cache
+	edsStore.cache = cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})
+	height := atomic.NewUint64(100)
+
+	// Put stores the EDS file and makes it available both by hash and by height
+	t.Run("Put", func(t *testing.T) {
+		eds, dah := randomEDS(t)
+		height := height.Add(1)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		// file should become available by hash
+		has, err := edsStore.HasByHash(ctx, dah.Hash())
+		require.NoError(t, err)
+		require.True(t, has)
+
+		// file should become available by height
+		has, err = edsStore.HasByHeight(ctx, height)
+		require.NoError(t, err)
+		require.True(t, has)
+	})
+
+	t.Run("Cached after Put", func(t *testing.T) {
+		edsStore, err := NewStore(DefaultParameters(), t.TempDir())
+		require.NoError(t, err)
+
+		eds, dah := randomEDS(t)
+		height := height.Add(1)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		// file should be cached after put
+		f, err = edsStore.cache.Get(height)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		// check that cached file is the same eds
+		fromFile, err := f.EDS(ctx)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+		require.True(t, eds.Equals(fromFile))
+	})
+
+	t.Run("Second Put should be noop", func(t *testing.T) {
+		eds, dah := randomEDS(t)
+		height := height.Add(1)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		f, err = edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+	})
+
+	t.Run("GetByHeight", func(t *testing.T) {
+		eds, dah := randomEDS(t)
+		height := height.Add(1)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		f, err = edsStore.GetByHeight(ctx, height)
+		require.NoError(t, err)
+
+		fromFile, err := f.EDS(ctx)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		require.True(t, eds.Equals(fromFile))
+	})
+
+	t.Run("GetByDataHash", func(t *testing.T) {
+		eds, dah := randomEDS(t)
+		height := height.Add(1)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		f, err = edsStore.GetByHash(ctx, dah.Hash())
+		require.NoError(t, err)
+
+		fromFile, err := f.EDS(ctx)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		require.True(t, eds.Equals(fromFile))
+	})
+
+	t.Run("Does not exist", func(t *testing.T) {
+		_, dah := randomEDS(t)
+		height := height.Add(1)
+
+		has, err := edsStore.HasByHash(ctx, dah.Hash())
+		require.NoError(t, err)
+		require.False(t, has)
+
+		has, err = edsStore.HasByHeight(ctx, height)
+		require.NoError(t, err)
+		require.False(t, has)
+
+		_, err = edsStore.GetByHeight(ctx, height)
+		require.ErrorIs(t, err, ErrNotFound)
+
+		_, err = edsStore.GetByHash(ctx, dah.Hash())
+		require.ErrorIs(t, err, ErrNotFound)
+	})
+
+	t.Run("Remove", func(t *testing.T) {
+		// removing a file that does not exist should be a noop
+		missingHeight := height.Add(1)
+		err := edsStore.Remove(ctx, missingHeight)
+		require.NoError(t, err)
+
+		eds, dah := randomEDS(t)
+		height := height.Add(1)
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		require.NoError(t, f.Close())
+
+		err = edsStore.Remove(ctx, height)
+		require.NoError(t, err)
+
+		// file should be removed from cache
+		_, err = edsStore.cache.Get(height)
+		require.ErrorIs(t, err, cache.ErrCacheMiss)
+
+		// file should not be accessible by hash
+		has, err := edsStore.HasByHash(ctx, dah.Hash())
+		require.NoError(t, err)
+		require.False(t, has)
+
+		// file should not be accessible by height
+		has, err = edsStore.HasByHeight(ctx, height)
+		require.NoError(t, err)
+		require.False(t, has)
+	})
+}
+
+func BenchmarkStore(b *testing.B) {
+	ctx, cancel := context.WithCancel(context.Background())
+	b.Cleanup(cancel)
+
+	edsStore, err := NewStore(DefaultParameters(), b.TempDir())
+	require.NoError(b, err)
+
+	eds := edstest.RandEDS(b, 128)
+	require.NoError(b, err)
+
+	// BenchmarkStore/bench_put_128-10    27    43968818 ns/op (~44ms)
+	b.Run("put 128", func(b *testing.B) {
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			h := share.DataHash(rand.Bytes(5))
+			f, _ := edsStore.Put(ctx, h, uint64(i), eds)
+			_ = f.Close()
+		}
+	})
+
+	// opening a stored EDS reads only the file header, not the full square
+	// BenchmarkStore/bench_read_128-10    82766    14678 ns/op (~15µs)
+	b.Run("open by height, 128", func(b *testing.B) {
+		edsStore, err := NewStore(DefaultParameters(), b.TempDir())
+		require.NoError(b, err)
+
+		// disable cache
+		edsStore.cache = cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})
+
+		dah, err := share.NewRoot(eds)
+		require.NoError(b, err)
+
+		height := uint64(1984)
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(b, err)
+		require.NoError(b, f.Close())
+
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			f, err := edsStore.GetByHeight(ctx, height)
+			require.NoError(b, err)
+			_ = f.Close()
+		}
+	})
+
+	// BenchmarkStore/open_by_hash,_128-10    72921    16799 ns/op (~17µs)
+	b.Run("open by hash, 128", func(b *testing.B) {
+		edsStore, err := NewStore(DefaultParameters(), b.TempDir())
+		require.NoError(b, err)
+
+		// disable cache
+		
edsStore.cache = cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{}) + + dah, err := share.NewRoot(eds) + require.NoError(b, err) + + height := uint64(1984) + f, err := edsStore.Put(ctx, dah.Hash(), height, eds) + require.NoError(b, err) + require.NoError(b, f.Close()) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := edsStore.GetByHash(ctx, dah.Hash()) + require.NoError(b, err) + _ = f.Close() + } + }) +} + +func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + return eds, dah +} From efd829aff65176f785bb1bc4ebc86ace925797bc Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 6 Feb 2024 19:05:26 +0500 Subject: [PATCH 060/132] fix hashing for in-mem proofs cache --- share/store/file/cache_file.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go index f2e26a707f..fd4f103792 100644 --- a/share/store/file/cache_file.go +++ b/share/store/file/cache_file.go @@ -70,7 +70,7 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax // build proofs from shares and cache them shrs, err := f.axis(ctx, axisType, axisIdx) if err != nil { - return inMemoryAxis{}, err + return inMemoryAxis{}, fmt.Errorf("get axis: %w", err) } // calculate proofs @@ -80,20 +80,23 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax for _, shr := range shrs { err = tree.Push(shr) if err != nil { - return inMemoryAxis{}, err + return inMemoryAxis{}, fmt.Errorf("push shares: %w", err) } } // build the tree root, err := tree.Root() if err != nil { - return inMemoryAxis{}, err + return inMemoryAxis{}, fmt.Errorf("calculating root: %w", err) } ax = f.axisCache[axisType][axisIdx] ax.root = root ax.shares = shrs - ax.proofs = newRowProofsGetter(adder.Proofs()) + ax.proofs, err = newRowProofsGetter(adder.Proofs()) + if err != nil { + return inMemoryAxis{}, fmt.Errorf("creating proof getter: %w", err) + } if !f.disableCache { f.axisCache[axisType][axisIdx] = ax @@ -178,14 +181,16 @@ type rowProofsGetter struct { proofs map[cid.Cid]blocks.Block } -func newRowProofsGetter(rawProofs map[cid.Cid][]byte) *rowProofsGetter { +func newRowProofsGetter(rawProofs map[cid.Cid][]byte) (*rowProofsGetter, error) { proofs := make(map[cid.Cid]blocks.Block, len(rawProofs)) for k, v := range rawProofs { - proofs[k] = blocks.NewBlock(v) - } - return &rowProofsGetter{ - proofs: proofs, + b, err := blocks.NewBlockWithCid(v, k) + if err != nil { + return nil, err + } + proofs[k] = b } + return &rowProofsGetter{proofs: proofs}, nil } func (r rowProofsGetter) GetBlock(_ context.Context, c cid.Cid) (blocks.Block, error) { From da5533bb851c115c1cedea63857ec322e10e1b36 Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 6 Feb 2024 19:05:57 +0500 Subject: [PATCH 061/132] fix shrex tests --- share/getters/getter_test.go | 262 ++++------------------------------- share/getters/shrex_test.go | 91 ++++++++---- share/getters/store.go | 4 +- share/ipld/namespace_data.go | 2 - share/p2p/shrexnd/server.go | 2 +- share/store/file/mem_file.go | 4 - share/store/store.go | 11 +- share/utils.go | 8 +- 8 files changed, 102 insertions(+), 282 deletions(-) diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index 77c470dae9..fe37b79685 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -2,28 +2,20 @@ package getters import ( "context" - "os" - "sync" 
- "testing" - "time" - - "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/celestiaorg/celestia-node/share/store" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "sync/atomic" + "testing" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" - dsbadger "github.com/celestiaorg/go-ds-badger4" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/sharetest" ) @@ -31,33 +23,31 @@ func TestStoreGetter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - tmpDir := t.TempDir() - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(storeCfg, tmpDir, ds) - require.NoError(t, err) - - err = edsStore.Start(ctx) + edsStore, err := store.NewStore(store.DefaultParameters(), t.TempDir()) require.NoError(t, err) sg := NewStoreGetter(edsStore) + height := atomic.Uint64{} t.Run("GetShare", func(t *testing.T) { - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) + eds, eh := randomEDS(t) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + f, err := edsStore.Put(ctx, eh.DAH.Hash(), height, eds) require.NoError(t, err) + defer f.Close() - squareSize := int(randEds.Width()) + squareSize := int(eds.Width()) for i := 0; i < squareSize; i++ { for j := 0; j < squareSize; j++ { share, err := sg.GetShare(ctx, eh, i, j) require.NoError(t, err) - assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share) + assert.Equal(t, eds.GetCell(uint(i), uint(j)), share) } } // doesn't panic on indexes too high - _, err := sg.GetShare(ctx, eh, squareSize, squareSize) + _, err = sg.GetShare(ctx, eh, squareSize, squareSize) require.ErrorIs(t, err, share.ErrOutOfBounds) // root not found @@ -67,13 +57,16 @@ func TestStoreGetter(t *testing.T) { }) t.Run("GetEDS", func(t *testing.T) { - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) + eds, eh := randomEDS(t) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + f, err := edsStore.Put(ctx, eh.DAH.Hash(), height, eds) require.NoError(t, err) + defer f.Close() retrievedEDS, err := sg.GetEDS(ctx, eh) require.NoError(t, err) - assert.True(t, randEds.Equals(retrievedEDS)) + assert.True(t, eds.Equals(retrievedEDS)) // root not found emptyRoot := da.MinDataAvailabilityHeader() @@ -83,9 +76,12 @@ func TestStoreGetter(t *testing.T) { }) t.Run("GetSharesByNamespace", func(t *testing.T) { - randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) + eds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + f, err := edsStore.Put(ctx, eh.DAH.Hash(), height, eds) require.NoError(t, err) + defer f.Close() shares, err := sg.GetSharesByNamespace(ctx, eh, namespace) require.NoError(t, err) @@ -99,218 +95,12 @@ func TestStoreGetter(t *testing.T) { require.Empty(t, emptyShares.Flatten()) // root not found - emptyRoot := da.MinDataAvailabilityHeader() - eh.DAH = &emptyRoot + eh.RawHeader.Height = 666 _, err = 
sg.GetSharesByNamespace(ctx, eh, namespace) - require.ErrorIs(t, err, share.ErrNotFound) - }) - - t.Run("GetSharesFromNamespace removes corrupted shard", func(t *testing.T) { - randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - // available - shares, err := sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(eh.DAH, namespace)) - assert.Len(t, shares.Flatten(), 2) - - // 'corrupt' existing CAR by overwriting with a random EDS - f, err := os.OpenFile(tmpDir+"/blocks/"+eh.DAH.String(), os.O_WRONLY, 0644) - require.NoError(t, err) - edsToOverwriteWith, eh := randomEDS(t) - err = eds.WriteEDS(ctx, edsToOverwriteWith, f) - require.NoError(t, err) - - shares, err = sg.GetSharesByNamespace(ctx, eh, namespace) - require.ErrorIs(t, err, share.ErrNotFound) - require.Nil(t, shares) - - // corruption detected, shard is removed - // try every 200ms until it passes or the context ends - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - t.Fatal("context ended before successful retrieval") - case <-ticker.C: - has, err := edsStore.Has(ctx, eh.DAH.Hash()) - if err != nil { - t.Fatal(err) - } - if !has { - require.NoError(t, err) - return - } - } - } - }) -} - -func TestIPLDGetter(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(storeCfg, t.TempDir(), ds) - require.NoError(t, err) - - err = edsStore.Start(ctx) - require.NoError(t, err) - - bStore := edsStore.Blockstore() - bserv := ipld.NewBlockservice(bStore, offline.Exchange(edsStore.Blockstore())) - sg := NewIPLDGetter(bserv) - - t.Run("GetShare", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - squareSize := int(randEds.Width()) - for i := 0; i < squareSize; i++ { - for j := 0; j < squareSize; j++ { - share, err := sg.GetShare(ctx, eh, i, j) - require.NoError(t, err) - assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share) - } - } - - // doesn't panic on indexes too high - _, err := sg.GetShare(ctx, eh, squareSize+1, squareSize+1) - require.ErrorIs(t, err, share.ErrOutOfBounds) - - // root not found - _, eh = randomEDS(t) - _, err = sg.GetShare(ctx, eh, 0, 0) - require.ErrorIs(t, err, share.ErrNotFound) - }) - - t.Run("GetEDS", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - retrievedEDS, err := sg.GetEDS(ctx, eh) - require.NoError(t, err) - assert.True(t, randEds.Equals(retrievedEDS)) - - // Ensure blocks still exist after cleanup - colRoots, _ := retrievedEDS.ColRoots() - has, err := bStore.Has(ctx, ipld.MustCidFromNamespacedSha256(colRoots[0])) - assert.NoError(t, err) - assert.True(t, has) - }) - - t.Run("GetSharesByNamespace", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - // first check that shares are returned correctly if they exist - shares, err := 
sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(eh.DAH, namespace)) - assert.Len(t, shares.Flatten(), 2) - - // namespace not found - randNamespace := sharetest.RandV0Namespace() - emptyShares, err := sg.GetSharesByNamespace(ctx, eh, randNamespace) - require.NoError(t, err) - require.Empty(t, emptyShares.Flatten()) - - // nid doesnt exist in root - emptyRoot := da.MinDataAvailabilityHeader() - eh.DAH = &emptyRoot - emptyShares, err = sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.Empty(t, emptyShares.Flatten()) + require.ErrorIs(t, err, share.ErrNotFound, err) }) } -// BenchmarkIPLDGetterOverBusyCache benchmarks the performance of the IPLDGetter when the -// cache size of the underlying blockstore is less than the number of blocks being requested in -// parallel. This is to ensure performance doesn't degrade when the cache is being frequently -// evicted. -// BenchmarkIPLDGetterOverBusyCache-10/128 1 12460428417 ns/op (~12s) -func BenchmarkIPLDGetterOverBusyCache(b *testing.B) { - const ( - blocks = 10 - size = 128 - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - b.Cleanup(cancel) - - dir := b.TempDir() - ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions) - require.NoError(b, err) - - newStore := func(params *eds.Parameters) *eds.Store { - edsStore, err := eds.NewStore(params, dir, ds) - require.NoError(b, err) - err = edsStore.Start(ctx) - require.NoError(b, err) - return edsStore - } - edsStore := newStore(eds.DefaultParameters()) - - // generate EDSs and store them - headers := make([]*header.ExtendedHeader, blocks) - for i := range headers { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(b, err) - - eh := headertest.RandExtendedHeader(b) - eh.DAH = &dah - - // store cids for read loop later - headers[i] = eh - } - - // restart store to clear cache - require.NoError(b, edsStore.Stop(ctx)) - - // set BlockstoreCacheSize to 1 to force eviction on every read - params := eds.DefaultParameters() - params.BlockstoreCacheSize = 1 - edsStore = newStore(params) - bstore := edsStore.Blockstore() - bserv := ipld.NewBlockservice(bstore, offline.Exchange(bstore)) - - // start client - getter := NewIPLDGetter(bserv) - - // request blocks in parallel - b.ResetTimer() - g := sync.WaitGroup{} - g.Add(blocks) - for _, h := range headers { - h := h - go func() { - defer g.Done() - _, err := getter.GetEDS(ctx, h) - require.NoError(b, err) - }() - } - g.Wait() -} - func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *header.ExtendedHeader) { eds := edstest.RandEDS(t, 4) dah, err := share.NewRoot(eds) diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index b9a23faae3..5ae9e12255 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -4,6 +4,9 @@ import ( "context" "encoding/binary" "errors" + "github.com/celestiaorg/celestia-node/share/store" + "github.com/tendermint/tendermint/libs/rand" + "sync/atomic" "testing" "time" @@ -21,7 +24,6 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p/peers" @@ 
-41,10 +43,9 @@ func TestShrexGetter(t *testing.T) {
 	clHost, srvHost := net.Hosts()[0], net.Hosts()[1]
 
 	// launch eds store and put test data into it
-	edsStore, err := newStore(t)
-	require.NoError(t, err)
-	err = edsStore.Start(ctx)
+	edsStore, err := store.NewStore(store.DefaultParameters(), t.TempDir())
 	require.NoError(t, err)
+	height := atomic.Uint64{}
 
 	ndClient, _ := newNDClientServer(ctx, t, edsStore, srvHost, clHost)
 	edsClient, _ := newEDSClientServer(ctx, t, edsStore, srvHost, clHost)
@@ -61,14 +62,20 @@ func TestShrexGetter(t *testing.T) {
 		t.Cleanup(cancel)
 
 		// generate test data
-		size := 64
+		size := 128
 		namespace := sharetest.RandV0Namespace()
-		randEDS, dah := edstest.RandEDSWithNamespace(t, namespace, size*size, size)
+		sqSize := size * size
+		eds, dah := edstest.RandEDSWithNamespace(t, namespace, sqSize/2+rand.Intn(sqSize/2), size)
 		eh := headertest.RandExtendedHeaderWithRoot(t, dah)
-		require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS))
+		height := height.Add(1)
+		eh.RawHeader.Height = int64(height)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		defer f.Close()
 		peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{
 			DataHash: dah.Hash(),
-			Height:   1,
+			Height:   height,
 		})
 
 		got, err := getter.GetSharesByNamespace(ctx, eh, namespace)
@@ -83,9 +90,11 @@ func TestShrexGetter(t *testing.T) {
 		// generate test data
 		_, dah, namespace := generateTestEDS(t)
 		eh := headertest.RandExtendedHeaderWithRoot(t, dah)
+		height := height.Add(1)
+
 		peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{
 			DataHash: dah.Hash(),
-			Height:   1,
+			Height:   height,
 		})
 
 		_, err := getter.GetSharesByNamespace(ctx, eh, namespace)
@@ -93,16 +102,21 @@ func TestShrexGetter(t *testing.T) {
 	})
 
 	t.Run("ND_namespace_not_included", func(t *testing.T) {
 		ctx, cancel := context.WithTimeout(ctx, time.Second)
 		t.Cleanup(cancel)
 		// generate test data
 		eds, dah, maxNamespace := generateTestEDS(t)
 		eh := headertest.RandExtendedHeaderWithRoot(t, dah)
-		require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds))
+		height := height.Add(1)
+		eh.RawHeader.Height = int64(height)
+
+		f, err := edsStore.Put(ctx, dah.Hash(), height, eds)
+		require.NoError(t, err)
+		f.Close()
 		peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{
 			DataHash: dah.Hash(),
-			Height:   1,
+			Height:   height,
 		})
 
 		nID, err := addToNamespace(maxNamespace, -1)
@@ -110,11 +124,19 @@ func TestShrexGetter(t *testing.T) {
 		// check for namespace to be between max and min namespace in root
 		require.Len(t, ipld.FilterRootByNamespace(dah, nID), 1)
 
-		emptyShares, err := getter.GetSharesByNamespace(ctx, eh, nID)
+		sgetter := NewStoreGetter(edsStore)
+		emptyShares, err := sgetter.GetSharesByNamespace(ctx, eh, nID)
 		require.NoError(t, err)
 		// no shares should be returned
 		require.Empty(t, emptyShares.Flatten())
 		require.Nil(t, emptyShares.Verify(dah, nID))
+
+		emptyShares1, err := getter.GetSharesByNamespace(ctx, eh, nID)
+		require.Equal(t, emptyShares.Flatten(), emptyShares1.Flatten())
+		require.NoError(t, err)
+		// no shares should be returned
+		require.Empty(t, emptyShares1.Flatten())
+		require.Nil(t, emptyShares1.Verify(dah, nID))
 	})
 
 	t.Run("ND_namespace_not_in_dah", func(t *testing.T) {
@@ -124,10 +146,15 @@ func TestShrexGetter(t *testing.T) {
 		// generate test data
 		eds, dah, maxNamesapce := generateTestEDS(t)
 		eh := headertest.RandExtendedHeaderWithRoot(t, dah)
-		require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds))
+		height := height.Add(1)
+		eh.RawHeader.Height = 
int64(height) + + f, err := edsStore.Put(ctx, dah.Hash(), height, eds) + require.NoError(t, err) + defer f.Close() peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), - Height: 1, + Height: height, }) namespace, err := addToNamespace(maxNamesapce, 1) @@ -147,17 +174,22 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - randEDS, dah, _ := generateTestEDS(t) + eds, dah, _ := generateTestEDS(t) eh := headertest.RandExtendedHeaderWithRoot(t, dah) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + + f, err := edsStore.Put(ctx, dah.Hash(), height, eds) + require.NoError(t, err) + defer f.Close() peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), - Height: 1, + Height: height, }) got, err := getter.GetEDS(ctx, eh) require.NoError(t, err) - require.Equal(t, randEDS.Flattened(), got.Flattened()) + require.Equal(t, eds.Flattened(), got.Flattened()) }) t.Run("EDS_ctx_deadline", func(t *testing.T) { @@ -166,9 +198,12 @@ func TestShrexGetter(t *testing.T) { // generate test data _, dah, _ := generateTestEDS(t) eh := headertest.RandExtendedHeaderWithRoot(t, dah) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), - Height: 1, + Height: height, }) cancel() @@ -183,9 +218,12 @@ func TestShrexGetter(t *testing.T) { // generate test data _, dah, _ := generateTestEDS(t) eh := headertest.RandExtendedHeaderWithRoot(t, dah) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), - Height: 1, + Height: height, }) _, err := getter.GetEDS(ctx, eh) @@ -193,13 +231,6 @@ func TestShrexGetter(t *testing.T) { }) } -func newStore(t *testing.T) (*eds.Store, error) { - t.Helper() - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - return eds.NewStore(eds.DefaultParameters(), t.TempDir(), ds) -} - func generateTestEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root, share.Namespace) { eds := edstest.RandEDS(t, 4) dah, err := share.NewRoot(eds) @@ -230,7 +261,7 @@ func testManager( } func newNDClientServer( - ctx context.Context, t *testing.T, edsStore *eds.Store, srvHost, clHost host.Host, + ctx context.Context, t *testing.T, edsStore *store.Store, srvHost, clHost host.Host, ) (*shrexnd.Client, *shrexnd.Server) { params := shrexnd.DefaultParameters() @@ -250,7 +281,7 @@ func newNDClientServer( } func newEDSClientServer( - ctx context.Context, t *testing.T, edsStore *eds.Store, srvHost, clHost host.Host, + ctx context.Context, t *testing.T, edsStore *store.Store, srvHost, clHost host.Host, ) (*shrexeds.Client, *shrexeds.Server) { params := shrexeds.DefaultParameters() diff --git a/share/getters/store.go b/share/getters/store.go index 0c72fbedaf..ece14c9c0d 100644 --- a/share/getters/store.go +++ b/share/getters/store.go @@ -113,7 +113,7 @@ func (sg *StoreGetter) GetSharesByNamespace( utils.SetStatusAndEnd(span, err) }() - file, err := sg.store.GetByHash(ctx, header.DAH.Hash()) + file, err := sg.store.GetByHeight(ctx, header.Height()) if errors.Is(err, store.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound @@ -127,7 +127,7 @@ func (sg *StoreGetter) GetSharesByNamespace( from, to := share.RowRangeForNamespace(header.DAH, namespace) shares = make(share.NamespacedShares, 0, to-from+1) - for row := from; row <= 
to; row++ {
+	for row := from; row < to; row++ {
 		data, err := file.Data(ctx, namespace, row)
 		if err != nil {
 			return nil, fmt.Errorf("getter/store: failed to retrieve namespaced data: %w", err)
diff --git a/share/ipld/namespace_data.go b/share/ipld/namespace_data.go
index 0c7d73b984..5a6fd2abb4 100644
--- a/share/ipld/namespace_data.go
+++ b/share/ipld/namespace_data.go
@@ -2,7 +2,6 @@ package ipld
 
 import (
 	"context"
-	"encoding/hex"
 	"errors"
 	"fmt"
 	"sync"
@@ -80,7 +79,6 @@ func (n *NamespaceData) validate(rootCid cid.Cid) error {
 
 	root := NamespacedSha256FromCID(rootCid)
 	if n.namespace.IsOutsideRange(root, root) {
-		fmt.Println("look", n.namespace.String(), hex.EncodeToString(root))
 		return ErrNamespaceOutsideRange
 	}
 	return nil
diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go
index 5d99041e7a..7905394f7b 100644
--- a/share/p2p/shrexnd/server.go
+++ b/share/p2p/shrexnd/server.go
@@ -188,7 +188,7 @@ func (srv *Server) getNamespaceData(ctx context.Context,
 	defer utils.CloseAndLog(log, "file", file)
 
 	namespacedRows := make(share.NamespacedShares, 0, toRow-fromRow+1)
-	for rowIdx := fromRow; rowIdx <= toRow; rowIdx++ {
+	for rowIdx := fromRow; rowIdx < toRow; rowIdx++ {
 		data, err := file.Data(ctx, namespace, rowIdx)
 		if err != nil {
 			return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving data: %w", err)
diff --git a/share/store/file/mem_file.go b/share/store/file/mem_file.go
index 97f9e06379..213bbb7555 100644
--- a/share/store/file/mem_file.go
+++ b/share/store/file/mem_file.go
@@ -3,8 +3,6 @@ package file
 import (
 	"bytes"
 	"context"
-	"encoding/hex"
-	"fmt"
 	"io"
 
 	"github.com/celestiaorg/celestia-app/pkg/da"
@@ -118,13 +116,11 @@ func ndDataFromShares(shares []share.Share, namespace share.Namespace, rowIdx in
 		return share.NamespacedRow{}, err
 	}
 
-	fmt.Println("roooot", rowIdx, hex.EncodeToString(root))
 	err = batchAdder.Commit()
 	if err != nil {
 		return share.NamespacedRow{}, err
 	}
 
-	fmt.Println("lookup", rowIdx)
 	row, proof, err := ipld.GetSharesByNamespace(context.TODO(), bserv, root, namespace, len(shares))
 	if err != nil {
 		return share.NamespacedRow{}, err
diff --git a/share/store/store.go b/share/store/store.go
index 5b317dc033..90c15bb837 100644
--- a/share/store/store.go
+++ b/share/store/store.go
@@ -22,7 +22,12 @@ var (
 	tracer = otel.Tracer("share/eds")
 )
 
-// TODO(@walldiss): persist store stats like amount of files, file types, avg file size etc in a file
+// TODO(@walldiss):
+//  - persist store stats like the number of files, file types, and average file size in a file
+//  - handle corrupted files
+//  - maintain an in-memory missing-files index / bloom filter to quickly reject lookups for files that are not stored
+//  - lock store folder
+
 const (
 	hashsPath   = "/blocks/"
 	heightsPath = "/heights/"
@@ -44,8 +49,6 @@ type Store struct {
 	// cache is used to cache recent blocks and blocks that are accessed frequently
 	cache *cache.DoubleCache
 
-	//TODO: maintain in-memory missing files index / bloom-filter to fast return for not stored files.
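The bloom-filter idea in the TODO above could take roughly the following shape: a small in-memory filter over heights that lookups consult before touching the filesystem, where a negative answer is always exact and a false positive merely costs one extra os.Stat. Everything below is a hypothetical sketch (type names, hashing, and sizing included), not part of this patch:

package store

// heightBloom is a fixed-size bloom filter over heights. Stored heights
// are Added; MayHave returning false means the height was definitely
// never stored, so hasByHeight can fail fast without a filesystem call.
type heightBloom struct {
	bits []uint64
}

func newHeightBloom(words int) *heightBloom {
	if words < 1 {
		words = 1
	}
	return &heightBloom{bits: make([]uint64, words)}
}

// positions derives two bit positions from the height via cheap mixing.
func (b *heightBloom) positions(h uint64) (uint64, uint64) {
	h1 := h * 0x9e3779b97f4a7c15
	h2 := (h ^ 0xff51afd7ed558ccd) * 0xc4ceb9fe1a85ec53
	n := uint64(len(b.bits) * 64)
	return h1 % n, h2 % n
}

func (b *heightBloom) Add(height uint64) {
	p1, p2 := b.positions(height)
	b.bits[p1/64] |= uint64(1) << (p1 % 64)
	b.bits[p2/64] |= uint64(1) << (p2 % 64)
}

// MayHave reports whether the height might have been added; false
// positives are possible, false negatives are not.
func (b *heightBloom) MayHave(height uint64) bool {
	p1, p2 := b.positions(height)
	return b.bits[p1/64]&(uint64(1)<<(p1%64)) != 0 &&
		b.bits[p2/64]&(uint64(1)<<(p2%64)) != 0
}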
- // stripedLocks is used to synchronize parallel operations stripLock *striplock @@ -58,8 +61,6 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { return nil, err } - //TODO: acquire DirectoryLock store lockGuard - // ensure blocks folder if err := ensureFolder(basePath + hashsPath); err != nil { return nil, fmt.Errorf("ensure blocks folder: %w", err) diff --git a/share/utils.go b/share/utils.go index 9fe87d4110..db12b039fd 100644 --- a/share/utils.go +++ b/share/utils.go @@ -2,13 +2,17 @@ package share // TODO(@walldiss): refactor this into proper package once we have a better idea of what it should look like func RowRangeForNamespace(root *Root, namespace Namespace) (from, to int) { + from = -1 for i, row := range root.RowRoots { if !namespace.IsOutsideRange(row, row) { - if from == 0 { + if from == -1 { from = i } - to = i + to = i + 1 } } + if to == 0 { + return 0, 0 + } return from, to } From c0040966d9383afb461b296aecdc3e660501c452 Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 6 Feb 2024 19:27:24 +0500 Subject: [PATCH 062/132] add non-inclusion tests to file --- share/getters/shrex_test.go | 106 +--------------------------------- share/namespace.go | 48 +++++++++++++++ share/namespace_test.go | 55 ++++++++++++++++++ share/store/file/file_test.go | 31 ++++++++-- 4 files changed, 130 insertions(+), 110 deletions(-) diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index 5ae9e12255..36fea496fd 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -2,8 +2,6 @@ package getters import ( "context" - "encoding/binary" - "errors" "github.com/celestiaorg/celestia-node/share/store" "github.com/tendermint/tendermint/libs/rand" "sync/atomic" @@ -119,7 +117,7 @@ func TestShrexGetter(t *testing.T) { Height: height, }) - nID, err := addToNamespace(maxNamespace, -1) + nID, err := maxNamespace.AddInt(-1) require.NoError(t, err) // check for namespace to be between max and min namespace in root require.Len(t, ipld.FilterRootByNamespace(dah, nID), 1) @@ -157,7 +155,7 @@ func TestShrexGetter(t *testing.T) { Height: height, }) - namespace, err := addToNamespace(maxNamesapce, 1) + namespace, err := maxNamesapce.AddInt(1) require.NoError(t, err) // check for namespace to be not in root require.Len(t, ipld.FilterRootByNamespace(dah, namespace), 0) @@ -299,103 +297,3 @@ func newEDSClientServer( require.NoError(t, err) return client, server } - -// addToNamespace adds arbitrary int value to namespace, treating namespace as big-endian -// implementation of int -func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) { - if val == 0 { - return namespace, nil - } - // Convert the input integer to a byte slice and add it to result slice - result := make([]byte, len(namespace)) - if val > 0 { - binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(val)) - } else { - binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(-val)) - } - - // Perform addition byte by byte - var carry int - for i := len(namespace) - 1; i >= 0; i-- { - sum := 0 - if val > 0 { - sum = int(namespace[i]) + int(result[i]) + carry - } else { - sum = int(namespace[i]) - int(result[i]) + carry - } - - switch { - case sum > 255: - carry = 1 - sum -= 256 - case sum < 0: - carry = -1 - sum += 256 - default: - carry = 0 - } - - result[i] = uint8(sum) - } - - // Handle any remaining carry - if carry != 0 { - return nil, errors.New("namespace overflow") - } - - return result, nil -} - -func TestAddToNamespace(t *testing.T) { - testCases := 
[]struct {
-		name          string
-		value         int
-		input         share.Namespace
-		expected      share.Namespace
-		expectedError error
-	}{
-		{
-			name:          "Positive value addition",
-			value:         42,
-			input:         share.Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
-			expected:      share.Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2b},
-			expectedError: nil,
-		},
-		{
-			name:          "Negative value addition",
-			value:         -42,
-			input:         share.Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
-			expected:      share.Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x01, 0x1, 0x1, 0x1, 0x0, 0xd7},
-			expectedError: nil,
-		},
-		{
-			name:          "Overflow error",
-			value:         1,
-			input:         share.Namespace{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
-			expected:      nil,
-			expectedError: errors.New("namespace overflow"),
-		},
-		{
-			name:          "Overflow error negative",
-			value:         -1,
-			input:         share.Namespace{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
-			expected:      nil,
-			expectedError: errors.New("namespace overflow"),
-		},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			result, err := addToNamespace(tc.input, tc.value)
-			if tc.expectedError == nil {
-				require.NoError(t, err)
-				require.Equal(t, tc.expected, result)
-				return
-			}
-			require.Error(t, err)
-			if err.Error() != tc.expectedError.Error() {
-				t.Errorf("Unexpected error message. Expected: %v, Got: %v", tc.expectedError, err)
-			}
-		})
-	}
-}
diff --git a/share/namespace.go b/share/namespace.go
index df4ad74058..7d27b58e9a 100644
--- a/share/namespace.go
+++ b/share/namespace.go
@@ -2,7 +2,9 @@ package share
 
 import (
 	"bytes"
+	"encoding/binary"
 	"encoding/hex"
+	"errors"
 	"fmt"
 
 	appns "github.com/celestiaorg/celestia-app/pkg/namespace"
@@ -182,3 +184,49 @@ func (n Namespace) IsGreater(target Namespace) bool {
 func (n Namespace) IsGreaterOrEqualThan(target Namespace) bool {
 	return bytes.Compare(n, target) > -1
 }
+
+// AddInt adds an arbitrary int value to the namespace, treating the namespace
+// bytes as a big-endian integer
+func (n Namespace) AddInt(val int) (Namespace, error) {
+	if val == 0 {
+		return n, nil
+	}
+	// Convert the input integer to a byte slice and add it to result slice
+	result := make([]byte, len(n))
+	if val > 0 {
+		binary.BigEndian.PutUint64(result[len(n)-8:], uint64(val))
+	} else {
+		binary.BigEndian.PutUint64(result[len(n)-8:], uint64(-val))
+	}
+
+	// Perform addition byte by byte
+	var carry int
+	for i := len(n) - 1; i >= 0; i-- {
+		sum := 0
+		if val > 0 {
+			sum = int(n[i]) + int(result[i]) + carry
+		} else {
+			sum = int(n[i]) - int(result[i]) + carry
+		}
+
+		switch {
+		case sum > 255:
+			carry = 1
+			sum -= 256
+		case sum < 0:
+			carry = -1
+			sum += 256
+		default:
+			carry = 0
+		}
+
+		result[i] = uint8(sum)
+	}
+
+	// Handle any remaining carry
+	if carry != 0 {
+		return nil, errors.New("namespace overflow")
+	}
+
+	return result, nil
+}
diff --git a/share/namespace_test.go b/share/namespace_test.go
index 786441b043..c2d3d6328a 100644
--- a/share/namespace_test.go
+++ b/share/namespace_test.go
@@ -2,6 +2,7 @@ package share
 
 import (
 	"bytes"
+	"errors"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -198,6 +199,60 @@ func TestValidateForBlob(t *testing.T) {
 	}
 }
 
+func TestAddToNamespace(t *testing.T) {
+	testCases := []struct {
+		name          string
+		value         int
+		input         Namespace
+		expected      Namespace
+		expectedError error
+	}{
+		{
+			name:          "Positive value addition",
+			value:         42,
+			input:         Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
+			expected:      Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 
0x2b}, + expectedError: nil, + }, + { + name: "Negative value addition", + value: -42, + input: Namespace{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + expected: Namespace{0x1, 0x1, 0x1, 0x1, 0x1, 0x01, 0x1, 0x1, 0x1, 0x0, 0xd7}, + expectedError: nil, + }, + { + name: "Overflow error", + value: 1, + input: Namespace{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + expected: nil, + expectedError: errors.New("namespace overflow"), + }, + { + name: "Overflow error negative", + value: -1, + input: Namespace{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + expected: nil, + expectedError: errors.New("namespace overflow"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := tc.input.AddInt(tc.value) + if tc.expectedError == nil { + require.NoError(t, err) + require.Equal(t, tc.expected, result) + return + } + require.Error(t, err) + if err.Error() != tc.expectedError.Error() { + t.Errorf("Unexpected error message. Expected: %v, Got: %v", tc.expectedError, err) + } + }) + } +} + func primaryReservedNamespace(lastByte byte) Namespace { result := make([]byte, NamespaceSize) result = append(result, appns.NamespaceVersionZero) diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go index 166c80bba9..adc2117873 100644 --- a/share/store/file/file_test.go +++ b/share/store/file/file_test.go @@ -3,6 +3,7 @@ package file import ( "context" "fmt" + "github.com/celestiaorg/nmt" mrand "math/rand" "strconv" "testing" @@ -47,13 +48,31 @@ func testFileShare(t *testing.T, createFile createFile, size int) { } func testFileData(t *testing.T, createFile createFile, size int) { - // generate EDS with random data and some shares with the same namespace - namespace := sharetest.RandV0Namespace() - amount := mrand.Intn(size*size-1) + 1 - eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) - - f := createFile(eds) + t.Run("included", func(t *testing.T) { + // generate EDS with random data and some shares with the same namespace + namespace := sharetest.RandV0Namespace() + amount := mrand.Intn(size*size-1) + 1 + eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) + f := createFile(eds) + testData(t, f, namespace, dah) + }) + + t.Run("not included", func(t *testing.T) { + // generate EDS with random data and some shares with the same namespace + eds := edstest.RandEDS(t, size) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + maxNs := nmt.MaxNamespace(dah.RowRoots[(len(dah.RowRoots))/2-1], share.NamespaceSize) + targetNs, err := share.Namespace(maxNs).AddInt(-1) + require.NoError(t, err) + + f := createFile(eds) + testData(t, f, targetNs, dah) + }) +} +func testData(t *testing.T, f EdsFile, namespace share.Namespace, dah *share.Root) { for i, root := range dah.RowRoots { if !namespace.IsOutsideRange(root, root) { nd, err := f.Data(context.Background(), namespace, i) From fb93edb2da5a3ed66c183c37ae1906893621a55b Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 6 Feb 2024 21:06:45 +0500 Subject: [PATCH 063/132] fix shwap tests --- share/shwap/data_hasher_test.go | 2 + share/shwap/data_id_test.go | 2 +- share/shwap/data_test.go | 2 +- share/shwap/getter_test.go | 85 ++---- share/shwap/shwap_test.go | 463 ++++++++++++++++---------------- 5 files changed, 251 insertions(+), 303 deletions(-) diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go index f94f1aa6e6..ff4a800be1 100644 --- a/share/shwap/data_hasher_test.go +++ b/share/shwap/data_hasher_test.go 
@@ -10,6 +10,8 @@ import ( "github.com/celestiaorg/celestia-node/share/sharetest" ) +// TODO(@walldiss): +// FIX: hasher test succeed, while logging unmarshal error: "unmarshaling Data: proto: Data: wiretype end group for non-group" func TestDataHasher(t *testing.T) { hasher := &DataHasher{} diff --git a/share/shwap/data_id_test.go b/share/shwap/data_id_test.go index 1068d4d56f..7fa87e8e41 100644 --- a/share/shwap/data_id_test.go +++ b/share/shwap/data_id_test.go @@ -12,7 +12,7 @@ import ( func TestDataID(t *testing.T) { ns := sharetest.RandV0Namespace() - _, root := edstest.RandEDSWithNamespace(t, ns, 4) + _, root := edstest.RandEDSWithNamespace(t, ns, 8, 4) id, err := NewDataID(1, 1, ns, root) require.NoError(t, err) diff --git a/share/shwap/data_test.go b/share/shwap/data_test.go index 43e44d547c..fc616da235 100644 --- a/share/shwap/data_test.go +++ b/share/shwap/data_test.go @@ -12,7 +12,7 @@ import ( func TestData(t *testing.T) { namespace := sharetest.RandV0Namespace() - square, root := edstest.RandEDSWithNamespace(t, namespace, 8) + square, root := edstest.RandEDSWithNamespace(t, namespace, 16, 8) nds, err := NewDataFromEDS(square, 1, namespace) require.NoError(t, err) diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index 3625f17471..b3223387f5 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -1,10 +1,12 @@ -package shwap +package shwap_test import ( "bytes" "context" - "encoding/binary" "fmt" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/store" + ds_sync "github.com/ipfs/go-datastore/sync" "math/rand" "testing" "time" @@ -14,7 +16,6 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" format "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,11 +28,10 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/sharetest" - "github.com/celestiaorg/celestia-node/share/store/cache" ) func TestGetter(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() size := 8 @@ -39,9 +39,9 @@ func TestGetter(t *testing.T) { square, root := edstest.RandEDSWithNamespace(t, ns, size*size, size) hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} - bstore := edsBlockstore(square) + bstore := edsBlockstore(ctx, t, square, hdr.Height()) exch := DummySessionExchange{bstore} - get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) + get := shwap.NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) t.Run("GetShares", func(t *testing.T) { idxs := rand.Perm(int(square.Width() ^ 2))[:10] @@ -89,14 +89,14 @@ func TestGetter(t *testing.T) { square := edstest.RandEDS(t, 8) root, err := share.NewRoot(square) require.NoError(t, err) - hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} + hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 3}, DAH: root} - bstore := edsBlockstore(square) + bstore := edsBlockstore(ctx, t, square, hdr.Height()) exch := &DummySessionExchange{bstore} - get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) + get := shwap.NewGetter(exch, 
blockstore.NewBlockstore(datastore.NewMapDatastore())) maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) - ns, err := addToNamespace(maxNs, -1) + ns, err := share.Namespace(maxNs).AddInt(-1) require.NoError(t, err) require.Len(t, ipld.FilterRootByNamespace(root, ns), 1) @@ -109,53 +109,6 @@ func TestGetter(t *testing.T) { }) } -// addToNamespace adds arbitrary int value to namespace, treating namespace as big-endian -// implementation of int -// TODO: dedup with getters/shrex_test.go -func addToNamespace(namespace share.Namespace, val int) (share.Namespace, error) { - if val == 0 { - return namespace, nil - } - // Convert the input integer to a byte slice and Add it to result slice - result := make([]byte, len(namespace)) - if val > 0 { - binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(val)) - } else { - binary.BigEndian.PutUint64(result[len(namespace)-8:], uint64(-val)) - } - - // Perform addition byte by byte - var carry int - for i := len(namespace) - 1; i >= 0; i-- { - sum := 0 - if val > 0 { - sum = int(namespace[i]) + int(result[i]) + carry - } else { - sum = int(namespace[i]) - int(result[i]) + carry - } - - switch { - case sum > 255: - carry = 1 - sum -= 256 - case sum < 0: - carry = -1 - sum += 256 - default: - carry = 0 - } - - result[i] = uint8(sum) - } - - // Handle any remaining carry - if carry != 0 { - return nil, fmt.Errorf("namespace overflow") - } - - return result, nil -} - type DummySessionExchange struct { blockstore.Blockstore } @@ -214,18 +167,16 @@ func (e DummySessionExchange) Close() error { return nil } -func edsBlockstore(sqr *rsmt2d.ExtendedDataSquare) blockstore.Blockstore { - edsStore, err := NewStore(DefaultParameters(), t.TempDir()) +func edsBlockstore(ctx context.Context, t *testing.T, eds *rsmt2d.ExtendedDataSquare, height uint64) blockstore.Blockstore { + dah, err := share.NewRoot(eds) require.NoError(t, err) - // disable cache - edsStore.cache = cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{}) - bs := NewBlockstore(edsStore, ds_sync.MutexWrap(ds.NewMapDatastore())) - - height := uint64(100) - eds, dah := randomEDS(t) + edsStore, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) f, err := edsStore.Put(ctx, dah.Hash(), height, eds) require.NoError(t, err) - require.NoError(t, f.Close()) + f.Close() + + return store.NewBlockstore(edsStore, ds_sync.MutexWrap(datastore.NewMapDatastore())) } diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index 3ddc4b3a73..9fa8b8f9e6 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -2,252 +2,247 @@ package shwap import ( "context" - "testing" - "time" - "github.com/ipfs/boxo/bitswap" "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/exchange" "github.com/ipfs/boxo/routing/offline" - "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" record "github.com/libp2p/go-libp2p-record" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/sharetest" + "testing" ) -// TestSampleRoundtripGetBlock tests full protocol round trip of: -// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. 
-func TestSampleRoundtripGetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - square := edstest.RandEDS(t, 8) - root, err := share.NewRoot(square) - require.NoError(t, err) - - b := edsBlockstore(square) - client := remoteClient(ctx, t, b) - - width := int(square.Width()) - for i := 0; i < width*width; i++ { - smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col - require.NoError(t, err) - - sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { - return sample.Verify(root) - }) - - cid := smpl.Cid() - blkOut, err := client.GetBlock(ctx, cid) - require.NoError(t, err) - assert.EqualValues(t, cid, blkOut.Cid()) - - smpl, err = SampleFromBlock(blkOut) - assert.NoError(t, err) - - err = smpl.Verify(root) - assert.NoError(t, err) - } -} - -// TODO: Debug why is it flaky -func TestSampleRoundtripGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - square := edstest.RandEDS(t, 8) - root, err := share.NewRoot(square) - require.NoError(t, err) - b := edsBlockstore(square) - client := remoteClient(ctx, t, b) - - set := cid.NewSet() - width := int(square.Width()) - for i := 0; i < width*width; i++ { - smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col - require.NoError(t, err) - set.Add(smpl.Cid()) - - sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { - return sample.Verify(root) - }) - } - - blks, err := client.GetBlocks(ctx, set.Keys()) - require.NoError(t, err) - - err = set.ForEach(func(c cid.Cid) error { - select { - case blk := <-blks: - assert.True(t, set.Has(blk.Cid())) - - smpl, err := SampleFromBlock(blk) - assert.NoError(t, err) - - err = smpl.Verify(root) // bitswap already performed validation and this is only for testing - assert.NoError(t, err) - case <-ctx.Done(): - return ctx.Err() - } - return nil - }) - assert.NoError(t, err) -} - -func TestRowRoundtripGetBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - square := edstest.RandEDS(t, 16) - root, err := share.NewRoot(square) - require.NoError(t, err) - b := edsBlockstore(square) - client := remoteClient(ctx, t, b) - - width := int(square.Width()) - for i := 0; i < width; i++ { - row, err := NewRowFromEDS(1, i, square) - require.NoError(t, err) - - rowVerifiers.Add(row.RowID, func(row Row) error { - return row.Verify(root) - }) - - cid := row.Cid() - blkOut, err := client.GetBlock(ctx, cid) - require.NoError(t, err) - assert.EqualValues(t, cid, blkOut.Cid()) - - row, err = RowFromBlock(blkOut) - assert.NoError(t, err) - - err = row.Verify(root) - assert.NoError(t, err) - } -} - -func TestRowRoundtripGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - square := edstest.RandEDS(t, 16) - root, err := share.NewRoot(square) - require.NoError(t, err) - b := edsBlockstore(square) - client := remoteClient(ctx, t, b) - - set := cid.NewSet() - width := int(square.Width()) - for i := 0; i < width; i++ { - row, err := NewRowFromEDS(1, i, square) - require.NoError(t, err) - set.Add(row.Cid()) - - rowVerifiers.Add(row.RowID, func(row Row) error { - return row.Verify(root) - }) - } - - blks, err := client.GetBlocks(ctx, set.Keys()) - require.NoError(t, err) - - err = set.ForEach(func(c cid.Cid) error { - select { - case blk := <-blks: - assert.True(t, set.Has(blk.Cid())) - - row, err := RowFromBlock(blk) - assert.NoError(t, 
err)
-
-			err = row.Verify(root)
-			assert.NoError(t, err)
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-		return nil
-	})
-	assert.NoError(t, err)
-}
-
-func TestDataRoundtripGetBlock(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
-	defer cancel()
-
-	namespace := sharetest.RandV0Namespace()
-	sqr, root := edstest.RandEDSWithNamespace(t, namespace, 16)
-	b := edsBlockstore(sqr)
-	client := remoteClient(ctx, t, b)
-
-	nds, err := NewDataFromEDS(sqr, 1, namespace)
-	require.NoError(t, err)
-
-	for _, nd := range nds {
-		dataVerifiers.Add(nd.DataID, func(data Data) error {
-			return data.Verify(root)
-		})
-
-		cid := nd.Cid()
-		blkOut, err := client.GetBlock(ctx, cid)
-		require.NoError(t, err)
-		assert.EqualValues(t, cid, blkOut.Cid())
-
-		ndOut, err := DataFromBlock(blkOut)
-		assert.NoError(t, err)
-
-		err = ndOut.Verify(root)
-		assert.NoError(t, err)
-	}
-}
-
-func TestDataRoundtripGetBlocks(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
-	defer cancel()
-
-	namespace := sharetest.RandV0Namespace()
-	sqr, root := edstest.RandEDSWithNamespace(t, namespace, 16)
-	b := edsBlockstore(sqr)
-	client := remoteClient(ctx, t, b)
-
-	nds, err := NewDataFromEDS(sqr, 1, namespace)
-	require.NoError(t, err)
-
-	set := cid.NewSet()
-	for _, nd := range nds {
-		set.Add(nd.Cid())
-
-		dataVerifiers.Add(nd.DataID, func(data Data) error {
-			return data.Verify(root)
-		})
-	}
-
-	blks, err := client.GetBlocks(ctx, set.Keys())
-	require.NoError(t, err)
-
-	err = set.ForEach(func(c cid.Cid) error {
-		select {
-		case blk := <-blks:
-			assert.True(t, set.Has(blk.Cid()))
-
-			smpl, err := DataFromBlock(blk)
-			assert.NoError(t, err)
-
-			err = smpl.Verify(root)
-			assert.NoError(t, err)
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-		return nil
-	})
-	assert.NoError(t, err)
-}
+// TODO(@walldiss): these tests work, but importing edsStore here would reverse the dependency between packages;
+// - need to rework them to test over a local blockstore
+
+//// TestSampleRoundtripGetBlock tests full protocol round trip of:
+//// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse.
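One way to approach the rework suggested in the TODO above is to feed the tests from a purely in-memory blockstore, so the shwap tests never have to import the store package. A hedged sketch using only go-datastore and boxo primitives (the localBlockstore helper is illustrative, not part of this patch):

package shwap_test

import (
	"context"

	"github.com/ipfs/boxo/blockstore"
	blocks "github.com/ipfs/go-block-format"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

// localBlockstore builds a throwaway in-memory blockstore pre-filled with
// the given blocks, e.g. shwap samples or rows already encoded as IPLD
// blocks, for serving to the remote client under test.
func localBlockstore(ctx context.Context, blks []blocks.Block) (blockstore.Blockstore, error) {
	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	if err := bs.PutMany(ctx, blks); err != nil {
		return nil, err
	}
	return bs, nil
}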
+//func TestSampleRoundtripGetBlock(t *testing.T) { +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) +// defer cancel() +// +// square := edstest.RandEDS(t, 8) +// root, err := share.NewRoot(square) +// require.NoError(t, err) +// +// b := edsBlockstore(ctx, t, square) +// client := remoteClient(ctx, t, b) +// +// width := int(square.Width()) +// for i := 0; i < width*width; i++ { +// smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col +// require.NoError(t, err) +// +// sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { +// return sample.Verify(root) +// }) +// +// cid := smpl.Cid() +// blkOut, err := client.GetBlock(ctx, cid) +// require.NoError(t, err) +// assert.EqualValues(t, cid, blkOut.Cid()) +// +// smpl, err = SampleFromBlock(blkOut) +// assert.NoError(t, err) +// +// err = smpl.Verify(root) +// assert.NoError(t, err) +// } +//} +// +//// TODO: Debug why is it flaky +//func TestSampleRoundtripGetBlocks(t *testing.T) { +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) +// defer cancel() +// +// square := edstest.RandEDS(t, 8) +// root, err := share.NewRoot(square) +// require.NoError(t, err) +// b := edsBlockstore(ctx, t, square) +// client := remoteClient(ctx, t, b) +// +// set := cid.NewSet() +// width := int(square.Width()) +// for i := 0; i < width*width; i++ { +// smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col +// require.NoError(t, err) +// set.Add(smpl.Cid()) +// +// sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { +// return sample.Verify(root) +// }) +// } +// +// blks, err := client.GetBlocks(ctx, set.Keys()) +// require.NoError(t, err) +// +// err = set.ForEach(func(c cid.Cid) error { +// select { +// case blk := <-blks: +// assert.True(t, set.Has(blk.Cid())) +// +// smpl, err := SampleFromBlock(blk) +// assert.NoError(t, err) +// +// err = smpl.Verify(root) // bitswap already performed validation and this is only for testing +// assert.NoError(t, err) +// case <-ctx.Done(): +// return ctx.Err() +// } +// return nil +// }) +// assert.NoError(t, err) +//} +// +//func TestRowRoundtripGetBlock(t *testing.T) { +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) +// defer cancel() +// +// square := edstest.RandEDS(t, 16) +// root, err := share.NewRoot(square) +// require.NoError(t, err) +// b := edsBlockstore(ctx, t, square) +// client := remoteClient(ctx, t, b) +// +// width := int(square.Width()) +// for i := 0; i < width; i++ { +// row, err := NewRowFromEDS(1, i, square) +// require.NoError(t, err) +// +// rowVerifiers.Add(row.RowID, func(row Row) error { +// return row.Verify(root) +// }) +// +// cid := row.Cid() +// blkOut, err := client.GetBlock(ctx, cid) +// require.NoError(t, err) +// assert.EqualValues(t, cid, blkOut.Cid()) +// +// row, err = RowFromBlock(blkOut) +// assert.NoError(t, err) +// +// err = row.Verify(root) +// assert.NoError(t, err) +// } +//} +// +//func TestRowRoundtripGetBlocks(t *testing.T) { +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) +// defer cancel() +// +// square := edstest.RandEDS(t, 16) +// root, err := share.NewRoot(square) +// require.NoError(t, err) +// b := edsBlockstore(ctx, t, square) +// client := remoteClient(ctx, t, b) +// +// set := cid.NewSet() +// width := int(square.Width()) +// for i := 0; i < width; i++ { +// row, err := NewRowFromEDS(1, i, square) +// require.NoError(t, err) +// set.Add(row.Cid()) +// +// rowVerifiers.Add(row.RowID, func(row Row) 
error {
+//			return row.Verify(root)
+//		})
+//	}
+//
+//	blks, err := client.GetBlocks(ctx, set.Keys())
+//	require.NoError(t, err)
+//
+//	err = set.ForEach(func(c cid.Cid) error {
+//		select {
+//		case blk := <-blks:
+//			assert.True(t, set.Has(blk.Cid()))
+//
+//			row, err := RowFromBlock(blk)
+//			assert.NoError(t, err)
+//
+//			err = row.Verify(root)
+//			assert.NoError(t, err)
+//		case <-ctx.Done():
+//			return ctx.Err()
+//		}
+//		return nil
+//	})
+//	assert.NoError(t, err)
+//}
+//
+//func TestDataRoundtripGetBlock(t *testing.T) {
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+//	defer cancel()
+//
+//	namespace := sharetest.RandV0Namespace()
+//	square, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16)
+//	b := edsBlockstore(ctx, t, square)
+//	client := remoteClient(ctx, t, b)
+//
+//	nds, err := NewDataFromEDS(square, 1, namespace)
+//	require.NoError(t, err)
+//
+//	for _, nd := range nds {
+//		dataVerifiers.Add(nd.DataID, func(data Data) error {
+//			return data.Verify(root)
+//		})
+//
+//		cid := nd.Cid()
+//		blkOut, err := client.GetBlock(ctx, cid)
+//		require.NoError(t, err)
+//		assert.EqualValues(t, cid, blkOut.Cid())
+//
+//		ndOut, err := DataFromBlock(blkOut)
+//		assert.NoError(t, err)
+//
+//		err = ndOut.Verify(root)
+//		assert.NoError(t, err)
+//	}
+//}
+//
+//func TestDataRoundtripGetBlocks(t *testing.T) {
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+//	defer cancel()
+//
+//	namespace := sharetest.RandV0Namespace()
+//	sqr, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16)
+//	b := edsBlockstore(ctx, t, sqr)
+//	client := remoteClient(ctx, t, b)
+//
+//	nds, err := NewDataFromEDS(sqr, 1, namespace)
+//	require.NoError(t, err)
+//
+//	set := cid.NewSet()
+//	for _, nd := range nds {
+//		set.Add(nd.Cid())
+//
+//		dataVerifiers.Add(nd.DataID, func(data Data) error {
+//			return data.Verify(root)
+//		})
+//	}
+//
+//	blks, err := client.GetBlocks(ctx, set.Keys())
+//	require.NoError(t, err)
+//
+//	err = set.ForEach(func(c cid.Cid) error {
+//		select {
+//		case blk := <-blks:
+//			assert.True(t, set.Has(blk.Cid()))
+//
+//			smpl, err := DataFromBlock(blk)
+//			assert.NoError(t, err)
+//
+//			err = smpl.Verify(root)
+//			assert.NoError(t, err)
+//		case <-ctx.Done():
+//			return ctx.Err()
+//		}
+//		return nil
+//	})
+//	assert.NoError(t, err)
+//}
 
 func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.Fetcher {
 	net, err := mocknet.FullMeshLinked(2)

From 6ad17910f9439f24971cc0a359a955635e1e42e3 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Wed, 7 Feb 2024 16:16:13 +0500
Subject: [PATCH 064/132] handle empty square in store and getters

---
 share/getters/shrex.go |  7 ++++---
 share/getters/store.go | 16 ++++++++++++----
 share/store/store.go   | 26 +++++++++++++++++++++-----
 3 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/share/getters/shrex.go b/share/getters/shrex.go
index 3b6983e2cd..bdbb9764c3 100644
--- a/share/getters/shrex.go
+++ b/share/getters/shrex.go
@@ -209,10 +209,11 @@ func (sg *ShrexGetter) GetSharesByNamespace(
 		utils.SetStatusAndEnd(span, err)
 	}()
 
-	// verify that the namespace could exist inside the roots before starting network requests
+	// find rows that contain the target namespace
 	dah := header.DAH
-	roots := ipld.FilterRootByNamespace(dah, namespace)
-	if len(roots) == 0 {
+	fromRow, toRow := share.RowRangeForNamespace(dah, namespace)
+	if fromRow == toRow {
+		// the target namespace is outside the bounds of all rows in the EDS
 		return []share.NamespacedRow{}, nil
 	}
diff --git a/share/getters/store.go b/share/getters/store.go
index ece14c9c0d..deadd140ba 100644
--- a/share/getters/store.go
+++ b/share/getters/store.go
@@ -82,7 +82,11 @@ func (sg *StoreGetter) GetEDS(
 		utils.SetStatusAndEnd(span, err)
 	}()
 
-	file, err := sg.store.GetByHash(ctx, header.DAH.Hash())
+	if header.DAH.IsZero() {
+		return share.EmptyExtendedDataSquare(), nil
+	}
+
+	file, err := sg.store.GetByHeight(ctx, header.Height())
 	if errors.Is(err, store.ErrNotFound) {
 		// convert error to satisfy getter interface contract
 		err = share.ErrNotFound
@@ -113,6 +117,13 @@ func (sg *StoreGetter) GetSharesByNamespace(
 		utils.SetStatusAndEnd(span, err)
 	}()
 
+	// find rows that contain the target namespace
+	from, to := share.RowRangeForNamespace(header.DAH, namespace)
+	if from == to {
+		// the target namespace is outside the bounds of all rows in the EDS
+		return share.NamespacedShares{}, nil
+	}
+
 	file, err := sg.store.GetByHeight(ctx, header.Height())
 	if errors.Is(err, store.ErrNotFound) {
 		// convert error to satisfy getter interface contract
 		err = share.ErrNotFound
@@ -123,9 +134,6 @@ func (sg *StoreGetter) GetSharesByNamespace(
 	}
 	defer utils.CloseAndLog(log, "file", file)
 
-	// get all shares in the namespace
-	from, to := share.RowRangeForNamespace(header.DAH, namespace)
-
 	shares = make(share.NamespacedShares, 0, to-from+1)
 	for row := from; row < to; row++ {
 		data, err := file.Data(ctx, namespace, row)
diff --git a/share/store/store.go b/share/store/store.go
index 90c15bb837..c3f8dd9568 100644
--- a/share/store/store.go
+++ b/share/store/store.go
@@ -20,9 +20,12 @@ import (
 var (
 	log    = logging.Logger("share/eds")
 	tracer = otel.Tracer("share/eds")
+
+	emptyFile = &file.MemFile{Eds: share.EmptyExtendedDataSquare()}
 )
 
 // TODO(@walldiss):
+//  - index empty files by height
 //  - persist store stats like amount of files, file types, avg file size etc in a file
 //  - handle corrupted files
 //  - maintain in-memory missing files index / bloom-filter to fast return for not stored files.
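Patch 064 and the patches that follow gate every namespace request on share.RowRangeForNamespace, but the helper's body never appears in this series. As a minimal sketch only, assuming rows are ordered by namespace and reusing the Namespace.IsOutsideRange check that this series applies to row roots elsewhere, it could look like:

	// RowRangeForNamespace returns the half-open range [from, to) of rows whose
	// NMT roots may contain shares of the given namespace. from == to means the
	// namespace cannot appear in any row.
	func RowRangeForNamespace(root *share.Root, ns share.Namespace) (from, to int) {
		for i, rowRoot := range root.RowRoots {
			if ns.IsOutsideRange(rowRoot, rowRoot) {
				continue
			}
			if from == to { // first row that may hold the namespace
				from = i
			}
			to = i + 1
		}
		return from, to
	}

Callers treat from == to as "namespace not present" and return early without any disk or network I/O, which is exactly the fromRow == toRow short circuit in GetSharesByNamespace above.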
@@ -102,17 +105,17 @@ func (s *Store) Put( defer lock.unlock() // short circuit if file exists - if has, _ := s.hasByHeight(height); has { + if has, _ := s.hasByHash(datahash); has { s.metrics.observePutExist(ctx) - return s.getByHeight(height) + return s.getByHash(datahash) } - if has, _ := s.hasByHash(datahash); has { - log.Errorw("put: file already exists by hash, but not by height", + if has, _ := s.hasByHeight(height); has { + log.Warnw("put: file already exists by height, but not by hash", "height", height, "hash", datahash.String()) s.metrics.observePutExist(ctx) - return s.getByHash(datahash) + return s.getByHeight(height) } path := s.basepath + hashsPath + datahash.String() @@ -140,6 +143,9 @@ func (s *Store) Put( } func (s *Store) GetByHash(ctx context.Context, datahash share.DataHash) (file.EdsFile, error) { + if datahash.IsEmptyRoot() { + return emptyFile, nil + } lock := s.stripLock.byDatahash(datahash) lock.RLock() defer lock.RUnlock() @@ -151,6 +157,10 @@ func (s *Store) GetByHash(ctx context.Context, datahash share.DataHash) (file.Ed } func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { + if datahash.IsEmptyRoot() { + return emptyFile, nil + } + path := s.basepath + hashsPath + datahash.String() odsFile, err := file.OpenOdsFile(path) if err != nil { @@ -191,6 +201,9 @@ func (s *Store) getByHeight(height uint64) (file.EdsFile, error) { } func (s *Store) HasByHash(ctx context.Context, datahash share.DataHash) (bool, error) { + if datahash.IsEmptyRoot() { + return true, nil + } lock := s.stripLock.byDatahash(datahash) lock.RLock() defer lock.RUnlock() @@ -202,6 +215,9 @@ func (s *Store) HasByHash(ctx context.Context, datahash share.DataHash) (bool, e } func (s *Store) hasByHash(datahash share.DataHash) (bool, error) { + if datahash.IsEmptyRoot() { + return true, nil + } path := s.basepath + hashsPath + datahash.String() return pathExists(path) } From 7d85083c8e4898e67796c561b9226018927d9498 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 7 Feb 2024 16:17:50 +0500 Subject: [PATCH 065/132] refactor shrexNd client to use rowIdx --- share/getters/shrex.go | 5 ++--- share/p2p/shrexnd/client.go | 11 +++++------ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/share/getters/shrex.go b/share/getters/shrex.go index bdbb9764c3..40b0a8ea02 100644 --- a/share/getters/shrex.go +++ b/share/getters/shrex.go @@ -16,7 +16,6 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p" "github.com/celestiaorg/celestia-node/share/p2p/peers" "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" @@ -159,7 +158,7 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.Height(), header.DAH.Hash(), peer) + eds, getErr := sg.edsClient.RequestEDS(reqCtx, header, peer) cancel() switch { case getErr == nil: @@ -238,7 +237,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - nd, getErr := sg.ndClient.RequestND(reqCtx, header.Height(), dah, namespace, peer) + nd, getErr := sg.ndClient.RequestND(reqCtx, header.Height(), fromRow, toRow, namespace, peer) cancel() 
 		switch {
 		case getErr == nil:
diff --git a/share/p2p/shrexnd/client.go b/share/p2p/shrexnd/client.go
index 731128c938..5c1b5407ae 100644
--- a/share/p2p/shrexnd/client.go
+++ b/share/p2p/shrexnd/client.go
@@ -49,7 +49,7 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) {
 func (c *Client) RequestND(
 	ctx context.Context,
 	height uint64,
-	root *share.Root,
+	fromRow, toRow int,
 	namespace share.Namespace,
 	peer peer.ID,
 ) (share.NamespacedShares, error) {
@@ -57,7 +57,7 @@ func (c *Client) RequestND(
 		return nil, err
 	}
 
-	shares, err := c.doRequest(ctx, height, root, namespace, peer)
+	shares, err := c.doRequest(ctx, height, fromRow, toRow, namespace, peer)
 	if err == nil {
 		return shares, nil
 	}
@@ -83,7 +83,7 @@ func (c *Client) doRequest(
 	ctx context.Context,
 	height uint64,
-	root *share.Root,
+	fromRow, toRow int,
 	namespace share.Namespace,
 	peerID peer.ID,
 ) (share.NamespacedShares, error) {
@@ -95,12 +95,11 @@ func (c *Client) doRequest(
 
 	c.setStreamDeadlines(ctx, stream)
 
-	from, to := share.RowRangeForNamespace(root, namespace)
 	req := &pb.GetSharesByNamespaceRequest{
 		Height:    height,
 		Namespace: namespace,
-		FromRow:   uint32(from),
-		ToRow:     uint32(to),
+		FromRow:   uint32(fromRow),
+		ToRow:     uint32(toRow),
 	}
 
 	_, err = serde.Write(stream, req)

From 8bde751817262fef08ce504103210bfbb9f6aa7d Mon Sep 17 00:00:00 2001
From: Vlad
Date: Wed, 7 Feb 2024 16:20:50 +0500
Subject: [PATCH 066/132] limit reader by known header instead of the one sent over the wire

---
 share/getters/getter_test.go |  6 ++---
 share/p2p/shrexeds/client.go | 49 +++++++++++++++++++++++++-----------
 share/store/file/ods_file.go |  2 +-
 share/store/file/square.go   | 39 ++++++++--------------------
 4 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go
index fe37b79685..99e27f9675 100644
--- a/share/getters/getter_test.go
+++ b/share/getters/getter_test.go
@@ -8,7 +8,6 @@ import (
 	"sync/atomic"
 	"testing"
 
-	"github.com/celestiaorg/celestia-app/pkg/da"
 	"github.com/celestiaorg/celestia-app/pkg/wrapper"
 	"github.com/celestiaorg/rsmt2d"
 
@@ -69,10 +68,9 @@ func TestStoreGetter(t *testing.T) {
 		assert.True(t, eds.Equals(retrievedEDS))
 
 		// root not found
-		emptyRoot := da.MinDataAvailabilityHeader()
-		eh.DAH = &emptyRoot
+		eh.RawHeader.Height = 666
 		_, err = sg.GetEDS(ctx, eh)
-		require.ErrorIs(t, err, share.ErrNotFound)
+		require.ErrorIs(t, err, share.ErrNotFound, err)
 	})
 
 	t.Run("GetSharesByNamespace", func(t *testing.T) {
diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go
index 4c5cc0ba56..be97ee9558 100644
--- a/share/p2p/shrexeds/client.go
+++ b/share/p2p/shrexeds/client.go
@@ -1,9 +1,11 @@
 package shrexeds
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
+	"github.com/celestiaorg/celestia-node/header"
 	"io"
 	"net"
 	"time"
@@ -47,16 +49,15 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) {
 
 // RequestEDS requests the ODS from the given peers and returns the EDS upon success.
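+// Taking the full ExtendedHeader here, rather than a height and hash, lets the
+// client size its reads from the locally validated DAH and verify the returned
+// square against it, instead of trusting a size or root sent over the wire.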
func (c *Client) RequestEDS( ctx context.Context, - height uint64, - dataHash share.DataHash, + header *header.ExtendedHeader, peer peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { - eds, err := c.doRequest(ctx, height, dataHash, peer) + eds, err := c.doRequest(ctx, header, peer) if err == nil { return eds, nil } log.Debugw("client: eds request to peer failed", - "height", height, + "height", header.Height(), "peer", peer.String(), "error", err) if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { @@ -75,7 +76,7 @@ func (c *Client) RequestEDS( if err != p2p.ErrNotFound { log.Warnw("client: eds request to peer failed", "peer", peer.String(), - "height", height, + "height", header.Height(), "err", err) } @@ -84,30 +85,29 @@ func (c *Client) RequestEDS( func (c *Client) doRequest( ctx context.Context, - height uint64, - dataHash share.DataHash, + header *header.ExtendedHeader, to peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { streamOpenCtx, cancel := context.WithTimeout(ctx, c.params.ServerReadTimeout) defer cancel() stream, err := c.host.NewStream(streamOpenCtx, to, c.protocolID) if err != nil { - return nil, fmt.Errorf("failed to open stream: %w", err) + return nil, fmt.Errorf("open stream: %w", err) } defer stream.Close() c.setStreamDeadlines(ctx, stream) - req := &pb.EDSRequest{Height: height} + req := &pb.EDSRequest{Height: header.Height()} // request ODS log.Debugw("client: requesting ods", - "height", height, + "height", header.Height(), "peer", to.String()) _, err = serde.Write(stream, req) if err != nil { stream.Reset() //nolint:errcheck - return nil, fmt.Errorf("failed to write request to stream: %w", err) + return nil, fmt.Errorf("write request to stream: %w", err) } err = stream.CloseWrite() if err != nil { @@ -128,7 +128,7 @@ func (c *Client) doRequest( return nil, p2p.ErrNotFound } stream.Reset() //nolint:errcheck - return nil, fmt.Errorf("failed to read status from stream: %w", err) + return nil, fmt.Errorf("read status from stream: %w", err) } switch resp.Status { @@ -136,9 +136,9 @@ func (c *Client) doRequest( // reset stream deadlines to original values, since read deadline was changed during status read c.setStreamDeadlines(ctx, stream) // use header and ODS bytes to construct EDS and verify it against dataHash - eds, err := file.ReadEds(ctx, stream, dataHash) + eds, err := readEds(ctx, stream, header) if err != nil { - return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) + return nil, fmt.Errorf("read eds from stream: %w", err) } c.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) return eds, nil @@ -156,6 +156,27 @@ func (c *Client) doRequest( } } +func readEds(ctx context.Context, stream network.Stream, eh *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + eds, err := file.ReadEds(ctx, stream, len(eh.DAH.RowRoots)) + if err != nil { + return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) + } + + // verify that the EDS hash matches the expected hash + newDah, err := share.NewRoot(eds) + if err != nil { + return nil, err + } + if !bytes.Equal(newDah.Hash(), eh.Hash()) { + return nil, fmt.Errorf( + "content integrity mismatch: imported root %s doesn't match expected root %s", + share.DataHash(newDah.Hash()), + eh.Hash(), + ) + } + return eds, nil +} + func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) { // set read/write deadline to use context deadline if it exists if dl, ok := ctx.Deadline(); ok { diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go 
index 0289c92ba9..f23e832649 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -163,7 +163,7 @@ func (f *OdsFile) readOds() error { return fmt.Errorf("discarding header: %w", err) } - square, err := readShares(f.hdr, f.fl) + square, err := readShares(f.hdr.ShareSize(), f.Size(), f.fl) if err != nil { return fmt.Errorf("reading ods: %w", err) } diff --git a/share/store/file/square.go b/share/store/file/square.go index be9f091590..05e4110982 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -16,13 +16,9 @@ import ( type square [][]share.Share -func ReadEds(_ context.Context, r io.Reader, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { - h, err := ReadHeader(r) - if err != nil { - return nil, fmt.Errorf("reading header: %w", err) - } - - square, err := readShares(h, r) +// ReadEds reads an EDS from the reader and returns it. +func ReadEds(_ context.Context, r io.Reader, size int) (*rsmt2d.ExtendedDataSquare, error) { + square, err := readShares(share.Size, size, r) if err != nil { return nil, fmt.Errorf("reading shares: %w", err) } @@ -31,25 +27,16 @@ func ReadEds(_ context.Context, r io.Reader, root share.DataHash) (*rsmt2d.Exten if err != nil { return nil, fmt.Errorf("computing EDS: %w", err) } - - newDah, err := share.NewRoot(eds) - if err != nil { - return nil, err - } - if !bytes.Equal(newDah.Hash(), root) { - return nil, fmt.Errorf( - "share: content integrity mismatch: imported root %s doesn't match expected root %s", - share.DataHash(newDah.Hash()), - root, - ) - } return eds, nil } -func readShares(hdr *Header, reader io.Reader) (square, error) { - shrLn := int(hdr.shareSize) - odsLn := int(hdr.squareSize) / 2 +// readShares reads shares from the reader and returns a square. It assumes that the reader is +// positioned at the beginning of the shares. It knows the size of the shares and the size of the +// square, so reads from reader are limited to exactly the amount of data required. 
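+// As an illustrative example (sizes assumed here, not fixed by this function):
+// for a 32x32 EDS, odsLn is 16, so with 512-byte shares each of the 16 row
+// reads fills buf with exactly 16*512 = 8192 bytes before copying shares out.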
+func readShares(shareSize, squareSize int, reader io.Reader) (square, error) { + odsLn := squareSize / 2 + // get pre-allocated square and buffer from memPools square := memPools.get(odsLn).square() buf := memPools.get(odsLn).getHalfAxis() defer memPools.get(odsLn).putHalfAxis(buf) @@ -60,7 +47,7 @@ func readShares(hdr *Header, reader io.Reader) (square, error) { } for j := 0; j < odsLn; j++ { - copy(square[i][j], buf[j*shrLn:(j+1)*shrLn]) + copy(square[i][j], buf[j*shareSize:(j+1)*shareSize]) } } @@ -121,12 +108,6 @@ func (s square) Reader(hdr *Header) (io.Reader, error) { buf: bytes.NewBuffer(make([]byte, 0, int(hdr.shareSize))), } - // write header to the buffer - _, err := hdr.WriteTo(odsR.buf) - if err != nil { - return nil, fmt.Errorf("writing header: %w", err) - } - return odsR, nil } From 85e069b704a3a9cf7329bfd865b0e1ba501f3740 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 7 Feb 2024 16:22:10 +0500 Subject: [PATCH 067/132] add reader tests to all types of files --- share/store/file/cache_file_test.go | 4 +++ share/store/file/file_test.go | 25 +++++++++++++++++++ share/store/file/mem_file_test.go | 4 +++ share/store/file/ods_file_test.go | 38 ++++------------------------- 4 files changed, 38 insertions(+), 33 deletions(-) diff --git a/share/store/file/cache_file_test.go b/share/store/file/cache_file_test.go index a569655b48..582a7d04c9 100644 --- a/share/store/file/cache_file_test.go +++ b/share/store/file/cache_file_test.go @@ -32,4 +32,8 @@ func TestCacheFile(t *testing.T) { t.Run("EDS", func(t *testing.T) { testFileEds(t, newFile, size) }) + + t.Run("ReadOds", func(t *testing.T) { + testFileReader(t, newFile, size) + }) } diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go index adc2117873..29b8310102 100644 --- a/share/store/file/file_test.go +++ b/share/store/file/file_test.go @@ -7,6 +7,7 @@ import ( mrand "math/rand" "strconv" "testing" + "time" "github.com/stretchr/testify/require" @@ -105,6 +106,30 @@ func testFileEds(t *testing.T, createFile createFile, size int) { require.True(t, eds.Equals(eds2)) } +func testFileReader(t *testing.T, createFile createFile, size int) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + eds := edstest.RandEDS(t, size) + f := createFile(eds) + + reader, err := f.Reader() + require.NoError(t, err) + + streamed, err := ReadEds(ctx, reader, size) + require.NoError(t, err) + require.True(t, eds.Equals(streamed)) + + // verify that the reader represented by file can be read from + // multiple times, without exhausting the underlying reader. 
+ reader2, err := f.Reader() + require.NoError(t, err) + + streamed2, err := ReadEds(ctx, reader2, size) + require.NoError(t, err) + require.True(t, eds.Equals(streamed2)) +} + func benchGetAxisFromFile(b *testing.B, newFile func(size int) EdsFile, minSize, maxSize int) { for size := minSize; size <= maxSize; size *= 2 { f := newFile(size) diff --git a/share/store/file/mem_file_test.go b/share/store/file/mem_file_test.go index f02c02a883..f27c8ab8b1 100644 --- a/share/store/file/mem_file_test.go +++ b/share/store/file/mem_file_test.go @@ -27,4 +27,8 @@ func TestMemFile(t *testing.T) { t.Run("EDS", func(t *testing.T) { testFileEds(t, newFile, size) }) + + t.Run("ReadOds", func(t *testing.T) { + testFileReader(t, newFile, size) + }) } diff --git a/share/store/file/ods_file_test.go b/share/store/file/ods_file_test.go index 0e0f63d711..5c689e6355 100644 --- a/share/store/file/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -2,13 +2,10 @@ package file import ( "context" - "testing" - "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "testing" - "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share/eds/edstest" @@ -51,6 +48,10 @@ func TestOdsFile(t *testing.T) { t.Run("EDS", func(t *testing.T) { testFileEds(t, createOdsFile, size) }) + + t.Run("ReadOds", func(t *testing.T) { + testFileReader(t, createOdsFile, size) + }) } func TestReadOdsFile(t *testing.T) { @@ -69,35 +70,6 @@ func TestReadOdsFile(t *testing.T) { } } -func TestFileStreaming(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - eds := edstest.RandEDS(t, 8) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(t, err) - - path := t.TempDir() + "/testfile" - f, err := CreateOdsFile(path, 1, []byte{}, eds) - require.NoError(t, err) - - reader, err := f.Reader() - require.NoError(t, err) - - streamed, err := ReadEds(ctx, reader, dah.Hash()) - require.NoError(t, err) - require.True(t, eds.Equals(streamed)) - - // verify that the reader represented by file can be read from - // multiple times, without exhausting the underlying reader. 
-	reader2, err := f.Reader()
-	require.NoError(t, err)
-
-	streamed2, err := ReadEds(ctx, reader2, dah.Hash())
-	require.NoError(t, err)
-	require.True(t, eds.Equals(streamed2))
-}
-
 // Leopard full encode
 // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10         	  418206	      2545 ns/op
 // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10        	    4968	    227265 ns/op

From 70dddf60d8950420372ced9d8426df0efd334a42 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Wed, 7 Feb 2024 16:23:10 +0500
Subject: [PATCH 068/132] add proper prealloc for shwap data request builder

---
 share/shwap/getter.go | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/share/shwap/getter.go b/share/shwap/getter.go
index 69ea90c655..370d140b56 100644
--- a/share/shwap/getter.go
+++ b/share/shwap/getter.go
@@ -196,12 +196,13 @@ func (g *Getter) GetSharesByNamespace(
 		return nil, err
 	}
 
-	var dids []DataID //nolint:prealloc// we don't know how many rows with needed namespace there are
-	for rowIdx, rowRoot := range hdr.DAH.RowRoots {
-		if ns.IsOutsideRange(rowRoot, rowRoot) {
-			continue
-		}
+	from, to := share.RowRangeForNamespace(hdr.DAH, ns)
+	if from == to {
+		return share.NamespacedShares{}, nil
+	}
+
+	dids := make([]DataID, 0, to-from)
+	for rowIdx := from; rowIdx < to; rowIdx++ {
 		did, err := NewDataID(hdr.Height(), uint16(rowIdx), ns, hdr.DAH)
 		if err != nil {
 			return nil, err
@@ -209,9 +210,6 @@ func (g *Getter) GetSharesByNamespace(
 		dids = append(dids, did)
 	}
 
-	if len(dids) == 0 {
-		return share.NamespacedShares{}, nil
-	}
 	datas := make([]Data, len(dids))
 	verifyFn := func(d Data) error {

From 0c8bc88c17a08f9f38cb6f8a8f0ff9c6c3600ae9 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Wed, 7 Feb 2024 16:46:36 +0500
Subject: [PATCH 069/132] fix offset calc for file streaming

---
 share/store/file/file_test.go | 8 ++++----
 share/store/file/square.go    | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go
index 29b8310102..79c19bd181 100644
--- a/share/store/file/file_test.go
+++ b/share/store/file/file_test.go
@@ -106,17 +106,17 @@ func testFileEds(t *testing.T, createFile createFile, size int) {
 	require.True(t, eds.Equals(eds2))
 }
 
-func testFileReader(t *testing.T, createFile createFile, size int) {
+func testFileReader(t *testing.T, createFile createFile, odsSize int) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
-	eds := edstest.RandEDS(t, size)
+	eds := edstest.RandEDS(t, odsSize)
 	f := createFile(eds)
 
 	reader, err := f.Reader()
 	require.NoError(t, err)
 
-	streamed, err := ReadEds(ctx, reader, size)
+	streamed, err := ReadEds(ctx, reader, f.Size())
 	require.NoError(t, err)
 	require.True(t, eds.Equals(streamed))
 
@@ -125,7 +125,7 @@ func testFileReader(t *testing.T, createFile createFile, odsSize int) {
 	reader2, err := f.Reader()
 	require.NoError(t, err)
 
-	streamed2, err := ReadEds(ctx, reader2, size)
+	streamed2, err := ReadEds(ctx, reader2, f.Size())
 	require.NoError(t, err)
 	require.True(t, eds.Equals(streamed2))
 }
diff --git a/share/store/file/square.go b/share/store/file/square.go
index 05e4110982..ad3e58ff62 100644
--- a/share/store/file/square.go
+++ b/share/store/file/square.go
@@ -17,8 +17,8 @@
 type square [][]share.Share
 
 // ReadEds reads an EDS from the reader and returns it.
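+// edsSize is the width of the full extended square; exactly (edsSize/2)^2
+// shares of share.Size bytes, the ODS quadrant, are consumed from r.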
-func ReadEds(_ context.Context, r io.Reader, size int) (*rsmt2d.ExtendedDataSquare, error) { - square, err := readShares(share.Size, size, r) +func ReadEds(_ context.Context, r io.Reader, edsSize int) (*rsmt2d.ExtendedDataSquare, error) { + square, err := readShares(share.Size, edsSize, r) if err != nil { return nil, fmt.Errorf("reading shares: %w", err) } @@ -33,8 +33,8 @@ func ReadEds(_ context.Context, r io.Reader, size int) (*rsmt2d.ExtendedDataSqua // readShares reads shares from the reader and returns a square. It assumes that the reader is // positioned at the beginning of the shares. It knows the size of the shares and the size of the // square, so reads from reader are limited to exactly the amount of data required. -func readShares(shareSize, squareSize int, reader io.Reader) (square, error) { - odsLn := squareSize / 2 +func readShares(shareSize, edsSize int, reader io.Reader) (square, error) { + odsLn := edsSize / 2 // get pre-allocated square and buffer from memPools square := memPools.get(odsLn).square() From f3186f1bdd76112464e51d194767664f891f2898 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 7 Feb 2024 16:50:47 +0500 Subject: [PATCH 070/132] add streaming for mem file --- share/store/file/mem_file.go | 18 ++++++++++++------ share/store/file/ods_file.go | 2 +- share/store/file/square.go | 4 ++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/share/store/file/mem_file.go b/share/store/file/mem_file.go index 213bbb7555..5e15a6ce2b 100644 --- a/share/store/file/mem_file.go +++ b/share/store/file/mem_file.go @@ -1,7 +1,6 @@ package file import ( - "bytes" "context" "io" @@ -26,12 +25,19 @@ func (f *MemFile) Close() error { } func (f *MemFile) Reader() (io.Reader, error) { - bs, err := f.Eds.MarshalJSON() - if err != nil { - return nil, err - } + return f.readOds().Reader() +} - return bytes.NewReader(bs), nil +func (f *MemFile) readOds() square { + odsLn := int(f.Eds.Width() / 2) + s := make(square, odsLn) + for y := 0; y < odsLn; y++ { + s[y] = make([]share.Share, odsLn) + for x := 0; x < odsLn; x++ { + s[y][x] = f.Eds.GetCell(uint(y), uint(x)) + } + } + return s } func (f *MemFile) Height() uint64 { diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go index f23e832649..42aed35115 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -116,7 +116,7 @@ func (f *OdsFile) Reader() (io.Reader, error) { if err != nil { return nil, fmt.Errorf("reading ods: %w", err) } - return f.ods.Reader(f.hdr) + return f.ods.Reader() } func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { diff --git a/share/store/file/square.go b/share/store/file/square.go index ad3e58ff62..ea5c8605c4 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -97,7 +97,7 @@ func (s square) eds() (*rsmt2d.ExtendedDataSquare, error) { return rsmt2d.ComputeExtendedDataSquare(shrs, share.DefaultRSMT2DCodec(), treeFn) } -func (s square) Reader(hdr *Header) (io.Reader, error) { +func (s square) Reader() (io.Reader, error) { if s == nil { return nil, fmt.Errorf("ods file not cached") } @@ -105,7 +105,7 @@ func (s square) Reader(hdr *Header) (io.Reader, error) { odsR := &bufferedODSReader{ square: s, total: s.size() * s.size(), - buf: bytes.NewBuffer(make([]byte, 0, int(hdr.shareSize))), + buf: new(bytes.Buffer), } return odsR, nil From f1556a49fbafccb20e9a505e7ec17a598262e5cd Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 7 Feb 2024 18:37:08 +0500 Subject: [PATCH 071/132] add support for 
empty root in getters and store --- share/getters/getter_test.go | 15 +++++ share/getters/shrex_test.go | 18 +++++- share/p2p/shrexeds/client.go | 4 +- share/shwap/getter.go | 17 ++++++ share/shwap/getter_test.go | 35 +++++++++++ share/store/store.go | 115 +++++++++++++++++++++++++++++++---- share/store/store_test.go | 69 +++++++++++++++++++++ 7 files changed, 257 insertions(+), 16 deletions(-) diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index 99e27f9675..b0bc2cb774 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -2,6 +2,7 @@ package getters import ( "context" + "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-node/share/store" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -73,6 +74,20 @@ func TestStoreGetter(t *testing.T) { require.ErrorIs(t, err, share.ErrNotFound, err) }) + t.Run("Get empty EDS", func(t *testing.T) { + // empty root + emptyRoot := da.MinDataAvailabilityHeader() + eh := headertest.RandExtendedHeaderWithRoot(t, &emptyRoot) + f, err := edsStore.Put(ctx, eh.DAH.Hash(), eh.Height(), nil) + require.NoError(t, err) + require.NoError(t, f.Close()) + + eds, err := sg.GetEDS(ctx, eh) + require.NoError(t, err) + dah, err := share.NewRoot(eds) + require.True(t, share.DataHash(dah.Hash()).IsEmptyRoot()) + }) + t.Run("GetSharesByNamespace", func(t *testing.T) { eds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) height := height.Add(1) diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index 36fea496fd..54c5d31486 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -2,6 +2,7 @@ package getters import ( "context" + "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-node/share/store" "github.com/tendermint/tendermint/libs/rand" "sync/atomic" @@ -111,7 +112,7 @@ func TestShrexGetter(t *testing.T) { f, err := edsStore.Put(ctx, dah.Hash(), height, eds) require.NoError(t, err) - f.Close() + require.NoError(t, f.Close()) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: height, @@ -149,7 +150,7 @@ func TestShrexGetter(t *testing.T) { f, err := edsStore.Put(ctx, dah.Hash(), height, eds) require.NoError(t, err) - defer f.Close() + require.NoError(t, f.Close()) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: height, @@ -179,7 +180,7 @@ func TestShrexGetter(t *testing.T) { f, err := edsStore.Put(ctx, dah.Hash(), height, eds) require.NoError(t, err) - defer f.Close() + require.NoError(t, f.Close()) peerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ DataHash: dah.Hash(), Height: height, @@ -190,6 +191,17 @@ func TestShrexGetter(t *testing.T) { require.Equal(t, eds.Flattened(), got.Flattened()) }) + t.Run("EDS get empty", func(t *testing.T) { + // empty root + emptyRoot := da.MinDataAvailabilityHeader() + eh := headertest.RandExtendedHeaderWithRoot(t, &emptyRoot) + + eds, err := getter.GetEDS(ctx, eh) + require.NoError(t, err) + dah, err := share.NewRoot(eds) + require.True(t, share.DataHash(dah.Hash()).IsEmptyRoot()) + }) + t.Run("EDS_ctx_deadline", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index be97ee9558..f10f25a859 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -167,11 +167,11 @@ func readEds(ctx context.Context, stream network.Stream, eh *header.ExtendedHead if 
err != nil {
 		return nil, err
 	}
-	if !bytes.Equal(newDah.Hash(), eh.Hash()) {
+	if !bytes.Equal(newDah.Hash(), eh.DAH.Hash()) {
 		return nil, fmt.Errorf(
 			"content integrity mismatch: imported root %s doesn't match expected root %s",
 			share.DataHash(newDah.Hash()),
-			eh.Hash(),
+			eh.DAH.Hash(),
 		)
 	}
 	return eds, nil
 }
diff --git a/share/shwap/getter.go b/share/shwap/getter.go
index 370d140b56..98ff74becc 100644
--- a/share/shwap/getter.go
+++ b/share/shwap/getter.go
@@ -46,6 +46,19 @@ func (g *Getter) GetShare(ctx context.Context, header *header.ExtendedHeader, ro
 // Automatically caches them on the Blockstore.
 // Guarantee that the returned shares are in the same order as shrIdxs.
 func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smplIdxs ...int) ([]share.Share, error) {
+	if len(smplIdxs) == 0 {
+		return nil, nil
+	}
+
+	if hdr.DAH.Equals(share.EmptyRoot()) {
+		shares := make([]share.Share, len(smplIdxs))
+		for i, idx := range smplIdxs {
+			x, y := uint(idx/len(hdr.DAH.RowRoots)), uint(idx%len(hdr.DAH.RowRoots))
+			shares[i] = share.EmptyExtendedDataSquare().GetCell(x, y)
+		}
+		return shares, nil
+	}
+
 	sids := make([]SampleID, len(smplIdxs))
 	for i, shrIdx := range smplIdxs {
 		sid, err := NewSampleID(hdr.Height(), shrIdx, hdr.DAH)
@@ -120,6 +133,10 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smpl
 // GetEDS
 // TODO(@Wondertan): Consider requesting randomized rows instead of ODS only
 func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) {
+	if hdr.DAH.Equals(share.EmptyRoot()) {
+		return share.EmptyExtendedDataSquare(), nil
+	}
+
 	sqrLn := len(hdr.DAH.RowRoots)
 	rids := make([]RowID, sqrLn/2)
 	for i := 0; i < sqrLn/2; i++ {
diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go
index b3223387f5..88d72f0112 100644
--- a/share/shwap/getter_test.go
+++ b/share/shwap/getter_test.go
@@ -4,6 +4,8 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"github.com/celestiaorg/celestia-app/pkg/da"
+	"github.com/celestiaorg/celestia-node/header/headertest"
 	"github.com/celestiaorg/celestia-node/share/shwap"
 	"github.com/celestiaorg/celestia-node/share/store"
 	ds_sync "github.com/ipfs/go-datastore/sync"
@@ -57,6 +59,24 @@ func TestGetter(t *testing.T) {
 		}
 	})
 
+	t.Run("GetShares from empty", func(t *testing.T) {
+		emptyRoot := da.MinDataAvailabilityHeader()
+		eh := headertest.RandExtendedHeaderWithRoot(t, &emptyRoot)
+
+		idxs := []int{0, 1, 2, 3}
+		square := share.EmptyExtendedDataSquare()
+		shrs, err := get.GetShares(ctx, eh, idxs...)
+ assert.NoError(t, err) + + for i, shrs := range shrs { + idx := idxs[i] + x, y := uint(idx)/square.Width(), uint(idx)%square.Width() + cell := square.GetCell(x, y) + ok := bytes.Equal(cell, shrs) + require.True(t, ok) + } + }) + t.Run("GetEDS", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) @@ -69,6 +89,21 @@ func TestGetter(t *testing.T) { assert.True(t, ok) }) + t.Run("GetEDS empty", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + t.Cleanup(cancel) + + emptyRoot := da.MinDataAvailabilityHeader() + eh := headertest.RandExtendedHeaderWithRoot(t, &emptyRoot) + + eds, err := get.GetEDS(ctx, eh) + assert.NoError(t, err) + assert.NotNil(t, eds) + + dah, err := share.NewRoot(eds) + require.True(t, share.DataHash(dah.Hash()).IsEmptyRoot()) + }) + t.Run("GetSharesByNamespace", func(t *testing.T) { nshrs, err := get.GetSharesByNamespace(ctx, hdr, ns) assert.NoError(t, err) diff --git a/share/store/store.go b/share/store/store.go index c3f8dd9568..66d597f4a3 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -2,9 +2,13 @@ package store import ( "context" + "encoding/gob" "errors" "fmt" + "github.com/celestiaorg/celestia-node/libs/utils" + "io" "os" + "sync" "time" logging "github.com/ipfs/go-log/v2" @@ -32,8 +36,9 @@ var ( // - lock store folder const ( - hashsPath = "/blocks/" - heightsPath = "/heights/" + hashsPath = "/blocks/" + heightsPath = "/heights/" + emptyHeights = "/empty_heights" defaultDirPerm = 0755 ) @@ -45,15 +50,15 @@ var ErrNotFound = errors.New("eds not found in store") // blockstore interface implementation to achieve access. The main use-case is randomized sampling // over the whole chain of EDS block data and getting data by namespace. type Store struct { - cancel context.CancelFunc - + // basepath is the root directory of the store basepath string - // cache is used to cache recent blocks and blocks that are accessed frequently cache *cache.DoubleCache - // stripedLocks is used to synchronize parallel operations stripLock *striplock + // emptyHeights stores the heights of empty files + emptyHeights map[uint64]struct{} + emptyHeightsLock sync.RWMutex metrics *metrics } @@ -74,6 +79,11 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { return nil, fmt.Errorf("ensure blocks folder: %w", err) } + // ensure empty heights file + if err := ensureFile(basePath + emptyHeights); err != nil { + return nil, fmt.Errorf("ensure empty heights file: %w", err) + } + recentBlocksCache, err := cache.NewFileCache("recent", params.RecentBlocksCacheSize) if err != nil { return nil, fmt.Errorf("failed to create recent blocks cache: %w", err) @@ -84,15 +94,24 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { return nil, fmt.Errorf("failed to create blockstore cache: %w", err) } + emptyHeights, err := loadEmptyHeights(basePath) + if err != nil { + return nil, fmt.Errorf("loading empty heights: %w", err) + } + store := &Store{ - basepath: basePath, - cache: cache.NewDoubleCache(recentBlocksCache, blockstoreCache), - stripLock: newStripLock(1024), - //metrics: newMetrics(), + basepath: basePath, + cache: cache.NewDoubleCache(recentBlocksCache, blockstoreCache), + stripLock: newStripLock(1024), + emptyHeights: emptyHeights, } return store, nil } +func (s *Store) Close() error { + return s.storeEmptyHeights() +} + func (s *Store) Put( ctx context.Context, datahash share.DataHash, @@ -104,6 +123,11 @@ func (s *Store) Put( lock.lock() defer lock.unlock() + if 
datahash.IsEmptyRoot() { + s.addEmptyHeight(height) + return emptyFile, nil + } + // short circuit if file exists if has, _ := s.hasByHash(datahash); has { s.metrics.observePutExist(ctx) @@ -184,6 +208,10 @@ func (s *Store) GetByHeight(ctx context.Context, height uint64) (file.EdsFile, e } func (s *Store) getByHeight(height uint64) (file.EdsFile, error) { + if s.isEmptyHeight(height) { + return emptyFile, nil + } + f, err := s.cache.Get(height) if err == nil { return f, nil @@ -234,6 +262,10 @@ func (s *Store) HasByHeight(ctx context.Context, height uint64) (bool, error) { } func (s *Store) hasByHeight(height uint64) (bool, error) { + if s.isEmptyHeight(height) { + return true, nil + } + _, err := s.cache.Get(height) if err == nil { return true, nil @@ -309,7 +341,7 @@ func ensureFolder(path string) error { return nil } if err != nil { - return fmt.Errorf("checking blocks dir: %w", err) + return fmt.Errorf("checking dir: %w", err) } if !info.IsDir() { return errors.New("expected dir, got a file") @@ -317,6 +349,24 @@ func ensureFolder(path string) error { return nil } +func ensureFile(path string) error { + info, err := os.Stat(path) + if os.IsNotExist(err) { + file, err := os.Create(path) + if err != nil { + return fmt.Errorf("creating file: %w", err) + } + return file.Close() + } + if err != nil { + return fmt.Errorf("checking file: %w", err) + } + if info.IsDir() { + return errors.New("expected file, got a dir") + } + return nil +} + func pathExists(path string) (bool, error) { _, err := os.Stat(path) if err != nil { @@ -327,3 +377,46 @@ func pathExists(path string) (bool, error) { } return true, nil } + +func (s *Store) storeEmptyHeights() error { + file, err := os.OpenFile(s.basepath+emptyHeights, os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("opening empty heights file: %w", err) + } + defer utils.CloseAndLog(log, "empty heights file", file) + + encoder := gob.NewEncoder(file) + if err := encoder.Encode(s.emptyHeights); err != nil { + return fmt.Errorf("encoding empty heights: %w", err) + } + + return nil +} + +func loadEmptyHeights(basepath string) (map[uint64]struct{}, error) { + file, err := os.Open(basepath + emptyHeights) + if err != nil { + return nil, fmt.Errorf("opening empty heights file: %w", err) + } + defer utils.CloseAndLog(log, "empty heights file", file) + + emptyHeights := make(map[uint64]struct{}) + gob.NewDecoder(file).Decode(&emptyHeights) + if err != nil && !errors.Is(err, io.EOF) { + return nil, fmt.Errorf("decoding empty heights file: %w", err) + } + return emptyHeights, nil +} + +func (s *Store) isEmptyHeight(height uint64) bool { + s.emptyHeightsLock.RLock() + defer s.emptyHeightsLock.RUnlock() + _, ok := s.emptyHeights[height] + return ok +} + +func (s *Store) addEmptyHeight(height uint64) { + s.emptyHeightsLock.Lock() + defer s.emptyHeightsLock.Unlock() + s.emptyHeights[height] = struct{}{} +} diff --git a/share/store/store_test.go b/share/store/store_test.go index f278103f94..e704763ed8 100644 --- a/share/store/store_test.go +++ b/share/store/store_test.go @@ -169,6 +169,75 @@ func TestEDSStore(t *testing.T) { require.NoError(t, err) require.False(t, has) }) + + t.Run("empty EDS returned by hash", func(t *testing.T) { + eds := share.EmptyExtendedDataSquare() + dah, err := share.NewRoot(eds) + require.NoError(t, err) + + // assert that the empty file exists + has, err := edsStore.HasByHash(ctx, dah.Hash()) + require.NoError(t, err) + require.True(t, has) + + // assert that the empty file is, in fact, empty + f, err := 
edsStore.GetByHash(ctx, dah.Hash()) + require.NoError(t, err) + require.True(t, f.DataHash().IsEmptyRoot()) + }) + + t.Run("empty EDS returned by height", func(t *testing.T) { + eds := share.EmptyExtendedDataSquare() + dah, err := share.NewRoot(eds) + require.NoError(t, err) + height := height.Add(1) + + // assert that the empty file exists + has, err := edsStore.HasByHeight(ctx, height) + require.NoError(t, err) + require.False(t, has) + + f, err := edsStore.Put(ctx, dah.Hash(), height, eds) + require.NoError(t, err) + require.True(t, f.DataHash().IsEmptyRoot()) + require.NoError(t, f.Close()) + + // assert that the empty file can be accessed by height + f, err = edsStore.GetByHeight(ctx, height) + require.NoError(t, err) + require.True(t, f.DataHash().IsEmptyRoot()) + }) + + t.Run("empty EDS are persisted", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + eds := share.EmptyExtendedDataSquare() + dah, err := share.NewRoot(eds) + require.NoError(t, err) + from, to := 10, 20 + + // store empty EDSs + for i := from; i <= to; i++ { + f, err := edsStore.Put(ctx, dah.Hash(), uint64(i), eds) + require.NoError(t, err) + require.NoError(t, f.Close()) + } + + // close and reopen the store to ensure that the empty files are persisted + require.NoError(t, edsStore.Close()) + edsStore, err = NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + // assert that the empty files restored from disk + for i := from; i <= to; i++ { + f, err := edsStore.GetByHeight(ctx, uint64(i)) + require.NoError(t, err) + require.True(t, f.DataHash().IsEmptyRoot()) + require.NoError(t, f.Close()) + } + }) } func BenchmarkStore(b *testing.B) { From 05e5a5e2acfc16cb7df5dd396351d0ca8a717d4a Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 7 Feb 2024 18:41:11 +0500 Subject: [PATCH 072/132] sort imports --- share/getters/getter_test.go | 9 +++++---- share/getters/shrex_test.go | 6 +++--- share/p2p/shrexeds/client.go | 2 +- share/shwap/getter_test.go | 10 +++++----- share/shwap/shwap_test.go | 3 ++- share/store/file/file_test.go | 2 +- share/store/file/ods_file_test.go | 3 ++- share/store/store.go | 2 +- share/store/store_test.go | 2 +- 9 files changed, 21 insertions(+), 18 deletions(-) diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index b0bc2cb774..6578a7eab5 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -2,13 +2,13 @@ package getters import ( "context" - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-node/share/store" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "sync/atomic" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/rsmt2d" @@ -17,6 +17,7 @@ import ( "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/store" ) func TestStoreGetter(t *testing.T) { diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index 54c5d31486..c906622c08 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -2,9 +2,6 @@ package getters import ( "context" - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-node/share/store" - 
"github.com/tendermint/tendermint/libs/rand" "sync/atomic" "testing" "time" @@ -15,7 +12,9 @@ import ( "github.com/libp2p/go-libp2p/p2p/net/conngater" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" + "github.com/celestiaorg/celestia-app/pkg/da" libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -30,6 +29,7 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/store" ) func TestShrexGetter(t *testing.T) { diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index f10f25a859..7ef2ced518 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/header" "io" "net" "time" @@ -18,6 +17,7 @@ import ( "github.com/celestiaorg/go-libp2p-messenger/serde" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/p2p" pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index 88d72f0112..e79e16ec9a 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -4,11 +4,6 @@ import ( "bytes" "context" "fmt" - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share/shwap" - "github.com/celestiaorg/celestia-node/share/store" - ds_sync "github.com/ipfs/go-datastore/sync" "math/rand" "testing" "time" @@ -18,18 +13,23 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" format "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/store" ) func TestGetter(t *testing.T) { diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index 9fa8b8f9e6..7924172450 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -2,6 +2,8 @@ package shwap import ( "context" + "testing" + "github.com/ipfs/boxo/bitswap" "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/blockstore" @@ -12,7 +14,6 @@ import ( record "github.com/libp2p/go-libp2p-record" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" - "testing" ) // TODO(@walldiss): those tests works, but wants to imports with edsStore, when dependency is reversed diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go index 79c19bd181..66191f7b24 100644 --- a/share/store/file/file_test.go +++ b/share/store/file/file_test.go @@ -3,7 +3,6 @@ package file import ( "context" "fmt" - 
"github.com/celestiaorg/nmt" mrand "math/rand" "strconv" "testing" @@ -11,6 +10,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" diff --git a/share/store/file/ods_file_test.go b/share/store/file/ods_file_test.go index 5c689e6355..d19cc5b1d1 100644 --- a/share/store/file/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -2,9 +2,10 @@ package file import ( "context" + "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" "github.com/celestiaorg/rsmt2d" diff --git a/share/store/store.go b/share/store/store.go index 66d597f4a3..711e22ff87 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -5,7 +5,6 @@ import ( "encoding/gob" "errors" "fmt" - "github.com/celestiaorg/celestia-node/libs/utils" "io" "os" "sync" @@ -16,6 +15,7 @@ import ( "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/store/cache" "github.com/celestiaorg/celestia-node/share/store/file" diff --git a/share/store/store_test.go b/share/store/store_test.go index e704763ed8..2669ff47fc 100644 --- a/share/store/store_test.go +++ b/share/store/store_test.go @@ -2,11 +2,11 @@ package store import ( "context" - "github.com/tendermint/tendermint/libs/rand" "testing" "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" "go.uber.org/atomic" "github.com/celestiaorg/rsmt2d" From ad117d7e21dfea1b69b189decd9e034562fa2b26 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Feb 2024 15:33:27 +0500 Subject: [PATCH 073/132] fix shrexnd tests --- share/p2p/shrexnd/exchange_test.go | 31 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/share/p2p/shrexnd/exchange_test.go b/share/p2p/shrexnd/exchange_test.go index cb8bbe9d74..db285e0ce7 100644 --- a/share/p2p/shrexnd/exchange_test.go +++ b/share/p2p/shrexnd/exchange_test.go @@ -2,19 +2,17 @@ package shrexnd import ( "context" + "github.com/celestiaorg/celestia-node/share/store" "sync" "testing" "time" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" libhost "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/p2p" "github.com/celestiaorg/celestia-node/share/sharetest" @@ -24,16 +22,14 @@ func TestExchange_RequestND_NotFound(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) t.Cleanup(cancel) edsStore, client, server := makeExchange(t) - require.NoError(t, edsStore.Start(ctx)) require.NoError(t, server.Start(ctx)) - t.Run("CAR_not_exist", func(t *testing.T) { + t.Run("File not exist", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - root := share.Root{} namespace := sharetest.RandV0Namespace() - _, err := client.RequestND(ctx, &root, namespace, server.host.ID()) + _, err := client.RequestND(ctx, 666, 1, 2, namespace, server.host.ID()) require.ErrorIs(t, err, p2p.ErrNotFound) }) @@ -43,11 +39,15 @@ func TestExchange_RequestND_NotFound(t *testing.T) { eds := edstest.RandEDS(t, 4) dah, err := share.NewRoot(eds) 
+ height := uint64(42) require.NoError(t, err) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) + f, err := edsStore.Put(ctx, dah.Hash(), height, eds) + require.NoError(t, err) + require.NoError(t, f.Close()) namespace := sharetest.RandV0Namespace() - emptyShares, err := client.RequestND(ctx, dah, namespace, server.host.ID()) + fromRow, toRow := share.RowRangeForNamespace(dah, namespace) + emptyShares, err := client.RequestND(ctx, height, fromRow, toRow, namespace, server.host.ID()) require.NoError(t, err) require.Empty(t, emptyShares.Flatten()) }) @@ -90,23 +90,22 @@ func TestExchange_RequestND(t *testing.T) { // take server concurrency slots with blocked requests for i := 0; i < rateLimit; i++ { go func(i int) { - client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) //nolint:errcheck + client.RequestND(ctx, 1, 1, 2, sharetest.RandV0Namespace(), server.host.ID()) //nolint:errcheck }(i) } // wait until all server slots are taken wg.Wait() - _, err = client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) + _, err = client.RequestND(ctx, 1, 1, 2, sharetest.RandV0Namespace(), server.host.ID()) require.ErrorIs(t, err, p2p.ErrRateLimited) }) } -func newStore(t *testing.T) *eds.Store { +func newStore(t *testing.T) *store.Store { t.Helper() - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - store, err := eds.NewStore(storeCfg, t.TempDir(), ds) + storeCfg := store.DefaultParameters() + store, err := store.NewStore(storeCfg, t.TempDir()) require.NoError(t, err) return store } @@ -120,7 +119,7 @@ func createMocknet(t *testing.T, amount int) []libhost.Host { return net.Hosts() } -func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) { +func makeExchange(t *testing.T) (*store.Store, *Client, *Server) { t.Helper() store := newStore(t) hosts := createMocknet(t, 2) From 4052a00492a620a926a13b3b657b1ba7dafe43fd Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Feb 2024 15:34:32 +0500 Subject: [PATCH 074/132] various tests fixes and improvements --- share/getters/getter_test.go | 1 + share/getters/shrex_test.go | 1 + share/shwap/getter_test.go | 1 + share/shwap/row_id.go | 2 + share/shwap/shwap_test.go | 72 +++++++++++++-------------------- share/store/blockstore_test.go | 6 +-- share/store/file/cache_file.go | 2 +- share/store/file/file_header.go | 10 ++--- share/store/file/file_test.go | 7 ---- share/store/file/ods_file.go | 2 +- share/store/store.go | 2 +- 11 files changed, 44 insertions(+), 62 deletions(-) diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index 6578a7eab5..40d099c634 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -86,6 +86,7 @@ func TestStoreGetter(t *testing.T) { eds, err := sg.GetEDS(ctx, eh) require.NoError(t, err) dah, err := share.NewRoot(eds) + require.NoError(t, err) require.True(t, share.DataHash(dah.Hash()).IsEmptyRoot()) }) diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index c906622c08..09bd0655e1 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -199,6 +199,7 @@ func TestShrexGetter(t *testing.T) { eds, err := getter.GetEDS(ctx, eh) require.NoError(t, err) dah, err := share.NewRoot(eds) + require.NoError(t, err) require.True(t, share.DataHash(dah.Hash()).IsEmptyRoot()) }) diff --git a/share/shwap/getter_test.go b/share/shwap/getter_test.go index e79e16ec9a..c2be58275e 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter_test.go @@ -101,6 +101,7 @@ func 
TestGetter(t *testing.T) {
 		assert.NotNil(t, eds)
 		dah, err := share.NewRoot(eds)
+		require.NoError(t, err)
 		require.True(t, share.DataHash(dah.Hash()).IsEmptyRoot())
 	})
 
diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go
index 1c58dbcf18..a831e85b63 100644
--- a/share/shwap/row_id.go
+++ b/share/shwap/row_id.go
@@ -77,6 +77,8 @@ func (rid RowID) Cid() cid.Cid {
 // * Its size is not deterministic which is required for IPLD.
 // * No support for uint16
 func (rid RowID) MarshalTo(data []byte) (int, error) {
+	// TODO(@walldiss): this works only if the underlying array of data was
+	// preallocated with enough capacity. Otherwise the caller might not see the changes.
 	data = binary.LittleEndian.AppendUint64(data, rid.Height)
 	data = binary.LittleEndian.AppendUint16(data, rid.RowIndex)
 	return RowIDSize, nil
diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go
index 7924172450..c567c7c27c 100644
--- a/share/shwap/shwap_test.go
+++ b/share/shwap/shwap_test.go
@@ -1,21 +1,5 @@
 package shwap
 
-import (
-	"context"
-	"testing"
-
-	"github.com/ipfs/boxo/bitswap"
-	"github.com/ipfs/boxo/bitswap/network"
-	"github.com/ipfs/boxo/blockstore"
-	"github.com/ipfs/boxo/exchange"
-	"github.com/ipfs/boxo/routing/offline"
-	ds "github.com/ipfs/go-datastore"
-	dssync "github.com/ipfs/go-datastore/sync"
-	record "github.com/libp2p/go-libp2p-record"
-	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
-	"github.com/stretchr/testify/require"
-)
-
 // TODO(@walldiss): these tests work, but require importing edsStore, which reverses the dependency
 // - need to rework them to test over a local blockstore
 
@@ -244,31 +228,31 @@ import (
 //	})
 //	assert.NoError(t, err)
 //}
-
-func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.Fetcher {
-	net, err := mocknet.FullMeshLinked(2)
-	require.NoError(t, err)
-
-	dstore := dssync.MutexWrap(ds.NewMapDatastore())
-	routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{})
-	_ = bitswap.New(
-		ctx,
-		network.NewFromIpfsHost(net.Hosts()[0], routing),
-		bstore,
-	)
-
-	dstoreClient := dssync.MutexWrap(ds.NewMapDatastore())
-	bstoreClient := blockstore.NewBlockstore(dstoreClient)
-	routingClient := offline.NewOfflineRouter(dstoreClient, record.NamespacedValidator{})
-
-	bitswapClient := bitswap.New(
-		ctx,
-		network.NewFromIpfsHost(net.Hosts()[1], routingClient),
-		bstoreClient,
-	)
-
-	err = net.ConnectAllButSelf()
-	require.NoError(t, err)
-
-	return bitswapClient
-}
+
+//func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.Fetcher {
+//	net, err := mocknet.FullMeshLinked(2)
+//	require.NoError(t, err)
+//
+//	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+//	routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{})
+//	_ = bitswap.New(
+//		ctx,
+//		network.NewFromIpfsHost(net.Hosts()[0], routing),
+//		bstore,
+//	)
+//
+//	dstoreClient := dssync.MutexWrap(ds.NewMapDatastore())
+//	bstoreClient := blockstore.NewBlockstore(dstoreClient)
+//	routingClient := offline.NewOfflineRouter(dstoreClient, record.NamespacedValidator{})
+//
+//	bitswapClient := bitswap.New(
+//		ctx,
+//		network.NewFromIpfsHost(net.Hosts()[1], routingClient),
+//		bstoreClient,
+//	)
+//
+//	err = net.ConnectAllButSelf()
+//	require.NoError(t, err)
+//
+//	return bitswapClient
+//}
diff --git a/share/store/blockstore_test.go b/share/store/blockstore_test.go
index 2940860fc6..287808f8d3 100644
--- a/share/store/blockstore_test.go
+++ b/share/store/blockstore_test.go
@@ -59,10 +59,10 @@ func 
TestBlockstoreGetShareSample(t *testing.T) { t.Run("Row", func(t *testing.T) { width := int(eds.Width()) for i := 0; i < width; i++ { - rowId, err := shwap.NewRowID(height, uint16(i), dah) + rowID, err := shwap.NewRowID(height, uint16(i), dah) require.NoError(t, err) - blk, err := bs.Get(ctx, rowId.Cid()) + blk, err := bs.Get(ctx, rowID.Cid()) require.NoError(t, err) row, err := shwap.RowFromBlock(blk) @@ -71,7 +71,7 @@ func TestBlockstoreGetShareSample(t *testing.T) { err = row.Verify(dah) require.NoError(t, err) - require.EqualValues(t, rowId, row.RowID) + require.EqualValues(t, rowID, row.RowID) } }) diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go index fd4f103792..6ef39f74a9 100644 --- a/share/store/file/cache_file.go +++ b/share/store/file/cache_file.go @@ -147,7 +147,7 @@ func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx row, proof, err := ipld.GetSharesByNamespace(ctx, ax.proofs, ax.root, namespace, f.Size()) if err != nil { - return share.NamespacedRow{}, fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), row, err) + return share.NamespacedRow{}, fmt.Errorf("shares by namespace %s for row %v: %w", namespace.String(), rowIdx, err) } return share.NamespacedRow{ diff --git a/share/store/file/file_header.go b/share/store/file/file_header.go index 8430b07128..8eaf7f03be 100644 --- a/share/store/file/file_header.go +++ b/share/store/file/file_header.go @@ -11,7 +11,7 @@ import ( const HeaderSize = 64 type Header struct { - version FileVersion + version fileVersion // Taken directly from EDS shareSize uint16 @@ -21,13 +21,13 @@ type Header struct { datahash share.DataHash } -type FileVersion uint8 +type fileVersion uint8 const ( - FileV0 FileVersion = iota + FileV0 fileVersion = iota ) -func (h *Header) Version() FileVersion { +func (h *Header) Version() fileVersion { return h.version } @@ -66,7 +66,7 @@ func ReadHeader(r io.Reader) (*Header, error) { } h := &Header{ - version: FileVersion(buf[0]), + version: fileVersion(buf[0]), shareSize: binary.LittleEndian.Uint16(buf[1:3]), squareSize: binary.LittleEndian.Uint16(buf[3:5]), height: binary.LittleEndian.Uint64(buf[5:13]), diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go index 66191f7b24..92e6f4349d 100644 --- a/share/store/file/file_test.go +++ b/share/store/file/file_test.go @@ -189,10 +189,3 @@ func (q quadrant) coordinates(edsSize int) (x, y int) { y = edsSize/2*(int(q-1)/2) + 1 return } - -func TestQuandrant(t *testing.T) { - for _, q := range quadrants { - x, y := q.coordinates(4) - fmt.Println(x, y) - } -} diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go index 42aed35115..d743f48f84 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -286,7 +286,7 @@ func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx in return ndDataFromShares(shares, namespace, rowIdx) } -func (f *OdsFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) { +func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { err := f.readOds() if err != nil { return nil, err diff --git a/share/store/store.go b/share/store/store.go index 711e22ff87..d7922f76f0 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -401,7 +401,7 @@ func loadEmptyHeights(basepath string) (map[uint64]struct{}, error) { defer utils.CloseAndLog(log, "empty heights file", file) emptyHeights := make(map[uint64]struct{}) - gob.NewDecoder(file).Decode(&emptyHeights) + 
err = gob.NewDecoder(file).Decode(&emptyHeights) if err != nil && !errors.Is(err, io.EOF) { return nil, fmt.Errorf("decoding empty heights file: %w", err) } From 134c55425825c9ce4a10c002131fb193d943708d Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Feb 2024 21:24:52 +0500 Subject: [PATCH 075/132] store empty heights in availability --- share/availability/full/availability.go | 44 ++++++++++++------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index cf62a9a065..cc7eabe4d7 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -4,8 +4,8 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/rsmt2d" - "github.com/filecoin-project/dagstore" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/header" @@ -58,23 +58,28 @@ func (fa *ShareAvailability) Stop(context.Context) error { // SharesAvailable reconstructs the data committed to the given Root by requesting // enough Shares from the network. func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { - dah := header.DAH - // short-circuit if the given root is minimum DAH of an empty data square, to avoid datastore hit - if share.DataHash(dah.Hash()).IsEmptyRoot() { + // a hack to avoid loading the whole EDS in mem if we store it already. + if ok, _ := fa.store.HasByHeight(ctx, header.Height()); ok { return nil } - // we assume the caller of this method has already performed basic validation on the - // given dah/root. If for some reason this has not happened, the node should panic. - if err := dah.ValidateBasic(); err != nil { - log.Errorw("Availability validation cannot be performed on a malformed DataAvailabilityHeader", - "err", err) - panic(err) + eds, err := fa.getEds(ctx, header) + if err != nil { + return err } - // a hack to avoid loading the whole EDS in mem if we store it already. 
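The removal run continues below: the old hash-based short-circuit goes away, storage moves behind fa.store.Put, and retrieval lands in the new getEds helper. The error policy getEds keeps is easy to misread because the condition it inherits mixes || and && without parentheses (&& binds tighter). Spelled out as an equivalent helper, the same policy reads (a sketch of the logic already in the hunk, not new behavior):

// ErrNotFound always degrades to ErrNotAvailable; DeadlineExceeded does so
// only when the error is not byzantine, presumably so byzantine errors
// propagate untouched for fraud-proof handling.
func classifyGetEDSErr(err error) error {
	var byzErr *byzantine.ErrByzantine
	switch {
	case err == nil:
		return nil
	case errors.Is(err, context.Canceled):
		return err
	case errors.Is(err, share.ErrNotFound):
		return share.ErrNotAvailable
	case errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzErr):
		return share.ErrNotAvailable
	default:
		return err
	}
}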
- if ok, _ := fa.store.HasByHash(ctx, dah.Hash()); ok { - return nil + _, err = fa.store.Put(ctx, header.DAH.Hash(), header.Height(), eds) + if err != nil { + return fmt.Errorf("full availability: failed to store eds: %w", err) + } + return nil +} + +func (fa *ShareAvailability) getEds(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + dah := header.DAH + // short-circuit if the given root is minimum DAH of an empty data square, to avoid datastore hit + if share.DataHash(dah.Hash()).IsEmptyRoot() { + return share.EmptyExtendedDataSquare(), nil } adder := ipld.NewProofsAdder(len(dah.RowRoots), false) @@ -84,19 +89,14 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header eds, err := fa.getter.GetEDS(ctx, header) if err != nil { if errors.Is(err, context.Canceled) { - return err + return nil, err } log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error()) var byzantineErr *byzantine.ErrByzantine if errors.Is(err, share.ErrNotFound) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) { - return share.ErrNotAvailable + return nil, share.ErrNotAvailable } - return err + return nil, err } - - _, err = fa.store.Put(ctx, dah.Hash(), header.Height(), eds) - if err != nil && !errors.Is(err, dagstore.ErrShardExists) { - return fmt.Errorf("full availability: failed to store eds: %w", err) - } - return nil + return eds, nil } From 9a6d1a01c8a99045803724d605952dacbf135bee Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Feb 2024 21:31:00 +0500 Subject: [PATCH 076/132] remove proofs collection into proofAdder --- core/exchange.go | 17 ++------ core/listener.go | 11 +---- header/headertest/fraud/testing.go | 9 ++--- nodebuilder/store_test.go | 54 +++++-------------------- share/availability/full/availability.go | 5 --- 5 files changed, 19 insertions(+), 77 deletions(-) diff --git a/core/exchange.go b/core/exchange.go index a12ae6e13f..52b9b781c1 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -8,12 +8,9 @@ import ( "golang.org/x/sync/errgroup" - libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/store" + libhead "github.com/celestiaorg/go-header" ) const concurrencyLimit = 4 @@ -132,10 +129,8 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende } // extend block data - adder := ipld.NewProofsAdder(int(block.Data.SquareSize), false) - defer adder.Purge() - eds, err := extendBlock(block.Data, block.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + eds, err := extendBlock(block.Data, block.Header.Version.App) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", &block.Height, err) } @@ -150,7 +145,6 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende &block.Height, hash, eh.Hash()) } - ctx = ipld.CtxWithProofsAdder(ctx, adder) _, err = ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for height %d: %w", &block.Height, err) @@ -176,11 +170,7 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 } log.Debugw("fetched signed block from core", "height", b.Header.Height) - // extend block data - adder := ipld.NewProofsAdder(int(b.Data.SquareSize), false) - defer adder.Purge() - - eds, err := extendBlock(b.Data, 
b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + eds, err := extendBlock(b.Data, b.Header.Version.App) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", b.Header.Height, err) } @@ -190,7 +180,6 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 panic(fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err)) } - ctx = ipld.CtxWithProofsAdder(ctx, adder) _, err = ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for block height %d: %w", b.Header.Height, err) diff --git a/core/listener.go b/core/listener.go index fefea40ba9..2cd4675e2e 100644 --- a/core/listener.go +++ b/core/listener.go @@ -11,13 +11,10 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" "github.com/celestiaorg/celestia-node/share/store" + libhead "github.com/celestiaorg/go-header" ) var ( @@ -192,11 +189,8 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS span.SetAttributes( attribute.Int64("height", b.Header.Height), ) - // extend block data - adder := ipld.NewProofsAdder(int(b.Data.SquareSize), false) - defer adder.Purge() - eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + eds, err := extendBlock(b.Data, b.Header.Version.App) if err != nil { return fmt.Errorf("extending block data: %w", err) } @@ -208,7 +202,6 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS } // attempt to store block data if not empty - ctx = ipld.CtxWithProofsAdder(ctx, adder) _, err = cl.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return fmt.Errorf("storing EDS: %w", err) diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go index 68d38f63ad..fa4ef63f2d 100644 --- a/header/headertest/fraud/testing.go +++ b/header/headertest/fraud/testing.go @@ -2,6 +2,7 @@ package headerfraud import ( "context" + "github.com/celestiaorg/celestia-node/share/store" "testing" "time" @@ -12,12 +13,10 @@ import ( "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -45,7 +44,7 @@ func NewFraudMaker(t *testing.T, height int64, vals []types.PrivValidator, valSe } } -func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header.ConstructFn { +func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *store.Store) header.ConstructFn { return func( h *types.Header, comm *types.Commit, @@ -58,13 +57,11 @@ func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header hdr := *h if h.Height == f.height { - adder := ipld.NewProofsAdder(odsSize, false) - square := edstest.RandByzantineEDS(f.t, odsSize, nmt.NodeVisitor(adder.VisitFn())) + square := edstest.RandByzantineEDS(f.t, odsSize) dah, err := da.NewDataAvailabilityHeader(square) require.NoError(f.t, err) hdr.DataHash = dah.Hash() - ctx := 
ipld.CtxWithProofsAdder(context.Background(), adder) require.NoError(f.t, edsStore.Put(ctx, h.DataHash.Bytes(), square)) *eds = *square diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index 43450d0a34..67b49961de 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -10,16 +10,9 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/nodebuilder/node" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestRepo(t *testing.T) { @@ -75,44 +68,19 @@ func BenchmarkStore(b *testing.B) { store := newStore(ctx, b, eds.DefaultParameters(), dir) size := 128 - b.Run("enabled eds proof caching", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - adder := ipld.NewProofsAdder(size*2, false) - shares := sharetest.RandShares(b, size*size) - eds, err := rsmt2d.ComputeExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(size), - nmt.NodeVisitor(adder.VisitFn())), - ) - require.NoError(b, err) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - ctx := ipld.CtxWithProofsAdder(ctx, adder) - - b.StartTimer() - err = store.edsStore.Put(ctx, dah.Hash(), eds) - b.StopTimer() - require.NoError(b, err) - } - }) - b.Run("disabled eds proof caching", func(b *testing.B) { - b.ResetTimer() + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + eds := edstest.RandEDS(b, size) + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(b, err) + + b.StartTimer() + err = store.edsStore.Put(ctx, dah.Hash(), eds) b.StopTimer() - for i := 0; i < b.N; i++ { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - - b.StartTimer() - err = store.edsStore.Put(ctx, dah.Hash(), eds) - b.StopTimer() - require.NoError(b, err) - } - }) + require.NoError(b, err) + } }) } diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index cc7eabe4d7..5cb62a5954 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -11,7 +11,6 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/store" ) @@ -82,10 +81,6 @@ func (fa *ShareAvailability) getEds(ctx context.Context, header *header.Extended return share.EmptyExtendedDataSquare(), nil } - adder := ipld.NewProofsAdder(len(dah.RowRoots), false) - ctx = ipld.CtxWithProofsAdder(ctx, adder) - defer adder.Purge() - eds, err := fa.getter.GetEDS(ctx, header) if err != nil { if errors.Is(err, context.Canceled) { From cf2d29e7dec8b28193726bb1592b7d5c63900867 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Feb 2024 21:46:29 +0500 Subject: [PATCH 077/132] close file after read everywhere --- core/exchange.go | 7 ++-- core/listener.go | 4 ++- header/headertest/fraud/testing.go | 4 ++- nodebuilder/share/share_test.go | 43 ------------------------- 
share/availability/full/availability.go | 4 ++- share/getters/store.go | 2 +- share/store/file/file_closer.go | 2 +- share/store/file/square.go | 1 + share/store/store_test.go | 15 +++++---- 9 files changed, 26 insertions(+), 56 deletions(-) delete mode 100644 nodebuilder/share/share_test.go diff --git a/core/exchange.go b/core/exchange.go index 52b9b781c1..5eb1117ea7 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/celestiaorg/celestia-node/libs/utils" "time" "golang.org/x/sync/errgroup" @@ -145,10 +146,11 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende &block.Height, hash, eh.Hash()) } - _, err = ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) + f, err := ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for height %d: %w", &block.Height, err) } + utils.CloseAndLog(log, "file", f) return eh, nil } @@ -180,9 +182,10 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 panic(fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err)) } - _, err = ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) + f, err := ce.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return nil, fmt.Errorf("storing EDS to eds.Store for block height %d: %w", b.Header.Height, err) } + utils.CloseAndLog(log, "file", f) return eh, nil } diff --git a/core/listener.go b/core/listener.go index 2cd4675e2e..8b2dcb87da 100644 --- a/core/listener.go +++ b/core/listener.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/libs/utils" "time" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -202,10 +203,11 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS } // attempt to store block data if not empty - _, err = cl.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) + f, err := cl.store.Put(ctx, eh.DAH.Hash(), eh.Height(), eds) if err != nil { return fmt.Errorf("storing EDS: %w", err) } + utils.CloseAndLog(log, "file", f) syncing, err := cl.fetcher.IsSyncing(ctx) if err != nil { diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go index fa4ef63f2d..469350bde0 100644 --- a/header/headertest/fraud/testing.go +++ b/header/headertest/fraud/testing.go @@ -62,7 +62,9 @@ func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *store.Store) head require.NoError(f.t, err) hdr.DataHash = dah.Hash() - require.NoError(f.t, edsStore.Put(ctx, h.DataHash.Bytes(), square)) + file, err := edsStore.Put(context.Background(), dah.Hash(), uint64(h.Height), square) + require.NoError(f.t, err) + require.NoError(f.t, file.Close()) *eds = *square } diff --git a/nodebuilder/share/share_test.go b/nodebuilder/share/share_test.go deleted file mode 100644 index db170709db..0000000000 --- a/nodebuilder/share/share_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package share - -import ( - "context" - "testing" - - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" -) - -func Test_EmptyCARExists(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), ds) - 
require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds := share.EmptyExtendedDataSquare() - dah, err := share.NewRoot(eds) - require.NoError(t, err) - - // add empty EDS to store - err = ensureEmptyCARExists(ctx, edsStore) - assert.NoError(t, err) - - // assert that the empty car exists - has, err := edsStore.Has(ctx, dah.Hash()) - assert.True(t, has) - assert.NoError(t, err) - - // assert that the empty car is, in fact, empty - emptyEds, err := edsStore.Get(ctx, dah.Hash()) - assert.Equal(t, eds.Flattened(), emptyEds.Flattened()) - assert.NoError(t, err) -} diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index 5cb62a5954..1dbe845700 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/rsmt2d" logging "github.com/ipfs/go-log/v2" @@ -67,10 +68,11 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header return err } - _, err = fa.store.Put(ctx, header.DAH.Hash(), header.Height(), eds) + f, err := fa.store.Put(ctx, header.DAH.Hash(), header.Height(), eds) if err != nil { return fmt.Errorf("full availability: failed to store eds: %w", err) } + utils.CloseAndLog(log, "file", f) return nil } diff --git a/share/getters/store.go b/share/getters/store.go index deadd140ba..68d1a27d8d 100644 --- a/share/getters/store.go +++ b/share/getters/store.go @@ -51,7 +51,7 @@ func (sg *StoreGetter) GetShare(ctx context.Context, header *header.ExtendedHead return nil, err } - file, err := sg.store.GetByHash(ctx, dah.Hash()) + file, err := sg.store.GetByHeight(ctx, header.Height()) if errors.Is(err, store.ErrNotFound) { // convert error to satisfy getter interface contract err = share.ErrNotFound diff --git a/share/store/file/file_closer.go b/share/store/file/file_closer.go index dd459474f4..96880264ca 100644 --- a/share/store/file/file_closer.go +++ b/share/store/file/file_closer.go @@ -27,7 +27,7 @@ func CloseOnceFile(f EdsFile) *closeOnceFile { func (c *closeOnceFile) Close() error { if !c.closed.Swap(true) { err := c.f.Close() - // release reference to the file + // release reference to the file to allow GC to collect it c.f = nil return err } diff --git a/share/store/file/square.go b/share/store/file/square.go index ea5c8605c4..f0712cdf5f 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -60,6 +60,7 @@ func (s square) size() int { func (s square) close() error { if s != nil { + // return square to memPools memPools.get(s.size()).putSquare(s) } return nil diff --git a/share/store/store_test.go b/share/store/store_test.go index 2669ff47fc..6628b6fc3b 100644 --- a/share/store/store_test.go +++ b/share/store/store_test.go @@ -96,11 +96,11 @@ func TestEDSStore(t *testing.T) { f, err = edsStore.GetByHeight(ctx, height) require.NoError(t, err) - fromFile, err := f.EDS(ctx) + fileEds, err := f.EDS(ctx) require.NoError(t, err) require.NoError(t, f.Close()) - require.True(t, eds.Equals(fromFile)) + require.True(t, eds.Equals(fileEds)) }) t.Run("GetByDataHash", func(t *testing.T) { @@ -133,11 +133,13 @@ func TestEDSStore(t *testing.T) { require.NoError(t, err) require.False(t, has) - _, err = edsStore.GetByHeight(ctx, height) + f, err := edsStore.GetByHeight(ctx, height) require.ErrorIs(t, err, ErrNotFound) + require.NoError(t, f.Close()) - _, err = edsStore.GetByHash(ctx, dah.Hash()) + f, err = edsStore.GetByHash(ctx, 
dah.Hash()) require.ErrorIs(t, err, ErrNotFound) + require.NoError(t, f.Close()) }) t.Run("Remove", func(t *testing.T) { @@ -206,6 +208,7 @@ func TestEDSStore(t *testing.T) { f, err = edsStore.GetByHeight(ctx, height) require.NoError(t, err) require.True(t, f.DataHash().IsEmptyRoot()) + require.NoError(t, f.Close()) }) t.Run("empty EDS are persisted", func(t *testing.T) { @@ -281,7 +284,7 @@ func BenchmarkStore(b *testing.B) { for i := 0; i < b.N; i++ { f, err := edsStore.GetByHeight(ctx, height) require.NoError(b, err) - _ = f.Close() + require.NoError(b, f.Close()) } }) @@ -305,7 +308,7 @@ func BenchmarkStore(b *testing.B) { for i := 0; i < b.N; i++ { f, err := edsStore.GetByHash(ctx, dah.Hash()) require.NoError(b, err) - _ = f.Close() + require.NoError(b, f.Close()) } }) } From d79338e5b5d823322d2e0f0d4891ee78be97a3fb Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 9 Feb 2024 18:30:12 +0500 Subject: [PATCH 078/132] put heights inside blocks folder --- share/store/store.go | 21 ++++++++------------- share/store/store_test.go | 6 ++---- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/share/store/store.go b/share/store/store.go index d7922f76f0..fb838b39af 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -36,9 +36,9 @@ var ( // - lock store folder const ( - hashsPath = "/blocks/" - heightsPath = "/heights/" - emptyHeights = "/empty_heights" + blocksPath = "/blocks/" + heightsPath = blocksPath + "heights/" + emptyHeights = blocksPath + "/empty_heights" defaultDirPerm = 0755 ) @@ -70,7 +70,7 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { } // ensure blocks folder - if err := ensureFolder(basePath + hashsPath); err != nil { + if err := ensureFolder(basePath + blocksPath); err != nil { return nil, fmt.Errorf("ensure blocks folder: %w", err) } @@ -129,11 +129,6 @@ func (s *Store) Put( } // short circuit if file exists - if has, _ := s.hasByHash(datahash); has { - s.metrics.observePutExist(ctx) - return s.getByHash(datahash) - } - if has, _ := s.hasByHeight(height); has { log.Warnw("put: file already exists by height, but not by hash", "height", height, @@ -142,7 +137,7 @@ func (s *Store) Put( return s.getByHeight(height) } - path := s.basepath + hashsPath + datahash.String() + path := s.basepath + blocksPath + datahash.String() file, err := file.CreateOdsFile(path, height, datahash, square) if err != nil { s.metrics.observePut(ctx, time.Since(tNow), square.Width(), true) @@ -185,7 +180,7 @@ func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { return emptyFile, nil } - path := s.basepath + hashsPath + datahash.String() + path := s.basepath + blocksPath + datahash.String() odsFile, err := file.OpenOdsFile(path) if err != nil { if os.IsNotExist(err) { @@ -246,7 +241,7 @@ func (s *Store) hasByHash(datahash share.DataHash) (bool, error) { if datahash.IsEmptyRoot() { return true, nil } - path := s.basepath + hashsPath + datahash.String() + path := s.basepath + blocksPath + datahash.String() return pathExists(path) } @@ -307,7 +302,7 @@ func (s *Store) remove(height uint64) error { return fmt.Errorf("removing by height: %w", err) } - hashPath := s.basepath + hashsPath + hashStr + hashPath := s.basepath + blocksPath + hashStr if err = os.Remove(hashPath); err != nil { return fmt.Errorf("removing by hash: %w", err) } diff --git a/share/store/store_test.go b/share/store/store_test.go index 6628b6fc3b..9c809d0eaf 100644 --- a/share/store/store_test.go +++ b/share/store/store_test.go @@ -133,13 +133,11 @@ func TestEDSStore(t 
*testing.T) { require.NoError(t, err) require.False(t, has) - f, err := edsStore.GetByHeight(ctx, height) + _, err = edsStore.GetByHeight(ctx, height) require.ErrorIs(t, err, ErrNotFound) - require.NoError(t, f.Close()) - f, err = edsStore.GetByHash(ctx, dah.Hash()) + _, err = edsStore.GetByHash(ctx, dah.Hash()) require.ErrorIs(t, err, ErrNotFound) - require.NoError(t, f.Close()) }) t.Run("Remove", func(t *testing.T) { From 6b7080eab742e5b53501bfda432a065486b2e67b Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 9 Feb 2024 20:28:49 +0500 Subject: [PATCH 079/132] add concurrency safety for cached file add concurrency tests for all files --- share/getters/shrex_test.go | 2 +- share/p2p/shrexeds/client.go | 2 +- share/store/file/cache_file.go | 52 +++++++++----- share/store/file/file_test.go | 109 ++++++++++++++++++++++-------- share/store/file/ods_file_test.go | 2 +- share/store/metrics.go | 2 +- share/store/store.go | 5 +- 7 files changed, 119 insertions(+), 55 deletions(-) diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index 09bd0655e1..5637499a84 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -188,7 +188,7 @@ func TestShrexGetter(t *testing.T) { got, err := getter.GetEDS(ctx, eh) require.NoError(t, err) - require.Equal(t, eds.Flattened(), got.Flattened()) + require.True(t, got.Equals(eds)) }) t.Run("EDS get empty", func(t *testing.T) { diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index 7ef2ced518..1d21beb665 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -165,7 +165,7 @@ func readEds(ctx context.Context, stream network.Stream, eh *header.ExtendedHead // verify that the EDS hash matches the expected hash newDah, err := share.NewRoot(eds) if err != nil { - return nil, err + return nil, fmt.Errorf("create new root from eds: %w", err) } if !bytes.Equal(newDah.Hash(), eh.DAH.Hash()) { return nil, fmt.Errorf( diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go index 6ef39f74a9..a97eaa1060 100644 --- a/share/store/file/cache_file.go +++ b/share/store/file/cache_file.go @@ -4,10 +4,10 @@ import ( "context" "errors" "fmt" - "github.com/ipfs/boxo/blockservice" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" + "sync" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" @@ -19,18 +19,22 @@ import ( var _ EdsFile = (*CacheFile)(nil) -// TODO: allow concurrency safety fpr CacheFile methods type CacheFile struct { EdsFile + // lock protects axisCache + lock sync.RWMutex + // axisCache caches the axis shares and proofs axisCache []map[int]inMemoryAxis // disableCache disables caching of rows for testing purposes disableCache bool } type inMemoryAxis struct { - root []byte shares []share.Share + + // root will be set only when proofs are calculated + root []byte proofs blockservice.BlockGetter } @@ -52,6 +56,7 @@ func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, return nil, err } + // build share proof from proofs cached for given axis share, err := ipld.GetShareWithProof(ctx, ax.proofs, ax.root, shrIdx, f.Size(), axisType) if err != nil { return nil, fmt.Errorf("building proof from cache: %w", err) @@ -61,24 +66,27 @@ func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, } func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, error) { - // return axis from cache if possible - ax := f.axisCache[axisType][axisIdx] + // 
return axis with proofs from cache if possible + ax, ok := f.getAxisFromCache(axisType, axisIdx) if ax.proofs != nil { return ax, nil } // build proofs from shares and cache them - shrs, err := f.axis(ctx, axisType, axisIdx) - if err != nil { - return inMemoryAxis{}, fmt.Errorf("get axis: %w", err) + if !ok { + shrs, err := f.axis(ctx, axisType, axisIdx) + if err != nil { + return inMemoryAxis{}, fmt.Errorf("get axis: %w", err) + } + ax.shares = shrs } // calculate proofs adder := ipld.NewProofsAdder(f.Size(), true) tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx), nmt.NodeVisitor(adder.VisitFn())) - for _, shr := range shrs { - err = tree.Push(shr) + for _, shr := range ax.shares { + err := tree.Push(shr) if err != nil { return inMemoryAxis{}, fmt.Errorf("push shares: %w", err) } @@ -90,23 +98,21 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax return inMemoryAxis{}, fmt.Errorf("calculating root: %w", err) } - ax = f.axisCache[axisType][axisIdx] ax.root = root - ax.shares = shrs ax.proofs, err = newRowProofsGetter(adder.Proofs()) if err != nil { return inMemoryAxis{}, fmt.Errorf("creating proof getter: %w", err) } if !f.disableCache { - f.axisCache[axisType][axisIdx] = ax + f.storeAxisInCache(axisType, axisIdx, ax) } return ax, nil } func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { // return axis from cache if possible - ax, ok := f.axisCache[axisType][axisIdx] + ax, ok := f.getAxisFromCache(axisType, axisIdx) if ok { return ax.shares[:f.Size()/2], nil } @@ -122,9 +128,8 @@ func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx if err != nil { return nil, fmt.Errorf("extending shares: %w", err) } - f.axisCache[axisType][axisIdx] = inMemoryAxis{ - shares: axis, - } + ax.shares = axis + f.storeAxisInCache(axisType, axisIdx, ax) } return half, nil @@ -176,6 +181,19 @@ func (f *CacheFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) return eds, nil } +func (f *CacheFile) storeAxisInCache(axisType rsmt2d.Axis, axisIdx int, axis inMemoryAxis) { + f.lock.Lock() + defer f.lock.Unlock() + f.axisCache[axisType][axisIdx] = axis +} + +func (f *CacheFile) getAxisFromCache(axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, bool) { + f.lock.RLock() + defer f.lock.RUnlock() + ax, ok := f.axisCache[axisType][axisIdx] + return ax, ok +} + // rowProofsGetter implements blockservice.BlockGetter interface type rowProofsGetter struct { proofs map[cid.Cid]blocks.Block diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go index 92e6f4349d..ea87f919c2 100644 --- a/share/store/file/file_test.go +++ b/share/store/file/file_test.go @@ -5,6 +5,7 @@ import ( "fmt" mrand "math/rand" "strconv" + "sync" "testing" "time" @@ -24,28 +25,53 @@ func testFileShare(t *testing.T, createFile createFile, size int) { eds := edstest.RandEDS(t, size) fl := createFile(eds) - root, err := share.NewRoot(eds) + dah, err := share.NewRoot(eds) require.NoError(t, err) width := int(eds.Width()) - for x := 0; x < width; x++ { - for y := 0; y < width; y++ { - shr, err := fl.Share(context.TODO(), x, y) - require.NoError(t, err) - - var axishash []byte - if shr.Axis == rsmt2d.Row { - require.Equal(t, getAxis(eds, shr.Axis, y)[x], shr.Share) - axishash = root.RowRoots[y] - } else { - require.Equal(t, getAxis(eds, shr.Axis, x)[y], shr.Share) - axishash = root.ColumnRoots[x] + t.Run("single thread", func(t *testing.T) { + for x := 0; x < width; x++ { + for y := 
0; y < width; y++ { + testShare(t, fl, eds, dah, x, y) } + } + }) - ok := shr.Validate(axishash, x, y, width) - require.True(t, ok) + t.Run("parallel", func(t *testing.T) { + wg := sync.WaitGroup{} + for y := 0; y < width; y++ { + for x := 0; x < width; x++ { + wg.Add(1) + go func(x, y int) { + defer wg.Done() + testShare(t, fl, eds, dah, x, y) + }(x, y) + } } + wg.Wait() + }) +} + +func testShare(t *testing.T, + fl EdsFile, + eds *rsmt2d.ExtendedDataSquare, + dah *share.Root, + x, y int) { + width := int(eds.Width()) + shr, err := fl.Share(context.TODO(), x, y) + require.NoError(t, err) + + var axishash []byte + if shr.Axis == rsmt2d.Row { + require.Equal(t, getAxis(eds, shr.Axis, y)[x], shr.Share) + axishash = dah.RowRoots[y] + } else { + require.Equal(t, getAxis(eds, shr.Axis, x)[y], shr.Share) + axishash = dah.ColumnRoots[x] } + + ok := shr.Validate(axishash, x, y, width) + require.True(t, ok) } func testFileData(t *testing.T, createFile createFile, size int) { @@ -88,13 +114,31 @@ func testFileAxisHalf(t *testing.T, createFile createFile, size int) { eds := edstest.RandEDS(t, size) fl := createFile(eds) - for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { - for i := 0; i < size; i++ { - half, err := fl.AxisHalf(context.Background(), axisType, i) - require.NoError(t, err) - require.Equal(t, getAxis(eds, axisType, i)[:size], half) + t.Run("single thread", func(t *testing.T) { + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + for i := 0; i < size; i++ { + half, err := fl.AxisHalf(context.Background(), axisType, i) + require.NoError(t, err) + require.Equal(t, getAxis(eds, axisType, i)[:size], half) + } } - } + }) + + t.Run("parallel", func(t *testing.T) { + wg := sync.WaitGroup{} + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + for i := 0; i < size; i++ { + wg.Add(1) + go func(axisType rsmt2d.Axis, idx int) { + defer wg.Done() + half, err := fl.AxisHalf(context.Background(), axisType, idx) + require.NoError(t, err) + require.Equal(t, getAxis(eds, axisType, idx)[:size], half) + }(axisType, i) + } + } + wg.Wait() + }) } func testFileEds(t *testing.T, createFile createFile, size int) { @@ -113,21 +157,26 @@ func testFileReader(t *testing.T, createFile createFile, odsSize int) { eds := edstest.RandEDS(t, odsSize) f := createFile(eds) + // verify that the reader represented by file can be read from + // multiple times, without exhausting the underlying reader. + wg := sync.WaitGroup{} + for i := 0; i < 6; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testReader(t, ctx, f, eds) + }() + } + wg.Wait() +} + +func testReader(t *testing.T, ctx context.Context, f EdsFile, eds *rsmt2d.ExtendedDataSquare) { reader, err := f.Reader() require.NoError(t, err) streamed, err := ReadEds(ctx, reader, f.Size()) require.NoError(t, err) require.True(t, eds.Equals(streamed)) - - // verify that the reader represented by file can be read from - // multiple times, without exhausting the underlying reader. 
- reader2, err := f.Reader() - require.NoError(t, err) - - streamed2, err := ReadEds(ctx, reader2, f.Size()) - require.NoError(t, err) - require.True(t, eds.Equals(streamed2)) } func benchGetAxisFromFile(b *testing.B, newFile func(size int) EdsFile, minSize, maxSize int) { diff --git a/share/store/file/ods_file_test.go b/share/store/file/ods_file_test.go index d19cc5b1d1..ee7c30f7d3 100644 --- a/share/store/file/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -26,7 +26,7 @@ func TestCreateOdsFile(t *testing.T) { } func TestOdsFile(t *testing.T) { - size := 32 + size := 8 createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { path := t.TempDir() + "/testfile" fl, err := CreateOdsFile(path, 1, []byte{}, eds) diff --git a/share/store/metrics.go b/share/store/metrics.go index c4f0840f33..b251715be9 100644 --- a/share/store/metrics.go +++ b/share/store/metrics.go @@ -33,7 +33,7 @@ func (s *Store) WithMetrics() error { return err } - putExists, err := meter.Int64Counter("eds_store_put_exists_histogram", + putExists, err := meter.Int64Counter("eds_store_put_exists_counter", metric.WithDescription("eds store put file exists")) if err != nil { return err diff --git a/share/store/store.go b/share/store/store.go index fb838b39af..7513f1d9d0 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -29,7 +29,7 @@ var ( ) // TODO(@walldiss): -// - index empty files by height +// - handle blocks duplicates(same hash,different height) // - persist store stats like amount of files, file types, avg file size etc in a file // - handle corrupted files // - maintain in-memory missing files index / bloom-filter to fast return for not stored files. @@ -130,9 +130,6 @@ func (s *Store) Put( // short circuit if file exists if has, _ := s.hasByHeight(height); has { - log.Warnw("put: file already exists by height, but not by hash", - "height", height, - "hash", datahash.String()) s.metrics.observePutExist(ctx) return s.getByHeight(height) } From f8bbacadb0215b129268a46faaa5afb8d66e5805 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sat, 10 Feb 2024 01:36:18 +0500 Subject: [PATCH 080/132] improve logging for shrex eds --- core/exchange.go | 1 - nodebuilder/share/constructors.go | 13 ------------- nodebuilder/share/module.go | 1 - share/getters/shrex.go | 1 + share/getters/shrex_test.go | 2 +- share/p2p/shrexeds/client.go | 6 +++++- share/p2p/shrexeds/server.go | 4 ++-- share/store/file/square.go | 1 + 8 files changed, 10 insertions(+), 19 deletions(-) diff --git a/core/exchange.go b/core/exchange.go index 5eb1117ea7..22d4996b56 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -130,7 +130,6 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende } // extend block data - eds, err := extendBlock(block.Data, block.Header.Version.App) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", &block.Height, err) diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 816232bf9f..dcd91e3f66 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -1,16 +1,12 @@ package share import ( - "context" - - "github.com/ipfs/boxo/blockservice" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" disc 
"github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" "github.com/celestiaorg/celestia-node/share/shwap" @@ -42,15 +38,6 @@ func newModule(getter share.Getter, avail share.Availability) Module { return &module{getter, avail} } -// ensureEmptyEDSInBS checks if the given DAG contains an empty block data square. -// If it does not, it stores an empty block. This optimization exists to prevent -// redundant storing of empty block data so that it is only stored once and returned -// upon request for a block with an empty data square. -func ensureEmptyEDSInBS(ctx context.Context, bServ blockservice.BlockService) error { - _, err := ipld.AddShares(ctx, share.EmptyBlockShares(), bServ) - return err -} - func lightGetter( shrexGetter *getters.ShrexGetter, shwapGetter *shwap.Getter, diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index b58d833c41..73e9a14292 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -192,7 +192,6 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option }), peerManagerWithShrexPools, shrexGetterComponents, - fx.Invoke(ensureEmptyEDSInBS), fx.Provide(shwap.NewGetter), fx.Provide(lightGetter), // shrexsub broadcaster stub for daser diff --git a/share/getters/shrex.go b/share/getters/shrex.go index 40b0a8ea02..0204755faf 100644 --- a/share/getters/shrex.go +++ b/share/getters/shrex.go @@ -182,6 +182,7 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader } log.Debugw("eds: request failed", "height", header.Height(), + "hash", header.DAH.String(), "peer", peer.String(), "attempt", attempt, "err", getErr, diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index 5637499a84..4242d913d0 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -191,7 +191,7 @@ func TestShrexGetter(t *testing.T) { require.True(t, got.Equals(eds)) }) - t.Run("EDS get empty", func(t *testing.T) { + t.Run("EDS get empty block", func(t *testing.T) { // empty root emptyRoot := da.MinDataAvailabilityHeader() eh := headertest.RandExtendedHeaderWithRoot(t, &emptyRoot) diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index 1d21beb665..d7169d2667 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -165,7 +165,11 @@ func readEds(ctx context.Context, stream network.Stream, eh *header.ExtendedHead // verify that the EDS hash matches the expected hash newDah, err := share.NewRoot(eds) if err != nil { - return nil, fmt.Errorf("create new root from eds: %w", err) + return nil, fmt.Errorf("create new root from eds: %w, size:%v , expectedSize:%v", + err, + eds.Width(), + len(eh.DAH.RowRoots), + ) } if !bytes.Equal(newDah.Hash(), eh.DAH.Hash()) { return nil, fmt.Errorf( diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go index 3bc6fef50e..5447c8ee57 100644 --- a/share/p2p/shrexeds/server.go +++ b/share/p2p/shrexeds/server.go @@ -99,11 +99,11 @@ func (s *Server) handleStream(stream network.Stream) { defer utils.CloseAndLog(logger, "file", file) status = p2p_pb.Status_OK case errors.Is(err, store.ErrNotFound): - logger.Warnw("server: request hash not found") + logger.Warnw("server: request height not found") s.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) status = p2p_pb.Status_NOT_FOUND case err != nil: - logger.Errorw("server: get CAR", "err", err) + logger.Errorw("server: get file", "err", err) status = p2p_pb.Status_INTERNAL } diff --git 
a/share/store/file/square.go b/share/store/file/square.go index f0712cdf5f..3d8cb0801b 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -89,6 +89,7 @@ func (s square) axisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ( } func (s square) eds() (*rsmt2d.ExtendedDataSquare, error) { + //TODO(@walldiss): use mempool shrs := make([]share.Share, 0, 4*s.size()*s.size()) for _, row := range s { shrs = append(shrs, row...) From eb6e377e995a1cbffab187957ae808ff2a949005 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sat, 10 Feb 2024 02:24:40 +0500 Subject: [PATCH 081/132] log amount of written bytes by server when stream interrupted --- share/p2p/shrexeds/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go index 5447c8ee57..8478dcd426 100644 --- a/share/p2p/shrexeds/server.go +++ b/share/p2p/shrexeds/server.go @@ -179,9 +179,9 @@ func (s *Server) writeODS(logger *zap.SugaredLogger, file file.EdsFile, stream n } buf := make([]byte, s.params.BufferSize) - _, err = io.CopyBuffer(stream, reader, buf) + n, err := io.CopyBuffer(stream, reader, buf) if err != nil { - return fmt.Errorf("writing ODS bytes: %w", err) + return fmt.Errorf("written: %v, writing ODS bytes: %w", n, err) } return nil From 02b2e80f12097a026d28098b196a05f8cf2c7dd0 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sat, 10 Feb 2024 03:35:05 +0500 Subject: [PATCH 082/132] log amount of bytes read from stream --- share/p2p/shrexeds/server.go | 1 + share/store/file/eds_file.go | 3 +++ share/store/file/ods_file.go | 14 +++++--------- share/store/file/square.go | 10 ++++++++-- share/store/store.go | 1 + 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go index 8478dcd426..b38f3a1728 100644 --- a/share/p2p/shrexeds/server.go +++ b/share/p2p/shrexeds/server.go @@ -184,5 +184,6 @@ func (s *Server) writeODS(logger *zap.SugaredLogger, file file.EdsFile, stream n return fmt.Errorf("written: %v, writing ODS bytes: %w", n, err) } + logger.Debugw("server: wrote ODS", "bytes", n) return nil } diff --git a/share/store/file/eds_file.go b/share/store/file/eds_file.go index fa2fcc798f..4556cede9a 100644 --- a/share/store/file/eds_file.go +++ b/share/store/file/eds_file.go @@ -2,6 +2,7 @@ package file import ( "context" + logging "github.com/ipfs/go-log/v2" "io" "github.com/celestiaorg/rsmt2d" @@ -9,6 +10,8 @@ import ( "github.com/celestiaorg/celestia-node/share" ) +var log = logging.Logger("store/file") + type EdsFile interface { io.Closer // Reader returns binary reader for the file. 
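One caveat about the logger introduced above: logging.Logger returns a zap-style sugared logger, so the read-progress calls this patch adds to square.go further down (log.Info("start reading ods", "ods size", odsLn, ...)) will simply stringify and concatenate their arguments. If key/value output is intended, the *w variants accept the pairs (assuming the standard go-log v2 API):

// Infow treats the trailing arguments as alternating keys and values
log.Infow("start reading ods", "ods_size", odsLn, "share_size", shareSize)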
diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go index d743f48f84..f545ea59ee 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -55,7 +55,7 @@ func CreateOdsFile( h := &Header{ version: FileV0, - shareSize: uint16(len(eds.GetCell(0, 0))), // TODO: rsmt2d should expose this field + shareSize: share.Size, // TODO: rsmt2d should expose this field squareSize: uint16(eds.Width()), height: height, datahash: datahash, @@ -80,13 +80,9 @@ func writeOdsFile(w io.Writer, h *Header, eds *rsmt2d.ExtendedDataSquare) error return err } - for i := uint(0); i < eds.Width()/2; i++ { - for j := uint(0); j < eds.Width()/2; j++ { - // TODO: Implemented buffered write through io.CopyBuffer - shr := eds.GetCell(i, j) - if _, err := w.Write(shr); err != nil { - return err - } + for _, shr := range eds.FlattenedODS() { + if _, err := w.Write(shr); err != nil { + return err } } return nil @@ -163,7 +159,7 @@ func (f *OdsFile) readOds() error { return fmt.Errorf("discarding header: %w", err) } - square, err := readShares(f.hdr.ShareSize(), f.Size(), f.fl) + square, err := readShares(share.Size, f.Size(), f.fl) if err != nil { return fmt.Errorf("reading ods: %w", err) } diff --git a/share/store/file/square.go b/share/store/file/square.go index 3d8cb0801b..2eb08f0a15 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -41,16 +41,22 @@ func readShares(shareSize, edsSize int, reader io.Reader) (square, error) { buf := memPools.get(odsLn).getHalfAxis() defer memPools.get(odsLn).putHalfAxis(buf) + var total int + log.Info("start reading ods", "ods size", odsLn, "share size", shareSize, "buf size", len(buf)) for i := 0; i < odsLn; i++ { - if _, err := reader.Read(buf); err != nil { - return nil, err + n, err := reader.Read(buf) + if err != nil { + return nil, fmt.Errorf("reading share: %w, bytes read: %v", err, total+n) } + total += n for j := 0; j < odsLn; j++ { copy(square[i][j], buf[j*shareSize:(j+1)*shareSize]) } } + // TODO: remove this log + log.Info("read bytes", "total", total) return square, nil } diff --git a/share/store/store.go b/share/store/store.go index 7513f1d9d0..0768a29417 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -30,6 +30,7 @@ var ( // TODO(@walldiss): // - handle blocks duplicates(same hash,different height) +// - periodically store empty heights // - persist store stats like amount of files, file types, avg file size etc in a file // - handle corrupted files // - maintain in-memory missing files index / bloom-filter to fast return for not stored files. From dd8a225db7ffebca7a3ed305ab71f76596d8126a Mon Sep 17 00:00:00 2001 From: Vlad Date: Sat, 10 Feb 2024 06:22:50 +0500 Subject: [PATCH 083/132] remove extra alloc in ods reader --- share/store/file/square.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/share/store/file/square.go b/share/store/file/square.go index 2eb08f0a15..02125b35f4 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -1,6 +1,7 @@ package file import ( + "bufio" "bytes" "context" "fmt" @@ -33,25 +34,23 @@ func ReadEds(_ context.Context, r io.Reader, edsSize int) (*rsmt2d.ExtendedDataS // readShares reads shares from the reader and returns a square. It assumes that the reader is // positioned at the beginning of the shares. It knows the size of the shares and the size of the // square, so reads from reader are limited to exactly the amount of data required. 
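The hunk below drops the pooled half-axis staging buffer: io.ReadFull fills every preallocated share slice in place, with a bufio.Reader in front of the raw reader to keep read syscalls coarse. One plausible way to resolve the buffer-size TODO the rewrite leaves behind is to size the reader to exactly one ODS row, so each inner loop drains one full row per refill (a sketch, assuming fixed share.Size shares):

// one ODS row is odsLn shares of share.Size bytes each
br := bufio.NewReaderSize(r, odsLn*share.Size)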
-func readShares(shareSize, edsSize int, reader io.Reader) (square, error) {
+func readShares(shareSize, edsSize int, r io.Reader) (square, error) {
 	odsLn := edsSize / 2
 
 	// get pre-allocated square and buffer from memPools
 	square := memPools.get(odsLn).square()
-	buf := memPools.get(odsLn).getHalfAxis()
-	defer memPools.get(odsLn).putHalfAxis(buf)
+	// TODO(@walldiss): find proper size for buffer
+	br := bufio.NewReader(r)
 
 	var total int
-	log.Info("start reading ods", "ods size", odsLn, "share size", shareSize, "buf size", len(buf))
+	log.Info("start reading ods", "ods size", odsLn, "share size", shareSize)
 	for i := 0; i < odsLn; i++ {
-		n, err := reader.Read(buf)
-		if err != nil {
-			return nil, fmt.Errorf("reading share: %w, bytes read: %v", err, total+n)
-		}
-
-		total += n
 		for j := 0; j < odsLn; j++ {
-			copy(square[i][j], buf[j*shareSize:(j+1)*shareSize])
+			n, err := io.ReadFull(br, square[i][j])
+			if err != nil {
+				return nil, fmt.Errorf("reading share: %w, bytes read: %v", err, total+n)
+			}
+			total += n
 		}
 	}

From 2f7cb654af68e1f6444a1108a31a356b5a20d82b Mon Sep 17 00:00:00 2001
From: Vlad
Date: Sun, 11 Feb 2024 01:19:17 +0500
Subject: [PATCH 084/132] remove height method from EdsFile

---
 share/store/file/cache_file_test.go |  2 +-
 share/store/file/eds_file.go        |  2 --
 share/store/file/file_closer.go     |  7 -------
 share/store/file/file_header.go     | 13 ++++--------
 share/store/file/mem_file.go        |  7 +------
 share/store/file/ods_file.go        |  6 ------
 share/store/file/ods_file_test.go   | 10 +++++-----
 7 files changed, 11 insertions(+), 36 deletions(-)

diff --git a/share/store/file/cache_file_test.go b/share/store/file/cache_file_test.go
index 582a7d04c9..c406bbfcec 100644
--- a/share/store/file/cache_file_test.go
+++ b/share/store/file/cache_file_test.go
@@ -12,7 +12,7 @@ func TestCacheFile(t *testing.T) {
 	size := 8
 	newFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile {
 		path := t.TempDir() + "/testfile"
-		fl, err := CreateOdsFile(path, 1, []byte{}, eds)
+		fl, err := CreateOdsFile(path, []byte{}, eds)
 		require.NoError(t, err)
 		return NewCacheFile(fl)
 	}
diff --git a/share/store/file/eds_file.go b/share/store/file/eds_file.go
index 4556cede9a..7135563ac5 100644
--- a/share/store/file/eds_file.go
+++ b/share/store/file/eds_file.go
@@ -18,8 +18,6 @@ type EdsFile interface {
 	Reader() (io.Reader, error)
 	// Size returns square size of the file.
 	Size() int
-	// Height returns height of the file.
-	Height() uint64
 	// DataHash returns data hash of the file.
 	DataHash() share.DataHash
 	// Share returns share and corresponding proof for the given axis and share index in this axis.
diff --git a/share/store/file/file_closer.go b/share/store/file/file_closer.go
index 96880264ca..15b853dec7 100644
--- a/share/store/file/file_closer.go
+++ b/share/store/file/file_closer.go
@@ -48,13 +48,6 @@ func (c *closeOnceFile) Size() int {
 	return c.f.Size()
 }
 
-func (c *closeOnceFile) Height() uint64 {
-	if c.closed.Load() {
-		return 0
-	}
-	return c.f.Height()
-}
-
 func (c *closeOnceFile) DataHash() share.DataHash {
 	if c.closed.Load() {
 		return nil
diff --git a/share/store/file/file_header.go b/share/store/file/file_header.go
index 8eaf7f03be..287281a628 100644
--- a/share/store/file/file_header.go
+++ b/share/store/file/file_header.go
@@ -17,7 +17,8 @@ type Header struct {
 	shareSize  uint16
 	squareSize uint16
 
-	height   uint64
+	// TODO(@walldiss) store all heights in the header? 
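With the height field gone, the serialization hunks that follow shrink the encoded prefix. The resulting v0 layout inside the fixed 64-byte HeaderSize, as WriteTo and ReadHeader below encode it (bytes 37 and up are left as padding):

//   buf[0]     version
//   buf[1:3]   shareSize  (little-endian uint16)
//   buf[3:5]   squareSize (little-endian uint16)
//   buf[5:37]  datahash   (32 bytes)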
+ //height uint64 datahash share.DataHash } @@ -39,10 +40,6 @@ func (h *Header) SquareSize() int { return int(h.squareSize) } -func (h *Header) Height() uint64 { - return h.height -} - func (h *Header) DataHash() share.DataHash { return h.datahash } @@ -52,8 +49,7 @@ func (h *Header) WriteTo(w io.Writer) (int64, error) { buf[0] = byte(h.version) binary.LittleEndian.PutUint16(buf[1:3], h.shareSize) binary.LittleEndian.PutUint16(buf[3:5], h.squareSize) - binary.LittleEndian.PutUint64(buf[5:13], h.height) - copy(buf[13:45], h.datahash) + copy(buf[5:37], h.datahash) _, err := io.Copy(w, bytes.NewBuffer(buf)) return HeaderSize, err } @@ -69,10 +65,9 @@ func ReadHeader(r io.Reader) (*Header, error) { version: fileVersion(buf[0]), shareSize: binary.LittleEndian.Uint16(buf[1:3]), squareSize: binary.LittleEndian.Uint16(buf[3:5]), - height: binary.LittleEndian.Uint64(buf[5:13]), datahash: make([]byte, 32), } - copy(h.datahash, buf[13:45]) + copy(h.datahash, buf[5:37]) return h, err } diff --git a/share/store/file/mem_file.go b/share/store/file/mem_file.go index 5e15a6ce2b..99ccbafa1b 100644 --- a/share/store/file/mem_file.go +++ b/share/store/file/mem_file.go @@ -16,8 +16,7 @@ import ( var _ EdsFile = (*MemFile)(nil) type MemFile struct { - height uint64 - Eds *rsmt2d.ExtendedDataSquare + Eds *rsmt2d.ExtendedDataSquare } func (f *MemFile) Close() error { @@ -40,10 +39,6 @@ func (f *MemFile) readOds() square { return s } -func (f *MemFile) Height() uint64 { - return f.height -} - func (f *MemFile) DataHash() share.DataHash { dah, _ := da.NewDataAvailabilityHeader(f.Eds) return dah.Hash() diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go index f545ea59ee..c00f6fd07a 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -45,7 +45,6 @@ func OpenOdsFile(path string) (*OdsFile, error) { func CreateOdsFile( path string, - height uint64, datahash share.DataHash, eds *rsmt2d.ExtendedDataSquare) (*OdsFile, error) { f, err := os.Create(path) @@ -57,7 +56,6 @@ func CreateOdsFile( version: FileV0, shareSize: share.Size, // TODO: rsmt2d should expose this field squareSize: uint16(eds.Width()), - height: height, datahash: datahash, } @@ -99,10 +97,6 @@ func (f *OdsFile) Close() error { return f.fl.Close() } -func (f *OdsFile) Height() uint64 { - return f.hdr.Height() -} - func (f *OdsFile) DataHash() share.DataHash { return f.hdr.DataHash() } diff --git a/share/store/file/ods_file_test.go b/share/store/file/ods_file_test.go index ee7c30f7d3..7f190c5c3a 100644 --- a/share/store/file/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -15,7 +15,7 @@ import ( func TestCreateOdsFile(t *testing.T) { path := t.TempDir() + "/testfile" edsIn := edstest.RandEDS(t, 8) - _, err := CreateOdsFile(path, 1, []byte{}, edsIn) + _, err := CreateOdsFile(path, []byte{}, edsIn) require.NoError(t, err) f, err := OpenOdsFile(path) @@ -29,7 +29,7 @@ func TestOdsFile(t *testing.T) { size := 8 createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { path := t.TempDir() + "/testfile" - fl, err := CreateOdsFile(path, 1, []byte{}, eds) + fl, err := CreateOdsFile(path, []byte{}, eds) require.NoError(t, err) return fl } @@ -58,7 +58,7 @@ func TestOdsFile(t *testing.T) { func TestReadOdsFile(t *testing.T) { eds := edstest.RandEDS(t, 8) path := t.TempDir() + "/testfile" - f, err := CreateOdsFile(path, 1, []byte{}, eds) + f, err := CreateOdsFile(path, []byte{}, eds) require.NoError(t, err) err = f.readOds() @@ -105,7 +105,7 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { newFile := 
func(size int) EdsFile { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, 1, []byte{}, eds) + f, err := CreateOdsFile(path, []byte{}, eds) require.NoError(b, err) return f } @@ -131,7 +131,7 @@ func BenchmarkShareFromOdsFile(b *testing.B) { newFile := func(size int) EdsFile { eds := edstest.RandEDS(b, size) path := dir + "/testfile" - f, err := CreateOdsFile(path, 1, []byte{}, eds) + f, err := CreateOdsFile(path, []byte{}, eds) require.NoError(b, err) return f } From db0bb8829a0c105558f8207d45989b1d0b9bedfa Mon Sep 17 00:00:00 2001 From: Vlad Date: Sun, 11 Feb 2024 01:19:43 +0500 Subject: [PATCH 085/132] handle blocks duplicates in store --- share/availability/full/availability.go | 6 +- share/store/store.go | 107 +++++++++++++++++++----- share/store/store_test.go | 35 ++++++++ 3 files changed, 126 insertions(+), 22 deletions(-) diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index 1dbe845700..b3da0a8c0c 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -59,8 +59,8 @@ func (fa *ShareAvailability) Stop(context.Context) error { // enough Shares from the network. func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { // a hack to avoid loading the whole EDS in mem if we store it already. - if ok, _ := fa.store.HasByHeight(ctx, header.Height()); ok { - return nil + if ok, _ := fa.store.HasByHash(ctx, header.DAH.Hash()); ok { + return fa.store.LinkHeight(ctx, header.DAH.Hash(), header.Height()) } eds, err := fa.getEds(ctx, header) @@ -91,7 +91,7 @@ func (fa *ShareAvailability) getEds(ctx context.Context, header *header.Extended log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error()) var byzantineErr *byzantine.ErrByzantine if errors.Is(err, share.ErrNotFound) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) { - return nil, share.ErrNotAvailable + return nil, fmt.Errorf("%w:%w", share.ErrNotAvailable, err) } return nil, err } diff --git a/share/store/store.go b/share/store/store.go index 0768a29417..6ee4a95e08 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -7,7 +7,9 @@ import ( "fmt" "io" "os" + "strconv" "sync" + "syscall" "time" logging "github.com/ipfs/go-log/v2" @@ -29,7 +31,6 @@ var ( ) // TODO(@walldiss): -// - handle blocks duplicates(same hash,different height) // - periodically store empty heights // - persist store stats like amount of files, file types, avg file size etc in a file // - handle corrupted files @@ -85,12 +86,12 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { return nil, fmt.Errorf("ensure empty heights file: %w", err) } - recentBlocksCache, err := cache.NewFileCache("recent", params.RecentBlocksCacheSize) + recentBlocksCache, err := cache.NewFileCache("recent", 1) if err != nil { return nil, fmt.Errorf("failed to create recent blocks cache: %w", err) } - blockstoreCache, err := cache.NewFileCache("blockstore", params.BlockstoreCacheSize) + blockstoreCache, err := cache.NewFileCache("blockstore", 1) if err != nil { return nil, fmt.Errorf("failed to create blockstore cache: %w", err) } @@ -135,26 +136,44 @@ func (s *Store) Put( return s.getByHeight(height) } - path := s.basepath + blocksPath + datahash.String() - file, err := file.CreateOdsFile(path, height, datahash, square) + filePath := s.basepath + blocksPath + datahash.String() + f, err := s.createFile(filePath, datahash, square) if 
err != nil { s.metrics.observePut(ctx, time.Since(tNow), square.Width(), true) - return nil, fmt.Errorf("creating ODS file: %w", err) + return nil, fmt.Errorf("creating file: %w", err) } // create hard link with height as name - err = os.Link(path, s.basepath+heightsPath+fmt.Sprintf("%d", height)) + err = s.createHeightLink(datahash, height) if err != nil { - s.metrics.observePut(ctx, time.Since(tNow), square.Width(), true) - return nil, fmt.Errorf("creating hard link: %w", err) + s.metrics.observePut(ctx, time.Since(tNow), square.Width(), false) + return nil, fmt.Errorf("linking height: %w", err) } - s.metrics.observePut(ctx, time.Since(tNow), square.Width(), false) - // put in recent cache - f, err := s.cache.First().GetOrLoad(ctx, height, edsLoader(file)) + // put file in recent cache + f, err = s.cache.First().GetOrLoad(ctx, height, fileLoader(f)) if err != nil { - return nil, fmt.Errorf("putting in cache: %w", err) + log.Warnf("failed to put file in recent cache: %s", err) + } + return f, nil +} + +func (s *Store) createFile(filePath string, datahash share.DataHash, square *rsmt2d.ExtendedDataSquare) (file.EdsFile, error) { + // check if file with the same hash already exists + f, err := s.getByHash(datahash) + if err == nil { + return f, nil + } + + if !errors.Is(err, ErrNotFound) { + return nil, fmt.Errorf("getting by hash: %w", err) + } + + // create ODS file + f, err = file.CreateOdsFile(filePath, datahash, square) + if err != nil { + return nil, fmt.Errorf("creating ODS file: %w", err) } return f, nil } @@ -189,6 +208,36 @@ func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { return odsFile, nil } +func (s *Store) LinkHeight(_ context.Context, datahash share.DataHash, height uint64) error { + lock := s.stripLock.byDatahashAndHeight(datahash, height) + lock.lock() + defer lock.unlock() + + if datahash.IsEmptyRoot() { + s.addEmptyHeight(height) + return nil + } + + // short circuit if link exists + if has, _ := s.hasByHeight(height); has { + return nil + } + + return s.createHeightLink(datahash, height) +} + +func (s *Store) createHeightLink(datahash share.DataHash, height uint64) error { + filePath := s.basepath + blocksPath + datahash.String() + // create hard link with height as name + linkPath := s.basepath + heightsPath + strconv.Itoa(int(height)) + err := os.Link(filePath, linkPath) + if err != nil { + return fmt.Errorf("creating hard link: %w", err) + } + + return nil +} + func (s *Store) GetByHeight(ctx context.Context, height uint64) (file.EdsFile, error) { lock := s.stripLock.byHeight(height) lock.RLock() @@ -282,11 +331,13 @@ func (s *Store) Remove(ctx context.Context, height uint64) error { func (s *Store) remove(height uint64) error { // short circuit if file not exists f, err := s.getByHeight(height) - if errors.Is(err, ErrNotFound) { - return nil + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil + } + return fmt.Errorf("getting by height: %w", err) } - hashStr := f.DataHash().String() if err = f.Close(); err != nil { return fmt.Errorf("closing file on removal: %w", err) } @@ -295,19 +346,28 @@ func (s *Store) remove(height uint64) error { return fmt.Errorf("removing from cache: %w", err) } + // remove hard link by height heightPath := s.basepath + heightsPath + fmt.Sprintf("%d", height) if err = os.Remove(heightPath); err != nil { return fmt.Errorf("removing by height: %w", err) } + hashStr := f.DataHash().String() hashPath := s.basepath + blocksPath + hashStr - if err = os.Remove(hashPath); err != nil { - return 
fmt.Errorf("removing by hash: %w", err) + count, err := linksCount(hashPath) + if err != nil { + return fmt.Errorf("counting links: %w", err) + } + if count == 1 { + err = os.Remove(hashPath) + if err != nil { + return fmt.Errorf("removing by hash: %w", err) + } } return nil } -func edsLoader(f file.EdsFile) cache.OpenFileFn { +func fileLoader(f file.EdsFile) cache.OpenFileFn { return func(ctx context.Context) (file.EdsFile, error) { return f, nil } @@ -371,6 +431,15 @@ func pathExists(path string) (bool, error) { return true, nil } +func linksCount(path string) (int, error) { + info, err := os.Stat(path) + if err != nil { + return 0, fmt.Errorf("checking file: %w", err) + } + + return int(info.Sys().(*syscall.Stat_t).Nlink), nil +} + func (s *Store) storeEmptyHeights() error { file, err := os.OpenFile(s.basepath+emptyHeights, os.O_WRONLY, os.ModePerm) if err != nil { diff --git a/share/store/store_test.go b/share/store/store_test.go index 9c809d0eaf..7ea860a945 100644 --- a/share/store/store_test.go +++ b/share/store/store_test.go @@ -85,6 +85,41 @@ func TestEDSStore(t *testing.T) { require.NoError(t, f.Close()) }) + t.Run("Put eds with same hash for different height", func(t *testing.T) { + eds, dah := randomEDS(t) + h1 := height.Add(1) + + f, err := edsStore.Put(ctx, dah.Hash(), h1, eds) + require.NoError(t, err) + require.NoError(t, f.Close()) + + h2 := height.Add(1) + f, err = edsStore.Put(ctx, dah.Hash(), h2, eds) + require.NoError(t, err) + require.NoError(t, f.Close()) + + // both heights should be available + has, err := edsStore.HasByHeight(ctx, h1) + require.NoError(t, err) + require.True(t, has) + + has, err = edsStore.HasByHeight(ctx, h2) + require.NoError(t, err) + require.True(t, has) + + // removing one height should not affect the other + err = edsStore.Remove(ctx, h1) + require.NoError(t, err) + + has, err = edsStore.HasByHeight(ctx, h1) + require.NoError(t, err) + require.False(t, has) + + has, err = edsStore.HasByHeight(ctx, h2) + require.NoError(t, err) + require.True(t, has) + }) + t.Run("GetByHeight", func(t *testing.T) { eds, dah := randomEDS(t) height := height.Add(1) From aeafcadcbb3df9c3c88910a4f83fee63ded70b53 Mon Sep 17 00:00:00 2001 From: Vlad Date: Sun, 11 Feb 2024 01:43:36 +0500 Subject: [PATCH 086/132] fix core tests --- core/exchange_test.go | 10 ++++------ core/listener_test.go | 13 +++++-------- share/shwap/handler.go | 2 +- share/store/blockstore.go | 10 +++++----- 4 files changed, 15 insertions(+), 20 deletions(-) diff --git a/core/exchange_test.go b/core/exchange_test.go index c43084c57d..c0153bc79f 100644 --- a/core/exchange_test.go +++ b/core/exchange_test.go @@ -2,18 +2,16 @@ package core import ( "context" + "github.com/celestiaorg/celestia-node/share/store" "testing" "time" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/test/util/testnode" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/eds" ) func TestCoreExchange_RequestHeaders(t *testing.T) { @@ -60,11 +58,11 @@ func createCoreFetcher(t *testing.T, cfg *testnode.Config) (*BlockFetcher, testn return NewBlockFetcher(cctx.Client), cctx } -func createStore(t *testing.T) *eds.Store { +func createStore(t *testing.T) *store.Store { t.Helper() - storeCfg := eds.DefaultParameters() - store, err := eds.NewStore(storeCfg, t.TempDir(), ds_sync.MutexWrap(ds.NewMapDatastore())) + storeCfg := store.DefaultParameters() + 
store, err := store.NewStore(storeCfg, t.TempDir()) require.NoError(t, err) return store } diff --git a/core/listener_test.go b/core/listener_test.go index bf84c07b41..5a5db3e295 100644 --- a/core/listener_test.go +++ b/core/listener_test.go @@ -3,6 +3,7 @@ package core import ( "bytes" "context" + "github.com/celestiaorg/celestia-node/share/store" "testing" "time" @@ -18,7 +19,6 @@ import ( "github.com/celestiaorg/celestia-node/header" nodep2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) @@ -84,16 +84,13 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { eds := createEdsPubSub(ctx, t) store := createStore(t) - err := store.Start(ctx) - require.NoError(t, err) t.Cleanup(func() { - err = store.Stop(ctx) - require.NoError(t, err) + require.NoError(t, store.Close()) }) // create Listener and start listening cl := createListener(ctx, t, fetcher, ps0, eds, store) - err = cl.Start(ctx) + err := cl.Start(ctx) require.NoError(t, err) // listen for eds hashes broadcasted through eds-sub and ensure store has @@ -114,7 +111,7 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { continue } - has, err := store.Has(ctx, msg.DataHash) + has, err := store.HasByHash(ctx, msg.DataHash) require.NoError(t, err) require.True(t, has) } @@ -165,7 +162,7 @@ func createListener( fetcher *BlockFetcher, ps *pubsub.PubSub, edsSub *shrexsub.PubSub, - store *eds.Store, + store *store.Store, ) *Listener { p2pSub, err := p2p.NewSubscriber[*header.ExtendedHeader](ps, header.MsgID, p2p.WithSubscriberNetworkID(networkID)) require.NoError(t, err) diff --git a/share/shwap/handler.go b/share/shwap/handler.go index 220555191f..859c087d23 100644 --- a/share/shwap/handler.go +++ b/share/shwap/handler.go @@ -10,7 +10,7 @@ import ( "github.com/celestiaorg/celestia-node/share/store/file" ) -// BlockBuilder is an interface for building blocks from files. +// BlockBuilder is an interface for building response blocks from request and file. 
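+// Implementations (e.g. RowID, SampleID, DataID) carry the target height of the request and read the needed shares from the given EdsFile to assemble the block.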
type BlockBuilder interface { // TODO(@walldiss): don't like this name, but it collides with field name in RowID GetHeight() uint64 diff --git a/share/store/blockstore.go b/share/store/blockstore.go index 1136f11f48..4678636091 100644 --- a/share/store/blockstore.go +++ b/share/store/blockstore.go @@ -40,13 +40,13 @@ func NewBlockstore(store *Store, ds datastore.Batching) *Blockstore { } func (bs *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { - h, err := shwap.BlockBuilderFromCID(cid) + req, err := shwap.BlockBuilderFromCID(cid) if err != nil { return false, fmt.Errorf("while getting height from CID: %w", err) } // check cache first - height := h.GetHeight() + height := req.GetHeight() _, err = bs.store.cache.Get(height) if err == nil { return true, nil @@ -69,15 +69,15 @@ func (bs *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { } func (bs *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - h, err := shwap.BlockBuilderFromCID(cid) + req, err := shwap.BlockBuilderFromCID(cid) if err != nil { return nil, fmt.Errorf("while getting height from CID: %w", err) } - height := h.GetHeight() + height := req.GetHeight() f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.store.openFileByHeight(height)) if err == nil { - return h.BlockFromFile(ctx, f) + return req.BlockFromFile(ctx, f) } if errors.Is(err, ErrNotFound) { From ae6a0af64dacfdd56be86a76b9e10a926d22a0f1 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 12:32:46 +0500 Subject: [PATCH 087/132] add test blockstore for shwap and fix shwap roundtrip tests --- share/shwap/blockstore.go | 97 +++++++ share/shwap/shwap_test.go | 537 ++++++++++++++++++++------------------ 2 files changed, 378 insertions(+), 256 deletions(-) create mode 100644 share/shwap/blockstore.go diff --git a/share/shwap/blockstore.go b/share/shwap/blockstore.go new file mode 100644 index 0000000000..56e3e11bd6 --- /dev/null +++ b/share/shwap/blockstore.go @@ -0,0 +1,97 @@ +package shwap + +import ( + "context" + "fmt" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/store/file" +) + +type TestBlockstore struct { + t *testing.T + lastHeight uint64 + blocks map[uint64]*file.MemFile +} + +func NewTestBlockstore(t *testing.T) *TestBlockstore { + return &TestBlockstore{ + t: t, + lastHeight: 1, + blocks: make(map[uint64]*file.MemFile), + } +} + +func (t *TestBlockstore) AddEds(eds *rsmt2d.ExtendedDataSquare) (height uint64) { + for { + if _, ok := t.blocks[t.lastHeight]; !ok { + break + } + t.lastHeight++ + } + t.blocks[t.lastHeight] = &file.MemFile{Eds: eds} + return t.lastHeight +} + +func (t *TestBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + //TODO implement me + panic("not implemented") +} + +func (t *TestBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + req, err := BlockBuilderFromCID(cid) + if err != nil { + return false, fmt.Errorf("while getting height from CID: %w", err) + } + + _, ok := t.blocks[req.GetHeight()] + return ok, nil +} + +func (t *TestBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + req, err := BlockBuilderFromCID(cid) + if err != nil { + return nil, fmt.Errorf("while getting height from CID: %w", err) + } + + f, ok := t.blocks[req.GetHeight()] + if !ok { + return nil, ipld.ErrNotFound{Cid: cid} + } + return req.BlockFromFile(ctx, f) +} + +func (t 
*TestBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + req, err := BlockBuilderFromCID(cid) + if err != nil { + return 0, fmt.Errorf("while getting height from CID: %w", err) + } + + f, ok := t.blocks[req.GetHeight()] + if !ok { + return 0, ipld.ErrNotFound{Cid: cid} + } + return f.Size(), nil +} + +func (t *TestBlockstore) Put(ctx context.Context, block blocks.Block) error { + panic("not implemented") +} + +func (t *TestBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + panic("not implemented") +} + +func (t *TestBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + panic("not implemented") +} + +func (t *TestBlockstore) HashOnRead(enabled bool) { + panic("not implemented") +} diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index c567c7c27c..22a3b70d00 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -1,258 +1,283 @@ package shwap -// TODO(@walldiss): those tests works, but wants to imports with edsStore, when dependency is reversed -// - need to rework to test over local blockstore - -//// TestSampleRoundtripGetBlock tests full protocol round trip of: -//// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. -//func TestSampleRoundtripGetBlock(t *testing.T) { -// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) -// defer cancel() -// -// square := edstest.RandEDS(t, 8) -// root, err := share.NewRoot(square) -// require.NoError(t, err) -// -// b := edsBlockstore(ctx, t, square) -// client := remoteClient(ctx, t, b) -// -// width := int(square.Width()) -// for i := 0; i < width*width; i++ { -// smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col -// require.NoError(t, err) -// -// sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { -// return sample.Verify(root) -// }) -// -// cid := smpl.Cid() -// blkOut, err := client.GetBlock(ctx, cid) -// require.NoError(t, err) -// assert.EqualValues(t, cid, blkOut.Cid()) -// -// smpl, err = SampleFromBlock(blkOut) -// assert.NoError(t, err) -// -// err = smpl.Verify(root) -// assert.NoError(t, err) -// } -//} -// -//// TODO: Debug why is it flaky -//func TestSampleRoundtripGetBlocks(t *testing.T) { -// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) -// defer cancel() -// -// square := edstest.RandEDS(t, 8) -// root, err := share.NewRoot(square) -// require.NoError(t, err) -// b := edsBlockstore(ctx, t, square) -// client := remoteClient(ctx, t, b) -// -// set := cid.NewSet() -// width := int(square.Width()) -// for i := 0; i < width*width; i++ { -// smpl, err := NewSampleFromEDS(RowProofType, i, square, 1) // TODO: Col -// require.NoError(t, err) -// set.Add(smpl.Cid()) -// -// sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { -// return sample.Verify(root) -// }) -// } -// -// blks, err := client.GetBlocks(ctx, set.Keys()) -// require.NoError(t, err) -// -// err = set.ForEach(func(c cid.Cid) error { -// select { -// case blk := <-blks: -// assert.True(t, set.Has(blk.Cid())) -// -// smpl, err := SampleFromBlock(blk) -// assert.NoError(t, err) -// -// err = smpl.Verify(root) // bitswap already performed validation and this is only for testing -// assert.NoError(t, err) -// case <-ctx.Done(): -// return ctx.Err() -// } -// return nil -// }) -// assert.NoError(t, err) -//} -// -//func TestRowRoundtripGetBlock(t *testing.T) { -// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) -// defer cancel() -// -// square := 
edstest.RandEDS(t, 16) -// root, err := share.NewRoot(square) -// require.NoError(t, err) -// b := edsBlockstore(ctx, t, square) -// client := remoteClient(ctx, t, b) -// -// width := int(square.Width()) -// for i := 0; i < width; i++ { -// row, err := NewRowFromEDS(1, i, square) -// require.NoError(t, err) -// -// rowVerifiers.Add(row.RowID, func(row Row) error { -// return row.Verify(root) -// }) -// -// cid := row.Cid() -// blkOut, err := client.GetBlock(ctx, cid) -// require.NoError(t, err) -// assert.EqualValues(t, cid, blkOut.Cid()) -// -// row, err = RowFromBlock(blkOut) -// assert.NoError(t, err) -// -// err = row.Verify(root) -// assert.NoError(t, err) -// } -//} -// -//func TestRowRoundtripGetBlocks(t *testing.T) { -// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) -// defer cancel() -// -// square := edstest.RandEDS(t, 16) -// root, err := share.NewRoot(square) -// require.NoError(t, err) -// b := edsBlockstore(ctx, t, square) -// client := remoteClient(ctx, t, b) -// -// set := cid.NewSet() -// width := int(square.Width()) -// for i := 0; i < width; i++ { -// row, err := NewRowFromEDS(1, i, square) -// require.NoError(t, err) -// set.Add(row.Cid()) -// -// rowVerifiers.Add(row.RowID, func(row Row) error { -// return row.Verify(root) -// }) -// } -// -// blks, err := client.GetBlocks(ctx, set.Keys()) -// require.NoError(t, err) -// -// err = set.ForEach(func(c cid.Cid) error { -// select { -// case blk := <-blks: -// assert.True(t, set.Has(blk.Cid())) -// -// row, err := RowFromBlock(blk) -// assert.NoError(t, err) -// -// err = row.Verify(root) -// assert.NoError(t, err) -// case <-ctx.Done(): -// return ctx.Err() -// } -// return nil -// }) -// assert.NoError(t, err) -//} -// -//func TestDataRoundtripGetBlock(t *testing.T) { -// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) -// defer cancel() -// -// namespace := sharetest.RandV0Namespace() -// square, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) -// b := edsBlockstore(ctx, t, square) -// client := remoteClient(ctx, t, b) -// -// nds, err := NewDataFromEDS(square, 1, namespace) -// require.NoError(t, err) -// -// for _, nd := range nds { -// dataVerifiers.Add(nd.DataID, func(data Data) error { -// return data.Verify(root) -// }) -// -// cid := nd.Cid() -// blkOut, err := client.GetBlock(ctx, cid) -// require.NoError(t, err) -// assert.EqualValues(t, cid, blkOut.Cid()) -// -// ndOut, err := DataFromBlock(blkOut) -// assert.NoError(t, err) -// -// err = ndOut.Verify(root) -// assert.NoError(t, err) -// } -//} -// -//func TestDataRoundtripGetBlocks(t *testing.T) { -// ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) -// defer cancel() -// -// namespace := sharetest.RandV0Namespace() -// sqr, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) -// b := edsBlockstore(ctx, t, sqr) -// client := remoteClient(ctx, t, b) -// -// nds, err := NewDataFromEDS(sqr, 1, namespace) -// require.NoError(t, err) -// -// set := cid.NewSet() -// for _, nd := range nds { -// set.Add(nd.Cid()) -// -// dataVerifiers.Add(nd.DataID, func(data Data) error { -// return data.Verify(root) -// }) -// } -// -// blks, err := client.GetBlocks(ctx, set.Keys()) -// require.NoError(t, err) -// -// err = set.ForEach(func(c cid.Cid) error { -// select { -// case blk := <-blks: -// assert.True(t, set.Has(blk.Cid())) -// -// smpl, err := DataFromBlock(blk) -// assert.NoError(t, err) -// -// err = smpl.Verify(root) -// assert.NoError(t, err) -// case <-ctx.Done(): -// return 
ctx.Err() -// } -// return nil -// }) -// assert.NoError(t, err) -//} -// -//func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.Fetcher { -// net, err := mocknet.FullMeshLinked(2) -// require.NoError(t, err) -// -// dstore := dssync.MutexWrap(ds.NewMapDatastore()) -// routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{}) -// _ = bitswap.New( -// ctx, -// network.NewFromIpfsHost(net.Hosts()[0], routing), -// bstore, -// ) -// -// dstoreClient := dssync.MutexWrap(ds.NewMapDatastore()) -// bstoreClient := blockstore.NewBlockstore(dstoreClient) -// routingClient := offline.NewOfflineRouter(dstoreClient, record.NamespacedValidator{}) -// -// bitswapClient := bitswap.New( -// ctx, -// network.NewFromIpfsHost(net.Hosts()[1], routingClient), -// bstoreClient, -// ) -// -// err = net.ConnectAllButSelf() -// require.NoError(t, err) -// -// return bitswapClient -//} +import ( + "context" + "testing" + "time" + + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + "github.com/ipfs/boxo/routing/offline" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + record "github.com/libp2p/go-libp2p-record" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +// TestSampleRoundtripGetBlock tests full protocol round trip of: +// EDS -> Sample -> IPLDBlock -> BlockService -> Bitswap and in reverse. +func TestSampleRoundtripGetBlock(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + b := NewTestBlockstore(t) + eds := edstest.RandEDS(t, 8) + height := b.AddEds(eds) + root, err := share.NewRoot(eds) + require.NoError(t, err) + + client := remoteClient(ctx, t, b) + + width := int(eds.Width()) + for i := 0; i < width*width; i++ { + smpl, err := NewSampleFromEDS(RowProofType, i, eds, height) // TODO: Col + require.NoError(t, err) + + sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { + return sample.Verify(root) + }) + + cid := smpl.Cid() + blkOut, err := client.GetBlock(ctx, cid) + require.NoError(t, err) + require.EqualValues(t, cid, blkOut.Cid()) + + smpl, err = SampleFromBlock(blkOut) + require.NoError(t, err) + + err = smpl.Verify(root) + require.NoError(t, err) + } +} + +// TODO: Debug why is it flaky +func TestSampleRoundtripGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + b := NewTestBlockstore(t) + eds := edstest.RandEDS(t, 8) + height := b.AddEds(eds) + root, err := share.NewRoot(eds) + require.NoError(t, err) + client := remoteClient(ctx, t, b) + + set := cid.NewSet() + width := int(eds.Width()) + for i := 0; i < width*width; i++ { + smpl, err := NewSampleFromEDS(RowProofType, i, eds, height) // TODO: Col + require.NoError(t, err) + set.Add(smpl.Cid()) + + sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { + return sample.Verify(root) + }) + } + + blks, err := client.GetBlocks(ctx, set.Keys()) + require.NoError(t, err) + + err = set.ForEach(func(c cid.Cid) error { + select { + case blk := <-blks: + require.True(t, set.Has(blk.Cid())) + + smpl, err := SampleFromBlock(blk) + require.NoError(t, err) + + err = smpl.Verify(root) // bitswap already 
performed validation and this is only for testing + require.NoError(t, err) + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + require.NoError(t, err) +} + +func TestRowRoundtripGetBlock(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + b := NewTestBlockstore(t) + eds := edstest.RandEDS(t, 8) + height := b.AddEds(eds) + root, err := share.NewRoot(eds) + require.NoError(t, err) + client := remoteClient(ctx, t, b) + + width := int(eds.Width()) + for i := 0; i < width; i++ { + row, err := NewRowFromEDS(height, i, eds) + require.NoError(t, err) + + rowVerifiers.Add(row.RowID, func(row Row) error { + return row.Verify(root) + }) + + cid := row.Cid() + blkOut, err := client.GetBlock(ctx, cid) + require.NoError(t, err) + require.EqualValues(t, cid, blkOut.Cid()) + + row, err = RowFromBlock(blkOut) + require.NoError(t, err) + + err = row.Verify(root) + require.NoError(t, err) + } +} + +func TestRowRoundtripGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + b := NewTestBlockstore(t) + eds := edstest.RandEDS(t, 8) + height := b.AddEds(eds) + root, err := share.NewRoot(eds) + require.NoError(t, err) + client := remoteClient(ctx, t, b) + + set := cid.NewSet() + width := int(eds.Width()) + for i := 0; i < width; i++ { + row, err := NewRowFromEDS(height, i, eds) + require.NoError(t, err) + set.Add(row.Cid()) + + rowVerifiers.Add(row.RowID, func(row Row) error { + return row.Verify(root) + }) + } + + blks, err := client.GetBlocks(ctx, set.Keys()) + require.NoError(t, err) + + err = set.ForEach(func(c cid.Cid) error { + select { + case blk := <-blks: + require.True(t, set.Has(blk.Cid())) + + row, err := RowFromBlock(blk) + require.NoError(t, err) + + err = row.Verify(root) + require.NoError(t, err) + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + require.NoError(t, err) +} + +func TestDataRoundtripGetBlock(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + b := NewTestBlockstore(t) + namespace := sharetest.RandV0Namespace() + eds, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) + height := b.AddEds(eds) + client := remoteClient(ctx, t, b) + + nds, err := NewDataFromEDS(eds, height, namespace) + require.NoError(t, err) + + for _, nd := range nds { + dataVerifiers.Add(nd.DataID, func(data Data) error { + return data.Verify(root) + }) + + cid := nd.Cid() + blkOut, err := client.GetBlock(ctx, cid) + require.NoError(t, err) + require.EqualValues(t, cid, blkOut.Cid()) + + ndOut, err := DataFromBlock(blkOut) + require.NoError(t, err) + + err = ndOut.Verify(root) + require.NoError(t, err) + } +} + +func TestDataRoundtripGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + b := NewTestBlockstore(t) + namespace := sharetest.RandV0Namespace() + eds, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) + height := b.AddEds(eds) + client := remoteClient(ctx, t, b) + + nds, err := NewDataFromEDS(eds, height, namespace) + require.NoError(t, err) + + set := cid.NewSet() + for _, nd := range nds { + set.Add(nd.Cid()) + + dataVerifiers.Add(nd.DataID, func(data Data) error { + return data.Verify(root) + }) + } + + blks, err := client.GetBlocks(ctx, set.Keys()) + require.NoError(t, err) + + err = set.ForEach(func(c cid.Cid) error { + select { + case blk := <-blks: + require.True(t, set.Has(blk.Cid())) + + smpl, err := 
DataFromBlock(blk) + require.NoError(t, err) + + err = smpl.Verify(root) + require.NoError(t, err) + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + require.NoError(t, err) +} + +func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.Fetcher { + net, err := mocknet.FullMeshLinked(2) + require.NoError(t, err) + + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + routing := offline.NewOfflineRouter(dstore, record.NamespacedValidator{}) + _ = bitswap.New( + ctx, + network.NewFromIpfsHost(net.Hosts()[0], routing), + bstore, + ) + + dstoreClient := dssync.MutexWrap(ds.NewMapDatastore()) + bstoreClient := blockstore.NewBlockstore(dstoreClient) + routingClient := offline.NewOfflineRouter(dstoreClient, record.NamespacedValidator{}) + + bitswapClient := bitswap.New( + ctx, + network.NewFromIpfsHost(net.Hosts()[1], routingClient), + bstoreClient, + ) + + err = net.ConnectAllButSelf() + require.NoError(t, err) + + return bitswapClient +} From 9a0cc1de04b7d8d19ab3ea9a869cdde350adbe33 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 12:34:37 +0500 Subject: [PATCH 088/132] close files inside blockstore instead of shwap handlers --- share/availability/full/availability.go | 8 +++++--- share/shwap/data_id.go | 6 ------ share/shwap/row_id.go | 6 ------ share/shwap/sample_id.go | 6 ------ share/store/blockstore.go | 26 +++++++++++++++++-------- 5 files changed, 23 insertions(+), 29 deletions(-) diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index b3da0a8c0c..4dca52e038 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/rsmt2d" logging "github.com/ipfs/go-log/v2" + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" "github.com/celestiaorg/celestia-node/share/p2p/discovery" @@ -24,7 +25,8 @@ var log = logging.Logger("share/full") type ShareAvailability struct { store *store.Store getter share.Getter - disc *discovery.Discovery + // TODO(@walldiss): discovery should be managed by nodebuilder, not availability + disc *discovery.Discovery cancel context.CancelFunc } diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index 5a12b04734..5c31c4b920 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -133,11 +133,5 @@ func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block if err != nil { return nil, fmt.Errorf("while coverting Data to IPLD block: %w", err) } - - err = f.Close() - if err != nil { - return nil, fmt.Errorf("while closing ODS file: %w", err) - } - return blk, nil } diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index a831e85b63..6df32f8b33 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -139,11 +139,5 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) } - - err = f.Close() - if err != nil { - return nil, fmt.Errorf("while closing EDS file: %w", err) - } - return blk, nil } diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 28ff9efa76..dc1df87302 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -125,11 
+125,5 @@ func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.B if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) } - - err = f.Close() - if err != nil { - return nil, fmt.Errorf("while closing ODS file: %w", err) - } - return blk, nil } diff --git a/share/store/blockstore.go b/share/store/blockstore.go index 4678636091..b711919c0f 100644 --- a/share/store/blockstore.go +++ b/share/store/blockstore.go @@ -13,6 +13,7 @@ import ( "github.com/ipfs/go-datastore/namespace" ipld "github.com/ipfs/go-ipld-format" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share/shwap" ) @@ -42,7 +43,7 @@ func NewBlockstore(store *Store, ds datastore.Batching) *Blockstore { func (bs *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { req, err := shwap.BlockBuilderFromCID(cid) if err != nil { - return false, fmt.Errorf("while getting height from CID: %w", err) + return false, fmt.Errorf("get height from CID: %w", err) } // check cache first @@ -52,16 +53,17 @@ func (bs *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { return true, nil } - _, err = bs.store.HasByHeight(ctx, height) + has, err := bs.store.HasByHeight(ctx, height) if err == nil { - return true, nil + return has, nil } if !errors.Is(err, ErrNotFound) { - return false, fmt.Errorf("failed to get file: %w", err) + return false, fmt.Errorf("has file: %w", err) } // key wasn't found in top level blockstore, but could be in datastore while being reconstructed dsHas, dsErr := bs.ds.Has(ctx, dshelp.MultihashToDsKey(cid.Hash())) + // TODO(@walldiss): Only specific error should be treated as missing block, otherwise return error if dsErr != nil { return false, nil } @@ -77,6 +79,7 @@ func (bs *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error height := req.GetHeight() f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.store.openFileByHeight(height)) if err == nil { + defer utils.CloseAndLog(log, "file", f) return req.BlockFromFile(ctx, f) } @@ -90,7 +93,7 @@ func (bs *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error return nil, ipld.ErrNotFound{Cid: cid} } - log.Debugf("failed to get blockstore for cid %s: %s", cid, err) + log.Debugf("get blockstore for cid %s: %s", cid, err) return nil, err } @@ -99,12 +102,19 @@ func (bs *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { // allocating Sample's block.Block.
// NOTE:Bitswap uses GetSize also to determine if we have content stored or not // so simply returning constant size is not an option - blk, err := bs.Get(ctx, cid) + req, err := shwap.BlockBuilderFromCID(cid) + if err != nil { + return 0, fmt.Errorf("get height from CID: %w", err) + } + + height := req.GetHeight() + f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.store.openFileByHeight(height)) if err != nil { - return 0, err + return 0, fmt.Errorf("get file: %w", err) } + defer utils.CloseAndLog(log, "file", f) - return len(blk.RawData()), nil + return f.Size(), nil } func (bs *Blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { From 72c4d5ecfdf3349b1164aca3852500f17a7619dc Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 12:35:45 +0500 Subject: [PATCH 089/132] sort imports --- core/exchange.go | 5 +++-- core/exchange_test.go | 2 +- core/listener.go | 5 +++-- core/listener_test.go | 2 +- header/headertest/fraud/testing.go | 2 +- nodebuilder/store_test.go | 1 + share/p2p/shrexnd/exchange_test.go | 2 +- share/store/file/cache_file.go | 3 ++- share/store/file/eds_file.go | 3 ++- 9 files changed, 15 insertions(+), 10 deletions(-) diff --git a/core/exchange.go b/core/exchange.go index 22d4996b56..1f06189ff2 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -4,14 +4,15 @@ import ( "bytes" "context" "fmt" - "github.com/celestiaorg/celestia-node/libs/utils" "time" "golang.org/x/sync/errgroup" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share/store" - libhead "github.com/celestiaorg/go-header" ) const concurrencyLimit = 4 diff --git a/core/exchange_test.go b/core/exchange_test.go index c0153bc79f..45ade53561 100644 --- a/core/exchange_test.go +++ b/core/exchange_test.go @@ -2,7 +2,6 @@ package core import ( "context" - "github.com/celestiaorg/celestia-node/share/store" "testing" "time" @@ -12,6 +11,7 @@ import ( "github.com/celestiaorg/celestia-app/test/util/testnode" "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/store" ) func TestCoreExchange_RequestHeaders(t *testing.T) { diff --git a/core/listener.go b/core/listener.go index 8b2dcb87da..61af7418c0 100644 --- a/core/listener.go +++ b/core/listener.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/libs/utils" "time" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -12,10 +11,12 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" "github.com/celestiaorg/celestia-node/share/store" - libhead "github.com/celestiaorg/go-header" ) var ( diff --git a/core/listener_test.go b/core/listener_test.go index 5a5db3e295..50b0d13cd2 100644 --- a/core/listener_test.go +++ b/core/listener_test.go @@ -3,7 +3,6 @@ package core import ( "bytes" "context" - "github.com/celestiaorg/celestia-node/share/store" "testing" "time" @@ -20,6 +19,7 @@ import ( nodep2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/store" ) const networkID = "private" diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go 
index 469350bde0..5d5319f641 100644 --- a/header/headertest/fraud/testing.go +++ b/header/headertest/fraud/testing.go @@ -2,7 +2,6 @@ package headerfraud import ( "context" - "github.com/celestiaorg/celestia-node/share/store" "testing" "time" @@ -19,6 +18,7 @@ import ( "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/store" ) // FraudMaker allows to produce an invalid header at the specified height in order to produce the diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index 67b49961de..06d7d087e8 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" diff --git a/share/p2p/shrexnd/exchange_test.go b/share/p2p/shrexnd/exchange_test.go index db285e0ce7..91ed562f59 100644 --- a/share/p2p/shrexnd/exchange_test.go +++ b/share/p2p/shrexnd/exchange_test.go @@ -2,7 +2,6 @@ package shrexnd import ( "context" - "github.com/celestiaorg/celestia-node/share/store" "sync" "testing" "time" @@ -16,6 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/p2p" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/store" ) func TestExchange_RequestND_NotFound(t *testing.T) { diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go index a97eaa1060..7c24d6c0ce 100644 --- a/share/store/file/cache_file.go +++ b/share/store/file/cache_file.go @@ -4,10 +4,11 @@ import ( "context" "errors" "fmt" + "sync" + "github.com/ipfs/boxo/blockservice" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "sync" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" diff --git a/share/store/file/eds_file.go b/share/store/file/eds_file.go index 7135563ac5..4ccf879d2f 100644 --- a/share/store/file/eds_file.go +++ b/share/store/file/eds_file.go @@ -2,9 +2,10 @@ package file import ( "context" - logging "github.com/ipfs/go-log/v2" "io" + logging "github.com/ipfs/go-log/v2" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" From 3113f73135d6b39916c5f15d2bfd0cddb66788fd Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 12:40:04 +0500 Subject: [PATCH 090/132] remove old store benchmarks --- nodebuilder/store_test.go | 106 +------------------------------------- 1 file changed, 2 insertions(+), 104 deletions(-) diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index 06d7d087e8..083aa33471 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -1,19 +1,12 @@ package nodebuilder import ( - "context" - "strconv" - "testing" - "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/pkg/da" + "strconv" + "testing" "github.com/celestiaorg/celestia-node/nodebuilder/node" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/eds/edstest" ) func TestRepo(t *testing.T) { @@ -56,98 +49,3 @@ func TestRepo(t *testing.T) { }) } } - -func BenchmarkStore(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - 
b.Cleanup(cancel) - - // BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) - b.Run("bench put 128", func(b *testing.B) { - dir := b.TempDir() - err := Init(*DefaultConfig(node.Full), dir, node.Full) - require.NoError(b, err) - - store := newStore(ctx, b, eds.DefaultParameters(), dir) - size := 128 - - b.ResetTimer() - b.StopTimer() - for i := 0; i < b.N; i++ { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - - b.StartTimer() - err = store.edsStore.Put(ctx, dah.Hash(), eds) - b.StopTimer() - require.NoError(b, err) - } - }) -} - -func TestStoreRestart(t *testing.T) { - const ( - blocks = 5 - size = 32 - ) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - dir := t.TempDir() - err := Init(*DefaultConfig(node.Full), dir, node.Full) - require.NoError(t, err) - - store := newStore(ctx, t, eds.DefaultParameters(), dir) - - hashes := make([][]byte, blocks) - for i := range hashes { - edss := edstest.RandEDS(t, size) - require.NoError(t, err) - dah, err := da.NewDataAvailabilityHeader(edss) - require.NoError(t, err) - err = store.edsStore.Put(ctx, dah.Hash(), edss) - require.NoError(t, err) - - // store hashes for read loop later - hashes[i] = dah.Hash() - } - - // restart store - store.stop(ctx, t) - store = newStore(ctx, t, eds.DefaultParameters(), dir) - - for _, h := range hashes { - edsReader, err := store.edsStore.GetCAR(ctx, h) - require.NoError(t, err) - odsReader, err := eds.ODSReader(edsReader) - require.NoError(t, err) - _, err = eds.ReadEDS(ctx, odsReader, h) - require.NoError(t, err) - require.NoError(t, edsReader.Close()) - } -} - -type store struct { - s Store - edsStore *eds.Store -} - -func newStore(ctx context.Context, t require.TestingT, params *eds.Parameters, dir string) store { - s, err := OpenStore(dir, nil) - require.NoError(t, err) - ds, err := s.Datastore() - require.NoError(t, err) - edsStore, err := eds.NewStore(params, dir, ds) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - return store{ - s: s, - edsStore: edsStore, - } -} - -func (s *store) stop(ctx context.Context, t *testing.T) { - require.NoError(t, s.edsStore.Stop(ctx)) - require.NoError(t, s.s.Close()) -} From 1a2256cbfcd91e475788aa471653b50f5751aef4 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 12:43:38 +0500 Subject: [PATCH 091/132] fix nodebuilder tests --- nodebuilder/store_test.go | 5 +++-- nodebuilder/tests/fraud_test.go | 13 +++---------- nodebuilder/tests/nd_test.go | 6 +++--- nodebuilder/tests/swamp/swamp.go | 9 +++------ 4 files changed, 12 insertions(+), 21 deletions(-) diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index 083aa33471..512d45bb70 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -1,11 +1,12 @@ package nodebuilder import ( - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "strconv" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-node/nodebuilder/node" ) diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index cb07dbb73a..342ae842e5 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -5,8 +5,6 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" @@ 
-21,8 +19,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/store" ) /* @@ -58,14 +56,9 @@ func TestFraudProofHandling(t *testing.T) { set, val := sw.Validators(t) fMaker := headerfraud.NewFraudMaker(t, 10, []types.PrivValidator{val}, set) - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(storeCfg, t.TempDir(), ds) + storeCfg := store.DefaultParameters() + edsStore, err := store.NewStore(storeCfg, t.TempDir()) require.NoError(t, err) - require.NoError(t, edsStore.Start(ctx)) - t.Cleanup(func() { - _ = edsStore.Stop(ctx) - }) cfg := nodebuilder.DefaultConfig(node.Bridge) // 1. diff --git a/nodebuilder/tests/nd_test.go b/nodebuilder/tests/nd_test.go index f3338ec294..809de92ef9 100644 --- a/nodebuilder/tests/nd_test.go +++ b/nodebuilder/tests/nd_test.go @@ -15,9 +15,9 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/store" ) func TestShrexNDFromLights(t *testing.T) { @@ -175,7 +175,7 @@ func replaceNDServer(cfg *nodebuilder.Config, handler network.StreamHandler) fx. return fx.Decorate(fx.Annotate( func( host host.Host, - store *eds.Store, + store *store.Store, network p2p.Network, ) (*shrexnd.Server, error) { cfg.Share.ShrExNDParams.WithNetworkID(network.String()) @@ -196,7 +196,7 @@ func replaceShareGetter() fx.Option { return fx.Decorate(fx.Annotate( func( host host.Host, - store *eds.Store, + store *store.Store, storeGetter *getters.StoreGetter, shrexGetter *getters.ShrexGetter, network p2p.Network, diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index e3ac3ad4f2..db07666c59 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -9,8 +9,6 @@ import ( "testing" "time" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -34,7 +32,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/state" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/store" ) var blackholeIP6 = net.ParseIP("100::") @@ -172,8 +170,7 @@ func (s *Swamp) setupGenesis() { // ensure core has surpassed genesis block s.WaitTillHeight(ctx, 2) - ds := ds_sync.MutexWrap(ds.NewMapDatastore()) - store, err := eds.NewStore(eds.DefaultParameters(), s.t.TempDir(), ds) + store, err := store.NewStore(store.DefaultParameters(), s.t.TempDir()) require.NoError(s.t, err) ex, err := core.NewExchange( @@ -287,7 +284,7 @@ func (s *Swamp) newNode(t node.Type, store nodebuilder.Store, options ...fx.Opti cfg, _ := store.Config() cfg.RPC.Port = "0" - // tempDir is used for the eds.Store + // tempDir is used for the store.Store tempDir := s.t.TempDir() options = 
append(options, p2p.WithHost(s.createPeer(ks)), From c7dd8b322f93365883cb3ccab90b33816b99279e Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 12:50:13 +0500 Subject: [PATCH 092/132] some todos to rework availability tests --- share/availability/full/availability_test.go | 161 +++--- share/availability/light/availability_test.go | 483 +++++++++--------- share/availability/light/testing.go | 2 +- 3 files changed, 324 insertions(+), 322 deletions(-) diff --git a/share/availability/full/availability_test.go b/share/availability/full/availability_test.go index 8ac0648a87..e75c1d73ab 100644 --- a/share/availability/full/availability_test.go +++ b/share/availability/full/availability_test.go @@ -1,82 +1,83 @@ package full -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/pkg/da" - - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/mocks" -) - -func TestShareAvailableOverMocknet_Full(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - net := availability_test.NewTestDAGNet(ctx, t) - _, root := RandNode(net, 32) - - eh := headertest.RandExtendedHeaderWithRoot(t, root) - nd := Node(net) - net.ConnectAll() - - err := nd.SharesAvailable(ctx, eh) - assert.NoError(t, err) -} - -func TestSharesAvailable_Full(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it - getter, dah := GetterWithRandSquare(t, 16) - - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - avail := TestAvailability(t, getter) - err := avail.SharesAvailable(ctx, eh) - assert.NoError(t, err) -} - -func TestSharesAvailable_StoresToEDSStore(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it - getter, dah := GetterWithRandSquare(t, 16) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - avail := TestAvailability(t, getter) - err := avail.SharesAvailable(ctx, eh) - assert.NoError(t, err) - - has, err := avail.store.Has(ctx, dah.Hash()) - assert.NoError(t, err) - assert.True(t, has) -} - -func TestSharesAvailable_Full_ErrNotAvailable(t *testing.T) { - ctrl := gomock.NewController(t) - getter := mocks.NewMockGetter(ctrl) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - eds := edstest.RandEDS(t, 4) - dah, err := da.NewDataAvailabilityHeader(eds) - eh := headertest.RandExtendedHeaderWithRoot(t, &dah) - require.NoError(t, err) - avail := TestAvailability(t, getter) - - errors := []error{share.ErrNotFound, context.DeadlineExceeded} - for _, getterErr := range errors { - getter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).Return(nil, getterErr) - err := avail.SharesAvailable(ctx, eh) - require.ErrorIs(t, err, share.ErrNotAvailable) - } -} +// TODO(@walldiss): rework all availability tests +//import ( +// "context" +// "testing" +// +// "github.com/golang/mock/gomock" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// +// "github.com/celestiaorg/celestia-app/pkg/da" +// +// 
"github.com/celestiaorg/celestia-node/header/headertest" +// "github.com/celestiaorg/celestia-node/share" +// availability_test "github.com/celestiaorg/celestia-node/share/availability/test" +// "github.com/celestiaorg/celestia-node/share/eds/edstest" +// "github.com/celestiaorg/celestia-node/share/mocks" +//) +// +//func TestShareAvailableOverMocknet_Full(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// net := availability_test.NewTestDAGNet(ctx, t) +// _, root := RandNode(net, 32) +// +// eh := headertest.RandExtendedHeaderWithRoot(t, root) +// nd := Node(net) +// net.ConnectAll() +// +// err := nd.SharesAvailable(ctx, eh) +// assert.NoError(t, err) +//} +// +//func TestSharesAvailable_Full(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it +// getter, dah := GetterWithRandSquare(t, 16) +// +// eh := headertest.RandExtendedHeaderWithRoot(t, dah) +// avail := TestAvailability(t, getter) +// err := avail.SharesAvailable(ctx, eh) +// assert.NoError(t, err) +//} +// +//func TestSharesAvailable_StoresToEDSStore(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it +// getter, dah := GetterWithRandSquare(t, 16) +// eh := headertest.RandExtendedHeaderWithRoot(t, dah) +// avail := TestAvailability(t, getter) +// err := avail.SharesAvailable(ctx, eh) +// assert.NoError(t, err) +// +// has, err := avail.store.Has(ctx, dah.Hash()) +// assert.NoError(t, err) +// assert.True(t, has) +//} +// +//func TestSharesAvailable_Full_ErrNotAvailable(t *testing.T) { +// ctrl := gomock.NewController(t) +// getter := mocks.NewMockGetter(ctrl) +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// eds := edstest.RandEDS(t, 4) +// dah, err := da.NewDataAvailabilityHeader(eds) +// eh := headertest.RandExtendedHeaderWithRoot(t, &dah) +// require.NoError(t, err) +// avail := TestAvailability(t, getter) +// +// errors := []error{share.ErrNotFound, context.DeadlineExceeded} +// for _, getterErr := range errors { +// getter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).Return(nil, getterErr) +// err := avail.SharesAvailable(ctx, eh) +// require.ErrorIs(t, err, share.ErrNotAvailable) +// } +//} diff --git a/share/availability/light/availability_test.go b/share/availability/light/availability_test.go index 2ace654d50..f655671156 100644 --- a/share/availability/light/availability_test.go +++ b/share/availability/light/availability_test.go @@ -1,243 +1,244 @@ package light -import ( - "context" - _ "embed" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" -) - -func TestSharesAvailableCaches(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - getter, eh := GetterWithRandSquare(t, 16) - dah := eh.DAH - avail := TestAvailability(getter) - - // cache doesn't have dah yet - has, err := avail.ds.Has(ctx, rootKey(dah)) - assert.NoError(t, err) - assert.False(t, has) - - err = 
avail.SharesAvailable(ctx, eh) - assert.NoError(t, err) - - // is now cached - has, err = avail.ds.Has(ctx, rootKey(dah)) - assert.NoError(t, err) - assert.True(t, has) -} - -func TestSharesAvailableHitsCache(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - getter, _ := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) - - bServ := ipld.NewMemBlockservice() - dah := availability_test.RandFillBS(t, 16, bServ) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - - // blockstore doesn't actually have the dah - err := avail.SharesAvailable(ctx, eh) - require.Error(t, err) - - // cache doesn't have dah yet, since it errored - has, err := avail.ds.Has(ctx, rootKey(dah)) - assert.NoError(t, err) - assert.False(t, has) - - err = avail.ds.Put(ctx, rootKey(dah), []byte{}) - require.NoError(t, err) - - // should hit cache after putting - err = avail.SharesAvailable(ctx, eh) - require.NoError(t, err) -} - -func TestSharesAvailableEmptyRoot(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - getter, _ := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) - - eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyRoot()) - err := avail.SharesAvailable(ctx, eh) - assert.NoError(t, err) -} - -func TestSharesAvailable(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - getter, dah := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) - err := avail.SharesAvailable(ctx, dah) - assert.NoError(t, err) -} - -func TestSharesAvailableFailed(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - bServ := ipld.NewMemBlockservice() - dah := availability_test.RandFillBS(t, 16, bServ) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - - getter, _ := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) - err := avail.SharesAvailable(ctx, eh) - assert.Error(t, err) -} - -func TestShareAvailableOverMocknet_Light(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - net := availability_test.NewTestDAGNet(ctx, t) - _, root := RandNode(net, 16) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - nd := Node(net) - net.ConnectAll() - - err := nd.SharesAvailable(ctx, eh) - assert.NoError(t, err) -} - -func TestGetShare(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n := 16 - getter, eh := GetterWithRandSquare(t, n) - - for i := range make([]bool, n) { - for j := range make([]bool, n) { - sh, err := getter.GetShare(ctx, eh, i, j) - assert.NotNil(t, sh) - assert.NoError(t, err) - } - } -} - -func TestService_GetSharesByNamespace(t *testing.T) { - var tests = []struct { - squareSize int - expectedShareCount int - }{ - {squareSize: 4, expectedShareCount: 2}, - {squareSize: 16, expectedShareCount: 2}, - {squareSize: 128, expectedShareCount: 2}, - } - - for _, tt := range tests { - t.Run("size: "+strconv.Itoa(tt.squareSize), func(t *testing.T) { - getter, bServ := EmptyGetter() - totalShares := tt.squareSize * tt.squareSize - randShares := sharetest.RandShares(t, totalShares) - idx1 := (totalShares - 1) / 2 - idx2 := totalShares / 2 - if tt.expectedShareCount > 1 { - // make it so that two rows have the same namespace - copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) - } - root := availability_test.FillBS(t, bServ, randShares) - eh := headertest.RandExtendedHeader(t) - eh.DAH 
= root - randNamespace := share.GetNamespace(randShares[idx1]) - - shares, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(root, randNamespace)) - flattened := shares.Flatten() - assert.Len(t, flattened, tt.expectedShareCount) - for _, value := range flattened { - assert.Equal(t, randNamespace, share.GetNamespace(value)) - } - if tt.expectedShareCount > 1 { - // idx1 is always smaller than idx2 - assert.Equal(t, randShares[idx1], flattened[0]) - assert.Equal(t, randShares[idx2], flattened[1]) - } - }) - t.Run("last two rows of a 4x4 square that have the same namespace have valid NMT proofs", func(t *testing.T) { - squareSize := 4 - totalShares := squareSize * squareSize - getter, bServ := EmptyGetter() - randShares := sharetest.RandShares(t, totalShares) - lastNID := share.GetNamespace(randShares[totalShares-1]) - for i := totalShares / 2; i < totalShares; i++ { - copy(share.GetNamespace(randShares[i]), lastNID) - } - root := availability_test.FillBS(t, bServ, randShares) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - - shares, err := getter.GetSharesByNamespace(context.Background(), eh, lastNID) - require.NoError(t, err) - require.NoError(t, shares.Verify(root, lastNID)) - }) - } -} - -func TestGetShares(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n := 16 - getter, eh := GetterWithRandSquare(t, n) - - eds, err := getter.GetEDS(ctx, eh) - require.NoError(t, err) - gotDAH, err := share.NewRoot(eds) - require.NoError(t, err) - - require.True(t, eh.DAH.Equals(gotDAH)) -} - -func TestService_GetSharesByNamespaceNotFound(t *testing.T) { - getter, eh := GetterWithRandSquare(t, 1) - eh.DAH.RowRoots = nil - - emptyShares, err := getter.GetSharesByNamespace(context.Background(), eh, sharetest.RandV0Namespace()) - require.NoError(t, err) - require.Empty(t, emptyShares.Flatten()) -} - -func BenchmarkService_GetSharesByNamespace(b *testing.B) { - var tests = []struct { - amountShares int - }{ - {amountShares: 4}, - {amountShares: 16}, - {amountShares: 128}, - } - - for _, tt := range tests { - b.Run(strconv.Itoa(tt.amountShares), func(b *testing.B) { - t := &testing.T{} - getter, eh := GetterWithRandSquare(t, tt.amountShares) - root := eh.DAH - randNamespace := root.RowRoots[(len(root.RowRoots)-1)/2][:share.NamespaceSize] - root.RowRoots[(len(root.RowRoots) / 2)] = root.RowRoots[(len(root.RowRoots)-1)/2] - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) - require.NoError(t, err) - } - }) - } -} +//// TODO(@walldiss): rework all availability tests +//import ( +// "context" +// _ "embed" +// "strconv" +// "testing" +// +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// +// "github.com/celestiaorg/celestia-node/header/headertest" +// "github.com/celestiaorg/celestia-node/share" +// availability_test "github.com/celestiaorg/celestia-node/share/availability/test" +// "github.com/celestiaorg/celestia-node/share/ipld" +// "github.com/celestiaorg/celestia-node/share/sharetest" +//) +// +//func TestSharesAvailableCaches(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// getter, eh := GetterWithRandSquare(t, 16) +// dah := eh.DAH +// avail := TestAvailability(getter) +// +// // cache doesn't have dah yet +// has, err := avail.ds.Has(ctx, rootKey(dah)) +// assert.NoError(t, err) +// assert.False(t, 
has) +// +// err = avail.SharesAvailable(ctx, eh) +// assert.NoError(t, err) +// +// // is now cached +// has, err = avail.ds.Has(ctx, rootKey(dah)) +// assert.NoError(t, err) +// assert.True(t, has) +//} +// +//func TestSharesAvailableHitsCache(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// getter, _ := GetterWithRandSquare(t, 16) +// avail := TestAvailability(getter) +// +// bServ := ipld.NewMemBlockservice() +// dah := availability_test.RandFillBS(t, 16, bServ) +// eh := headertest.RandExtendedHeaderWithRoot(t, dah) +// +// // blockstore doesn't actually have the dah +// err := avail.SharesAvailable(ctx, eh) +// require.Error(t, err) +// +// // cache doesn't have dah yet, since it errored +// has, err := avail.ds.Has(ctx, rootKey(dah)) +// assert.NoError(t, err) +// assert.False(t, has) +// +// err = avail.ds.Put(ctx, rootKey(dah), []byte{}) +// require.NoError(t, err) +// +// // should hit cache after putting +// err = avail.SharesAvailable(ctx, eh) +// require.NoError(t, err) +//} +// +//func TestSharesAvailableEmptyRoot(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// getter, _ := GetterWithRandSquare(t, 16) +// avail := TestAvailability(getter) +// +// eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyRoot()) +// err := avail.SharesAvailable(ctx, eh) +// assert.NoError(t, err) +//} +// +//func TestSharesAvailable(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// getter, dah := GetterWithRandSquare(t, 16) +// avail := TestAvailability(getter) +// err := avail.SharesAvailable(ctx, dah) +// assert.NoError(t, err) +//} +// +//func TestSharesAvailableFailed(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// bServ := ipld.NewMemBlockservice() +// dah := availability_test.RandFillBS(t, 16, bServ) +// eh := headertest.RandExtendedHeaderWithRoot(t, dah) +// +// getter, _ := GetterWithRandSquare(t, 16) +// avail := TestAvailability(getter) +// err := avail.SharesAvailable(ctx, eh) +// assert.Error(t, err) +//} +// +//func TestShareAvailableOverMocknet_Light(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// net := availability_test.NewTestDAGNet(ctx, t) +// _, root := RandNode(net, 16) +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// nd := Node(net) +// net.ConnectAll() +// +// err := nd.SharesAvailable(ctx, eh) +// assert.NoError(t, err) +//} +// +//func TestGetShare(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// n := 16 +// getter, eh := GetterWithRandSquare(t, n) +// +// for i := range make([]bool, n) { +// for j := range make([]bool, n) { +// sh, err := getter.GetShare(ctx, eh, i, j) +// assert.NotNil(t, sh) +// assert.NoError(t, err) +// } +// } +//} +// +//func TestService_GetSharesByNamespace(t *testing.T) { +// var tests = []struct { +// squareSize int +// expectedShareCount int +// }{ +// {squareSize: 4, expectedShareCount: 2}, +// {squareSize: 16, expectedShareCount: 2}, +// {squareSize: 128, expectedShareCount: 2}, +// } +// +// for _, tt := range tests { +// t.Run("size: "+strconv.Itoa(tt.squareSize), func(t *testing.T) { +// getter, bServ := EmptyGetter() +// totalShares := tt.squareSize * tt.squareSize +// randShares := sharetest.RandShares(t, totalShares) +// idx1 := (totalShares - 1) / 2 +// idx2 := totalShares / 2 +// if 
tt.expectedShareCount > 1 { +// // make it so that two rows have the same namespace +// copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) +// } +// root := availability_test.FillBS(t, bServ, randShares) +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// randNamespace := share.GetNamespace(randShares[idx1]) +// +// shares, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) +// require.NoError(t, err) +// require.NoError(t, shares.Verify(root, randNamespace)) +// flattened := shares.Flatten() +// assert.Len(t, flattened, tt.expectedShareCount) +// for _, value := range flattened { +// assert.Equal(t, randNamespace, share.GetNamespace(value)) +// } +// if tt.expectedShareCount > 1 { +// // idx1 is always smaller than idx2 +// assert.Equal(t, randShares[idx1], flattened[0]) +// assert.Equal(t, randShares[idx2], flattened[1]) +// } +// }) +// t.Run("last two rows of a 4x4 square that have the same namespace have valid NMT proofs", func(t *testing.T) { +// squareSize := 4 +// totalShares := squareSize * squareSize +// getter, bServ := EmptyGetter() +// randShares := sharetest.RandShares(t, totalShares) +// lastNID := share.GetNamespace(randShares[totalShares-1]) +// for i := totalShares / 2; i < totalShares; i++ { +// copy(share.GetNamespace(randShares[i]), lastNID) +// } +// root := availability_test.FillBS(t, bServ, randShares) +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// +// shares, err := getter.GetSharesByNamespace(context.Background(), eh, lastNID) +// require.NoError(t, err) +// require.NoError(t, shares.Verify(root, lastNID)) +// }) +// } +//} +// +//func TestGetShares(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// n := 16 +// getter, eh := GetterWithRandSquare(t, n) +// +// eds, err := getter.GetEDS(ctx, eh) +// require.NoError(t, err) +// gotDAH, err := share.NewRoot(eds) +// require.NoError(t, err) +// +// require.True(t, eh.DAH.Equals(gotDAH)) +//} +// +//func TestService_GetSharesByNamespaceNotFound(t *testing.T) { +// getter, eh := GetterWithRandSquare(t, 1) +// eh.DAH.RowRoots = nil +// +// emptyShares, err := getter.GetSharesByNamespace(context.Background(), eh, sharetest.RandV0Namespace()) +// require.NoError(t, err) +// require.Empty(t, emptyShares.Flatten()) +//} +// +//func BenchmarkService_GetSharesByNamespace(b *testing.B) { +// var tests = []struct { +// amountShares int +// }{ +// {amountShares: 4}, +// {amountShares: 16}, +// {amountShares: 128}, +// } +// +// for _, tt := range tests { +// b.Run(strconv.Itoa(tt.amountShares), func(b *testing.B) { +// t := &testing.T{} +// getter, eh := GetterWithRandSquare(t, tt.amountShares) +// root := eh.DAH +// randNamespace := root.RowRoots[(len(root.RowRoots)-1)/2][:share.NamespaceSize] +// root.RowRoots[(len(root.RowRoots) / 2)] = root.RowRoots[(len(root.RowRoots)-1)/2] +// b.ResetTimer() +// for i := 0; i < b.N; i++ { +// _, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) +// require.NoError(t, err) +// } +// }) +// } +//} diff --git a/share/availability/light/testing.go b/share/availability/light/testing.go index 52bb3a89d5..804dc25c05 100644 --- a/share/availability/light/testing.go +++ b/share/availability/light/testing.go @@ -1,6 +1,6 @@ package light -// FIXME: rework testing pkg +// TODO(@walldiss): rework all availability tests // GetterWithRandSquare provides a share.Getter filled with 'n' NMT trees of 'n' random shares, // essentially storing a 
whole square. //func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *header.ExtendedHeader) { From d148bad0bbc7118fd0d72cbc4a1262256903046b Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Feb 2024 13:11:54 +0500 Subject: [PATCH 093/132] fix shrex-eds tests --- share/getters/shrex.go | 2 +- share/p2p/shrexeds/client.go | 29 +++++----- share/p2p/shrexeds/exchange_test.go | 84 +++++++++++++---------------- 3 files changed, 52 insertions(+), 63 deletions(-) diff --git a/share/getters/shrex.go b/share/getters/shrex.go index 0204755faf..1716138343 100644 --- a/share/getters/shrex.go +++ b/share/getters/shrex.go @@ -158,7 +158,7 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader reqStart := time.Now() reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - eds, getErr := sg.edsClient.RequestEDS(reqCtx, header, peer) + eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.DAH, header.Height(), peer) cancel() switch { case getErr == nil: diff --git a/share/p2p/shrexeds/client.go b/share/p2p/shrexeds/client.go index d7169d2667..6f2531e902 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/p2p/shrexeds/client.go @@ -17,7 +17,6 @@ import ( "github.com/celestiaorg/go-libp2p-messenger/serde" "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/p2p" pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" @@ -49,15 +48,16 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) { // RequestEDS requests the ODS from the given peers and returns the EDS upon success. func (c *Client) RequestEDS( ctx context.Context, - header *header.ExtendedHeader, + root *share.Root, + height uint64, peer peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { - eds, err := c.doRequest(ctx, header, peer) + eds, err := c.doRequest(ctx, root, height, peer) if err == nil { return eds, nil } log.Debugw("client: eds request to peer failed", - "height", header.Height(), + "height", height, "peer", peer.String(), "error", err) if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { @@ -76,7 +76,7 @@ func (c *Client) RequestEDS( if err != p2p.ErrNotFound { log.Warnw("client: eds request to peer failed", "peer", peer.String(), - "height", header.Height(), + "height", height, "err", err) } @@ -85,7 +85,8 @@ func (c *Client) RequestEDS( func (c *Client) doRequest( ctx context.Context, - header *header.ExtendedHeader, + root *share.Root, + height uint64, to peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { streamOpenCtx, cancel := context.WithTimeout(ctx, c.params.ServerReadTimeout) @@ -98,11 +99,11 @@ func (c *Client) doRequest( c.setStreamDeadlines(ctx, stream) - req := &pb.EDSRequest{Height: header.Height()} + req := &pb.EDSRequest{Height: height} // request ODS log.Debugw("client: requesting ods", - "height", header.Height(), + "height", height, "peer", to.String()) _, err = serde.Write(stream, req) if err != nil { @@ -136,7 +137,7 @@ func (c *Client) doRequest( // reset stream deadlines to original values, since read deadline was changed during status read c.setStreamDeadlines(ctx, stream) // use header and ODS bytes to construct EDS and verify it against dataHash - eds, err := readEds(ctx, stream, header) + eds, err := readEds(ctx, stream, root) if err != nil { return nil, fmt.Errorf("read eds from stream: %w", err) } @@ -156,8 +157,8 @@ func (c *Client) doRequest( } } -func readEds(ctx 
context.Context, stream network.Stream, eh *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { - eds, err := file.ReadEds(ctx, stream, len(eh.DAH.RowRoots)) +func readEds(ctx context.Context, stream network.Stream, root *share.Root) (*rsmt2d.ExtendedDataSquare, error) { + eds, err := file.ReadEds(ctx, stream, len(root.RowRoots)) if err != nil { return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) } @@ -168,14 +169,14 @@ func readEds(ctx context.Context, stream network.Stream, eh *header.ExtendedHead return nil, fmt.Errorf("create new root from eds: %w, size:%v , expectedSize:%v", err, eds.Width(), - len(eh.DAH.RowRoots), + len(root.RowRoots), ) } - if !bytes.Equal(newDah.Hash(), eh.DAH.Hash()) { + if !bytes.Equal(newDah.Hash(), root.Hash()) { return nil, fmt.Errorf( "content integrity mismatch: imported root %s doesn't match expected root %s", share.DataHash(newDah.Hash()), - eh.DAH.Hash(), + root.Hash(), ) } return eds, nil diff --git a/share/p2p/shrexeds/exchange_test.go b/share/p2p/shrexeds/exchange_test.go index 9155be6dec..f161201e60 100644 --- a/share/p2p/shrexeds/exchange_test.go +++ b/share/p2p/shrexeds/exchange_test.go @@ -6,59 +6,59 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" libhost "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/p2p" + "github.com/celestiaorg/celestia-node/share/store" ) func TestExchange_RequestEDS(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) store, client, server := makeExchange(t) - - err := store.Start(ctx) + err := server.Start(ctx) require.NoError(t, err) - err = server.Start(ctx) - require.NoError(t, err) + height := atomic.NewUint64(1) // Testcase: EDS is immediately available t.Run("EDS_Available", func(t *testing.T) { - eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) - require.NoError(t, err) - err = store.Put(ctx, dah.Hash(), eds) + eds, root := testData(t) + height := height.Add(1) + f, err := store.Put(ctx, root.Hash(), height, eds) require.NoError(t, err) + require.NoError(t, f.Close()) - requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + requestedEDS, err := client.RequestEDS(ctx, root, height, server.host.ID()) assert.NoError(t, err) assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) }) // Testcase: EDS is unavailable initially, but is found after multiple requests t.Run("EDS_AvailableAfterDelay", func(t *testing.T) { - eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) - require.NoError(t, err) + eds, root := testData(t) + height := height.Add(1) lock := make(chan struct{}) go func() { <-lock - err = store.Put(ctx, dah.Hash(), eds) + f, err := store.Put(ctx, root.Hash(), height, eds) + require.NoError(t, err) + require.NoError(t, f.Close()) require.NoError(t, err) lock <- struct{}{} }() - requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + requestedEDS, err := client.RequestEDS(ctx, root, height, server.host.ID()) assert.ErrorIs(t, err, p2p.ErrNotFound) assert.Nil(t, requestedEDS) @@ -67,34 +67,22 @@ func 
TestExchange_RequestEDS(t *testing.T) { // wait for write to finish <-lock - requestedEDS, err = client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + requestedEDS, err = client.RequestEDS(ctx, root, height, server.host.ID()) assert.NoError(t, err) assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) }) - // Testcase: Invalid request excludes peer from round-robin, stopping request - t.Run("EDS_InvalidRequest", func(t *testing.T) { - dataHash := []byte("invalid") - requestedEDS, err := client.RequestEDS(ctx, dataHash, server.host.ID()) - assert.ErrorContains(t, err, "stream reset") - assert.Nil(t, requestedEDS) - }) - t.Run("EDS_err_not_found", func(t *testing.T) { - timeoutCtx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) + _, root := testData(t) + height := height.Add(1) require.NoError(t, err) - _, err = client.RequestEDS(timeoutCtx, dah.Hash(), server.host.ID()) + _, err = client.RequestEDS(ctx, root, height, server.host.ID()) require.ErrorIs(t, err, p2p.ErrNotFound) }) // Testcase: Concurrency limit reached t.Run("EDS_concurrency_limit", func(t *testing.T) { - store, client, server := makeExchange(t) - - require.NoError(t, store.Start(ctx)) + _, client, server := makeExchange(t) require.NoError(t, server.Start(ctx)) ctx, cancel := context.WithTimeout(ctx, time.Second) @@ -120,29 +108,20 @@ func TestExchange_RequestEDS(t *testing.T) { middleware.RateLimitHandler(mockHandler)) // take server concurrency slots with blocked requests + height := height.Add(1) for i := 0; i < rateLimit; i++ { go func(i int) { - client.RequestEDS(ctx, nil, server.host.ID()) //nolint:errcheck + client.RequestEDS(ctx, nil, height, server.host.ID()) //nolint:errcheck }(i) } // wait until all server slots are taken wg.Wait() - _, err = client.RequestEDS(ctx, nil, server.host.ID()) + _, err = client.RequestEDS(ctx, nil, height, server.host.ID()) require.ErrorIs(t, err, p2p.ErrNotFound) }) } -func newStore(t *testing.T) *eds.Store { - t.Helper() - - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - store, err := eds.NewStore(storeCfg, t.TempDir(), ds) - require.NoError(t, err) - return store -} - func createMocknet(t *testing.T, amount int) []libhost.Host { t.Helper() @@ -152,9 +131,11 @@ func createMocknet(t *testing.T, amount int) []libhost.Host { return net.Hosts() } -func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) { +func makeExchange(t *testing.T) (*store.Store, *Client, *Server) { t.Helper() - store := newStore(t) + cfg := store.DefaultParameters() + store, err := store.NewStore(cfg, t.TempDir()) + require.NoError(t, err) hosts := createMocknet(t, 2) client, err := NewClient(DefaultParameters(), hosts[0]) @@ -164,3 +145,10 @@ func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) { return store, client, server } + +func testData(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) { + eds := edstest.RandEDS(t, 4) + dah, err := share.NewRoot(eds) + require.NoError(t, err) + return eds, dah +} From 28866655208fd02d8907e5df383d4fe19a12daca Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 29 Feb 2024 16:58:21 +0400 Subject: [PATCH 094/132] refactor shwap blockstore test --- share/shwap/blockstore.go | 97 -------------------------------------- share/shwap/shwap_test.go | 99 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 93 insertions(+), 103 deletions(-) delete mode 100644 share/shwap/blockstore.go diff --git a/share/shwap/blockstore.go 
b/share/shwap/blockstore.go deleted file mode 100644 index 56e3e11bd6..0000000000 --- a/share/shwap/blockstore.go +++ /dev/null @@ -1,97 +0,0 @@ -package shwap - -import ( - "context" - "fmt" - "testing" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share/store/file" -) - -type TestBlockstore struct { - t *testing.T - lastHeight uint64 - blocks map[uint64]*file.MemFile -} - -func NewTestBlockstore(t *testing.T) *TestBlockstore { - return &TestBlockstore{ - t: t, - lastHeight: 1, - blocks: make(map[uint64]*file.MemFile), - } -} - -func (t *TestBlockstore) AddEds(eds *rsmt2d.ExtendedDataSquare) (height uint64) { - for { - if _, ok := t.blocks[t.lastHeight]; !ok { - break - } - t.lastHeight++ - } - t.blocks[t.lastHeight] = &file.MemFile{Eds: eds} - return t.lastHeight -} - -func (t *TestBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { - //TODO implement me - panic("not implemented") -} - -func (t *TestBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { - req, err := BlockBuilderFromCID(cid) - if err != nil { - return false, fmt.Errorf("while getting height from CID: %w", err) - } - - _, ok := t.blocks[req.GetHeight()] - return ok, nil -} - -func (t *TestBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - req, err := BlockBuilderFromCID(cid) - if err != nil { - return nil, fmt.Errorf("while getting height from CID: %w", err) - } - - f, ok := t.blocks[req.GetHeight()] - if !ok { - return nil, ipld.ErrNotFound{Cid: cid} - } - return req.BlockFromFile(ctx, f) -} - -func (t *TestBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { - req, err := BlockBuilderFromCID(cid) - if err != nil { - return 0, fmt.Errorf("while getting height from CID: %w", err) - } - - f, ok := t.blocks[req.GetHeight()] - if !ok { - return 0, ipld.ErrNotFound{Cid: cid} - } - return f.Size(), nil -} - -func (t *TestBlockstore) Put(ctx context.Context, block blocks.Block) error { - panic("not implemented") -} - -func (t *TestBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { - panic("not implemented") -} - -func (t *TestBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - panic("not implemented") -} - -func (t *TestBlockstore) HashOnRead(enabled bool) { - panic("not implemented") -} diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index 22a3b70d00..4860f9521b 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -2,6 +2,11 @@ package shwap import ( "context" + "fmt" + "github.com/celestiaorg/celestia-node/share/store/file" + "github.com/celestiaorg/rsmt2d" + blocks "github.com/ipfs/go-block-format" + ipld "github.com/ipfs/go-ipld-format" "testing" "time" @@ -28,7 +33,7 @@ func TestSampleRoundtripGetBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b := NewTestBlockstore(t) + b := newTestBlockstore(t) eds := edstest.RandEDS(t, 8) height := b.AddEds(eds) root, err := share.NewRoot(eds) @@ -63,7 +68,7 @@ func TestSampleRoundtripGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b := NewTestBlockstore(t) + b := newTestBlockstore(t) eds := edstest.RandEDS(t, 8) height := b.AddEds(eds) root, err := share.NewRoot(eds) @@ -107,7 +112,7 @@ func TestRowRoundtripGetBlock(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b := NewTestBlockstore(t) + b := newTestBlockstore(t) eds := edstest.RandEDS(t, 8) height := b.AddEds(eds) root, err := share.NewRoot(eds) @@ -140,7 +145,7 @@ func TestRowRoundtripGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b := NewTestBlockstore(t) + b := newTestBlockstore(t) eds := edstest.RandEDS(t, 8) height := b.AddEds(eds) root, err := share.NewRoot(eds) @@ -184,7 +189,7 @@ func TestDataRoundtripGetBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b := NewTestBlockstore(t) + b := newTestBlockstore(t) namespace := sharetest.RandV0Namespace() eds, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) height := b.AddEds(eds) @@ -215,7 +220,7 @@ func TestDataRoundtripGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b := NewTestBlockstore(t) + b := newTestBlockstore(t) namespace := sharetest.RandV0Namespace() eds, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) height := b.AddEds(eds) @@ -281,3 +286,85 @@ func remoteClient(ctx context.Context, t *testing.T, bstore blockstore.Blockstor return bitswapClient } + +type testBlockstore struct { + t *testing.T + lastHeight uint64 + blocks map[uint64]*file.MemFile +} + +func newTestBlockstore(t *testing.T) *testBlockstore { + return &testBlockstore{ + t: t, + lastHeight: 1, + blocks: make(map[uint64]*file.MemFile), + } +} + +func (t *testBlockstore) AddEds(eds *rsmt2d.ExtendedDataSquare) (height uint64) { + for { + if _, ok := t.blocks[t.lastHeight]; !ok { + break + } + t.lastHeight++ + } + t.blocks[t.lastHeight] = &file.MemFile{Eds: eds} + return t.lastHeight +} + +func (t *testBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { + //TODO implement me + panic("not implemented") +} + +func (t *testBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + req, err := BlockBuilderFromCID(cid) + if err != nil { + return false, fmt.Errorf("while getting height from CID: %w", err) + } + + _, ok := t.blocks[req.GetHeight()] + return ok, nil +} + +func (t *testBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + req, err := BlockBuilderFromCID(cid) + if err != nil { + return nil, fmt.Errorf("while getting height from CID: %w", err) + } + + f, ok := t.blocks[req.GetHeight()] + if !ok { + return nil, ipld.ErrNotFound{Cid: cid} + } + return req.BlockFromFile(ctx, f) +} + +func (t *testBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + req, err := BlockBuilderFromCID(cid) + if err != nil { + return 0, fmt.Errorf("while getting height from CID: %w", err) + } + + f, ok := t.blocks[req.GetHeight()] + if !ok { + return 0, ipld.ErrNotFound{Cid: cid} + } + return f.Size(), nil +} + +func (t *testBlockstore) Put(ctx context.Context, block blocks.Block) error { + panic("not implemented") +} + +func (t *testBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + panic("not implemented") +} + +func (t *testBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + panic("not implemented") +} + +func (t *testBlockstore) HashOnRead(enabled bool) { + panic("not implemented") +} From 035a2725c6d417a3600bdd5a0deab3a7a5d1b294 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 6 Mar 2024 17:09:55 +0400 Subject: [PATCH 095/132] add validating file --- share/store/file/validating_file.go | 55 
++++++++++++ share/store/file/validationg_file_test.go | 103 ++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 share/store/file/validating_file.go create mode 100644 share/store/file/validationg_file_test.go diff --git a/share/store/file/validating_file.go b/share/store/file/validating_file.go new file mode 100644 index 0000000000..94a0ae9ab2 --- /dev/null +++ b/share/store/file/validating_file.go @@ -0,0 +1,55 @@ +package file + +import ( + "context" + "errors" + "fmt" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" +) + +// ErrOutOfBounds is returned whenever an index is out of bounds. +var ErrOutOfBounds = errors.New("index is out of bounds") + +// ValidatingFile is a file implementation that performs sanity checks on file operations. +type ValidatingFile struct { + EdsFile +} + +func NewValidatingFile(f EdsFile) EdsFile { + return &ValidatingFile{EdsFile: f} +} + +func (f *ValidatingFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { + if err := validateIndexBounds(f, x); err != nil { + return nil, fmt.Errorf("col: %w", err) + } + if err := validateIndexBounds(f, y); err != nil { + return nil, fmt.Errorf("row: %w", err) + } + return f.EdsFile.Share(ctx, x, y) +} + +func (f *ValidatingFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + if err := validateIndexBounds(f, axisIdx); err != nil { + return nil, fmt.Errorf("%s: %w", axisType, err) + } + return f.EdsFile.AxisHalf(ctx, axisType, axisIdx) +} + +func (f *ValidatingFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + if err := validateIndexBounds(f, rowIdx); err != nil { + return share.NamespacedRow{}, fmt.Errorf("row: %w", err) + } + return f.EdsFile.Data(ctx, namespace, rowIdx) +} + +// validateIndexBounds checks if the index is within the bounds of the file. 
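[Editor's note, not part of the patch] To make the decorator's contract concrete, here is a minimal test-style sketch. It assumes the MemFile, NewValidatingFile, and ErrOutOfBounds identifiers introduced in this commit plus the repository's edstest helper; everything else is illustrative:

package file

import (
	"context"
	"errors"
	"testing"

	"github.com/celestiaorg/celestia-node/share/eds/edstest"
)

// Sketch: the decorator forwards in-bounds reads and rejects everything else.
func TestValidatingFileSketch(t *testing.T) {
	eds := edstest.RandEDS(t, 4) // 4x4 ODS extends to an 8x8 square
	vf := NewValidatingFile(&MemFile{Eds: eds})

	// Indices in [0, Size()) reach the wrapped file untouched.
	if _, err := vf.Share(context.Background(), 3, 2); err != nil {
		t.Fatal(err)
	}
	// Anything else fails fast with ErrOutOfBounds instead of indexing
	// past the square deeper in the stack.
	if _, err := vf.Share(context.Background(), 8, 0); !errors.Is(err, ErrOutOfBounds) {
		t.Fatalf("expected ErrOutOfBounds, got %v", err)
	}
}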
+func validateIndexBounds(f EdsFile, idx int) error { + if idx < 0 || idx >= f.Size() { + return fmt.Errorf("%w: index %d is out of bounds: [0, %d)", ErrOutOfBounds, idx, f.Size()) + } + return nil +} diff --git a/share/store/file/validationg_file_test.go b/share/store/file/validationg_file_test.go new file mode 100644 index 0000000000..1e034a99d5 --- /dev/null +++ b/share/store/file/validationg_file_test.go @@ -0,0 +1,103 @@ +package file + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestValidatingFile_Share(t *testing.T) { + tests := []struct { + name string + x, y int + odsSize int + expectFail bool + }{ + {"ValidIndices", 3, 2, 4, false}, + {"OutOfBoundsX", 8, 3, 4, true}, + {"OutOfBoundsY", 3, 8, 4, true}, + {"NegativeX", -1, 4, 6, true}, + {"NegativeY", 3, -1, 6, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + eds := edstest.RandEDS(t, tt.odsSize) + file := &MemFile{Eds: eds} + vf := NewValidatingFile(file) + + _, err := vf.Share(context.Background(), tt.x, tt.y) + if tt.expectFail { + require.ErrorIs(t, err, ErrOutOfBounds) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidatingFile_AxisHalf(t *testing.T) { + tests := []struct { + name string + axisType rsmt2d.Axis + axisIdx int + odsSize int + expectFail bool + }{ + {"ValidIndex", rsmt2d.Row, 2, 4, false}, + {"OutOfBounds", rsmt2d.Col, 8, 4, true}, + {"NegativeIndex", rsmt2d.Row, -1, 4, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + eds := edstest.RandEDS(t, tt.odsSize) + file := &MemFile{Eds: eds} + vf := NewValidatingFile(file) + + _, err := vf.AxisHalf(context.Background(), tt.axisType, tt.axisIdx) + if tt.expectFail { + require.ErrorIs(t, err, ErrOutOfBounds) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidatingFile_Data(t *testing.T) { + tests := []struct { + name string + rowIdx int + odsSize int + expectFail bool + }{ + {"ValidIndex", 3, 4, false}, + {"OutOfBounds", 8, 4, true}, + {"NegativeIndex", -1, 4, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + eds := edstest.RandEDS(t, tt.odsSize) + file := &MemFile{Eds: eds} + vf := NewValidatingFile(file) + + ns := sharetest.RandV0Namespace() + _, err := vf.Data(context.Background(), ns, tt.rowIdx) + if tt.expectFail { + require.ErrorIs(t, err, ErrOutOfBounds) + } else { + require.True(t, err == nil || errors.Is(err, ipld.ErrNamespaceOutsideRange)) + } + }) + } +} From 9e29a18c9a8fdf12762788b63292442764bcdb5b Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 6 Mar 2024 17:11:56 +0400 Subject: [PATCH 096/132] wrap opened files in store with validation file and cache --- share/availability/full/availability.go | 2 +- share/store/blockstore.go | 17 +++++++- share/store/cache/accessor_cache.go | 8 ++-- share/store/store.go | 55 ++++++++++++++----------- 4 files changed, 50 insertions(+), 32 deletions(-) diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index 4dca52e038..2137e4a2b7 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -62,7 +62,7 @@ func (fa *ShareAvailability) Stop(context.Context) error { func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header 
*header.ExtendedHeader) error { // a hack to avoid loading the whole EDS in mem if we store it already. if ok, _ := fa.store.HasByHash(ctx, header.DAH.Hash()); ok { - return fa.store.LinkHeight(ctx, header.DAH.Hash(), header.Height()) + return fa.store.LinkHashToHeight(ctx, header.DAH.Hash(), header.Height()) } eds, err := fa.getEds(ctx, header) diff --git a/share/store/blockstore.go b/share/store/blockstore.go index b711919c0f..4cc1c438c4 100644 --- a/share/store/blockstore.go +++ b/share/store/blockstore.go @@ -15,6 +15,8 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/store/cache" + "github.com/celestiaorg/celestia-node/share/store/file" ) var _ bstore.Blockstore = (*Blockstore)(nil) @@ -77,7 +79,7 @@ func (bs *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error } height := req.GetHeight() - f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.store.openFileByHeight(height)) + f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.openFile(height)) if err == nil { defer utils.CloseAndLog(log, "file", f) return req.BlockFromFile(ctx, f) @@ -108,7 +110,7 @@ func (bs *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { } height := req.GetHeight() - f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.store.openFileByHeight(height)) + f, err := bs.store.cache.Second().GetOrLoad(ctx, height, bs.openFile(height)) if err != nil { return 0, fmt.Errorf("get file: %w", err) } @@ -160,3 +162,14 @@ func (bs *Blockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { func (bs *Blockstore) HashOnRead(bool) { log.Warn("HashOnRead is a noop on the EDS blockstore") } + +func (bs *Blockstore) openFile(height uint64) cache.OpenFileFn { + return func(ctx context.Context) (file.EdsFile, error) { + path := bs.store.basepath + heightsPath + fmt.Sprintf("%d", height) + f, err := file.OpenOdsFile(path) + if err != nil { + return nil, fmt.Errorf("opening ODS file: %w", err) + } + return wrappedFile(f), nil + } +} diff --git a/share/store/cache/accessor_cache.go b/share/store/cache/accessor_cache.go index 9bb7992e35..c690074c23 100644 --- a/share/store/cache/accessor_cache.go +++ b/share/store/cache/accessor_cache.go @@ -34,9 +34,9 @@ type FileCache struct { // accessor is the value stored in Cache. It implements the file.EdsFile interface. It has a // reference counted so that it can be removed from the cache only when all references are released. type accessor struct { - lock sync.Mutex file.EdsFile + lock sync.Mutex height uint64 done chan struct{} refs atomic.Int32 @@ -117,9 +117,7 @@ func (bc *FileCache) GetOrLoad(ctx context.Context, key key, loader OpenFileFn) return nil, fmt.Errorf("unable to load accessor: %w", err) } - // wrap file with close once and axis cache - cacheFile := file.CloseOnceFile(file.NewCacheFile(f)) - ac = &accessor{EdsFile: cacheFile} + ac = &accessor{EdsFile: f} // Create a new accessor first to increment the reference count in it, so it cannot get evicted // from the inner lru cache before it is used. rc, err := newRefCloser(ac) @@ -177,6 +175,8 @@ func (s *accessor) removeRef() { } } +// close closes the accessor and removes it from the cache if it is not closed yet. It will block +// until all references are released or timeout is reached. 
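[Editor's note, not part of the patch] The comment above describes a reference-counted close: readers hold references, and the closer blocks until the count drains. As a standalone illustration of that pattern only (the real accessor also handles locking, cache eviction, and a timeout sourced from config, which this sketch omits or simplifies), assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// guarded models the idea: readers take a reference, the closer waits
// for the count to reach zero or gives up after a timeout.
type guarded struct {
	refs atomic.Int32
	done chan struct{}
}

func newGuarded() *guarded { return &guarded{done: make(chan struct{})} }

func (g *guarded) addRef() { g.refs.Add(1) }

func (g *guarded) release() {
	if g.refs.Add(-1) == 0 {
		close(g.done) // last reference lets close() proceed
	}
}

func (g *guarded) close(timeout time.Duration) error {
	select {
	case <-g.done:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("close timed out with %d refs held", g.refs.Load())
	}
}

func main() {
	g := newGuarded()
	g.addRef()
	go func() { time.Sleep(10 * time.Millisecond); g.release() }()
	fmt.Println(g.close(time.Second)) // <nil>: waited for the reader to finish
}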
func (s *accessor) close() error { s.lock.Lock() if s.isClosed { diff --git a/share/store/store.go b/share/store/store.go index 6ee4a95e08..cb5e4f0209 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -31,16 +31,21 @@ var ( ) // TODO(@walldiss): -// - periodically store empty heights -// - persist store stats like amount of files, file types, avg file size etc in a file -// - handle corrupted files +// - periodically persist empty heights +// - persist store stats like +// - amount of files +// - file types hist (ods/q1q4) +// - file size hist +// - amount of links hist +// - add handling of corrupted files / links // - maintain in-memory missing files index / bloom-filter to fast return for not stored files. // - lock store folder +// - add traces const ( - blocksPath = "/blocks/" - heightsPath = blocksPath + "heights/" - emptyHeights = blocksPath + "/empty_heights" + blocksPath = "/blocks/" + heightsPath = blocksPath + "heights/" + emptyHeightsFile = heightsPath + "empty_heights" defaultDirPerm = 0755 ) @@ -82,7 +87,7 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { } // ensure empty heights file - if err := ensureFile(basePath + emptyHeights); err != nil { + if err := ensureFile(basePath + emptyHeightsFile); err != nil { return nil, fmt.Errorf("ensure empty heights file: %w", err) } @@ -208,7 +213,7 @@ func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { return odsFile, nil } -func (s *Store) LinkHeight(_ context.Context, datahash share.DataHash, height uint64) error { +func (s *Store) LinkHashToHeight(_ context.Context, datahash share.DataHash, height uint64) error { lock := s.stripLock.byDatahashAndHeight(datahash, height) lock.lock() defer lock.unlock() @@ -218,15 +223,18 @@ func (s *Store) LinkHeight(_ context.Context, datahash share.DataHash, height ui return nil } - // short circuit if link exists - if has, _ := s.hasByHeight(height); has { - return nil + if has, _ := s.hasByHash(datahash); !has { + return errors.New("cannot link non-existing file") } - return s.createHeightLink(datahash, height) } func (s *Store) createHeightLink(datahash share.DataHash, height uint64) error { + // short circuit if link exists + if has, _ := s.hasByHeight(height); has { + return nil + } + filePath := s.basepath + blocksPath + datahash.String() // create hard link with height as name linkPath := s.basepath + heightsPath + strconv.Itoa(int(height)) @@ -329,15 +337,16 @@ func (s *Store) Remove(ctx context.Context, height uint64) error { } func (s *Store) remove(height uint64) error { - // short circuit if file not exists f, err := s.getByHeight(height) if err != nil { + // short circuit if file not exists if errors.Is(err, ErrNotFound) { return nil } return fmt.Errorf("getting by height: %w", err) } + // close file to release the reference in the cache if err = f.Close(); err != nil { return fmt.Errorf("closing file on removal: %w", err) } @@ -369,19 +378,15 @@ func (s *Store) remove(height uint64) error { func fileLoader(f file.EdsFile) cache.OpenFileFn { return func(ctx context.Context) (file.EdsFile, error) { - return f, nil + return wrappedFile(f), nil } } -func (s *Store) openFileByHeight(height uint64) cache.OpenFileFn { - return func(ctx context.Context) (file.EdsFile, error) { - path := s.basepath + heightsPath + fmt.Sprintf("%d", height) - f, err := file.OpenOdsFile(path) - if err != nil { - return nil, fmt.Errorf("opening ODS file: %w", err) - } - return f, nil - } +func wrappedFile(f file.EdsFile) file.EdsFile { + withCache := 
file.NewCacheFile(f) + closedOnce := file.CloseOnceFile(withCache) + sanityChecked := file.NewValidatingFile(closedOnce) + return sanityChecked } func ensureFolder(path string) error { @@ -441,7 +446,7 @@ func linksCount(path string) (int, error) { } func (s *Store) storeEmptyHeights() error { - file, err := os.OpenFile(s.basepath+emptyHeights, os.O_WRONLY, os.ModePerm) + file, err := os.OpenFile(s.basepath+emptyHeightsFile, os.O_WRONLY, os.ModePerm) if err != nil { return fmt.Errorf("opening empty heights file: %w", err) } @@ -456,7 +461,7 @@ func (s *Store) storeEmptyHeights() error { } func loadEmptyHeights(basepath string) (map[uint64]struct{}, error) { - file, err := os.Open(basepath + emptyHeights) + file, err := os.Open(basepath + emptyHeightsFile) if err != nil { return nil, fmt.Errorf("opening empty heights file: %w", err) } From db97ae6b5bf22a79ed8601e3a12ff01c1d1d248f Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 6 Mar 2024 17:12:58 +0400 Subject: [PATCH 097/132] store size and datahash in close_once_file --- .../{file_closer.go => close_once_file.go} | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) rename share/store/file/{file_closer.go => close_once_file.go} (83%) diff --git a/share/store/file/file_closer.go b/share/store/file/close_once_file.go similarity index 83% rename from share/store/file/file_closer.go rename to share/store/file/close_once_file.go index 15b853dec7..8943b7f1a3 100644 --- a/share/store/file/file_closer.go +++ b/share/store/file/close_once_file.go @@ -16,18 +16,24 @@ var _ EdsFile = (*closeOnceFile)(nil) var errFileClosed = errors.New("file closed") type closeOnceFile struct { - f EdsFile - closed atomic.Bool + f EdsFile + size int + datahash share.DataHash + closed atomic.Bool } func CloseOnceFile(f EdsFile) *closeOnceFile { - return &closeOnceFile{f: f} + return &closeOnceFile{ + f: f, + size: f.Size(), + datahash: f.DataHash(), + } } func (c *closeOnceFile) Close() error { if !c.closed.Swap(true) { err := c.f.Close() - // release reference to the file to allow GC to collect it + // release reference to the file to allow GC to collect all resources associated with it c.f = nil return err } @@ -42,17 +48,11 @@ func (c *closeOnceFile) Reader() (io.Reader, error) { } func (c *closeOnceFile) Size() int { - if c.closed.Load() { - return 0 - } - return c.f.Size() + return c.size } func (c *closeOnceFile) DataHash() share.DataHash { - if c.closed.Load() { - return nil - } - return c.f.DataHash() + return c.datahash } func (c *closeOnceFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { From f2d4704d28b47e95e9efa2b05cddfecd88186619 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 6 Mar 2024 17:13:12 +0400 Subject: [PATCH 098/132] minor cleanup and renames --- share/shwap/shwap_test.go | 9 +++++---- share/store/file/{eds_file.go => file.go} | 1 + share/store/file/file_header.go | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) rename share/store/file/{eds_file.go => file.go} (92%) diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index 4860f9521b..e259b2ba1e 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -3,10 +3,6 @@ package shwap import ( "context" "fmt" - "github.com/celestiaorg/celestia-node/share/store/file" - "github.com/celestiaorg/rsmt2d" - blocks "github.com/ipfs/go-block-format" - ipld "github.com/ipfs/go-ipld-format" "testing" "time" @@ -15,16 +11,21 @@ import ( "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/exchange" 
"github.com/ipfs/boxo/routing/offline" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" + ipld "github.com/ipfs/go-ipld-format" record "github.com/libp2p/go-libp2p-record" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/store/file" ) // TestSampleRoundtripGetBlock tests full protocol round trip of: diff --git a/share/store/file/eds_file.go b/share/store/file/file.go similarity index 92% rename from share/store/file/eds_file.go rename to share/store/file/file.go index 4ccf879d2f..67273caadb 100644 --- a/share/store/file/eds_file.go +++ b/share/store/file/file.go @@ -13,6 +13,7 @@ import ( var log = logging.Logger("store/file") +// TODO: add validation of input parameters for Share, AxisHalf and Data methods. type EdsFile interface { io.Closer // Reader returns binary reader for the file. diff --git a/share/store/file/file_header.go b/share/store/file/file_header.go index 287281a628..080d29bedb 100644 --- a/share/store/file/file_header.go +++ b/share/store/file/file_header.go @@ -18,7 +18,7 @@ type Header struct { squareSize uint16 // TODO(@walldiss) store all heights in the header? - //height uint64 + //heightы []uint64 datahash share.DataHash } From dfe79fd6b26e85860322b62a62f22b6697ab98d7 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 6 Mar 2024 17:29:06 +0400 Subject: [PATCH 099/132] add comment how server side shwap requests validation should work --- share/store/blockstore.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/share/store/blockstore.go b/share/store/blockstore.go index 4cc1c438c4..77609b7ae9 100644 --- a/share/store/blockstore.go +++ b/share/store/blockstore.go @@ -19,6 +19,12 @@ import ( "github.com/celestiaorg/celestia-node/share/store/file" ) +//TODO(@walldiss): blockstore is now able to identify invalid cids(requests) by handling file.ErrOutOfBounds +// err. Ideally this case should lead to some penalty for the peer that sent the invalid request. The proper +// place for this logic is in the bitswap protocol, but it's not designed to handle such cases. It forces us +// to handle this case in the blockstore level. For now, we just log the error and return an error to the +// caller. We should revisit this issue and find a proper solution. + var _ bstore.Blockstore = (*Blockstore)(nil) var ( From e6c64df8ef2ab75c61d2cc7535893af74f5553e1 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 14 Mar 2024 17:02:57 +0400 Subject: [PATCH 100/132] iterate shrex-sub version --- share/p2p/shrexsub/pubsub.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/p2p/shrexsub/pubsub.go b/share/p2p/shrexsub/pubsub.go index ed713b4614..64d7239c63 100644 --- a/share/p2p/shrexsub/pubsub.go +++ b/share/p2p/shrexsub/pubsub.go @@ -17,7 +17,7 @@ var log = logging.Logger("shrex-sub") // pubsubTopic hardcodes the name of the EDS floodsub topic with the provided networkID. func pubsubTopicID(networkID string) string { - return fmt.Sprintf("%s/eds-sub/v0.1.0", networkID) + return fmt.Sprintf("%s/eds-sub/v0.2.0", networkID) } // ValidatorFn is an injectable func and governs EDS notification msg validity. 
From 25250c8499227581382371a44754272a4f38ad8d Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 14 Mar 2024 17:46:46 +0400 Subject: [PATCH 101/132] version bitswap --- nodebuilder/p2p/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/p2p/bitswap.go b/nodebuilder/p2p/bitswap.go index 0ea33cf683..f659079b9e 100644 --- a/nodebuilder/p2p/bitswap.go +++ b/nodebuilder/p2p/bitswap.go @@ -97,5 +97,5 @@ type bitSwapParams struct { } func protocolID(network Network) protocol.ID { - return protocol.ID(fmt.Sprintf("/celestia/%s", network)) + return protocol.ID(fmt.Sprintf("/celestia/%s/v0.0.1", network)) } From 6e654df0d46ce03d612420cb6b7171cbbb03d5e1 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 14 Mar 2024 19:23:51 +0400 Subject: [PATCH 102/132] add share size validation to readShares --- share/store/file/square.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/share/store/file/square.go b/share/store/file/square.go index 02125b35f4..bc5d4ed299 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -40,8 +40,8 @@ func readShares(shareSize, edsSize int, r io.Reader) (square, error) { // get pre-allocated square and buffer from memPools square := memPools.get(odsLn).square() - // TODO(@walldiss): find proper size for buffer - br := bufio.NewReader(r) + // TODO(@walldiss): run benchmark to find optimal size for buffer + br := bufio.NewReaderSize(r, 4096) var total int log.Info("start reading ods", "ods size", odsLn, "share size", shareSize) for i := 0; i < odsLn; i++ { @@ -50,6 +50,9 @@ func readShares(shareSize, edsSize int, r io.Reader) (square, error) { if err != nil { return nil, fmt.Errorf("reading share: %w, bytes read: %v", err, total+n) } + if n != shareSize { + return nil, fmt.Errorf("share size mismatch: expected %v, got %v", shareSize, n) + } total += n } } From 3e91d02b1a94b31d8960b67a437d3c612b16a358 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 18 Mar 2024 14:26:04 +0400 Subject: [PATCH 103/132] bump discovery dht tag version --- nodebuilder/share/constructors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index dcd91e3f66..4f79313112 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -14,7 +14,7 @@ import ( const ( // fullNodesTag is the tag used to identify full nodes in the discovery service. 
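[Editor's note, not part of the patch] Returning to the readShares hunk in patch 102 above: the explicit n != shareSize guard protects against a short read silently shifting share boundaries. A self-contained sketch of the same guarantee built on io.ReadFull, which by contract either fills the buffer completely or returns an error; the 4096-byte reader size matches the value the patch picks as a placeholder pending benchmarks:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// readFixed reads count records of exactly recSize bytes each.
// io.ReadFull never returns a short read without an error, so a
// truncated stream surfaces immediately instead of corrupting record
// boundaries.
func readFixed(r io.Reader, recSize, count int) ([][]byte, error) {
	br := bufio.NewReaderSize(r, 4096)
	out := make([][]byte, count)
	for i := range out {
		out[i] = make([]byte, recSize)
		if _, err := io.ReadFull(br, out[i]); err != nil {
			return nil, fmt.Errorf("record %d: %w", i, err)
		}
	}
	return out, nil
}

func main() {
	stream := bytes.Repeat([]byte{0xAB}, 512*4)
	recs, err := readFixed(bytes.NewReader(stream), 512, 4)
	fmt.Println(len(recs), err) // 4 <nil>

	_, err = readFixed(bytes.NewReader(stream[:100]), 512, 4)
	fmt.Println(err) // record 0: unexpected EOF
}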
- fullNodesTag = "full" + fullNodesTag = "full/v0.0.1" ) func newDiscovery(cfg *disc.Parameters, From 777564d8bbef50066274ad7318e920fdde69140a Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 20 Mar 2024 21:01:01 +0400 Subject: [PATCH 104/132] fix validation file test --- ...alidationg_file_test.go => validating_file_test.go} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename share/store/file/{validationg_file_test.go => validating_file_test.go} (92%) diff --git a/share/store/file/validationg_file_test.go b/share/store/file/validating_file_test.go similarity index 92% rename from share/store/file/validationg_file_test.go rename to share/store/file/validating_file_test.go index 1e034a99d5..903240c3ff 100644 --- a/share/store/file/validationg_file_test.go +++ b/share/store/file/validating_file_test.go @@ -24,15 +24,15 @@ func TestValidatingFile_Share(t *testing.T) { {"ValidIndices", 3, 2, 4, false}, {"OutOfBoundsX", 8, 3, 4, true}, {"OutOfBoundsY", 3, 8, 4, true}, - {"NegativeX", -1, 4, 6, true}, - {"NegativeY", 3, -1, 6, true}, + {"NegativeX", -1, 4, 8, true}, + {"NegativeY", 3, -1, 8, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { eds := edstest.RandEDS(t, tt.odsSize) file := &MemFile{Eds: eds} - vf := NewValidatingFile(file) + vf := WithValidation(file) _, err := vf.Share(context.Background(), tt.x, tt.y) if tt.expectFail { @@ -61,7 +61,7 @@ func TestValidatingFile_AxisHalf(t *testing.T) { t.Run(tt.name, func(t *testing.T) { eds := edstest.RandEDS(t, tt.odsSize) file := &MemFile{Eds: eds} - vf := NewValidatingFile(file) + vf := WithValidation(file) _, err := vf.AxisHalf(context.Background(), tt.axisType, tt.axisIdx) if tt.expectFail { @@ -89,7 +89,7 @@ func TestValidatingFile_Data(t *testing.T) { t.Run(tt.name, func(t *testing.T) { eds := edstest.RandEDS(t, tt.odsSize) file := &MemFile{Eds: eds} - vf := NewValidatingFile(file) + vf := WithValidation(file) ns := sharetest.RandV0Namespace() _, err := vf.Data(context.Background(), ns, tt.rowIdx) From 743fed2a33d822696d90e503b2f7f358495a14c5 Mon Sep 17 00:00:00 2001 From: Wondertan Date: Tue, 26 Mar 2024 23:52:10 +0100 Subject: [PATCH 105/132] aligh shwap with the spec --- share/shwap/pb/shwap_pb.pb.go | 151 +++++++++++++++++----------------- share/shwap/pb/shwap_pb.proto | 12 +-- share/shwap/row_id.go | 17 ++-- share/shwap/sample.go | 4 +- share/shwap/sample_id.go | 6 +- share/shwap/shwap.go | 30 +++---- 6 files changed, 107 insertions(+), 113 deletions(-) diff --git a/share/shwap/pb/shwap_pb.pb.go b/share/shwap/pb/shwap_pb.pb.go index bb72bed41a..a0541e8172 100644 --- a/share/shwap/pb/shwap_pb.pb.go +++ b/share/shwap/pb/shwap_pb.pb.go @@ -23,28 +23,28 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type SampleProofType int32 +type ProofType int32 const ( - SampleProofType_RowSampleProofType SampleProofType = 0 - SampleProofType_ColSampleProofType SampleProofType = 1 + ProofType_RowProofType ProofType = 0 + ProofType_ColProofType ProofType = 1 ) -var SampleProofType_name = map[int32]string{ - 0: "RowSampleProofType", - 1: "ColSampleProofType", +var ProofType_name = map[int32]string{ + 0: "RowProofType", + 1: "ColProofType", } -var SampleProofType_value = map[string]int32{ - "RowSampleProofType": 0, - "ColSampleProofType": 1, +var ProofType_value = map[string]int32{ + "RowProofType": 0, + "ColProofType": 1, } -func (x SampleProofType) String() string { - return proto.EnumName(SampleProofType_name, int32(x)) +func (x ProofType) String() string { + return proto.EnumName(ProofType_name, int32(x)) } -func (SampleProofType) EnumDescriptor() ([]byte, []int) { +func (ProofType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_fdfe0676a85dc852, []int{0} } @@ -101,10 +101,10 @@ func (m *Row) GetRowHalf() [][]byte { } type Sample struct { - SampleId []byte `protobuf:"bytes,1,opt,name=sample_id,json=sampleId,proto3" json:"sample_id,omitempty"` - SampleType SampleProofType `protobuf:"varint,2,opt,name=sample_type,json=sampleType,proto3,enum=SampleProofType" json:"sample_type,omitempty"` - SampleShare []byte `protobuf:"bytes,3,opt,name=sample_share,json=sampleShare,proto3" json:"sample_share,omitempty"` - SampleProof *pb.Proof `protobuf:"bytes,4,opt,name=sample_proof,json=sampleProof,proto3" json:"sample_proof,omitempty"` + SampleId []byte `protobuf:"bytes,1,opt,name=sample_id,json=sampleId,proto3" json:"sample_id,omitempty"` + SampleShare []byte `protobuf:"bytes,2,opt,name=sample_share,json=sampleShare,proto3" json:"sample_share,omitempty"` + SampleProof *pb.Proof `protobuf:"bytes,3,opt,name=sample_proof,json=sampleProof,proto3" json:"sample_proof,omitempty"` + ProofType ProofType `protobuf:"varint,4,opt,name=proof_type,json=proofType,proto3,enum=ProofType" json:"proof_type,omitempty"` } func (m *Sample) Reset() { *m = Sample{} } @@ -147,13 +147,6 @@ func (m *Sample) GetSampleId() []byte { return nil } -func (m *Sample) GetSampleType() SampleProofType { - if m != nil { - return m.SampleType - } - return SampleProofType_RowSampleProofType -} - func (m *Sample) GetSampleShare() []byte { if m != nil { return m.SampleShare @@ -168,6 +161,13 @@ func (m *Sample) GetSampleProof() *pb.Proof { return nil } +func (m *Sample) GetProofType() ProofType { + if m != nil { + return m.ProofType + } + return ProofType_RowProofType +} + type Data struct { DataId []byte `protobuf:"bytes,1,opt,name=data_id,json=dataId,proto3" json:"data_id,omitempty"` DataShares [][]byte `protobuf:"bytes,2,rep,name=data_shares,json=dataShares,proto3" json:"data_shares,omitempty"` @@ -229,7 +229,7 @@ func (m *Data) GetDataProof() *pb.Proof { } func init() { - proto.RegisterEnum("SampleProofType", SampleProofType_name, SampleProofType_value) + proto.RegisterEnum("ProofType", ProofType_name, ProofType_value) proto.RegisterType((*Row)(nil), "Row") proto.RegisterType((*Sample)(nil), "Sample") proto.RegisterType((*Data)(nil), "Data") @@ -238,28 +238,27 @@ func init() { func init() { proto.RegisterFile("share/shwap/pb/shwap_pb.proto", fileDescriptor_fdfe0676a85dc852) } var fileDescriptor_fdfe0676a85dc852 = []byte{ - // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4d, 0x4f, 0xc2, 
0x40, - 0x14, 0xec, 0x52, 0x2c, 0xf0, 0x20, 0x40, 0x36, 0x51, 0x57, 0x8d, 0x6b, 0xe5, 0xd4, 0x78, 0x28, - 0x11, 0x0f, 0x9e, 0xfd, 0x38, 0xc8, 0xcd, 0x14, 0xef, 0x64, 0x9b, 0x96, 0x60, 0x52, 0xb3, 0x9b, - 0x6d, 0x93, 0x0d, 0xff, 0xc2, 0x7f, 0xe2, 0xdf, 0xf0, 0xc8, 0xd1, 0xa3, 0x81, 0x3f, 0x62, 0xf6, - 0x75, 0x95, 0x84, 0x78, 0x9b, 0x99, 0x7d, 0xf3, 0xe6, 0x4d, 0x16, 0xce, 0xcb, 0xa5, 0xd0, 0xf9, - 0xb8, 0x5c, 0x1a, 0xa1, 0xc6, 0x2a, 0xad, 0xc1, 0x5c, 0xa5, 0xb1, 0xd2, 0xb2, 0x92, 0xa7, 0x7d, - 0x95, 0x8e, 0x95, 0x96, 0x72, 0x51, 0xf3, 0xd1, 0x2d, 0xf8, 0x89, 0x34, 0xf4, 0x10, 0x02, 0x2d, - 0xcd, 0xfc, 0x35, 0x63, 0x24, 0x24, 0x51, 0x2f, 0x39, 0xd0, 0xd2, 0x4c, 0x33, 0x7a, 0x02, 0x6d, - 0x2b, 0x2f, 0x45, 0xb1, 0x60, 0x8d, 0xd0, 0x8f, 0x7a, 0x49, 0x4b, 0x4b, 0xf3, 0x24, 0x8a, 0xc5, - 0xe8, 0x83, 0x40, 0x30, 0x13, 0x6f, 0xaa, 0xc8, 0xe9, 0x19, 0x74, 0x4a, 0x44, 0x3b, 0x7f, 0xbb, - 0x16, 0xa6, 0x19, 0xbd, 0x86, 0xae, 0x7b, 0xac, 0x56, 0x2a, 0x67, 0x8d, 0x90, 0x44, 0xfd, 0xc9, - 0x30, 0xae, 0xad, 0xcf, 0xf6, 0x92, 0x97, 0x95, 0xca, 0x13, 0xa8, 0x87, 0x2c, 0xa6, 0x97, 0xd0, - 0x73, 0x16, 0xec, 0xc2, 0x7c, 0x5c, 0xe9, 0xd6, 0xcc, 0xac, 0x44, 0x27, 0x7f, 0x23, 0x58, 0x86, - 0x35, 0x43, 0x12, 0x75, 0x27, 0x83, 0xd8, 0x55, 0x4b, 0x63, 0xdc, 0xfc, 0xeb, 0x41, 0x32, 0x52, - 0xd0, 0x7c, 0x14, 0x95, 0xa0, 0xc7, 0xd0, 0xca, 0x44, 0x25, 0x76, 0xc7, 0x06, 0x96, 0x4e, 0x33, - 0x7a, 0x01, 0x5d, 0x7c, 0xc0, 0xd4, 0xd2, 0x15, 0x06, 0x2b, 0x61, 0x68, 0x49, 0x63, 0x40, 0xe6, - 0x32, 0xfd, 0xff, 0x33, 0x3b, 0x76, 0x04, 0xe1, 0xd5, 0x1d, 0x0c, 0xf6, 0x7a, 0xd2, 0x23, 0xa0, - 0x89, 0x34, 0x7b, 0xea, 0xd0, 0xb3, 0xfa, 0x83, 0x2c, 0xf6, 0x75, 0x72, 0xcf, 0x3e, 0x37, 0x9c, - 0xac, 0x37, 0x9c, 0x7c, 0x6f, 0x38, 0x79, 0xdf, 0x72, 0x6f, 0xbd, 0xe5, 0xde, 0xd7, 0x96, 0x7b, - 0x69, 0x80, 0x1f, 0x78, 0xf3, 0x13, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x0e, 0x39, 0xc1, 0xf1, 0x01, - 0x00, 0x00, + // 314 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xc1, 0x4a, 0xf3, 0x40, + 0x14, 0x85, 0x33, 0x6d, 0xff, 0xb4, 0xb9, 0x0d, 0xfd, 0xcb, 0x80, 0x38, 0x2a, 0x8e, 0xb1, 0xab, + 0xe8, 0x22, 0x81, 0xba, 0x70, 0xaf, 0x2e, 0xec, 0x4e, 0x52, 0xf7, 0x65, 0x42, 0x52, 0x2a, 0x44, + 0x66, 0x98, 0x04, 0x86, 0xbe, 0x85, 0x6f, 0xe1, 0xab, 0xb8, 0xec, 0xd2, 0xa5, 0xb4, 0x2f, 0x22, + 0x73, 0x93, 0x26, 0x3b, 0x77, 0xe7, 0x7c, 0xe7, 0x0e, 0x7c, 0x21, 0x70, 0x59, 0x6e, 0x84, 0xce, + 0xe3, 0x72, 0x63, 0x84, 0x8a, 0x55, 0x5a, 0x87, 0x95, 0x4a, 0x23, 0xa5, 0x65, 0x25, 0xcf, 0x27, + 0x2a, 0x8d, 0x95, 0x96, 0x72, 0x5d, 0xf7, 0xd9, 0x3d, 0xf4, 0x13, 0x69, 0xe8, 0x09, 0xb8, 0x5a, + 0x9a, 0xd5, 0x5b, 0xc6, 0x48, 0x40, 0x42, 0x3f, 0xf9, 0xa7, 0xa5, 0x59, 0x64, 0xf4, 0x0c, 0x46, + 0x16, 0x6f, 0x44, 0xb1, 0x66, 0xbd, 0xa0, 0x1f, 0xfa, 0xc9, 0x50, 0x4b, 0xf3, 0x2c, 0x8a, 0xf5, + 0xec, 0x93, 0x80, 0xbb, 0x14, 0xef, 0xaa, 0xc8, 0xe9, 0x05, 0x78, 0x25, 0xa6, 0xee, 0xfd, 0xa8, + 0x06, 0x8b, 0x8c, 0x5e, 0x83, 0xdf, 0x8c, 0x28, 0xc6, 0x7a, 0xb8, 0x8f, 0x6b, 0xb6, 0xb4, 0x88, + 0xce, 0xdb, 0x13, 0x34, 0x63, 0xfd, 0x80, 0x84, 0xe3, 0xf9, 0xff, 0xa8, 0xf1, 0x4c, 0xa3, 0x17, + 0x1b, 0x8e, 0x6f, 0xb0, 0xd0, 0x1b, 0x00, 0x9c, 0x57, 0xd5, 0x56, 0xe5, 0x6c, 0x10, 0x90, 0x70, + 0x32, 0x87, 0xfa, 0xf0, 0x75, 0xab, 0xf2, 0xc4, 0x53, 0xc7, 0x38, 0x53, 0x30, 0x78, 0x12, 0x95, + 0xa0, 0xa7, 0x30, 0xcc, 0x44, 0x25, 0x3a, 0x49, 0xd7, 0xd6, 0x45, 0x46, 0xaf, 0x60, 0x8c, 0x03, + 0x0a, 0x96, 0xcd, 0x87, 0x82, 0x45, 0xe8, 0x57, 0xd2, 0x08, 0xb0, 0xfd, 0xad, 0xe7, 0xd9, 0x13, + 0x8c, 0xb7, 0x31, 0x78, 0xad, 0x09, 0x9d, 0x82, 0x9f, 
0x48, 0xd3, 0xf6, 0xa9, 0x63, 0xc9, 0xa3, + 0x2c, 0x3a, 0x42, 0x1e, 0xd8, 0xd7, 0x9e, 0x93, 0xdd, 0x9e, 0x93, 0x9f, 0x3d, 0x27, 0x1f, 0x07, + 0xee, 0xec, 0x0e, 0xdc, 0xf9, 0x3e, 0x70, 0x27, 0x75, 0xf1, 0x37, 0xdd, 0xfd, 0x06, 0x00, 0x00, + 0xff, 0xff, 0x3b, 0x95, 0x2f, 0xb8, 0xd7, 0x01, 0x00, 0x00, } func (m *Row) Marshal() (dAtA []byte, err error) { @@ -321,6 +320,11 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ProofType != 0 { + i = encodeVarintShwapPb(dAtA, i, uint64(m.ProofType)) + i-- + dAtA[i] = 0x20 + } if m.SampleProof != nil { { size, err := m.SampleProof.MarshalToSizedBuffer(dAtA[:i]) @@ -331,19 +335,14 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintShwapPb(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } if len(m.SampleShare) > 0 { i -= len(m.SampleShare) copy(dAtA[i:], m.SampleShare) i = encodeVarintShwapPb(dAtA, i, uint64(len(m.SampleShare))) i-- - dAtA[i] = 0x1a - } - if m.SampleType != 0 { - i = encodeVarintShwapPb(dAtA, i, uint64(m.SampleType)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } if len(m.SampleId) > 0 { i -= len(m.SampleId) @@ -446,9 +445,6 @@ func (m *Sample) Size() (n int) { if l > 0 { n += 1 + l + sovShwapPb(uint64(l)) } - if m.SampleType != 0 { - n += 1 + sovShwapPb(uint64(m.SampleType)) - } l = len(m.SampleShare) if l > 0 { n += 1 + l + sovShwapPb(uint64(l)) @@ -457,6 +453,9 @@ func (m *Sample) Size() (n int) { l = m.SampleProof.Size() n += 1 + l + sovShwapPb(uint64(l)) } + if m.ProofType != 0 { + n += 1 + sovShwapPb(uint64(m.ProofType)) + } return n } @@ -669,25 +668,6 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) - } - m.SampleType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShwapPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SampleType |= SampleProofType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SampleShare", wireType) } @@ -721,7 +701,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { m.SampleShare = []byte{} } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SampleProof", wireType) } @@ -757,6 +737,25 @@ func (m *Sample) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofType", wireType) + } + m.ProofType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwapPb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProofType |= ProofType(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipShwapPb(dAtA[iNdEx:]) diff --git a/share/shwap/pb/shwap_pb.proto b/share/shwap/pb/shwap_pb.proto index 2cc68d2333..ab5b161ca5 100644 --- a/share/shwap/pb/shwap_pb.proto +++ b/share/shwap/pb/shwap_pb.proto @@ -7,16 +7,16 @@ message Row { repeated bytes row_half = 2; } -enum SampleProofType { - RowSampleProofType = 0; - ColSampleProofType = 1; +enum ProofType { + RowProofType = 0; + ColProofType = 1; } message Sample { bytes sample_id = 1; - SampleProofType sample_type = 2; - bytes sample_share = 3; - proof.pb.Proof sample_proof = 4; + bytes sample_share = 2; + proof.pb.Proof 
sample_proof = 3; + ProofType proof_type = 4; } message Data { diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index 6df32f8b33..5d947cd64c 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -15,12 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/share/store/file" ) -//TODO(@walldiss): maybe move into separate subpkg? - -// TODO: -// * Remove RowHash -// * Change validation -// * Remove IDs from responses +// TODO(@walldiss): maybe move into separate subpkg? // RowIDSize is the size of the RowID in bytes const RowIDSize = 10 @@ -77,17 +72,17 @@ func (rid RowID) Cid() cid.Cid { // * Its size is not deterministic which is required for IPLD. // * No support for uint16 func (rid RowID) MarshalTo(data []byte) (int, error) { - //TODO:(@walldiss): this works, only if data underlying array was preallocated with + // TODO:(@walldiss): this works, only if data underlying array was preallocated with // enough size. Otherwise Caller might not see the changes. - data = binary.LittleEndian.AppendUint64(data, rid.Height) - data = binary.LittleEndian.AppendUint16(data, rid.RowIndex) + data = binary.BigEndian.AppendUint64(data, rid.Height) + data = binary.BigEndian.AppendUint16(data, rid.RowIndex) return RowIDSize, nil } // UnmarshalFrom decodes RowID from given byte slice. func (rid *RowID) UnmarshalFrom(data []byte) (int, error) { - rid.Height = binary.LittleEndian.Uint64(data) - rid.RowIndex = binary.LittleEndian.Uint16(data[8:]) + rid.Height = binary.BigEndian.Uint64(data) + rid.RowIndex = binary.BigEndian.Uint16(data[8:]) return RowIDSize, nil } diff --git a/share/shwap/sample.go b/share/shwap/sample.go index 5330ecfc5b..fa4ea58c0c 100644 --- a/share/shwap/sample.go +++ b/share/shwap/sample.go @@ -136,7 +136,7 @@ func (s *Sample) MarshalBinary() ([]byte, error) { return (&shwappb.Sample{ SampleId: id, - SampleType: shwappb.SampleProofType(s.SampleProofType), + ProofType: shwappb.ProofType(s.SampleProofType), SampleProof: proof, SampleShare: s.SampleShare, }).Marshal() @@ -154,7 +154,7 @@ func (s *Sample) UnmarshalBinary(data []byte) error { return err } - s.SampleProofType = SampleProofType(proto.SampleType) + s.SampleProofType = SampleProofType(proto.ProofType) s.SampleProof = nmt.ProtoToProof(*proto.SampleProof) s.SampleShare = proto.SampleShare return nil diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index dc1df87302..5964a5d07d 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -13,7 +13,7 @@ import ( "github.com/celestiaorg/celestia-node/share/store/file" ) -//TODO(@walldiss): maybe move into separate subpkg? +// TODO(@walldiss): maybe move into separate subpkg? 
// SampleIDSize is the size of the SampleID in bytes const SampleIDSize = RowIDSize + 2 @@ -82,7 +82,7 @@ func (sid SampleID) MarshalBinary() ([]byte, error) { return nil, err } data = data[:n] - data = binary.LittleEndian.AppendUint16(data, sid.ShareIndex) + data = binary.BigEndian.AppendUint16(data, sid.ShareIndex) return data, nil } @@ -96,7 +96,7 @@ func (sid *SampleID) UnmarshalBinary(data []byte) error { return err } data = data[n:] - sid.ShareIndex = binary.LittleEndian.Uint16(data) + sid.ShareIndex = binary.BigEndian.Uint16(data) return nil } diff --git a/share/shwap/shwap.go b/share/shwap/shwap.go index 66fb819349..ccc51cce92 100644 --- a/share/shwap/shwap.go +++ b/share/shwap/shwap.go @@ -22,19 +22,19 @@ func NewBlockService(b blockstore.Blockstore, ex exchange.Interface) blockservic var log = logger.Logger("shwap") const ( - // sampleCodec is a CID codec used for share sampling Bitswap requests over Namespaced - // Merkle Tree. - sampleCodec = 0x7800 - - // sampleMultihashCode is the multihash code for share sampling multihash function. - sampleMultihashCode = 0x7801 - // rowCodec is a CID codec used for row Bitswap requests over Namespaced Merkle // Tree. - rowCodec = 0x7810 + rowCodec = 0x7800 // rowMultihashCode is the multihash code for custom axis sampling multihash function. - rowMultihashCode = 0x7811 + rowMultihashCode = 0x7801 + + // sampleCodec is a CID codec used for share sampling Bitswap requests over Namespaced + // Merkle Tree. + sampleCodec = 0x7810 + + // sampleMultihashCode is the multihash code for share sampling multihash function. + sampleMultihashCode = 0x7811 // dataCodec is a CID codec used for data Bitswap requests over Namespaced Merkle Tree. dataCodec = 0x7820 @@ -52,12 +52,12 @@ var ( func init() { // Register hashers for new multihashes - mh.Register(sampleMultihashCode, func() hash.Hash { - return &SampleHasher{} - }) mh.Register(rowMultihashCode, func() hash.Hash { return &RowHasher{} }) + mh.Register(sampleMultihashCode, func() hash.Hash { + return &SampleHasher{} + }) mh.Register(dataMultihashCode, func() hash.Hash { return &DataHasher{} }) @@ -94,7 +94,7 @@ type allowlist struct{} func (a allowlist) IsAllowed(code uint64) bool { // we disable all codes except home-baked code switch code { - case sampleMultihashCode, rowMultihashCode, dataMultihashCode: + case rowMultihashCode, sampleMultihashCode, dataMultihashCode: return true } return false @@ -109,13 +109,13 @@ func validateCID(cid cid.Cid) error { switch prefix.Codec { default: return fmt.Errorf("unsupported codec %d", prefix.Codec) - case sampleCodec, rowCodec, dataCodec: + case rowCodec, sampleCodec, dataCodec: } switch prefix.MhLength { default: return fmt.Errorf("unsupported multihash length %d", prefix.MhLength) - case SampleIDSize, RowIDSize, DataIDSize: + case RowIDSize, SampleIDSize, DataIDSize: } return nil From 7021639304f2e2e5297acf1c511a5a35d14ad28a Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 27 Mar 2024 18:32:27 +0400 Subject: [PATCH 106/132] use AxisHalf type in file interface --- share/store/file/axis_half.go | 68 ++++++++++++++++++++++++++++++ share/store/file/axis_half_test.go | 29 +++++++++++++ share/store/file/codec_test.go | 2 +- share/store/file/file.go | 5 +-- share/store/file/file_test.go | 38 ++++++++++++----- 5 files changed, 128 insertions(+), 14 deletions(-) create mode 100644 share/store/file/axis_half.go create mode 100644 share/store/file/axis_half_test.go diff --git a/share/store/file/axis_half.go b/share/store/file/axis_half.go new file mode 100644 index 
0000000000..2737839a22 --- /dev/null +++ b/share/store/file/axis_half.go @@ -0,0 +1,68 @@ +package file + +import ( + "fmt" + "github.com/celestiaorg/celestia-node/share" +) + +type AxisHalf struct { + Shares []share.Share + IsParity bool +} + +func (a AxisHalf) Extended() ([]share.Share, error) { + if a.IsParity { + return reconstructShares(codec, a.Shares) + } + return extendShares(codec, a.Shares) +} + +func extendShares(codec Codec, original []share.Share) ([]share.Share, error) { + if len(original) == 0 { + return nil, fmt.Errorf("original shares are empty") + } + + sqLen := len(original) * 2 + shareSize := len(original[0]) + + enc, err := codec.Encoder(sqLen) + if err != nil { + return nil, fmt.Errorf("encoder: %w", err) + } + + shares := make([]share.Share, sqLen) + copy(shares, original) + for i := len(original); i < len(shares); i++ { + shares[i] = make([]byte, shareSize) + } + + err = enc.Encode(shares) + if err != nil { + return nil, fmt.Errorf("encoding: %w", err) + } + return shares, nil +} + +func reconstructShares(codec Codec, parity []share.Share) ([]share.Share, error) { + if len(parity) == 0 { + return nil, fmt.Errorf("parity shares are empty") + } + + sqLen := len(parity) * 2 + + enc, err := codec.Encoder(sqLen) + if err != nil { + return nil, fmt.Errorf("encoder: %w", err) + } + + shares := make([]share.Share, sqLen) + for i := sqLen / 2; i < sqLen; i++ { + shares[i] = parity[i-sqLen/2] + } + + err = enc.Reconstruct(shares) + if err != nil { + return nil, fmt.Errorf("reconstructing: %w", err) + } + return shares, nil +} diff --git a/share/store/file/axis_half_test.go b/share/store/file/axis_half_test.go new file mode 100644 index 0000000000..ac4d56fd75 --- /dev/null +++ b/share/store/file/axis_half_test.go @@ -0,0 +1,29 @@ +package file + +import ( + "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/stretchr/testify/require" + "testing" +) + +func TestExtendAxisHalf(t *testing.T) { + shares := sharetest.RandShares(t, 16) + + original := AxisHalf{ + Shares: shares, + IsParity: false, + } + + extended, err := original.Extended() + require.NoError(t, err) + + parity := AxisHalf{ + Shares: extended[len(shares):], + IsParity: true, + } + + parityExtended, err := parity.Extended() + require.NoError(t, err) + + require.Equal(t, extended, parityExtended) +} diff --git a/share/store/file/codec_test.go b/share/store/file/codec_test.go index 2a214165c8..d6fdbb3045 100644 --- a/share/store/file/codec_test.go +++ b/share/store/file/codec_test.go @@ -74,7 +74,7 @@ func newShards(b require.TestingT, size int, fillParity bool) [][]byte { copy(shards, original) if fillParity { - // fill with parity empty shares + // fill with parity empty Shares for j := len(original); j < len(shards); j++ { shards[j] = make([]byte, len(original[0])) } diff --git a/share/store/file/file.go b/share/store/file/file.go index 67273caadb..f77cee3d94 100644 --- a/share/store/file/file.go +++ b/share/store/file/file.go @@ -13,7 +13,6 @@ import ( var log = logging.Logger("store/file") -// TODO: add validation of input parameters for Share, AxisHalf and Data methods. type EdsFile interface { io.Closer // Reader returns binary reader for the file. @@ -24,8 +23,8 @@ type EdsFile interface { DataHash() share.DataHash // Share returns share and corresponding proof for the given axis and share index in this axis. Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) - // AxisHalf returns shares for the first half of the axis of the given type and index. 
- AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) + // AxisHalf returns Shares for the first half of the axis of the given type and index. + AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) // Data returns data for the given namespace and row index. Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) // EDS returns extended data square stored in the file. diff --git a/share/store/file/file_test.go b/share/store/file/file_test.go index ea87f919c2..e05e053095 100644 --- a/share/store/file/file_test.go +++ b/share/store/file/file_test.go @@ -21,8 +21,8 @@ import ( type createFile func(eds *rsmt2d.ExtendedDataSquare) EdsFile -func testFileShare(t *testing.T, createFile createFile, size int) { - eds := edstest.RandEDS(t, size) +func testFileShare(t *testing.T, createFile createFile, odsSize int) { + eds := edstest.RandEDS(t, odsSize) fl := createFile(eds) dah, err := share.NewRoot(eds) @@ -76,7 +76,7 @@ func testShare(t *testing.T, func testFileData(t *testing.T, createFile createFile, size int) { t.Run("included", func(t *testing.T) { - // generate EDS with random data and some shares with the same namespace + // generate EDS with random data and some Shares with the same namespace namespace := sharetest.RandV0Namespace() amount := mrand.Intn(size*size-1) + 1 eds, dah := edstest.RandEDSWithNamespace(t, namespace, amount, size) @@ -85,7 +85,7 @@ func testFileData(t *testing.T, createFile createFile, size int) { }) t.Run("not included", func(t *testing.T) { - // generate EDS with random data and some shares with the same namespace + // generate EDS with random data and some Shares with the same namespace eds := edstest.RandEDS(t, size) dah, err := share.NewRoot(eds) require.NoError(t, err) @@ -110,16 +110,25 @@ func testData(t *testing.T, f EdsFile, namespace share.Namespace, dah *share.Roo } } -func testFileAxisHalf(t *testing.T, createFile createFile, size int) { - eds := edstest.RandEDS(t, size) +func testFileAxisHalf(t *testing.T, createFile createFile, odsSize int) { + eds := edstest.RandEDS(t, odsSize) fl := createFile(eds) t.Run("single thread", func(t *testing.T) { for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { - for i := 0; i < size; i++ { + for i := 0; i < int(eds.Width()); i++ { half, err := fl.AxisHalf(context.Background(), axisType, i) require.NoError(t, err) - require.Equal(t, getAxis(eds, axisType, i)[:size], half) + require.Len(t, half.Shares, odsSize) + + var expected []share.Share + if half.IsParity { + expected = getAxis(eds, axisType, i)[odsSize:] + } else { + expected = getAxis(eds, axisType, i)[:odsSize] + } + + require.Equal(t, expected, half.Shares) } } }) @@ -127,13 +136,22 @@ func testFileAxisHalf(t *testing.T, createFile createFile, size int) { t.Run("parallel", func(t *testing.T) { wg := sync.WaitGroup{} for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { - for i := 0; i < size; i++ { + for i := 0; i < int(eds.Width()); i++ { wg.Add(1) go func(axisType rsmt2d.Axis, idx int) { defer wg.Done() half, err := fl.AxisHalf(context.Background(), axisType, idx) require.NoError(t, err) - require.Equal(t, getAxis(eds, axisType, idx)[:size], half) + require.Len(t, half.Shares, odsSize) + + var expected []share.Share + if half.IsParity { + expected = getAxis(eds, axisType, idx)[odsSize:] + } else { + expected = getAxis(eds, axisType, idx)[:odsSize] + } + + require.Equal(t, expected, half.Shares) }(axisType, i) } } From 
3e71d570e31bf45312b6f05d8a7555332e905058 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 27 Mar 2024 18:34:14 +0400 Subject: [PATCH 107/132] use AxisHalf in file implementations --- share/shwap/row_id.go | 13 +++- share/store/cache/noop.go | 4 +- share/store/file/cache_file.go | 44 ++++++----- share/store/file/close_once_file.go | 6 +- share/store/file/mem_file.go | 7 +- share/store/file/ods_file.go | 113 +++++++++++++--------------- share/store/file/ods_file_test.go | 47 ++++-------- share/store/file/validating_file.go | 16 ++-- 8 files changed, 119 insertions(+), 131 deletions(-) diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index 5d947cd64c..83b4954573 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -4,7 +4,6 @@ import ( "context" "encoding/binary" "fmt" - blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" @@ -129,7 +128,17 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc return nil, fmt.Errorf("while getting AxisHalf: %w", err) } - s := NewRow(rid, axisHalf) + shares := axisHalf.Shares + // If it's a parity axis, we need to get the left half of the shares + if axisHalf.IsParity { + axis, err := axisHalf.Extended() + if err != nil { + return nil, fmt.Errorf("while getting extended shares: %w", err) + } + shares = axis[:len(axis)/2] + } + + s := NewRow(rid, shares) blk, err := s.IPLDBlock() if err != nil { return nil, fmt.Errorf("while coverting to IPLD block: %w", err) diff --git a/share/store/cache/noop.go b/share/store/cache/noop.go index 166710ad80..6d2906d5bb 100644 --- a/share/store/cache/noop.go +++ b/share/store/cache/noop.go @@ -60,8 +60,8 @@ func (n NoopFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, e return nil, nil } -func (n NoopFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - return nil, nil +func (n NoopFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (file.AxisHalf, error) { + return file.AxisHalf{}, nil } func (n NoopFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go index 7c24d6c0ce..bdccd52e4c 100644 --- a/share/store/file/cache_file.go +++ b/share/store/file/cache_file.go @@ -25,7 +25,7 @@ type CacheFile struct { // lock protects axisCache lock sync.RWMutex - // axisCache caches the axis shares and proofs + // axisCache caches the axis Shares and proofs axisCache []map[int]inMemoryAxis // disableCache disables caching of rows for testing purposes disableCache bool @@ -48,10 +48,6 @@ func NewCacheFile(f EdsFile) *CacheFile { func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { axisType, axisIdx, shrIdx := rsmt2d.Row, y, x - if x < f.Size()/2 && y >= f.Size()/2 { - axisType, axisIdx, shrIdx = rsmt2d.Col, x, y - } - ax, err := f.axisWithProofs(ctx, axisType, axisIdx) if err != nil { return nil, err @@ -73,7 +69,7 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax return ax, nil } - // build proofs from shares and cache them + // build proofs from Shares and cache them if !ok { shrs, err := f.axis(ctx, axisType, axisIdx) if err != nil { @@ -89,7 +85,7 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax for _, shr := range ax.shares { err := tree.Push(shr) if err != nil { - return inMemoryAxis{}, fmt.Errorf("push shares: %w", err) + 
return inMemoryAxis{}, fmt.Errorf("push Shares: %w", err) } } @@ -111,40 +107,33 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax return ax, nil } -func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { +func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { // return axis from cache if possible ax, ok := f.getAxisFromCache(axisType, axisIdx) if ok { - return ax.shares[:f.Size()/2], nil + return AxisHalf{ + Shares: ax.shares[:f.Size()/2], + IsParity: false, + }, nil } // read axis from file if axis is in the first quadrant half, err := f.EdsFile.AxisHalf(ctx, axisType, axisIdx) if err != nil { - return nil, fmt.Errorf("reading axis from inner file: %w", err) + return AxisHalf{}, fmt.Errorf("reading axis from inner file: %w", err) } if !f.disableCache { - axis, err := extendShares(codec, half) + ax.shares, err = half.Extended() if err != nil { - return nil, fmt.Errorf("extending shares: %w", err) + return AxisHalf{}, fmt.Errorf("extending Shares: %w", err) } - ax.shares = axis f.storeAxisInCache(axisType, axisIdx, ax) } return half, nil } -func (f *CacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - original, err := f.AxisHalf(ctx, axisType, axisIdx) - if err != nil { - return nil, err - } - - return extendShares(codec, original) -} - func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { ax, err := f.axisWithProofs(ctx, rsmt2d.Row, rowIdx) if err != nil { @@ -153,7 +142,7 @@ func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx row, proof, err := ipld.GetSharesByNamespace(ctx, ax.proofs, ax.root, namespace, f.Size()) if err != nil { - return share.NamespacedRow{}, fmt.Errorf("shares by namespace %s for row %v: %w", namespace.String(), rowIdx, err) + return share.NamespacedRow{}, fmt.Errorf("Shares by namespace %s for row %v: %w", namespace.String(), rowIdx, err) } return share.NamespacedRow{ @@ -182,6 +171,15 @@ func (f *CacheFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) return eds, nil } +func (f *CacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + half, err := f.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return nil, err + } + + return half.Extended() +} + func (f *CacheFile) storeAxisInCache(axisType rsmt2d.Axis, axisIdx int, axis inMemoryAxis) { f.lock.Lock() defer f.lock.Unlock() diff --git a/share/store/file/close_once_file.go b/share/store/file/close_once_file.go index 8943b7f1a3..b77e9b82ff 100644 --- a/share/store/file/close_once_file.go +++ b/share/store/file/close_once_file.go @@ -22,7 +22,7 @@ type closeOnceFile struct { closed atomic.Bool } -func CloseOnceFile(f EdsFile) *closeOnceFile { +func WithClosedOnce(f EdsFile) EdsFile { return &closeOnceFile{ f: f, size: f.Size(), @@ -62,9 +62,9 @@ func (c *closeOnceFile) Share(ctx context.Context, x, y int) (*share.ShareWithPr return c.f.Share(ctx, x, y) } -func (c *closeOnceFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { +func (c *closeOnceFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { if c.closed.Load() { - return nil, errFileClosed + return AxisHalf{}, errFileClosed } return c.f.AxisHalf(ctx, axisType, axisIdx) } diff --git a/share/store/file/mem_file.go b/share/store/file/mem_file.go index 
99ccbafa1b..092aee9904 100644 --- a/share/store/file/mem_file.go +++ b/share/store/file/mem_file.go @@ -76,8 +76,11 @@ func (f *MemFile) Share( }, nil } -func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - return getAxis(f.Eds, axisType, axisIdx)[:f.Size()/2], nil +func (f *MemFile) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { + return AxisHalf{ + Shares: getAxis(f.Eds, axisType, axisIdx)[:f.Size()/2], + IsParity: false, + }, nil } func (f *MemFile) Data(_ context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { diff --git a/share/store/file/ods_file.go b/share/store/file/ods_file.go index c00f6fd07a..e805856730 100644 --- a/share/store/file/ods_file.go +++ b/share/store/file/ods_file.go @@ -109,31 +109,45 @@ func (f *OdsFile) Reader() (io.Reader, error) { return f.ods.Reader() } -func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { +func (f *OdsFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { // read axis from file if axis is in the first quadrant if axisIdx < f.Size()/2 { - return f.odsAxisHalf(axisType, axisIdx) + shares, err := f.readAxisHalf(axisType, axisIdx) + if err != nil { + return AxisHalf{}, fmt.Errorf("reading axis half: %w", err) + } + return AxisHalf{ + Shares: shares, + IsParity: false, + }, nil } err := f.readOds() if err != nil { - return nil, err + return AxisHalf{}, err } - return f.ods.computeAxisHalf(ctx, axisType, axisIdx) + shares, err := f.ods.computeAxisHalf(ctx, axisType, axisIdx) + if err != nil { + return AxisHalf{}, fmt.Errorf("computing axis half: %w", err) + } + return AxisHalf{ + Shares: shares, + IsParity: false, + }, nil } -func (f *OdsFile) odsAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { +func (f *OdsFile) readAxisHalf(axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { f.lock.RLock() - defer f.lock.RUnlock() - shrs, err := f.ods.axisHalf(context.Background(), axisType, axisIdx) - if err == nil { - return shrs, nil + ods := f.ods + f.lock.RUnlock() + if ods != nil { + return f.ods.axisHalf(context.Background(), axisType, axisIdx) } switch axisType { case rsmt2d.Col: - return f.readCol(axisIdx) + return f.readCol(axisIdx, 0) case rsmt2d.Row: return f.readRow(axisIdx) } @@ -153,7 +167,7 @@ func (f *OdsFile) readOds() error { return fmt.Errorf("discarding header: %w", err) } - square, err := readShares(share.Size, f.Size(), f.fl) + square, err := readSquare(f.fl, share.Size, f.Size()) if err != nil { return fmt.Errorf("reading ods: %w", err) } @@ -162,10 +176,6 @@ func (f *OdsFile) readOds() error { } func (f *OdsFile) readRow(idx int) ([]share.Share, error) { - if idx >= f.Size()/2 { - return nil, fmt.Errorf("index is out of ods bounds") - } - shrLn := int(f.hdr.shareSize) odsLn := int(f.hdr.squareSize) / 2 @@ -185,19 +195,16 @@ func (f *OdsFile) readRow(idx int) ([]share.Share, error) { return shrs, nil } -func (f *OdsFile) readCol(idx int) ([]share.Share, error) { - if idx >= f.Size()/2 { - return nil, fmt.Errorf("index is out of ods bounds") - } - +func (f *OdsFile) readCol(axisIdx, quadrantIdx int) ([]share.Share, error) { shrLn := int(f.hdr.shareSize) odsLn := int(f.hdr.squareSize) / 2 + quadrantOffset := quadrantIdx * odsLn * odsLn * shrLn shrs := make([]share.Share, odsLn) for i := 0; i < odsLn; i++ { - pos := idx + i*odsLn - offset := pos*shrLn + HeaderSize + pos := axisIdx + i*odsLn + offset := 
pos*shrLn + HeaderSize + quadrantOffset shr := make(share.Share, shrLn) if _, err := f.fl.ReadAt(shr, int64(offset)); err != nil { @@ -208,47 +215,41 @@ func (f *OdsFile) readCol(idx int) ([]share.Share, error) { return shrs, nil } -func (f *OdsFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { - original, err := f.AxisHalf(ctx, axisType, axisIdx) - if err != nil { - return nil, err +func (f *OdsFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { + axisType, axisIdx, shrIdx := rsmt2d.Row, y, x + // if the share is in the third quadrant, we need to switch axis type to column because it + // is more efficient to read single column than reading full ods to calculate single row + if x < f.Size()/2 && y >= f.Size()/2 { + axisType, axisIdx, shrIdx = rsmt2d.Col, x, y } - return extendShares(codec, original) -} - -func extendShares(codec Codec, original []share.Share) ([]share.Share, error) { - sqLen := len(original) * 2 - enc, err := codec.Encoder(sqLen) + axis, err := f.axis(ctx, axisType, axisIdx) if err != nil { - return nil, fmt.Errorf("encoder: %w", err) + return nil, fmt.Errorf("reading axis: %w", err) } - shares := make([]share.Share, sqLen) - copy(shares, original) - for j := len(original); j < len(shares); j++ { - shares[j] = make([]byte, len(original[0])) - } + return shareWithProof(axis, axisType, axisIdx, shrIdx) +} - err = enc.Encode(shares) +func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + shares, err := f.axis(ctx, rsmt2d.Row, rowIdx) if err != nil { - return nil, fmt.Errorf("encoder: %w", err) + return share.NamespacedRow{}, err } - - return shares, nil + return ndDataFromShares(shares, namespace, rowIdx) } -func (f *OdsFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { - axisType, axisIdx, shrIdx := rsmt2d.Row, y, x - if x < f.Size()/2 && y >= f.Size()/2 { - axisType, axisIdx, shrIdx = rsmt2d.Col, x, y - } - shares, err := f.axis(ctx, axisType, axisIdx) +func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { + err := f.readOds() if err != nil { return nil, err } - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(f.Size()/2), uint(axisIdx)) + return f.ods.eds() +} + +func shareWithProof(shares []share.Share, axisType rsmt2d.Axis, axisIdx, shrIdx int) (*share.ShareWithProof, error) { + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares)/2), uint(axisIdx)) for _, shr := range shares { err := tree.Push(shr) if err != nil { @@ -268,19 +269,11 @@ func (f *OdsFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, e }, nil } -func (f *OdsFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { - shares, err := f.axis(ctx, rsmt2d.Row, rowIdx) - if err != nil { - return share.NamespacedRow{}, err - } - return ndDataFromShares(shares, namespace, rowIdx) -} - -func (f *OdsFile) EDS(_ context.Context) (*rsmt2d.ExtendedDataSquare, error) { - err := f.readOds() +func (f *OdsFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + half, err := f.AxisHalf(ctx, axisType, axisIdx) if err != nil { return nil, err } - return f.ods.eds() + return half.Extended() } diff --git a/share/store/file/ods_file_test.go b/share/store/file/ods_file_test.go index 7f190c5c3a..e30df719a0 100644 --- a/share/store/file/ods_file_test.go +++ b/share/store/file/ods_file_test.go @@ -25,6 +25,21 @@ func TestCreateOdsFile(t 
*testing.T) { assert.True(t, edsIn.Equals(edsOut)) } +func TestReadFullOdsFromFile(t *testing.T) { + eds := edstest.RandEDS(t, 8) + path := t.TempDir() + "/testfile" + f, err := CreateOdsFile(path, []byte{}, eds) + require.NoError(t, err) + + err = f.readOds() + require.NoError(t, err) + for i, row := range f.ods { + original := eds.Row(uint(i))[:eds.Width()/2] + require.True(t, len(original) == len(row)) + require.Equal(t, original, row) + } +} + func TestOdsFile(t *testing.T) { size := 8 createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { @@ -55,36 +70,6 @@ func TestOdsFile(t *testing.T) { }) } -func TestReadOdsFile(t *testing.T) { - eds := edstest.RandEDS(t, 8) - path := t.TempDir() + "/testfile" - f, err := CreateOdsFile(path, []byte{}, eds) - require.NoError(t, err) - - err = f.readOds() - require.NoError(t, err) - for i, row := range f.ods { - original, err := f.readRow(i) - require.NoError(t, err) - require.True(t, len(original) == len(row)) - require.Equal(t, original, row) - } -} - -// Leopard full encode -// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 418206 2545 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 4968 227265 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:first(original)-10 57007 20707 ns/op -// BenchmarkAxisFromOdsFile/Size:32/Axis:col/squareHalf:second(extended)-10 5016 214184 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:first(original)-10 308559 3786 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:row/squareHalf:second(extended)-10 1624 713999 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:first(original)-10 28724 41421 ns/op -// BenchmarkAxisFromOdsFile/Size:64/Axis:col/squareHalf:second(extended)-10 1686 629314 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:first(original)-10 183322 6360 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:row/squareHalf:second(extended)-10 428 2616150 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 14338 83598 ns/op -// BenchmarkAxisFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 488 2213146 ns/op - // ReconstructSome, default codec // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:first(original)-10 455848 2588 ns/op // BenchmarkAxisFromOdsFile/Size:32/Axis:row/squareHalf:second(extended)-10 9015 203950 ns/op @@ -125,7 +110,7 @@ func BenchmarkAxisFromOdsFile(b *testing.B) { // BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:first(original)-10 2114 514642 ns/op // BenchmarkShareFromOdsFile/Size:128/Axis:col/squareHalf:second(extended)-10 373 3068104 ns/op func BenchmarkShareFromOdsFile(b *testing.B) { - minSize, maxSize := 128, 128 + minSize, maxSize := 32, 128 dir := b.TempDir() newFile := func(size int) EdsFile { diff --git a/share/store/file/validating_file.go b/share/store/file/validating_file.go index 94a0ae9ab2..ba36bc79e3 100644 --- a/share/store/file/validating_file.go +++ b/share/store/file/validating_file.go @@ -13,16 +13,16 @@ import ( // ErrOutOfBounds is returned whenever an index is out of bounds. var ErrOutOfBounds = errors.New("index is out of bounds") -// ValidatingFile is a file implementation that performs sanity checks on file operations. -type ValidatingFile struct { +// validatingFile is a file implementation that performs sanity checks on file operations. 
+type validatingFile struct { EdsFile } -func NewValidatingFile(f EdsFile) EdsFile { - return &ValidatingFile{EdsFile: f} +func WithValidation(f EdsFile) EdsFile { + return &validatingFile{EdsFile: f} } -func (f *ValidatingFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { +func (f *validatingFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { if err := validateIndexBounds(f, x); err != nil { return nil, fmt.Errorf("col: %w", err) } @@ -32,14 +32,14 @@ func (f *ValidatingFile) Share(ctx context.Context, x, y int) (*share.ShareWithP return f.EdsFile.Share(ctx, x, y) } -func (f *ValidatingFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { +func (f *validatingFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { if err := validateIndexBounds(f, axisIdx); err != nil { - return nil, fmt.Errorf("%s: %w", axisType, err) + return AxisHalf{}, fmt.Errorf("%s: %w", axisType, err) } return f.EdsFile.AxisHalf(ctx, axisType, axisIdx) } -func (f *ValidatingFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { +func (f *validatingFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { if err := validateIndexBounds(f, rowIdx); err != nil { return share.NamespacedRow{}, fmt.Errorf("row: %w", err) } From 74ed5c057278fb7fcf0bcd111af5e5c0894052ad Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 27 Mar 2024 18:34:53 +0400 Subject: [PATCH 108/132] add q1q4 file --- share/store/file/file_header.go | 24 +++++-- share/store/file/q1q4_file.go | 112 +++++++++++++++++++++++++++++ share/store/file/q1q4_file_test.go | 37 ++++++++++ share/store/file/square.go | 29 ++++---- 4 files changed, 178 insertions(+), 24 deletions(-) create mode 100644 share/store/file/q1q4_file.go create mode 100644 share/store/file/q1q4_file_test.go diff --git a/share/store/file/file_header.go b/share/store/file/file_header.go index 080d29bedb..f025754bd7 100644 --- a/share/store/file/file_header.go +++ b/share/store/file/file_header.go @@ -11,7 +11,8 @@ import ( const HeaderSize = 64 type Header struct { - version fileVersion + version fileVersion + fileType fileType // Taken directly from EDS shareSize uint16 @@ -28,6 +29,13 @@ const ( FileV0 fileVersion = iota ) +type fileType uint8 + +const ( + ods fileType = iota + q1q4 +) + func (h *Header) Version() fileVersion { return h.version } @@ -47,9 +55,10 @@ func (h *Header) DataHash() share.DataHash { func (h *Header) WriteTo(w io.Writer) (int64, error) { buf := make([]byte, HeaderSize) buf[0] = byte(h.version) - binary.LittleEndian.PutUint16(buf[1:3], h.shareSize) - binary.LittleEndian.PutUint16(buf[3:5], h.squareSize) - copy(buf[5:37], h.datahash) + buf[1] = byte(h.fileType) + binary.LittleEndian.PutUint16(buf[2:4], h.shareSize) + binary.LittleEndian.PutUint16(buf[4:6], h.squareSize) + copy(buf[32:64], h.datahash) _, err := io.Copy(w, bytes.NewBuffer(buf)) return HeaderSize, err } @@ -63,11 +72,12 @@ func ReadHeader(r io.Reader) (*Header, error) { h := &Header{ version: fileVersion(buf[0]), - shareSize: binary.LittleEndian.Uint16(buf[1:3]), - squareSize: binary.LittleEndian.Uint16(buf[3:5]), + fileType: fileType(buf[1]), + shareSize: binary.LittleEndian.Uint16(buf[2:4]), + squareSize: binary.LittleEndian.Uint16(buf[4:6]), datahash: make([]byte, 32), } - copy(h.datahash, buf[5:37]) + copy(h.datahash, buf[32:64]) return h, err } diff --git a/share/store/file/q1q4_file.go 
b/share/store/file/q1q4_file.go new file mode 100644 index 0000000000..804b8ffd3d --- /dev/null +++ b/share/store/file/q1q4_file.go @@ -0,0 +1,112 @@ +package file + +import ( + "context" + "fmt" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/rsmt2d" + "io" +) + +var _ EdsFile = (*Q1Q4File)(nil) + +type Q1Q4File struct { + *OdsFile +} + +func OpenQ1Q4File(path string) (*Q1Q4File, error) { + ods, err := OpenOdsFile(path) + if err != nil { + return nil, err + } + + return &Q1Q4File{ + OdsFile: ods, + }, nil +} + +func CreateQ1Q4File( + path string, + datahash share.DataHash, + eds *rsmt2d.ExtendedDataSquare) (*Q1Q4File, error) { + ods, err := CreateOdsFile(path, datahash, eds) + if err != nil { + return nil, err + } + + err = writeQ4(ods.fl, eds) + if err != nil { + return nil, fmt.Errorf("writing Q4: %w", err) + } + + return &Q1Q4File{ + OdsFile: ods, + }, nil + +} + +func (f *Q1Q4File) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { + if axisIdx < f.Size()/2 { + half, err := f.OdsFile.readAxisHalf(axisType, axisIdx) + if err != nil { + return AxisHalf{}, fmt.Errorf("reading axis half: %w", err) + } + return AxisHalf{ + Shares: half, + IsParity: false, + }, nil + } + + var half []share.Share + var err error + switch axisType { + case rsmt2d.Col: + half, err = f.readCol(axisIdx-f.Size()/2, 1) + case rsmt2d.Row: + half, err = f.readRow(axisIdx) + } + if err != nil { + return AxisHalf{}, fmt.Errorf("reading axis: %w", err) + } + return AxisHalf{ + Shares: half, + IsParity: true, + }, nil +} + +func (f *Q1Q4File) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { + half, err := f.AxisHalf(ctx, rsmt2d.Row, y) + if err != nil { + return nil, fmt.Errorf("reading axis: %w", err) + } + shares, err := half.Extended() + if err != nil { + return nil, fmt.Errorf("extending shares: %w", err) + } + return shareWithProof(shares, rsmt2d.Row, y, x) +} + +func (f *Q1Q4File) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { + half, err := f.AxisHalf(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return share.NamespacedRow{}, fmt.Errorf("reading axis: %w", err) + } + shares, err := half.Extended() + if err != nil { + return share.NamespacedRow{}, fmt.Errorf("extending shares: %w", err) + } + return ndDataFromShares(shares, namespace, rowIdx) +} + +func writeQ4(w io.Writer, eds *rsmt2d.ExtendedDataSquare) error { + odsLn := int(eds.Width()) / 2 + for x := odsLn; x < int(eds.Width()); x++ { + for y := odsLn; y < int(eds.Width()); y++ { + _, err := w.Write(eds.GetCell(uint(x), uint(y))) + if err != nil { + return err + } + } + } + return nil +} diff --git a/share/store/file/q1q4_file_test.go b/share/store/file/q1q4_file_test.go new file mode 100644 index 0000000000..53aedb7555 --- /dev/null +++ b/share/store/file/q1q4_file_test.go @@ -0,0 +1,37 @@ +package file + +import ( + "github.com/celestiaorg/rsmt2d" + "github.com/stretchr/testify/require" + "testing" +) + +func TestQ1Q4File(t *testing.T) { + size := 8 + createOdsFile := func(eds *rsmt2d.ExtendedDataSquare) EdsFile { + path := t.TempDir() + "/testfile" + fl, err := CreateQ1Q4File(path, []byte{}, eds) + require.NoError(t, err) + return fl + } + + t.Run("Share", func(t *testing.T) { + testFileShare(t, createOdsFile, size) + }) + + t.Run("AxisHalf", func(t *testing.T) { + testFileAxisHalf(t, createOdsFile, size) + }) + + t.Run("Data", func(t *testing.T) { + testFileData(t, createOdsFile, size) + }) + + t.Run("EDS", func(t *testing.T) { + 
testFileEds(t, createOdsFile, size) + }) + + t.Run("ReadOds", func(t *testing.T) { + testFileReader(t, createOdsFile, size) + }) +} diff --git a/share/store/file/square.go b/share/store/file/square.go index bc5d4ed299..ab145dfb9a 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -5,9 +5,8 @@ import ( "bytes" "context" "fmt" - "io" - "golang.org/x/sync/errgroup" + "io" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/rsmt2d" @@ -19,9 +18,9 @@ type square [][]share.Share // ReadEds reads an EDS from the reader and returns it. func ReadEds(_ context.Context, r io.Reader, edsSize int) (*rsmt2d.ExtendedDataSquare, error) { - square, err := readShares(share.Size, edsSize, r) + square, err := readSquare(r, share.Size, edsSize) if err != nil { - return nil, fmt.Errorf("reading shares: %w", err) + return nil, fmt.Errorf("reading Shares: %w", err) } eds, err := square.eds() @@ -31,10 +30,10 @@ func ReadEds(_ context.Context, r io.Reader, edsSize int) (*rsmt2d.ExtendedDataS return eds, nil } -// readShares reads shares from the reader and returns a square. It assumes that the reader is -// positioned at the beginning of the shares. It knows the size of the shares and the size of the +// readSquare reads Shares from the reader and returns a square. It assumes that the reader is +// positioned at the beginning of the Shares. It knows the size of the Shares and the size of the // square, so reads from reader are limited to exactly the amount of data required. -func readShares(shareSize, edsSize int, r io.Reader) (square, error) { +func readSquare(r io.Reader, shareSize, edsSize int) (square, error) { odsLn := edsSize / 2 // get pre-allocated square and buffer from memPools @@ -43,7 +42,6 @@ func readShares(shareSize, edsSize int, r io.Reader) (square, error) { // TODO(@walldiss): run benchmark to find optimal size for buffer br := bufio.NewReaderSize(r, 4096) var total int - log.Info("start reading ods", "ods size", odsLn, "share size", shareSize) for i := 0; i < odsLn; i++ { for j := 0; j < odsLn; j++ { n, err := io.ReadFull(br, square[i][j]) @@ -56,9 +54,6 @@ func readShares(shareSize, edsSize int, r io.Reader) (square, error) { total += n } } - - // TODO: remove this log - log.Info("read bytes", "total", total) return square, nil } @@ -76,11 +71,11 @@ func (s square) close() error { func (s square) axisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { if s == nil { - return nil, fmt.Errorf("ods file not in mem") + return nil, fmt.Errorf("square is nil") } if axisIdx >= s.size() { - return nil, fmt.Errorf("index is out of ods bounds") + return nil, fmt.Errorf("index is out of square bounds") } // square stores rows directly in high level slice, so we can return by accessing row by index @@ -128,7 +123,7 @@ func (s square) computeAxisHalf( ) ([]share.Share, error) { shares := make([]share.Share, s.size()) - // extend opposite half of the square while collecting shares for the first half of required axis + // extend opposite half of the square while collecting Shares for the first half of required axis g, ctx := errgroup.WithContext(ctx) opposite := oppositeAxis(axisType) for i := 0; i < s.size(); i++ { @@ -179,18 +174,18 @@ func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis { return rsmt2d.Col } -// bufferedODSReader will reads shares from inMemOds into the buffer. +// bufferedODSReader will read Shares from inMemOds into the buffer. 
// It exposes the buffer to be read by io.Reader interface implementation type bufferedODSReader struct { square square - // current is the amount of shares stored in ods file that have been read from reader. When current + // current is the amount of Shares stored in square that have been read from reader. When current // reaches total, bufferedODSReader will prevent further reads by returning io.EOF current, total int buf *bytes.Buffer } func (r *bufferedODSReader) Read(p []byte) (n int, err error) { - // read shares to the buffer until it has sufficient data to fill provided container or full ods is + // read Shares to the buffer until it has sufficient data to fill provided container or full square is // read for r.current < r.total && r.buf.Len() < len(p) { x, y := r.current%(r.square.size()), r.current/(r.square.size()) From ab92e820bd8711983927a014b147fe92ed9ad8a6 Mon Sep 17 00:00:00 2001 From: Vlad Date: Wed, 27 Mar 2024 18:36:53 +0400 Subject: [PATCH 109/132] rename wrapping files to middleware pattern --- share/store/file/cache_file.go | 24 ++++++++++++------------ share/store/file/cache_file_test.go | 2 +- share/store/store.go | 6 +++--- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go index bdccd52e4c..189a16c500 100644 --- a/share/store/file/cache_file.go +++ b/share/store/file/cache_file.go @@ -18,9 +18,9 @@ import ( "github.com/celestiaorg/celestia-node/share/ipld" ) -var _ EdsFile = (*CacheFile)(nil) +var _ EdsFile = (*proofsCacheFile)(nil) -type CacheFile struct { +type proofsCacheFile struct { EdsFile // lock protects axisCache @@ -39,14 +39,14 @@ type inMemoryAxis struct { proofs blockservice.BlockGetter } -func NewCacheFile(f EdsFile) *CacheFile { - return &CacheFile{ +func WithProofsCache(f EdsFile) EdsFile { + return &proofsCacheFile{ EdsFile: f, axisCache: []map[int]inMemoryAxis{make(map[int]inMemoryAxis), make(map[int]inMemoryAxis)}, } } -func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { +func (f *proofsCacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) { axisType, axisIdx, shrIdx := rsmt2d.Row, y, x ax, err := f.axisWithProofs(ctx, axisType, axisIdx) if err != nil { @@ -62,7 +62,7 @@ func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, return share, nil } -func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, error) { +func (f *proofsCacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, error) { // return axis with proofs from cache if possible ax, ok := f.getAxisFromCache(axisType, axisIdx) if ax.proofs != nil { @@ -107,7 +107,7 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax return ax, nil } -func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { +func (f *proofsCacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { // return axis from cache if possible ax, ok := f.getAxisFromCache(axisType, axisIdx) if ok { @@ -134,7 +134,7 @@ func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx return half, nil } -func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { +func (f *proofsCacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) { ax, err := 
From ab92e820bd8711983927a014b147fe92ed9ad8a6 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Wed, 27 Mar 2024 18:36:53 +0400
Subject: [PATCH 109/132] rename wrapping files to middleware pattern

---
 share/store/file/cache_file.go      | 24 ++++++++++++------------
 share/store/file/cache_file_test.go |  2 +-
 share/store/store.go                |  6 +++---
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/share/store/file/cache_file.go b/share/store/file/cache_file.go
index bdccd52e4c..189a16c500 100644
--- a/share/store/file/cache_file.go
+++ b/share/store/file/cache_file.go
@@ -18,9 +18,9 @@ import (
 	"github.com/celestiaorg/celestia-node/share/ipld"
 )
 
-var _ EdsFile = (*CacheFile)(nil)
+var _ EdsFile = (*proofsCacheFile)(nil)
 
-type CacheFile struct {
+type proofsCacheFile struct {
 	EdsFile
 
 	// lock protects axisCache
@@ -39,14 +39,14 @@ type inMemoryAxis struct {
 	proofs blockservice.BlockGetter
 }
 
-func NewCacheFile(f EdsFile) *CacheFile {
-	return &CacheFile{
+func WithProofsCache(f EdsFile) EdsFile {
+	return &proofsCacheFile{
 		EdsFile:   f,
 		axisCache: []map[int]inMemoryAxis{make(map[int]inMemoryAxis), make(map[int]inMemoryAxis)},
 	}
 }
 
-func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) {
+func (f *proofsCacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof, error) {
 	axisType, axisIdx, shrIdx := rsmt2d.Row, y, x
 	ax, err := f.axisWithProofs(ctx, axisType, axisIdx)
 	if err != nil {
@@ -62,7 +62,7 @@ func (f *CacheFile) Share(ctx context.Context, x, y int) (*share.ShareWithProof,
 	return share, nil
 }
 
-func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, error) {
+func (f *proofsCacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, error) {
 	// return axis with proofs from cache if possible
 	ax, ok := f.getAxisFromCache(axisType, axisIdx)
 	if ax.proofs != nil {
@@ -107,7 +107,7 @@ func (f *CacheFile) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, ax
 	return ax, nil
 }
 
-func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) {
+func (f *proofsCacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) {
 	// return axis from cache if possible
 	ax, ok := f.getAxisFromCache(axisType, axisIdx)
 	if ok {
@@ -134,7 +134,7 @@ func (f *CacheFile) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx
 	return half, nil
 }
 
-func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) {
+func (f *proofsCacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx int) (share.NamespacedRow, error) {
 	ax, err := f.axisWithProofs(ctx, rsmt2d.Row, rowIdx)
 	if err != nil {
 		return share.NamespacedRow{}, err
@@ -151,7 +151,7 @@ func (f *CacheFile) Data(ctx context.Context, namespace share.Namespace, rowIdx
 	}, nil
 }
 
-func (f *CacheFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) {
+func (f *proofsCacheFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error) {
 	shares := make([][]byte, 0, f.Size()*f.Size())
 	for i := 0; i < f.Size(); i++ {
 		ax, err := f.axis(ctx, rsmt2d.Row, i)
@@ -171,7 +171,7 @@ func (f *CacheFile) EDS(ctx context.Context) (*rsmt2d.ExtendedDataSquare, error)
 	return eds, nil
 }
 
-func (f *CacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
+func (f *proofsCacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) {
 	half, err := f.AxisHalf(ctx, axisType, axisIdx)
 	if err != nil {
 		return nil, err
@@ -180,13 +180,13 @@ func (f *CacheFile) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int)
 	return half.Extended()
 }
 
-func (f *CacheFile) storeAxisInCache(axisType rsmt2d.Axis, axisIdx int, axis inMemoryAxis) {
+func (f *proofsCacheFile) storeAxisInCache(axisType rsmt2d.Axis, axisIdx int, axis inMemoryAxis) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	f.axisCache[axisType][axisIdx] = axis
 }
 
-func (f *CacheFile) getAxisFromCache(axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, bool) {
+func (f *proofsCacheFile) getAxisFromCache(axisType rsmt2d.Axis, axisIdx int) (inMemoryAxis, bool) {
 	f.lock.RLock()
 	defer f.lock.RUnlock()
 	ax, ok := f.axisCache[axisType][axisIdx]
diff --git a/share/store/file/cache_file_test.go b/share/store/file/cache_file_test.go
index c406bbfcec..d4ae61a807 100644
--- a/share/store/file/cache_file_test.go
+++ b/share/store/file/cache_file_test.go
@@ -14,7 +14,7 @@ func TestCacheFile(t *testing.T) {
 		path := t.TempDir() + "/testfile"
 		fl, err := CreateOdsFile(path, []byte{}, eds)
 		require.NoError(t, err)
-		return NewCacheFile(fl)
+		return WithProofsCache(fl)
 	}
 
 	t.Run("Share", func(t *testing.T) {
diff --git a/share/store/store.go b/share/store/store.go
index cb5e4f0209..2963bb3da8 100644
--- a/share/store/store.go
+++ b/share/store/store.go
@@ -383,9 +383,9 @@ func fileLoader(f file.EdsFile) cache.OpenFileFn {
 }
 
 func wrappedFile(f file.EdsFile) file.EdsFile {
-	withCache := file.NewCacheFile(f)
-	closedOnce := file.CloseOnceFile(withCache)
-	sanityChecked := file.NewValidatingFile(closedOnce)
+	withCache := file.WithProofsCache(f)
+	closedOnce := file.WithClosedOnce(withCache)
+	sanityChecked := file.WithValidation(closedOnce)
 	return sanityChecked
 }
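The wrappedFile hunk above composes the middleware innermost-first: f is wrapped by WithProofsCache, then WithClosedOnce, then WithValidation. A generic sketch of the same pattern (not part of the patch); EdsFile and the With* constructors come from the diffs, the Middleware alias and chain helper are illustrative only:

	type Middleware func(file.EdsFile) file.EdsFile

	func chain(f file.EdsFile, mws ...Middleware) file.EdsFile {
		for _, mw := range mws {
			f = mw(f) // each wrapper decorates the one before it
		}
		return f
	}

	// equivalent to wrappedFile:
	//	chain(f, file.WithProofsCache, file.WithClosedOnce, file.WithValidation)

Note the design choice the rename encodes: the constructors now return the EdsFile interface rather than a concrete *CacheFile, so the wrapper types can stay unexported and callers depend only on the interface.
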
From 28fe96755c83cffeef134012fe075cc529c024a9 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Wed, 27 Mar 2024 18:41:07 +0400
Subject: [PATCH 110/132] use Q1Q4 file in store

---
 share/store/blockstore.go | 2 +-
 share/store/store.go      | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/share/store/blockstore.go b/share/store/blockstore.go
index 77609b7ae9..f5d61a4c22 100644
--- a/share/store/blockstore.go
+++ b/share/store/blockstore.go
@@ -172,7 +172,7 @@ func (bs *Blockstore) HashOnRead(bool) {
 func (bs *Blockstore) openFile(height uint64) cache.OpenFileFn {
 	return func(ctx context.Context) (file.EdsFile, error) {
 		path := bs.store.basepath + heightsPath + fmt.Sprintf("%d", height)
-		f, err := file.OpenOdsFile(path)
+		f, err := file.OpenQ1Q4File(path)
 		if err != nil {
 			return nil, fmt.Errorf("opening ODS file: %w", err)
 		}
diff --git a/share/store/store.go b/share/store/store.go
index 2963bb3da8..2a984d2378 100644
--- a/share/store/store.go
+++ b/share/store/store.go
@@ -175,8 +175,8 @@ func (s *Store) createFile(filePath string, datahash share.DataHash, square *rsm
 		return nil, fmt.Errorf("getting by hash: %w", err)
 	}
 
-	// create ODS file
-	f, err = file.CreateOdsFile(filePath, datahash, square)
+	// create Q1Q4 file
+	f, err = file.CreateQ1Q4File(filePath, datahash, square)
 	if err != nil {
 		return nil, fmt.Errorf("creating ODS file: %w", err)
 	}
@@ -203,7 +203,7 @@ func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) {
 	}
 
 	path := s.basepath + blocksPath + datahash.String()
-	odsFile, err := file.OpenOdsFile(path)
+	odsFile, err := file.OpenQ1Q4File(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, ErrNotFound
@@ -268,7 +268,7 @@ func (s *Store) getByHeight(height uint64) (file.EdsFile, error) {
 	}
 
 	path := s.basepath + heightsPath + fmt.Sprintf("%d", height)
-	odsFile, err := file.OpenOdsFile(path)
+	odsFile, err := file.OpenQ1Q4File(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, ErrNotFound
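The Q1Q4 file format is not defined anywhere in the series so far; reading the name as "quadrants one and four" of the extended square is an assumption here, under which the file would keep the original data quadrant plus the diagonal parity quadrant so both row and column halves can be served. A small helper (illustrative only) pinning down the quadrant numbering that reading implies:

	// quadrant reports which EDS quadrant the share at (row, col) belongs to,
	// for a square of width edsSize: Q1 is top-left (original data),
	// Q4 is bottom-right (parity of parity).
	func quadrant(row, col, edsSize int) int {
		ods := edsSize / 2
		switch {
		case row < ods && col < ods:
			return 1
		case row < ods:
			return 2
		case col < ods:
			return 3
		default:
			return 4
		}
	}
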
From 3b46993dc01812af0efe025f6c40b6f36e8f34b3 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Thu, 28 Mar 2024 00:44:20 +0400
Subject: [PATCH 111/132] remove redundant files from eds store

---
 share/eds/store_options.go           |  43 ---------------------
 share/eds/testdata/README.md         |   5 ----
 share/eds/testdata/example-root.json |  22 --------------
 share/eds/testdata/example.car       | Bin 74051 -> 0 bytes
 4 files changed, 70 deletions(-)
 delete mode 100644 share/eds/store_options.go
 delete mode 100644 share/eds/testdata/README.md
 delete mode 100644 share/eds/testdata/example-root.json
 delete mode 100644 share/eds/testdata/example.car

diff --git a/share/eds/store_options.go b/share/eds/store_options.go
deleted file mode 100644
index e5e6ffa73d..0000000000
--- a/share/eds/store_options.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package eds
-
-import (
-	"fmt"
-	"time"
-)
-
-type Parameters struct {
-	// GC performs DAG store garbage collection by reclaiming transient files of
-	// shards that are currently available but inactive, or errored.
-	// We don't use transient files right now, so GC is turned off by default.
-	GCInterval time.Duration
-
-	// RecentBlocksCacheSize is the size of the cache for recent blocks.
-	RecentBlocksCacheSize int
-
-	// BlockstoreCacheSize is the size of the cache for blockstore requested accessors.
-	BlockstoreCacheSize int
-}
-
-// DefaultParameters returns the default configuration values for the EDS store parameters.
-func DefaultParameters() *Parameters {
-	return &Parameters{
-		GCInterval:            0,
-		RecentBlocksCacheSize: 10,
-		BlockstoreCacheSize:   128,
-	}
-}
-
-func (p *Parameters) Validate() error {
-	if p.GCInterval < 0 {
-		return fmt.Errorf("eds: GC interval cannot be negative")
-	}
-
-	if p.RecentBlocksCacheSize < 1 {
-		return fmt.Errorf("eds: recent blocks cache size must be positive")
-	}
-
-	if p.BlockstoreCacheSize < 1 {
-		return fmt.Errorf("eds: blockstore cache size must be positive")
-	}
-	return nil
-}
diff --git a/share/eds/testdata/README.md b/share/eds/testdata/README.md
deleted file mode 100644
index 960549e2a0..0000000000
--- a/share/eds/testdata/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# CARxEDS Testdata
-
-This directory contains an example CARv1 file of an EDS and its matching data availability header.
-
-They might need to be regenerated when modifying constants such as the default share size. This can be done by running the test utility in `eds_test.go` called `createTestData`.
diff --git a/share/eds/testdata/example-root.json b/share/eds/testdata/example-root.json
deleted file mode 100644
index 999d6301b6..0000000000
--- a/share/eds/testdata/example-root.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-"row_roots": [
-"AAAAAAAAAAAAAAAAAAAAAAAAABPYEuDlO9Dz69oAAAAAAAAAAAAAAAAAAAAAAAAAMcklN0h38T4b/UBC/Cmr5YWmjmmxvi1e35vZBW14b8gDHBoTFVvY6H4J",
-"AAAAAAAAAAAAAAAAAAAAAAAAADxyZecUZD41W5IAAAAAAAAAAAAAAAAAAAAAAAAAh8vQUZ38PaWyeUs7dQhphIuRIKiGaTr4KFwEhMRhejTd6/4NHdnKTDyY",
-"AAAAAAAAAAAAAAAAAAAAAAAAAKDQatbQSwQ9uJsAAAAAAAAAAAAAAAAAAAAAAAAArtdqXCSsM1OlVCRZqqfZDnEO9eC5cwlgy5MQHb2g4NLr7nZYTruiOoz7",
-"AAAAAAAAAAAAAAAAAAAAAAAAAMeUhM8LZBo9sWwAAAAAAAAAAAAAAAAAAAAAAAAA8PtvJpbDc4APKOK6MT1k61HuQXwauWw3nFWwr9pSljiYMv6jjjdLDF8o",
-"/////////////////////////////////////////////////////////////////////////////xnHmhDh4Y8vfJrgewAcvLWpvI5XOyATj1IQDkCwvIEh",
-"/////////////////////////////////////////////////////////////////////////////+qngp0AfoykfXwsMBukRtYxNA/bzW0+F3J7Q/+S1YZJ",
-"/////////////////////////////////////////////////////////////////////////////4WNPrME/2MLrIZgAUoKaVx2GzJqDcYGrBg+sudPKUDy",
-"/////////////////////////////////////////////////////////////////////////////6HdebpaHl7iTpLvmuPvtQNnkHfNOPyEhahxbVnIB2d1"
-],
-"column_roots": [
-"AAAAAAAAAAAAAAAAAAAAAAAAABPYEuDlO9Dz69oAAAAAAAAAAAAAAAAAAAAAAAAAx5SEzwtkGj2xbESyOeamsjGWUBQdAQoiSl+rMtNMo1wEtfGQnFS/g+K+",
-"AAAAAAAAAAAAAAAAAAAAAAAAAC3uK6nhCxHTfBwAAAAAAAAAAAAAAAAAAAAAAAAA1fxnqHyO6qV39pcUQ8MuTfJ7RBhbSVWf0aamUP27KRY0II55oJoY6Ng6",
-"AAAAAAAAAAAAAAAAAAAAAAAAAC6DkYeeBY/kKvAAAAAAAAAAAAAAAAAAAAAAAAAA47rxk8hoCnWGM+CX47TlYWBeE2unvRhA/j3EvHdxeL1rFRkaYfAd5eg7",
-"AAAAAAAAAAAAAAAAAAAAAAAAADHJJTdId/E+G/0AAAAAAAAAAAAAAAAAAAAAAAAA8PtvJpbDc4APKAk5QPSH59HECE2sf/CDLKAZJjWo9DD4sLXJQ4jTZoH6",
-"/////////////////////////////////////////////////////////////////////////////4lKCT3K11RnNIuLNfY+SfDZCYAE2iW0hjQHIVBpoN0q",
-"/////////////////////////////////////////////////////////////////////////////1NpYcgayEVenbFeEO5LJ1j1/1sD+PvZWHDv+jqT1dLR",
-"/////////////////////////////////////////////////////////////////////////////8FOWVuCU0rTzUW9tP2R47RmTBvwXX8ycKrMhgKEi1xa",
-"/////////////////////////////////////////////////////////////////////////////7K5SoZ3HF5QgPvIXpKSr9eT4Xfiokc3PUMmXE4pBDTf"
-]
-}
\ No newline at end of file
diff --git a/share/eds/testdata/example.car b/share/eds/testdata/example.car
deleted file mode 100644
index 4d33c0ef33300d68c8f3a05f7e09c65409c30b9d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 74051
[74051 bytes of base85-encoded binary literal elided: the deleted CARv1 test file, not human-readable]

From [commit hash garbled in source] Mon Sep 17 00:00:00 2001
Date: Wed, 27 Mar 2024 21:45:18 +0100
Subject: [PATCH 112/132] add edsid

---
 share/shwap/data_hasher_test.go |  2 -
 share/shwap/data_id.go          |  9 ++--
 share/shwap/eds_id.go           | 75 +++++++++++++++++++++++++++++++++
 share/shwap/eds_id_test.go      | 31 ++++++++++++++
 share/shwap/row_id.go           | 23 ++++------
 share/shwap/sample_id.go        |  9 ++--
 6 files changed, 121 insertions(+), 28 deletions(-)
 create mode 100644 share/shwap/eds_id.go
 create mode 100644 share/shwap/eds_id_test.go

diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go
index ff4a800be1..f94f1aa6e6 100644
--- a/share/shwap/data_hasher_test.go
+++ b/share/shwap/data_hasher_test.go
@@ -10,8 +10,6 @@ import (
 	"github.com/celestiaorg/celestia-node/share/sharetest"
 )
 
-// TODO(@walldiss):
-// FIX: hasher test succeed, while logging unmarshal error: "unmarshaling Data: proto: Data: wiretype end group for non-group"
 func TestDataHasher(t *testing.T) {
 	hasher := &DataHasher{}
 
diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go
index 5c31c4b920..b8fa5a1cb2 100644
--- a/share/shwap/data_id.go
+++ b/share/shwap/data_id.go
@@ -17,7 +17,6 @@ const DataIDSize = RowIDSize + share.NamespaceSize
 
 // DataID is an unique identifier of a namespaced Data inside EDS Row.
 type DataID struct {
-	// TODO(@walldiss): why embed instead of just having a field?
 	RowID
 
 	// DataNamespace is the namespace of the data
@@ -29,8 +28,10 @@ type DataID struct {
 func NewDataID(height uint64, rowIdx uint16, namespace share.Namespace, root *share.Root) (DataID, error) {
 	did := DataID{
 		RowID: RowID{
+			EdsID: EdsID{
+				Height: height,
+			},
 			RowIndex: rowIdx,
-			Height:   height,
 		},
 		DataNamespace: string(namespace),
 	}
@@ -118,10 +119,6 @@ func (s DataID) Verify(root *share.Root) error {
 	return nil
 }
 
-func (s DataID) GetHeight() uint64 {
-	return s.RowID.GetHeight()
-}
-
 func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
 	data, err := f.Data(ctx, s.Namespace(), int(s.RowIndex))
 	if err != nil {
diff --git a/share/shwap/eds_id.go b/share/shwap/eds_id.go
new file mode 100644
index 0000000000..60b3e072d2
--- /dev/null
+++ b/share/shwap/eds_id.go
@@ -0,0 +1,75 @@
+package shwap
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/celestiaorg/celestia-node/share"
+)
+
+// EdsIDSize is the size of the EdsID in bytes
+const EdsIDSize = 8
+
+// EdsID is a unique identifier of an Extended Data Square.
+type EdsID struct {
+	// Height of the block.
+	// Needed to identify block's data square in the whole chain
+	Height uint64
+}
+
+// NewEdsID constructs a new EdsID.
+func NewEdsID(height uint64, root *share.Root) (EdsID, error) {
+	rid := EdsID{
+		Height: height,
+	}
+	return rid, rid.Verify(root)
+}
+
+// MarshalTo encodes EdsID into given byte slice.
+// NOTE: Proto is avoided because
+// * Its size is not deterministic which is required for IPLD.
+// * No support for uint16
+func (rid EdsID) MarshalTo(data []byte) (int, error) {
+	// TODO:(@walldiss): this works, only if data underlying array was preallocated with
+	// enough size. Otherwise Caller might not see the changes.
+	data = binary.BigEndian.AppendUint64(data, rid.Height)
+	return EdsIDSize, nil
+}
+
+// UnmarshalFrom decodes EdsID from given byte slice.
+func (rid *EdsID) UnmarshalFrom(data []byte) (int, error) {
+	rid.Height = binary.BigEndian.Uint64(data)
+	return EdsIDSize, nil
+}
+
+// MarshalBinary encodes EdsID into binary form.
+func (rid EdsID) MarshalBinary() ([]byte, error) {
+	data := make([]byte, 0, EdsIDSize)
+	n, err := rid.MarshalTo(data)
+	return data[:n], err
+}
+
+// UnmarshalBinary decodes EdsID from binary form.
+func (rid *EdsID) UnmarshalBinary(data []byte) error {
+	if len(data) != EdsIDSize {
+		return fmt.Errorf("invalid EdsID data length: %d != %d", len(data), EdsIDSize)
+	}
+	_, err := rid.UnmarshalFrom(data)
+	return err
+}
+
+// Verify verifies EdsID fields.
+func (rid EdsID) Verify(root *share.Root) error {
+	if root == nil {
+		return fmt.Errorf("nil Root")
+	}
+	if rid.Height == 0 {
+		return fmt.Errorf("zero Height")
+	}
+
+	return nil
+}
+
+func (rid EdsID) GetHeight() uint64 {
+	return rid.Height
+}
diff --git a/share/shwap/eds_id_test.go b/share/shwap/eds_id_test.go
new file mode 100644
index 0000000000..697980cb8b
--- /dev/null
+++ b/share/shwap/eds_id_test.go
@@ -0,0 +1,31 @@
+package shwap
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds/edstest"
+)
+
+func TestEdsID(t *testing.T) {
+	square := edstest.RandEDS(t, 2)
+	root, err := share.NewRoot(square)
+	require.NoError(t, err)
+
+	id, err := NewEdsID(2, root)
+	require.NoError(t, err)
+
+	data, err := id.MarshalBinary()
+	require.NoError(t, err)
+
+	idOut := EdsID{}
+	err = idOut.UnmarshalBinary(data)
+	require.NoError(t, err)
+	assert.EqualValues(t, id, idOut)
+
+	err = idOut.Verify(root)
+	require.NoError(t, err)
+}
diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go
index 83b4954573..b842805808 100644
--- a/share/shwap/row_id.go
+++ b/share/shwap/row_id.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/binary"
 	"fmt"
+
 	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
@@ -17,13 +18,12 @@ import (
 // TODO(@walldiss): maybe move into separate subpkg?
 
 // RowIDSize is the size of the RowID in bytes
-const RowIDSize = 10
+const RowIDSize = EdsIDSize + 2
 
 // RowID is an unique identifier of a Row.
 type RowID struct {
-	// Height of the block.
-	// Needed to identify block's data square in the whole chain
-	Height uint64
+	EdsID
+
 	// RowIndex is the index of the axis(row, col) in the data square
 	RowIndex uint16
 }
@@ -31,8 +31,10 @@ type RowID struct {
 // NewRowID constructs a new RowID.
 func NewRowID(height uint64, rowIdx uint16, root *share.Root) (RowID, error) {
 	rid := RowID{
+		EdsID: EdsID{
+			Height: height,
+		},
 		RowIndex: rowIdx,
-		Height:   height,
 	}
 	return rid, rid.Verify(root)
 }
@@ -103,11 +105,8 @@ func (rid *RowID) UnmarshalBinary(data []byte) error {
 
 // Verify verifies RowID fields.
 func (rid RowID) Verify(root *share.Root) error {
-	if root == nil {
-		return fmt.Errorf("nil Root")
-	}
-	if rid.Height == 0 {
-		return fmt.Errorf("zero Height")
+	if err := rid.EdsID.Verify(root); err != nil {
+		return err
 	}
 
 	sqrLn := len(root.RowRoots)
@@ -118,10 +117,6 @@ func (rid RowID) Verify(root *share.Root) error {
 	return nil
 }
 
-func (rid RowID) GetHeight() uint64 {
-	return rid.Height
-}
-
 func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
 	axisHalf, err := f.AxisHalf(ctx, rsmt2d.Row, int(rid.RowIndex))
 	if err != nil {
diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go
index 5964a5d07d..ae65c646f2 100644
--- a/share/shwap/sample_id.go
+++ b/share/shwap/sample_id.go
@@ -20,7 +20,6 @@ const SampleIDSize = RowIDSize + 2
 
 // SampleID is an unique identifier of a Sample.
 type SampleID struct {
-	// TODO(@walldiss): why embed instead of just having a field?
 	RowID
 
 	// ShareIndex is the index of the sampled share in the Row
@@ -33,8 +32,10 @@ func NewSampleID(height uint64, smplIdx int, root *share.Root) (SampleID, error)
 	rowIdx, shrIdx := uint16(smplIdx/sqrLn), uint16(smplIdx%sqrLn)
 	sid := SampleID{
 		RowID: RowID{
+			EdsID: EdsID{
+				Height: height,
+			},
 			RowIndex: rowIdx,
-			Height:   height,
 		},
 		ShareIndex: shrIdx,
 	}
@@ -110,10 +111,6 @@ func (sid SampleID) Verify(root *share.Root) error {
 	return sid.RowID.Verify(root)
 }
 
-func (sid SampleID) GetHeight() uint64 {
-	return sid.RowID.Height
-}
-
 func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) {
 	shr, err := f.Share(ctx, int(sid.ShareIndex), int(sid.RowID.RowIndex))
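The TODO inside MarshalTo above is worth unpacking: binary.BigEndian.AppendUint64 appends, so when the destination slice lacks capacity it allocates a fresh backing array and the caller's slice never observes the write. MarshalBinary only works because it preallocates cap = EdsIDSize and re-slices the same backing array. A standalone demonstration of that Go semantics (not part of the patch):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		small := make([]byte, 0) // cap 0: AppendUint64 reallocates
		_ = binary.BigEndian.AppendUint64(small, 42)
		fmt.Println(len(small)) // 0: the caller's slice sees nothing

		ok := make([]byte, 0, 8) // cap 8: append fills the shared array in place
		_ = binary.BigEndian.AppendUint64(ok, 42)
		fmt.Println(ok[:8]) // [0 0 0 0 0 0 0 42]: visible via re-slice
	}

With EdsID embedded, the sizes also stack up additively, as the constants in this patch spell out: EdsID is 8 bytes (Height), RowID is EdsIDSize + 2 (RowIndex), and SampleID is RowIDSize + 2 (ShareIndex).
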
From 960ea10381bbfb626e10d840f92a2e29be9539c3 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Thu, 28 Mar 2024 00:52:41 +0400
Subject: [PATCH 113/132] extract ErrOperationNotSupported to getter interface

---
 share/getter.go          | 3 +++
 share/getters/cascade.go | 2 +-
 share/getters/shrex.go   | 2 +-
 share/getters/utils.go   | 2 --
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/share/getter.go b/share/getter.go
index c75c2f5b3f..c121f262c1 100644
--- a/share/getter.go
+++ b/share/getter.go
@@ -18,6 +18,9 @@ var (
 	// ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the
 	// square size.
 	ErrOutOfBounds = errors.New("share: row or column index is larger than square size")
+	// ErrOperationNotSupported is used to indicate that the operation is not supported by the
+	// implementation.
+	ErrOperationNotSupported = errors.New("operation is not supported")
 )
 
 // Getter interface provides a set of accessors for shares by the Root.
diff --git a/share/getters/cascade.go b/share/getters/cascade.go
index 3875127580..42e211e3f7 100644
--- a/share/getters/cascade.go
+++ b/share/getters/cascade.go
@@ -132,7 +132,7 @@ func cascadeGetters[V any](
 			return val, nil
 		}
 
-		if errors.Is(getErr, errOperationNotSupported) {
+		if errors.Is(getErr, share.ErrOperationNotSupported) {
 			continue
 		}
 
diff --git a/share/getters/shrex.go b/share/getters/shrex.go
index 1716138343..bef3f92e7c 100644
--- a/share/getters/shrex.go
+++ b/share/getters/shrex.go
@@ -121,7 +121,7 @@ func (sg *ShrexGetter) Stop(ctx context.Context) error {
 }
 
 func (sg *ShrexGetter) GetShare(context.Context, *header.ExtendedHeader, int, int) (share.Share, error) {
-	return nil, fmt.Errorf("getter/shrex: GetShare %w", errOperationNotSupported)
+	return nil, fmt.Errorf("getter/shrex: GetShare %w", share.ErrOperationNotSupported)
 }
 
 func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) {
diff --git a/share/getters/utils.go b/share/getters/utils.go
index 2260183b4f..70d44a623b 100644
--- a/share/getters/utils.go
+++ b/share/getters/utils.go
@@ -12,8 +12,6 @@ import (
 var (
 	tracer = otel.Tracer("share/getters")
 	log    = logging.Logger("share/getters")
-
-	errOperationNotSupported = errors.New("operation is not supported")
 )
 
 // ctxWithSplitTimeout will split timeout stored in context by splitFactor and return the result if

From a0c885bbec3c2392aa7e6acdfe93b1903225d925 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Thu, 28 Mar 2024 00:56:11 +0400
Subject: [PATCH 114/132] aggregate testing utils in the same folder

---
 share/{eds/edstest/testing.go => testing/edstest/eds.go}   | 0
 share/{sharetest/testing.go => testing/sharetest/share.go} | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename share/{eds/edstest/testing.go => testing/edstest/eds.go} (100%)
 rename share/{sharetest/testing.go => testing/sharetest/share.go} (100%)

diff --git a/share/eds/edstest/testing.go b/share/testing/edstest/eds.go
similarity index 100%
rename from share/eds/edstest/testing.go
rename to share/testing/edstest/eds.go
diff --git a/share/sharetest/testing.go b/share/testing/sharetest/share.go
similarity index 100%
rename from share/sharetest/testing.go
rename to share/testing/sharetest/share.go

From 683bb93afc644b87e3760acf47fcea8be87c8119 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Thu, 28 Mar 2024 00:58:42 +0400
Subject: [PATCH 115/132] - register verifier on message creation
 - allow release of verifier without verify call

---
 share/shwap/data_id.go   |  6 ++++++
 share/shwap/row_id.go    | 10 ++++++++++
 share/shwap/sample_id.go | 10 ++++++++++
 share/shwap/shwap.go     | 18 +++++++-----------
 4 files changed, 33 insertions(+), 11 deletions(-)

diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go
index 5c31c4b920..f814a83bd2 100644
--- a/share/shwap/data_id.go
+++ b/share/shwap/data_id.go
@@ -34,6 +34,12 @@ func NewDataID(height uint64, rowIdx uint16, namespace share.Namespace, root *sh
 		},
 		DataNamespace: string(namespace),
 	}
+
+	verifyFn := func(d Data) error {
+		return d.Verify(root)
+	}
+	dataVerifiers.Add(did, verifyFn)
+
 	return did, did.Verify(root)
 }
 
diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go
index 83b4954573..ce47eb2d0d 100644
--- a/share/shwap/row_id.go
+++ b/share/shwap/row_id.go
@@ -34,6 +34,12 @@ func NewRowID(height uint64, rowIdx uint16, root *share.Root) (RowID, error) {
 		RowIndex: rowIdx,
 		Height:   height,
 	}
+
+	verifyFn := func(row Row) error {
+		return row.Verify(root)
+	}
+	rowVerifiers.Add(rid, verifyFn)
+
 	return rid, rid.Verify(root)
 }
 
@@ -145,3 +151,7 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc
 	}
 	return blk, nil
 }
+
+func (rid RowID) Release() {
+	rowVerifiers.Delete(rid)
+}
diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go
index 5964a5d07d..be667e58ca 100644
--- a/share/shwap/sample_id.go
+++ b/share/shwap/sample_id.go
@@ -38,8 +38,12 @@ func NewSampleID(height uint64, smplIdx int, root *share.Root) (SampleID, error)
 		},
 		ShareIndex: shrIdx,
 	}
+
+	verifyFn := func(s Sample) error {
+		return s.Verify(root)
+	}
+	sampleVerifiers.Add(sid, verifyFn)
+
 	return sid, sid.Verify(root)
 }
 
@@ -127,3 +133,7 @@ func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.B
 	}
 	return blk, nil
 }
+
+func (sid SampleID) Release() {
+	sampleVerifiers.Delete(sid)
+}
diff --git a/share/shwap/shwap.go b/share/shwap/shwap.go
index ccc51cce92..bb8e414094 100644
--- a/share/shwap/shwap.go
+++ b/share/shwap/shwap.go
@@ -6,19 +6,11 @@ import (
 	"hash"
 	"sync"
 
-	"github.com/ipfs/boxo/blockservice"
-	"github.com/ipfs/boxo/blockstore"
-	"github.com/ipfs/boxo/exchange"
 	"github.com/ipfs/go-cid"
 	logger "github.com/ipfs/go-log/v2"
 	mh "github.com/multiformats/go-multihash"
 )
 
-// NewBlockService creates a new blockservice.BlockService with allowlist supporting the protocol.
-func NewBlockService(b blockstore.Blockstore, ex exchange.Interface) blockservice.BlockService {
-	return blockservice.New(b, ex, blockservice.WithAllowlist(defaultAllowlist))
-}
-
 var log = logger.Logger("shwap")
 
 const (
@@ -86,8 +78,12 @@ func (v *verifiers[ID, V]) Verify(id ID, val V) error {
 	return f.(func(V) error)(val)
 }
 
+func (v *verifiers[ID, V]) Delete(id ID) {
+	v.mp.Delete(id)
+}
+
-// defaultAllowlist keeps default list of hashes allowed in the network.
-var defaultAllowlist allowlist
+// DefaultAllowlist keeps default list of hashes allowed in the network.
+var DefaultAllowlist allowlist
 
 type allowlist struct{}
 
@@ -102,7 +98,7 @@ func validateCID(cid cid.Cid) error {
 	prefix := cid.Prefix()
 
-	if !defaultAllowlist.IsAllowed(prefix.MhType) {
+	if !DefaultAllowlist.IsAllowed(prefix.MhType) {
 		return fmt.Errorf("unsupported multihash type %d", prefix.MhType)
 	}
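The lifecycle these hunks imply, sketched end to end (not part of the patch; the ID constructors and Release come from the diffs above, the fetch step is a placeholder): constructing an ID registers a verifier keyed by that ID, the hasher invokes it when the matching block arrives, and Release drops the entry even when no response was ever verified, which prevents the verifier map from leaking on failed requests.

	func fetchSample(height uint64, idx int, root *share.Root) error {
		sid, err := shwap.NewSampleID(height, idx, root) // registers the verifier
		if err != nil {
			return err
		}
		defer sid.Release() // safe whether or not Verify ever ran

		// ... fetch the block for sid.Cid(); verification happens on receipt ...
		return nil
	}
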
fetch exchange.SessionExchange bstore blockstore.Blockstore } @@ -59,57 +57,28 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smpl return shares, nil } - sids := make([]SampleID, len(smplIdxs)) + cids := make([]cid.Cid, len(smplIdxs)) for i, shrIdx := range smplIdxs { - sid, err := NewSampleID(hdr.Height(), shrIdx, hdr.DAH) + sid, err := shwap.NewSampleID(hdr.Height(), shrIdx, hdr.DAH) if err != nil { return nil, err } - - sids[i] = sid - } - - smplsMu := sync.Mutex{} - smpls := make(map[int]Sample, len(smplIdxs)) - verifyFn := func(s Sample) error { - err := s.Verify(hdr.DAH) - if err != nil { - return err - } - - smplIdx := int(s.SampleID.RowIndex)*len(hdr.DAH.RowRoots) + int(s.SampleID.ShareIndex) - smplsMu.Lock() - smpls[smplIdx] = s - smplsMu.Unlock() - return nil - } - - cids := make([]cid.Cid, len(smplIdxs)) - for i, sid := range sids { - sampleVerifiers.Add(sid, verifyFn) + defer sid.Release() cids[i] = sid.Cid() } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - ses := g.fetch.NewSession(ctx) - // must start getting only after verifiers are registered - blkCh, err := ses.GetBlocks(ctx, cids) + blks, err := g.getBlocks(ctx, cids) if err != nil { - return nil, fmt.Errorf("fetching blocks: %w", err) - } - // GetBlocks handles ctx and closes blkCh, so we don't have to - blks := make([]block.Block, 0, len(smplIdxs)) - for blk := range blkCh { - blks = append(blks, blk) + return nil, fmt.Errorf("getting blocks: %w", err) } - // only persist when all samples received + if len(blks) != len(smplIdxs) { if ctx.Err() != nil { return nil, ctx.Err() } return nil, fmt.Errorf("not all shares were found") } + // ensure we persist samples/blks and make them available for Bitswap err = g.bstore.PutMany(ctx, blks) if err != nil { @@ -122,12 +91,26 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smpl } // ensure we return shares in the requested order - shrs := make([]share.Share, len(smplIdxs)) - for i, smplIdx := range smplIdxs { - shrs[i] = smpls[smplIdx].SampleShare + shrs := make(map[int]share.Share, len(blks)) + for _, blk := range blks { + sample, err := shwap.SampleFromBlock(blk) + if err != nil { + return nil, fmt.Errorf("getting sample from block: %w", err) + } + shrIdx := int(sample.SampleID.RowIndex)*len(hdr.DAH.RowRoots) + int(sample.SampleID.ShareIndex) + shrs[shrIdx] = sample.SampleShare + } + + ordered := make([]share.Share, len(shrs)) + for i, shrIdx := range smplIdxs { + sh, ok := shrs[shrIdx] + if !ok { + return nil, fmt.Errorf("missing share for index %d", shrIdx) + } + ordered[i] = sh } - return shrs, nil + return ordered, nil } // GetEDS @@ -138,58 +121,54 @@ func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2 } sqrLn := len(hdr.DAH.RowRoots) - rids := make([]RowID, sqrLn/2) + cids := make([]cid.Cid, sqrLn/2) for i := 0; i < sqrLn/2; i++ { - rid, err := NewRowID(hdr.Height(), uint16(i), hdr.DAH) + rid, err := shwap.NewRowID(hdr.Height(), uint16(i), hdr.DAH) if err != nil { return nil, err } - - rids[i] = rid + defer rid.Release() + cids[i] = rid.Cid() } - square, err := rsmt2d.NewExtendedDataSquare( - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(sqrLn/2)), uint(sqrLn), - share.Size, - ) + blks, err := g.getBlocks(ctx, cids) if err != nil { - return nil, err + return nil, fmt.Errorf("getting blocks: %w", err) + } - verifyFn := func(row Row) error { - err := row.Verify(hdr.DAH) - if err != nil { - return err + if len(blks) != sqrLn/2 { + if ctx.Err() != nil { + 
return nil, ctx.Err() } + return nil, fmt.Errorf("not all rows were found") + } - for shrIdx, shr := range row.RowShares { - err = square.SetCell(uint(row.RowIndex), uint(shrIdx), shr) // no synchronization needed - if err != nil { - panic(err) // this should never happen and if it is... something is really wrong - } + rows := make([]*shwap.Row, len(blks)) + for _, blk := range blks { + row, err := shwap.RowFromBlock(blk) + if err != nil { + return nil, fmt.Errorf("getting row from block: %w", err) } - - return nil + if row.RowIndex >= uint16(sqrLn/2) { + // should never happen, because rows should be verified against root by the time they are returned + return nil, fmt.Errorf("row index out of bounds: %d", row.RowIndex) + } + rows[row.RowIndex] = row } - cids := make([]cid.Cid, sqrLn/2) - for i, rid := range rids { - rowVerifiers.Add(rid, verifyFn) - cids[i] = rid.Cid() + shrs := make([]share.Share, 0, sqrLn*sqrLn) + for _, row := range rows { + shrs = append(shrs, row.RowShares...) } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - ses := g.fetch.NewSession(ctx) - // must start getting only after verifiers are registered - blkCh, err := ses.GetBlocks(ctx, cids) + square, err := rsmt2d.ComputeExtendedDataSquare( + shrs, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(sqrLn/2)), + ) if err != nil { - return nil, fmt.Errorf("fetching blocks: %w", err) - } - // GetBlocks handles ctx by closing blkCh, so we don't have to - for range blkCh { //nolint:revive // it complains on empty block, but the code is functional - // we handle writes in verifyFn so just wait for as many results as possible + return nil, fmt.Errorf("computing EDS: %w", err) } // and try to repair @@ -218,56 +197,62 @@ func (g *Getter) GetSharesByNamespace( return share.NamespacedShares{}, nil } - dids := make([]DataID, 0, to-from) + cids := make([]cid.Cid, 0, to-from) for rowIdx := from; rowIdx < to; rowIdx++ { - did, err := NewDataID(hdr.Height(), uint16(rowIdx), ns, hdr.DAH) + did, err := shwap.NewDataID(hdr.Height(), uint16(rowIdx), ns, hdr.DAH) if err != nil { return nil, err } + defer did.Release() + cids = append(cids, did.Cid()) + } - dids = append(dids, did) + blks, err := g.getBlocks(ctx, cids) + if err != nil { + return nil, fmt.Errorf("getting blocks: %w", err) } - datas := make([]Data, len(dids)) - verifyFn := func(d Data) error { - err := d.Verify(hdr.DAH) + nShrs := make([]share.NamespacedRow, len(blks)) + for _, blk := range blks { + data, err := shwap.DataFromBlock(blk) if err != nil { - return err + return nil, fmt.Errorf("getting row from block: %w", err) } - nsStartIdx := dids[0].RowIndex - idx := d.RowIndex - nsStartIdx - datas[idx] = d - return nil + if data.RowIndex < uint16(from) || data.RowIndex >= uint16(to) { + // should never happen, because rows should be verified against root by the time they are returned + return nil, fmt.Errorf("row index out of bounds: %d", data.RowIndex) + } + nShrs[int(data.RowIndex)-from] = share.NamespacedRow{ + Shares: data.DataShares, + Proof: &data.DataProof, + } } - cids := make([]cid.Cid, len(dids)) - for i, did := range dids { - dataVerifiers.Add(did, verifyFn) - cids[i] = did.Cid() - } + return nShrs, nil +} +func (g *Getter) getBlocks(ctx context.Context, cids []cid.Cid) ([]block.Block, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() ses := g.fetch.NewSession(ctx) // must start getting only after verifiers are registered blkCh, err := ses.GetBlocks(ctx, cids) if err != nil { - return nil, fmt.Errorf("fetching blocks:%w", err) 
+ return nil, fmt.Errorf("fetching blocks: %w", err) } - // GetBlocks handles ctx by closing blkCh, so we don't have to - for range blkCh { //nolint:revive // it complains on empty block, but the code is functional - // we handle writes in verifyFn so just wait for as many results as possible + // GetBlocks handles ctx and closes blkCh, so we don't have to + blks := make([]block.Block, 0, len(cids)) + for blk := range blkCh { + blks = append(blks, blk) } - - nShrs := make([]share.NamespacedRow, 0, len(datas)) - for _, row := range datas { - proof := row.DataProof - nShrs = append(nShrs, share.NamespacedRow{ - Shares: row.DataShares, - Proof: &proof, - }) + // only persist when all samples received + if len(blks) != len(cids) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + return nil, fmt.Errorf("not all shares were found") } - return nShrs, nil + return blks, nil } diff --git a/share/shwap/getter_test.go b/share/shwap/getter/getter_test.go similarity index 96% rename from share/shwap/getter_test.go rename to share/shwap/getter/getter_test.go index c2be58275e..68a6b3967e 100644 --- a/share/shwap/getter_test.go +++ b/share/shwap/getter/getter_test.go @@ -1,4 +1,4 @@ -package shwap_test +package shwap_getter import ( "bytes" @@ -28,7 +28,6 @@ import ( "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/sharetest" - "github.com/celestiaorg/celestia-node/share/shwap" "github.com/celestiaorg/celestia-node/share/store" ) @@ -43,7 +42,7 @@ func TestGetter(t *testing.T) { bstore := edsBlockstore(ctx, t, square, hdr.Height()) exch := DummySessionExchange{bstore} - get := shwap.NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) + get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) t.Run("GetShares", func(t *testing.T) { idxs := rand.Perm(int(square.Width() ^ 2))[:10] @@ -129,7 +128,7 @@ func TestGetter(t *testing.T) { bstore := edsBlockstore(ctx, t, square, hdr.Height()) exch := &DummySessionExchange{bstore} - get := shwap.NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) + get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) maxNs := nmt.MaxNamespace(root.RowRoots[(len(root.RowRoots))/2-1], share.NamespaceSize) ns, err := share.Namespace(maxNs).AddInt(-1) From 6f7fdd25c94eaae263ebd1c43bd8da9c0ec35b01 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 28 Mar 2024 13:50:00 +0400 Subject: [PATCH 117/132] rework retriever and add reconstruction getter --- nodebuilder/share/constructors.go | 6 +- nodebuilder/share/module.go | 6 +- share/eds/retriever_quadrant.go | 102 ---------- share/shwap/getter/getter_test.go | 24 ++- share/shwap/getter/reconstruction.go | 24 +++ share/{eds => shwap/getter}/retriever.go | 215 +++++++++----------- share/shwap/getter/retriever_quadrant.go | 44 ++++ share/shwap/getter/retriever_test.go | 247 +++++++++++++++++++++++ 8 files changed, 435 insertions(+), 233 deletions(-) delete mode 100644 share/eds/retriever_quadrant.go create mode 100644 share/shwap/getter/reconstruction.go rename share/{eds => shwap/getter}/retriever.go (50%) create mode 100644 share/shwap/getter/retriever_quadrant.go create mode 100644 share/shwap/getter/retriever_test.go diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 4f79313112..9cf42229dd 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -1,6 +1,7 @@ package share import ( + 
shwap_getter "github.com/celestiaorg/celestia-node/share/shwap/getter" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing" @@ -9,7 +10,6 @@ import ( "github.com/celestiaorg/celestia-node/share/getters" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/shwap" ) const ( @@ -40,7 +40,7 @@ func newModule(getter share.Getter, avail share.Availability) Module { func lightGetter( shrexGetter *getters.ShrexGetter, - shwapGetter *shwap.Getter, + shwapGetter *shwap_getter.Getter, cfg Config, ) share.Getter { var cascade []share.Getter @@ -71,7 +71,7 @@ func bridgeGetter( func fullGetter( storeGetter *getters.StoreGetter, shrexGetter *getters.ShrexGetter, - shwapGetter *shwap.Getter, + shwapGetter *shwap_getter.Getter, cfg Config, ) share.Getter { var cascade []share.Getter diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index 73e9a14292..91e94bad1f 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -2,6 +2,7 @@ package share import ( "context" + shwap_getter "github.com/celestiaorg/celestia-node/share/shwap/getter" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/p2p/net/conngater" @@ -22,7 +23,6 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" - "github.com/celestiaorg/celestia-node/share/shwap" "github.com/celestiaorg/celestia-node/share/store" ) @@ -178,7 +178,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option baseComponents, bridgeAndFullComponents, shrexGetterComponents, - fx.Provide(shwap.NewGetter), + fx.Provide(shwap_getter.NewGetter), fx.Provide(fullGetter), ) case node.Light: @@ -192,7 +192,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option }), peerManagerWithShrexPools, shrexGetterComponents, - fx.Provide(shwap.NewGetter), + fx.Provide(shwap_getter.NewGetter), fx.Provide(lightGetter), // shrexsub broadcaster stub for daser fx.Provide(func() shrexsub.BroadcastFn { diff --git a/share/eds/retriever_quadrant.go b/share/eds/retriever_quadrant.go deleted file mode 100644 index 3d616e9cd4..0000000000 --- a/share/eds/retriever_quadrant.go +++ /dev/null @@ -1,102 +0,0 @@ -package eds - -import ( - "math/rand" - "time" - - "github.com/ipfs/go-cid" - - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share/ipld" -) - -const ( - // there are always 4 quadrants - numQuadrants = 4 - // blockTime equals to the time with which new blocks are produced in the network. - // TODO(@Wondertan): Here we assume that the block time is a minute, but - // block time is a network wide variable/param that has to be taken from - // a proper place - blockTime = time.Minute -) - -// RetrieveQuadrantTimeout defines how much time Retriever waits before -// starting to retrieve another quadrant. -// -// NOTE: -// - The whole data square must be retrieved in less than block time. -// - We have 4 quadrants from two sources(rows, cols) which equals to 8 in total. 
-var RetrieveQuadrantTimeout = blockTime / numQuadrants * 2 - -type quadrant struct { - // slice of roots to get shares from - roots []cid.Cid - // Example coordinates(x;y) of each quadrant when fetching from column roots - // ------ ------- - // | Q0 | | Q1 | - // |(0;0)| |(1;0)| - // ------ ------- - // | Q2 | | Q3 | - // |(0;1)| |(1;1)| - // ------ ------- - x, y int - // source defines the axis(Row or Col) to fetch the quadrant from - source rsmt2d.Axis -} - -// newQuadrants constructs a slice of quadrants from DAHeader. -// There are always 4 quadrants per each source (row and col), so 8 in total. -// The ordering of quadrants is random. -func newQuadrants(dah *da.DataAvailabilityHeader) []*quadrant { - // combine all the roots into one slice, so they can be easily accessible by index - daRoots := [][][]byte{ - dah.RowRoots, - dah.ColumnRoots, - } - // create a quadrant slice for each source(row;col) - sources := [][]*quadrant{ - make([]*quadrant, numQuadrants), - make([]*quadrant, numQuadrants), - } - for source, quadrants := range sources { - size, qsize := len(daRoots[source]), len(daRoots[source])/2 - roots := make([]cid.Cid, size) - for i, root := range daRoots[source] { - roots[i] = ipld.MustCidFromNamespacedSha256(root) - } - - for i := range quadrants { - // convert quadrant 1D into into 2D coordinates - x, y := i%2, i/2 - quadrants[i] = &quadrant{ - roots: roots[qsize*y : qsize*(y+1)], - x: x, - y: y, - source: rsmt2d.Axis(source), - } - } - } - quadrants := make([]*quadrant, 0, numQuadrants*2) - for _, qs := range sources { - quadrants = append(quadrants, qs...) - } - // shuffle quadrants to be fetched in random order - rand.Shuffle(len(quadrants), func(i, j int) { quadrants[i], quadrants[j] = quadrants[j], quadrants[i] }) - return quadrants -} - -// pos calculates position of a share in a data square. 
-func (q *quadrant) pos(rootIdx, cellIdx int) (int, int) { - cellIdx += len(q.roots) * q.x - rootIdx += len(q.roots) * q.y - switch q.source { - case rsmt2d.Row: - return rootIdx, cellIdx - case rsmt2d.Col: - return cellIdx, rootIdx - default: - panic("unknown axis") - } -} diff --git a/share/shwap/getter/getter_test.go b/share/shwap/getter/getter_test.go index bbef6ec090..fcaf7e9d22 100644 --- a/share/shwap/getter/getter_test.go +++ b/share/shwap/getter/getter_test.go @@ -40,7 +40,8 @@ func TestGetter(t *testing.T) { square, root := edstest.RandEDSWithNamespace(t, ns, size*size, size) hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} - bstore := edsBlockstore(ctx, t, square, hdr.Height()) + store, bstore := edsBlockstore(t) + put(t, store, square, hdr.Height()) exch := DummySessionExchange{bstore} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) @@ -126,7 +127,8 @@ func TestGetter(t *testing.T) { require.NoError(t, err) hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 3}, DAH: root} - bstore := edsBlockstore(ctx, t, square, hdr.Height()) + store, bstore := edsBlockstore(t) + put(t, store, square, hdr.Height()) exch := &DummySessionExchange{bstore} get := NewGetter(exch, blockstore.NewBlockstore(datastore.NewMapDatastore())) @@ -157,6 +159,10 @@ func (e DummySessionExchange) GetBlock(ctx context.Context, k cid.Cid) (blocks.B if format.IsNotFound(err) { return nil, fmt.Errorf("block was not found locally (offline): %w", err) } + if err != nil { + fmt.Println("ERROR", err) + return nil, err + } rbcid, err := k.Prefix().Sum(blk.RawData()) if err != nil { return nil, err @@ -202,16 +208,18 @@ func (e DummySessionExchange) Close() error { return nil } -func edsBlockstore(ctx context.Context, t *testing.T, eds *rsmt2d.ExtendedDataSquare, height uint64) blockstore.Blockstore { - dah, err := share.NewRoot(eds) +func edsBlockstore(t *testing.T) (*store.Store, blockstore.Blockstore) { + edsStore, err := store.NewStore(store.DefaultParameters(), t.TempDir()) require.NoError(t, err) - edsStore, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + return edsStore, store.NewBlockstore(edsStore, ds_sync.MutexWrap(datastore.NewMapDatastore())) +} + +func put(t *testing.T, store *store.Store, eds *rsmt2d.ExtendedDataSquare, height uint64) { + dah, err := share.NewRoot(eds) require.NoError(t, err) - f, err := edsStore.Put(ctx, dah.Hash(), height, eds) + f, err := store.Put(context.Background(), dah.Hash(), height, eds) require.NoError(t, err) f.Close() - - return store.NewBlockstore(edsStore, ds_sync.MutexWrap(datastore.NewMapDatastore())) } diff --git a/share/shwap/getter/reconstruction.go b/share/shwap/getter/reconstruction.go new file mode 100644 index 0000000000..e66749cfb5 --- /dev/null +++ b/share/shwap/getter/reconstruction.go @@ -0,0 +1,24 @@ +package shwap_getter + +import ( + "context" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/rsmt2d" +) + +type ReconstructionGetter struct { + retriever edsRetriver +} + +func (r ReconstructionGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + return nil, share.ErrOperationNotSupported +} + +func (r ReconstructionGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + return r.retriever.Retrieve(ctx, header) +} + +func (r ReconstructionGetter) GetSharesByNamespace(ctx context.Context, header 
*header.ExtendedHeader, namespace share.Namespace) (share.NamespacedShares, error) { + return nil, share.ErrOperationNotSupported +} diff --git a/share/eds/retriever.go b/share/shwap/getter/retriever.go similarity index 50% rename from share/eds/retriever.go rename to share/shwap/getter/retriever.go index c2966c3953..50354d0605 100644 --- a/share/eds/retriever.go +++ b/share/shwap/getter/retriever.go @@ -1,36 +1,44 @@ -package eds +package shwap_getter import ( "context" "errors" + "github.com/celestiaorg/celestia-node/header" "sync" "sync/atomic" "time" "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/ipld" ) +// TODO(@walldiss): +// - update comments +// - befp construction should work over share.getter instead of blockservice +// - use single bitswap session for fetching shares +// - don't request repaired shares +// - use enriched logger for session +// - remove per-share tracing +// - remove quadrants struct +// - remove unneeded locks +// - add metrics + var ( log = logging.Logger("share/eds") tracer = otel.Tracer("share/eds") ) -// Retriever retrieves rsmt2d.ExtendedDataSquares from the IPLD network. +// edsRetriver retrieves rsmt2d.ExtendedDataSquares from the IPLD network. // Instead of requesting data 'share by share' it requests data by quadrants // minimizing bandwidth usage in the happy cases. // @@ -40,15 +48,19 @@ var ( // | 2 | 3 | // ---- ---- // -// Retriever randomly picks one of the data square quadrants and tries to request them one by one +// edsRetriver randomly picks one of the data square quadrants and tries to request them one by one // until it is able to reconstruct the whole square. -type Retriever struct { - bServ blockservice.BlockService +type edsRetriver struct { + bServ blockservice.BlockService + getter share.Getter } -// NewRetriever creates a new instance of the Retriever over IPLD BlockService and rmst2d.Codec -func NewRetriever(bServ blockservice.BlockService) *Retriever { - return &Retriever{bServ: bServ} +// NewRetriever creates a new instance of the edsRetriver over IPLD BlockService and rmst2d.Codec +func NewRetriever(bServ blockservice.BlockService, getter share.Getter) *edsRetriver { + return &edsRetriver{ + bServ: bServ, + getter: getter, + } } // Retrieve retrieves all the data committed to DataAvailabilityHeader. @@ -57,7 +69,8 @@ func NewRetriever(bServ blockservice.BlockService) *Retriever { // data square and reconstructs the other three quadrants (3/4). If the requested quadrant is not // available within RetrieveQuadrantTimeout, it starts requesting another quadrant until either the // data is reconstructed, context is canceled or ErrByzantine is generated. 
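// A minimal usage sketch (illustrative only; exact wiring of the block
// service and getter depends on the surrounding node construction):
//
//	r := NewRetriever(bServ, getter)
//	eds, err := r.Retrieve(ctx, hdr) // hdr is the *header.ExtendedHeader
//	if err != nil {
//		// may carry byzantine.ErrByzantine if the square is fraudulent
//	}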
-func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader) (*rsmt2d.ExtendedDataSquare, error) { +func (r *edsRetriver) Retrieve(ctx context.Context, h *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + dah := h.DAH ctx, cancel := context.WithCancel(ctx) defer cancel() // cancels all the ongoing requests if reconstruction succeeds early @@ -68,7 +81,7 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader ) log.Debugw("retrieving data square", "data_hash", dah.String(), "size", len(dah.RowRoots)) - ses, err := r.newSession(ctx, dah) + ses, err := r.newSession(ctx, h) if err != nil { return nil, err } @@ -103,13 +116,9 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader // quadrant request retries. Also, provides an API // to reconstruct the block once enough shares are fetched. type retrievalSession struct { - dah *da.DataAvailabilityHeader - bget blockservice.BlockGetter + header *header.ExtendedHeader + getter share.Getter - // TODO(@Wondertan): Extract into a separate data structure - // https://github.com/celestiaorg/rsmt2d/issues/135 - squareQuadrants []*quadrant - squareCellsLks [][]sync.Mutex squareCellsCount uint32 squareSig chan struct{} squareDn chan struct{} @@ -120,18 +129,11 @@ type retrievalSession struct { } // newSession creates a new retrieval session and kicks off requesting process. -func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHeader) (*retrievalSession, error) { - size := len(dah.RowRoots) +func (r *edsRetriver) newSession(ctx context.Context, h *header.ExtendedHeader) (*retrievalSession, error) { + size := len(h.DAH.RowRoots) treeFn := func(_ rsmt2d.Axis, index uint) rsmt2d.Tree { - // use proofs adder if provided, to cache collected proofs while recomputing the eds - var opts []nmt.Option - visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() - if visitor != nil { - opts = append(opts, nmt.NodeVisitor(visitor)) - } - - tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)/2, index, opts...) 
+ tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(size)/2, index) return &tree } @@ -141,17 +143,12 @@ func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHead } ses := &retrievalSession{ - dah: dah, - bget: blockservice.NewSession(ctx, r.bServ), - squareQuadrants: newQuadrants(dah), - squareCellsLks: make([][]sync.Mutex, size), - squareSig: make(chan struct{}, 1), - squareDn: make(chan struct{}), - square: square, - span: trace.SpanFromContext(ctx), - } - for i := range ses.squareCellsLks { - ses.squareCellsLks[i] = make([]sync.Mutex, size) + header: h, + getter: r.getter, + squareSig: make(chan struct{}, 1), + squareDn: make(chan struct{}), + square: square, + span: trace.SpanFromContext(ctx), } go ses.request(ctx) @@ -178,12 +175,12 @@ func (rs *retrievalSession) Reconstruct(ctx context.Context) (*rsmt2d.ExtendedDa defer span.End() // and try to repair with what we have - err := rs.square.Repair(rs.dah.RowRoots, rs.dah.ColumnRoots) + err := rs.square.Repair(rs.header.DAH.RowRoots, rs.header.DAH.ColumnRoots) if err != nil { span.RecordError(err) return nil, err } - log.Infow("data square reconstructed", "data_hash", rs.dah.String(), "size", len(rs.dah.RowRoots)) + log.Infow("data square reconstructed", "data_hash", rs.header.DAH.String(), "size", len(rs.header.DAH.RowRoots)) close(rs.squareDn) return rs.square, nil } @@ -211,21 +208,16 @@ func (rs *retrievalSession) Close() error { func (rs *retrievalSession) request(ctx context.Context) { t := time.NewTicker(RetrieveQuadrantTimeout) defer t.Stop() - for retry := 0; retry < len(rs.squareQuadrants); retry++ { - q := rs.squareQuadrants[retry] + for _, q := range newQuadrants() { log.Debugw("requesting quadrant", - "axis", q.source, "x", q.x, "y", q.y, - "size", len(q.roots), ) rs.span.AddEvent("requesting quadrant", trace.WithAttributes( - attribute.Int("axis", int(q.source)), attribute.Int("x", q.x), attribute.Int("y", q.y), - attribute.Int("size", len(q.roots)), )) - rs.doRequest(ctx, q) + rs.requestQuadrant(ctx, q) select { case <-t.C: case <-ctx.Done(): @@ -233,83 +225,72 @@ func (rs *retrievalSession) request(ctx context.Context) { } log.Warnw("quadrant request timeout", "timeout", RetrieveQuadrantTimeout.String(), - "axis", q.source, "x", q.x, "y", q.y, - "size", len(q.roots), ) rs.span.AddEvent("quadrant request timeout", trace.WithAttributes( - attribute.Int("axis", int(q.source)), attribute.Int("x", q.x), attribute.Int("y", q.y), - attribute.Int("size", len(q.roots)), )) } } -// doRequest requests the given quadrant by requesting halves of axis(Row or Col) using GetShares +// requestQuadrant requests the given quadrant by requesting halves of axis(Row or Col) using GetShares // and fills shares into rs.square slice. -func (rs *retrievalSession) doRequest(ctx context.Context, q *quadrant) { - size := len(q.roots) - for i, root := range q.roots { - go func(i int, root cid.Cid) { - // get the root node - nd, err := ipld.GetNode(ctx, rs.bget, root) - if err != nil { - rs.span.RecordError(err, trace.WithAttributes( - attribute.Int("root-index", i), - )) - return - } - // and go get shares of left or the right side of the whole col/row axis - // the left or the right side of the tree represent some portion of the quadrant - // which we put into the rs.square share-by-share by calculating shares' indexes using q.index - ipld.GetShares(ctx, rs.bget, nd.Links()[q.x].Cid, size, func(j int, share share.Share) { - // NOTE: Each share can appear twice here, for a Row and Col, respectively. 
- // These shares are always equal, and we allow only the first one to be written - // in the square. - // NOTE-2: We may never actually fetch shares from the network *twice*. - // Once a share is downloaded from the network it may be cached on the IPLD(blockservice) level. - // - // calc position of the share - x, y := q.pos(i, j) - // try to lock the share - ok := rs.squareCellsLks[x][y].TryLock() - if !ok { - // if already locked and written - do nothing - return - } - // The R lock here is *not* to protect rs.square from multiple - // concurrent shares writes but to avoid races between share writes and - // repairing attempts. - // Shares are written atomically in their own slice slots and these "writes" do - // not need synchronization! - rs.squareLk.RLock() - defer rs.squareLk.RUnlock() - // the routine could be blocked above for some time during which the square - // might be reconstructed, if so don't write anything and return - if rs.isReconstructed() { - return - } - if err := rs.square.SetCell(uint(x), uint(y), share); err != nil { - // safe to ignore as: - // * share size already verified - // * the same share might come from either Row or Col - return - } - // if we have >= 1/4 of the square we can start trying to Reconstruct - // TODO(@Wondertan): This is not an ideal way to know when to start - // reconstruction and can cause idle reconstruction tries in some cases, - // but it is totally fine for the happy case and for now. - // The earlier we correctly know that we have the full square - the earlier - // we cancel ongoing requests - the less data is being wastedly transferred. - if atomic.AddUint32(&rs.squareCellsCount, 1) >= uint32(size*size) { - select { - case rs.squareSig <- struct{}{}: - default: - } - } - }) - }(i, root) +func (rs *retrievalSession) requestQuadrant(ctx context.Context, q quadrant) { + odsSize := len(rs.header.DAH.RowRoots) / 2 + for x := q.x * odsSize; x < (q.x+1)*odsSize; x++ { + for y := q.y * odsSize; y < (q.y+1)*odsSize; y++ { + go rs.requestCell(ctx, x, y) + } + } +} + +func (rs *retrievalSession) requestCell(ctx context.Context, x, y int) { + share, err := rs.getter.GetShare(ctx, rs.header, x, y) + if err != nil { + log.Debugw("failed to get share", + "height", rs.header.Height, + "x", x, + "y", y, + "err", err, + ) + return + } + + // the routine could be blocked above for some time during which the square + // might be reconstructed, if so don't write anything and return + if rs.isReconstructed() { + return + } + + rs.squareLk.Lock() + defer rs.squareLk.Unlock() + + if err := rs.square.SetCell(uint(x), uint(y), share); err != nil { + log.Warnw("failed to set cell", + "height", rs.header.Height, + "x", x, + "y", y, + "err", err, + ) + return + } + rs.indicateDone() +} + +func (rs *retrievalSession) indicateDone() { + size := len(rs.header.DAH.RowRoots) / 2 + // if we have >= 1/4 of the square we can start trying to Reconstruct + // TODO(@Wondertan): This is not an ideal way to know when to start + // reconstruction and can cause idle reconstruction tries in some cases, + // but it is totally fine for the happy case and for now. + // The earlier we correctly know that we have the full square - the earlier + // we cancel ongoing requests - the less data is being wastedly transferred. 
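	// (For clarity on the 1/4 figure: size is the ODS width, half the EDS
	// width, so size*size cells equal exactly one quadrant of the full
	// (2*size)^2 square, the theoretical minimum needed to reconstruct.)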
+ if atomic.AddUint32(&rs.squareCellsCount, 1) >= uint32(size*size) { + select { + case rs.squareSig <- struct{}{}: + default: + } } } diff --git a/share/shwap/getter/retriever_quadrant.go b/share/shwap/getter/retriever_quadrant.go new file mode 100644 index 0000000000..2fa028b959 --- /dev/null +++ b/share/shwap/getter/retriever_quadrant.go @@ -0,0 +1,44 @@ +package shwap_getter + +import ( + "time" +) + +const ( + // there are always 4 quadrants + numQuadrants = 4 + // blockTime equals to the time with which new blocks are produced in the network. + // TODO(@Wondertan): Here we assume that the block time is a minute, but + // block time is a network wide variable/param that has to be taken from + // a proper place + blockTime = time.Minute +) + +// RetrieveQuadrantTimeout defines how much time edsRetriver waits before +// starting to retrieve another quadrant. +// +// NOTE: +// - The whole data square must be retrieved in less than block time. +// - We have 4 quadrants from two sources(rows, cols) which equals to 8 in total. +var RetrieveQuadrantTimeout = blockTime / numQuadrants * 2 + +type quadrant struct { + // Example coordinates(x;y) of each quadrant + // ------ ------- + // | Q0 | | Q1 | + // |(0;0)| |(1;0)| + // ------ ------- + // | Q2 | | Q3 | + // |(0;1)| |(1;1)| + // ------ ------- + x, y int +} + +// newQuadrants constructs a slice of quadrants. There are always 4 quadrants. +func newQuadrants() []quadrant { + quadrants := make([]quadrant, 0, numQuadrants) + for i := 0; i < numQuadrants; i++ { + quadrants = append(quadrants, quadrant{x: i % 2, y: i / 2}) + } + return quadrants +} diff --git a/share/shwap/getter/retriever_test.go b/share/shwap/getter/retriever_test.go new file mode 100644 index 0000000000..0e1030273d --- /dev/null +++ b/share/shwap/getter/retriever_test.go @@ -0,0 +1,247 @@ +package shwap_getter + +import ( + "context" + "go.uber.org/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/testing/edstest" +) + +func TestRetriever_Retrieve(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + store, bstore := edsBlockstore(t) + exch := DummySessionExchange{bstore} + getter := NewGetter(exch, bstore) + bServ := ipld.NewMemBlockservice() + r := NewRetriever(bServ, getter) + + height := atomic.NewUint64(1) + type test struct { + name string + squareSize int + } + tests := []test{ + {"1x1(min)", 1}, + {"2x2(med)", 2}, + {"4x4(med)", 4}, + {"8x8(med)", 8}, + {"16x16(med)", 16}, + {"32x32(med)", 32}, + {"64x64(med)", 64}, + {"128x128(max)", share.MaxSquareSize}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + // generate EDS + eds := edstest.RandEDS(t, tc.squareSize) + height := height.Add(1) + put(t, store, eds, height) + + // limit with timeout, specifically retrieval + ctx, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + + root, err := share.NewRoot(eds) + require.NoError(t, err) + hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: int64(height)}, DAH: root} + + out, err := r.Retrieve(ctx, hdr) + require.NoError(t, err) + assert.True(t, eds.Equals(out)) + }) + } +} + +// +//func TestRetriever_ByzantineError(t *testing.T) { +// const width = 8 +// ctx, cancel := context.WithTimeout(context.Background(), 
time.Second*10) +// defer cancel() +// +// bserv := ipld.NewMemBlockservice() +// shares := edstest.RandEDS(t, width).Flattened() +// _, err := ipld.ImportShares(ctx, shares, bserv) +// require.NoError(t, err) +// +// // corrupt shares so that eds erasure coding does not match +// copy(shares[14][share.NamespaceSize:], shares[15][share.NamespaceSize:]) +// +// // import corrupted eds +// batchAdder := ipld.NewNmtNodeAdder(ctx, bserv, ipld.MaxSizeBatchOption(width*2)) +// attackerEDS, err := rsmt2d.ImportExtendedDataSquare( +// shares, +// share.DefaultRSMT2DCodec(), +// wrapper.NewConstructor(uint64(width), +// nmt.NodeVisitor(batchAdder.Visit)), +// ) +// require.NoError(t, err) +// err = batchAdder.Commit() +// require.NoError(t, err) +// +// // ensure we rcv an error +// dah, err := da.NewDataAvailabilityHeader(attackerEDS) +// require.NoError(t, err) +// r := NewRetriever(bserv) +// _, err = r.Retrieve(ctx, &dah) +// var errByz *byzantine.ErrByzantine +// require.ErrorAs(t, err, &errByz) +//} +// +//// TestRetriever_MultipleRandQuadrants asserts that reconstruction succeeds +//// when any three random quadrants requested. +//func TestRetriever_MultipleRandQuadrants(t *testing.T) { +// RetrieveQuadrantTimeout = time.Millisecond * 500 +// const squareSize = 32 +// ctx, cancel := context.WithTimeout(context.Background(), time.Minute) +// defer cancel() +// +// bServ := ipld.NewMemBlockservice() +// r := NewRetriever(bServ) +// +// // generate EDS +// shares := sharetest.RandShares(t, squareSize*squareSize) +// in, err := ipld.AddShares(ctx, shares, bServ) +// require.NoError(t, err) +// +// dah, err := da.NewDataAvailabilityHeader(in) +// require.NoError(t, err) +// ses, err := r.newSession(ctx, &dah) +// require.NoError(t, err) +// +// // wait until two additional quadrants requested +// // this reliably allows us to reproduce the issue +// time.Sleep(RetrieveQuadrantTimeout * 2) +// // then ensure we have enough shares for reconstruction for slow machines e.g. 
CI +// <-ses.Done() +// +// _, err = ses.Reconstruct(ctx) +// assert.NoError(t, err) +//} +// +//func TestFraudProofValidation(t *testing.T) { +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) +// defer t.Cleanup(cancel) +// bServ := ipld.NewMemBlockservice() +// +// odsSize := []int{2, 4, 16, 32, 64, 128} +// for _, size := range odsSize { +// t.Run(fmt.Sprintf("ods size:%d", size), func(t *testing.T) { +// var errByz *byzantine.ErrByzantine +// faultHeader, err := generateByzantineError(ctx, t, size, bServ) +// require.True(t, errors.As(err, &errByz)) +// +// p := byzantine.CreateBadEncodingProof([]byte("hash"), faultHeader.Height(), errByz) +// err = p.Validate(faultHeader) +// require.NoError(t, err) +// }) +// } +//} +// +//func generateByzantineError( +// ctx context.Context, +// t *testing.T, +// odsSize int, +// bServ blockservice.BlockService, +//) (*header.ExtendedHeader, error) { +// eds := edstest.RandByzantineEDS(t, odsSize) +// err := ipld.ImportEDS(ctx, eds, bServ) +// require.NoError(t, err) +// h := headertest.ExtendedHeaderFromEDS(t, 1, eds) +// _, err = NewRetriever(bServ).Retrieve(ctx, h.DAH) +// +// return h, err +//} +// +///* +//BenchmarkBEFPValidation/ods_size:2 31273 38819 ns/op 68052 B/op 366 allocs/op +//BenchmarkBEFPValidation/ods_size:4 14664 80439 ns/op 135892 B/op 894 allocs/op +//BenchmarkBEFPValidation/ods_size:16 2850 386178 ns/op 587890 B/op 4945 allocs/op +//BenchmarkBEFPValidation/ods_size:32 1399 874490 ns/op 1233399 B/op 11284 allocs/op +//BenchmarkBEFPValidation/ods_size:64 619 2047540 ns/op 2578008 B/op 25364 allocs/op +//BenchmarkBEFPValidation/ods_size:128 259 4934375 ns/op 5418406 B/op 56345 allocs/op +//*/ +//func BenchmarkBEFPValidation(b *testing.B) { +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) +// defer b.Cleanup(cancel) +// bServ := ipld.NewMemBlockservice() +// r := NewRetriever(bServ) +// t := &testing.T{} +// odsSize := []int{2, 4, 16, 32, 64, 128} +// for _, size := range odsSize { +// b.Run(fmt.Sprintf("ods size:%d", size), func(b *testing.B) { +// b.ResetTimer() +// b.StopTimer() +// eds := edstest.RandByzantineEDS(t, size) +// err := ipld.ImportEDS(ctx, eds, bServ) +// require.NoError(t, err) +// h := headertest.ExtendedHeaderFromEDS(t, 1, eds) +// _, err = r.Retrieve(ctx, h.DAH) +// var errByz *byzantine.ErrByzantine +// require.ErrorAs(t, err, &errByz) +// b.StartTimer() +// +// for i := 0; i < b.N; i++ { +// b.ReportAllocs() +// p := byzantine.CreateBadEncodingProof([]byte("hash"), h.Height(), errByz) +// err = p.Validate(h) +// require.NoError(b, err) +// } +// }) +// } +//} +// +///* +//BenchmarkNewErrByzantineData/ods_size:2 29605 38846 ns/op 49518 B/op 579 allocs/op +//BenchmarkNewErrByzantineData/ods_size:4 11380 105302 ns/op 134967 B/op 1571 allocs/op +//BenchmarkNewErrByzantineData/ods_size:16 1902 631086 ns/op 830199 B/op 9601 allocs/op +//BenchmarkNewErrByzantineData/ods_size:32 756 1530985 ns/op 1985272 B/op 22901 allocs/op +//BenchmarkNewErrByzantineData/ods_size:64 340 3445544 ns/op 4767053 B/op 54704 allocs/op +//BenchmarkNewErrByzantineData/ods_size:128 132 8740678 ns/op 11991093 B/op 136584 allocs/op +//*/ +//func BenchmarkNewErrByzantineData(b *testing.B) { +// odsSize := []int{2, 4, 16, 32, 64, 128} +// ctx, cancel := context.WithTimeout(context.Background(), time.Minute) +// defer cancel() +// bServ := ipld.NewMemBlockservice() +// r := NewRetriever(bServ) +// t := &testing.T{} +// for _, size := range odsSize { +// b.Run(fmt.Sprintf("ods size:%d", size), 
func(b *testing.B) { +// b.StopTimer() +// eds := edstest.RandByzantineEDS(t, size) +// err := ipld.ImportEDS(ctx, eds, bServ) +// require.NoError(t, err) +// h := headertest.ExtendedHeaderFromEDS(t, 1, eds) +// ses, err := r.newSession(ctx, h.DAH) +// require.NoError(t, err) +// +// select { +// case <-ctx.Done(): +// b.Fatal(ctx.Err()) +// case <-ses.Done(): +// } +// +// _, err = ses.Reconstruct(ctx) +// assert.NoError(t, err) +// var errByz *rsmt2d.ErrByzantineData +// require.ErrorAs(t, err, &errByz) +// b.StartTimer() +// +// for i := 0; i < b.N; i++ { +// err = byzantine.NewErrByzantine(ctx, bServ, h.DAH, errByz) +// require.NotNil(t, err) +// } +// }) +// } +//} From 02030bf983ebf2311c54b771233b5c28bc133664 Mon Sep 17 00:00:00 2001 From: rene <41963722+renaynay@users.noreply.github.com> Date: Thu, 28 Mar 2024 13:49:10 +0100 Subject: [PATCH 118/132] feat(pruner): Implement `full` and `bridge` node pruning (#3150) This PR introduces `full` and `bridge` node pruning via the `--experimental-pruning` flag. Support is included for nodes that start from scratch with pruning enabled and also for `archival` (nodes retaining all historical blocks) that enable the `--experimental-pruning` flag. _Note that this PR does not support the conversion of a pruned node into an archival one explicitly (it would not support re-syncing deleted blocks)._ With pruning enabled, `full` and `bridge` nodes' block stores can be expected not to exceed ~4TB (as the upper bound). In follow-up PRs (hardening), the following features can be expected: - [x] discovery for archival nodes for archival sync - [ ] inverted_index / light node pruning - [ ] include more metrics for errors TODO: - [x] clean up some TODOs - [x] fix one flakey unit test - [x] change values back to the actual (GC cycle, sampling window, pruning window, etc). 
- [x] figure out whether to store error in pruner checkpoint - [x] fix issue with pruning genesis block via findPruneableHeaders - [x] metrics for failed prunes - [x] set a sane default for max pruneable / consider removing `MaxPruneablePerGC` as now context timeouts are on a per block basis - [ ] dedup findPruneableHeader test utility - [x] badger dep --------- Co-authored-by: Ryan --- cmd/node.go | 3 + cmd/util.go | 26 ++- header/headertest/testing.go | 34 ++- nodebuilder/config.go | 3 + nodebuilder/module.go | 4 +- nodebuilder/prune/module.go | 47 ---- nodebuilder/pruner/config.go | 13 ++ nodebuilder/pruner/constructors.go | 33 +++ nodebuilder/pruner/flags.go | 20 ++ nodebuilder/pruner/module.go | 71 ++++++ nodebuilder/settings.go | 2 + pruner/archival/pruner.go | 2 +- pruner/checkpoint.go | 73 ++++++ pruner/find.go | 114 ++++++++++ pruner/full/pruner.go | 40 ++++ pruner/full/window.go | 12 + pruner/light/pruner.go | 2 +- pruner/light/window.go | 4 +- pruner/metrics.go | 80 +++++++ pruner/params.go | 41 ++++ pruner/pruner.go | 2 +- pruner/service.go | 181 ++++++++++++++- pruner/service_test.go | 349 +++++++++++++++++++++++++++++ 23 files changed, 1078 insertions(+), 78 deletions(-) delete mode 100644 nodebuilder/prune/module.go create mode 100644 nodebuilder/pruner/config.go create mode 100644 nodebuilder/pruner/constructors.go create mode 100644 nodebuilder/pruner/flags.go create mode 100644 nodebuilder/pruner/module.go create mode 100644 pruner/checkpoint.go create mode 100644 pruner/find.go create mode 100644 pruner/full/pruner.go create mode 100644 pruner/full/window.go create mode 100644 pruner/metrics.go create mode 100644 pruner/params.go create mode 100644 pruner/service_test.go diff --git a/cmd/node.go b/cmd/node.go index 51ac4a6d2e..e8891c78f5 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -9,6 +9,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/pruner" "github.com/celestiaorg/celestia-node/nodebuilder/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/state" ) @@ -22,6 +23,7 @@ func NewBridge(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command rpc.Flags(), gateway.Flags(), state.Flags(), + pruner.Flags(), } cmd := &cobra.Command{ Use: "bridge [subcommand]", @@ -72,6 +74,7 @@ func NewFull(options ...func(*cobra.Command, []*pflag.FlagSet)) *cobra.Command { rpc.Flags(), gateway.Flags(), state.Flags(), + pruner.Flags(), } cmd := &cobra.Command{ Use: "full [subcommand]", diff --git a/cmd/util.go b/cmd/util.go index 08fa02155b..bbc901e4f2 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -16,6 +16,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/pruner" rpc_cfg "github.com/celestiaorg/celestia-node/nodebuilder/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/state" "github.com/celestiaorg/celestia-node/share" @@ -105,13 +106,6 @@ func PersistentPreRunEnv(cmd *cobra.Command, nodeType node.Type, _ []string) err return err } - if nodeType != node.Bridge { - err = header.ParseFlags(cmd, &cfg.Header) - if err != nil { - return err - } - } - ctx, err = ParseMiscFlags(ctx, cmd) if err != nil { return err @@ -121,6 +115,24 @@ func PersistentPreRunEnv(cmd *cobra.Command, nodeType node.Type, _ []string) err 
gateway.ParseFlags(cmd, &cfg.Gateway) state.ParseFlags(cmd, &cfg.State) + switch nodeType { + case node.Light: + err = header.ParseFlags(cmd, &cfg.Header) + if err != nil { + return err + } + case node.Full: + err = header.ParseFlags(cmd, &cfg.Header) + if err != nil { + return err + } + pruner.ParseFlags(cmd, &cfg.Pruner) + case node.Bridge: + pruner.ParseFlags(cmd, &cfg.Pruner) + default: + panic(fmt.Sprintf("invalid node type: %v", nodeType)) + } + // set config ctx = WithNodeConfig(ctx, &cfg) cmd.SetContext(ctx) diff --git a/header/headertest/testing.go b/header/headertest/testing.go index 7b0ae64262..1285e2cdd0 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -46,6 +46,14 @@ func NewStore(t *testing.T) libhead.Store[*header.ExtendedHeader] { return headertest.NewStore[*header.ExtendedHeader](t, NewTestSuite(t, 3, 0), 10) } +func NewCustomStore( + t *testing.T, + generator headertest.Generator[*header.ExtendedHeader], + numHeaders int, +) libhead.Store[*header.ExtendedHeader] { + return headertest.NewStore[*header.ExtendedHeader](t, generator, numHeaders) +} + // NewTestSuite setups a new test suite with a given number of validators. func NewTestSuite(t *testing.T, numValidators int, blockTime time.Duration) *TestSuite { valSet, vals := RandValidatorSet(numValidators, 10) @@ -82,8 +90,10 @@ func (s *TestSuite) genesis() *header.ExtendedHeader { return eh } -func MakeCommit(blockID types.BlockID, height int64, round int32, - voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.Commit, error) { +func MakeCommit( + blockID types.BlockID, height int64, round int32, + voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time, +) (*types.Commit, error) { // all sign for i := 0; i < len(validators); i++ { @@ -157,7 +167,8 @@ func (s *TestSuite) NextHeader() *header.ExtendedHeader { } func (s *TestSuite) GenRawHeader( - height uint64, lastHeader, lastCommit, dataHash libhead.Hash) *header.RawHeader { + height uint64, lastHeader, lastCommit, dataHash libhead.Hash, +) *header.RawHeader { rh := RandRawHeader(s.t) rh.Height = int64(height) rh.LastBlockID = types.BlockID{Hash: bytes.HexBytes(lastHeader)} @@ -167,9 +178,9 @@ func (s *TestSuite) GenRawHeader( rh.NextValidatorsHash = s.valSet.Hash() rh.ProposerAddress = s.nextProposer().Address - rh.Time = time.Now() + rh.Time = time.Now().UTC() if s.blockTime > 0 { - rh.Time = s.Head().Time().Add(s.blockTime) + rh.Time = s.Head().Time().UTC().Add(s.blockTime) } return rh @@ -189,7 +200,7 @@ func (s *TestSuite) Commit(h *header.RawHeader) *types.Commit { ValidatorIndex: int32(i), Height: h.Height, Round: round, - Timestamp: tmtime.Now(), + Timestamp: tmtime.Now().UTC(), Type: tmproto.PrecommitType, BlockID: bid, } @@ -214,6 +225,11 @@ func (s *TestSuite) nextProposer() *types.Validator { // RandExtendedHeader provides an ExtendedHeader fixture. 
func RandExtendedHeader(t testing.TB) *header.ExtendedHeader { + timestamp := time.Now().UTC() + return RandExtendedHeaderAtTimestamp(t, timestamp) +} + +func RandExtendedHeaderAtTimestamp(t testing.TB, timestamp time.Time) *header.ExtendedHeader { dah := share.EmptyRoot() rh := RandRawHeader(t) @@ -224,7 +240,7 @@ func RandExtendedHeader(t testing.TB) *header.ExtendedHeader { voteSet := types.NewVoteSet(rh.ChainID, rh.Height, 0, tmproto.PrecommitType, valSet) blockID := RandBlockID(t) blockID.Hash = rh.Hash() - commit, err := MakeCommit(blockID, rh.Height, 0, voteSet, vals, time.Now()) + commit, err := MakeCommit(blockID, rh.Height, 0, voteSet, vals, timestamp) require.NoError(t, err) return &header.ExtendedHeader{ @@ -279,7 +295,7 @@ func RandRawHeader(t testing.TB) *header.RawHeader { Version: version.Consensus{Block: 11, App: 1}, ChainID: "test", Height: mrand.Int63(), //nolint:gosec - Time: time.Now(), + Time: time.Now().UTC(), LastBlockID: RandBlockID(t), LastCommitHash: tmrand.Bytes(32), DataHash: tmrand.Bytes(32), @@ -320,7 +336,7 @@ func ExtendedHeaderFromEDS(t testing.TB, height uint64, eds *rsmt2d.ExtendedData blockID := RandBlockID(t) blockID.Hash = gen.Hash() voteSet := types.NewVoteSet(gen.ChainID, gen.Height, 0, tmproto.PrecommitType, valSet) - commit, err := MakeCommit(blockID, gen.Height, 0, voteSet, vals, time.Now()) + commit, err := MakeCommit(blockID, gen.Height, 0, voteSet, vals, time.Now().UTC()) require.NoError(t, err) eh := &header.ExtendedHeader{ diff --git a/nodebuilder/config.go b/nodebuilder/config.go index d323f401d7..bf9b1a5bfe 100644 --- a/nodebuilder/config.go +++ b/nodebuilder/config.go @@ -15,6 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/nodebuilder/pruner" "github.com/celestiaorg/celestia-node/nodebuilder/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/nodebuilder/state" @@ -35,6 +36,7 @@ type Config struct { Share share.Config Header header.Config DASer das.Config `toml:",omitempty"` + Pruner pruner.Config } // DefaultConfig provides a default Config for a given Node Type 'tp'. 
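(As a result of this hunk and the next, a freshly generated config.toml gains
a pruner section; illustrative rendering, assuming default TOML field naming
for the pruner.Config type defined later in this patch:

    [Pruner]
      EnableService = false
)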
@@ -49,6 +51,7 @@ func DefaultConfig(tp node.Type) *Config { Gateway: gateway.DefaultConfig(), Share: share.DefaultConfig(tp), Header: header.DefaultConfig(tp), + Pruner: pruner.DefaultConfig(), } switch tp { diff --git a/nodebuilder/module.go b/nodebuilder/module.go index ad287b1ac8..e3370eb083 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -16,7 +16,7 @@ import ( modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/nodebuilder/prune" + "github.com/celestiaorg/celestia-node/nodebuilder/pruner" "github.com/celestiaorg/celestia-node/nodebuilder/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/nodebuilder/state" @@ -58,7 +58,7 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store blob.ConstructModule(), da.ConstructModule(), node.ConstructModule(tp), - prune.ConstructModule(tp), + pruner.ConstructModule(tp, &cfg.Pruner), rpc.ConstructModule(tp, &cfg.RPC), ) diff --git a/nodebuilder/prune/module.go b/nodebuilder/prune/module.go deleted file mode 100644 index 2141b74bf1..0000000000 --- a/nodebuilder/prune/module.go +++ /dev/null @@ -1,47 +0,0 @@ -package prune - -import ( - "context" - - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/nodebuilder/node" - "github.com/celestiaorg/celestia-node/pruner" - "github.com/celestiaorg/celestia-node/pruner/archival" - "github.com/celestiaorg/celestia-node/pruner/light" -) - -func ConstructModule(tp node.Type) fx.Option { - baseComponents := fx.Options( - fx.Provide(fx.Annotate( - pruner.NewService, - fx.OnStart(func(ctx context.Context, p *pruner.Service) error { - return p.Start(ctx) - }), - fx.OnStop(func(ctx context.Context, p *pruner.Service) error { - return p.Stop(ctx) - }), - )), - ) - - switch tp { - case node.Full, node.Bridge: - return fx.Module("prune", - baseComponents, - fx.Provide(func() pruner.Pruner { - return archival.NewPruner() - }), - fx.Supply(archival.Window), - ) - case node.Light: - return fx.Module("prune", - baseComponents, - fx.Provide(func() pruner.Pruner { - return light.NewPruner() - }), - fx.Supply(light.Window), - ) - default: - panic("unknown node type") - } -} diff --git a/nodebuilder/pruner/config.go b/nodebuilder/pruner/config.go new file mode 100644 index 0000000000..1aa8c6ad6f --- /dev/null +++ b/nodebuilder/pruner/config.go @@ -0,0 +1,13 @@ +package pruner + +var MetricsEnabled bool + +type Config struct { + EnableService bool +} + +func DefaultConfig() Config { + return Config{ + EnableService: false, + } +} diff --git a/nodebuilder/pruner/constructors.go b/nodebuilder/pruner/constructors.go new file mode 100644 index 0000000000..1b84d19d0d --- /dev/null +++ b/nodebuilder/pruner/constructors.go @@ -0,0 +1,33 @@ +package pruner + +import ( + "github.com/ipfs/go-datastore" + + hdr "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/pruner" +) + +func newPrunerService( + p pruner.Pruner, + window pruner.AvailabilityWindow, + getter hdr.Store[*header.ExtendedHeader], + ds datastore.Batching, + opts ...pruner.Option, +) (*pruner.Service, error) { + serv, err := pruner.NewService(p, window, getter, ds, p2p.BlockTime, opts...) 
+ if err != nil { + return nil, err + } + + if MetricsEnabled { + err := pruner.WithPrunerMetrics(serv) + if err != nil { + return nil, err + } + } + + return serv, nil +} diff --git a/nodebuilder/pruner/flags.go b/nodebuilder/pruner/flags.go new file mode 100644 index 0000000000..7734c49e46 --- /dev/null +++ b/nodebuilder/pruner/flags.go @@ -0,0 +1,20 @@ +package pruner + +import ( + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" +) + +const pruningFlag = "experimental-pruning" + +func Flags() *flag.FlagSet { + flags := &flag.FlagSet{} + + flags.Bool(pruningFlag, false, "EXPERIMENTAL: Enables pruning of blocks outside the pruning window.") + + return flags +} + +func ParseFlags(cmd *cobra.Command, cfg *Config) { + cfg.EnableService = cmd.Flag(pruningFlag).Changed +} diff --git a/nodebuilder/pruner/module.go b/nodebuilder/pruner/module.go new file mode 100644 index 0000000000..248798c3a4 --- /dev/null +++ b/nodebuilder/pruner/module.go @@ -0,0 +1,71 @@ +package pruner + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/pruner" + "github.com/celestiaorg/celestia-node/pruner/archival" + "github.com/celestiaorg/celestia-node/pruner/full" + "github.com/celestiaorg/celestia-node/pruner/light" + "github.com/celestiaorg/celestia-node/share/eds" +) + +func ConstructModule(tp node.Type, cfg *Config) fx.Option { + if !cfg.EnableService { + switch tp { + case node.Light: + // light nodes are still subject to sampling within window + // even if pruning is not enabled. + return fx.Supply(light.Window) + case node.Full, node.Bridge: + return fx.Supply(archival.Window) + default: + panic("unknown node type") + } + } + + baseComponents := fx.Options( + fx.Provide(fx.Annotate( + newPrunerService, + fx.OnStart(func(ctx context.Context, p *pruner.Service) error { + return p.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, p *pruner.Service) error { + return p.Stop(ctx) + }), + )), + // This is necessary to invoke the pruner service as independent thanks to a + // quirk in FX. + fx.Invoke(func(_ *pruner.Service) {}), + ) + + switch tp { + case node.Full: + return fx.Module("prune", + baseComponents, + fx.Provide(func(store *eds.Store) pruner.Pruner { + return full.NewPruner(store) + }), + fx.Supply(full.Window), + ) + case node.Bridge: + return fx.Module("prune", + baseComponents, + fx.Provide(func(store *eds.Store) pruner.Pruner { + return full.NewPruner(store) + }), + fx.Supply(full.Window), + ) + // TODO: Eventually, light nodes will be capable of pruning samples + // in which case, this can be enabled. 
+ case node.Light: + return fx.Module("prune", + fx.Supply(light.Window), + ) + default: + panic("unknown node type") + } +} diff --git a/nodebuilder/settings.go b/nodebuilder/settings.go index 7830f0e8f6..72a0b7c960 100644 --- a/nodebuilder/settings.go +++ b/nodebuilder/settings.go @@ -29,6 +29,7 @@ import ( modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + modprune "github.com/celestiaorg/celestia-node/nodebuilder/pruner" "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/state" ) @@ -80,6 +81,7 @@ func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type) fx.Opti // control over which module to enable metrics for modhead.MetricsEnabled = true modcore.MetricsEnabled = true + modprune.MetricsEnabled = true baseComponents := fx.Options( fx.Supply(metricOpts), diff --git a/pruner/archival/pruner.go b/pruner/archival/pruner.go index 7b1cb935f3..a1a55db0da 100644 --- a/pruner/archival/pruner.go +++ b/pruner/archival/pruner.go @@ -15,6 +15,6 @@ func NewPruner() *Pruner { return &Pruner{} } -func (p *Pruner) Prune(context.Context, ...*header.ExtendedHeader) error { +func (p *Pruner) Prune(context.Context, *header.ExtendedHeader) error { return nil } diff --git a/pruner/checkpoint.go b/pruner/checkpoint.go new file mode 100644 index 0000000000..10db918cb5 --- /dev/null +++ b/pruner/checkpoint.go @@ -0,0 +1,73 @@ +package pruner + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ipfs/go-datastore" + + "github.com/celestiaorg/celestia-node/header" +) + +var ( + storePrefix = datastore.NewKey("pruner") + checkpointKey = datastore.NewKey("checkpoint") +) + +// checkpoint contains information related to the state of the +// pruner service that is periodically persisted to disk. +type checkpoint struct { + LastPrunedHeight uint64 `json:"last_pruned_height"` + FailedHeaders map[uint64]struct{} `json:"failed"` +} + +// initializeCheckpoint initializes the checkpoint, storing the earliest header in the chain. +func (s *Service) initializeCheckpoint(ctx context.Context) error { + return s.updateCheckpoint(ctx, uint64(1), nil) +} + +// loadCheckpoint loads the last checkpoint from disk, initializing it if it does not already exist. +func (s *Service) loadCheckpoint(ctx context.Context) error { + bin, err := s.ds.Get(ctx, checkpointKey) + if err != nil { + if err == datastore.ErrNotFound { + return s.initializeCheckpoint(ctx) + } + return fmt.Errorf("failed to load checkpoint: %w", err) + } + + var cp *checkpoint + err = json.Unmarshal(bin, &cp) + if err != nil { + return fmt.Errorf("failed to unmarshal checkpoint: %w", err) + } + + s.checkpoint = cp + return nil +} + +// updateCheckpoint updates the checkpoint with the last pruned header height +// and persists it to disk. 
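+//
+// Failed heights are merged into the checkpoint's FailedHeaders set so they
+// can be retried on a later cycle. The checkpoint is persisted as plain JSON
+// under the namespaced "checkpoint" key; a checkpoint that has pruned up to
+// height 42 with one failed height would look roughly like:
+//
+//	{"last_pruned_height":42,"failed":{"40":{}}}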
+func (s *Service) updateCheckpoint( + ctx context.Context, + lastPrunedHeight uint64, + failedHeights map[uint64]struct{}, +) error { + for height := range failedHeights { + s.checkpoint.FailedHeaders[height] = struct{}{} + } + + s.checkpoint.LastPrunedHeight = lastPrunedHeight + + bin, err := json.Marshal(s.checkpoint) + if err != nil { + return err + } + + return s.ds.Put(ctx, checkpointKey, bin) +} + +func (s *Service) lastPruned(ctx context.Context) (*header.ExtendedHeader, error) { + return s.getter.GetByHeight(ctx, s.checkpoint.LastPrunedHeight) +} diff --git a/pruner/find.go b/pruner/find.go new file mode 100644 index 0000000000..5091c168a0 --- /dev/null +++ b/pruner/find.go @@ -0,0 +1,114 @@ +package pruner + +import ( + "context" + "time" + + "github.com/celestiaorg/celestia-node/header" +) + +// maxHeadersPerLoop is the maximum number of headers to fetch +// for a prune loop (prevents fetching too many headers at a +// time for nodes that have a large number of pruneable headers). +var maxHeadersPerLoop = uint64(512) + +// findPruneableHeaders returns all headers that are eligible for pruning +// (outside the sampling window). +func (s *Service) findPruneableHeaders( + ctx context.Context, + lastPruned *header.ExtendedHeader, +) ([]*header.ExtendedHeader, error) { + pruneCutoff := time.Now().UTC().Add(time.Duration(-s.window)) + + if !lastPruned.Time().UTC().Before(pruneCutoff) { + // this can happen when the network is young and all blocks + // are still within the AvailabilityWindow + return nil, nil + } + + estimatedCutoffHeight, err := s.calculateEstimatedCutoff(ctx, lastPruned, pruneCutoff) + if err != nil { + return nil, err + } + + if lastPruned.Height() == estimatedCutoffHeight { + // nothing left to prune + return nil, nil + } + + log.Debugw("finder: fetching header range", "last pruned", lastPruned.Height(), + "target height", estimatedCutoffHeight) + + headers, err := s.getter.GetRangeByHeight(ctx, lastPruned, estimatedCutoffHeight) + if err != nil { + log.Errorw("failed to get range from header store", "from", lastPruned.Height(), + "to", estimatedCutoffHeight, "error", err) + return nil, err + } + // ensures genesis block gets pruned + if lastPruned.Height() == 1 { + headers = append([]*header.ExtendedHeader{lastPruned}, headers...) + } + + // if our estimated range didn't cover enough headers, we need to fetch more + // TODO: This is really inefficient in the case that lastPruned is the default value, or if the + // node has been offline for a long time. 
Instead of increasing the boundary by one in the for
+	// loop we could increase by a range every iteration
+	headerCount := len(headers)
+	for {
+		if headerCount > int(maxHeadersPerLoop) {
+			headers = headers[:maxHeadersPerLoop]
+			break
+		}
+		lastHeader := headers[len(headers)-1]
+		if lastHeader.Time().After(pruneCutoff) {
+			break
+		}
+
+		nextHeader, err := s.getter.GetByHeight(ctx, lastHeader.Height()+1)
+		if err != nil {
+			log.Errorw("failed to get header by height", "height", lastHeader.Height()+1, "error", err)
+			return nil, err
+		}
+		headers = append(headers, nextHeader)
+		headerCount++
+	}
+
+	for i, h := range headers {
+		if h.Time().After(pruneCutoff) {
+			if i == 0 {
+				// we can't prune anything
+				return nil, nil
+			}
+
+			// we can ignore the rest of the headers since they are all newer than the cutoff
+			return headers[:i], nil
+		}
+	}
+	return headers, nil
+}
+
+func (s *Service) calculateEstimatedCutoff(
+	ctx context.Context,
+	lastPruned *header.ExtendedHeader,
+	pruneCutoff time.Time,
+) (uint64, error) {
+	estimatedRange := uint64(pruneCutoff.UTC().Sub(lastPruned.Time().UTC()) / s.blockTime)
+	estimatedCutoffHeight := lastPruned.Height() + estimatedRange
+
+	head, err := s.getter.Head(ctx)
+	if err != nil {
+		log.Errorw("failed to get Head from header store", "error", err)
+		return 0, err
+	}
+
+	if head.Height() < estimatedCutoffHeight {
+		estimatedCutoffHeight = head.Height()
+	}
+
+	if estimatedCutoffHeight-lastPruned.Height() > maxHeadersPerLoop {
+		estimatedCutoffHeight = lastPruned.Height() + maxHeadersPerLoop
+	}
+
+	return estimatedCutoffHeight, nil
+}
diff --git a/pruner/full/pruner.go b/pruner/full/pruner.go
new file mode 100644
index 0000000000..49967b5050
--- /dev/null
+++ b/pruner/full/pruner.go
@@ -0,0 +1,40 @@
+package full
+
+import (
+	"context"
+	"errors"
+
+	"github.com/filecoin-project/dagstore"
+	logging "github.com/ipfs/go-log/v2"
+
+	"github.com/celestiaorg/celestia-node/header"
+	"github.com/celestiaorg/celestia-node/share"
+	"github.com/celestiaorg/celestia-node/share/eds"
+)
+
+var log = logging.Logger("pruner/full")
+
+type Pruner struct {
+	store *eds.Store
+}
+
+func NewPruner(store *eds.Store) *Pruner {
+	return &Pruner{
+		store: store,
+	}
+}
+
+func (p *Pruner) Prune(ctx context.Context, eh *header.ExtendedHeader) error {
+	// short circuit on empty roots
+	if eh.DAH.Equals(share.EmptyRoot()) {
+		return nil
+	}
+
+	log.Debugf("pruning header %s", eh.DAH.Hash())
+
+	err := p.store.Remove(ctx, eh.DAH.Hash())
+	if err != nil && !errors.Is(err, dagstore.ErrShardUnknown) {
+		return err
+	}
+	return nil
+}
diff --git a/pruner/full/window.go b/pruner/full/window.go
new file mode 100644
index 0000000000..4ad69234e2
--- /dev/null
+++ b/pruner/full/window.go
@@ -0,0 +1,12 @@
+package full
+
+import (
+	"time"
+
+	"github.com/celestiaorg/celestia-node/pruner"
+	"github.com/celestiaorg/celestia-node/pruner/light"
+)
+
+// Window is the availability window for full nodes in the Celestia
+// network (30 days + 1 hour).
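+//
+// That is, full nodes keep data for an hour beyond the light sampling
+// window as a buffer; assuming light.Window is 30 days:
+//
+//	time.Duration(light.Window) + time.Hour // 720h + 1h = 721h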
+const Window = pruner.AvailabilityWindow(time.Duration(light.Window) + time.Hour) diff --git a/pruner/light/pruner.go b/pruner/light/pruner.go index 513bfa2b66..61401bae74 100644 --- a/pruner/light/pruner.go +++ b/pruner/light/pruner.go @@ -12,6 +12,6 @@ func NewPruner() *Pruner { return &Pruner{} } -func (p *Pruner) Prune(context.Context, ...*header.ExtendedHeader) error { +func (p *Pruner) Prune(context.Context, *header.ExtendedHeader) error { return nil } diff --git a/pruner/light/window.go b/pruner/light/window.go index dc1a9e4444..2241ecb063 100644 --- a/pruner/light/window.go +++ b/pruner/light/window.go @@ -1,11 +1,9 @@ package light import ( - "time" - "github.com/celestiaorg/celestia-node/pruner" ) // Window is the availability window for light nodes in the Celestia // network (30 days). -const Window = pruner.AvailabilityWindow(time.Second * 86400 * 30) +const Window = pruner.AvailabilityWindow(30 * 24 * 60 * 60) diff --git a/pruner/metrics.go b/pruner/metrics.go new file mode 100644 index 0000000000..c43217dc3d --- /dev/null +++ b/pruner/metrics.go @@ -0,0 +1,80 @@ +package pruner + +import ( + "context" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +var ( + meter = otel.Meter("storage_pruner") +) + +type metrics struct { + prunedCounter metric.Int64Counter + + lastPruned metric.Int64ObservableGauge + failedPrunes metric.Int64ObservableGauge + + clientReg metric.Registration +} + +func (s *Service) WithMetrics() error { + prunedCounter, err := meter.Int64Counter("prnr_pruned_counter", + metric.WithDescription("pruner pruned header counter")) + if err != nil { + return err + } + + failedPrunes, err := meter.Int64ObservableGauge("prnr_failed_counter", + metric.WithDescription("pruner failed prunes counter")) + if err != nil { + return err + } + + lastPruned, err := meter.Int64ObservableGauge("prnr_last_pruned", + metric.WithDescription("pruner highest pruned height")) + if err != nil { + return err + } + + callback := func(_ context.Context, observer metric.Observer) error { + observer.ObserveInt64(lastPruned, int64(s.checkpoint.LastPrunedHeight)) + observer.ObserveInt64(failedPrunes, int64(len(s.checkpoint.FailedHeaders))) + return nil + } + + clientReg, err := meter.RegisterCallback(callback, lastPruned, failedPrunes) + if err != nil { + return err + } + + s.metrics = &metrics{ + prunedCounter: prunedCounter, + lastPruned: lastPruned, + failedPrunes: failedPrunes, + clientReg: clientReg, + } + return nil +} + +func (m *metrics) close() error { + if m == nil { + return nil + } + + return m.clientReg.Unregister() +} + +func (m *metrics) observePrune(ctx context.Context, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + m.prunedCounter.Add(ctx, 1, metric.WithAttributes( + attribute.Bool("failed", failed))) +} diff --git a/pruner/params.go b/pruner/params.go new file mode 100644 index 0000000000..253ea5e1a9 --- /dev/null +++ b/pruner/params.go @@ -0,0 +1,41 @@ +package pruner + +import ( + "fmt" + "time" +) + +type Option func(*Params) + +type Params struct { + // pruneCycle is the frequency at which the pruning Service + // runs the ticker. If set to 0, the Service will not run. 
+ pruneCycle time.Duration +} + +func (p *Params) Validate() error { + if p.pruneCycle == time.Duration(0) { + return fmt.Errorf("invalid GC cycle given, value should be positive and non-zero") + } + return nil +} + +func DefaultParams() Params { + return Params{ + pruneCycle: time.Minute * 5, + } +} + +// WithPruneCycle configures how often the pruning Service +// triggers a pruning cycle. +func WithPruneCycle(cycle time.Duration) Option { + return func(p *Params) { + p.pruneCycle = cycle + } +} + +// WithPrunerMetrics is a utility function to turn on pruner metrics and that is +// expected to be "invoked" by the fx lifecycle. +func WithPrunerMetrics(s *Service) error { + return s.WithMetrics() +} diff --git a/pruner/pruner.go b/pruner/pruner.go index fae60e483c..a591a65392 100644 --- a/pruner/pruner.go +++ b/pruner/pruner.go @@ -9,5 +9,5 @@ import ( // Pruner contains methods necessary to prune data // from the node's datastore. type Pruner interface { - Prune(context.Context, ...*header.ExtendedHeader) error + Prune(context.Context, *header.ExtendedHeader) error } diff --git a/pruner/service.go b/pruner/service.go index f67265977a..65935e75d8 100644 --- a/pruner/service.go +++ b/pruner/service.go @@ -2,24 +2,191 @@ package pruner import ( "context" + "fmt" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" + + hdr "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" ) -// Service handles the pruning routine for the node using the -// prune Pruner. +var log = logging.Logger("pruner/service") + +// Service handles running the pruning cycle for the node. type Service struct { pruner Pruner + window AvailabilityWindow + + getter hdr.Getter[*header.ExtendedHeader] + + ds datastore.Datastore + checkpoint *checkpoint + + blockTime time.Duration + + ctx context.Context + cancel context.CancelFunc + doneCh chan struct{} + + params Params + metrics *metrics } -func NewService(p Pruner) *Service { - return &Service{ - pruner: p, +func NewService( + p Pruner, + window AvailabilityWindow, + getter hdr.Getter[*header.ExtendedHeader], + ds datastore.Datastore, + blockTime time.Duration, + opts ...Option, +) (*Service, error) { + params := DefaultParams() + for _, opt := range opts { + opt(¶ms) + } + + if err := params.Validate(); err != nil { + return nil, err } + + return &Service{ + pruner: p, + window: window, + getter: getter, + checkpoint: &checkpoint{FailedHeaders: map[uint64]struct{}{}}, + ds: namespace.Wrap(ds, storePrefix), + blockTime: blockTime, + doneCh: make(chan struct{}), + params: params, + }, nil } +// Start loads the pruner's last pruned height (1 if pruner is freshly +// initialized) and runs the prune loop, pruning any blocks older than +// the given availability window. func (s *Service) Start(context.Context) error { + s.ctx, s.cancel = context.WithCancel(context.Background()) + + err := s.loadCheckpoint(s.ctx) + if err != nil { + return err + } + log.Debugw("loaded checkpoint", "lastPruned", s.checkpoint.LastPrunedHeight) + + go s.run() return nil } -func (s *Service) Stop(context.Context) error { - return nil +func (s *Service) Stop(ctx context.Context) error { + s.cancel() + + s.metrics.close() + + select { + case <-s.doneCh: + return nil + case <-ctx.Done(): + return fmt.Errorf("pruner unable to exit within context deadline") + } +} + +// run prunes blocks older than the availability wiindow periodically until the +// pruner service is stopped. 
+func (s *Service) run() { + defer close(s.doneCh) + + ticker := time.NewTicker(s.params.pruneCycle) + defer ticker.Stop() + + lastPrunedHeader, err := s.lastPruned(s.ctx) + if err != nil { + log.Errorw("failed to get last pruned header", "height", s.checkpoint.LastPrunedHeight, + "err", err) + log.Warn("exiting pruner service!") + + s.cancel() + } + + for { + select { + case <-s.ctx.Done(): + return + case <-ticker.C: + lastPrunedHeader = s.prune(s.ctx, lastPrunedHeader) + } + } +} + +func (s *Service) prune( + ctx context.Context, + lastPrunedHeader *header.ExtendedHeader, +) *header.ExtendedHeader { + // prioritize retrying previously-failed headers + s.retryFailed(s.ctx) + + for { + select { + case <-s.ctx.Done(): + return lastPrunedHeader + default: + } + + headers, err := s.findPruneableHeaders(ctx, lastPrunedHeader) + if err != nil || len(headers) == 0 { + return lastPrunedHeader + } + + failed := make(map[uint64]struct{}) + + log.Debugw("pruning headers", "from", headers[0].Height(), "to", + headers[len(headers)-1].Height()) + + for _, eh := range headers { + pruneCtx, cancel := context.WithTimeout(ctx, time.Second*5) + + err = s.pruner.Prune(pruneCtx, eh) + if err != nil { + log.Errorw("failed to prune block", "height", eh.Height(), "err", err) + failed[eh.Height()] = struct{}{} + } else { + lastPrunedHeader = eh + } + + s.metrics.observePrune(pruneCtx, err != nil) + cancel() + } + + err = s.updateCheckpoint(s.ctx, lastPrunedHeader.Height(), failed) + if err != nil { + log.Errorw("failed to update checkpoint", "err", err) + return lastPrunedHeader + } + + if uint64(len(headers)) < maxHeadersPerLoop { + // we've pruned all the blocks we can + return lastPrunedHeader + } + } +} + +func (s *Service) retryFailed(ctx context.Context) { + log.Debugw("retrying failed headers", "amount", len(s.checkpoint.FailedHeaders)) + + for failed := range s.checkpoint.FailedHeaders { + h, err := s.getter.GetByHeight(ctx, failed) + if err != nil { + log.Errorw("failed to load header from failed map", "height", failed, "err", err) + continue + } + err = s.pruner.Prune(ctx, h) + if err != nil { + log.Errorw("failed to prune block from failed map", "height", failed, "err", err) + continue + } + delete(s.checkpoint.FailedHeaders, failed) + } } diff --git a/pruner/service_test.go b/pruner/service_test.go new file mode 100644 index 0000000000..01932abaf2 --- /dev/null +++ b/pruner/service_test.go @@ -0,0 +1,349 @@ +package pruner + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" +) + +/* + | toPrune | availability window | +*/ + +// TestService tests the pruner service to check whether the expected +// amount of blocks are pruned within a given AvailabilityWindow. +// This test runs a pruning cycle once which should prune at least +// 2 blocks (as the AvailabilityWindow is ~2 blocks). Since the +// prune-able header determination is time-based, it cannot be +// exact. 
+func TestService(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + blockTime := time.Millisecond + + // all headers generated in suite are timestamped to time.Now(), so + // they will all be considered "pruneable" within the availability window ( + suite := headertest.NewTestSuite(t, 1, blockTime) + store := headertest.NewCustomStore(t, suite, 20) + + mp := &mockPruner{} + + serv, err := NewService( + mp, + AvailabilityWindow(time.Millisecond*2), + store, + sync.MutexWrap(datastore.NewMapDatastore()), + blockTime, + ) + require.NoError(t, err) + + serv.ctx, serv.cancel = ctx, cancel + + err = serv.loadCheckpoint(ctx) + require.NoError(t, err) + + time.Sleep(time.Millisecond * 2) + + lastPruned, err := serv.lastPruned(ctx) + require.NoError(t, err) + lastPruned = serv.prune(ctx, lastPruned) + + assert.Greater(t, lastPruned.Height(), uint64(2)) + assert.Greater(t, serv.checkpoint.LastPrunedHeight, uint64(2)) +} + +// TestService_FailedAreRecorded checks whether the pruner service +// can accurately detect blocks to be pruned and store them +// to checkpoint. +func TestService_FailedAreRecorded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + blockTime := time.Millisecond + + // all headers generated in suite are timestamped to time.Now(), so + // they will all be considered "pruneable" within the availability window + suite := headertest.NewTestSuite(t, 1, blockTime) + store := headertest.NewCustomStore(t, suite, 100) + + mp := &mockPruner{ + failHeight: map[uint64]int{4: 0, 5: 0, 13: 0}, + } + + serv, err := NewService( + mp, + AvailabilityWindow(time.Millisecond*20), + store, + sync.MutexWrap(datastore.NewMapDatastore()), + blockTime, + ) + require.NoError(t, err) + + serv.ctx = ctx + + err = serv.loadCheckpoint(ctx) + require.NoError(t, err) + + // ensures at least 13 blocks are prune-able + time.Sleep(time.Millisecond * 50) + + // trigger a prune job + lastPruned, err := serv.lastPruned(ctx) + require.NoError(t, err) + _ = serv.prune(ctx, lastPruned) + + assert.Len(t, serv.checkpoint.FailedHeaders, 3) + for expectedFail := range mp.failHeight { + _, exists := serv.checkpoint.FailedHeaders[expectedFail] + assert.True(t, exists) + } + + // trigger another prune job, which will prioritize retrying + // failed blocks + lastPruned, err = serv.lastPruned(ctx) + require.NoError(t, err) + _ = serv.prune(ctx, lastPruned) + + assert.Len(t, serv.checkpoint.FailedHeaders, 0) +} + +func TestServiceCheckpointing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := headertest.NewStore(t) + + mp := &mockPruner{} + + serv, err := NewService( + mp, + AvailabilityWindow(time.Second), + store, + sync.MutexWrap(datastore.NewMapDatastore()), + time.Millisecond, + ) + require.NoError(t, err) + + err = serv.loadCheckpoint(ctx) + require.NoError(t, err) + + // ensure checkpoint was initialized correctly + assert.Equal(t, uint64(1), serv.checkpoint.LastPrunedHeight) + assert.Empty(t, serv.checkpoint.FailedHeaders) + + // update checkpoint + err = serv.updateCheckpoint(ctx, uint64(3), map[uint64]struct{}{2: {}}) + require.NoError(t, err) + + // ensure checkpoint was updated correctly in datastore + err = serv.loadCheckpoint(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(3), serv.checkpoint.LastPrunedHeight) + assert.Len(t, serv.checkpoint.FailedHeaders, 1) +} + +// TestPrune_LargeNumberOfBlocks tests that the pruner service with a large +// number of 
blocks to prune (an archival node turning into a pruned node) is +// able to prune the blocks in one prune cycle. +func TestPrune_LargeNumberOfBlocks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + maxHeadersPerLoop = 10 + t.Cleanup(func() { + maxHeadersPerLoop = 1024 + }) + + blockTime := time.Nanosecond + availabilityWindow := AvailabilityWindow(blockTime * 10) + + // all headers generated in suite are timestamped to time.Now(), so + // they will all be considered "pruneable" within the availability window + suite := headertest.NewTestSuite(t, 1, blockTime) + store := headertest.NewCustomStore(t, suite, int(maxHeadersPerLoop*6)) // add small buffer + + mp := &mockPruner{failHeight: make(map[uint64]int, 0)} + + serv, err := NewService( + mp, + availabilityWindow, + store, + sync.MutexWrap(datastore.NewMapDatastore()), + blockTime, + ) + require.NoError(t, err) + serv.ctx = ctx + + err = serv.loadCheckpoint(ctx) + require.NoError(t, err) + + // ensures availability window has passed + time.Sleep(time.Duration(availabilityWindow) + time.Millisecond*100) + + // trigger a prune job + lastPruned, err := serv.lastPruned(ctx) + require.NoError(t, err) + _ = serv.prune(ctx, lastPruned) + + // ensure all headers have been pruned + assert.Equal(t, maxHeadersPerLoop*5, serv.checkpoint.LastPrunedHeight) + assert.Len(t, serv.checkpoint.FailedHeaders, 0) +} + +func TestFindPruneableHeaders(t *testing.T) { + testCases := []struct { + name string + availWindow AvailabilityWindow + blockTime time.Duration + startTime time.Time + headerAmount int + expectedLength int + }{ + { + name: "Estimated range matches expected", + // Availability window is one week + availWindow: AvailabilityWindow(time.Hour * 24 * 7), + blockTime: time.Hour, + // Make two weeks of headers + headerAmount: 2 * (24 * 7), + startTime: time.Now().Add(-2 * time.Hour * 24 * 7), + // One week of headers are pruneable + expectedLength: (24 * 7) + 1, + }, + { + name: "Estimated range not sufficient but finds the correct tail", + // Availability window is one week + availWindow: AvailabilityWindow(time.Hour * 24 * 7), + blockTime: time.Hour, + // Make three weeks of headers + headerAmount: 3 * (24 * 7), + startTime: time.Now().Add(-3 * time.Hour * 24 * 7), + // Two weeks of headers are pruneable + expectedLength: (2 * 24 * 7) + 1, + }, + { + name: "No pruneable headers", + // Availability window is two weeks + availWindow: AvailabilityWindow(2 * time.Hour * 24 * 7), + blockTime: time.Hour, + // Make one week of headers + headerAmount: 24 * 7, + startTime: time.Now().Add(-time.Hour * 24 * 7), + // No headers are pruneable + expectedLength: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + headerGenerator := NewSpacedHeaderGenerator(t, tc.startTime, tc.blockTime) + store := headertest.NewCustomStore(t, headerGenerator, tc.headerAmount) + + mp := &mockPruner{} + + serv, err := NewService( + mp, + tc.availWindow, + store, + sync.MutexWrap(datastore.NewMapDatastore()), + tc.blockTime, + ) + require.NoError(t, err) + + err = serv.Start(ctx) + require.NoError(t, err) + + lastPruned, err := serv.lastPruned(ctx) + require.NoError(t, err) + + pruneable, err := serv.findPruneableHeaders(ctx, lastPruned) + require.NoError(t, err) + require.Len(t, pruneable, tc.expectedLength) + + pruneableCutoff := time.Now().Add(-time.Duration(tc.availWindow)) + // All returned headers are older than the 
availability window + for _, h := range pruneable { + require.WithinRange(t, h.Time(), tc.startTime, pruneableCutoff) + } + + // The next header after the last pruneable header is too new to prune + if len(pruneable) != 0 { + lastPruneable := pruneable[len(pruneable)-1] + if lastPruneable.Height() != store.Height() { + firstUnpruneable, err := store.GetByHeight(ctx, lastPruneable.Height()+1) + require.NoError(t, err) + require.WithinRange(t, firstUnpruneable.Time(), pruneableCutoff, time.Now()) + } + } + }) + } +} + +type mockPruner struct { + deletedHeaderHashes []pruned + + // tells the mockPruner on which heights to fail + failHeight map[uint64]int +} + +type pruned struct { + hash string + height uint64 +} + +func (mp *mockPruner) Prune(_ context.Context, h *header.ExtendedHeader) error { + for fail := range mp.failHeight { + if h.Height() == fail { + // if retried, return successful + if mp.failHeight[fail] > 0 { + return nil + } + mp.failHeight[fail]++ + return fmt.Errorf("failed to prune") + } + } + mp.deletedHeaderHashes = append(mp.deletedHeaderHashes, pruned{hash: h.Hash().String(), height: h.Height()}) + return nil +} + +// TODO @renaynay @distractedm1nd: Deduplicate via headertest utility. +// https://github.com/celestiaorg/celestia-node/issues/3278. +type SpacedHeaderGenerator struct { + t *testing.T + TimeBetweenHeaders time.Duration + currentTime time.Time + currentHeight int64 +} + +func NewSpacedHeaderGenerator( + t *testing.T, startTime time.Time, timeBetweenHeaders time.Duration, +) *SpacedHeaderGenerator { + return &SpacedHeaderGenerator{ + t: t, + TimeBetweenHeaders: timeBetweenHeaders, + currentTime: startTime, + currentHeight: 1, + } +} + +func (shg *SpacedHeaderGenerator) NextHeader() *header.ExtendedHeader { + h := headertest.RandExtendedHeaderAtTimestamp(shg.t, shg.currentTime) + h.RawHeader.Height = shg.currentHeight + h.RawHeader.Time = shg.currentTime + shg.currentHeight++ + shg.currentTime = shg.currentTime.Add(shg.TimeBetweenHeaders) + return h +} From 2ba8ec6466f321365593efaef1a872733b1df8f8 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 28 Mar 2024 17:19:19 +0400 Subject: [PATCH 119/132] use rlock in retriever --- share/shwap/getter/retriever.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/shwap/getter/retriever.go b/share/shwap/getter/retriever.go index 50354d0605..2eb7d45191 100644 --- a/share/shwap/getter/retriever.go +++ b/share/shwap/getter/retriever.go @@ -264,8 +264,8 @@ func (rs *retrievalSession) requestCell(ctx context.Context, x, y int) { return } - rs.squareLk.Lock() - defer rs.squareLk.Unlock() + rs.squareLk.RLock() + defer rs.squareLk.RUnlock() if err := rs.square.SetCell(uint(x), uint(y), share); err != nil { log.Warnw("failed to set cell", From 04ad0415070103f60858f9eb39054eae5fe84faf Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 28 Mar 2024 17:29:54 +0400 Subject: [PATCH 120/132] use reconstruction getter in full and light nodes --- nodebuilder/share/config.go | 1 + nodebuilder/share/constructors.go | 12 ++++++++++-- nodebuilder/share/module.go | 2 ++ share/shwap/getter/reconstruction.go | 6 +++++- share/shwap/getter/retriever.go | 6 ++++-- share/shwap/getter/retriever_test.go | 12 ++++++------ 6 files changed, 28 insertions(+), 11 deletions(-) diff --git a/nodebuilder/share/config.go b/nodebuilder/share/config.go index d08048ebc9..aff8c27bb4 100644 --- a/nodebuilder/share/config.go +++ b/nodebuilder/share/config.go @@ -18,6 +18,7 @@ type Config struct { EDSStoreParams *store.Parameters UseShareExchange bool 
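+	// UseShareSwap toggles the experimental shwap-based getter in the
+	// retrieval cascade (tried after shrex and before reconstruction).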
+ UseShareSwap bool // ShrExEDSParams sets shrexeds client and server configuration parameters ShrExEDSParams *shrexeds.Parameters // ShrExNDParams sets shrexnd client and server configuration parameters diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 9cf42229dd..1a14c0762e 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -41,13 +41,17 @@ func newModule(getter share.Getter, avail share.Availability) Module { func lightGetter( shrexGetter *getters.ShrexGetter, shwapGetter *shwap_getter.Getter, + reconstructGetter *shwap_getter.ReconstructionGetter, cfg Config, ) share.Getter { var cascade []share.Getter if cfg.UseShareExchange { cascade = append(cascade, shrexGetter) } - cascade = append(cascade, shwapGetter) + if cfg.UseShareSwap { + cascade = append(cascade, shwapGetter) + } + cascade = append(cascade, reconstructGetter) return getters.NewCascadeGetter(cascade) } @@ -72,6 +76,7 @@ func fullGetter( storeGetter *getters.StoreGetter, shrexGetter *getters.ShrexGetter, shwapGetter *shwap_getter.Getter, + reconstructGetter *shwap_getter.ReconstructionGetter, cfg Config, ) share.Getter { var cascade []share.Getter @@ -79,6 +84,9 @@ func fullGetter( if cfg.UseShareExchange { cascade = append(cascade, shrexGetter) } - cascade = append(cascade, shwapGetter) + if cfg.UseShareSwap { + cascade = append(cascade, shwapGetter) + } + cascade = append(cascade, reconstructGetter) return getters.NewCascadeGetter(cascade) } diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index 91e94bad1f..2fe022b515 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -179,6 +179,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option bridgeAndFullComponents, shrexGetterComponents, fx.Provide(shwap_getter.NewGetter), + fx.Provide(shwap_getter.NewReconstructionGetter), fx.Provide(fullGetter), ) case node.Light: @@ -193,6 +194,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option peerManagerWithShrexPools, shrexGetterComponents, fx.Provide(shwap_getter.NewGetter), + fx.Provide(shwap_getter.NewReconstructionGetter), fx.Provide(lightGetter), // shrexsub broadcaster stub for daser fx.Provide(func() shrexsub.BroadcastFn { diff --git a/share/shwap/getter/reconstruction.go b/share/shwap/getter/reconstruction.go index e66749cfb5..b0c6fc866f 100644 --- a/share/shwap/getter/reconstruction.go +++ b/share/shwap/getter/reconstruction.go @@ -8,7 +8,11 @@ import ( ) type ReconstructionGetter struct { - retriever edsRetriver + retriever *edsRetriver +} + +func NewReconstructionGetter(getter *Getter) *ReconstructionGetter { + return &ReconstructionGetter{retriever: newRetriever(getter)} } func (r ReconstructionGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { diff --git a/share/shwap/getter/retriever.go b/share/shwap/getter/retriever.go index 2eb7d45191..7788ec0981 100644 --- a/share/shwap/getter/retriever.go +++ b/share/shwap/getter/retriever.go @@ -4,6 +4,7 @@ import ( "context" "errors" "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/shwap" "sync" "sync/atomic" "time" @@ -55,8 +56,9 @@ type edsRetriver struct { getter share.Getter } -// NewRetriever creates a new instance of the edsRetriver over IPLD BlockService and rmst2d.Codec -func NewRetriever(bServ blockservice.BlockService, getter share.Getter) *edsRetriver { +// newRetriever creates a new instance 
of the edsRetriver over IPLD BlockService and rmst2d.Codec +func newRetriever(getter *Getter) *edsRetriver { + bServ := blockservice.New(getter.bstore, getter.fetch, blockservice.WithAllowlist(shwap.DefaultAllowlist)) return &edsRetriver{ bServ: bServ, getter: getter, diff --git a/share/shwap/getter/retriever_test.go b/share/shwap/getter/retriever_test.go index 0e1030273d..8d927a1541 100644 --- a/share/shwap/getter/retriever_test.go +++ b/share/shwap/getter/retriever_test.go @@ -23,7 +23,7 @@ func TestRetriever_Retrieve(t *testing.T) { exch := DummySessionExchange{bstore} getter := NewGetter(exch, bstore) bServ := ipld.NewMemBlockservice() - r := NewRetriever(bServ, getter) + r := newRetriever(bServ, getter) height := atomic.NewUint64(1) type test struct { @@ -92,7 +92,7 @@ func TestRetriever_Retrieve(t *testing.T) { // // ensure we rcv an error // dah, err := da.NewDataAvailabilityHeader(attackerEDS) // require.NoError(t, err) -// r := NewRetriever(bserv) +// r := newRetriever(bserv) // _, err = r.Retrieve(ctx, &dah) // var errByz *byzantine.ErrByzantine // require.ErrorAs(t, err, &errByz) @@ -107,7 +107,7 @@ func TestRetriever_Retrieve(t *testing.T) { // defer cancel() // // bServ := ipld.NewMemBlockservice() -// r := NewRetriever(bServ) +// r := newRetriever(bServ) // // // generate EDS // shares := sharetest.RandShares(t, squareSize*squareSize) @@ -158,7 +158,7 @@ func TestRetriever_Retrieve(t *testing.T) { // err := ipld.ImportEDS(ctx, eds, bServ) // require.NoError(t, err) // h := headertest.ExtendedHeaderFromEDS(t, 1, eds) -// _, err = NewRetriever(bServ).Retrieve(ctx, h.DAH) +// _, err = newRetriever(bServ).Retrieve(ctx, h.DAH) // // return h, err //} @@ -175,7 +175,7 @@ func TestRetriever_Retrieve(t *testing.T) { // ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) // defer b.Cleanup(cancel) // bServ := ipld.NewMemBlockservice() -// r := NewRetriever(bServ) +// r := newRetriever(bServ) // t := &testing.T{} // odsSize := []int{2, 4, 16, 32, 64, 128} // for _, size := range odsSize { @@ -214,7 +214,7 @@ func TestRetriever_Retrieve(t *testing.T) { // ctx, cancel := context.WithTimeout(context.Background(), time.Minute) // defer cancel() // bServ := ipld.NewMemBlockservice() -// r := NewRetriever(bServ) +// r := newRetriever(bServ) // t := &testing.T{} // for _, size := range odsSize { // b.Run(fmt.Sprintf("ods size:%d", size), func(b *testing.B) { From eef869de33e9a056f364f6496d8c37c2ef30b7be Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 28 Mar 2024 20:51:33 +0400 Subject: [PATCH 121/132] address Hlibs comments --- nodebuilder/config_test.go | 2 +- nodebuilder/share/config.go | 6 ++--- nodebuilder/share/constructors.go | 10 ++++----- nodebuilder/tests/fraud_test.go | 2 +- nodebuilder/tests/reconstruct_test.go | 6 ++--- share/shwap/data_id.go | 6 +++++ share/shwap/getter/getter.go | 32 +++++---------------------- share/shwap/row_id.go | 5 ++--- share/shwap/sample_id.go | 4 ++-- 9 files changed, 28 insertions(+), 45 deletions(-) diff --git a/nodebuilder/config_test.go b/nodebuilder/config_test.go index e7b64b0aed..3b98664025 100644 --- a/nodebuilder/config_test.go +++ b/nodebuilder/config_test.go @@ -97,7 +97,7 @@ var outdatedConfig = ` PeersLimit = 5 DiscoveryInterval = "30s" AdvertiseInterval = "30s" - UseShareExchange = true + UseShrEx = true [Share.ShrExEDSParams] ServerReadTimeout = "5s" ServerWriteTimeout = "1m0s" diff --git a/nodebuilder/share/config.go b/nodebuilder/share/config.go index aff8c27bb4..970cac8238 100644 --- a/nodebuilder/share/config.go +++ 
b/nodebuilder/share/config.go @@ -17,8 +17,8 @@ type Config struct { // EDSStoreParams sets eds store configuration parameters EDSStoreParams *store.Parameters - UseShareExchange bool - UseShareSwap bool + UseShrEx bool + UseShwap bool // ShrExEDSParams sets shrexeds client and server configuration parameters ShrExEDSParams *shrexeds.Parameters // ShrExNDParams sets shrexnd client and server configuration parameters @@ -36,7 +36,7 @@ func DefaultConfig(tp node.Type) Config { Discovery: discovery.DefaultParameters(), ShrExEDSParams: shrexeds.DefaultParameters(), ShrExNDParams: shrexnd.DefaultParameters(), - UseShareExchange: true, + UseShrEx: true, PeerManagerParams: peers.DefaultParameters(), } diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 4885d47a20..097f267490 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -45,10 +45,10 @@ func lightGetter( cfg Config, ) share.Getter { var cascade []share.Getter - if cfg.UseShareExchange { + if cfg.UseShrEx { cascade = append(cascade, shrexGetter) } - if cfg.UseShareSwap { + if cfg.UseShwap { cascade = append(cascade, shwapGetter) } cascade = append(cascade, reconstructGetter) @@ -66,7 +66,7 @@ func bridgeGetter( ) share.Getter { var cascade []share.Getter cascade = append(cascade, storeGetter) - if cfg.UseShareExchange { + if cfg.UseShrEx { cascade = append(cascade, shrexGetter) } return getters.NewCascadeGetter(cascade) @@ -81,10 +81,10 @@ func fullGetter( ) share.Getter { var cascade []share.Getter cascade = append(cascade, storeGetter) - if cfg.UseShareExchange { + if cfg.UseShrEx { cascade = append(cascade, shrexGetter) } - if cfg.UseShareSwap { + if cfg.UseShwap { cascade = append(cascade, shwapGetter) } cascade = append(cascade, reconstructGetter) diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index 03999de7f9..1296fab39f 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -79,7 +79,7 @@ func TestFraudProofHandling(t *testing.T) { addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) require.NoError(t, err) cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - cfg.Share.UseShareExchange = false + cfg.Share.UseShrEx = false store := nodebuilder.MockStore(t, cfg) full := sw.NewNodeWithStore(node.Full, store) diff --git a/nodebuilder/tests/reconstruct_test.go b/nodebuilder/tests/reconstruct_test.go index d047182669..6b45d90dfa 100644 --- a/nodebuilder/tests/reconstruct_test.go +++ b/nodebuilder/tests/reconstruct_test.go @@ -58,7 +58,7 @@ func TestFullReconstructFromBridge(t *testing.T) { require.NoError(t, err) cfg := nodebuilder.DefaultConfig(node.Full) - cfg.Share.UseShareExchange = false + cfg.Share.UseShrEx = false cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, getMultiAddr(t, bridge.Host)) full := sw.NewNodeWithConfig(node.Full, cfg) err = full.Start(ctx) @@ -170,7 +170,7 @@ func TestFullReconstructFromFulls(t *testing.T) { cfg := nodebuilder.DefaultConfig(node.Full) setTimeInterval(cfg, defaultTimeInterval) - cfg.Share.UseShareExchange = false + cfg.Share.UseShrEx = false cfg.Share.Discovery.PeersLimit = 0 cfg.Header.TrustedPeers = []string{lnBootstrapper1[0].String()} full1 := sw.NewNodeWithConfig(node.Full, cfg) @@ -301,7 +301,7 @@ func TestFullReconstructFromLights(t *testing.T) { cfg = nodebuilder.DefaultConfig(node.Full) setTimeInterval(cfg, defaultTimeInterval) - cfg.Share.UseShareExchange = false + cfg.Share.UseShrEx = false 
+	cfg.Share.UseShrEx = false
cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrsBridge[0].String()) nodesConfig := nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapperAddr}) full := sw.NewNodeWithConfig(node.Full, cfg, nodesConfig) diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index e8b8a83687..9266fb139f 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -125,6 +125,7 @@ func (s DataID) Verify(root *share.Root) error { return nil } +// BlockFromFile returns the IPLD block of the DataID from the given file. func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) { data, err := f.Data(ctx, s.Namespace(), int(s.RowIndex)) if err != nil { @@ -138,3 +139,8 @@ func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block } return blk, nil } + +// Release releases the verifier of the DataID. +func (s DataID) Release() { + dataVerifiers.Delete(s) +} diff --git a/share/shwap/getter/getter.go b/share/shwap/getter/getter.go index 28f640899c..782cac26b2 100644 --- a/share/shwap/getter/getter.go +++ b/share/shwap/getter/getter.go @@ -72,13 +72,6 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smpl return nil, fmt.Errorf("getting blocks: %w", err) } - if len(blks) != len(smplIdxs) { - if ctx.Err() != nil { - return nil, ctx.Err() - } - return nil, fmt.Errorf("not all shares were found") - } - // ensure we persist samples/blks and make them available for Bitswap err = g.bstore.PutMany(ctx, blks) if err != nil { @@ -91,19 +84,19 @@ func (g *Getter) GetShares(ctx context.Context, hdr *header.ExtendedHeader, smpl } // ensure we return shares in the requested order - shrs := make(map[int]share.Share, len(blks)) + shares := make(map[int]share.Share, len(blks)) for _, blk := range blks { sample, err := shwap.SampleFromBlock(blk) if err != nil { return nil, fmt.Errorf("getting sample from block: %w", err) } shrIdx := int(sample.SampleID.RowIndex)*len(hdr.DAH.RowRoots) + int(sample.SampleID.ShareIndex) - shrs[shrIdx] = sample.SampleShare + shares[shrIdx] = sample.SampleShare } - ordered := make([]share.Share, len(shrs)) + ordered := make([]share.Share, len(shares)) for i, shrIdx := range smplIdxs { - sh, ok := shrs[shrIdx] + sh, ok := shares[shrIdx] if !ok { return nil, fmt.Errorf("missing share for index %d", shrIdx) } @@ -137,23 +130,12 @@ func (g *Getter) GetEDS(ctx context.Context, hdr *header.ExtendedHeader) (*rsmt2 } - if len(blks) != sqrLn/2 { - if ctx.Err() != nil { - return nil, ctx.Err() - } - return nil, fmt.Errorf("not all rows were found") - } - rows := make([]*shwap.Row, len(blks)) for _, blk := range blks { row, err := shwap.RowFromBlock(blk) if err != nil { return nil, fmt.Errorf("getting row from block: %w", err) } - if row.RowIndex >= uint16(sqrLn/2) { - // should never happen, because rows should be verified against root by the time they are returned - return nil, fmt.Errorf("row index out of bounds: %d", row.RowIndex) - } rows[row.RowIndex] = row } @@ -219,10 +201,6 @@ func (g *Getter) GetSharesByNamespace( return nil, fmt.Errorf("getting row from block: %w", err) } - if data.RowIndex < uint16(from) || data.RowIndex >= uint16(to) { - // should never happen, because rows should be verified against root by the time they are returned - return nil, fmt.Errorf("row index out of bounds: %d", data.RowIndex) - } nShrs[int(data.RowIndex)-from] = share.NamespacedRow{ Shares: data.DataShares, Proof: &data.DataProof, @@ -251,7 +229,7 @@ func (g *Getter) getBlocks(ctx context.Context, cids []cid.Cid) 
([]block.Block, if ctx.Err() != nil { return nil, ctx.Err() } - return nil, fmt.Errorf("not all shares were found") + return nil, fmt.Errorf("not all blocks were found") } return blks, nil diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index 5dd706f915..29309a3968 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -15,8 +15,6 @@ import ( "github.com/celestiaorg/celestia-node/share/store/file" ) -// TODO(@walldiss): maybe move into separate subpkg? - // RowIDSize is the size of the RowID in bytes const RowIDSize = EdsIDSize + 2 @@ -55,7 +53,6 @@ func RowIDFromCID(cid cid.Cid) (id RowID, err error) { if err != nil { return id, fmt.Errorf("while unmarhaling RowID: %w", err) } - return id, nil } @@ -123,6 +120,7 @@ func (rid RowID) Verify(root *share.Root) error { return nil } +// BlockFromFile returns the IPLD block of the RowID from the given file. func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) { axisHalf, err := f.AxisHalf(ctx, rsmt2d.Row, int(rid.RowIndex)) if err != nil { @@ -147,6 +145,7 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc return blk, nil } +// Release releases the verifier of the RowID. func (rid RowID) Release() { rowVerifiers.Delete(rid) } diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 36c8cdaf0b..63de58cb77 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -13,8 +13,6 @@ import ( "github.com/celestiaorg/celestia-node/share/store/file" ) -// TODO(@walldiss): maybe move into separate subpkg? - // SampleIDSize is the size of the SampleID in bytes const SampleIDSize = RowIDSize + 2 @@ -117,6 +115,7 @@ func (sid SampleID) Verify(root *share.Root) error { return sid.RowID.Verify(root) } +// BlockFromFile returns the IPLD block of the Sample. func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block, error) { shr, err := f.Share(ctx, int(sid.ShareIndex), int(sid.RowID.RowIndex)) if err != nil { @@ -131,6 +130,7 @@ func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.B return blk, nil } +// Release releases the verifier of the SampleID. 
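+// Releasing is expected once the corresponding block has been fetched and
+// verified, so the registered verifier entry does not linger.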
func (sid SampleID) Release() { sampleVerifiers.Delete(sid) } From 6394d3786cba3853d876001c44bc42e3e9d0d20e Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 15:17:41 +0400 Subject: [PATCH 122/132] move retriever test to proper pkg --- share/eds/retriever_no_race_test.go | 55 ---------------------------- share/shwap/getter/retriever_test.go | 32 ++++++++++++++-- 2 files changed, 29 insertions(+), 58 deletions(-) delete mode 100644 share/eds/retriever_no_race_test.go diff --git a/share/eds/retriever_no_race_test.go b/share/eds/retriever_no_race_test.go deleted file mode 100644 index 34c54b7d02..0000000000 --- a/share/eds/retriever_no_race_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// go:build !race - -package eds - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/testing/edstest" -) - -func TestRetriever_ByzantineError(t *testing.T) { - const width = 8 - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - bserv := ipld.NewMemBlockservice() - shares := edstest.RandEDS(t, width).Flattened() - _, err := ipld.ImportShares(ctx, shares, bserv) - require.NoError(t, err) - - // corrupt shares so that eds erasure coding does not match - copy(shares[14][share.NamespaceSize:], shares[15][share.NamespaceSize:]) - - // import corrupted eds - batchAdder := ipld.NewNmtNodeAdder(ctx, bserv, ipld.MaxSizeBatchOption(width*2)) - attackerEDS, err := rsmt2d.ImportExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(width), - nmt.NodeVisitor(batchAdder.Visit)), - ) - require.NoError(t, err) - err = batchAdder.Commit() - require.NoError(t, err) - - // ensure we rcv an error - dah, err := da.NewDataAvailabilityHeader(attackerEDS) - require.NoError(t, err) - r := NewRetriever(bserv) - _, err = r.Retrieve(ctx, &dah) - var errByz *byzantine.ErrByzantine - require.ErrorAs(t, err, &errByz) -} diff --git a/share/shwap/getter/retriever_test.go b/share/shwap/getter/retriever_test.go index 8d927a1541..2694b72330 100644 --- a/share/shwap/getter/retriever_test.go +++ b/share/shwap/getter/retriever_test.go @@ -2,6 +2,7 @@ package shwap_getter import ( "context" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" "go.uber.org/atomic" "testing" "time" @@ -11,7 +12,6 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/testing/edstest" ) @@ -22,8 +22,7 @@ func TestRetriever_Retrieve(t *testing.T) { store, bstore := edsBlockstore(t) exch := DummySessionExchange{bstore} getter := NewGetter(exch, bstore) - bServ := ipld.NewMemBlockservice() - r := newRetriever(bServ, getter) + r := newRetriever(getter) height := atomic.NewUint64(1) type test struct { @@ -63,6 +62,33 @@ func TestRetriever_Retrieve(t *testing.T) { } } +func TestRetriever_ByzantineError(t *testing.T) { + const width = 8 + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + store, bstore := edsBlockstore(t) + exch := DummySessionExchange{bstore} + getter := NewGetter(exch, bstore) + r := 
newRetriever(getter) + + eds := edstest.RandEDS(t, width) + shares := eds.Flattened() + // corrupt shares so that eds erasure coding does not match + copy(shares[14][share.NamespaceSize:], shares[15][share.NamespaceSize:]) + + // store corrupted eds + put(t, store, eds, 1) + + // ensure we rcv an error + root, err := share.NewRoot(eds) + require.NoError(t, err) + hdr := &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}, DAH: root} + _, err = r.Retrieve(ctx, hdr) + var errByz *byzantine.ErrByzantine + require.ErrorAs(t, err, &errByz) +} + // //func TestRetriever_ByzantineError(t *testing.T) { // const width = 8 From 09e93d7715f2beccf9a9c600f95956b1ba4cdac2 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 15:17:59 +0400 Subject: [PATCH 123/132] integrate with pruner --- nodebuilder/pruner/module.go | 6 +++--- nodebuilder/share/module.go | 1 - pruner/full/pruner.go | 8 ++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/nodebuilder/pruner/module.go b/nodebuilder/pruner/module.go index 248798c3a4..553ac32476 100644 --- a/nodebuilder/pruner/module.go +++ b/nodebuilder/pruner/module.go @@ -2,6 +2,7 @@ package pruner import ( "context" + "github.com/celestiaorg/celestia-node/share/store" "go.uber.org/fx" @@ -10,7 +11,6 @@ import ( "github.com/celestiaorg/celestia-node/pruner/archival" "github.com/celestiaorg/celestia-node/pruner/full" "github.com/celestiaorg/celestia-node/pruner/light" - "github.com/celestiaorg/celestia-node/share/eds" ) func ConstructModule(tp node.Type, cfg *Config) fx.Option { @@ -46,7 +46,7 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { case node.Full: return fx.Module("prune", baseComponents, - fx.Provide(func(store *eds.Store) pruner.Pruner { + fx.Provide(func(store *store.Store) pruner.Pruner { return full.NewPruner(store) }), fx.Supply(full.Window), @@ -54,7 +54,7 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { case node.Bridge: return fx.Module("prune", baseComponents, - fx.Provide(func(store *eds.Store) pruner.Pruner { + fx.Provide(func(store *store.Store) pruner.Pruner { return full.NewPruner(store) }), fx.Supply(full.Window), diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index f3dfec18a6..fde0636822 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -84,7 +84,6 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option baseComponents, shrexGetterComponents(cfg), lightAvailabilityComponents(cfg), - fx.Invoke(ensureEmptyEDSInBS), fx.Provide(shwap_getter.NewGetter), fx.Provide(shwap_getter.NewReconstructionGetter), fx.Provide(lightGetter), diff --git a/pruner/full/pruner.go b/pruner/full/pruner.go index 49967b5050..eb55be962e 100644 --- a/pruner/full/pruner.go +++ b/pruner/full/pruner.go @@ -3,22 +3,22 @@ package full import ( "context" "errors" + "github.com/celestiaorg/celestia-node/share/store" "github.com/filecoin-project/dagstore" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" ) var log = logging.Logger("pruner/full") type Pruner struct { - store *eds.Store + store *store.Store } -func NewPruner(store *eds.Store) *Pruner { +func NewPruner(store *store.Store) *Pruner { return &Pruner{ store: store, } @@ -32,7 +32,7 @@ func (p *Pruner) Prune(ctx context.Context, eh *header.ExtendedHeader) error { log.Debugf("pruning header %s", eh.DAH.Hash()) - err := p.store.Remove(ctx, eh.DAH.Hash()) + err := 
p.store.Remove(ctx, eh.Height()) if err != nil && !errors.Is(err, dagstore.ErrShardUnknown) { return err } From 39c7276abc67813112d0bab0299d01197dda152a Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 15:50:44 +0400 Subject: [PATCH 124/132] remove redundant panic on marshaling error allow id marshallers to not depend on size of provided data container fix id types to have both pointer and value receivers R --- share/shwap/data.go | 32 +++++------------- share/shwap/data_hasher.go | 6 ++-- share/shwap/data_hasher_test.go | 3 +- share/shwap/data_id.go | 44 +++++++++++-------------- share/shwap/data_id_test.go | 7 ++-- share/shwap/data_test.go | 3 +- share/shwap/eds_id.go | 37 +++++++-------------- share/shwap/eds_id_test.go | 7 ++-- share/shwap/row.go | 30 +++++------------ share/shwap/row_hasher.go | 6 ++-- share/shwap/row_hasher_test.go | 3 +- share/shwap/row_id.go | 55 ++++++++++++------------------- share/shwap/row_id_test.go | 7 ++-- share/shwap/row_test.go | 3 +- share/shwap/sample.go | 35 ++++++++------------ share/shwap/sample_hasher.go | 6 ++-- share/shwap/sample_hasher_test.go | 3 +- share/shwap/sample_id.go | 40 +++++++++++----------- share/shwap/sample_id_test.go | 7 ++-- share/shwap/sample_test.go | 3 +- 20 files changed, 124 insertions(+), 213 deletions(-) diff --git a/share/shwap/data.go b/share/shwap/data.go index 126566245a..5cd96a780c 100644 --- a/share/shwap/data.go +++ b/share/shwap/data.go @@ -101,14 +101,7 @@ func DataFromBlock(blk blocks.Block) (*Data, error) { if err := validateCID(blk.Cid()); err != nil { return nil, err } - - s := &Data{} - err := s.UnmarshalBinary(blk.RawData()) - if err != nil { - return nil, fmt.Errorf("while unmarshalling Data: %w", err) - } - - return s, nil + return DataFromBinary(blk.RawData()) } // IPLDBlock converts Data to an IPLD block for Bitswap compatibility. @@ -123,11 +116,7 @@ func (s *Data) IPLDBlock() (blocks.Block, error) { // MarshalBinary marshals Data to binary. func (s *Data) MarshalBinary() ([]byte, error) { - id, err := s.DataID.MarshalBinary() - if err != nil { - return nil, err - } - + did := s.DataID.MarshalBinary() proof := &nmtpb.Proof{} proof.Nodes = s.DataProof.Nodes() proof.End = int64(s.DataProof.End()) @@ -136,27 +125,24 @@ func (s *Data) MarshalBinary() ([]byte, error) { proof.LeafHash = s.DataProof.LeafHash() return (&shwappb.Data{ - DataId: id, + DataId: did, DataShares: s.DataShares, DataProof: proof, }).Marshal() } -// UnmarshalBinary unmarshal Data from binary. -func (s *Data) UnmarshalBinary(data []byte) error { +// DataFromBinary unmarshal Data from binary. +func DataFromBinary(data []byte) (*Data, error) { proto := &shwappb.Data{} if err := proto.Unmarshal(data); err != nil { - return err + return nil, err } - err := s.DataID.UnmarshalBinary(proto.DataId) + did, err := DataIDFromBinary(proto.DataId) if err != nil { - return err + return nil, err } - - s.DataShares = proto.DataShares - s.DataProof = nmt.ProtoToProof(*proto.DataProof) - return nil + return NewData(did, proto.DataShares, nmt.ProtoToProof(*proto.DataProof)), nil } // Verify validates Data's fields and verifies Data inclusion. diff --git a/share/shwap/data_hasher.go b/share/shwap/data_hasher.go index 79c5e523df..15fdc909f7 100644 --- a/share/shwap/data_hasher.go +++ b/share/shwap/data_hasher.go @@ -12,14 +12,14 @@ type DataHasher struct { // Write expects a marshaled Data to validate. 
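// Instead of hashing, it unmarshals the Data and checks it against the
// verifier registered for its DataID; the hasher tests below assert that
// Sum then yields the DataID bytes as the digest.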
func (h *DataHasher) Write(data []byte) (int, error) { - var d Data - if err := d.UnmarshalBinary(data); err != nil { + d, err := DataFromBinary(data) + if err != nil { err = fmt.Errorf("unmarshaling Data: %w", err) log.Error(err) return 0, err } - if err := dataVerifiers.Verify(d.DataID, d); err != nil { + if err := dataVerifiers.Verify(d.DataID, *d); err != nil { err = fmt.Errorf("verifying Data: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go index b7b36166af..72be5e79e7 100644 --- a/share/shwap/data_hasher_test.go +++ b/share/shwap/data_hasher_test.go @@ -36,8 +36,7 @@ func TestDataHasher(t *testing.T) { assert.EqualValues(t, len(dat), n) digest := hasher.Sum(nil) - id, err := data.DataID.MarshalBinary() - require.NoError(t, err) + id := data.DataID.MarshalBinary() assert.EqualValues(t, id, digest) hasher.Reset() diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index 9266fb139f..b35fd448e3 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -50,7 +50,7 @@ func DataIDFromCID(cid cid.Cid) (id DataID, err error) { return id, err } - err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) + id, err = DataIDFromBinary(cid.Hash()[mhPrefixSize:]) if err != nil { return id, fmt.Errorf("unmarhalling DataID: %w", err) } @@ -66,10 +66,7 @@ func (s DataID) Namespace() share.Namespace { // Cid returns DataID encoded as CID. func (s DataID) Cid() cid.Cid { // avoid using proto serialization for CID as it's not deterministic - data, err := s.MarshalBinary() - if err != nil { - panic(fmt.Errorf("marshaling DataID: %w", err)) - } + data := s.MarshalBinary() buf, err := mh.Encode(data, dataMultihashCode) if err != nil { @@ -83,34 +80,28 @@ func (s DataID) Cid() cid.Cid { // NOTE: Proto is avoided because // * Its size is not deterministic which is required for IPLD. // * No support for uint16 -func (s DataID) MarshalBinary() ([]byte, error) { +func (s DataID) MarshalBinary() []byte { data := make([]byte, 0, DataIDSize) - n, err := s.RowID.MarshalTo(data) - if err != nil { - return nil, err - } - data = data[:n] - data = append(data, s.DataNamespace...) - return data, nil + return s.appendTo(data) } -// UnmarshalBinary decodes DataID from binary form. -func (s *DataID) UnmarshalBinary(data []byte) error { +// DataIDFromBinary decodes DataID from binary form. +func DataIDFromBinary(data []byte) (DataID, error) { + var did DataID if len(data) != DataIDSize { - return fmt.Errorf("invalid DataID data length: %d != %d", len(data), DataIDSize) + return did, fmt.Errorf("invalid DataID data length: %d != %d", len(data), DataIDSize) } - n, err := s.RowID.UnmarshalFrom(data) + rid, err := RowIDFromBinary(data[:RowIDSize]) if err != nil { - return err + return did, fmt.Errorf("while unmarhaling RowID: %w", err) } - - ns := share.Namespace(data[n:]) + did.RowID = rid + ns := share.Namespace(data[RowIDSize:]) if err = ns.ValidateForData(); err != nil { - return err + return did, fmt.Errorf("validating DataNamespace: %w", err) } - - s.DataNamespace = string(ns) - return nil + did.DataNamespace = string(ns) + return did, err } // Verify verifies DataID fields. @@ -144,3 +135,8 @@ func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block func (s DataID) Release() { dataVerifiers.Delete(s) } + +func (s DataID) appendTo(data []byte) []byte { + data = s.RowID.appendTo(data) + return append(data, s.DataNamespace...) 
+} diff --git a/share/shwap/data_id_test.go b/share/shwap/data_id_test.go index 6d902d2245..9963a4ecba 100644 --- a/share/shwap/data_id_test.go +++ b/share/shwap/data_id_test.go @@ -22,11 +22,8 @@ func TestDataID(t *testing.T) { assert.EqualValues(t, dataMultihashCode, cid.Prefix().MhType) assert.EqualValues(t, DataIDSize, cid.Prefix().MhLength) - data, err := id.MarshalBinary() - require.NoError(t, err) - - sidOut := DataID{} - err = sidOut.UnmarshalBinary(data) + data := id.MarshalBinary() + sidOut, err := DataIDFromBinary(data) require.NoError(t, err) assert.EqualValues(t, id, sidOut) diff --git a/share/shwap/data_test.go b/share/shwap/data_test.go index 0d925a2ec3..c41373aedc 100644 --- a/share/shwap/data_test.go +++ b/share/shwap/data_test.go @@ -25,8 +25,7 @@ func TestData(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, blk.Cid(), nd.Cid()) - dataOut := &Data{} - err = dataOut.UnmarshalBinary(data) + dataOut, err := DataFromBinary(data) require.NoError(t, err) assert.EqualValues(t, nd, dataOut) diff --git a/share/shwap/eds_id.go b/share/shwap/eds_id.go index 60b3e072d2..5044fa6b5e 100644 --- a/share/shwap/eds_id.go +++ b/share/shwap/eds_id.go @@ -3,7 +3,6 @@ package shwap import ( "encoding/binary" "fmt" - "github.com/celestiaorg/celestia-node/share" ) @@ -25,37 +24,19 @@ func NewEdsID(height uint64, root *share.Root) (EdsID, error) { return rid, rid.Verify(root) } -// MarshalTo encodes EdsID into given byte slice. -// NOTE: Proto is avoided because -// * Its size is not deterministic which is required for IPLD. -// * No support for uint16 -func (rid EdsID) MarshalTo(data []byte) (int, error) { - // TODO:(@walldiss): this works, only if data underlying array was preallocated with - // enough size. Otherwise Caller might not see the changes. - data = binary.BigEndian.AppendUint64(data, rid.Height) - return EdsIDSize, nil -} - -// UnmarshalFrom decodes EdsID from given byte slice. -func (rid *EdsID) UnmarshalFrom(data []byte) (int, error) { - rid.Height = binary.BigEndian.Uint64(data) - return EdsIDSize, nil -} - // MarshalBinary encodes EdsID into binary form. -func (rid EdsID) MarshalBinary() ([]byte, error) { +func (rid EdsID) MarshalBinary() []byte { data := make([]byte, 0, EdsIDSize) - n, err := rid.MarshalTo(data) - return data[:n], err + return rid.appendTo(data) } -// UnmarshalBinary decodes EdsID from binary form. -func (rid *EdsID) UnmarshalBinary(data []byte) error { +// EdsIDFromBinary decodes EdsID from binary form. +func EdsIDFromBinary(data []byte) (rid EdsID, err error) { if len(data) != EdsIDSize { - return fmt.Errorf("invalid EdsID data length: %d != %d", len(data), EdsIDSize) + return rid, fmt.Errorf("invalid EdsID data length: %d != %d", len(data), EdsIDSize) } - _, err := rid.UnmarshalFrom(data) - return err + rid.Height = binary.BigEndian.Uint64(data) + return rid, nil } // Verify verifies EdsID fields. 
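The NOTE kept on MarshalBinary (proto avoided because its size is not deterministic and uint16 is unsupported) is easy to check: proto varints grow with the value, while the big-endian form of EdsID.Height is always 8 bytes, which the fixed-length multihash encoding of these IDs relies on. A small self-contained illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Varint (proto-style) encoding changes length with the value...
	fmt.Println(len(binary.AppendUvarint(nil, 1)))     // 1
	fmt.Println(len(binary.AppendUvarint(nil, 1<<40))) // 6

	// ...while the fixed-width encoding used for EdsID.Height is always
	// 8 bytes, so the multihash length stays constant across heights.
	fmt.Println(len(binary.BigEndian.AppendUint64(nil, 1)))     // 8
	fmt.Println(len(binary.BigEndian.AppendUint64(nil, 1<<40))) // 8
}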
@@ -73,3 +54,7 @@ func (rid EdsID) Verify(root *share.Root) error { func (rid EdsID) GetHeight() uint64 { return rid.Height } + +func (rid EdsID) appendTo(data []byte) []byte { + return binary.BigEndian.AppendUint64(data, rid.Height) +} diff --git a/share/shwap/eds_id_test.go b/share/shwap/eds_id_test.go index 754403f9d4..9f0a3190fb 100644 --- a/share/shwap/eds_id_test.go +++ b/share/shwap/eds_id_test.go @@ -18,11 +18,8 @@ func TestEdsID(t *testing.T) { id, err := NewEdsID(2, root) require.NoError(t, err) - data, err := id.MarshalBinary() - require.NoError(t, err) - - idOut := EdsID{} - err = idOut.UnmarshalBinary(data) + data := id.MarshalBinary() + idOut, err := EdsIDFromBinary(data) require.NoError(t, err) assert.EqualValues(t, id, idOut) diff --git a/share/shwap/row.go b/share/shwap/row.go index f2785870c4..266fef4d82 100644 --- a/share/shwap/row.go +++ b/share/shwap/row.go @@ -56,14 +56,7 @@ func RowFromBlock(blk blocks.Block) (*Row, error) { if err := validateCID(blk.Cid()); err != nil { return nil, err } - - s := &Row{} - err := s.UnmarshalBinary(blk.RawData()) - if err != nil { - return nil, fmt.Errorf("while unmarshalling Row: %w", err) - } - - return s, nil + return RowFromBinary(blk.RawData()) } // IPLDBlock converts Row to an IPLD block for Bitswap compatibility. @@ -78,31 +71,24 @@ func (r *Row) IPLDBlock() (blocks.Block, error) { // MarshalBinary marshals Row to binary. func (r *Row) MarshalBinary() ([]byte, error) { - id, err := r.RowID.MarshalBinary() - if err != nil { - return nil, err - } - return (&shwappb.Row{ - RowId: id, + RowId: r.RowID.MarshalBinary(), RowHalf: r.RowShares, }).Marshal() } -// UnmarshalBinary unmarshal Row from binary. -func (r *Row) UnmarshalBinary(data []byte) error { +// RowFromBinary unmarshal Row from binary. +func RowFromBinary(data []byte) (*Row, error) { proto := &shwappb.Row{} if err := proto.Unmarshal(data); err != nil { - return err + return nil, err } - err := r.RowID.UnmarshalBinary(proto.RowId) + rid, err := RowIDFromBinary(proto.RowId) if err != nil { - return err + return nil, err } - - r.RowShares = proto.RowHalf - return nil + return NewRow(rid, proto.RowHalf), nil } // Verify validates Row's fields and verifies Row inclusion. diff --git a/share/shwap/row_hasher.go b/share/shwap/row_hasher.go index 0da51bc0e4..db11616ad8 100644 --- a/share/shwap/row_hasher.go +++ b/share/shwap/row_hasher.go @@ -12,14 +12,14 @@ type RowHasher struct { // Write expects a marshaled Row to validate. 
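As the Write implementations below show, these "hashers" do not hash at all: Write parses and verifies the container, and Sum returns the marshaled ID, so Bitswap's digest comparison against the requested multihash becomes the integrity check. A stripped-down sketch of the trick, simplified by assuming the ID is a plain prefix of the message and skipping real verification:

package main

import (
	"fmt"
	"hash"
)

const idSize = 10 // hypothetical; the real sizes are RowIDSize, SampleIDSize, etc.

// validatingHasher implements hash.Hash, but Sum returns the ID portion of the
// last accepted message instead of a digest, so a block "hashes" to its ID.
type validatingHasher struct{ id []byte }

var _ hash.Hash = (*validatingHasher)(nil)

func (h *validatingHasher) Write(p []byte) (int, error) {
	if len(p) < idSize {
		return 0, fmt.Errorf("message shorter than ID: %d < %d", len(p), idSize)
	}
	// A real implementation unmarshals and verifies p here before accepting it.
	h.id = append(h.id[:0], p[:idSize]...)
	return len(p), nil
}

func (h *validatingHasher) Sum(b []byte) []byte { return append(b, h.id...) }
func (h *validatingHasher) Reset()              { h.id = h.id[:0] }
func (h *validatingHasher) Size() int           { return idSize }
func (h *validatingHasher) BlockSize() int      { return idSize }

func main() {
	var h validatingHasher
	msg := append([]byte("0123456789"), []byte("payload")...)
	if _, err := h.Write(msg); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", h.Sum(nil)) // 0123456789
}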
func (h *RowHasher) Write(data []byte) (int, error) { - var row Row - if err := row.UnmarshalBinary(data); err != nil { + row, err := RowFromBinary(data) + if err != nil { err = fmt.Errorf("unmarshaling Row: %w", err) log.Error(err) return 0, err } - if err := rowVerifiers.Verify(row.RowID, row); err != nil { + if err := rowVerifiers.Verify(row.RowID, *row); err != nil { err = fmt.Errorf("verifying Row: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/row_hasher_test.go b/share/shwap/row_hasher_test.go index 51142ee949..ff8bd0c41f 100644 --- a/share/shwap/row_hasher_test.go +++ b/share/shwap/row_hasher_test.go @@ -35,8 +35,7 @@ func TestRowHasher(t *testing.T) { assert.EqualValues(t, len(data), n) digest := hasher.Sum(nil) - id, err := row.RowID.MarshalBinary() - require.NoError(t, err) + id := row.RowID.MarshalBinary() assert.EqualValues(t, id, digest) hasher.Reset() diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index 29309a3968..fce6155475 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -1,10 +1,10 @@ package shwap import ( + "bytes" "context" "encoding/binary" "fmt" - blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" @@ -49,19 +49,16 @@ func RowIDFromCID(cid cid.Cid) (id RowID, err error) { return id, err } - err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) + rid, err := RowIDFromBinary(cid.Hash()[mhPrefixSize:]) if err != nil { return id, fmt.Errorf("while unmarhaling RowID: %w", err) } - return id, nil + return rid, nil } // Cid returns RowID encoded as CID. func (rid RowID) Cid() cid.Cid { - data, err := rid.MarshalBinary() - if err != nil { - panic(fmt.Errorf("marshaling RowID: %w", err)) - } + data := rid.MarshalBinary() buf, err := mh.Encode(data, rowMultihashCode) if err != nil { @@ -71,39 +68,24 @@ func (rid RowID) Cid() cid.Cid { return cid.NewCidV1(rowCodec, buf) } -// MarshalTo encodes RowID into given byte slice. -// NOTE: Proto is avoided because -// * Its size is not deterministic which is required for IPLD. -// * No support for uint16 -func (rid RowID) MarshalTo(data []byte) (int, error) { - // TODO:(@walldiss): this works, only if data underlying array was preallocated with - // enough size. Otherwise Caller might not see the changes. - data = binary.BigEndian.AppendUint64(data, rid.Height) - data = binary.BigEndian.AppendUint16(data, rid.RowIndex) - return RowIDSize, nil -} - -// UnmarshalFrom decodes RowID from given byte slice. -func (rid *RowID) UnmarshalFrom(data []byte) (int, error) { - rid.Height = binary.BigEndian.Uint64(data) - rid.RowIndex = binary.BigEndian.Uint16(data[8:]) - return RowIDSize, nil -} - // MarshalBinary encodes RowID into binary form. -func (rid RowID) MarshalBinary() ([]byte, error) { +func (rid RowID) MarshalBinary() []byte { data := make([]byte, 0, RowIDSize) - n, err := rid.MarshalTo(data) - return data[:n], err + return rid.appendTo(data) } -// UnmarshalBinary decodes RowID from binary form. -func (rid *RowID) UnmarshalBinary(data []byte) error { +// RowIDFromBinary decodes RowID from binary form. 
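MarshalBinary above is now a thin wrapper over appendTo, and the nested IDs chain these helpers: RowID appends EdsID's bytes before its own, SampleID appends RowID's, so every ID's binary form starts with its parent's. A sketch of the chain with the same layout (8-byte height, 2-byte row index), under hypothetical type names:

package main

import (
	"encoding/binary"
	"fmt"
)

type edsID struct{ Height uint64 }

type rowID struct {
	edsID
	RowIndex uint16
}

func (eid edsID) appendTo(data []byte) []byte {
	return binary.BigEndian.AppendUint64(data, eid.Height)
}

// rowID.appendTo layers its own field after the embedded ID's bytes, so the
// binary form of every ID is a strict extension of its parent's form.
func (rid rowID) appendTo(data []byte) []byte {
	data = rid.edsID.appendTo(data)
	return binary.BigEndian.AppendUint16(data, rid.RowIndex)
}

func main() {
	rid := rowID{edsID: edsID{Height: 7}, RowIndex: 3}
	buf := rid.appendTo(make([]byte, 0, 10)) // 10 == RowIDSize in this sketch
	fmt.Printf("% x\n", buf)                 // 00 00 00 00 00 00 00 07 00 03
}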
+func RowIDFromBinary(data []byte) (RowID, error) { + var rid RowID if len(data) != RowIDSize { - return fmt.Errorf("invalid RowID data length: %d != %d", len(data), RowIDSize) + return rid, fmt.Errorf("invalid RowID data length: %d != %d", len(data), RowIDSize) + } + eid, err := EdsIDFromBinary(data[:EdsIDSize]) + if err != nil { + return rid, fmt.Errorf("while decoding EdsID: %w", err) } - _, err := rid.UnmarshalFrom(data) - return err + rid.EdsID = eid + return rid, binary.Read(bytes.NewReader(data[EdsIDSize:]), binary.BigEndian, &rid.RowIndex) } // Verify verifies RowID fields. @@ -149,3 +131,8 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc func (rid RowID) Release() { rowVerifiers.Delete(rid) } + +func (rid RowID) appendTo(data []byte) []byte { + data = rid.EdsID.appendTo(data) + return binary.BigEndian.AppendUint16(data, rid.RowIndex) +} diff --git a/share/shwap/row_id_test.go b/share/shwap/row_id_test.go index 479cc1d4e9..36f399cf7b 100644 --- a/share/shwap/row_id_test.go +++ b/share/shwap/row_id_test.go @@ -23,11 +23,8 @@ func TestRowID(t *testing.T) { assert.EqualValues(t, rowMultihashCode, cid.Prefix().MhType) assert.EqualValues(t, RowIDSize, cid.Prefix().MhLength) - data, err := id.MarshalBinary() - require.NoError(t, err) - - idOut := RowID{} - err = idOut.UnmarshalBinary(data) + data := id.MarshalBinary() + idOut, err := RowIDFromBinary(data) require.NoError(t, err) assert.EqualValues(t, id, idOut) diff --git a/share/shwap/row_test.go b/share/shwap/row_test.go index a5eecd5190..0f5103b2d9 100644 --- a/share/shwap/row_test.go +++ b/share/shwap/row_test.go @@ -25,8 +25,7 @@ func TestRow(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, blk.Cid(), row.Cid()) - rowOut := &Row{} - err = rowOut.UnmarshalBinary(data) + rowOut, err := RowFromBinary(data) require.NoError(t, err) assert.EqualValues(t, row, rowOut) diff --git a/share/shwap/sample.go b/share/shwap/sample.go index fa4ea58c0c..6d14680883 100644 --- a/share/shwap/sample.go +++ b/share/shwap/sample.go @@ -100,14 +100,7 @@ func SampleFromBlock(blk blocks.Block) (*Sample, error) { if err := validateCID(blk.Cid()); err != nil { return nil, err } - - s := &Sample{} - err := s.UnmarshalBinary(blk.RawData()) - if err != nil { - return nil, fmt.Errorf("while unmarshalling Sample: %w", err) - } - - return s, nil + return SampleFromBinary(blk.RawData()) } // IPLDBlock converts Sample to an IPLD block for Bitswap compatibility. @@ -122,11 +115,7 @@ func (s *Sample) IPLDBlock() (blocks.Block, error) { // MarshalBinary marshals Sample to binary. func (s *Sample) MarshalBinary() ([]byte, error) { - id, err := s.SampleID.MarshalBinary() - if err != nil { - return nil, err - } - + id := s.SampleID.MarshalBinary() proof := &nmtpb.Proof{} proof.Nodes = s.SampleProof.Nodes() proof.End = int64(s.SampleProof.End()) @@ -142,22 +131,24 @@ func (s *Sample) MarshalBinary() ([]byte, error) { }).Marshal() } -// UnmarshalBinary unmarshal Sample from binary. -func (s *Sample) UnmarshalBinary(data []byte) error { +// SampleFromBinary unmarshal Sample from binary. 
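RowIDFromBinary above reads its trailing uint16 with binary.Read over a bytes.Reader instead of indexing into the slice; after the length check the two are equivalent, and binary.Read surfaces short reads as errors that can be returned directly. A quick sketch of that equivalence:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	data := []byte{0x00, 0x03}

	// Manual decoding.
	manual := binary.BigEndian.Uint16(data)

	// binary.Read fills the pointed-to value and reports short reads as
	// errors, which is why the decoders above can return its error directly.
	var viaRead uint16
	err := binary.Read(bytes.NewReader(data), binary.BigEndian, &viaRead)

	fmt.Println(manual, viaRead, err) // 3 3 <nil>
}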
+func SampleFromBinary(data []byte) (*Sample, error) { proto := &shwappb.Sample{} if err := proto.Unmarshal(data); err != nil { - return err + return nil, err } - err := s.SampleID.UnmarshalBinary(proto.SampleId) + sid, err := SampleIdFromBinary(proto.SampleId) if err != nil { - return err + return nil, err } - s.SampleProofType = SampleProofType(proto.ProofType) - s.SampleProof = nmt.ProtoToProof(*proto.SampleProof) - s.SampleShare = proto.SampleShare - return nil + return &Sample{ + SampleID: sid, + SampleProofType: SampleProofType(proto.ProofType), + SampleProof: nmt.ProtoToProof(*proto.SampleProof), + SampleShare: proto.SampleShare, + }, nil } // Verify validates Sample's fields and verifies SampleShare inclusion. diff --git a/share/shwap/sample_hasher.go b/share/shwap/sample_hasher.go index 30867dc04c..6aa4845d34 100644 --- a/share/shwap/sample_hasher.go +++ b/share/shwap/sample_hasher.go @@ -12,14 +12,14 @@ type SampleHasher struct { // Write expects a marshaled Sample to validate. func (h *SampleHasher) Write(data []byte) (int, error) { - var s Sample - if err := s.UnmarshalBinary(data); err != nil { + s, err := SampleFromBinary(data) + if err != nil { err = fmt.Errorf("unmarshaling Sample: %w", err) log.Error(err) return 0, err } - if err := sampleVerifiers.Verify(s.SampleID, s); err != nil { + if err := sampleVerifiers.Verify(s.SampleID, *s); err != nil { err = fmt.Errorf("verifying Sample: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/sample_hasher_test.go b/share/shwap/sample_hasher_test.go index 9adaa0c543..fc8d1e2ef1 100644 --- a/share/shwap/sample_hasher_test.go +++ b/share/shwap/sample_hasher_test.go @@ -35,8 +35,7 @@ func TestSampleHasher(t *testing.T) { assert.EqualValues(t, len(data), n) digest := hasher.Sum(nil) - id, err := sample.SampleID.MarshalBinary() - require.NoError(t, err) + id := sample.SampleID.MarshalBinary() assert.EqualValues(t, id, digest) hasher.Reset() diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 63de58cb77..97739eccc1 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -1,6 +1,7 @@ package shwap import ( + "bytes" "context" "encoding/binary" "fmt" @@ -52,7 +53,7 @@ func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) { return id, err } - err = id.UnmarshalBinary(cid.Hash()[mhPrefixSize:]) + id, err = SampleIdFromBinary(cid.Hash()[mhPrefixSize:]) if err != nil { return id, fmt.Errorf("while unmarhaling SampleID: %w", err) } @@ -63,10 +64,7 @@ func SampleIDFromCID(cid cid.Cid) (id SampleID, err error) { // Cid returns SampleID encoded as CID. func (sid SampleID) Cid() cid.Cid { // avoid using proto serialization for CID as it's not deterministic - data, err := sid.MarshalBinary() - if err != nil { - panic(fmt.Errorf("marshaling SampleID: %w", err)) - } + data := sid.MarshalBinary() buf, err := mh.Encode(data, sampleMultihashCode) if err != nil { @@ -80,29 +78,24 @@ func (sid SampleID) Cid() cid.Cid { // NOTE: Proto is avoided because // * Its size is not deterministic which is required for IPLD. // * No support for uint16 -func (sid SampleID) MarshalBinary() ([]byte, error) { +func (sid SampleID) MarshalBinary() []byte { data := make([]byte, 0, SampleIDSize) - n, err := sid.RowID.MarshalTo(data) - if err != nil { - return nil, err - } - data = data[:n] - data = binary.BigEndian.AppendUint16(data, sid.ShareIndex) - return data, nil + return sid.appendTo(data) } -// UnmarshalBinary decodes SampleID from binary form. 
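The Cid() methods in these files all follow the same two steps: wrap the deterministic ID bytes in a multihash, then wrap that in a CIDv1; with MarshalBinary now infallible, mh.Encode is the only remaining error path. The same steps sketched with a standard code pair (the real code uses the custom sampleMultihashCode and sampleCodec instead):

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// Stand-in for an ID's MarshalBinary output; 32 bytes to match the
	// expected digest length of the SHA2_256 code used in this sketch.
	digest := make([]byte, 32)

	buf, err := mh.Encode(digest, mh.SHA2_256)
	if err != nil {
		panic(fmt.Errorf("encoding multihash: %w", err))
	}

	c := cid.NewCidV1(cid.Raw, buf)
	fmt.Println(c) // a CIDv1 whose multihash body is the raw ID bytes
}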
-func (sid *SampleID) UnmarshalBinary(data []byte) error { +// SampleIdFromBinary decodes SampleID from binary form. +func SampleIdFromBinary(data []byte) (SampleID, error) { + var sid SampleID if len(data) != SampleIDSize { - return fmt.Errorf("invalid SampleID data length: %d != %d", len(data), SampleIDSize) + return sid, fmt.Errorf("invalid SampleID data length: %d != %d", len(data), SampleIDSize) } - n, err := sid.RowID.UnmarshalFrom(data) + + rid, err := RowIDFromBinary(data[:RowIDSize]) if err != nil { - return err + return sid, fmt.Errorf("while decoding RowID: %w", err) } - data = data[n:] - sid.ShareIndex = binary.BigEndian.Uint16(data) - return nil + sid.RowID = rid + return sid, binary.Read(bytes.NewReader(data[RowIDSize:]), binary.BigEndian, &sid.ShareIndex) } // Verify verifies SampleID fields. @@ -134,3 +127,8 @@ func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.B func (sid SampleID) Release() { sampleVerifiers.Delete(sid) } + +func (sid SampleID) appendTo(data []byte) []byte { + data = sid.RowID.appendTo(data) + return binary.BigEndian.AppendUint16(data, sid.ShareIndex) +} diff --git a/share/shwap/sample_id_test.go b/share/shwap/sample_id_test.go index 43c07ddb11..373cfa69fc 100644 --- a/share/shwap/sample_id_test.go +++ b/share/shwap/sample_id_test.go @@ -23,11 +23,8 @@ func TestSampleID(t *testing.T) { assert.EqualValues(t, sampleMultihashCode, cid.Prefix().MhType) assert.EqualValues(t, SampleIDSize, cid.Prefix().MhLength) - data, err := id.MarshalBinary() - require.NoError(t, err) - - idOut := SampleID{} - err = idOut.UnmarshalBinary(data) + data := id.MarshalBinary() + idOut, err := SampleIdFromBinary(data) require.NoError(t, err) assert.EqualValues(t, id, idOut) diff --git a/share/shwap/sample_test.go b/share/shwap/sample_test.go index 9b14740ba7..77acd310e0 100644 --- a/share/shwap/sample_test.go +++ b/share/shwap/sample_test.go @@ -25,8 +25,7 @@ func TestSample(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, blk.Cid(), sample.Cid()) - sampleOut := &Sample{} - err = sampleOut.UnmarshalBinary(data) + sampleOut, err := SampleFromBinary(data) require.NoError(t, err) assert.EqualValues(t, sample, sampleOut) From 948013c5eb0a430224623cf9b36f124f4f372159 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 16:04:30 +0400 Subject: [PATCH 125/132] remove generics from shwap verification code --- share/shwap/data_hasher.go | 2 +- share/shwap/data_id.go | 11 +++++----- share/shwap/row_hasher.go | 2 +- share/shwap/row_hasher_test.go | 4 +--- share/shwap/row_id.go | 13 ++++++------ share/shwap/sample_hasher.go | 2 +- share/shwap/sample_hasher_test.go | 4 +--- share/shwap/sample_id.go | 12 +++++------ share/shwap/shwap.go | 35 +++++++++++++++++++------------ share/shwap/shwap_test.go | 27 ++++++------------------ 10 files changed, 52 insertions(+), 60 deletions(-) diff --git a/share/shwap/data_hasher.go b/share/shwap/data_hasher.go index 15fdc909f7..1ad554930c 100644 --- a/share/shwap/data_hasher.go +++ b/share/shwap/data_hasher.go @@ -19,7 +19,7 @@ func (h *DataHasher) Write(data []byte) (int, error) { return 0, err } - if err := dataVerifiers.Verify(d.DataID, *d); err != nil { + if err := rootVerifiers.Verify(d); err != nil { err = fmt.Errorf("verifying Data: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index b35fd448e3..53369d6b30 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -36,10 +36,7 @@ func NewDataID(height uint64, rowIdx uint16, namespace 
share.Namespace, root *sh DataNamespace: string(namespace), } - verifyFn := func(d Data) error { - return d.Verify(root) - } - dataVerifiers.Add(did, verifyFn) + rootVerifiers.Add(did, root) return did, did.Verify(root) } @@ -133,10 +130,14 @@ func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block // Release releases the verifier of the DataID. func (s DataID) Release() { - dataVerifiers.Delete(s) + rootVerifiers.Delete(s) } func (s DataID) appendTo(data []byte) []byte { data = s.RowID.appendTo(data) return append(data, s.DataNamespace...) } + +func (s DataID) key() any { + return s +} diff --git a/share/shwap/row_hasher.go b/share/shwap/row_hasher.go index db11616ad8..2f246dfa9a 100644 --- a/share/shwap/row_hasher.go +++ b/share/shwap/row_hasher.go @@ -19,7 +19,7 @@ func (h *RowHasher) Write(data []byte) (int, error) { return 0, err } - if err := rowVerifiers.Verify(row.RowID, *row); err != nil { + if err := rootVerifiers.Verify(row); err != nil { err = fmt.Errorf("verifying Row: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/row_hasher_test.go b/share/shwap/row_hasher_test.go index ff8bd0c41f..1b052a4d6b 100644 --- a/share/shwap/row_hasher_test.go +++ b/share/shwap/row_hasher_test.go @@ -23,9 +23,7 @@ func TestRowHasher(t *testing.T) { row, err := NewRowFromEDS(2, 1, square) require.NoError(t, err) - rowVerifiers.Add(row.RowID, func(row Row) error { - return row.Verify(root) - }) + rootVerifiers.Add(row.RowID, root) data, err := row.MarshalBinary() require.NoError(t, err) diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index fce6155475..1acd344fcb 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "fmt" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" @@ -34,11 +35,7 @@ func NewRowID(height uint64, rowIdx uint16, root *share.Root) (RowID, error) { }, RowIndex: rowIdx, } - - verifyFn := func(row Row) error { - return row.Verify(root) - } - rowVerifiers.Add(rid, verifyFn) + rootVerifiers.Add(rid, root) return rid, rid.Verify(root) } @@ -129,10 +126,14 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc // Release releases the verifier of the RowID. 
func (rid RowID) Release() { - rowVerifiers.Delete(rid) + rootVerifiers.Delete(rid) } func (rid RowID) appendTo(data []byte) []byte { data = rid.EdsID.appendTo(data) return binary.BigEndian.AppendUint16(data, rid.RowIndex) } + +func (rid RowID) key() any { + return rid +} diff --git a/share/shwap/sample_hasher.go b/share/shwap/sample_hasher.go index 6aa4845d34..8ac585d8a7 100644 --- a/share/shwap/sample_hasher.go +++ b/share/shwap/sample_hasher.go @@ -19,7 +19,7 @@ func (h *SampleHasher) Write(data []byte) (int, error) { return 0, err } - if err := sampleVerifiers.Verify(s.SampleID, *s); err != nil { + if err := rootVerifiers.Verify(s); err != nil { err = fmt.Errorf("verifying Sample: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/sample_hasher_test.go b/share/shwap/sample_hasher_test.go index fc8d1e2ef1..9f616c5e63 100644 --- a/share/shwap/sample_hasher_test.go +++ b/share/shwap/sample_hasher_test.go @@ -23,9 +23,7 @@ func TestSampleHasher(t *testing.T) { sample, err := NewSampleFromEDS(RowProofType, 10, square, 1) require.NoError(t, err) - sampleVerifiers.Add(sample.SampleID, func(s Sample) error { - return s.Verify(root) - }) + rootVerifiers.Add(sample.SampleID, root) data, err := sample.MarshalBinary() require.NoError(t, err) diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 97739eccc1..44e4684117 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -39,11 +39,7 @@ func NewSampleID(height uint64, smplIdx int, root *share.Root) (SampleID, error) ShareIndex: shrIdx, } - verifyFn := func(s Sample) error { - return s.Verify(root) - } - sampleVerifiers.Add(sid, verifyFn) - + rootVerifiers.Add(sid, root) return sid, sid.Verify(root) } @@ -125,10 +121,14 @@ func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.B // Release releases the verifier of the SampleID. func (sid SampleID) Release() { - sampleVerifiers.Delete(sid) + rootVerifiers.Delete(sid) } func (sid SampleID) appendTo(data []byte) []byte { data = sid.RowID.appendTo(data) return binary.BigEndian.AppendUint16(data, sid.ShareIndex) } + +func (sid SampleID) key() any { + return sid +} diff --git a/share/shwap/shwap.go b/share/shwap/shwap.go index bb8e414094..51b494e580 100644 --- a/share/shwap/shwap.go +++ b/share/shwap/shwap.go @@ -9,6 +9,8 @@ import ( "github.com/ipfs/go-cid" logger "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" + + "github.com/celestiaorg/celestia-node/share" ) var log = logger.Logger("shwap") @@ -55,31 +57,27 @@ func init() { }) } -var ( - rowVerifiers verifiers[RowID, Row] - sampleVerifiers verifiers[SampleID, Sample] - dataVerifiers verifiers[DataID, Data] -) +var rootVerifiers verifiers -type verifiers[ID comparable, V any] struct { +type verifiers struct { mp sync.Map } -func (v *verifiers[ID, V]) Add(id ID, f func(V) error) { - v.mp.Store(id, f) +func (vs *verifiers) Add(id id, root *share.Root) { + vs.mp.Store(id.key(), root) } -func (v *verifiers[ID, V]) Verify(id ID, val V) error { - f, ok := v.mp.LoadAndDelete(id) +func (vs *verifiers) Verify(v verifier) error { + r, ok := vs.mp.LoadAndDelete(v.key()) if !ok { return fmt.Errorf("no verifier") } - return f.(func(V) error)(val) + return v.Verify(r.(*share.Root)) } -func (v *verifiers[ID, V]) Delete(id ID) { - v.mp.Delete(id) +func (vs *verifiers) Delete(id id) { + vs.mp.Delete(id.key()) } // DefaultAllowlist keeps default list of hashes allowed in the network. 
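The reworked verifiers container above is a one-shot, request-scoped handshake: a root is registered under the ID before the Bitswap request goes out, and LoadAndDelete hands it to exactly one response for verification. A minimal sketch of that consume-once semantics, independent of the shwap types:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// oneShot mirrors the verifiers container: roots are registered per request
// and handed out at most once, via LoadAndDelete, to the first response.
type oneShot struct{ mp sync.Map }

func (o *oneShot) Add(key string, root []byte) {
	o.mp.Store(key, root)
}

func (o *oneShot) Verify(key string, got []byte) error {
	r, ok := o.mp.LoadAndDelete(key)
	if !ok {
		return fmt.Errorf("no verifier")
	}
	if !bytes.Equal(r.([]byte), got) {
		return fmt.Errorf("root mismatch")
	}
	return nil
}

func main() {
	var reg oneShot
	reg.Add("row-1", []byte("root"))

	fmt.Println(reg.Verify("row-1", []byte("root"))) // <nil>
	fmt.Println(reg.Verify("row-1", []byte("root"))) // no verifier: already consumed
}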
@@ -116,3 +114,14 @@ func validateCID(cid cid.Cid) error { return nil } + +// id represents an interface for objects that can produce a key. +type id interface { + key() any +} + +// verifier represents an interface for verification of data roots. +type verifier interface { + id + Verify(root *share.Root) error +} diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index cf851f58af..b631d5c520 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -47,9 +47,7 @@ func TestSampleRoundtripGetBlock(t *testing.T) { smpl, err := NewSampleFromEDS(RowProofType, i, eds, height) // TODO: Col require.NoError(t, err) - sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { - return sample.Verify(root) - }) + rootVerifiers.Add(smpl.SampleID, root) cid := smpl.Cid() blkOut, err := client.GetBlock(ctx, cid) @@ -82,10 +80,7 @@ func TestSampleRoundtripGetBlocks(t *testing.T) { smpl, err := NewSampleFromEDS(RowProofType, i, eds, height) // TODO: Col require.NoError(t, err) set.Add(smpl.Cid()) - - sampleVerifiers.Add(smpl.SampleID, func(sample Sample) error { - return sample.Verify(root) - }) + rootVerifiers.Add(smpl.SampleID, root) } blks, err := client.GetBlocks(ctx, set.Keys()) @@ -125,9 +120,7 @@ func TestRowRoundtripGetBlock(t *testing.T) { row, err := NewRowFromEDS(height, i, eds) require.NoError(t, err) - rowVerifiers.Add(row.RowID, func(row Row) error { - return row.Verify(root) - }) + rootVerifiers.Add(row.RowID, root) cid := row.Cid() blkOut, err := client.GetBlock(ctx, cid) @@ -159,10 +152,7 @@ func TestRowRoundtripGetBlocks(t *testing.T) { row, err := NewRowFromEDS(height, i, eds) require.NoError(t, err) set.Add(row.Cid()) - - rowVerifiers.Add(row.RowID, func(row Row) error { - return row.Verify(root) - }) + rootVerifiers.Add(row.RowID, root) } blks, err := client.GetBlocks(ctx, set.Keys()) @@ -200,9 +190,7 @@ func TestDataRoundtripGetBlock(t *testing.T) { require.NoError(t, err) for _, nd := range nds { - dataVerifiers.Add(nd.DataID, func(data Data) error { - return data.Verify(root) - }) + rootVerifiers.Add(nd.DataID, root) cid := nd.Cid() blkOut, err := client.GetBlock(ctx, cid) @@ -233,10 +221,7 @@ func TestDataRoundtripGetBlocks(t *testing.T) { set := cid.NewSet() for _, nd := range nds { set.Add(nd.Cid()) - - dataVerifiers.Add(nd.DataID, func(data Data) error { - return data.Verify(root) - }) + rootVerifiers.Add(nd.DataID, root) } blks, err := client.GetBlocks(ctx, set.Keys()) From 6f5db44d948498197dc76cdd112b6704294d5089 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 16:04:46 +0400 Subject: [PATCH 126/132] sort imports --- nodebuilder/pruner/module.go | 2 +- nodebuilder/share/constructors.go | 2 +- nodebuilder/share/module.go | 2 +- pruner/full/pruner.go | 2 +- share/shwap/data_hasher_test.go | 4 +--- share/shwap/eds_id.go | 1 + share/shwap/getter/getter.go | 8 +++++--- share/shwap/getter/reconstruction.go | 4 +++- share/shwap/getter/retriever.go | 4 ++-- share/shwap/getter/retriever_test.go | 4 ++-- share/store/file/axis_half.go | 1 + share/store/file/axis_half_test.go | 6 ++++-- share/store/file/q1q4_file.go | 6 ++++-- share/store/file/q1q4_file_test.go | 6 ++++-- share/store/file/square.go | 3 ++- 15 files changed, 33 insertions(+), 22 deletions(-) diff --git a/nodebuilder/pruner/module.go b/nodebuilder/pruner/module.go index 553ac32476..ccb06d7188 100644 --- a/nodebuilder/pruner/module.go +++ b/nodebuilder/pruner/module.go @@ -2,7 +2,6 @@ package pruner import ( "context" - "github.com/celestiaorg/celestia-node/share/store" 
"go.uber.org/fx" @@ -11,6 +10,7 @@ import ( "github.com/celestiaorg/celestia-node/pruner/archival" "github.com/celestiaorg/celestia-node/pruner/full" "github.com/celestiaorg/celestia-node/pruner/light" + "github.com/celestiaorg/celestia-node/share/store" ) func ConstructModule(tp node.Type, cfg *Config) fx.Option { diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 097f267490..ed4578f712 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -1,7 +1,6 @@ package share import ( - shwap_getter "github.com/celestiaorg/celestia-node/share/shwap/getter" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing" @@ -10,6 +9,7 @@ import ( "github.com/celestiaorg/celestia-node/share/getters" disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" "github.com/celestiaorg/celestia-node/share/p2p/peers" + shwap_getter "github.com/celestiaorg/celestia-node/share/shwap/getter" ) const ( diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index fde0636822..3792012fd8 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -2,7 +2,6 @@ package share import ( "context" - shwap_getter "github.com/celestiaorg/celestia-node/share/shwap/getter" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/p2p/net/conngater" @@ -23,6 +22,7 @@ import ( "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + shwap_getter "github.com/celestiaorg/celestia-node/share/shwap/getter" "github.com/celestiaorg/celestia-node/share/store" ) diff --git a/pruner/full/pruner.go b/pruner/full/pruner.go index eb55be962e..f2f687e45a 100644 --- a/pruner/full/pruner.go +++ b/pruner/full/pruner.go @@ -3,13 +3,13 @@ package full import ( "context" "errors" - "github.com/celestiaorg/celestia-node/share/store" "github.com/filecoin-project/dagstore" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/store" ) var log = logging.Logger("pruner/full") diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go index 72be5e79e7..8d9c778fe2 100644 --- a/share/shwap/data_hasher_test.go +++ b/share/shwap/data_hasher_test.go @@ -24,9 +24,7 @@ func TestDataHasher(t *testing.T) { require.NoError(t, err) data := datas[0] - dataVerifiers.Add(data.DataID, func(data Data) error { - return data.Verify(root) - }) + rootVerifiers.Add(data.DataID, root) dat, err := data.MarshalBinary() require.NoError(t, err) diff --git a/share/shwap/eds_id.go b/share/shwap/eds_id.go index 5044fa6b5e..3ac2069247 100644 --- a/share/shwap/eds_id.go +++ b/share/shwap/eds_id.go @@ -3,6 +3,7 @@ package shwap import ( "encoding/binary" "fmt" + "github.com/celestiaorg/celestia-node/share" ) diff --git a/share/shwap/getter/getter.go b/share/shwap/getter/getter.go index 782cac26b2..7e865ad27e 100644 --- a/share/shwap/getter/getter.go +++ b/share/shwap/getter/getter.go @@ -3,16 +3,18 @@ package shwap_getter import ( "context" "fmt" - "github.com/celestiaorg/celestia-app/pkg/wrapper" - "github.com/celestiaorg/celestia-node/share/shwap" - "github.com/celestiaorg/rsmt2d" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/boxo/exchange" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" + 
"github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" ) type Getter struct { diff --git a/share/shwap/getter/reconstruction.go b/share/shwap/getter/reconstruction.go index b0c6fc866f..132b1eeeb0 100644 --- a/share/shwap/getter/reconstruction.go +++ b/share/shwap/getter/reconstruction.go @@ -2,9 +2,11 @@ package shwap_getter import ( "context" + + "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/rsmt2d" ) type ReconstructionGetter struct { diff --git a/share/shwap/getter/retriever.go b/share/shwap/getter/retriever.go index 7788ec0981..4602ed0820 100644 --- a/share/shwap/getter/retriever.go +++ b/share/shwap/getter/retriever.go @@ -3,8 +3,6 @@ package shwap_getter import ( "context" "errors" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/shwap" "sync" "sync/atomic" "time" @@ -19,8 +17,10 @@ import ( "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/shwap" ) // TODO(@walldiss): diff --git a/share/shwap/getter/retriever_test.go b/share/shwap/getter/retriever_test.go index 2694b72330..dba857ca8e 100644 --- a/share/shwap/getter/retriever_test.go +++ b/share/shwap/getter/retriever_test.go @@ -2,16 +2,16 @@ package shwap_getter import ( "context" - "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "go.uber.org/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/atomic" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" "github.com/celestiaorg/celestia-node/share/testing/edstest" ) diff --git a/share/store/file/axis_half.go b/share/store/file/axis_half.go index 2737839a22..128899a3d4 100644 --- a/share/store/file/axis_half.go +++ b/share/store/file/axis_half.go @@ -2,6 +2,7 @@ package file import ( "fmt" + "github.com/celestiaorg/celestia-node/share" ) diff --git a/share/store/file/axis_half_test.go b/share/store/file/axis_half_test.go index eb5b129d0f..a96910ce79 100644 --- a/share/store/file/axis_half_test.go +++ b/share/store/file/axis_half_test.go @@ -1,9 +1,11 @@ package file import ( - "github.com/celestiaorg/celestia-node/share/testing/sharetest" - "github.com/stretchr/testify/require" "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/testing/sharetest" ) func TestExtendAxisHalf(t *testing.T) { diff --git a/share/store/file/q1q4_file.go b/share/store/file/q1q4_file.go index 804b8ffd3d..78252a4706 100644 --- a/share/store/file/q1q4_file.go +++ b/share/store/file/q1q4_file.go @@ -3,9 +3,11 @@ package file import ( "context" "fmt" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/rsmt2d" "io" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" ) var _ EdsFile = (*Q1Q4File)(nil) diff --git a/share/store/file/q1q4_file_test.go b/share/store/file/q1q4_file_test.go index 53aedb7555..f488d4d2b1 100644 --- a/share/store/file/q1q4_file_test.go +++ 
b/share/store/file/q1q4_file_test.go @@ -1,9 +1,11 @@ package file import ( - "github.com/celestiaorg/rsmt2d" - "github.com/stretchr/testify/require" "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" ) func TestQ1Q4File(t *testing.T) { diff --git a/share/store/file/square.go b/share/store/file/square.go index ab145dfb9a..e084d53af0 100644 --- a/share/store/file/square.go +++ b/share/store/file/square.go @@ -5,9 +5,10 @@ import ( "bytes" "context" "fmt" - "golang.org/x/sync/errgroup" "io" + "golang.org/x/sync/errgroup" + "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/rsmt2d" From 7ea43f8bdd49bea3a4b5938865f87ffff8649904 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 16:52:13 +0400 Subject: [PATCH 127/132] improve locking in store --- share/store/store.go | 33 ++++++++++++++++++++++++--------- share/store/striplock.go | 6 +++--- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/share/store/store.go b/share/store/store.go index 2a984d2378..16527cf31e 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -76,19 +76,25 @@ func NewStore(params *Parameters, basePath string) (*Store, error) { return nil, err } - // ensure blocks folder - if err := ensureFolder(basePath + blocksPath); err != nil { - return nil, fmt.Errorf("ensure blocks folder: %w", err) + // Ensure the blocks folder exists or is created. + blocksFolderPath := basePath + blocksPath + if err := ensureFolder(blocksFolderPath); err != nil { + log.Errorf("Failed to ensure the existence of the blocks folder at '%s': %s", blocksFolderPath, err) + return nil, fmt.Errorf("ensure blocks folder '%s': %w", blocksFolderPath, err) } - // ensure heights folder - if err := ensureFolder(basePath + heightsPath); err != nil { - return nil, fmt.Errorf("ensure blocks folder: %w", err) + // Ensure the heights folder exists or is created. + heightsFolderPath := basePath + heightsPath + if err := ensureFolder(heightsFolderPath); err != nil { + log.Errorf("Failed to ensure the existence of the heights folder at '%s': %s", heightsFolderPath, err) + return nil, fmt.Errorf("ensure heights folder '%s': %w", heightsFolderPath, err) } - // ensure empty heights file - if err := ensureFile(basePath + emptyHeightsFile); err != nil { - return nil, fmt.Errorf("ensure empty heights file: %w", err) + // Ensure the empty heights file exists or is created. 
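The ensureFolder and ensureFile helpers invoked in the store constructor above are not part of this diff; a plausible minimal implementation, assuming the store only needs idempotent creation at startup, could look like this:

package main

import (
	"fmt"
	"os"
)

// ensureFolder creates the directory if it does not exist yet; MkdirAll is a
// no-op for existing paths, so the call is safe on every startup.
func ensureFolder(path string) error {
	return os.MkdirAll(path, 0o755)
}

// ensureFile creates an empty file if missing, leaving existing content alone.
func ensureFile(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return fmt.Errorf("creating file %s: %w", path, err)
	}
	return f.Close()
}

func main() {
	if err := ensureFolder(os.TempDir() + "/blocks"); err != nil {
		panic(err)
	}
	if err := ensureFile(os.TempDir() + "/empty_heights"); err != nil {
		panic(err)
	}
	fmt.Println("store layout ready")
}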
+ emptyHeightsFilePath := basePath + emptyHeightsFile + if err := ensureFile(emptyHeightsFilePath); err != nil { + log.Errorf("Failed to ensure the empty heights file at '%s': %s", emptyHeightsFilePath, err) + return nil, fmt.Errorf("ensure empty heights file '%s': %w", emptyHeightsFilePath, err) } recentBlocksCache, err := cache.NewFileCache("recent", 1) @@ -355,6 +361,12 @@ func (s *Store) remove(height uint64) error { return fmt.Errorf("removing from cache: %w", err) } + // additionally lock by datahash to prevent concurrent access to the same underlying file + // using links from different heights + dlock := s.stripLock.byDatahash(f.DataHash()) + dlock.Lock() + defer dlock.Unlock() + // remove hard link by height heightPath := s.basepath + heightsPath + fmt.Sprintf("%d", height) if err = os.Remove(heightPath); err != nil { @@ -452,6 +464,9 @@ func (s *Store) storeEmptyHeights() error { } defer utils.CloseAndLog(log, "empty heights file", file) + s.emptyHeightsLock.RLock() + defer s.emptyHeightsLock.RUnlock() + encoder := gob.NewEncoder(file) if err := encoder.Encode(s.emptyHeights); err != nil { return fmt.Errorf("encoding empty heights: %w", err) diff --git a/share/store/striplock.go b/share/store/striplock.go index 9d17dc5881..69cee69f2d 100644 --- a/share/store/striplock.go +++ b/share/store/striplock.go @@ -1,7 +1,6 @@ package store import ( - "encoding/binary" "sync" "github.com/celestiaorg/celestia-node/share" @@ -33,8 +32,9 @@ func (l *striplock) byHeight(height uint64) *sync.RWMutex { } func (l *striplock) byDatahash(datahash share.DataHash) *sync.RWMutex { - key := binary.LittleEndian.Uint16(datahash[len(datahash)-3:]) - lkIdx := key % uint16(len(l.datahashes)) + // Use the last 2 bytes of the datahash as hash to distribute the locks + last := uint16(datahash[len(datahash)-1]) | uint16(datahash[len(datahash)-2])<<8 + lkIdx := last % uint16(len(l.datahashes)) return l.datahashes[lkIdx] } From b4b5a034f6bc8916ebdf841940253192927d043f Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 1 Apr 2024 19:10:32 +0400 Subject: [PATCH 128/132] store roots instead of verifier --- share/shwap/data_hasher.go | 8 +++++++- share/shwap/data_hasher_test.go | 2 +- share/shwap/data_id.go | 6 +++--- share/shwap/row_hasher.go | 10 +++++++-- share/shwap/row_hasher_test.go | 2 +- share/shwap/row_id.go | 5 +++-- share/shwap/sample_hasher.go | 10 +++++++-- share/shwap/sample_hasher_test.go | 2 +- share/shwap/sample_id.go | 5 +++-- share/shwap/shwap.go | 34 ++++++------------------------- share/shwap/shwap_test.go | 12 +++++------ 11 files changed, 47 insertions(+), 49 deletions(-) diff --git a/share/shwap/data_hasher.go b/share/shwap/data_hasher.go index 1ad554930c..932172b523 100644 --- a/share/shwap/data_hasher.go +++ b/share/shwap/data_hasher.go @@ -19,7 +19,13 @@ func (h *DataHasher) Write(data []byte) (int, error) { return 0, err } - if err := rootVerifiers.Verify(d); err != nil { + root, err := getRoot(d.DataID) + if err != nil { + err = fmt.Errorf("getting root: %w", err) + return 0, err + } + + if err := d.Verify(root); err != nil { err = fmt.Errorf("verifying Data: %w", err) log.Error(err) return 0, err diff --git a/share/shwap/data_hasher_test.go b/share/shwap/data_hasher_test.go index 8d9c778fe2..9dfb0ce1c6 100644 --- a/share/shwap/data_hasher_test.go +++ b/share/shwap/data_hasher_test.go @@ -24,7 +24,7 @@ func TestDataHasher(t *testing.T) { require.NoError(t, err) data := datas[0] - rootVerifiers.Add(data.DataID, root) + globalRootsCache.Store(data.DataID, root) dat, err := data.MarshalBinary() 
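The striplock change above swaps binary.LittleEndian.Uint16 over a 3-byte tail slice for an explicit combination of the last two bytes of the datahash; either way, the tail of a cryptographic hash is effectively uniform, so the lock shards stay evenly loaded. A sketch of the sharding math with a hypothetical pool of 16 mutexes:

package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

const numLocks = 16 // hypothetical pool size; the real striplock picks its own

var locks [numLocks]*sync.RWMutex

func init() {
	for i := range locks {
		locks[i] = &sync.RWMutex{}
	}
}

// byDatahash maps a hash to one of numLocks mutexes using its last two bytes,
// the same scheme as the striplock above: cheap, stable, and well distributed.
func byDatahash(datahash []byte) *sync.RWMutex {
	last := uint16(datahash[len(datahash)-1]) | uint16(datahash[len(datahash)-2])<<8
	return locks[last%numLocks]
}

func main() {
	h := sha256.Sum256([]byte("block"))
	lk := byDatahash(h[:])
	lk.Lock()
	defer lk.Unlock()
	fmt.Println("locked shard for", fmt.Sprintf("%x", h[:4]))
}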
require.NoError(t, err) diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index 53369d6b30..742cbbc05b 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -36,8 +36,8 @@ func NewDataID(height uint64, rowIdx uint16, namespace share.Namespace, root *sh DataNamespace: string(namespace), } - rootVerifiers.Add(did, root) - + // Store the root in the cache for verification later + globalRootsCache.Store(did, root) return did, did.Verify(root) } @@ -130,7 +130,7 @@ func (s DataID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Block // Release releases the verifier of the DataID. func (s DataID) Release() { - rootVerifiers.Delete(s) + globalRootsCache.Delete(s) } func (s DataID) appendTo(data []byte) []byte { diff --git a/share/shwap/row_hasher.go b/share/shwap/row_hasher.go index 2f246dfa9a..07ecdcc1d4 100644 --- a/share/shwap/row_hasher.go +++ b/share/shwap/row_hasher.go @@ -19,8 +19,14 @@ func (h *RowHasher) Write(data []byte) (int, error) { return 0, err } - if err := rootVerifiers.Verify(row); err != nil { - err = fmt.Errorf("verifying Row: %w", err) + root, err := getRoot(row.RowID) + if err != nil { + err = fmt.Errorf("getting root: %w", err) + return 0, err + } + + if err := row.Verify(root); err != nil { + err = fmt.Errorf("verifying Data: %w", err) log.Error(err) return 0, err } diff --git a/share/shwap/row_hasher_test.go b/share/shwap/row_hasher_test.go index 1b052a4d6b..cf0f109d10 100644 --- a/share/shwap/row_hasher_test.go +++ b/share/shwap/row_hasher_test.go @@ -23,7 +23,7 @@ func TestRowHasher(t *testing.T) { row, err := NewRowFromEDS(2, 1, square) require.NoError(t, err) - rootVerifiers.Add(row.RowID, root) + globalRootsCache.Store(row.RowID, root) data, err := row.MarshalBinary() require.NoError(t, err) diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index 1acd344fcb..ba500e299f 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -35,8 +35,9 @@ func NewRowID(height uint64, rowIdx uint16, root *share.Root) (RowID, error) { }, RowIndex: rowIdx, } - rootVerifiers.Add(rid, root) + // Store the root in the cache for verification later + globalRootsCache.Store(rid, root) return rid, rid.Verify(root) } @@ -126,7 +127,7 @@ func (rid RowID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.Bloc // Release releases the verifier of the RowID. 
func (rid RowID) Release() { - rootVerifiers.Delete(rid) + globalRootsCache.Delete(rid) } func (rid RowID) appendTo(data []byte) []byte { diff --git a/share/shwap/sample_hasher.go b/share/shwap/sample_hasher.go index 8ac585d8a7..7d5cdb7f30 100644 --- a/share/shwap/sample_hasher.go +++ b/share/shwap/sample_hasher.go @@ -19,8 +19,14 @@ func (h *SampleHasher) Write(data []byte) (int, error) { return 0, err } - if err := rootVerifiers.Verify(s); err != nil { - err = fmt.Errorf("verifying Sample: %w", err) + root, err := getRoot(s.SampleID) + if err != nil { + err = fmt.Errorf("getting root: %w", err) + return 0, err + } + + if err := s.Verify(root); err != nil { + err = fmt.Errorf("verifying Data: %w", err) log.Error(err) return 0, err } diff --git a/share/shwap/sample_hasher_test.go b/share/shwap/sample_hasher_test.go index 9f616c5e63..f2448507a4 100644 --- a/share/shwap/sample_hasher_test.go +++ b/share/shwap/sample_hasher_test.go @@ -23,7 +23,7 @@ func TestSampleHasher(t *testing.T) { sample, err := NewSampleFromEDS(RowProofType, 10, square, 1) require.NoError(t, err) - rootVerifiers.Add(sample.SampleID, root) + globalRootsCache.Store(sample.SampleID, root) data, err := sample.MarshalBinary() require.NoError(t, err) diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 44e4684117..80726ba172 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -39,7 +39,8 @@ func NewSampleID(height uint64, smplIdx int, root *share.Root) (SampleID, error) ShareIndex: shrIdx, } - rootVerifiers.Add(sid, root) + // Store the root in the cache for verification later + globalRootsCache.Store(sid, root) return sid, sid.Verify(root) } @@ -121,7 +122,7 @@ func (sid SampleID) BlockFromFile(ctx context.Context, f file.EdsFile) (blocks.B // Release releases the verifier of the SampleID. func (sid SampleID) Release() { - rootVerifiers.Delete(sid) + globalRootsCache.Delete(sid) } func (sid SampleID) appendTo(data []byte) []byte { diff --git a/share/shwap/shwap.go b/share/shwap/shwap.go index 51b494e580..53b80574c6 100644 --- a/share/shwap/shwap.go +++ b/share/shwap/shwap.go @@ -57,27 +57,16 @@ func init() { }) } -var rootVerifiers verifiers +// TODO(@walldiss): store refscount along with roots to avoid verify errors on parallel requests +var globalRootsCache sync.Map -type verifiers struct { - mp sync.Map -} - -func (vs *verifiers) Add(id id, root *share.Root) { - vs.mp.Store(id.key(), root) -} - -func (vs *verifiers) Verify(v verifier) error { - r, ok := vs.mp.LoadAndDelete(v.key()) +func getRoot(key any) (*share.Root, error) { + r, ok := globalRootsCache.Load(key) if !ok { - return fmt.Errorf("no verifier") + return nil, fmt.Errorf("no verifier") } - return v.Verify(r.(*share.Root)) -} - -func (vs *verifiers) Delete(id id) { - vs.mp.Delete(id.key()) + return r.(*share.Root), nil } // DefaultAllowlist keeps default list of hashes allowed in the network. @@ -114,14 +103,3 @@ func validateCID(cid cid.Cid) error { return nil } - -// id represents an interface for objects that can produce a key. -type id interface { - key() any -} - -// verifier represents an interface for verification of data roots. 
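The TODO on globalRootsCache above names the remaining gap: Release from one request can delete a root that a concurrent request for the same ID still needs. One way to close it, sketched here under the assumption that every Add is paired with exactly one Release, is to keep a reference count beside the root:

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	root string // stand-in for *share.Root
	refs int
}

// rootsCache is a hypothetical refcounted variant of globalRootsCache: the
// root stays resident until every registered request has released it.
type rootsCache struct {
	mu sync.Mutex
	mp map[string]*entry
}

func (c *rootsCache) Add(key, root string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.mp[key]; ok {
		e.refs++
		return
	}
	c.mp[key] = &entry{root: root, refs: 1}
}

func (c *rootsCache) Get(key string) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.mp[key]
	if !ok {
		return "", false
	}
	return e.root, true
}

func (c *rootsCache) Release(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.mp[key]; ok {
		if e.refs--; e.refs == 0 {
			delete(c.mp, key)
		}
	}
}

func main() {
	c := &rootsCache{mp: map[string]*entry{}}
	c.Add("sample-1", "root-A") // request 1
	c.Add("sample-1", "root-A") // concurrent request 2
	c.Release("sample-1")       // request 1 done
	r, ok := c.Get("sample-1")  // request 2 still sees the root
	fmt.Println(r, ok)          // root-A true
	c.Release("sample-1")
	_, ok = c.Get("sample-1")
	fmt.Println(ok) // false
}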
-type verifier interface { - id - Verify(root *share.Root) error -} diff --git a/share/shwap/shwap_test.go b/share/shwap/shwap_test.go index b631d5c520..4610655d96 100644 --- a/share/shwap/shwap_test.go +++ b/share/shwap/shwap_test.go @@ -47,7 +47,7 @@ func TestSampleRoundtripGetBlock(t *testing.T) { smpl, err := NewSampleFromEDS(RowProofType, i, eds, height) // TODO: Col require.NoError(t, err) - rootVerifiers.Add(smpl.SampleID, root) + globalRootsCache.Store(smpl.SampleID, root) cid := smpl.Cid() blkOut, err := client.GetBlock(ctx, cid) @@ -80,7 +80,7 @@ func TestSampleRoundtripGetBlocks(t *testing.T) { smpl, err := NewSampleFromEDS(RowProofType, i, eds, height) // TODO: Col require.NoError(t, err) set.Add(smpl.Cid()) - rootVerifiers.Add(smpl.SampleID, root) + globalRootsCache.Store(smpl.SampleID, root) } blks, err := client.GetBlocks(ctx, set.Keys()) @@ -120,7 +120,7 @@ func TestRowRoundtripGetBlock(t *testing.T) { row, err := NewRowFromEDS(height, i, eds) require.NoError(t, err) - rootVerifiers.Add(row.RowID, root) + globalRootsCache.Store(row.RowID, root) cid := row.Cid() blkOut, err := client.GetBlock(ctx, cid) @@ -152,7 +152,7 @@ func TestRowRoundtripGetBlocks(t *testing.T) { row, err := NewRowFromEDS(height, i, eds) require.NoError(t, err) set.Add(row.Cid()) - rootVerifiers.Add(row.RowID, root) + globalRootsCache.Store(row.RowID, root) } blks, err := client.GetBlocks(ctx, set.Keys()) @@ -190,7 +190,7 @@ func TestDataRoundtripGetBlock(t *testing.T) { require.NoError(t, err) for _, nd := range nds { - rootVerifiers.Add(nd.DataID, root) + globalRootsCache.Store(nd.DataID, root) cid := nd.Cid() blkOut, err := client.GetBlock(ctx, cid) @@ -221,7 +221,7 @@ func TestDataRoundtripGetBlocks(t *testing.T) { set := cid.NewSet() for _, nd := range nds { set.Add(nd.Cid()) - rootVerifiers.Add(nd.DataID, root) + globalRootsCache.Store(nd.DataID, root) } blks, err := client.GetBlocks(ctx, set.Keys()) From 623a3d337b16b7d2dcf8c97554bb4fe4643b2e9c Mon Sep 17 00:00:00 2001 From: Wondertan Date: Wed, 10 Apr 2024 17:51:18 +0200 Subject: [PATCH 129/132] remove unnecessary key methods --- share/shwap/data_id.go | 4 ---- share/shwap/eds_id.go | 16 ++++++++-------- share/shwap/row_id.go | 4 ---- share/shwap/sample_id.go | 4 ---- 4 files changed, 8 insertions(+), 20 deletions(-) diff --git a/share/shwap/data_id.go b/share/shwap/data_id.go index 742cbbc05b..7241cddbc9 100644 --- a/share/shwap/data_id.go +++ b/share/shwap/data_id.go @@ -137,7 +137,3 @@ func (s DataID) appendTo(data []byte) []byte { data = s.RowID.appendTo(data) return append(data, s.DataNamespace...) } - -func (s DataID) key() any { - return s -} diff --git a/share/shwap/eds_id.go b/share/shwap/eds_id.go index 3ac2069247..83cb07ecad 100644 --- a/share/shwap/eds_id.go +++ b/share/shwap/eds_id.go @@ -26,9 +26,9 @@ func NewEdsID(height uint64, root *share.Root) (EdsID, error) { } // MarshalBinary encodes EdsID into binary form. -func (rid EdsID) MarshalBinary() []byte { +func (eid EdsID) MarshalBinary() []byte { data := make([]byte, 0, EdsIDSize) - return rid.appendTo(data) + return eid.appendTo(data) } // EdsIDFromBinary decodes EdsID from binary form. @@ -41,21 +41,21 @@ func EdsIDFromBinary(data []byte) (rid EdsID, err error) { } // Verify verifies EdsID fields. 
-func (rid EdsID) Verify(root *share.Root) error { +func (eid EdsID) Verify(root *share.Root) error { if root == nil { return fmt.Errorf("nil Root") } - if rid.Height == 0 { + if eid.Height == 0 { return fmt.Errorf("zero Height") } return nil } -func (rid EdsID) GetHeight() uint64 { - return rid.Height +func (eid EdsID) GetHeight() uint64 { + return eid.Height } -func (rid EdsID) appendTo(data []byte) []byte { - return binary.BigEndian.AppendUint64(data, rid.Height) +func (eid EdsID) appendTo(data []byte) []byte { + return binary.BigEndian.AppendUint64(data, eid.Height) } diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go index ba500e299f..79e61b38f7 100644 --- a/share/shwap/row_id.go +++ b/share/shwap/row_id.go @@ -134,7 +134,3 @@ func (rid RowID) appendTo(data []byte) []byte { data = rid.EdsID.appendTo(data) return binary.BigEndian.AppendUint16(data, rid.RowIndex) } - -func (rid RowID) key() any { - return rid -} diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go index 80726ba172..96a6e9ace2 100644 --- a/share/shwap/sample_id.go +++ b/share/shwap/sample_id.go @@ -129,7 +129,3 @@ func (sid SampleID) appendTo(data []byte) []byte { data = sid.RowID.appendTo(data) return binary.BigEndian.AppendUint16(data, sid.ShareIndex) } - -func (sid SampleID) key() any { - return sid -} From a02f31d1d018df28fe56b6abcc65a79d33c24e63 Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 11 Apr 2024 18:24:29 +0400 Subject: [PATCH 130/132] wrap file on get in store --- share/store/blockstore.go | 5 ++--- share/store/store.go | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/share/store/blockstore.go b/share/store/blockstore.go index f5d61a4c22..89e2de979e 100644 --- a/share/store/blockstore.go +++ b/share/store/blockstore.go @@ -171,11 +171,10 @@ func (bs *Blockstore) HashOnRead(bool) { func (bs *Blockstore) openFile(height uint64) cache.OpenFileFn { return func(ctx context.Context) (file.EdsFile, error) { - path := bs.store.basepath + heightsPath + fmt.Sprintf("%d", height) - f, err := file.OpenQ1Q4File(path) + f, err := bs.store.GetByHeight(ctx, height) if err != nil { return nil, fmt.Errorf("opening ODS file: %w", err) } - return wrappedFile(f), nil + return f, nil } } diff --git a/share/store/store.go b/share/store/store.go index 16527cf31e..cb7151f603 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -209,14 +209,14 @@ func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { } path := s.basepath + blocksPath + datahash.String() - odsFile, err := file.OpenQ1Q4File(path) + f, err := file.OpenQ1Q4File(path) if err != nil { if os.IsNotExist(err) { return nil, ErrNotFound } return nil, fmt.Errorf("opening ODS file: %w", err) } - return odsFile, nil + return wrappedFile(f), nil } func (s *Store) LinkHashToHeight(_ context.Context, datahash share.DataHash, height uint64) error { @@ -274,14 +274,14 @@ func (s *Store) getByHeight(height uint64) (file.EdsFile, error) { } path := s.basepath + heightsPath + fmt.Sprintf("%d", height) - odsFile, err := file.OpenQ1Q4File(path) + f, err = file.OpenQ1Q4File(path) if err != nil { if os.IsNotExist(err) { return nil, ErrNotFound } return nil, fmt.Errorf("opening ODS file: %w", err) } - return odsFile, nil + return wrappedFile(f), nil } func (s *Store) HasByHash(ctx context.Context, datahash share.DataHash) (bool, error) { From 94f1f024970e93a48949f373929d08769d6b67ef Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Apr 2024 13:04:38 +0400 Subject: [PATCH 131/132] refactor file wrapping in store --- 
share/store/blockstore.go | 4 ++-- share/store/store.go | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/share/store/blockstore.go b/share/store/blockstore.go index 89e2de979e..7368df6ea5 100644 --- a/share/store/blockstore.go +++ b/share/store/blockstore.go @@ -171,10 +171,10 @@ func (bs *Blockstore) HashOnRead(bool) { func (bs *Blockstore) openFile(height uint64) cache.OpenFileFn { return func(ctx context.Context) (file.EdsFile, error) { - f, err := bs.store.GetByHeight(ctx, height) + f, err := bs.store.getByHeight(height) if err != nil { return nil, fmt.Errorf("opening ODS file: %w", err) } - return f, nil + return fileLoader(f)(ctx) } } diff --git a/share/store/store.go b/share/store/store.go index cb7151f603..8262109095 100644 --- a/share/store/store.go +++ b/share/store/store.go @@ -200,7 +200,7 @@ func (s *Store) GetByHash(ctx context.Context, datahash share.DataHash) (file.Ed tNow := time.Now() f, err := s.getByHash(datahash) s.metrics.observeGet(ctx, time.Since(tNow), err != nil) - return f, err + return wrappedFile(f), err } func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { @@ -216,7 +216,7 @@ func (s *Store) getByHash(datahash share.DataHash) (file.EdsFile, error) { } return nil, fmt.Errorf("opening ODS file: %w", err) } - return wrappedFile(f), nil + return f, nil } func (s *Store) LinkHashToHeight(_ context.Context, datahash share.DataHash, height uint64) error { @@ -260,7 +260,7 @@ func (s *Store) GetByHeight(ctx context.Context, height uint64) (file.EdsFile, e tNow := time.Now() f, err := s.getByHeight(height) s.metrics.observeGet(ctx, time.Since(tNow), err != nil) - return f, err + return wrappedFile(f), err } func (s *Store) getByHeight(height uint64) (file.EdsFile, error) { @@ -281,7 +281,7 @@ func (s *Store) getByHeight(height uint64) (file.EdsFile, error) { } return nil, fmt.Errorf("opening ODS file: %w", err) } - return wrappedFile(f), nil + return f, nil } func (s *Store) HasByHash(ctx context.Context, datahash share.DataHash) (bool, error) { @@ -390,13 +390,13 @@ func (s *Store) remove(height uint64) error { func fileLoader(f file.EdsFile) cache.OpenFileFn { return func(ctx context.Context) (file.EdsFile, error) { - return wrappedFile(f), nil + withCache := file.WithProofsCache(f) + return wrappedFile(withCache), nil } } func wrappedFile(f file.EdsFile) file.EdsFile { - withCache := file.WithProofsCache(f) - closedOnce := file.WithClosedOnce(withCache) + closedOnce := file.WithClosedOnce(f) sanityChecked := file.WithValidation(closedOnce) return sanityChecked } From 7580cdfefd1b4f4a1a9bd62dc85e3f02fe5cb017 Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 15 Apr 2024 13:09:32 +0400 Subject: [PATCH 132/132] add validation conditions to shrex-nd server --- share/p2p/shrexnd/server.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go index 7905394f7b..a7553efcf9 100644 --- a/share/p2p/shrexnd/server.go +++ b/share/p2p/shrexnd/server.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/share/ipld" "time" "github.com/libp2p/go-libp2p/core/host" @@ -187,10 +188,19 @@ func (srv *Server) getNamespaceData(ctx context.Context, } defer utils.CloseAndLog(log, "file", file) + if toRow > file.Size()/2 { + // TODO(@Walldiss): needs refactoring for better handling + return nil, pb.StatusCode_NOT_FOUND, fmt.Errorf("toRow: (%d) exceeds file ods size: (%d)", toRow, file.Size()) + } + namespacedRows := 
make(share.NamespacedShares, 0, toRow-fromRow+1)
	for rowIdx := fromRow; rowIdx < toRow; rowIdx++ {
		data, err := file.Data(ctx, namespace, rowIdx)
		if err != nil {
+			if errors.Is(err, ipld.ErrNamespaceOutsideRange) {
+				// TODO(@Walldiss): needs refactoring for better handling
+				return nil, pb.StatusCode_NOT_FOUND, fmt.Errorf("namespace outside range for row %d: %w", rowIdx, err)
+			}
			return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving data: %w", err)
		}
		namespacedRows = append(namespacedRows, data)

@@ -254,5 +264,8 @@ func (srv *Server) observeStatus(ctx context.Context, status pb.StatusCode) {

 // validateRequest checks correctness of the request
 func validateRequest(req pb.GetSharesByNamespaceRequest) error {
+	if req.ToRow < req.FromRow {
+		return fmt.Errorf("invalid request: ToRow must not be less than FromRow")
+	}
 	return share.Namespace(req.Namespace).ValidateForData()
 }
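The added checks reject inverted row ranges up front and map out-of-range namespace requests to NOT_FOUND rather than INTERNAL. A hedged sketch of the range rule on its own (field names follow the request proto above; uint32 fields and equality being allowed are assumptions):

package main

import "fmt"

// validateRange mirrors the FromRow/ToRow rule added to validateRequest:
// ToRow may equal FromRow (a single-row request) but must not precede it.
func validateRange(fromRow, toRow uint32) error {
	if toRow < fromRow {
		return fmt.Errorf("invalid request: ToRow must not be less than FromRow")
	}
	return nil
}

func main() {
	fmt.Println(validateRange(2, 5)) // <nil>
	fmt.Println(validateRange(3, 3)) // <nil>: single row
	fmt.Println(validateRange(5, 2)) // invalid request
}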