diff --git a/.mockery.yaml b/.mockery.yaml index 8b13bd649..3f81f53cc 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -41,9 +41,23 @@ packages: github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller: interfaces: RPCClient: + WorkerGroup: + filtersI: + config: + inpackage: True + dir: "pkg/solana/logpoller" + filename: mock_filters.go + mockname: mockFilters + logsLoader: + config: + inpackage: True + dir: "pkg/solana/logpoller" + filename: mock_logs_loader.go + mockname: mockLogsLoader ORM: config: inpackage: True dir: "pkg/solana/logpoller" filename: mock_orm.go - mockname: mockORM + mockname: MockORM + diff --git a/integration-tests/smoke/event_loader_test.go b/integration-tests/smoke/log_poller_test.go similarity index 80% rename from integration-tests/smoke/event_loader_test.go rename to integration-tests/smoke/log_poller_test.go index a209d3843..7732733ec 100644 --- a/integration-tests/smoke/event_loader_test.go +++ b/integration-tests/smoke/log_poller_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "crypto/sha256" + "database/sql" "encoding/base64" "fmt" "os" @@ -17,6 +18,7 @@ import ( "github.com/gagliardetto/solana-go/rpc" "github.com/gagliardetto/solana-go/rpc/ws" "github.com/gagliardetto/solana-go/text" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" @@ -63,15 +65,18 @@ func TestEventLoader(t *testing.T) { totalLogsToSend := 30 parser := &printParser{t: t} sender := newLogSender(t, rpcClient, wsClient) - collector := logpoller.NewEncodedLogCollector( - cl, - parser, - logger.Nop(), - ) + orm := logpoller.NewMockORM(t) // TODO: replace with real DB, when available + programPubKey, err := solana.PublicKeyFromBase58(programPubKey) + require.NoError(t, err) + orm.EXPECT().SelectFilters(mock.Anything).Return([]logpoller.Filter{{ID: 1, IsBackfilled: false, Address: logpoller.PublicKey(programPubKey)}}, nil).Once() + orm.EXPECT().MarkFilterBackfilled(mock.Anything, 
mock.Anything).Return(nil).Once() + orm.EXPECT().GetLatestBlock(mock.Anything).Return(0, sql.ErrNoRows) + orm.EXPECT().SelectSeqNums(mock.Anything).Return(map[int64]int64{1: 0}, nil).Once() + lp := logpoller.NewWithCustomProcessor(logger.TestSugared(t), orm, cl, parser.ProcessBlocks) - require.NoError(t, collector.Start(ctx)) + require.NoError(t, lp.Start(ctx)) t.Cleanup(func() { - require.NoError(t, collector.Close()) + require.NoError(t, lp.Close()) }) go func(ctx context.Context, sender *logSender, privateKey *solana.PrivateKey) { @@ -145,26 +150,39 @@ type printParser struct { values []uint64 } -func (p *printParser) Process(evt logpoller.ProgramEvent) error { - p.t.Helper() - - data, err := base64.StdEncoding.DecodeString(evt.Data) - if err != nil { - return err +func (p *printParser) ProcessBlocks(ctx context.Context, blocks []logpoller.Block) error { + for _, b := range blocks { + err := p.process(b) + if err != nil { + return err + } } + return nil +} + +func (p *printParser) process(block logpoller.Block) error { + p.t.Helper() + sum := sha256.Sum256([]byte("event:TestEvent")) sig := sum[:8] - if bytes.Equal(sig, data[:8]) { - var event testEvent - if err := bin.UnmarshalBorsh(&event, data[8:]); err != nil { - return nil + for _, evt := range block.Events { + data, err := base64.StdEncoding.DecodeString(evt.Data) + if err != nil { + return err } - p.mu.Lock() - p.values = append(p.values, event.U64Value) - p.mu.Unlock() + if bytes.Equal(sig, data[:8]) { + var event testEvent + if err := bin.UnmarshalBorsh(&event, data[8:]); err != nil { + return nil + } + + p.mu.Lock() + p.values = append(p.values, event.U64Value) + p.mu.Unlock() + } } return nil diff --git a/pkg/solana/client/client.go b/pkg/solana/client/client.go index cdd4646ac..1e3a2cc6c 100644 --- a/pkg/solana/client/client.go +++ b/pkg/solana/client/client.go @@ -17,6 +17,10 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/solana/monitor" ) +// MaxSupportTransactionVersion defines max 
transaction version to return in responses. +// If the requested block contains a transaction with a higher version, an error will be returned. +const MaxSupportTransactionVersion = uint64(0) // (legacy + v0) + const ( DevnetGenesisHash = "EtWTRABZaYq6iMfeYKouRu166VU2xqa1wcaWoxPkrZBG" TestnetGenesisHash = "4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY" @@ -44,6 +48,7 @@ type Reader interface { GetBlockWithOpts(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) GetBlock(ctx context.Context, slot uint64) (*rpc.GetBlockResult, error) GetSignaturesForAddressWithOpts(ctx context.Context, addr solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) + SlotHeightWithCommitment(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) } // AccountReader is an interface that allows users to pass either the solana rpc client or the relay client @@ -372,12 +377,11 @@ func (c *Client) GetBlock(ctx context.Context, slot uint64) (*rpc.GetBlockResult defer done() ctx, cancel := context.WithTimeout(ctx, c.txTimeout) defer cancel() - // Adding slot to the key so concurrent calls to GetBlock for different slots are not merged. Without including the slot, // it would treat all GetBlock calls as identical and merge them, returning whichever block it fetched first to all callers. 
key := fmt.Sprintf("GetBlockWithOpts(%d)", slot) v, err, _ := c.requestGroup.Do(key, func() (interface{}, error) { - version := uint64(0) // pull all tx types (legacy + v0) + version := MaxSupportTransactionVersion return c.rpc.GetBlockWithOpts(ctx, slot, &rpc.GetBlockOpts{ Commitment: c.commitment, MaxSupportedTransactionVersion: &version, diff --git a/pkg/solana/client/mocks/reader_writer.go b/pkg/solana/client/mocks/reader_writer.go index 0bcead04b..1c76423fb 100644 --- a/pkg/solana/client/mocks/reader_writer.go +++ b/pkg/solana/client/mocks/reader_writer.go @@ -1080,6 +1080,63 @@ func (_c *ReaderWriter_SlotHeight_Call) RunAndReturn(run func(context.Context) ( return _c } +// SlotHeightWithCommitment provides a mock function with given fields: ctx, commitment +func (_m *ReaderWriter) SlotHeightWithCommitment(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { + ret := _m.Called(ctx, commitment) + + if len(ret) == 0 { + panic("no return value specified for SlotHeightWithCommitment") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) (uint64, error)); ok { + return rf(ctx, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) uint64); ok { + r0 = rf(ctx, commitment) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, rpc.CommitmentType) error); ok { + r1 = rf(ctx, commitment) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_SlotHeightWithCommitment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SlotHeightWithCommitment' +type ReaderWriter_SlotHeightWithCommitment_Call struct { + *mock.Call +} + +// SlotHeightWithCommitment is a helper method to define mock.On call +// - ctx context.Context +// - commitment rpc.CommitmentType +func (_e *ReaderWriter_Expecter) SlotHeightWithCommitment(ctx interface{}, commitment interface{}) 
*ReaderWriter_SlotHeightWithCommitment_Call { + return &ReaderWriter_SlotHeightWithCommitment_Call{Call: _e.mock.On("SlotHeightWithCommitment", ctx, commitment)} +} + +func (_c *ReaderWriter_SlotHeightWithCommitment_Call) Run(run func(ctx context.Context, commitment rpc.CommitmentType)) *ReaderWriter_SlotHeightWithCommitment_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *ReaderWriter_SlotHeightWithCommitment_Call) Return(_a0 uint64, _a1 error) *ReaderWriter_SlotHeightWithCommitment_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_SlotHeightWithCommitment_Call) RunAndReturn(run func(context.Context, rpc.CommitmentType) (uint64, error)) *ReaderWriter_SlotHeightWithCommitment_Call { + _c.Call.Return(run) + return _c +} + // NewReaderWriter creates a new instance of ReaderWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewReaderWriter(t interface { diff --git a/pkg/solana/client/multi_client.go b/pkg/solana/client/multi_client.go index 46282775b..d5c1eaf72 100644 --- a/pkg/solana/client/multi_client.go +++ b/pkg/solana/client/multi_client.go @@ -184,3 +184,12 @@ func (m *MultiClient) GetBlockWithOpts(ctx context.Context, slot uint64, opts *r return r.GetBlockWithOpts(ctx, slot, opts) } + +func (m *MultiClient) SlotHeightWithCommitment(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { + r, err := m.getClient() + if err != nil { + return 0, err + } + + return r.SlotHeightWithCommitment(ctx, commitment) +} diff --git a/pkg/solana/logpoller/blocks_sorter.go b/pkg/solana/logpoller/blocks_sorter.go new file mode 100644 index 000000000..d3dfdb1e3 --- /dev/null +++ b/pkg/solana/logpoller/blocks_sorter.go @@ -0,0 +1,135 @@ +package logpoller + +import ( + "container/list" + "context" + "sync" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" +) + +const blocksChBuffer = 16 + +type blocksSorter struct { + // service state management + services.Service + engine *services.Engine + lggr logger.Logger + + inBlocks <-chan Block + receivedNewBlock chan struct{} + + outBlocks chan Block + + mu sync.Mutex + queue *list.List + readyBlocks map[uint64]Block +} + +// newBlocksSorter - returns new instance of blocksSorter that writes blocks into output channel in order defined by expectedBlocks. 
+func newBlocksSorter(inBlocks <-chan Block, lggr logger.Logger, expectedBlocks []uint64) (*blocksSorter, <-chan Block) { + op := &blocksSorter{ + queue: list.New(), + readyBlocks: make(map[uint64]Block), + inBlocks: inBlocks, + outBlocks: make(chan Block, blocksChBuffer), + receivedNewBlock: make(chan struct{}, 1), + lggr: lggr, + } + + for _, b := range expectedBlocks { + op.queue.PushBack(b) + } + + op.Service, op.engine = services.Config{ + Name: "blocksSorter", + Start: op.start, + Close: nil, + }.NewServiceEngine(lggr) + + return op, op.outBlocks +} + +func (p *blocksSorter) start(_ context.Context) error { + p.engine.Go(p.writeOrderedBlocks) + p.engine.Go(p.readBlocks) + return nil +} + +func (p *blocksSorter) readBlocks(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case block, ok := <-p.inBlocks: + if !ok { + close(p.receivedNewBlock) // trigger last flush of ready blocks + return + } + + p.mu.Lock() + p.readyBlocks[block.SlotNumber] = block + p.mu.Unlock() + // try leaving a msg that new block is ready + select { + case p.receivedNewBlock <- struct{}{}: + default: + } + } + } +} + +func (p *blocksSorter) writeOrderedBlocks(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case _, ok := <-p.receivedNewBlock: + p.flushReadyBlocks(ctx) + if !ok { + p.mu.Lock() + // signal to consumer that work is done, when it's actually done + if p.queue.Len() == 0 { + close(p.outBlocks) + } + p.mu.Unlock() + return + } + } + } +} + +func (p *blocksSorter) readNextReadyBlock() *Block { + p.mu.Lock() + defer p.mu.Unlock() + element := p.queue.Front() + if element == nil { + return nil + } + + slotNumber := element.Value.(uint64) + block, ok := p.readyBlocks[slotNumber] + if !ok { + return nil + } + + p.queue.Remove(element) + return &block +} + +// flushReadyBlocks - sends all blocks in order defined by queue to the consumer. 
+func (p *blocksSorter) flushReadyBlocks(ctx context.Context) { + for { + block := p.readNextReadyBlock() + if block == nil { + return + } + + select { + case p.outBlocks <- *block: + case <-ctx.Done(): + return + } + } +} diff --git a/pkg/solana/logpoller/blocks_sorter_test.go b/pkg/solana/logpoller/blocks_sorter_test.go new file mode 100644 index 000000000..9e3cb862a --- /dev/null +++ b/pkg/solana/logpoller/blocks_sorter_test.go @@ -0,0 +1,61 @@ +package logpoller + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" +) + +func TestBlocksSorter(t *testing.T) { + t.Parallel() + t.Run("Properly closes even if there is still work to do", func(t *testing.T) { + ctx := tests.Context(t) + sorter, ch := newBlocksSorter(make(chan Block), logger.Test(t), []uint64{1, 2}) + require.NoError(t, sorter.Start(ctx)) + require.NoError(t, sorter.Close()) + select { + case <-ch: + require.Fail(t, "expected channel to remain open as not all work was done") + default: + } + }) + t.Run("Writes blocks in specified order defined by expectedBlocks", func(t *testing.T) { + ctx := tests.Context(t) + inCh := make(chan Block) + expectedBlocks := []uint64{1, 2, 10, 3} + sorter, ch := newBlocksSorter(inCh, logger.Test(t), expectedBlocks) + require.NoError(t, sorter.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, sorter.Close()) + }) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for _, b := range []uint64{2, 10, 1, 3} { + inCh <- Block{SlotNumber: b} + } + close(inCh) + }() + for _, b := range expectedBlocks { + select { + case block, ok := <-ch: + require.True(t, ok) + require.Equal(t, b, block.SlotNumber) + case <-ctx.Done(): + require.Fail(t, "expected to receive all blocks, before timeout") + } + } + + select { + case _, ok := <-ch: + require.False(t, ok) + case <-ctx.Done(): + require.Fail(t, "expected channel to 
be closed") + } + }) +} diff --git a/pkg/solana/logpoller/filters.go b/pkg/solana/logpoller/filters.go index 990d83432..dc1d52252 100644 --- a/pkg/solana/logpoller/filters.go +++ b/pkg/solana/logpoller/filters.go @@ -13,6 +13,7 @@ import ( "sync/atomic" "github.com/gagliardetto/solana-go" + "github.com/smartcontractkit/chainlink-common/pkg/codec/encodings/binary" "github.com/smartcontractkit/chainlink-common/pkg/logger" @@ -239,6 +240,29 @@ func (fl *filters) removeFilterFromIndexes(filter Filter) { } } +func (fl *filters) GetDistinctAddresses(ctx context.Context) ([]PublicKey, error) { + err := fl.LoadFilters(ctx) + if err != nil { + return nil, fmt.Errorf("failed to load filters: %w", err) + } + + fl.filtersMutex.RLock() + defer fl.filtersMutex.RUnlock() + + var result []PublicKey + set := map[PublicKey]struct{}{} + for _, filter := range fl.filtersByID { + if _, ok := set[filter.Address]; ok { + continue + } + + set[filter.Address] = struct{}{} + result = append(result, filter.Address) + } + + return result, nil +} + // MatchingFilters - returns iterator to go through all matching filters. // Requires LoadFilters to be called at least once. 
func (fl *filters) matchingFilters(addr PublicKey, eventSignature EventSignature) iter.Seq[Filter] { @@ -365,6 +389,9 @@ func (fl *filters) LoadFilters(ctx context.Context) error { fl.lggr.Debugw("Loading filters from db") fl.filtersMutex.Lock() defer fl.filtersMutex.Unlock() + if fl.loadedFilters.Load() { + return nil + } // reset filters' indexes to ensure we do not have partial data from the previous run fl.filtersByID = make(map[int64]*Filter) fl.filtersByName = make(map[string]int64) diff --git a/pkg/solana/logpoller/filters_test.go b/pkg/solana/logpoller/filters_test.go index b83b71385..086b911e4 100644 --- a/pkg/solana/logpoller/filters_test.go +++ b/pkg/solana/logpoller/filters_test.go @@ -18,7 +18,7 @@ import ( ) func TestFilters_LoadFilters(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(logger.Sugared(logger.Test(t)), orm) ctx := tests.Context(t) orm.On("SelectFilters", mock.Anything).Return(nil, errors.New("db failed")).Once() @@ -73,13 +73,13 @@ func TestFilters_LoadFilters(t *testing.T) { func TestFilters_RegisterFilter(t *testing.T) { lggr := logger.Sugared(logger.Test(t)) t.Run("Returns an error if name is empty", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) err := fs.RegisterFilter(tests.Context(t), Filter{}) require.EqualError(t, err, "name is required") }) t.Run("Returns an error if fails to load filters from db", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) orm.On("SelectFilters", mock.Anything).Return(nil, errors.New("db failed")).Once() err := fs.RegisterFilter(tests.Context(t), Filter{Name: "Filter"}) @@ -113,7 +113,7 @@ func TestFilters_RegisterFilter(t *testing.T) { } for _, tc := range testCases { t.Run(fmt.Sprintf("Updating %s", tc.Name), func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) const filterName = "Filter" dbFilter := Filter{Name: filterName} @@ -127,7 
+127,7 @@ func TestFilters_RegisterFilter(t *testing.T) { } }) t.Run("Happy path", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) const filterName = "Filter" orm.On("SelectFilters", mock.Anything).Return(nil, nil).Once() @@ -155,7 +155,7 @@ func TestFilters_RegisterFilter(t *testing.T) { require.Equal(t, filter, storedFilters[0]) }) t.Run("Can reregister after unregister", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) const filterName = "Filter" orm.On("SelectFilters", mock.Anything).Return(nil, nil).Once() @@ -180,14 +180,14 @@ func TestFilters_RegisterFilter(t *testing.T) { func TestFilters_UnregisterFilter(t *testing.T) { lggr := logger.Sugared(logger.Test(t)) t.Run("Returns an error if fails to load filters from db", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) orm.On("SelectFilters", mock.Anything).Return(nil, errors.New("db failed")).Once() err := fs.UnregisterFilter(tests.Context(t), "Filter") require.EqualError(t, err, "failed to load filters: failed to select filters from db: db failed") }) t.Run("Noop if filter is not present", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) const filterName = "Filter" orm.On("SelectFilters", mock.Anything).Return(nil, nil).Once() @@ -196,7 +196,7 @@ func TestFilters_UnregisterFilter(t *testing.T) { require.NoError(t, err) }) t.Run("Returns error if fails to mark filter as deleted", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) const filterName = "Filter" const id int64 = 10 @@ -207,7 +207,7 @@ func TestFilters_UnregisterFilter(t *testing.T) { require.EqualError(t, err, "failed to mark filter deleted: db query failed") }) t.Run("Happy path", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) const filterName = "Filter" const id int64 = 10 
@@ -226,7 +226,7 @@ func TestFilters_UnregisterFilter(t *testing.T) { func TestFilters_PruneFilters(t *testing.T) { lggr := logger.Sugared(logger.Test(t)) t.Run("Happy path", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) toDelete := Filter{ ID: 1, @@ -249,7 +249,7 @@ func TestFilters_PruneFilters(t *testing.T) { require.Len(t, fs.filtersToDelete, 0) }) t.Run("If DB removal fails will add filters back into removal slice ", func(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) fs := newFilters(lggr, orm) toDelete := Filter{ ID: 1, @@ -283,8 +283,8 @@ func TestFilters_PruneFilters(t *testing.T) { }) } -func TestFilters_matchingFilters(t *testing.T) { - orm := newMockORM(t) +func TestFilters_MatchingFilters(t *testing.T) { + orm := NewMockORM(t) lggr := logger.Sugared(logger.Test(t)) expectedFilter1 := Filter{ ID: 1, @@ -332,7 +332,7 @@ func TestFilters_matchingFilters(t *testing.T) { } func TestFilters_GetFiltersToBackfill(t *testing.T) { - orm := newMockORM(t) + orm := NewMockORM(t) lggr := logger.Sugared(logger.Test(t)) backfilledFilter := Filter{ ID: 1, @@ -384,10 +384,10 @@ func TestFilters_GetFiltersToBackfill(t *testing.T) { require.NoError(t, filters.RegisterFilter(tests.Context(t), notBackfilled)) ensureInQueue(notBackfilled) // new filter is always added to the queue - newFilter := Filter{Name: "new filter", ID: 3} - orm.EXPECT().InsertFilter(mock.Anything, newFilter).Return(newFilter.ID, nil).Once() + newFilter := Filter{Name: "new filter"} + orm.EXPECT().InsertFilter(mock.Anything, newFilter).Return(3, nil).Once() require.NoError(t, filters.RegisterFilter(tests.Context(t), newFilter)) - ensureInQueue(notBackfilled, newFilter) + ensureInQueue(notBackfilled, Filter{ID: 3, Name: "new filter"}) } func TestExtractField(t *testing.T) { diff --git a/pkg/solana/logpoller/job.go b/pkg/solana/logpoller/job.go deleted file mode 100644 index 448d800e4..000000000 --- a/pkg/solana/logpoller/job.go +++ 
/dev/null @@ -1,166 +0,0 @@ -package logpoller - -import ( - "context" - "fmt" - "time" - - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/rpc" -) - -// Job is a function that should be run by the worker group. The context provided -// allows the Job to cancel if the worker group is closed. All other life-cycle -// management should be wrapped within the Job. -type Job interface { - String() string - Run(context.Context) error -} - -type retryableJob struct { - name string - count uint8 - when time.Time - job Job -} - -func (j retryableJob) String() string { - return j.job.String() -} - -func (j retryableJob) Run(ctx context.Context) error { - return j.job.Run(ctx) -} - -type eventDetail struct { - slotNumber uint64 - blockHeight uint64 - blockHash solana.Hash - blockTime solana.UnixTimeSeconds - trxIdx int - trxSig solana.Signature -} - -// processEventJob is a job that processes a single event. The parser should be a pure function -// such that no network requests are made and no side effects are produced. -type processEventJob struct { - parser ProgramEventProcessor - event ProgramEvent -} - -func (j *processEventJob) String() string { - return "processEventJob" -} - -func (j *processEventJob) Run(_ context.Context) error { - return j.parser.Process(j.event) -} - -type wrappedParser interface { - ProgramEventProcessor - ExpectBlock(uint64) - ExpectTxs(uint64, int) -} - -// getTransactionsFromBlockJob is a job that fetches transaction signatures from a block and loads -// the job queue with getTransactionLogsJobs for each transaction found in the block. 
-type getTransactionsFromBlockJob struct { - slotNumber uint64 - client RPCClient - parser wrappedParser - chJobs chan Job -} - -func (j *getTransactionsFromBlockJob) String() string { - return fmt.Sprintf("getTransactionsFromBlockJob for block: %d", j.slotNumber) -} - -func (j *getTransactionsFromBlockJob) Run(ctx context.Context) error { - var excludeRewards bool - - block, err := j.client.GetBlockWithOpts( - ctx, - j.slotNumber, - &rpc.GetBlockOpts{ - Encoding: solana.EncodingBase64, - Commitment: rpc.CommitmentFinalized, - // get the full transaction details - TransactionDetails: rpc.TransactionDetailsFull, - // exclude rewards - Rewards: &excludeRewards, - }, - ) - if err != nil { - return err - } - - blockSigsOnly, err := j.client.GetBlockWithOpts( - ctx, - j.slotNumber, - &rpc.GetBlockOpts{ - Encoding: solana.EncodingBase64, - Commitment: rpc.CommitmentFinalized, - // get the signatures only - TransactionDetails: rpc.TransactionDetailsSignatures, - // exclude rewards - Rewards: &excludeRewards, - }, - ) - if err != nil { - return err - } - - detail := eventDetail{ - slotNumber: j.slotNumber, - blockHash: block.Blockhash, - } - - if block.BlockHeight == nil { - return fmt.Errorf("block at slot %d returned from rpc is missing block number", j.slotNumber) - } - detail.blockHeight = *block.BlockHeight - - if block.BlockTime == nil { - return fmt.Errorf("received block %d from rpc with missing block time", block.BlockHeight) - } - detail.blockTime = *block.BlockTime - - if len(block.Transactions) != len(blockSigsOnly.Signatures) { - return fmt.Errorf("block %d has %d transactions but %d signatures", j.slotNumber, len(block.Transactions), len(blockSigsOnly.Signatures)) - } - - j.parser.ExpectTxs(j.slotNumber, len(block.Transactions)) - - for idx, trx := range block.Transactions { - detail.trxIdx = idx - if len(blockSigsOnly.Signatures)-1 <= idx { - detail.trxSig = blockSigsOnly.Signatures[idx] - } - - messagesToEvents(trx.Meta.LogMessages, j.parser, detail, 
j.chJobs) - } - - return nil -} - -func messagesToEvents(messages []string, parser ProgramEventProcessor, detail eventDetail, chJobs chan Job) { - var logIdx uint - for _, outputs := range parseProgramLogs(messages) { - for _, event := range outputs.Events { - event.SlotNumber = detail.slotNumber - event.BlockHeight = detail.blockHeight - event.BlockHash = detail.blockHash - event.BlockTime = detail.blockTime - event.TransactionHash = detail.trxSig - event.TransactionIndex = detail.trxIdx - event.TransactionLogIndex = logIdx - - logIdx++ - - chJobs <- &processEventJob{ - parser: parser, - event: event, - } - } - } -} diff --git a/pkg/solana/logpoller/job_get_block.go b/pkg/solana/logpoller/job_get_block.go new file mode 100644 index 000000000..7cf62ba98 --- /dev/null +++ b/pkg/solana/logpoller/job_get_block.go @@ -0,0 +1,152 @@ +package logpoller + +import ( + "context" + "errors" + "fmt" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" +) + +// getBlockJob is a job that fetches a block with transactions, converts logs into ProgramEvents and writes them into blocks channel +type getBlockJob struct { + slotNumber uint64 + client RPCClient + blocks chan Block + done chan struct{} + parseProgramLogs func(logs []string) []ProgramOutput + lggr logger.SugaredLogger +} + +func newGetBlockJob(client RPCClient, blocks chan Block, lggr logger.SugaredLogger, slotNumber uint64) *getBlockJob { + return &getBlockJob{ + client: client, + blocks: blocks, + slotNumber: slotNumber, + done: make(chan struct{}), + parseProgramLogs: parseProgramLogs, + lggr: lggr, + } +} + +func (j *getBlockJob) String() string { + return fmt.Sprintf("getBlock for slotNumber: %d", j.slotNumber) +} + +func (j *getBlockJob) Done() <-chan struct{} { + return j.done +} + +func (j *getBlockJob) Run(ctx context.Context) error { + var 
excludeRewards bool + version := client.MaxSupportTransactionVersion + block, err := j.client.GetBlockWithOpts( + ctx, + j.slotNumber, + // NOTE: any change to the filtering arguments may affect calculation of txIndex, which could lead to events duplication. + &rpc.GetBlockOpts{ + Encoding: solana.EncodingBase64, + Commitment: rpc.CommitmentFinalized, + // get the full transaction details + TransactionDetails: rpc.TransactionDetailsFull, + MaxSupportedTransactionVersion: &version, + // exclude rewards + Rewards: &excludeRewards, + }, + ) + if err != nil { + return err + } + + detail := eventDetail{ + slotNumber: j.slotNumber, + blockHash: block.Blockhash, + } + + if block.BlockHeight == nil { + return fmt.Errorf("block at slot %d returned from rpc is missing block number", j.slotNumber) + } + detail.blockHeight = *block.BlockHeight + + if block.BlockTime == nil { + return fmt.Errorf("block at slot %d returned from rpc is missing block time", j.slotNumber) + } + detail.blockTime = *block.BlockTime + + events := make([]ProgramEvent, 0, len(block.Transactions)) + for idx, txWithMeta := range block.Transactions { + detail.trxIdx = idx + if txWithMeta.Transaction == nil { + return fmt.Errorf("failed to parse transaction %d in slot %d: %w", idx, j.slotNumber, errors.New("missing transaction field")) + } + tx, err := txWithMeta.GetTransaction() + if err != nil { + return fmt.Errorf("failed to parse transaction %d in slot %d: %w", idx, j.slotNumber, err) + } + if len(tx.Signatures) == 0 { + return fmt.Errorf("expected all transactions to have at least one signature %d in slot %d", idx, j.slotNumber) + } + if txWithMeta.Meta == nil { + return fmt.Errorf("expected transaction to have meta. 
signature: %s; slot: %d; idx: %d", tx.Signatures[0], j.slotNumber, idx) + } + if txWithMeta.Meta.Err != nil { + j.lggr.Debugw("Skipping all events of failed transaction", "err", txWithMeta.Meta.Err, "signature", tx.Signatures[0]) + continue + } + detail.trxSig = tx.Signatures[0] // according to Solana docs the first signature is used as ID + + txEvents := j.messagesToEvents(txWithMeta.Meta.LogMessages, detail) + events = append(events, txEvents...) + } + + result := Block{ + SlotNumber: j.slotNumber, + BlockHash: block.Blockhash, + Events: events, + } + select { + case <-ctx.Done(): + return ctx.Err() + case j.blocks <- result: + close(j.done) + } + + return nil +} + +func (j *getBlockJob) messagesToEvents(messages []string, detail eventDetail) []ProgramEvent { + var logIdx uint + events := make([]ProgramEvent, 0, len(messages)) + for _, outputs := range j.parseProgramLogs(messages) { + for i, event := range outputs.Events { + event.SlotNumber = detail.slotNumber + event.BlockHeight = detail.blockHeight + event.BlockHash = detail.blockHash + event.BlockTime = detail.blockTime + event.TransactionHash = detail.trxSig + event.TransactionIndex = detail.trxIdx + event.TransactionLogIndex = logIdx + + logIdx++ + outputs.Events[i] = event + } + + events = append(events, outputs.Events...) 
// eventDetail captures the block- and transaction-level context needed to
// annotate events extracted from a single transaction within a slot.
type eventDetail struct {
	slotNumber  uint64                 // slot the transaction was observed in
	blockHeight uint64                 // height of the containing block
	blockHash   solana.Hash            // hash of the containing block
	blockTime   solana.UnixTimeSeconds // block timestamp reported by the RPC
	trxIdx      int                    // index of the transaction within the block
	trxSig      solana.Signature       // first signature of the transaction
}

// TestGetBlockJob exercises getBlockJob.Run against a mocked RPC client:
// every malformed-response error path, cancellation behavior, and the happy
// path where program events are extracted in transaction/log order.
func TestGetBlockJob(t *testing.T) {
	const slotNumber = uint64(42)
	t.Run("String contains slot number", func(t *testing.T) {
		lggr := logger.Sugared(logger.Test(t))
		job := newGetBlockJob(nil, nil, lggr, slotNumber)
		require.Equal(t, "getBlock for slotNumber: 42", job.String())
	})
	t.Run("Error if fails to get block", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		expectedError := errors.New("rpc failed")
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(nil, expectedError).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err := job.Run(tests.Context(t))
		require.ErrorIs(t, err, expectedError)
	})
	t.Run("Error if block height is not present", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		block := rpc.GetBlockResult{}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err := job.Run(tests.Context(t))
		require.ErrorContains(t, err, "block at slot 42 returned from rpc is missing block number")
	})
	t.Run("Error if block time is not present", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))

		block := rpc.GetBlockResult{BlockHeight: ptr(uint64(10))}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err := job.Run(tests.Context(t))
		require.ErrorContains(t, err, "block at slot 42 returned from rpc is missing block time")
	})
	t.Run("Error if transaction field is not present", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		block := rpc.GetBlockResult{BlockHeight: ptr(uint64(10)), BlockTime: ptr(solana.UnixTimeSeconds(10)), Transactions: []rpc.TransactionWithMeta{{Transaction: nil}}}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err := job.Run(tests.Context(t))
		require.ErrorContains(t, err, "failed to parse transaction 0 in slot 42: missing transaction field")
	})
	t.Run("Error if fails to get transaction", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		// "{" is not a valid serialized transaction, so decoding must fail
		block := rpc.GetBlockResult{BlockHeight: ptr(uint64(10)), BlockTime: ptr(solana.UnixTimeSeconds(10)), Transactions: []rpc.TransactionWithMeta{{Transaction: rpc.DataBytesOrJSONFromBytes([]byte("{"))}}}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err := job.Run(tests.Context(t))
		require.ErrorContains(t, err, "failed to parse transaction 0 in slot 42")
	})
	t.Run("Error if Tx has no signatures", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		tx := solana.Transaction{}
		txB, err := tx.MarshalBinary()
		require.NoError(t, err)
		block := rpc.GetBlockResult{BlockHeight: ptr(uint64(10)), BlockTime: ptr(solana.UnixTimeSeconds(10)), Transactions: []rpc.TransactionWithMeta{{Transaction: rpc.DataBytesOrJSONFromBytes(txB)}}}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err = job.Run(tests.Context(t))
		require.ErrorContains(t, err, "expected all transactions to have at least one signature 0 in slot 42")
	})
	t.Run("Error if Tx has no Meta", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		tx := solana.Transaction{Signatures: []solana.Signature{{1, 2, 3}}}
		txB, err := tx.MarshalBinary()
		require.NoError(t, err)
		block := rpc.GetBlockResult{BlockHeight: ptr(uint64(10)), BlockTime: ptr(solana.UnixTimeSeconds(10)), Transactions: []rpc.TransactionWithMeta{{Transaction: rpc.DataBytesOrJSONFromBytes(txB)}}}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		err = job.Run(tests.Context(t))
		require.ErrorContains(t, err, "expected transaction to have meta. signature: 2AnZxg8HN2sGa7GC7iWGDgpXbEasqXQNEumCjvHUFDcBnfRKAdaN3SvKLhbQwheN15xDkL5D5mdX21A5gH1MdYB; slot: 42; idx: 0")
	})
	t.Run("Can abort even if no one waits for result", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		tx := solana.Transaction{Signatures: make([]solana.Signature, 1)}
		txB, err := tx.MarshalBinary()
		require.NoError(t, err)
		block := rpc.GetBlockResult{BlockHeight: ptr(uint64(10)), BlockTime: ptr(solana.UnixTimeSeconds(10)), Transactions: []rpc.TransactionWithMeta{{Transaction: rpc.DataBytesOrJSONFromBytes(txB), Meta: &rpc.TransactionMeta{}}}}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		// unbuffered channel with no reader: Run must return on ctx cancellation instead of blocking on send
		job := newGetBlockJob(client, make(chan Block), lggr, slotNumber)
		ctx, cancel := context.WithCancel(tests.Context(t))
		cancel()
		err = job.Run(ctx)
		require.ErrorIs(t, err, context.Canceled)
		select {
		case <-job.Done():
			require.Fail(t, "expected done channel to be open as job was aborted")
		default:
		}
	})
	t.Run("Happy path", func(t *testing.T) {
		client := mocks.NewRPCClient(t)
		lggr := logger.Sugared(logger.Test(t))
		tx1Signature := solana.Signature{4, 5, 6}
		tx2Signature := solana.Signature{7, 8, 9}
		txSigToDataBytes := func(sig solana.Signature) *rpc.DataBytesOrJSON {
			tx := solana.Transaction{Signatures: []solana.Signature{sig}}
			binary, err := tx.MarshalBinary()
			require.NoError(t, err)
			return rpc.DataBytesOrJSONFromBytes(binary)
		}
		txWithMeta1 := rpc.TransactionWithMeta{Transaction: txSigToDataBytes(tx1Signature), Meta: &rpc.TransactionMeta{LogMessages: []string{"log1", "log2"}}}
		txWithMeta2 := rpc.TransactionWithMeta{Transaction: txSigToDataBytes(tx2Signature), Meta: &rpc.TransactionMeta{LogMessages: []string{"log3"}}}
		// tx3 must be ignored due to error
		txWithMeta3 := rpc.TransactionWithMeta{Transaction: txSigToDataBytes(solana.Signature{10, 11}), Meta: &rpc.TransactionMeta{LogMessages: []string{"log4"}, Err: fmt.Errorf("some error")}}
		height := uint64(41)
		blockTime := solana.UnixTimeSeconds(128)
		block := rpc.GetBlockResult{BlockHeight: &height, BlockTime: ptr(blockTime), Blockhash: solana.Hash{1, 2, 3}, Transactions: []rpc.TransactionWithMeta{txWithMeta1, txWithMeta2, txWithMeta3}}
		client.EXPECT().GetBlockWithOpts(mock.Anything, slotNumber, mock.Anything).Return(&block, nil).Once()
		job := newGetBlockJob(client, make(chan Block, 1), lggr, slotNumber)
		// stub the log parser so the test controls exactly which events come out of each log line
		job.parseProgramLogs = func(logs []string) []ProgramOutput {
			result := ProgramOutput{
				Program: "myProgram",
			}
			for _, l := range logs {
				result.Events = append(result.Events, ProgramEvent{Data: l, Program: "myProgram"})
			}
			return []ProgramOutput{result}
		}
		err := job.Run(tests.Context(t))
		require.NoError(t, err)
		result := <-job.blocks
		require.Equal(t, Block{
			SlotNumber: slotNumber,
			BlockHash:  block.Blockhash,
			Events: []ProgramEvent{
				{
					BlockData: BlockData{
						SlotNumber:          slotNumber,
						BlockHeight:         height,
						BlockHash:           block.Blockhash,
						TransactionHash:     tx1Signature,
						TransactionLogIndex: 0,
						TransactionIndex:    0,
						BlockTime:           blockTime,
					},
					Program: "myProgram",
					Data:    "log1",
				},
				{
					BlockData: BlockData{
						SlotNumber:          slotNumber,
						BlockHeight:         height,
						BlockHash:           block.Blockhash,
						TransactionHash:     tx1Signature,
						TransactionLogIndex: 1,
						TransactionIndex:    0,
						BlockTime:           blockTime,
					},
					Program: "myProgram",
					Data:    "log2",
				},
				{
					BlockData: BlockData{
						SlotNumber:          slotNumber,
						BlockHeight:         height,
						BlockHash:           block.Blockhash,
						TransactionHash:     tx2Signature,
						TransactionLogIndex: 0,
						TransactionIndex:    1,
						BlockTime:           blockTime,
					},
					Program: "myProgram",
					Data:    "log3",
				},
			},
		}, result)
		select {
		case <-job.Done():
		default:
			t.Fatal("expected job to be done")
		}
	})
}
// ptr returns a pointer to a copy of the supplied value. It is a small test
// helper for building struct literals whose fields require *T.
func ptr[T any](v T) *T {
	out := v
	return &out
}
var _ worker.Job = (*getSlotsForAddressJob)(nil)

// getSlotsForAddressJob - identifies slots that contain transactions for specified address in range [from, to] and
// calls storeSlot for each. If a single request was not sufficient to identify all slots - schedules a new job. Channel
// returned by Done() will be closed only when all jobs are done.
type getSlotsForAddressJob struct {
	address   PublicKey        // address whose transaction history is scanned
	beforeSig solana.Signature // pagination cursor: fetch signatures older than this one; zero value means start from the newest
	from, to  uint64           // inclusive slot range to scan

	client RPCClient

	storeSlot func(slot uint64) // invoked for every in-range slot containing a non-errored transaction
	done      chan struct{}     // closed when this job and all continuation jobs have finished; shared with child jobs
	workers   WorkerGroup       // used to schedule continuation jobs for further pagination
}

// newGetSlotsForAddress creates the initial job for the [from, to] range;
// the pagination cursor (beforeSig) starts at its zero value.
func newGetSlotsForAddress(client RPCClient, workers WorkerGroup, storeSlot func(uint64), address PublicKey, from, to uint64) *getSlotsForAddressJob {
	return &getSlotsForAddressJob{
		address:   address,
		client:    client,
		from:      from,
		to:        to,
		storeSlot: storeSlot,
		workers:   workers,
		done:      make(chan struct{}),
	}
}

// String implements fmt.Stringer and identifies the job in worker-group logs.
func (f *getSlotsForAddressJob) String() string {
	return fmt.Sprintf("getSlotsForAddress: %s, from: %d, to: %d, beforeSig: %s", f.address, f.from, f.to, f.beforeSig)
}

// Done returns a channel that is closed once this job and every continuation
// job it spawned have completed.
func (f *getSlotsForAddressJob) Done() <-chan struct{} {
	return f.done
}

// Run executes one pagination step. It closes the shared done channel only if
// this step finished the scan without scheduling a child job.
func (f *getSlotsForAddressJob) Run(ctx context.Context) error {
	isDone, err := f.run(ctx)
	if err != nil {
		return err
	}

	if isDone {
		close(f.done)
	}
	return nil
}

// run - returns true, nil - if job was fully done, and we have not created a child job
func (f *getSlotsForAddressJob) run(ctx context.Context) (bool, error) {
	opts := rpc.GetSignaturesForAddressOpts{
		Commitment:     rpc.CommitmentFinalized,
		MinContextSlot: &f.to, // MinContextSlot is not a filter. It defines the min slot that the RPC is expected to observe to handle the request
	}

	if !f.beforeSig.IsZero() {
		opts.Before = f.beforeSig
	}

	sigs, err := f.client.GetSignaturesForAddressWithOpts(ctx, f.address.ToSolana(), &opts)
	if err != nil {
		return false, fmt.Errorf("failed getting signatures for address: %w", err)
	}

	// NOTE: there is no reliable way for us to verify that RPC has sufficient history depth. Instead of
	// doing additional requests in attempt to verify it, we prefer to just trust RPC and hope that sufficient
	// number of nodes in DON were able to fetch required logs
	if len(sigs) == 0 {
		return true, nil
	}

	// signatures ordered from newest to oldest, defined in the Solana RPC docs
	for _, sig := range sigs {
		// RPC may return slots that are higher than requested. Skip them to simplify mental model.
		if sig.Slot > f.to {
			continue
		}

		// results are descending, so the first slot below the range ends the scan
		if sig.Slot < f.from {
			return true, nil
		}

		// no need to fetch slot, if transaction failed
		if sig.Err == nil {
			f.storeSlot(sig.Slot)
		}
	}

	oldestSig := sigs[len(sigs)-1]
	// to ensure we do not overload RPC perform next call as a separate job
	err = f.workers.Do(ctx, &getSlotsForAddressJob{
		address:   f.address,
		beforeSig: oldestSig.Signature,
		from:      f.from,
		to:        oldestSig.Slot,
		client:    f.client,
		storeSlot: f.storeSlot,
		done:      f.done,
		workers:   f.workers,
	})
	return false, err
}
"github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/mocks" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/worker" +) + +func TestGetSlotsForAddressJob(t *testing.T) { + sig, err := solana.SignatureFromBase58("4VJEi7D9ia2R4L6xgPE7bKTtNAtJ2KGHTtq1VEztEMtpcevGPzGpyvnm6EgkMCPhSQTAQ9XwdyqVYzqbf35zJyF") + require.NoError(t, err) + rawAddr, err := solana.PublicKeyFromBase58("Cv4T27XbjVoKUYwP72NQQanvZeA7W4YF9L4EnYT9kx5o") + require.NoError(t, err) + address := PublicKey(rawAddr) + const from = uint64(10) + const to = uint64(20) + t.Run("String representation contains all details", func(t *testing.T) { + job := &getSlotsForAddressJob{address: address, from: from, to: to, beforeSig: sig} + require.Equal(t, "getSlotsForAddress: Cv4T27XbjVoKUYwP72NQQanvZeA7W4YF9L4EnYT9kx5o, from: 10, to: 20, beforeSig: 4VJEi7D9ia2R4L6xgPE7bKTtNAtJ2KGHTtq1VEztEMtpcevGPzGpyvnm6EgkMCPhSQTAQ9XwdyqVYzqbf35zJyF", job.String()) + }) + t.Run("Returns error if RPC request failed", func(t *testing.T) { + client := mocks.NewRPCClient(t) + expectedError := errors.New("rpc error") + client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, mock.Anything, mock.Anything).RunAndReturn( + func(ctx context.Context, key solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { + require.Equal(t, address.String(), key.String()) + require.NotNil(t, opts) + require.True(t, opts.Before.IsZero()) + require.NotNil(t, opts.MinContextSlot) + require.Equal(t, to, *opts.MinContextSlot) + return nil, expectedError + }).Once() + job := newGetSlotsForAddress(client, nil, nil, address, from, to) + err := job.Run(tests.Context(t)) + require.ErrorIs(t, err, expectedError) + }) + requireJobIsDone := func(t *testing.T, done <-chan struct{}, msg string) { + select { + case <-done: + default: + require.Fail(t, msg) + } + } + t.Run("Completes successfully if there is no signatures", func(t *testing.T) { + client := mocks.NewRPCClient(t) + 
client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, mock.Anything, mock.Anything).Return([]*rpc.TransactionSignature{}, nil).Once() + job := newGetSlotsForAddress(client, nil, nil, address, from, to) + err := job.Run(tests.Context(t)) + require.NoError(t, err) + requireJobIsDone(t, job.Done(), "expected job to be done") + }) + t.Run("Stores slots only if they are in range", func(t *testing.T) { + client := mocks.NewRPCClient(t) + var signatures []*rpc.TransactionSignature + for _, slot := range []uint64{21, 20, 11, 10, 9} { + if slot == 20 { + // must be skipped due to error + signatures = append(signatures, &rpc.TransactionSignature{Slot: 19, Err: errors.New("transaction failed")}) + } + if slot == 10 { + // add errored transaction before a valid into the last slot within range to ensure that we won't skip that slot + signatures = append(signatures, &rpc.TransactionSignature{Slot: 10, Err: errors.New("transaction failed")}) + } + signatures = append(signatures, &rpc.TransactionSignature{Slot: slot}) + } + client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, mock.Anything, mock.Anything).Return(signatures, nil).Once() + var actualSlots []uint64 + job := newGetSlotsForAddress(client, nil, func(s uint64) { + actualSlots = append(actualSlots, s) + }, address, from, to) + err := job.Run(tests.Context(t)) + require.NoError(t, err) + requireJobIsDone(t, job.Done(), "expected job to be done") + require.Equal(t, []uint64{20, 11, 10}, actualSlots) + }) + t.Run("If slot range may have more signatures, schedules a new job", func(t *testing.T) { + client := mocks.NewRPCClient(t) + signatures := []*rpc.TransactionSignature{{Slot: 19, Signature: sig}} + client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, mock.Anything, mock.Anything).Return(signatures, nil).Once() + workers := mocks.NewWorkerGroup(t) + var secondJob *getSlotsForAddressJob + workers.EXPECT().Do(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, rawJob worker.Job) 
error { + job, ok := rawJob.(*getSlotsForAddressJob) + require.True(t, ok) + require.Equal(t, from, job.from) + require.Equal(t, uint64(19), job.to) + require.Equal(t, address, job.address) + require.Equal(t, sig, job.beforeSig) + secondJob = job + return nil + }) + var actualSlots []uint64 + firstJob := newGetSlotsForAddress(client, workers, func(s uint64) { + actualSlots = append(actualSlots, s) + }, address, from, to) + err := firstJob.Run(tests.Context(t)) + require.NoError(t, err) + select { + case <-firstJob.Done(): + require.FailNow(t, "expected job to schedule second job and not to be done") + default: + } + require.NotNil(t, secondJob) + client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, mock.Anything, mock.Anything).Return([]*rpc.TransactionSignature{{Slot: 18}, {Slot: 9}}, nil).Once() + err = secondJob.Run(tests.Context(t)) + require.NoError(t, err) + requireJobIsDone(t, firstJob.Done(), "expected fist job to be done") + requireJobIsDone(t, secondJob.Done(), "expected second job to be done") + require.Equal(t, []uint64{19, 18}, actualSlots) + }) +} diff --git a/pkg/solana/logpoller/loader.go b/pkg/solana/logpoller/loader.go index 39a985a98..ddd940602 100644 --- a/pkg/solana/logpoller/loader.go +++ b/pkg/solana/logpoller/loader.go @@ -1,477 +1,163 @@ package logpoller import ( - "container/list" "context" - "errors" "fmt" - "slices" + "sort" "sync" - "sync/atomic" - "time" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/worker" ) +type Block struct { + SlotNumber uint64 + BlockHash solana.Hash + Events []ProgramEvent +} + type ProgramEventProcessor interface { // Process should take a ProgramEvent and parseProgramLogs it based on log signature // and expected encoding. 
Only return errors that cannot be handled and // should exit further transaction processing on the running thread. // // Process should be thread safe. - Process(ProgramEvent) error + Process(Block) error } type RPCClient interface { - LatestBlockhash(ctx context.Context) (out *rpc.GetLatestBlockhashResult, err error) - GetBlocks(ctx context.Context, startSlot uint64, endSlot *uint64) (out rpc.BlocksResult, err error) GetBlockWithOpts(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) GetSignaturesForAddressWithOpts(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) + SlotHeightWithCommitment(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) } -const ( - DefaultNextSlotPollingInterval = 1_000 * time.Millisecond -) - +type WorkerGroup interface { + Do(ctx context.Context, job worker.Job) error +} type EncodedLogCollector struct { // service state management services.Service engine *services.Engine // dependencies and configuration - client RPCClient - ordered *orderedParser - unordered *unorderedParser - lggr logger.Logger - rpcTimeLimit time.Duration - - // internal state - chSlot chan uint64 - chBlock chan uint64 - chJobs chan Job - workers *WorkerGroup + client RPCClient + lggr logger.SugaredLogger - highestSlot atomic.Uint64 - highestSlotLoaded atomic.Uint64 - lastSentSlot atomic.Uint64 + workers *worker.Group } func NewEncodedLogCollector( client RPCClient, - parser ProgramEventProcessor, - lggr logger.Logger, + lggr logger.SugaredLogger, ) *EncodedLogCollector { c := &EncodedLogCollector{ - client: client, - unordered: newUnorderedParser(parser), - chSlot: make(chan uint64), - chBlock: make(chan uint64, 1), - chJobs: make(chan Job, 1), - lggr: lggr, - rpcTimeLimit: 1 * time.Second, + client: client, + lggr: lggr, } c.Service, c.engine = services.Config{ Name: "EncodedLogCollector", NewSubServices: func(lggr logger.Logger) []services.Service { - c.workers = 
NewWorkerGroup(DefaultWorkerCount, lggr) - c.ordered = newOrderedParser(parser, lggr) + c.workers = worker.NewGroup(worker.DefaultWorkerCount, logger.Sugared(lggr)) - return []services.Service{c.workers, c.ordered} + return []services.Service{c.workers} }, - Start: c.start, - Close: c.close, }.NewServiceEngine(lggr) return c } -func (c *EncodedLogCollector) BackfillForAddress(ctx context.Context, address string, fromSlot uint64) error { - pubKey, err := solana.PublicKeyFromBase58(address) - if err != nil { - return err +func (c *EncodedLogCollector) getSlotsToFetch(ctx context.Context, addresses []PublicKey, fromSlot, toSlot uint64) ([]uint64, error) { + // identify slots to fetch + slotsForAddressJobs := make([]*getSlotsForAddressJob, len(addresses)) + slotsToFetch := make(map[uint64]struct{}, toSlot-fromSlot) + var slotsToFetchMu sync.Mutex + storeSlot := func(slot uint64) { + slotsToFetchMu.Lock() + slotsToFetch[slot] = struct{}{} + slotsToFetchMu.Unlock() } - - var ( - lowestSlotRead uint64 - lowestSlotSig solana.Signature - ) - - for lowestSlotRead > fromSlot || lowestSlotRead == 0 { - opts := rpc.GetSignaturesForAddressOpts{ - Commitment: rpc.CommitmentFinalized, - MinContextSlot: &fromSlot, - } - - if lowestSlotRead > 0 { - opts.Before = lowestSlotSig - } - - sigs, err := c.client.GetSignaturesForAddressWithOpts(ctx, pubKey, &opts) + for i, address := range addresses { + slotsForAddressJobs[i] = newGetSlotsForAddress(c.client, c.workers, storeSlot, address, fromSlot, toSlot) + err := c.workers.Do(ctx, slotsForAddressJobs[i]) if err != nil { - return err - } - - if len(sigs) == 0 { - break - } - - // signatures ordered from newest to oldest, defined in the Solana RPC docs - for _, sig := range sigs { - lowestSlotSig = sig.Signature - - if sig.Slot >= lowestSlotRead && lowestSlotRead != 0 { - continue - } - - lowestSlotRead = sig.Slot - - if err := c.workers.Do(ctx, &getTransactionsFromBlockJob{ - slotNumber: sig.Slot, - client: c.client, - parser: 
c.unordered, - chJobs: c.chJobs, - }); err != nil { - return err - } + return nil, fmt.Errorf("could not shedule job to fetch slots for address: %w", err) } } - return nil -} - -func (c *EncodedLogCollector) start(_ context.Context) error { - c.engine.Go(c.runSlotPolling) - c.engine.Go(c.runSlotProcessing) - c.engine.Go(c.runBlockProcessing) - c.engine.Go(c.runJobProcessing) - - return nil -} - -func (c *EncodedLogCollector) close() error { - return nil -} - -func (c *EncodedLogCollector) runSlotPolling(ctx context.Context) { - for { - timer := time.NewTimer(DefaultNextSlotPollingInterval) - + for _, job := range slotsForAddressJobs { select { case <-ctx.Done(): - timer.Stop() - - return - case <-timer.C: - ctxB, cancel := context.WithTimeout(ctx, c.rpcTimeLimit) - - // not to be run as a job, but as a blocking call - result, err := c.client.LatestBlockhash(ctxB) - if err != nil { - c.lggr.Error("failed to get latest blockhash", "err", err) - cancel() - - continue - } - - cancel() - - // if the slot is not higher than the highest slot, skip it - if c.lastSentSlot.Load() >= result.Context.Slot { - continue - } - - c.lastSentSlot.Store(result.Context.Slot) - - select { - case c.chSlot <- result.Context.Slot: - default: - } + return nil, ctx.Err() + case <-job.Done(): } - - timer.Stop() } -} -func (c *EncodedLogCollector) runSlotProcessing(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case slot := <-c.chSlot: - if c.highestSlot.Load() >= slot { - continue - } - - from := c.highestSlot.Load() + 1 - if c.highestSlot.Load() == 0 { - from = slot - } - - c.highestSlot.Store(slot) - - // load blocks in slot range - c.loadRange(ctx, from, slot) - } + // it should be safe to access slotsToFetch without lock as all the jobs signalled that they are done + result := make([]uint64, 0, len(slotsToFetch)) + for slot := range slotsToFetch { + result = append(result, slot) } -} -func (c *EncodedLogCollector) runBlockProcessing(ctx context.Context) { - for 
{ - select { - case <-ctx.Done(): - return - case slot := <-c.chBlock: - if err := c.workers.Do(ctx, &getTransactionsFromBlockJob{ - slotNumber: slot, - client: c.client, - parser: c.ordered, - chJobs: c.chJobs, - }); err != nil { - c.lggr.Errorf("failed to add job to queue: %s", err) - } - } - } + sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) + return result, nil } -func (c *EncodedLogCollector) runJobProcessing(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case job := <-c.chJobs: - if err := c.workers.Do(ctx, job); err != nil { - c.lggr.Errorf("failed to add job to queue: %s", err) - } +func (c *EncodedLogCollector) scheduleBlocksFetching(ctx context.Context, slots []uint64) (<-chan Block, error) { + blocks := make(chan Block) + getBlockJobs := make([]*getBlockJob, len(slots)) + for i, slot := range slots { + getBlockJobs[i] = newGetBlockJob(c.client, blocks, c.lggr, slot) + err := c.workers.Do(ctx, getBlockJobs[i]) + if err != nil { + return nil, fmt.Errorf("could not schedule job to fetch blocks for slot: %w", err) } } -} - -func (c *EncodedLogCollector) loadRange(ctx context.Context, start, end uint64) { - if err := c.loadSlotBlocksRange(ctx, start, end); err != nil { - // a retry will happen anyway on the next round of slots - // so the error is handled by doing nothing - c.lggr.Errorw("failed to load slot blocks range", "start", start, "end", end, "err", err) - - return - } - - c.highestSlotLoaded.Store(end) -} - -func (c *EncodedLogCollector) loadSlotBlocksRange(ctx context.Context, start, end uint64) error { - if start >= end { - return errors.New("the start block must come before the end block") - } - - var ( - result rpc.BlocksResult - err error - ) - - rpcCtx, cancel := context.WithTimeout(ctx, c.rpcTimeLimit) - defer cancel() - - if result, err = c.client.GetBlocks(rpcCtx, start, &end); err != nil { - return err - } - // as a safety mechanism, order the blocks ascending (oldest to newest) in the 
extreme case - // that the RPC changes and results get jumbled. - slices.SortFunc(result, func(a, b uint64) int { - if a < b { - return -1 - } else if a > b { - return 1 + c.engine.Go(func(ctx context.Context) { + for _, job := range getBlockJobs { + select { + case <-ctx.Done(): + return + case <-job.Done(): + continue + } } - - return 0 + close(blocks) }) - for _, block := range result { - c.ordered.ExpectBlock(block) - - select { - case <-ctx.Done(): - return nil - case c.chBlock <- block: - } - } - - return nil -} - -type unorderedParser struct { - parser ProgramEventProcessor -} - -func newUnorderedParser(parser ProgramEventProcessor) *unorderedParser { - return &unorderedParser{parser: parser} -} - -func (p *unorderedParser) ExpectBlock(_ uint64) {} -func (p *unorderedParser) ExpectTxs(_ uint64, _ int) {} -func (p *unorderedParser) Process(evt ProgramEvent) error { - return p.parser.Process(evt) + return blocks, nil } -type orderedParser struct { - // service state management - services.Service - engine *services.Engine - - // internal state - parser ProgramEventProcessor - mu sync.Mutex - blocks *list.List - expect map[uint64]int - actual map[uint64][]ProgramEvent -} - -func newOrderedParser(parser ProgramEventProcessor, lggr logger.Logger) *orderedParser { - op := &orderedParser{ - parser: parser, - blocks: list.New(), - expect: make(map[uint64]int), - actual: make(map[uint64][]ProgramEvent), - } - - op.Service, op.engine = services.Config{ - Name: "OrderedParser", - Start: op.start, - Close: op.close, - }.NewServiceEngine(lggr) - - return op -} - -// ExpectBlock should be called in block order to preserve block progression. 
-func (p *orderedParser) ExpectBlock(block uint64) { - p.mu.Lock() - defer p.mu.Unlock() - - p.blocks.PushBack(block) -} - -func (p *orderedParser) ExpectTxs(block uint64, quantity int) { - p.mu.Lock() - defer p.mu.Unlock() - - p.expect[block] = quantity - p.actual[block] = make([]ProgramEvent, 0, quantity) -} - -func (p *orderedParser) Process(event ProgramEvent) error { - p.mu.Lock() - defer p.mu.Unlock() - - if err := p.addToExpectations(event); err != nil { - // TODO: log error because this is an unrecoverable error - return nil - } - - return p.sendReadySlots() -} - -func (p *orderedParser) start(_ context.Context) error { - p.engine.GoTick(services.NewTicker(time.Second), p.run) - - return nil -} - -func (p *orderedParser) close() error { - return nil -} - -func (p *orderedParser) addToExpectations(evt ProgramEvent) error { - _, ok := p.expect[evt.SlotNumber] - if !ok { - return fmt.Errorf("%w: %d", errExpectationsNotSet, evt.SlotNumber) - } - - evts, ok := p.actual[evt.SlotNumber] - if !ok { - return fmt.Errorf("%w: %d", errExpectationsNotSet, evt.SlotNumber) +func (c *EncodedLogCollector) BackfillForAddresses(ctx context.Context, addresses []PublicKey, fromSlot, toSlot uint64) (orderedBlocks <-chan Block, cleanUp func(), err error) { + slotsToFetch, err := c.getSlotsToFetch(ctx, addresses, fromSlot, toSlot) + if err != nil { + return nil, func() {}, fmt.Errorf("failed to identify slots to fetch: %w", err) } - p.actual[evt.SlotNumber] = append(evts, evt) + c.lggr.Debugw("Got all slots that need fetching for backfill operations", "addresses", PublicKeysToString(addresses), "fromSlot", fromSlot, "toSlot", toSlot, "slotsToFetch", slotsToFetch) - return nil -} - -func (p *orderedParser) expectations(block uint64) (int, bool, error) { - expectations, ok := p.expect[block] - if !ok { - return 0, false, fmt.Errorf("%w: %d", errExpectationsNotSet, block) + unorderedBlocks, err := c.scheduleBlocksFetching(ctx, slotsToFetch) + if err != nil { + return nil, func() {}, 
fmt.Errorf("failed to schedule blocks to fetch: %w", err) } - - evts, ok := p.actual[block] - if !ok { - return 0, false, fmt.Errorf("%w: %d", errExpectationsNotSet, block) + blocksSorter, sortedBlocks := newBlocksSorter(unorderedBlocks, c.lggr, slotsToFetch) + err = blocksSorter.Start(ctx) + if err != nil { + return nil, func() {}, fmt.Errorf("failed to start blocks sorter: %w", err) } - return expectations, expectations == len(evts), nil -} - -func (p *orderedParser) clearExpectations(block uint64) { - delete(p.expect, block) - delete(p.actual, block) -} - -func (p *orderedParser) run(_ context.Context) { - p.mu.Lock() - defer p.mu.Unlock() - - _ = p.sendReadySlots() -} - -func (p *orderedParser) sendReadySlots() error { - // start at the lowest block and find ready blocks - for element := p.blocks.Front(); element != nil; element = p.blocks.Front() { - block := element.Value.(uint64) - // if no expectations are set, we are still waiting on information for the block. - // if expectations set and not met, we are still waiting on information for the block - // no other block data should be sent until this is resolved - exp, met, err := p.expectations(block) - if err != nil || !met { - break - } - - // if expectations are 0 -> remove and continue - if exp == 0 { - p.clearExpectations(block) - p.blocks.Remove(element) - - continue - } - - evts, ok := p.actual[block] - if !ok { - return errInvalidState - } - - var errs error - for _, evt := range evts { - errs = errors.Join(errs, p.parser.Process(evt)) - } - - // need possible retry - if errs != nil { - return errs + cleanUp = func() { + err := blocksSorter.Close() + if err != nil { + blocksSorter.lggr.Errorw("Failed to close blocks sorter", "err", err) } - - p.blocks.Remove(element) - p.clearExpectations(block) } - return nil + return sortedBlocks, cleanUp, nil } - -var ( - errExpectationsNotSet = errors.New("expectations not set") - errInvalidState = errors.New("invalid state") -) diff --git 
a/pkg/solana/logpoller/loader_test.go b/pkg/solana/logpoller/loader_test.go index 9eb2482bd..f7257554b 100644 --- a/pkg/solana/logpoller/loader_test.go +++ b/pkg/solana/logpoller/loader_test.go @@ -3,14 +3,13 @@ package logpoller_test import ( "context" "crypto/rand" - "sync" + "slices" "sync/atomic" "testing" "time" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -31,93 +30,13 @@ var ( } ) -func TestEncodedLogCollector_StartClose(t *testing.T) { - t.Parallel() - - client := new(mocks.RPCClient) - ctx := tests.Context(t) - - collector := logpoller.NewEncodedLogCollector(client, nil, logger.Nop()) - - assert.NoError(t, collector.Start(ctx)) - assert.NoError(t, collector.Close()) -} - -func TestEncodedLogCollector_ParseSingleEvent(t *testing.T) { - t.Parallel() - - client := new(mocks.RPCClient) - parser := new(testParser) - ctx := tests.Context(t) - - collector := logpoller.NewEncodedLogCollector(client, parser, logger.Nop()) - - require.NoError(t, collector.Start(ctx)) - t.Cleanup(func() { - require.NoError(t, collector.Close()) - }) - - var latest atomic.Uint64 - - latest.Store(uint64(40)) - - client.EXPECT(). - LatestBlockhash(mock.Anything). - RunAndReturn(latestBlockhashReturnFunc(&latest)) - - client.EXPECT(). - GetBlocks( - mock.Anything, - mock.MatchedBy(getBlocksStartValMatcher), - mock.MatchedBy(getBlocksEndValMatcher(&latest)), - ). - RunAndReturn(getBlocksReturnFunc(false)) - - client.EXPECT(). - GetBlockWithOpts(mock.Anything, mock.Anything, mock.Anything). 
- RunAndReturn(func(_ context.Context, slot uint64, _ *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { - height := slot - 1 - timeStamp := solana.UnixTimeSeconds(time.Now().Unix()) - - result := rpc.GetBlockResult{ - Transactions: []rpc.TransactionWithMeta{}, - Signatures: []solana.Signature{}, - BlockHeight: &height, - BlockTime: &timeStamp, - } - - _, _ = rand.Read(result.Blockhash[:]) - - if slot == 42 { - var sig solana.Signature - _, _ = rand.Read(sig[:]) - - result.Signatures = []solana.Signature{sig} - result.Transactions = []rpc.TransactionWithMeta{ - { - Meta: &rpc.TransactionMeta{ - LogMessages: messages, - }, - }, - } - } - - return &result, nil - }) - - tests.AssertEventually(t, func() bool { - return parser.Called() - }) -} - func TestEncodedLogCollector_MultipleEventOrdered(t *testing.T) { t.Parallel() - client := new(mocks.RPCClient) - parser := new(testParser) + client := mocks.NewRPCClient(t) ctx := tests.Context(t) - collector := logpoller.NewEncodedLogCollector(client, parser, logger.Nop()) + collector := logpoller.NewEncodedLogCollector(client, logger.TestSugared(t)) require.NoError(t, collector.Start(ctx)) t.Cleanup(func() { @@ -128,7 +47,24 @@ func TestEncodedLogCollector_MultipleEventOrdered(t *testing.T) { latest.Store(uint64(40)) + address, err := solana.PublicKeyFromBase58("J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4") + require.NoError(t, err) slots := []uint64{44, 43, 42, 41} + var txSigsResponse []*rpc.TransactionSignature + for _, slot := range slots { + txSigsResponse = append(txSigsResponse, &rpc.TransactionSignature{Slot: slot}) + } + client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, key solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { + switch *opts.MinContextSlot { + case 44: + return txSigsResponse, nil + case 41: + return nil, nil + default: + panic("unexpected call") + } + }).Twice() + sigs := 
make([]solana.Signature, len(slots)) hashes := make([]solana.Hash, len(slots)) scrambler := &slotUnsync{ch: make(chan struct{})} @@ -140,28 +76,12 @@ func TestEncodedLogCollector_MultipleEventOrdered(t *testing.T) { _, _ = rand.Read(hashes[idx][:]) } - client.EXPECT(). - LatestBlockhash(mock.Anything). - RunAndReturn(latestBlockhashReturnFunc(&latest)) - - client.EXPECT(). - GetBlocks( - mock.Anything, - mock.MatchedBy(getBlocksStartValMatcher), - mock.MatchedBy(getBlocksEndValMatcher(&latest)), - ). - RunAndReturn(getBlocksReturnFunc(false)) - client.EXPECT(). GetBlockWithOpts(mock.Anything, mock.Anything, mock.Anything). RunAndReturn(func(_ context.Context, slot uint64, _ *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { - slotIdx := -1 - for idx, slt := range slots { - if slt == slot { - slotIdx = idx - - break - } + slotIdx := slices.Index(slots, slot) + if slotIdx == -1 { + require.Fail(t, "trying to get block for unexpected slot", slot) } // imitate loading block data out of order @@ -170,39 +90,33 @@ func TestEncodedLogCollector_MultipleEventOrdered(t *testing.T) { height := slot - 1 - if slotIdx == -1 { - var hash solana.Hash - _, _ = rand.Read(hash[:]) - - return &rpc.GetBlockResult{ - Blockhash: hash, - Transactions: []rpc.TransactionWithMeta{}, - Signatures: []solana.Signature{}, - BlockHeight: &height, - BlockTime: &timeStamp, - }, nil - } - + tx := solana.Transaction{Signatures: []solana.Signature{sigs[slotIdx]}} + binaryTx, txErr := tx.MarshalBinary() + require.NoError(t, txErr) return &rpc.GetBlockResult{ Blockhash: hashes[slotIdx], Transactions: []rpc.TransactionWithMeta{ { + Transaction: rpc.DataBytesOrJSONFromBytes(binaryTx), Meta: &rpc.TransactionMeta{ LogMessages: messages, }, }, }, - Signatures: []solana.Signature{sigs[slotIdx]}, BlockHeight: &height, BlockTime: &timeStamp, }, nil }) - tests.AssertEventually(t, func() bool { - return len(parser.Events()) >= 4 - }) + results, cleanUp, err := collector.BackfillForAddresses(tests.Context(t), 
[]logpoller.PublicKey{logpoller.PublicKey(address)}, 41, 44) + require.NoError(t, err) + defer cleanUp() + var events []logpoller.ProgramEvent + for event := range results { + events = append(events, event.Events...) + } - assert.Equal(t, []logpoller.ProgramEvent{ + require.Equal(t, []logpoller.ProgramEvent{ { BlockData: logpoller.BlockData{ SlotNumber: 41, @@ -255,9 +169,7 @@ func TestEncodedLogCollector_MultipleEventOrdered(t *testing.T) { Program: "J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4", Data: "HDQnaQjSWwkNAAAASGVsbG8sIFdvcmxkISoAAAAAAAAA", }, - }, parser.Events()) - - client.AssertExpectations(t) + }, events) } type slotUnsync struct { @@ -268,378 +180,10 @@ type slotUnsync struct { func (u *slotUnsync) next() { if u.waiting.Load() { u.waiting.Store(false) - <-u.ch - return } - u.waiting.Store(true) u.ch <- struct{}{} } - -func TestEncodedLogCollector_BackfillForAddress(t *testing.T) { - t.Parallel() - - client := new(mocks.RPCClient) - parser := new(testParser) - ctx := tests.Context(t) - - collector := logpoller.NewEncodedLogCollector(client, parser, logger.Nop()) - - require.NoError(t, collector.Start(ctx)) - t.Cleanup(func() { - require.NoError(t, collector.Close()) - }) - - pubKey := solana.PublicKey{2, 1, 4, 2} - slots := []uint64{44, 43, 42} - sigs := make([]solana.Signature, len(slots)*2) - - for idx := range len(sigs) { - _, _ = rand.Read(sigs[idx][:]) - } - - var latest atomic.Uint64 - - latest.Store(uint64(40)) - - // GetLatestBlockhash might be called at start-up; make it take some time because the result isn't needed for this test - client.EXPECT(). - LatestBlockhash(mock.Anything). - RunAndReturn(latestBlockhashReturnFunc(&latest)). - After(2 * time.Second). - Maybe() - - client.EXPECT(). - GetBlocks( - mock.Anything, - mock.MatchedBy(getBlocksStartValMatcher), - mock.MatchedBy(getBlocksEndValMatcher(&latest)), - ). - RunAndReturn(getBlocksReturnFunc(true)) - - client.EXPECT(). 
- GetSignaturesForAddressWithOpts(mock.Anything, pubKey, mock.Anything). - RunAndReturn(func(_ context.Context, pk solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { - ret := []*rpc.TransactionSignature{} - - if opts != nil && opts.Before.String() == (solana.Signature{}).String() { - for idx := range slots { - ret = append(ret, &rpc.TransactionSignature{Slot: slots[idx], Signature: sigs[idx*2]}) - ret = append(ret, &rpc.TransactionSignature{Slot: slots[idx], Signature: sigs[(idx*2)+1]}) - } - } - - return ret, nil - }) - - client.EXPECT(). - GetBlockWithOpts(mock.Anything, mock.Anything, mock.Anything). - RunAndReturn(func(_ context.Context, slot uint64, _ *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { - idx := -1 - for sIdx, slt := range slots { - if slt == slot { - idx = sIdx - - break - } - } - - height := slot - 1 - timeStamp := solana.UnixTimeSeconds(time.Now().Unix()) - - if idx == -1 { - return &rpc.GetBlockResult{ - Transactions: []rpc.TransactionWithMeta{}, - Signatures: []solana.Signature{}, - BlockHeight: &height, - BlockTime: &timeStamp, - }, nil - } - - return &rpc.GetBlockResult{ - Transactions: []rpc.TransactionWithMeta{ - { - Meta: &rpc.TransactionMeta{ - LogMessages: messages, - }, - }, - { - Meta: &rpc.TransactionMeta{ - LogMessages: messages, - }, - }, - }, - Signatures: []solana.Signature{sigs[idx*2], sigs[(idx*2)+1]}, - BlockHeight: &height, - BlockTime: &timeStamp, - }, nil - }) - - assert.NoError(t, collector.BackfillForAddress(ctx, pubKey.String(), 42)) - - tests.AssertEventually(t, func() bool { - return parser.Count() == 6 - }) -} - -func BenchmarkEncodedLogCollector(b *testing.B) { - ctx := tests.Context(b) - - ticker := time.NewTimer(500 * time.Millisecond) - defer ticker.Stop() - - parser := new(testParser) - blockProducer := &testBlockProducer{ - b: b, - nextSlot: 10, - blockSigs: make(map[uint64][]solana.Signature), - sigs: make(map[string]bool), - } - - collector := 
logpoller.NewEncodedLogCollector(blockProducer, parser, logger.Nop()) - - require.NoError(b, collector.Start(ctx)) - b.Cleanup(func() { - require.NoError(b, collector.Close()) - }) - - b.ReportAllocs() - b.ResetTimer() - -BenchLoop: - for i := 0; i < b.N; i++ { - select { - case <-ticker.C: - blockProducer.incrementSlot() - case <-ctx.Done(): - break BenchLoop - default: - blockProducer.makeEvent() - } - } - - b.ReportMetric(float64(parser.Count())/b.Elapsed().Seconds(), "events/sec") - b.ReportMetric(float64(blockProducer.Count())/b.Elapsed().Seconds(), "rcp_calls/sec") -} - -type testBlockProducer struct { - b *testing.B - - mu sync.RWMutex - nextSlot uint64 - blockSigs map[uint64][]solana.Signature - sigs map[string]bool - count uint64 -} - -func (p *testBlockProducer) incrementSlot() { - p.b.Helper() - - p.mu.Lock() - defer p.mu.Unlock() - - p.nextSlot++ - p.blockSigs[p.nextSlot] = make([]solana.Signature, 0, 100) -} - -func (p *testBlockProducer) makeEvent() { - p.b.Helper() - - p.mu.Lock() - defer p.mu.Unlock() - - var sig solana.Signature - - _, _ = rand.Read(sig[:]) - - p.blockSigs[p.nextSlot] = append(p.blockSigs[p.nextSlot], sig) - p.sigs[sig.String()] = true -} - -func (p *testBlockProducer) Count() uint64 { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.count -} - -func (p *testBlockProducer) LatestBlockhash(_ context.Context) (out *rpc.GetLatestBlockhashResult, err error) { - p.b.Helper() - - p.mu.Lock() - p.count++ - p.mu.Unlock() - - p.mu.RLock() - defer p.mu.RUnlock() - - return &rpc.GetLatestBlockhashResult{ - RPCContext: rpc.RPCContext{ - Context: rpc.Context{ - Slot: p.nextSlot, - }, - }, - }, nil -} - -func (p *testBlockProducer) GetBlocks(_ context.Context, startSlot uint64, endSlot *uint64) (out rpc.BlocksResult, err error) { - p.b.Helper() - - p.mu.Lock() - p.count++ - p.mu.Unlock() - - blocks := make([]uint64, *endSlot-startSlot) - for idx := range blocks { - blocks[idx] = startSlot + uint64(idx) - } - - return blocks, nil -} - -func (p 
*testBlockProducer) GetBlockWithOpts(_ context.Context, block uint64, opts *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { - p.b.Helper() - - p.mu.Lock() - defer p.mu.Unlock() - - var result rpc.GetBlockResult - - sigs := p.blockSigs[block] - - switch opts.TransactionDetails { - case rpc.TransactionDetailsFull: - result.Transactions = make([]rpc.TransactionWithMeta, len(sigs)) - for idx, sig := range sigs { - delete(p.sigs, sig.String()) - - result.Transactions[idx] = rpc.TransactionWithMeta{ - Slot: block, - Meta: &rpc.TransactionMeta{ - LogMessages: messages, - }, - } - } - case rpc.TransactionDetailsSignatures: - result.Signatures = sigs - delete(p.blockSigs, block) - case rpc.TransactionDetailsNone: - fallthrough - default: - } - - p.count++ - result.BlockHeight = &block - - return &result, nil -} - -func (p *testBlockProducer) GetSignaturesForAddressWithOpts(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { - p.b.Helper() - - return nil, nil -} - -func (p *testBlockProducer) GetTransaction(_ context.Context, sig solana.Signature, _ *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { - p.b.Helper() - - p.mu.Lock() - defer p.mu.Unlock() - - var msgs []string - - p.count++ - _, ok := p.sigs[sig.String()] - if ok { - msgs = messages - } - - delete(p.sigs, sig.String()) - - return &rpc.GetTransactionResult{ - Meta: &rpc.TransactionMeta{ - LogMessages: msgs, - }, - }, nil -} - -type testParser struct { - called atomic.Bool - mu sync.Mutex - events []logpoller.ProgramEvent -} - -func (p *testParser) Process(event logpoller.ProgramEvent) error { - p.called.Store(true) - - p.mu.Lock() - p.events = append(p.events, event) - p.mu.Unlock() - - return nil -} - -func (p *testParser) Called() bool { - return p.called.Load() -} - -func (p *testParser) Count() uint64 { - p.mu.Lock() - defer p.mu.Unlock() - - return uint64(len(p.events)) -} - -func (p *testParser) Events() []logpoller.ProgramEvent { - 
p.mu.Lock() - defer p.mu.Unlock() - - return p.events -} - -func latestBlockhashReturnFunc(latest *atomic.Uint64) func(context.Context) (*rpc.GetLatestBlockhashResult, error) { - return func(ctx context.Context) (*rpc.GetLatestBlockhashResult, error) { - defer func() { - latest.Store(latest.Load() + 2) - }() - - return &rpc.GetLatestBlockhashResult{ - RPCContext: rpc.RPCContext{ - Context: rpc.Context{ - Slot: latest.Load(), - }, - }, - Value: &rpc.LatestBlockhashResult{ - LastValidBlockHeight: latest.Load() - 1, - }, - }, nil - } -} - -func getBlocksReturnFunc(empty bool) func(context.Context, uint64, *uint64) (rpc.BlocksResult, error) { - return func(_ context.Context, u1 uint64, u2 *uint64) (rpc.BlocksResult, error) { - blocks := []uint64{} - - if !empty { - blocks = make([]uint64, *u2-u1+1) - for idx := range blocks { - blocks[idx] = u1 + uint64(idx) - } - } - - return blocks, nil - } -} - -func getBlocksStartValMatcher(val uint64) bool { - return val > uint64(0) -} - -func getBlocksEndValMatcher(latest *atomic.Uint64) func(*uint64) bool { - return func(val *uint64) bool { - return val != nil && *val <= latest.Load() - } -} diff --git a/pkg/solana/logpoller/log_poller.go b/pkg/solana/logpoller/log_poller.go index c82e8eb47..2ab672eb9 100644 --- a/pkg/solana/logpoller/log_poller.go +++ b/pkg/solana/logpoller/log_poller.go @@ -2,17 +2,18 @@ package logpoller import ( "context" + "database/sql" "encoding/base64" "errors" "fmt" + "iter" "math" "time" + "github.com/gagliardetto/solana-go/rpc" + "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" - "github.com/smartcontractkit/chainlink-common/pkg/utils" - - "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) var ( @@ -27,24 +28,43 @@ type ORM interface { DeleteFilters(ctx context.Context, filters map[int64]Filter) error MarkFilterDeleted(ctx context.Context, id int64) (err error) MarkFilterBackfilled(ctx context.Context, id int64) (err 
error) + GetLatestBlock(ctx context.Context) (int64, error) InsertLogs(context.Context, []Log) (err error) SelectSeqNums(ctx context.Context) (map[int64]int64, error) } +type logsLoader interface { + BackfillForAddresses(ctx context.Context, addresses []PublicKey, fromSlot, toSlot uint64) (orderedBlocks <-chan Block, cleanUp func(), err error) +} + +type filtersI interface { + RegisterFilter(ctx context.Context, filter Filter) error + UnregisterFilter(ctx context.Context, name string) error + LoadFilters(ctx context.Context) error + PruneFilters(ctx context.Context) error + GetDistinctAddresses(ctx context.Context) ([]PublicKey, error) + GetFiltersToBackfill() []Filter + MarkFilterBackfilled(ctx context.Context, filterID int64) error + MatchingFiltersForEncodedEvent(event ProgramEvent) iter.Seq[Filter] + DecodeSubKey(ctx context.Context, raw []byte, ID int64, subKeyPath []string) (any, error) + IncrementSeqNum(filterID int64) int64 +} + type Service struct { services.StateMachine services.Service eng *services.Engine - lggr logger.SugaredLogger - orm ORM - client client.Reader - collector *EncodedLogCollector - - filters *filters + lggr logger.SugaredLogger + orm ORM + lastProcessedSlot int64 + client RPCClient + loader logsLoader + filters filtersI + processBlocks func(ctx context.Context, blocks []Block) error } -func New(lggr logger.SugaredLogger, orm ORM, cl client.Reader) *Service { +func New(lggr logger.SugaredLogger, orm ORM, cl RPCClient) *Service { lggr = logger.Sugared(logger.Named(lggr, "LogPoller")) lp := &Service{ orm: orm, @@ -52,18 +72,36 @@ func New(lggr logger.SugaredLogger, orm ORM, cl client.Reader) *Service { filters: newFilters(lggr, orm), } + lp.processBlocks = lp.processBlocksImpl + lp.Service, lp.eng = services.Config{ Name: "LogPollerService", Start: lp.start, + NewSubServices: func(l logger.Logger) []services.Service { + loader := NewEncodedLogCollector(cl, lggr) + lp.loader = loader + return []services.Service{loader} + }, 
}.NewServiceEngine(lggr) lp.lggr = lp.eng.SugaredLogger return lp } +func NewWithCustomProcessor(lggr logger.SugaredLogger, orm ORM, client RPCClient, processBlocks func(ctx context.Context, blocks []Block) error) *Service { + lp := New(lggr, orm, client) + lp.processBlocks = processBlocks + return lp +} + func (lp *Service) start(_ context.Context) error { - lp.eng.Go(lp.run) - lp.eng.Go(lp.backgroundWorkerRun) + lp.eng.GoTick(services.NewTicker(time.Second), func(ctx context.Context) { + err := lp.run(ctx) + if err != nil { + lp.lggr.Errorw("log poller iteration failed - retrying", "err", err) + } + }) + lp.eng.GoTick(services.NewTicker(time.Minute), lp.backgroundWorkerRun) return nil } @@ -75,10 +113,7 @@ func makeLogIndex(txIndex int, txLogIndex uint) (int64, error) { } // Process - process stream of events coming from log ingester -func (lp *Service) Process(programEvent ProgramEvent) (err error) { - ctx, cancel := utils.ContextFromChan(lp.eng.StopChan) - defer cancel() - +func (lp *Service) Process(ctx context.Context, programEvent ProgramEvent) (err error) { // This should never happen, since the log collector isn't started until after the filters // get loaded. But just in case, return an error if they aren't so the collector knows to retry later. 
if err = lp.filters.LoadFilters(ctx); err != nil { @@ -94,15 +129,16 @@ func (lp *Service) Process(programEvent ProgramEvent) (err error) { var logs []Log for filter := range matchingFilters { - logIndex, logIndexErr := makeLogIndex(blockData.TransactionIndex, blockData.TransactionLogIndex) - if logIndexErr != nil { - lp.lggr.Critical(err) + var logIndex int64 + logIndex, err = makeLogIndex(blockData.TransactionIndex, blockData.TransactionLogIndex) + if err != nil { + lp.lggr.Criticalw("failed to make log index", "err", err, "tx", programEvent.TransactionHash) return err } if blockData.SlotNumber == math.MaxInt64 { - errSlot := fmt.Errorf("slot number %d out of range", blockData.SlotNumber) + err = fmt.Errorf("slot number %d out of range", blockData.SlotNumber) lp.lggr.Critical(err.Error()) - return errSlot + return err } log := Log{ FilterID: filter.ID, @@ -153,11 +189,7 @@ func (lp *Service) Process(programEvent ProgramEvent) (err error) { return nil } - err = lp.orm.InsertLogs(ctx, logs) - if err != nil { - return err - } - return nil + return lp.orm.InsertLogs(ctx, logs) } // RegisterFilter - refer to filters.RegisterFilter for details. 
@@ -174,86 +206,179 @@ func (lp *Service) UnregisterFilter(ctx context.Context, name string) error { return lp.filters.UnregisterFilter(ctx, name) } -func (lp *Service) retryUntilSuccess(ctx context.Context, failMessage string, fn func(context.Context) error) error { - retryTicker := services.TickerConfig{Initial: 0, JitterPct: services.DefaultJitter}.NewTicker(time.Second) - defer retryTicker.Stop() +func (lp *Service) getLastProcessedSlot(ctx context.Context) (int64, error) { + if lp.lastProcessedSlot != 0 { + return lp.lastProcessedSlot, nil + } + + latestDBBlock, err := lp.orm.GetLatestBlock(ctx) + if err == nil { + return latestDBBlock, nil + } + + if !errors.Is(err, sql.ErrNoRows) { + return 0, fmt.Errorf("error getting latest block from db: %w", err) + } + + latestFinalizedSlot, err := lp.client.SlotHeightWithCommitment(ctx, rpc.CommitmentFinalized) + if err != nil { + return 0, fmt.Errorf("error getting latest slot from RPC: %w", err) + } + + if latestFinalizedSlot == 0 { + return 0, fmt.Errorf("latest finalized slot is 0 - waiting for next slot to start processing") + } + // nolint:gosec + // G115: integer overflow conversion uint64 -> int64 + return int64(latestFinalizedSlot) - 1, nil // +} + +func (lp *Service) backfillFilters(ctx context.Context, filters []Filter, to int64) error { + addressesSet := make(map[PublicKey]struct{}) + addresses := make([]PublicKey, 0, len(filters)) + minSlot := to + for _, filter := range filters { + if _, ok := addressesSet[filter.Address]; !ok { + addressesSet[filter.Address] = struct{}{} + addresses = append(addresses, filter.Address) + } + if filter.StartingBlock < minSlot { + minSlot = filter.StartingBlock + } + } + + err := lp.processBlocksRange(ctx, addresses, minSlot, to) + if err != nil { + return err + } + + for _, filter := range filters { + filterErr := lp.filters.MarkFilterBackfilled(ctx, filter.ID) + if filterErr != nil { + err = errors.Join(err, fmt.Errorf("failed to mark filter %d backfilled: %w", filter.ID, 
filterErr)) + } + } + + return err +} + +func (lp *Service) processBlocksRange(ctx context.Context, addresses []PublicKey, from, to int64) error { + // nolint:gosec + // G115: integer overflow conversion uint64 -> int64 + blocks, cleanup, err := lp.loader.BackfillForAddresses(ctx, addresses, uint64(from), uint64(to)) + if err != nil { + return fmt.Errorf("error backfilling filters: %w", err) + } + defer cleanup() +consumedAllBlocks: for { select { case <-ctx.Done(): return ctx.Err() - case <-retryTicker.C: + case block, ok := <-blocks: + if !ok { + break consumedAllBlocks + } + + batch := []Block{block} + batch = appendBuffered(blocks, blocksChBuffer, batch) + err = lp.processBlocks(ctx, batch) + if err != nil { + return fmt.Errorf("error processing blocks: %w", err) + } } - err := fn(ctx) - if err == nil { - return nil + } + + return nil +} + +func (lp *Service) processBlocksImpl(ctx context.Context, blocks []Block) error { + for _, block := range blocks { + for _, event := range block.Events { + err := lp.Process(ctx, event) + if err != nil { + return fmt.Errorf("error processing event for tx %s in block %d: %w", event.TransactionHash, block.SlotNumber, err) + } } - lp.lggr.Errorw(failMessage, "err", err) } - // unreachable + + return nil } -func (lp *Service) run(ctx context.Context) { - err := lp.retryUntilSuccess(ctx, "failed loading filters in init Service loop, retrying later", lp.filters.LoadFilters) +func (lp *Service) run(ctx context.Context) error { + err := lp.filters.LoadFilters(ctx) if err != nil { - lp.lggr.Warnw("never loaded filters before shutdown", "err", err) - return + return fmt.Errorf("error loading filters: %w", err) } - // safe to start fetching logs, now that filters are loaded - err = lp.retryUntilSuccess(ctx, "failed to start EncodedLogCollector, retrying later", lp.collector.Start) + lastProcessedSlot, err := lp.getLastProcessedSlot(ctx) if err != nil { - lp.lggr.Warnw("EncodedLogCollector never started before shutdown", "err", err) - 
return + return fmt.Errorf("failed getting last processed slot: %w", err) } - defer lp.collector.Close() - var blocks chan struct { - BlockNumber int64 - Logs any // to be defined + filtersToBackfill := lp.filters.GetFiltersToBackfill() + if len(filtersToBackfill) != 0 { + lp.lggr.Debugw("Got new filters to backfill", "filters", filtersToBackfill) + return lp.backfillFilters(ctx, filtersToBackfill, lastProcessedSlot) } - for { - select { - case <-ctx.Done(): - return - case block := <-blocks: - filtersToBackfill := lp.filters.GetFiltersToBackfill() - - // TODO: NONEVM-916 parse, filters and persist logs - // NOTE: removal of filters occurs in the separate goroutine, so there is a chance that upon insert - // of log corresponding filter won't be present in the db. Ensure to refilter and retry on insert error - for i := range filtersToBackfill { - filter := filtersToBackfill[i] - lp.eng.Go(func(ctx context.Context) { - lp.startFilterBackfill(ctx, filter, block.BlockNumber) - }) - } - } + addresses, err := lp.filters.GetDistinctAddresses(ctx) + if err != nil { + return fmt.Errorf("failed getting addresses: %w", err) } + if len(addresses) == 0 { + return nil + } + rawHighestSlot, err := lp.client.SlotHeightWithCommitment(ctx, rpc.CommitmentFinalized) + if err != nil { + return fmt.Errorf("failed getting highest slot: %w", err) + } + + // nolint:gosec + // G115: integer overflow conversion uint64 -> int64 + highestSlot := int64(rawHighestSlot) + + if lastProcessedSlot > highestSlot { + return fmt.Errorf("last processed slot %d is higher than highest RPC slot %d", lastProcessedSlot, highestSlot) + } + + if lastProcessedSlot == highestSlot { + lp.lggr.Debugw("RPC's latest finalized block is the same as latest processed - skipping", "lastProcessedSlot", lastProcessedSlot) + return nil + } + + lp.lggr.Debugw("Got new slot range to process", "from", lastProcessedSlot+1, "to", highestSlot) + err = lp.processBlocksRange(ctx, addresses, lastProcessedSlot+1, highestSlot) + if 
err != nil { + return fmt.Errorf("failed processing block range [%d, %d]: %w", lastProcessedSlot+1, highestSlot, err) + } + + lp.lastProcessedSlot = highestSlot + return nil } -func (lp *Service) backgroundWorkerRun(ctx context.Context) { - pruneFilters := services.NewTicker(time.Minute) - defer pruneFilters.Stop() +func appendBuffered(ch <-chan Block, max int, blocks []Block) []Block { for { select { - case <-ctx.Done(): - return - case <-pruneFilters.C: - err := lp.filters.PruneFilters(ctx) - if err != nil { - lp.lggr.Errorw("Failed to prune filters", "err", err) + case block, ok := <-ch: + if !ok { + return blocks } + + blocks = append(blocks, block) + if len(blocks) >= max { + return blocks + } + default: + return blocks } } } -func (lp *Service) startFilterBackfill(ctx context.Context, filter Filter, toBlock int64) { - // TODO: NONEVM-916 start backfill - lp.lggr.Debugw("Starting filter backfill", "filter", filter) - err := lp.filters.MarkFilterBackfilled(ctx, filter.ID) +func (lp *Service) backgroundWorkerRun(ctx context.Context) { + err := lp.filters.PruneFilters(ctx) if err != nil { - lp.lggr.Errorw("Failed to mark filter backfill", "filter", filter, "err", err) + lp.lggr.Errorw("Failed to prune filters", "err", err) } } diff --git a/pkg/solana/logpoller/log_poller_test.go b/pkg/solana/logpoller/log_poller_test.go index e5006ae7b..033f05b19 100644 --- a/pkg/solana/logpoller/log_poller_test.go +++ b/pkg/solana/logpoller/log_poller_test.go @@ -2,24 +2,249 @@ package logpoller import ( "context" + "database/sql" "encoding/base64" "encoding/json" + "errors" "math/rand" + "sync/atomic" "testing" bin "github.com/gagliardetto/binary" "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" "github.com/google/uuid" - "github.com/smartcontractkit/chainlink-common/pkg/logger" - "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" 
"github.com/stretchr/testify/require" - clientmocks "github.com/smartcontractkit/chainlink-solana/pkg/solana/client/mocks" + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/codec" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/mocks" ) +type mockedLP struct { + ORM *MockORM + Client *mocks.RPCClient + Loader *mockLogsLoader + Filters *mockFilters + LogPoller *Service +} + +func newMockedLP(t *testing.T) mockedLP { + result := mockedLP{ + ORM: NewMockORM(t), + Client: mocks.NewRPCClient(t), + Loader: newMockLogsLoader(t), + Filters: newMockFilters(t), + } + result.LogPoller = New(logger.TestSugared(t), result.ORM, result.Client) + result.LogPoller.loader = result.Loader + result.LogPoller.filters = result.Filters + return result +} + +func TestLogPoller_run(t *testing.T) { + t.Run("Abort run if failed to load filters", func(t *testing.T) { + lp := newMockedLP(t) + expectedErr := errors.New("failed to load filters") + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(expectedErr).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorIs(t, err, expectedErr) + }) + t.Run("Aborts backfill if loader fails", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return([]Filter{{StartingBlock: 16}}).Once() + expectedErr := errors.New("loaderFailed") + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, mock.Anything, uint64(16), uint64(128)).Return(nil, nil, expectedErr).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorIs(t, err, expectedErr) + }) + t.Run("Backfill happy path", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + 
lp.Filters.EXPECT().GetFiltersToBackfill().Return([]Filter{ + {ID: 1, StartingBlock: 16, Address: PublicKey{1, 2, 3}}, + {ID: 2, StartingBlock: 12, Address: PublicKey{1, 2, 3}}, + {ID: 3, StartingBlock: 14, Address: PublicKey{3, 2, 1}}, + }).Once() + done := func() {} + blocks := make(chan Block) + close(blocks) + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, []PublicKey{{1, 2, 3}, {3, 2, 1}}, uint64(12), uint64(128)).Return(blocks, done, nil).Once() + lp.Filters.EXPECT().MarkFilterBackfilled(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, filterID int64) error { + switch filterID { + case 1: + return errors.New("filter no longer exists") + case 2, 3: + return nil + default: + require.Fail(t, "unexpected filter ID") + return nil + } + }).Times(3) + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorContains(t, err, "failed to mark filter 1 backfilled: filter no longer exists") + }) + t.Run("Returns error, if failed to get address for global backfill", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return(nil).Once() + expectedErr := errors.New("failed to load filters") + lp.Filters.EXPECT().GetDistinctAddresses(mock.Anything).Return(nil, expectedErr).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorContains(t, err, "failed getting addresses: failed to load filters") + }) + t.Run("Aborts if there is no addresses", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return(nil).Once() + lp.Filters.EXPECT().GetDistinctAddresses(mock.Anything).Return(nil, nil).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.NoError(t, err) + }) + t.Run("Returns error, if failed to get latest slot", func(t *testing.T) { + lp := 
newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return(nil).Once() + lp.Filters.EXPECT().GetDistinctAddresses(mock.Anything).Return([]PublicKey{{}}, nil).Once() + expectedErr := errors.New("RPC failed") + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(0, expectedErr).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorIs(t, err, expectedErr) + }) + t.Run("Returns error, if last processed slot is higher than latest finalized", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return(nil).Once() + lp.Filters.EXPECT().GetDistinctAddresses(mock.Anything).Return([]PublicKey{{}}, nil).Once() + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(16, nil).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorContains(t, err, "last processed slot 128 is higher than highest RPC slot 16") + }) + t.Run("Returns error, if fails to do block backfill", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return(nil).Once() + lp.Filters.EXPECT().GetDistinctAddresses(mock.Anything).Return([]PublicKey{{}}, nil).Once() + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(130, nil).Once() + expectedError := errors.New("failed to start backfill") + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, mock.Anything, uint64(129), uint64(130)).Return(nil, nil, expectedError).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.ErrorContains(t, err, "failed processing block range [129, 130]: error backfilling filters: failed to start 
backfill") + }) + t.Run("Happy path", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 128 + lp.Filters.EXPECT().LoadFilters(mock.Anything).Return(nil).Once() + lp.Filters.EXPECT().GetFiltersToBackfill().Return(nil).Once() + lp.Filters.EXPECT().GetDistinctAddresses(mock.Anything).Return([]PublicKey{{}}, nil).Once() + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(130, nil).Once() + blocks := make(chan Block) + close(blocks) + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, mock.Anything, uint64(129), uint64(130)).Return(blocks, func() {}, nil).Once() + err := lp.LogPoller.run(tests.Context(t)) + require.NoError(t, err) + require.Equal(t, int64(130), lp.LogPoller.lastProcessedSlot) + }) +} + +func TestLogPoller_getLastProcessedSlot(t *testing.T) { + t.Run("Returns cached value if available", func(t *testing.T) { + lp := newMockedLP(t) + lp.LogPoller.lastProcessedSlot = 10 + result, err := lp.LogPoller.getLastProcessedSlot(tests.Context(t)) + require.NoError(t, err) + require.Equal(t, int64(10), result) + }) + t.Run("Returns error if failed to read from db", func(t *testing.T) { + lp := newMockedLP(t) + expectedErr := errors.New("failed to read from db") + lp.ORM.EXPECT().GetLatestBlock(mock.Anything).Return(0, expectedErr).Once() + _, err := lp.LogPoller.getLastProcessedSlot(tests.Context(t)) + require.ErrorIs(t, err, expectedErr) + }) + t.Run("Reads latest processed from db", func(t *testing.T) { + lp := newMockedLP(t) + expectedValue := int64(10) + lp.ORM.EXPECT().GetLatestBlock(mock.Anything).Return(expectedValue, nil).Once() + result, err := lp.LogPoller.getLastProcessedSlot(tests.Context(t)) + require.NoError(t, err) + require.Equal(t, expectedValue, result) + }) + t.Run("Returns error if failed to read from DB (no data) and RPC", func(t *testing.T) { + lp := newMockedLP(t) + lp.ORM.EXPECT().GetLatestBlock(mock.Anything).Return(0, sql.ErrNoRows).Once() + expectedError := 
errors.New("RPC failed") + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(0, expectedError).Once() + _, err := lp.LogPoller.getLastProcessedSlot(tests.Context(t)) + require.ErrorIs(t, err, expectedError) + }) + t.Run("Returns error if genesis block is the latest finalized", func(t *testing.T) { + lp := newMockedLP(t) + lp.ORM.EXPECT().GetLatestBlock(mock.Anything).Return(0, sql.ErrNoRows).Once() + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(0, nil).Once() + _, err := lp.LogPoller.getLastProcessedSlot(tests.Context(t)) + require.ErrorContains(t, err, "latest finalized slot is 0 - waiting for next slot to start processing") + }) + t.Run("Returns block before latest finalized as last processed if using RPC", func(t *testing.T) { + lp := newMockedLP(t) + lp.ORM.EXPECT().GetLatestBlock(mock.Anything).Return(0, sql.ErrNoRows).Once() + const latestFinalized = uint64(10) + lp.Client.EXPECT().SlotHeightWithCommitment(mock.Anything, rpc.CommitmentFinalized).Return(latestFinalized, nil).Once() + actual, err := lp.LogPoller.getLastProcessedSlot(tests.Context(t)) + require.NoError(t, err) + require.Equal(t, int64(latestFinalized-1), actual) + }) +} + +func TestLogPoller_processBlocksRange(t *testing.T) { + t.Run("Returns error if failed to start backfill", func(t *testing.T) { + lp := newMockedLP(t) + expectedErr := errors.New("failed to start backfill") + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, expectedErr).Once() + err := lp.LogPoller.processBlocksRange(tests.Context(t), nil, 10, 20) + require.ErrorIs(t, err, expectedErr) + }) + funcWithCallExpectation := func(t *testing.T) func() { + var called atomic.Bool + t.Cleanup(func() { + require.True(t, called.Load(), "expected function to be called") + }) + return func() { called.Store(true) } + } + t.Run("Can abort by cancelling context", func(t *testing.T) { + ctx, 
cancel := context.WithCancel(tests.Context(t)) + lp := newMockedLP(t) + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, mock.Anything, mock.Anything, mock.Anything).RunAndReturn(func(context.Context, []PublicKey, uint64, uint64) (<-chan Block, func(), error) { + cancel() + return nil, funcWithCallExpectation(t), nil + }).Once() + err := lp.LogPoller.processBlocksRange(ctx, nil, 10, 20) + require.ErrorIs(t, err, context.Canceled) + }) + t.Run("Happy path", func(t *testing.T) { + lp := newMockedLP(t) + blocks := make(chan Block, 2) + blocks <- Block{} + blocks <- Block{} + close(blocks) + lp.Loader.EXPECT().BackfillForAddresses(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(blocks, funcWithCallExpectation(t), nil).Once() + err := lp.LogPoller.processBlocksRange(tests.Context(t), nil, 10, 20) + require.NoError(t, err) + }) +} + func TestProcess(t *testing.T) { ctx := tests.Context(t) @@ -65,8 +290,8 @@ func TestProcess(t *testing.T) { Data: base64.StdEncoding.EncodeToString(append(eventSig[:], expectedLog.Data...)), } - orm := newMockORM(t) - cl := clientmocks.NewReaderWriter(t) + orm := NewMockORM(t) + cl := mocks.NewRPCClient(t) lggr := logger.Sugared(logger.Test(t)) lp := New(lggr, orm, cl) @@ -117,7 +342,7 @@ func TestProcess(t *testing.T) { err = lp.RegisterFilter(ctx, filter) require.NoError(t, err) - err = lp.Process(ev) + err = lp.Process(ctx, ev) require.NoError(t, err) orm.EXPECT().MarkFilterDeleted(mock.Anything, mock.Anything).Return(nil).Once() diff --git a/pkg/solana/logpoller/mock_filters.go b/pkg/solana/logpoller/mock_filters.go new file mode 100644 index 000000000..29ad41632 --- /dev/null +++ b/pkg/solana/logpoller/mock_filters.go @@ -0,0 +1,530 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package logpoller + +import ( + context "context" + iter "iter" + + mock "github.com/stretchr/testify/mock" +) + +// mockFilters is an autogenerated mock type for the filtersI type +type mockFilters struct { + mock.Mock +} + +type mockFilters_Expecter struct { + mock *mock.Mock +} + +func (_m *mockFilters) EXPECT() *mockFilters_Expecter { + return &mockFilters_Expecter{mock: &_m.Mock} +} + +// DecodeSubKey provides a mock function with given fields: ctx, raw, ID, subKeyPath +func (_m *mockFilters) DecodeSubKey(ctx context.Context, raw []byte, ID int64, subKeyPath []string) (interface{}, error) { + ret := _m.Called(ctx, raw, ID, subKeyPath) + + if len(ret) == 0 { + panic("no return value specified for DecodeSubKey") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, int64, []string) (interface{}, error)); ok { + return rf(ctx, raw, ID, subKeyPath) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, int64, []string) interface{}); ok { + r0 = rf(ctx, raw, ID, subKeyPath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, int64, []string) error); ok { + r1 = rf(ctx, raw, ID, subKeyPath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockFilters_DecodeSubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeSubKey' +type mockFilters_DecodeSubKey_Call struct { + *mock.Call +} + +// DecodeSubKey is a helper method to define mock.On call +// - ctx context.Context +// - raw []byte +// - ID int64 +// - subKeyPath []string +func (_e *mockFilters_Expecter) DecodeSubKey(ctx interface{}, raw interface{}, ID interface{}, subKeyPath interface{}) *mockFilters_DecodeSubKey_Call { + return &mockFilters_DecodeSubKey_Call{Call: _e.mock.On("DecodeSubKey", ctx, raw, ID, subKeyPath)} +} + +func (_c *mockFilters_DecodeSubKey_Call) Run(run func(ctx context.Context, raw []byte, ID int64, 
subKeyPath []string)) *mockFilters_DecodeSubKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]byte), args[2].(int64), args[3].([]string)) + }) + return _c +} + +func (_c *mockFilters_DecodeSubKey_Call) Return(_a0 interface{}, _a1 error) *mockFilters_DecodeSubKey_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockFilters_DecodeSubKey_Call) RunAndReturn(run func(context.Context, []byte, int64, []string) (interface{}, error)) *mockFilters_DecodeSubKey_Call { + _c.Call.Return(run) + return _c +} + +// GetDistinctAddresses provides a mock function with given fields: ctx +func (_m *mockFilters) GetDistinctAddresses(ctx context.Context) ([]PublicKey, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetDistinctAddresses") + } + + var r0 []PublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]PublicKey, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []PublicKey); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]PublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockFilters_GetDistinctAddresses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDistinctAddresses' +type mockFilters_GetDistinctAddresses_Call struct { + *mock.Call +} + +// GetDistinctAddresses is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockFilters_Expecter) GetDistinctAddresses(ctx interface{}) *mockFilters_GetDistinctAddresses_Call { + return &mockFilters_GetDistinctAddresses_Call{Call: _e.mock.On("GetDistinctAddresses", ctx)} +} + +func (_c *mockFilters_GetDistinctAddresses_Call) Run(run func(ctx context.Context)) *mockFilters_GetDistinctAddresses_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + 
}) + return _c +} + +func (_c *mockFilters_GetDistinctAddresses_Call) Return(_a0 []PublicKey, _a1 error) *mockFilters_GetDistinctAddresses_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockFilters_GetDistinctAddresses_Call) RunAndReturn(run func(context.Context) ([]PublicKey, error)) *mockFilters_GetDistinctAddresses_Call { + _c.Call.Return(run) + return _c +} + +// GetFiltersToBackfill provides a mock function with given fields: +func (_m *mockFilters) GetFiltersToBackfill() []Filter { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFiltersToBackfill") + } + + var r0 []Filter + if rf, ok := ret.Get(0).(func() []Filter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]Filter) + } + } + + return r0 +} + +// mockFilters_GetFiltersToBackfill_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFiltersToBackfill' +type mockFilters_GetFiltersToBackfill_Call struct { + *mock.Call +} + +// GetFiltersToBackfill is a helper method to define mock.On call +func (_e *mockFilters_Expecter) GetFiltersToBackfill() *mockFilters_GetFiltersToBackfill_Call { + return &mockFilters_GetFiltersToBackfill_Call{Call: _e.mock.On("GetFiltersToBackfill")} +} + +func (_c *mockFilters_GetFiltersToBackfill_Call) Run(run func()) *mockFilters_GetFiltersToBackfill_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockFilters_GetFiltersToBackfill_Call) Return(_a0 []Filter) *mockFilters_GetFiltersToBackfill_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_GetFiltersToBackfill_Call) RunAndReturn(run func() []Filter) *mockFilters_GetFiltersToBackfill_Call { + _c.Call.Return(run) + return _c +} + +// IncrementSeqNum provides a mock function with given fields: filterID +func (_m *mockFilters) IncrementSeqNum(filterID int64) int64 { + ret := _m.Called(filterID) + + if len(ret) == 0 { + panic("no return value specified for 
IncrementSeqNum") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func(int64) int64); ok { + r0 = rf(filterID) + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// mockFilters_IncrementSeqNum_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IncrementSeqNum' +type mockFilters_IncrementSeqNum_Call struct { + *mock.Call +} + +// IncrementSeqNum is a helper method to define mock.On call +// - filterID int64 +func (_e *mockFilters_Expecter) IncrementSeqNum(filterID interface{}) *mockFilters_IncrementSeqNum_Call { + return &mockFilters_IncrementSeqNum_Call{Call: _e.mock.On("IncrementSeqNum", filterID)} +} + +func (_c *mockFilters_IncrementSeqNum_Call) Run(run func(filterID int64)) *mockFilters_IncrementSeqNum_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int64)) + }) + return _c +} + +func (_c *mockFilters_IncrementSeqNum_Call) Return(_a0 int64) *mockFilters_IncrementSeqNum_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_IncrementSeqNum_Call) RunAndReturn(run func(int64) int64) *mockFilters_IncrementSeqNum_Call { + _c.Call.Return(run) + return _c +} + +// LoadFilters provides a mock function with given fields: ctx +func (_m *mockFilters) LoadFilters(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LoadFilters") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockFilters_LoadFilters_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadFilters' +type mockFilters_LoadFilters_Call struct { + *mock.Call +} + +// LoadFilters is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockFilters_Expecter) LoadFilters(ctx interface{}) *mockFilters_LoadFilters_Call { + return &mockFilters_LoadFilters_Call{Call: _e.mock.On("LoadFilters", ctx)} +} + +func 
(_c *mockFilters_LoadFilters_Call) Run(run func(ctx context.Context)) *mockFilters_LoadFilters_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockFilters_LoadFilters_Call) Return(_a0 error) *mockFilters_LoadFilters_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_LoadFilters_Call) RunAndReturn(run func(context.Context) error) *mockFilters_LoadFilters_Call { + _c.Call.Return(run) + return _c +} + +// MarkFilterBackfilled provides a mock function with given fields: ctx, filterID +func (_m *mockFilters) MarkFilterBackfilled(ctx context.Context, filterID int64) error { + ret := _m.Called(ctx, filterID) + + if len(ret) == 0 { + panic("no return value specified for MarkFilterBackfilled") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, filterID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockFilters_MarkFilterBackfilled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MarkFilterBackfilled' +type mockFilters_MarkFilterBackfilled_Call struct { + *mock.Call +} + +// MarkFilterBackfilled is a helper method to define mock.On call +// - ctx context.Context +// - filterID int64 +func (_e *mockFilters_Expecter) MarkFilterBackfilled(ctx interface{}, filterID interface{}) *mockFilters_MarkFilterBackfilled_Call { + return &mockFilters_MarkFilterBackfilled_Call{Call: _e.mock.On("MarkFilterBackfilled", ctx, filterID)} +} + +func (_c *mockFilters_MarkFilterBackfilled_Call) Run(run func(ctx context.Context, filterID int64)) *mockFilters_MarkFilterBackfilled_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64)) + }) + return _c +} + +func (_c *mockFilters_MarkFilterBackfilled_Call) Return(_a0 error) *mockFilters_MarkFilterBackfilled_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_MarkFilterBackfilled_Call) RunAndReturn(run 
func(context.Context, int64) error) *mockFilters_MarkFilterBackfilled_Call { + _c.Call.Return(run) + return _c +} + +// MatchingFiltersForEncodedEvent provides a mock function with given fields: event +func (_m *mockFilters) MatchingFiltersForEncodedEvent(event ProgramEvent) iter.Seq[Filter] { + ret := _m.Called(event) + + if len(ret) == 0 { + panic("no return value specified for MatchingFiltersForEncodedEvent") + } + + var r0 iter.Seq[Filter] + if rf, ok := ret.Get(0).(func(ProgramEvent) iter.Seq[Filter]); ok { + r0 = rf(event) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(iter.Seq[Filter]) + } + } + + return r0 +} + +// mockFilters_MatchingFiltersForEncodedEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MatchingFiltersForEncodedEvent' +type mockFilters_MatchingFiltersForEncodedEvent_Call struct { + *mock.Call +} + +// MatchingFiltersForEncodedEvent is a helper method to define mock.On call +// - event ProgramEvent +func (_e *mockFilters_Expecter) MatchingFiltersForEncodedEvent(event interface{}) *mockFilters_MatchingFiltersForEncodedEvent_Call { + return &mockFilters_MatchingFiltersForEncodedEvent_Call{Call: _e.mock.On("MatchingFiltersForEncodedEvent", event)} +} + +func (_c *mockFilters_MatchingFiltersForEncodedEvent_Call) Run(run func(event ProgramEvent)) *mockFilters_MatchingFiltersForEncodedEvent_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(ProgramEvent)) + }) + return _c +} + +func (_c *mockFilters_MatchingFiltersForEncodedEvent_Call) Return(_a0 iter.Seq[Filter]) *mockFilters_MatchingFiltersForEncodedEvent_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_MatchingFiltersForEncodedEvent_Call) RunAndReturn(run func(ProgramEvent) iter.Seq[Filter]) *mockFilters_MatchingFiltersForEncodedEvent_Call { + _c.Call.Return(run) + return _c +} + +// PruneFilters provides a mock function with given fields: ctx +func (_m *mockFilters) PruneFilters(ctx context.Context) error { + 
ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for PruneFilters") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockFilters_PruneFilters_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PruneFilters' +type mockFilters_PruneFilters_Call struct { + *mock.Call +} + +// PruneFilters is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockFilters_Expecter) PruneFilters(ctx interface{}) *mockFilters_PruneFilters_Call { + return &mockFilters_PruneFilters_Call{Call: _e.mock.On("PruneFilters", ctx)} +} + +func (_c *mockFilters_PruneFilters_Call) Run(run func(ctx context.Context)) *mockFilters_PruneFilters_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockFilters_PruneFilters_Call) Return(_a0 error) *mockFilters_PruneFilters_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_PruneFilters_Call) RunAndReturn(run func(context.Context) error) *mockFilters_PruneFilters_Call { + _c.Call.Return(run) + return _c +} + +// RegisterFilter provides a mock function with given fields: ctx, filter +func (_m *mockFilters) RegisterFilter(ctx context.Context, filter Filter) error { + ret := _m.Called(ctx, filter) + + if len(ret) == 0 { + panic("no return value specified for RegisterFilter") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, Filter) error); ok { + r0 = rf(ctx, filter) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockFilters_RegisterFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisterFilter' +type mockFilters_RegisterFilter_Call struct { + *mock.Call +} + +// RegisterFilter is a helper method to define mock.On call +// - ctx context.Context +// - filter Filter +func (_e *mockFilters_Expecter) 
RegisterFilter(ctx interface{}, filter interface{}) *mockFilters_RegisterFilter_Call { + return &mockFilters_RegisterFilter_Call{Call: _e.mock.On("RegisterFilter", ctx, filter)} +} + +func (_c *mockFilters_RegisterFilter_Call) Run(run func(ctx context.Context, filter Filter)) *mockFilters_RegisterFilter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(Filter)) + }) + return _c +} + +func (_c *mockFilters_RegisterFilter_Call) Return(_a0 error) *mockFilters_RegisterFilter_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_RegisterFilter_Call) RunAndReturn(run func(context.Context, Filter) error) *mockFilters_RegisterFilter_Call { + _c.Call.Return(run) + return _c +} + +// UnregisterFilter provides a mock function with given fields: ctx, name +func (_m *mockFilters) UnregisterFilter(ctx context.Context, name string) error { + ret := _m.Called(ctx, name) + + if len(ret) == 0 { + panic("no return value specified for UnregisterFilter") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, name) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockFilters_UnregisterFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnregisterFilter' +type mockFilters_UnregisterFilter_Call struct { + *mock.Call +} + +// UnregisterFilter is a helper method to define mock.On call +// - ctx context.Context +// - name string +func (_e *mockFilters_Expecter) UnregisterFilter(ctx interface{}, name interface{}) *mockFilters_UnregisterFilter_Call { + return &mockFilters_UnregisterFilter_Call{Call: _e.mock.On("UnregisterFilter", ctx, name)} +} + +func (_c *mockFilters_UnregisterFilter_Call) Run(run func(ctx context.Context, name string)) *mockFilters_UnregisterFilter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *mockFilters_UnregisterFilter_Call) 
Return(_a0 error) *mockFilters_UnregisterFilter_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockFilters_UnregisterFilter_Call) RunAndReturn(run func(context.Context, string) error) *mockFilters_UnregisterFilter_Call { + _c.Call.Return(run) + return _c +} + +// newMockFilters creates a new instance of mockFilters. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockFilters(t interface { + mock.TestingT + Cleanup(func()) +}) *mockFilters { + mock := &mockFilters{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/solana/logpoller/mock_logs_loader.go b/pkg/solana/logpoller/mock_logs_loader.go new file mode 100644 index 000000000..d3af54644 --- /dev/null +++ b/pkg/solana/logpoller/mock_logs_loader.go @@ -0,0 +1,106 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package logpoller + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// mockLogsLoader is an autogenerated mock type for the logsLoader type +type mockLogsLoader struct { + mock.Mock +} + +type mockLogsLoader_Expecter struct { + mock *mock.Mock +} + +func (_m *mockLogsLoader) EXPECT() *mockLogsLoader_Expecter { + return &mockLogsLoader_Expecter{mock: &_m.Mock} +} + +// BackfillForAddresses provides a mock function with given fields: ctx, addresses, fromSlot, toSlot +func (_m *mockLogsLoader) BackfillForAddresses(ctx context.Context, addresses []PublicKey, fromSlot uint64, toSlot uint64) (<-chan Block, func(), error) { + ret := _m.Called(ctx, addresses, fromSlot, toSlot) + + if len(ret) == 0 { + panic("no return value specified for BackfillForAddresses") + } + + var r0 <-chan Block + var r1 func() + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []PublicKey, uint64, uint64) (<-chan Block, func(), error)); ok { + return rf(ctx, addresses, fromSlot, toSlot) + } + if 
rf, ok := ret.Get(0).(func(context.Context, []PublicKey, uint64, uint64) <-chan Block); ok { + r0 = rf(ctx, addresses, fromSlot, toSlot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []PublicKey, uint64, uint64) func()); ok { + r1 = rf(ctx, addresses, fromSlot, toSlot) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(func()) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []PublicKey, uint64, uint64) error); ok { + r2 = rf(ctx, addresses, fromSlot, toSlot) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockLogsLoader_BackfillForAddresses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BackfillForAddresses' +type mockLogsLoader_BackfillForAddresses_Call struct { + *mock.Call +} + +// BackfillForAddresses is a helper method to define mock.On call +// - ctx context.Context +// - addresses []PublicKey +// - fromSlot uint64 +// - toSlot uint64 +func (_e *mockLogsLoader_Expecter) BackfillForAddresses(ctx interface{}, addresses interface{}, fromSlot interface{}, toSlot interface{}) *mockLogsLoader_BackfillForAddresses_Call { + return &mockLogsLoader_BackfillForAddresses_Call{Call: _e.mock.On("BackfillForAddresses", ctx, addresses, fromSlot, toSlot)} +} + +func (_c *mockLogsLoader_BackfillForAddresses_Call) Run(run func(ctx context.Context, addresses []PublicKey, fromSlot uint64, toSlot uint64)) *mockLogsLoader_BackfillForAddresses_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]PublicKey), args[2].(uint64), args[3].(uint64)) + }) + return _c +} + +func (_c *mockLogsLoader_BackfillForAddresses_Call) Return(orderedBlocks <-chan Block, cleanUp func(), err error) *mockLogsLoader_BackfillForAddresses_Call { + _c.Call.Return(orderedBlocks, cleanUp, err) + return _c +} + +func (_c *mockLogsLoader_BackfillForAddresses_Call) RunAndReturn(run func(context.Context, []PublicKey, 
uint64, uint64) (<-chan Block, func(), error)) *mockLogsLoader_BackfillForAddresses_Call { + _c.Call.Return(run) + return _c +} + +// newMockLogsLoader creates a new instance of mockLogsLoader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockLogsLoader(t interface { + mock.TestingT + Cleanup(func()) +}) *mockLogsLoader { + mock := &mockLogsLoader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/solana/logpoller/mock_orm.go b/pkg/solana/logpoller/mock_orm.go index 1508ba4aa..3b3546266 100644 --- a/pkg/solana/logpoller/mock_orm.go +++ b/pkg/solana/logpoller/mock_orm.go @@ -8,21 +8,21 @@ import ( mock "github.com/stretchr/testify/mock" ) -// mockORM is an autogenerated mock type for the ORM type -type mockORM struct { +// MockORM is an autogenerated mock type for the ORM type +type MockORM struct { mock.Mock } -type mockORM_Expecter struct { +type MockORM_Expecter struct { mock *mock.Mock } -func (_m *mockORM) EXPECT() *mockORM_Expecter { - return &mockORM_Expecter{mock: &_m.Mock} +func (_m *MockORM) EXPECT() *MockORM_Expecter { + return &MockORM_Expecter{mock: &_m.Mock} } // ChainID provides a mock function with given fields: -func (_m *mockORM) ChainID() string { +func (_m *MockORM) ChainID() string { ret := _m.Called() if len(ret) == 0 { @@ -39,35 +39,35 @@ func (_m *mockORM) ChainID() string { return r0 } -// mockORM_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' -type mockORM_ChainID_Call struct { +// MockORM_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type MockORM_ChainID_Call struct { *mock.Call } // ChainID is a helper method to define mock.On call -func (_e *mockORM_Expecter) ChainID() *mockORM_ChainID_Call { - return &mockORM_ChainID_Call{Call: 
_e.mock.On("ChainID")} +func (_e *MockORM_Expecter) ChainID() *MockORM_ChainID_Call { + return &MockORM_ChainID_Call{Call: _e.mock.On("ChainID")} } -func (_c *mockORM_ChainID_Call) Run(run func()) *mockORM_ChainID_Call { +func (_c *MockORM_ChainID_Call) Run(run func()) *MockORM_ChainID_Call { _c.Call.Run(func(args mock.Arguments) { run() }) return _c } -func (_c *mockORM_ChainID_Call) Return(_a0 string) *mockORM_ChainID_Call { +func (_c *MockORM_ChainID_Call) Return(_a0 string) *MockORM_ChainID_Call { _c.Call.Return(_a0) return _c } -func (_c *mockORM_ChainID_Call) RunAndReturn(run func() string) *mockORM_ChainID_Call { +func (_c *MockORM_ChainID_Call) RunAndReturn(run func() string) *MockORM_ChainID_Call { _c.Call.Return(run) return _c } // DeleteFilters provides a mock function with given fields: ctx, filters -func (_m *mockORM) DeleteFilters(ctx context.Context, filters map[int64]Filter) error { +func (_m *MockORM) DeleteFilters(ctx context.Context, filters map[int64]Filter) error { ret := _m.Called(ctx, filters) if len(ret) == 0 { @@ -84,37 +84,93 @@ func (_m *mockORM) DeleteFilters(ctx context.Context, filters map[int64]Filter) return r0 } -// mockORM_DeleteFilters_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteFilters' -type mockORM_DeleteFilters_Call struct { +// MockORM_DeleteFilters_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteFilters' +type MockORM_DeleteFilters_Call struct { *mock.Call } // DeleteFilters is a helper method to define mock.On call // - ctx context.Context // - filters map[int64]Filter -func (_e *mockORM_Expecter) DeleteFilters(ctx interface{}, filters interface{}) *mockORM_DeleteFilters_Call { - return &mockORM_DeleteFilters_Call{Call: _e.mock.On("DeleteFilters", ctx, filters)} +func (_e *MockORM_Expecter) DeleteFilters(ctx interface{}, filters interface{}) *MockORM_DeleteFilters_Call { + return &MockORM_DeleteFilters_Call{Call: 
_e.mock.On("DeleteFilters", ctx, filters)} } -func (_c *mockORM_DeleteFilters_Call) Run(run func(ctx context.Context, filters map[int64]Filter)) *mockORM_DeleteFilters_Call { +func (_c *MockORM_DeleteFilters_Call) Run(run func(ctx context.Context, filters map[int64]Filter)) *MockORM_DeleteFilters_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(map[int64]Filter)) }) return _c } -func (_c *mockORM_DeleteFilters_Call) Return(_a0 error) *mockORM_DeleteFilters_Call { +func (_c *MockORM_DeleteFilters_Call) Return(_a0 error) *MockORM_DeleteFilters_Call { _c.Call.Return(_a0) return _c } -func (_c *mockORM_DeleteFilters_Call) RunAndReturn(run func(context.Context, map[int64]Filter) error) *mockORM_DeleteFilters_Call { +func (_c *MockORM_DeleteFilters_Call) RunAndReturn(run func(context.Context, map[int64]Filter) error) *MockORM_DeleteFilters_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestBlock provides a mock function with given fields: ctx +func (_m *MockORM) GetLatestBlock(ctx context.Context) (int64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockORM_GetLatestBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBlock' +type MockORM_GetLatestBlock_Call struct { + *mock.Call +} + +// GetLatestBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockORM_Expecter) GetLatestBlock(ctx interface{}) *MockORM_GetLatestBlock_Call { + return &MockORM_GetLatestBlock_Call{Call: 
_e.mock.On("GetLatestBlock", ctx)} +} + +func (_c *MockORM_GetLatestBlock_Call) Run(run func(ctx context.Context)) *MockORM_GetLatestBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockORM_GetLatestBlock_Call) Return(_a0 int64, _a1 error) *MockORM_GetLatestBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockORM_GetLatestBlock_Call) RunAndReturn(run func(context.Context) (int64, error)) *MockORM_GetLatestBlock_Call { _c.Call.Return(run) return _c } // InsertFilter provides a mock function with given fields: ctx, filter -func (_m *mockORM) InsertFilter(ctx context.Context, filter Filter) (int64, error) { +func (_m *MockORM) InsertFilter(ctx context.Context, filter Filter) (int64, error) { ret := _m.Called(ctx, filter) if len(ret) == 0 { @@ -141,37 +197,37 @@ func (_m *mockORM) InsertFilter(ctx context.Context, filter Filter) (int64, erro return r0, r1 } -// mockORM_InsertFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertFilter' -type mockORM_InsertFilter_Call struct { +// MockORM_InsertFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertFilter' +type MockORM_InsertFilter_Call struct { *mock.Call } // InsertFilter is a helper method to define mock.On call // - ctx context.Context // - filter Filter -func (_e *mockORM_Expecter) InsertFilter(ctx interface{}, filter interface{}) *mockORM_InsertFilter_Call { - return &mockORM_InsertFilter_Call{Call: _e.mock.On("InsertFilter", ctx, filter)} +func (_e *MockORM_Expecter) InsertFilter(ctx interface{}, filter interface{}) *MockORM_InsertFilter_Call { + return &MockORM_InsertFilter_Call{Call: _e.mock.On("InsertFilter", ctx, filter)} } -func (_c *mockORM_InsertFilter_Call) Run(run func(ctx context.Context, filter Filter)) *mockORM_InsertFilter_Call { +func (_c *MockORM_InsertFilter_Call) Run(run func(ctx context.Context, filter 
Filter)) *MockORM_InsertFilter_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(Filter)) }) return _c } -func (_c *mockORM_InsertFilter_Call) Return(id int64, err error) *mockORM_InsertFilter_Call { +func (_c *MockORM_InsertFilter_Call) Return(id int64, err error) *MockORM_InsertFilter_Call { _c.Call.Return(id, err) return _c } -func (_c *mockORM_InsertFilter_Call) RunAndReturn(run func(context.Context, Filter) (int64, error)) *mockORM_InsertFilter_Call { +func (_c *MockORM_InsertFilter_Call) RunAndReturn(run func(context.Context, Filter) (int64, error)) *MockORM_InsertFilter_Call { _c.Call.Return(run) return _c } // InsertLogs provides a mock function with given fields: _a0, _a1 -func (_m *mockORM) InsertLogs(_a0 context.Context, _a1 []Log) error { +func (_m *MockORM) InsertLogs(_a0 context.Context, _a1 []Log) error { ret := _m.Called(_a0, _a1) if len(ret) == 0 { @@ -188,37 +244,37 @@ func (_m *mockORM) InsertLogs(_a0 context.Context, _a1 []Log) error { return r0 } -// mockORM_InsertLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertLogs' -type mockORM_InsertLogs_Call struct { +// MockORM_InsertLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertLogs' +type MockORM_InsertLogs_Call struct { *mock.Call } // InsertLogs is a helper method to define mock.On call // - _a0 context.Context // - _a1 []Log -func (_e *mockORM_Expecter) InsertLogs(_a0 interface{}, _a1 interface{}) *mockORM_InsertLogs_Call { - return &mockORM_InsertLogs_Call{Call: _e.mock.On("InsertLogs", _a0, _a1)} +func (_e *MockORM_Expecter) InsertLogs(_a0 interface{}, _a1 interface{}) *MockORM_InsertLogs_Call { + return &MockORM_InsertLogs_Call{Call: _e.mock.On("InsertLogs", _a0, _a1)} } -func (_c *mockORM_InsertLogs_Call) Run(run func(_a0 context.Context, _a1 []Log)) *mockORM_InsertLogs_Call { +func (_c *MockORM_InsertLogs_Call) Run(run func(_a0 context.Context, _a1 
[]Log)) *MockORM_InsertLogs_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].([]Log)) }) return _c } -func (_c *mockORM_InsertLogs_Call) Return(err error) *mockORM_InsertLogs_Call { +func (_c *MockORM_InsertLogs_Call) Return(err error) *MockORM_InsertLogs_Call { _c.Call.Return(err) return _c } -func (_c *mockORM_InsertLogs_Call) RunAndReturn(run func(context.Context, []Log) error) *mockORM_InsertLogs_Call { +func (_c *MockORM_InsertLogs_Call) RunAndReturn(run func(context.Context, []Log) error) *MockORM_InsertLogs_Call { _c.Call.Return(run) return _c } // MarkFilterBackfilled provides a mock function with given fields: ctx, id -func (_m *mockORM) MarkFilterBackfilled(ctx context.Context, id int64) error { +func (_m *MockORM) MarkFilterBackfilled(ctx context.Context, id int64) error { ret := _m.Called(ctx, id) if len(ret) == 0 { @@ -235,37 +291,37 @@ func (_m *mockORM) MarkFilterBackfilled(ctx context.Context, id int64) error { return r0 } -// mockORM_MarkFilterBackfilled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MarkFilterBackfilled' -type mockORM_MarkFilterBackfilled_Call struct { +// MockORM_MarkFilterBackfilled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MarkFilterBackfilled' +type MockORM_MarkFilterBackfilled_Call struct { *mock.Call } // MarkFilterBackfilled is a helper method to define mock.On call // - ctx context.Context // - id int64 -func (_e *mockORM_Expecter) MarkFilterBackfilled(ctx interface{}, id interface{}) *mockORM_MarkFilterBackfilled_Call { - return &mockORM_MarkFilterBackfilled_Call{Call: _e.mock.On("MarkFilterBackfilled", ctx, id)} +func (_e *MockORM_Expecter) MarkFilterBackfilled(ctx interface{}, id interface{}) *MockORM_MarkFilterBackfilled_Call { + return &MockORM_MarkFilterBackfilled_Call{Call: _e.mock.On("MarkFilterBackfilled", ctx, id)} } -func (_c *mockORM_MarkFilterBackfilled_Call) Run(run func(ctx 
context.Context, id int64)) *mockORM_MarkFilterBackfilled_Call { +func (_c *MockORM_MarkFilterBackfilled_Call) Run(run func(ctx context.Context, id int64)) *MockORM_MarkFilterBackfilled_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(int64)) }) return _c } -func (_c *mockORM_MarkFilterBackfilled_Call) Return(err error) *mockORM_MarkFilterBackfilled_Call { +func (_c *MockORM_MarkFilterBackfilled_Call) Return(err error) *MockORM_MarkFilterBackfilled_Call { _c.Call.Return(err) return _c } -func (_c *mockORM_MarkFilterBackfilled_Call) RunAndReturn(run func(context.Context, int64) error) *mockORM_MarkFilterBackfilled_Call { +func (_c *MockORM_MarkFilterBackfilled_Call) RunAndReturn(run func(context.Context, int64) error) *MockORM_MarkFilterBackfilled_Call { _c.Call.Return(run) return _c } // MarkFilterDeleted provides a mock function with given fields: ctx, id -func (_m *mockORM) MarkFilterDeleted(ctx context.Context, id int64) error { +func (_m *MockORM) MarkFilterDeleted(ctx context.Context, id int64) error { ret := _m.Called(ctx, id) if len(ret) == 0 { @@ -282,37 +338,37 @@ func (_m *mockORM) MarkFilterDeleted(ctx context.Context, id int64) error { return r0 } -// mockORM_MarkFilterDeleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MarkFilterDeleted' -type mockORM_MarkFilterDeleted_Call struct { +// MockORM_MarkFilterDeleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MarkFilterDeleted' +type MockORM_MarkFilterDeleted_Call struct { *mock.Call } // MarkFilterDeleted is a helper method to define mock.On call // - ctx context.Context // - id int64 -func (_e *mockORM_Expecter) MarkFilterDeleted(ctx interface{}, id interface{}) *mockORM_MarkFilterDeleted_Call { - return &mockORM_MarkFilterDeleted_Call{Call: _e.mock.On("MarkFilterDeleted", ctx, id)} +func (_e *MockORM_Expecter) MarkFilterDeleted(ctx interface{}, id interface{}) 
*MockORM_MarkFilterDeleted_Call { + return &MockORM_MarkFilterDeleted_Call{Call: _e.mock.On("MarkFilterDeleted", ctx, id)} } -func (_c *mockORM_MarkFilterDeleted_Call) Run(run func(ctx context.Context, id int64)) *mockORM_MarkFilterDeleted_Call { +func (_c *MockORM_MarkFilterDeleted_Call) Run(run func(ctx context.Context, id int64)) *MockORM_MarkFilterDeleted_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(int64)) }) return _c } -func (_c *mockORM_MarkFilterDeleted_Call) Return(err error) *mockORM_MarkFilterDeleted_Call { +func (_c *MockORM_MarkFilterDeleted_Call) Return(err error) *MockORM_MarkFilterDeleted_Call { _c.Call.Return(err) return _c } -func (_c *mockORM_MarkFilterDeleted_Call) RunAndReturn(run func(context.Context, int64) error) *mockORM_MarkFilterDeleted_Call { +func (_c *MockORM_MarkFilterDeleted_Call) RunAndReturn(run func(context.Context, int64) error) *MockORM_MarkFilterDeleted_Call { _c.Call.Return(run) return _c } // SelectFilters provides a mock function with given fields: ctx -func (_m *mockORM) SelectFilters(ctx context.Context) ([]Filter, error) { +func (_m *MockORM) SelectFilters(ctx context.Context) ([]Filter, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -341,36 +397,36 @@ func (_m *mockORM) SelectFilters(ctx context.Context) ([]Filter, error) { return r0, r1 } -// mockORM_SelectFilters_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectFilters' -type mockORM_SelectFilters_Call struct { +// MockORM_SelectFilters_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectFilters' +type MockORM_SelectFilters_Call struct { *mock.Call } // SelectFilters is a helper method to define mock.On call // - ctx context.Context -func (_e *mockORM_Expecter) SelectFilters(ctx interface{}) *mockORM_SelectFilters_Call { - return &mockORM_SelectFilters_Call{Call: _e.mock.On("SelectFilters", ctx)} +func (_e *MockORM_Expecter) 
SelectFilters(ctx interface{}) *MockORM_SelectFilters_Call { + return &MockORM_SelectFilters_Call{Call: _e.mock.On("SelectFilters", ctx)} } -func (_c *mockORM_SelectFilters_Call) Run(run func(ctx context.Context)) *mockORM_SelectFilters_Call { +func (_c *MockORM_SelectFilters_Call) Run(run func(ctx context.Context)) *MockORM_SelectFilters_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context)) }) return _c } -func (_c *mockORM_SelectFilters_Call) Return(_a0 []Filter, _a1 error) *mockORM_SelectFilters_Call { +func (_c *MockORM_SelectFilters_Call) Return(_a0 []Filter, _a1 error) *MockORM_SelectFilters_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *mockORM_SelectFilters_Call) RunAndReturn(run func(context.Context) ([]Filter, error)) *mockORM_SelectFilters_Call { +func (_c *MockORM_SelectFilters_Call) RunAndReturn(run func(context.Context) ([]Filter, error)) *MockORM_SelectFilters_Call { _c.Call.Return(run) return _c } // SelectSeqNums provides a mock function with given fields: ctx -func (_m *mockORM) SelectSeqNums(ctx context.Context) (map[int64]int64, error) { +func (_m *MockORM) SelectSeqNums(ctx context.Context) (map[int64]int64, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -399,41 +455,41 @@ func (_m *mockORM) SelectSeqNums(ctx context.Context) (map[int64]int64, error) { return r0, r1 } -// mockORM_SelectSeqNums_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectSeqNums' -type mockORM_SelectSeqNums_Call struct { +// MockORM_SelectSeqNums_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectSeqNums' +type MockORM_SelectSeqNums_Call struct { *mock.Call } // SelectSeqNums is a helper method to define mock.On call // - ctx context.Context -func (_e *mockORM_Expecter) SelectSeqNums(ctx interface{}) *mockORM_SelectSeqNums_Call { - return &mockORM_SelectSeqNums_Call{Call: _e.mock.On("SelectSeqNums", ctx)} +func (_e *MockORM_Expecter) 
SelectSeqNums(ctx interface{}) *MockORM_SelectSeqNums_Call { + return &MockORM_SelectSeqNums_Call{Call: _e.mock.On("SelectSeqNums", ctx)} } -func (_c *mockORM_SelectSeqNums_Call) Run(run func(ctx context.Context)) *mockORM_SelectSeqNums_Call { +func (_c *MockORM_SelectSeqNums_Call) Run(run func(ctx context.Context)) *MockORM_SelectSeqNums_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context)) }) return _c } -func (_c *mockORM_SelectSeqNums_Call) Return(_a0 map[int64]int64, _a1 error) *mockORM_SelectSeqNums_Call { +func (_c *MockORM_SelectSeqNums_Call) Return(_a0 map[int64]int64, _a1 error) *MockORM_SelectSeqNums_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *mockORM_SelectSeqNums_Call) RunAndReturn(run func(context.Context) (map[int64]int64, error)) *mockORM_SelectSeqNums_Call { +func (_c *MockORM_SelectSeqNums_Call) RunAndReturn(run func(context.Context) (map[int64]int64, error)) *MockORM_SelectSeqNums_Call { _c.Call.Return(run) return _c } -// newMockORM creates a new instance of mockORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewMockORM creates a new instance of MockORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
-func newMockORM(t interface { +func NewMockORM(t interface { mock.TestingT Cleanup(func()) -}) *mockORM { - mock := &mockORM{} +}) *MockORM { + mock := &MockORM{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/pkg/solana/logpoller/mocks/rpc_client.go b/pkg/solana/logpoller/mocks/rpc_client.go index 1d112f399..3de0356ba 100644 --- a/pkg/solana/logpoller/mocks/rpc_client.go +++ b/pkg/solana/logpoller/mocks/rpc_client.go @@ -85,66 +85,6 @@ func (_c *RPCClient_GetBlockWithOpts_Call) RunAndReturn(run func(context.Context return _c } -// GetBlocks provides a mock function with given fields: ctx, startSlot, endSlot -func (_m *RPCClient) GetBlocks(ctx context.Context, startSlot uint64, endSlot *uint64) (rpc.BlocksResult, error) { - ret := _m.Called(ctx, startSlot, endSlot) - - if len(ret) == 0 { - panic("no return value specified for GetBlocks") - } - - var r0 rpc.BlocksResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) (rpc.BlocksResult, error)); ok { - return rf(ctx, startSlot, endSlot) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) rpc.BlocksResult); ok { - r0 = rf(ctx, startSlot, endSlot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(rpc.BlocksResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, *uint64) error); ok { - r1 = rf(ctx, startSlot, endSlot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RPCClient_GetBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlocks' -type RPCClient_GetBlocks_Call struct { - *mock.Call -} - -// GetBlocks is a helper method to define mock.On call -// - ctx context.Context -// - startSlot uint64 -// - endSlot *uint64 -func (_e *RPCClient_Expecter) GetBlocks(ctx interface{}, startSlot interface{}, endSlot interface{}) *RPCClient_GetBlocks_Call { - return &RPCClient_GetBlocks_Call{Call: _e.mock.On("GetBlocks", ctx, startSlot, endSlot)} -} - -func (_c 
*RPCClient_GetBlocks_Call) Run(run func(ctx context.Context, startSlot uint64, endSlot *uint64)) *RPCClient_GetBlocks_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(*uint64)) - }) - return _c -} - -func (_c *RPCClient_GetBlocks_Call) Return(out rpc.BlocksResult, err error) *RPCClient_GetBlocks_Call { - _c.Call.Return(out, err) - return _c -} - -func (_c *RPCClient_GetBlocks_Call) RunAndReturn(run func(context.Context, uint64, *uint64) (rpc.BlocksResult, error)) *RPCClient_GetBlocks_Call { - _c.Call.Return(run) - return _c -} - // GetSignaturesForAddressWithOpts provides a mock function with given fields: _a0, _a1, _a2 func (_m *RPCClient) GetSignaturesForAddressWithOpts(_a0 context.Context, _a1 solana.PublicKey, _a2 *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { ret := _m.Called(_a0, _a1, _a2) @@ -205,29 +145,27 @@ func (_c *RPCClient_GetSignaturesForAddressWithOpts_Call) RunAndReturn(run func( return _c } -// LatestBlockhash provides a mock function with given fields: ctx -func (_m *RPCClient) LatestBlockhash(ctx context.Context) (*rpc.GetLatestBlockhashResult, error) { - ret := _m.Called(ctx) +// SlotHeightWithCommitment provides a mock function with given fields: ctx, commitment +func (_m *RPCClient) SlotHeightWithCommitment(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { + ret := _m.Called(ctx, commitment) if len(ret) == 0 { - panic("no return value specified for LatestBlockhash") + panic("no return value specified for SlotHeightWithCommitment") } - var r0 *rpc.GetLatestBlockhashResult + var r0 uint64 var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*rpc.GetLatestBlockhashResult, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) (uint64, error)); ok { + return rf(ctx, commitment) } - if rf, ok := ret.Get(0).(func(context.Context) *rpc.GetLatestBlockhashResult); ok { - r0 = rf(ctx) + if rf, 
ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) uint64); ok { + r0 = rf(ctx, commitment) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetLatestBlockhashResult) - } + r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + if rf, ok := ret.Get(1).(func(context.Context, rpc.CommitmentType) error); ok { + r1 = rf(ctx, commitment) } else { r1 = ret.Error(1) } @@ -235,30 +173,31 @@ func (_m *RPCClient) LatestBlockhash(ctx context.Context) (*rpc.GetLatestBlockha return r0, r1 } -// RPCClient_LatestBlockhash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestBlockhash' -type RPCClient_LatestBlockhash_Call struct { +// RPCClient_SlotHeightWithCommitment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SlotHeightWithCommitment' +type RPCClient_SlotHeightWithCommitment_Call struct { *mock.Call } -// LatestBlockhash is a helper method to define mock.On call +// SlotHeightWithCommitment is a helper method to define mock.On call // - ctx context.Context -func (_e *RPCClient_Expecter) LatestBlockhash(ctx interface{}) *RPCClient_LatestBlockhash_Call { - return &RPCClient_LatestBlockhash_Call{Call: _e.mock.On("LatestBlockhash", ctx)} +// - commitment rpc.CommitmentType +func (_e *RPCClient_Expecter) SlotHeightWithCommitment(ctx interface{}, commitment interface{}) *RPCClient_SlotHeightWithCommitment_Call { + return &RPCClient_SlotHeightWithCommitment_Call{Call: _e.mock.On("SlotHeightWithCommitment", ctx, commitment)} } -func (_c *RPCClient_LatestBlockhash_Call) Run(run func(ctx context.Context)) *RPCClient_LatestBlockhash_Call { +func (_c *RPCClient_SlotHeightWithCommitment_Call) Run(run func(ctx context.Context, commitment rpc.CommitmentType)) *RPCClient_SlotHeightWithCommitment_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run(args[0].(context.Context), 
args[1].(rpc.CommitmentType)) }) return _c } -func (_c *RPCClient_LatestBlockhash_Call) Return(out *rpc.GetLatestBlockhashResult, err error) *RPCClient_LatestBlockhash_Call { - _c.Call.Return(out, err) +func (_c *RPCClient_SlotHeightWithCommitment_Call) Return(_a0 uint64, _a1 error) *RPCClient_SlotHeightWithCommitment_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *RPCClient_LatestBlockhash_Call) RunAndReturn(run func(context.Context) (*rpc.GetLatestBlockhashResult, error)) *RPCClient_LatestBlockhash_Call { +func (_c *RPCClient_SlotHeightWithCommitment_Call) RunAndReturn(run func(context.Context, rpc.CommitmentType) (uint64, error)) *RPCClient_SlotHeightWithCommitment_Call { _c.Call.Return(run) return _c } diff --git a/pkg/solana/logpoller/mocks/worker_group.go b/pkg/solana/logpoller/mocks/worker_group.go new file mode 100644 index 000000000..e4a98bdec --- /dev/null +++ b/pkg/solana/logpoller/mocks/worker_group.go @@ -0,0 +1,85 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + worker "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/worker" +) + +// WorkerGroup is an autogenerated mock type for the WorkerGroup type +type WorkerGroup struct { + mock.Mock +} + +type WorkerGroup_Expecter struct { + mock *mock.Mock +} + +func (_m *WorkerGroup) EXPECT() *WorkerGroup_Expecter { + return &WorkerGroup_Expecter{mock: &_m.Mock} +} + +// Do provides a mock function with given fields: ctx, job +func (_m *WorkerGroup) Do(ctx context.Context, job worker.Job) error { + ret := _m.Called(ctx, job) + + if len(ret) == 0 { + panic("no return value specified for Do") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, worker.Job) error); ok { + r0 = rf(ctx, job) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WorkerGroup_Do_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Do' +type WorkerGroup_Do_Call struct { + *mock.Call +} + +// Do is a helper method to define mock.On call +// - ctx context.Context +// - job worker.Job +func (_e *WorkerGroup_Expecter) Do(ctx interface{}, job interface{}) *WorkerGroup_Do_Call { + return &WorkerGroup_Do_Call{Call: _e.mock.On("Do", ctx, job)} +} + +func (_c *WorkerGroup_Do_Call) Run(run func(ctx context.Context, job worker.Job)) *WorkerGroup_Do_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(worker.Job)) + }) + return _c +} + +func (_c *WorkerGroup_Do_Call) Return(_a0 error) *WorkerGroup_Do_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *WorkerGroup_Do_Call) RunAndReturn(run func(context.Context, worker.Job) error) *WorkerGroup_Do_Call { + _c.Call.Return(run) + return _c +} + +// NewWorkerGroup creates a new instance of WorkerGroup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewWorkerGroup(t interface { + mock.TestingT + Cleanup(func()) +}) *WorkerGroup { + mock := &WorkerGroup{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/solana/logpoller/orm.go b/pkg/solana/logpoller/orm.go index ae6e9118e..417b2e6ba 100644 --- a/pkg/solana/logpoller/orm.go +++ b/pkg/solana/logpoller/orm.go @@ -238,6 +238,13 @@ func (o *DSORM) FilteredLogs(ctx context.Context, filter []query.Expression, lim return logs, nil } +func (o *DSORM) GetLatestBlock(ctx context.Context) (int64, error) { + q := `SELECT block_number FROM solana.logs WHERE chain_id = $1 ORDER BY block_number DESC LIMIT 1` + var result int64 + err := o.ds.GetContext(ctx, &result, q, o.chainID) + return result, err +} + func (o *DSORM) SelectSeqNums(ctx context.Context) (map[int64]int64, error) { results := make([]struct { FilterID int64 diff --git a/pkg/solana/logpoller/orm_test.go b/pkg/solana/logpoller/orm_test.go index 7fab36467..362333d27 100644 --- a/pkg/solana/logpoller/orm_test.go +++ b/pkg/solana/logpoller/orm_test.go @@ -3,18 +3,18 @@ package logpoller import ( - "math/rand" + "context" "testing" - "time" "github.com/gagliardetto/solana-go" "github.com/google/uuid" _ "github.com/jackc/pgx/v4/stdlib" + "github.com/stretchr/testify/require" + "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil/pg" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/stretchr/testify/require" ) // NOTE: at the moment it's not possible to run all db tests at once. 
This issue will be addressed separately @@ -134,7 +134,7 @@ func TestLogPollerFilters(t *testing.T) { ctx := tests.Context(t) filterID, err := orm.InsertFilter(ctx, filter) require.NoError(t, err) - log := newRandomLog(t, filterID, chainID) + log := newRandomLog(t, filterID, chainID, "myEvent") err = orm.InsertLogs(ctx, []Log{log}) require.NoError(t, err) logs, err := orm.SelectLogs(ctx, 0, log.BlockNumber, log.Address, log.EventSig) @@ -191,8 +191,8 @@ func TestLogPollerLogs(t *testing.T) { filterID, err := orm.InsertFilter(ctx, newRandomFilter(t)) filterID2, err := orm.InsertFilter(ctx, newRandomFilter(t)) require.NoError(t, err) - log := newRandomLog(t, filterID, chainID) - log2 := newRandomLog(t, filterID2, chainID) + log := newRandomLog(t, filterID, chainID, "myEvent") + log2 := newRandomLog(t, filterID2, chainID, "myEvent") err = orm.InsertLogs(ctx, []Log{log, log2}) require.NoError(t, err) // insert of the same Log should not produce two instances @@ -220,6 +220,33 @@ func TestLogPollerLogs(t *testing.T) { }) } +func TestLogPoller_GetLatestBlock(t *testing.T) { + lggr := logger.Test(t) + dbx := pg.NewTestDB(t, pg.TestURL(t)) + + createLogsForBlocks := func(ctx context.Context, orm *DSORM, blocks ...int64) { + filterID, err := orm.InsertFilter(ctx, newRandomFilter(t)) + require.NoError(t, err) + for _, block := range blocks { + log := newRandomLog(t, filterID, orm.chainID, "myEvent") + log.BlockNumber = block + err = orm.InsertLogs(ctx, []Log{log}) + require.NoError(t, err) + } + } + ctx := tests.Context(t) + orm1 := NewORM(uuid.NewString(), dbx, lggr) + createLogsForBlocks(tests.Context(t), orm1, 10, 11, 12) + orm2 := NewORM(uuid.NewString(), dbx, lggr) + createLogsForBlocks(context.Background(), orm2, 100, 110, 120) + latestBlockChain1, err := orm1.GetLatestBlock(ctx) + require.NoError(t, err) + require.Equal(t, int64(12), latestBlockChain1) + latestBlockChain2, err := orm2.GetLatestBlock(ctx) + require.NoError(t, err) + require.Equal(t, int64(120), 
latestBlockChain2) +} + func newRandomFilter(t *testing.T) Filter { return Filter{ Name: uuid.NewString(), @@ -232,26 +259,3 @@ func newRandomFilter(t *testing.T) Filter { MaxLogsKept: 3, } } - -func newRandomLog(t *testing.T, filterID int64, chainID string) Log { - privateKey, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - pubKey := privateKey.PublicKey() - data := []byte("solana is fun") - signature, err := privateKey.Sign(data) - require.NoError(t, err) - return Log{ - FilterID: filterID, - ChainID: chainID, - LogIndex: rand.Int63n(1000), - BlockHash: Hash(pubKey), - BlockNumber: rand.Int63n(1000000), - BlockTimestamp: time.Unix(1731590113, 0), - Address: PublicKey(pubKey), - EventSig: EventSignature{3, 2, 1}, - SubkeyValues: [][]byte{{3, 2, 1}, {1}, {1, 2}, pubKey.Bytes()}, - TxHash: Signature(signature), - Data: data, - SequenceNum: rand.Int63n(500), - } -} diff --git a/pkg/solana/logpoller/types.go b/pkg/solana/logpoller/types.go index dc8b614bc..5b75ff9a7 100644 --- a/pkg/solana/logpoller/types.go +++ b/pkg/solana/logpoller/types.go @@ -9,6 +9,7 @@ import ( "math" "reflect" "slices" + "strings" "github.com/gagliardetto/solana-go" @@ -31,6 +32,23 @@ func (k PublicKey) ToSolana() solana.PublicKey { return solana.PublicKey(k) } +func (k PublicKey) String() string { + return k.ToSolana().String() +} + +func PublicKeysToString(keys []PublicKey) string { + var buf strings.Builder + buf.WriteString("[") + for i, key := range keys { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(key.String()) + } + buf.WriteString("]") + return buf.String() +} + type Hash solana.Hash // Scan implements Scanner for database/sql. diff --git a/pkg/solana/logpoller/worker/job.go b/pkg/solana/logpoller/worker/job.go new file mode 100644 index 000000000..84cca1f47 --- /dev/null +++ b/pkg/solana/logpoller/worker/job.go @@ -0,0 +1,29 @@ +package worker + +import ( + "context" + "time" +) + +// Job is a function that should be run by the worker group. 
The context provided +// allows the Job to cancel if the worker group is closed. All other life-cycle +// management should be wrapped within the Job. +type Job interface { + String() string + Run(context.Context) error +} + +type retryableJob struct { + name string + count uint8 + when time.Time + job Job +} + +func (j retryableJob) String() string { + return j.job.String() +} + +func (j retryableJob) Run(ctx context.Context) error { + return j.job.Run(ctx) +} diff --git a/pkg/solana/logpoller/worker.go b/pkg/solana/logpoller/worker/worker.go similarity index 86% rename from pkg/solana/logpoller/worker.go rename to pkg/solana/logpoller/worker/worker.go index 98226452f..961f2625d 100644 --- a/pkg/solana/logpoller/worker.go +++ b/pkg/solana/logpoller/worker/worker.go @@ -1,4 +1,4 @@ -package logpoller +package worker import ( "context" @@ -25,7 +25,7 @@ const ( DefaultNotifyRetryDepth = 200 // DefaultNotifyQueueDepth is the queue depth at which the worker group will log a warning. DefaultNotifyQueueDepth = 100 - // DefaultWorkerCount is the default number of workers in a WorkerGroup. + // DefaultWorkerCount is the default number of workers in a Group. 
DefaultWorkerCount = 10 ) @@ -38,10 +38,14 @@ type worker struct { func (w *worker) Do(ctx context.Context, job Job) { if ctx.Err() == nil { + start := time.Now() + w.Lggr.Debugf("Starting job %s", job.String()) if err := job.Run(ctx); err != nil { w.Lggr.Errorf("job %s failed with error; retrying: %s", job, err) w.Retry <- job } + // TODO: add prom metric + w.Lggr.Debugf("Finished job %s in %s", job.String(), time.Since(start)) } // put itself back on the queue when done @@ -51,7 +55,7 @@ func (w *worker) Do(ctx context.Context, job Job) { } } -type WorkerGroup struct { +type Group struct { // service state management services.Service engine *services.Engine @@ -59,7 +63,7 @@ type WorkerGroup struct { // dependencies and configuration maxWorkers int maxRetryCount uint8 - lggr logger.Logger + lggr logger.SugaredLogger // worker group state workers chan *worker @@ -76,8 +80,8 @@ type WorkerGroup struct { retryMap map[string]retryableJob } -func NewWorkerGroup(workers int, lggr logger.Logger) *WorkerGroup { - g := &WorkerGroup{ +func NewGroup(workers int, lggr logger.SugaredLogger) *Group { + g := &Group{ maxWorkers: workers, maxRetryCount: DefaultMaxRetryCount, workers: make(chan *worker, workers), @@ -108,9 +112,9 @@ func NewWorkerGroup(workers int, lggr logger.Logger) *WorkerGroup { return g } -var _ services.Service = &WorkerGroup{} +var _ services.Service = &Group{} -func (g *WorkerGroup) start(ctx context.Context) error { +func (g *Group) start(ctx context.Context) error { g.engine.Go(g.runQueuing) g.engine.Go(g.runProcessing) g.engine.Go(g.runRetryQueue) @@ -119,7 +123,7 @@ func (g *WorkerGroup) start(ctx context.Context) error { return nil } -func (g *WorkerGroup) close() error { +func (g *Group) close() error { if !g.queueClosed.Load() { g.queueClosed.Store(true) close(g.chStopInputs) @@ -133,7 +137,7 @@ func (g *WorkerGroup) close() error { // time for the queue to open. 
Or a context can wrap a collection of jobs that // need to be run and when the context cancels, the jobs don't get added to the // queue. -func (g *WorkerGroup) Do(ctx context.Context, job Job) error { +func (g *Group) Do(ctx context.Context, job Job) error { if ctx.Err() != nil { return fmt.Errorf("%w; work not added to queue", ErrContextCancelled) } @@ -152,7 +156,7 @@ func (g *WorkerGroup) Do(ctx context.Context, job Job) error { } } -func (g *WorkerGroup) runQueuing(ctx context.Context) { +func (g *Group) runQueuing(ctx context.Context) { for { select { case <-ctx.Done(): @@ -170,7 +174,7 @@ func (g *WorkerGroup) runQueuing(ctx context.Context) { } } -func (g *WorkerGroup) runProcessing(ctx context.Context) { +func (g *Group) runProcessing(ctx context.Context) { Loop: for { select { @@ -184,7 +188,7 @@ Loop: } } -func (g *WorkerGroup) runRetryQueue(ctx context.Context) { +func (g *Group) runRetryQueue(ctx context.Context) { for { select { case <-ctx.Done(): @@ -198,12 +202,10 @@ func (g *WorkerGroup) runRetryQueue(ctx context.Context) { retry.count++ if retry.count > g.maxRetryCount { - g.lggr.Errorf("job %s dropped after max retries", job) - - continue + g.lggr.Criticalf("job %s exceeded max retries %d/%d", job, retry.count, g.maxRetryCount) } - wait := calculateExponentialBackoff(retry.count) + wait := calculateExponentialBackoff(min(retry.count, g.maxRetryCount)) g.lggr.Errorf("retrying job in %dms", wait/time.Millisecond) retry.when = time.Now().Add(wait) @@ -230,7 +232,7 @@ func (g *WorkerGroup) runRetryQueue(ctx context.Context) { } } -func (g *WorkerGroup) runRetries(ctx context.Context) { +func (g *Group) runRetries(ctx context.Context) { for { // run timer on minimum backoff timer := time.NewTimer(calculateExponentialBackoff(0)) @@ -266,7 +268,7 @@ func (g *WorkerGroup) runRetries(ctx context.Context) { } } -func (g *WorkerGroup) processQueue(ctx context.Context) { +func (g *Group) processQueue(ctx context.Context) { for { if g.queue.Len() == 0 { break 
@@ -289,7 +291,7 @@ func (g *WorkerGroup) processQueue(ctx context.Context) { } } -func (g *WorkerGroup) doJob(ctx context.Context, job Job) { +func (g *Group) doJob(ctx context.Context, job Job) { wkr := <-g.workers go wkr.Do(ctx, job) diff --git a/pkg/solana/logpoller/worker_test.go b/pkg/solana/logpoller/worker/worker_test.go similarity index 90% rename from pkg/solana/logpoller/worker_test.go rename to pkg/solana/logpoller/worker/worker_test.go index 2d2afadec..2c50e49f2 100644 --- a/pkg/solana/logpoller/worker_test.go +++ b/pkg/solana/logpoller/worker/worker_test.go @@ -1,4 +1,4 @@ -package logpoller_test +package worker_test import ( "context" @@ -15,12 +15,12 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/worker" ) func TestWorkerGroup(t *testing.T) { ctx := tests.Context(t) - group := logpoller.NewWorkerGroup(5, logger.Nop()) + group := worker.NewGroup(5, logger.Sugared(logger.Nop())) require.NoError(t, group.Start(ctx)) t.Cleanup(func() { @@ -55,7 +55,7 @@ func TestWorkerGroup(t *testing.T) { func TestWorkerGroup_Retry(t *testing.T) { ctx := tests.Context(t) - group := logpoller.NewWorkerGroup(5, logger.Nop()) + group := worker.NewGroup(5, logger.Sugared(logger.Nop())) require.NoError(t, group.Start(ctx)) t.Cleanup(func() { @@ -109,7 +109,7 @@ func TestWorkerGroup_Retry(t *testing.T) { func TestWorkerGroup_Close(t *testing.T) { ctx := tests.Context(t) - group := logpoller.NewWorkerGroup(5, logger.Nop()) + group := worker.NewGroup(5, logger.Sugared(logger.Nop())) require.NoError(t, group.Start(ctx)) @@ -176,7 +176,7 @@ func TestWorkerGroup_Close(t *testing.T) { func TestWorkerGroup_DoContext(t *testing.T) { t.Run("will not add to queue", func(t *testing.T) { ctx := tests.Context(t) - group := logpoller.NewWorkerGroup(2, logger.Nop()) + 
group := worker.NewGroup(2, logger.Sugared(logger.Nop())) job := testJob{job: func(ctx context.Context) error { return nil }} require.NoError(t, group.Start(ctx)) @@ -187,13 +187,13 @@ func TestWorkerGroup_DoContext(t *testing.T) { // calling cancel before calling Do should result in an error cancel() - require.ErrorIs(t, group.Do(ctxB, job), logpoller.ErrContextCancelled) + require.ErrorIs(t, group.Do(ctxB, job), worker.ErrContextCancelled) }) t.Run("if queue closed", func(t *testing.T) { require.NoError(t, group.Close()) - require.ErrorIs(t, group.Do(ctx, job), logpoller.ErrProcessStopped) + require.ErrorIs(t, group.Do(ctx, job), worker.ErrProcessStopped) }) }) } @@ -201,7 +201,7 @@ func TestWorkerGroup_DoContext(t *testing.T) { func BenchmarkWorkerGroup(b *testing.B) { ctx := tests.Context(b) - group := logpoller.NewWorkerGroup(100, logger.Nop()) + group := worker.NewGroup(100, logger.Sugared(logger.Nop())) job := testJob{job: func(ctx context.Context) error { return nil }} require.NoError(b, group.Start(ctx))