From 39d09094fea9b1754cb94e41d0fae017c9126cb2 Mon Sep 17 00:00:00 2001 From: gustavogama-cll <165679773+gustavogama-cll@users.noreply.github.com> Date: Wed, 26 Feb 2025 14:19:58 -0300 Subject: [PATCH 01/17] feat(job-distributor): add exp. backoff retry to `feeds.SyncNodeInfo()` (#15752) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(job-distributor): add exp. backoff retry to feeds.SyncNodeInfo() There’s a behavior that we’ve observed for some time on the NOP side where they add/update a chain configuration on the Job Distributor panel but the change is not reflected on the service itself. This leads to inefficiencies, as NOPs are unaware of this and need to be notified so that they can "reapply" the configuration. After some investigation, we suspect that this is due to connectivity issues between the nodes and the job distributor instance, which cause the message with the update to be lost. This PR attempts to solve this by adding a "retry" wrapper on top of the existing `SyncNodeInfo` method. We rely on `avast/retry-go` to implement the bulk of the retry logic. It's configured with a minimum delay of 10 seconds, a maximum delay of 30 minutes, and a total of 56 attempts -- which adds up to a bit more than 24 hours. Ticket Number: DPA-1371 * review: protect cancel func access with a mutex to avoid race conditions * review: trigger retry on partial failures and support multiple job distributors * review: clear contexts before closing the connection manager --- .changeset/neat-penguins-report.md | 5 + core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 +- core/services/feeds/service.go | 166 ++++++++++++++++++++--- core/services/feeds/service_test.go | 201 +++++++++++++++++++++++++++- deployment/go.mod | 2 +- deployment/go.sum | 4 +- go.mod | 2 +- go.sum | 4 +- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 +- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 +- system-tests/lib/go.mod | 2 +- system-tests/lib/go.sum | 4 +- system-tests/tests/go.mod | 2 +- system-tests/tests/go.sum | 4 +- 17 files changed, 366 insertions(+), 48 deletions(-) create mode 100644 .changeset/neat-penguins-report.md diff --git a/.changeset/neat-penguins-report.md b/.changeset/neat-penguins-report.md new file mode 100644 index 00000000000..053faa00178 --- /dev/null +++ b/.changeset/neat-penguins-report.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#added add exponential backoff retry to feeds.SyncNodeInfo() diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 6094343ad4b..72bbcfcf04b 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -346,7 +346,7 @@ require ( github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20250207205350-420ccacab78a // indirect github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb // indirect github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index a35215ab7bd..c1a570007be 100644 --- 
a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1107,8 +1107,8 @@ github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 h1:hfMRj2ny6oNHd8w1rhJHdoX3YkoWJtCkBK6wTlCE4+c= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go index 99bbb2e0cbb..06c964130cf 100644 --- a/core/services/feeds/service.go +++ b/core/services/feeds/service.go @@ -5,7 +5,11 @@ import ( "database/sql" "encoding/hex" "fmt" + "strings" + "sync" + "time" + "github.com/avast/retry-go/v4" "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" "github.com/lib/pq" @@ -77,6 +81,10 @@ var ( // Job Proposal status "status", }) + + defaultSyncMinDelay = 10 * time.Second + defaultSyncMaxDelay = 30 * time.Minute + defaultSyncMaxAttempts = uint(48 + 8) // 30m * 48 =~ 24h; plus the initial 8 shorter retries ) // Service represents a behavior of the feeds service @@ -142,6 +150,10 @@ type service struct { lggr logger.Logger version string loopRegistrarConfig plugins.RegistrarConfig + syncNodeInfoCancel atomicCancelFns + syncMinDelay time.Duration + syncMaxDelay time.Duration + syncMaxAttempts uint } // NewService constructs a new feeds service @@ -161,6 +173,7 @@ func NewService( lggr logger.Logger, version string, rc plugins.RegistrarConfig, + opts ...ServiceOption, ) *service { lggr = lggr.Named("Feeds") svc := &service{ @@ -184,6 +197,14 @@ func NewService( lggr: lggr, version: version, loopRegistrarConfig: rc, + syncNodeInfoCancel: atomicCancelFns{fns: map[int64]context.CancelFunc{}}, + syncMinDelay: defaultSyncMinDelay, + syncMaxDelay: defaultSyncMaxDelay, + syncMaxAttempts: defaultSyncMaxAttempts, + } + + for _, opt := range opts { + opt(svc) } return svc @@ -255,8 +276,43 @@ func (s *service) RegisterManager(ctx context.Context, params RegisterManagerPar return id, nil } -// SyncNodeInfo syncs the node's information with FMS +// syncNodeInfoWithRetry syncs the node's information with FMS. In case of failures, +// it retries with an exponential backoff for up to 24h. 
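+// The retry relies on avast/retry-go with an exponential BackOffDelay: attempts start at +// syncMinDelay (10s by default), back off up to syncMaxDelay (30m by default), and stop after +// syncMaxAttempts (56 by default), which adds up to roughly 24h. Each sync runs in its own +// goroutine; calling this again for the same feeds manager id cancels any retry loop still +// in flight via syncNodeInfoCancel, so only the most recent sync attempt per manager survives.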
+func (s *service) syncNodeInfoWithRetry(id int64) { + ctx, cancel := context.WithCancel(context.Background()) + + // cancel the previous context -- and, by extension, the existing goroutine -- + // so that we can start anew + s.syncNodeInfoCancel.callAndSwap(id, cancel) + + retryOpts := []retry.Option{ + retry.Context(ctx), + retry.DelayType(retry.BackOffDelay), + retry.Delay(s.syncMinDelay), + retry.MaxDelay(s.syncMaxDelay), + retry.Attempts(s.syncMaxAttempts), + retry.LastErrorOnly(true), + retry.OnRetry(func(attempt uint, err error) { + s.lggr.Infow("failed to sync node info", "attempt", attempt, "err", err.Error()) + }), + } + + go func() { + err := retry.Do(func() error { return s.SyncNodeInfo(ctx, id) }, retryOpts...) + if err != nil { + s.lggr.Errorw("failed to sync node info; aborting", "err", err) + } else { + s.lggr.Info("successfully synced node info") + } + + s.syncNodeInfoCancel.callAndSwap(id, nil) + }() +} + func (s *service) SyncNodeInfo(ctx context.Context, id int64) error { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + // Get the FMS RPC client fmsClient, err := s.connMgr.GetClient(id) if err != nil { @@ -281,12 +337,22 @@ func (s *service) SyncNodeInfo(ctx context.Context, id int64) error { } workflowKey := s.getWorkflowPublicKey() - if _, err = fmsClient.UpdateNode(ctx, &pb.UpdateNodeRequest{ + + resp, err := fmsClient.UpdateNode(ctx, &pb.UpdateNodeRequest{ Version: s.version, ChainConfigs: cfgMsgs, WorkflowKey: &workflowKey, - }); err != nil { - return err + }) + if err != nil { + return errors.Wrap(err, "SyncNodeInfo.UpdateNode call failed") + } + if len(resp.ChainConfigErrors) > 0 { + errMsgs := make([]string, 0, len(resp.ChainConfigErrors)) + for _, ccErr := range resp.ChainConfigErrors { + errMsgs = append(errMsgs, ccErr.Message) + } + + return errors.Errorf("SyncNodeInfo.UpdateNode call partially failed: %s", strings.Join(errMsgs, "; ")) } return nil @@ -402,9 +468,7 @@ func (s *service) CreateChainConfig(ctx context.Context, cfg ChainConfig) (int64 return 0, errors.Wrap(err, "CreateChainConfig: failed to fetch manager") } - if err := s.SyncNodeInfo(ctx, mgr.ID); err != nil { - s.lggr.Infof("FMS: Unable to sync node info: %v", err) - } + s.syncNodeInfoWithRetry(mgr.ID) return id, nil } @@ -426,9 +490,7 @@ func (s *service) DeleteChainConfig(ctx context.Context, id int64) (int64, error return 0, errors.Wrap(err, "DeleteChainConfig: failed to fetch manager") } - if err := s.SyncNodeInfo(ctx, mgr.ID); err != nil { - s.lggr.Infof("FMS: Unable to sync node info: %v", err) - } + s.syncNodeInfoWithRetry(mgr.ID) return id, nil } @@ -467,9 +529,7 @@ func (s *service) UpdateChainConfig(ctx context.Context, cfg ChainConfig) (int64 return 0, errors.Wrap(err, "UpdateChainConfig failed: could not get chain config") } - if err := s.SyncNodeInfo(ctx, ccfg.FeedsManagerID); err != nil { - s.lggr.Infof("FMS: Unable to sync node info: %v", err) - } + s.syncNodeInfoWithRetry(ccfg.FeedsManagerID) return id, nil } @@ -1031,9 +1091,7 @@ func (s *service) CancelSpec(ctx context.Context, id int64) error { ) err = s.transact(ctx, func(tx datasources) error { - var ( - txerr error - ) + var txerr error if txerr = tx.orm.CancelSpec(ctx, id); txerr != nil { return txerr @@ -1153,6 +1211,8 @@ func (s *service) Start(ctx context.Context) error { // Close shuts down the service func (s *service) Close() error { return s.StopOnce("FeedsService", func() error { + s.syncNodeInfoCancel.callAllAndClear() + // This blocks until it finishes s.connMgr.Close() @@ -1173,10 
+1233,7 @@ func (s *service) connectFeedManager(ctx context.Context, mgr FeedsManager, priv }, OnConnect: func(pb.FeedsManagerClient) { // Sync the node's information with FMS once connected - err := s.SyncNodeInfo(ctx, mgr.ID) - if err != nil { - s.lggr.Infof("Error syncing node info: %v", err) - } + s.syncNodeInfoWithRetry(mgr.ID) }, }) } @@ -1220,8 +1277,10 @@ func (s *service) observeJobProposalCounts(ctx context.Context) error { metrics := counts.toMetrics() // Set the prometheus gauge metrics. - for _, status := range []JobProposalStatus{JobProposalStatusPending, JobProposalStatusApproved, - JobProposalStatusCancelled, JobProposalStatusRejected, JobProposalStatusDeleted, JobProposalStatusRevoked} { + for _, status := range []JobProposalStatus{ + JobProposalStatusPending, JobProposalStatusApproved, + JobProposalStatusCancelled, JobProposalStatusRejected, JobProposalStatusDeleted, JobProposalStatusRevoked, + } { status := status promJobProposalCounts.With(prometheus.Labels{"status": string(status)}).Set(metrics[status]) @@ -1565,6 +1624,49 @@ func (s *service) isRevokable(propStatus JobProposalStatus, specStatus SpecStatu return propStatus != JobProposalStatusDeleted && (specStatus == SpecStatusPending || specStatus == SpecStatusCancelled) } +type atomicCancelFns struct { + fns map[int64]context.CancelFunc + mutex sync.Mutex +} + +func (f *atomicCancelFns) callAndSwap(id int64, other func()) { + f.mutex.Lock() + defer f.mutex.Unlock() + + fn, found := f.fns[id] + if found && fn != nil { + fn() + } + + f.fns[id] = other +} + +func (f *atomicCancelFns) callAllAndClear() { + f.mutex.Lock() + defer f.mutex.Unlock() + + for _, fn := range f.fns { + if fn != nil { + fn() + } + } + clear(f.fns) +} + +type ServiceOption func(*service) + +func WithSyncMinDelay(delay time.Duration) ServiceOption { + return func(s *service) { s.syncMinDelay = delay } +} + +func WithSyncMaxDelay(delay time.Duration) ServiceOption { + return func(s *service) { s.syncMaxDelay = delay } +} + +func WithSyncMaxAttempts(attempts uint) ServiceOption { + return func(s *service) { s.syncMaxAttempts = attempts } +} + var _ Service = &NullService{} // NullService defines an implementation of the Feeds Service that is used @@ -1577,24 +1679,31 @@ func (ns NullService) Close() error { return nil } func (ns NullService) ApproveSpec(ctx context.Context, id int64, force bool) error { return ErrFeedsManagerDisabled } + func (ns NullService) CountJobProposalsByStatus(ctx context.Context) (*JobProposalCounts, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) CancelSpec(ctx context.Context, id int64) error { return ErrFeedsManagerDisabled } + func (ns NullService) GetJobProposal(ctx context.Context, id int64) (*JobProposal, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) ListSpecsByJobProposalIDs(ctx context.Context, ids []int64) ([]JobProposalSpec, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) GetManager(ctx context.Context, id int64) (*FeedsManager, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) ListManagersByIDs(ctx context.Context, ids []int64) ([]FeedsManager, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) GetSpec(ctx context.Context, id int64) (*JobProposalSpec, error) { return nil, ErrFeedsManagerDisabled } @@ -1602,15 +1711,19 @@ func (ns NullService) ListManagers(ctx context.Context) ([]FeedsManager, error) func (ns NullService) CreateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) { return 0, 
ErrFeedsManagerDisabled } + func (ns NullService) GetChainConfig(ctx context.Context, id int64) (*ChainConfig, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) DeleteChainConfig(ctx context.Context, id int64) (int64, error) { return 0, ErrFeedsManagerDisabled } + func (ns NullService) ListChainConfigsByManagerIDs(ctx context.Context, mgrIDs []int64) ([]ChainConfig, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) UpdateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) { return 0, ErrFeedsManagerDisabled } @@ -1618,18 +1731,23 @@ func (ns NullService) ListJobProposals(ctx context.Context) ([]JobProposal, erro func (ns NullService) ListJobProposalsByManagersIDs(ctx context.Context, ids []int64) ([]JobProposal, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) ProposeJob(ctx context.Context, args *ProposeJobArgs) (int64, error) { return 0, ErrFeedsManagerDisabled } + func (ns NullService) DeleteJob(ctx context.Context, args *DeleteJobArgs) (int64, error) { return 0, ErrFeedsManagerDisabled } + func (ns NullService) RevokeJob(ctx context.Context, args *RevokeJobArgs) (int64, error) { return 0, ErrFeedsManagerDisabled } + func (ns NullService) RegisterManager(ctx context.Context, params RegisterManagerParams) (int64, error) { return 0, ErrFeedsManagerDisabled } + func (ns NullService) RejectSpec(ctx context.Context, id int64) error { return ErrFeedsManagerDisabled } @@ -1637,15 +1755,19 @@ func (ns NullService) SyncNodeInfo(ctx context.Context, id int64) error { return func (ns NullService) UpdateManager(ctx context.Context, mgr FeedsManager) error { return ErrFeedsManagerDisabled } + func (ns NullService) EnableManager(ctx context.Context, id int64) (*FeedsManager, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) DisableManager(ctx context.Context, id int64) (*FeedsManager, error) { return nil, ErrFeedsManagerDisabled } + func (ns NullService) IsJobManaged(ctx context.Context, jobID int64) (bool, error) { return false, nil } + func (ns NullService) UpdateSpecDefinition(ctx context.Context, id int64, spec string) error { return ErrFeedsManagerDisabled } diff --git a/core/services/feeds/service_test.go b/core/services/feeds/service_test.go index ce0e933df49..1cf14b00ef5 100644 --- a/core/services/feeds/service_test.go +++ b/core/services/feeds/service_test.go @@ -5,7 +5,9 @@ import ( "database/sql" "encoding/hex" "fmt" + "maps" "math/big" + "slices" "testing" "time" @@ -16,6 +18,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" "gopkg.in/guregu/null.v4" commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" @@ -196,15 +200,18 @@ type TestService struct { ocr2Keystore *ksmocks.OCR2 workflowKeystore *ksmocks.Workflow legacyChains legacyevm.LegacyChainContainer + logs *observer.ObservedLogs } -func setupTestService(t *testing.T) *TestService { +func setupTestService(t *testing.T, opts ...feeds.ServiceOption) *TestService { t.Helper() - return setupTestServiceCfg(t, nil) + return setupTestServiceCfg(t, nil, opts...) 
} -func setupTestServiceCfg(t *testing.T, overrideCfg func(c *chainlink.Config, s *chainlink.Secrets)) *TestService { +func setupTestServiceCfg( + t *testing.T, overrideCfg func(c *chainlink.Config, s *chainlink.Secrets), opts ...feeds.ServiceOption, +) *TestService { t.Helper() var ( @@ -220,7 +227,7 @@ func setupTestServiceCfg(t *testing.T, overrideCfg func(c *chainlink.Config, s * workflowKeystore = ksmocks.NewWorkflow(t) ) - lggr := logger.TestLogger(t) + lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel) db := pgtest.NewSqlxDB(t) gcfg := configtest.NewGeneralConfig(t, overrideCfg) @@ -241,7 +248,8 @@ func setupTestServiceCfg(t *testing.T, overrideCfg func(c *chainlink.Config, s * keyStore.On("OCR").Return(ocr1Keystore) keyStore.On("OCR2").Return(ocr2Keystore) keyStore.On("Workflow").Return(workflowKeystore) - svc := feeds.NewService(orm, jobORM, db, spawner, keyStore, gcfg, gcfg.Feature(), gcfg.Insecure(), gcfg.JobPipeline(), gcfg.OCR(), gcfg.OCR2(), legacyChains, lggr, "1.0.0", nil) + svc := feeds.NewService(orm, jobORM, db, spawner, keyStore, gcfg, gcfg.Feature(), gcfg.Insecure(), + gcfg.JobPipeline(), gcfg.OCR(), gcfg.OCR2(), legacyChains, lggr, "1.0.0", nil, opts...) svc.SetConnectionsManager(connMgr) return &TestService{ @@ -257,6 +265,7 @@ func setupTestServiceCfg(t *testing.T, overrideCfg func(c *chainlink.Config, s * ocr2Keystore: ocr2Keystore, workflowKeystore: workflowKeystore, legacyChains: legacyChains, + logs: observedLogs, } } @@ -1856,6 +1865,170 @@ func Test_Service_SyncNodeInfo(t *testing.T) { } } +func Test_Service_syncNodeInfoWithRetry(t *testing.T) { + t.Parallel() + + mgr := feeds.FeedsManager{ID: 1} + nodeVersion := &versioning.NodeVersion{Version: "1.0.0"} + cfg := feeds.ChainConfig{ + FeedsManagerID: mgr.ID, + ChainID: "42", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0000000000000000000000000000000000000000", + AccountAddressPublicKey: null.StringFrom("0x0000000000000000000000000000000000000002"), + AdminAddress: "0x0000000000000000000000000000000000000001", + FluxMonitorConfig: feeds.FluxMonitorConfig{Enabled: true}, + OCR1Config: feeds.OCR1Config{Enabled: false}, + OCR2Config: feeds.OCR2ConfigModel{Enabled: false}, + } + workflowKey, err := workflowkey.New() + require.NoError(t, err) + + request := &proto.UpdateNodeRequest{ + Version: nodeVersion.Version, + ChainConfigs: []*proto.ChainConfig{ + { + Chain: &proto.Chain{ + Id: cfg.ChainID, + Type: proto.ChainType_CHAIN_TYPE_EVM, + }, + AccountAddress: cfg.AccountAddress, + AccountAddressPublicKey: &cfg.AccountAddressPublicKey.String, + AdminAddress: cfg.AdminAddress, + FluxMonitorConfig: &proto.FluxMonitorConfig{Enabled: true}, + Ocr1Config: &proto.OCR1Config{Enabled: false}, + Ocr2Config: &proto.OCR2Config{Enabled: false}, + }, + }, + WorkflowKey: func(s string) *string { return &s }(workflowKey.ID()), + } + successResponse := &proto.UpdateNodeResponse{ChainConfigErrors: map[string]*proto.ChainConfigError{}} + failureResponse := func(chainID string) *proto.UpdateNodeResponse { + return &proto.UpdateNodeResponse{ + ChainConfigErrors: map[string]*proto.ChainConfigError{chainID: {Message: "error chain " + chainID}}, + } + } + + tests := []struct { + name string + setup func(t *testing.T, svc *TestService) + run func(svc *TestService) (any, error) + wantLogs []string + }{ + { + name: "create chain", + setup: func(t *testing.T, svc *TestService) { + svc.workflowKeystore.EXPECT().GetAll().Return([]workflowkey.Key{workflowKey}, nil) + svc.orm.EXPECT().CreateChainConfig(mock.Anything, 
cfg).Return(int64(1), nil) + svc.orm.EXPECT().GetManager(mock.Anything, mgr.ID).Return(&mgr, nil) + svc.orm.EXPECT().ListChainConfigsByManagerIDs(mock.Anything, []int64{mgr.ID}).Return([]feeds.ChainConfig{cfg}, nil) + svc.connMgr.EXPECT().GetClient(mgr.ID).Return(svc.fmsClient, nil) + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(nil, errors.New("error-0")).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("1"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("2"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(successResponse, nil).Once() + }, + run: func(svc *TestService) (any, error) { + return svc.CreateChainConfig(testutils.Context(t), cfg) + }, + wantLogs: []string{ + `failed to sync node info attempt="0" err="SyncNodeInfo.UpdateNode call failed: error-0"`, + `failed to sync node info attempt="1" err="SyncNodeInfo.UpdateNode call partially failed: error chain 1"`, + `failed to sync node info attempt="2" err="SyncNodeInfo.UpdateNode call partially failed: error chain 2"`, + `successfully synced node info`, + }, + }, + { + name: "update chain", + setup: func(t *testing.T, svc *TestService) { + svc.workflowKeystore.EXPECT().GetAll().Return([]workflowkey.Key{workflowKey}, nil) + svc.orm.EXPECT().UpdateChainConfig(mock.Anything, cfg).Return(int64(1), nil) + svc.orm.EXPECT().GetChainConfig(mock.Anything, cfg.ID).Return(&cfg, nil) + svc.orm.EXPECT().ListChainConfigsByManagerIDs(mock.Anything, []int64{mgr.ID}).Return([]feeds.ChainConfig{cfg}, nil) + svc.connMgr.EXPECT().GetClient(mgr.ID).Return(svc.fmsClient, nil) + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("3"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(nil, errors.New("error-4")).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("5"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(successResponse, nil).Once() + }, + run: func(svc *TestService) (any, error) { + return svc.UpdateChainConfig(testutils.Context(t), cfg) + }, + wantLogs: []string{ + `failed to sync node info attempt="0" err="SyncNodeInfo.UpdateNode call partially failed: error chain 3"`, + `failed to sync node info attempt="1" err="SyncNodeInfo.UpdateNode call failed: error-4"`, + `failed to sync node info attempt="2" err="SyncNodeInfo.UpdateNode call partially failed: error chain 5"`, + `successfully synced node info`, + }, + }, + { + name: "delete chain", + setup: func(t *testing.T, svc *TestService) { + svc.workflowKeystore.EXPECT().GetAll().Return([]workflowkey.Key{workflowKey}, nil) + svc.orm.EXPECT().GetChainConfig(mock.Anything, cfg.ID).Return(&cfg, nil) + svc.orm.EXPECT().DeleteChainConfig(mock.Anything, cfg.ID).Return(cfg.ID, nil) + svc.orm.EXPECT().GetManager(mock.Anything, mgr.ID).Return(&mgr, nil) + svc.orm.EXPECT().ListChainConfigsByManagerIDs(mock.Anything, []int64{mgr.ID}).Return([]feeds.ChainConfig{cfg}, nil) + svc.connMgr.EXPECT().GetClient(mgr.ID).Return(svc.fmsClient, nil) + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("6"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("7"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(nil, errors.New("error-8")).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(successResponse, nil).Once() + }, + run: 
func(svc *TestService) (any, error) { + return svc.DeleteChainConfig(testutils.Context(t), cfg.ID) + }, + wantLogs: []string{ + `failed to sync node info attempt="0" err="SyncNodeInfo.UpdateNode call partially failed: error chain 6"`, + `failed to sync node info attempt="1" err="SyncNodeInfo.UpdateNode call partially failed: error chain 7"`, + `failed to sync node info attempt="2" err="SyncNodeInfo.UpdateNode call failed: error-8"`, + `successfully synced node info`, + }, + }, + { + name: "more errors than MaxAttempts", + setup: func(t *testing.T, svc *TestService) { + svc.workflowKeystore.EXPECT().GetAll().Return([]workflowkey.Key{workflowKey}, nil) + svc.orm.EXPECT().CreateChainConfig(mock.Anything, cfg).Return(int64(1), nil) + svc.orm.EXPECT().GetManager(mock.Anything, mgr.ID).Return(&mgr, nil) + svc.orm.EXPECT().ListChainConfigsByManagerIDs(mock.Anything, []int64{mgr.ID}).Return([]feeds.ChainConfig{cfg}, nil) + svc.connMgr.EXPECT().GetClient(mgr.ID).Return(svc.fmsClient, nil) + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("9"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("10"), nil).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(nil, errors.New("error-11")).Once() + svc.fmsClient.EXPECT().UpdateNode(mock.Anything, request).Return(failureResponse("12"), nil).Once() + }, + run: func(svc *TestService) (any, error) { + return svc.CreateChainConfig(testutils.Context(t), cfg) + }, + wantLogs: []string{ + `failed to sync node info attempt="0" err="SyncNodeInfo.UpdateNode call partially failed: error chain 9"`, + `failed to sync node info attempt="1" err="SyncNodeInfo.UpdateNode call partially failed: error chain 10"`, + `failed to sync node info attempt="2" err="SyncNodeInfo.UpdateNode call failed: error-11"`, + `failed to sync node info attempt="3" err="SyncNodeInfo.UpdateNode call partially failed: error chain 12"`, + `failed to sync node info; aborting err="SyncNodeInfo.UpdateNode call partially failed: error chain 12"`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + svc := setupTestService(t, feeds.WithSyncMinDelay(5*time.Millisecond), + feeds.WithSyncMaxDelay(50*time.Millisecond), feeds.WithSyncMaxAttempts(4)) + + tt.setup(t, svc) + _, err := tt.run(svc) + + require.NoError(t, err) + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + assert.Equal(collect, tt.wantLogs, logMessages(svc.logs.All())) + }, 1*time.Second, 50*time.Millisecond) + }) + } +} + func Test_Service_IsJobManaged(t *testing.T) { t.Parallel() @@ -4751,3 +4924,21 @@ func Test_Service_StartStop(t *testing.T) { }) } } + +func logMessages(logEntries []observer.LoggedEntry) []string { + messages := make([]string, 0, len(logEntries)) + for _, entry := range logEntries { + messageWithContext := entry.Message + contextMap := entry.ContextMap() + for _, key := range slices.Sorted(maps.Keys(contextMap)) { + if key == "version" || key == "errVerbose" { + continue + } + messageWithContext += fmt.Sprintf(" %v=\"%v\"", key, entry.ContextMap()[key]) + } + + messages = append(messages, messageWithContext) + } + + return messages +} diff --git a/deployment/go.mod b/deployment/go.mod index 861b7e8515e..b89112f0083 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -358,7 +358,7 @@ require ( github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20250224190032-809e4b8cf29e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect 
github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20250207205350-420ccacab78a // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 // indirect diff --git a/deployment/go.sum b/deployment/go.sum index 008a81c31c9..d826718379e 100644 --- a/deployment/go.sum +++ b/deployment/go.sum @@ -1154,8 +1154,8 @@ github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 h1:hfMRj2ny6oNHd8w1rhJHdoX3YkoWJtCkBK6wTlCE4+c= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= diff --git a/go.mod b/go.mod index 91839a87219..c3b52661302 100644 --- a/go.mod +++ b/go.mod @@ -86,7 +86,7 @@ require ( github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20250207205350-420ccacab78a github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20241009055228-33d0c0bf38de diff --git a/go.sum b/go.sum index 441c8b933ec..6860ae1bdcb 100644 --- a/go.sum +++ b/go.sum @@ -1032,8 +1032,8 @@ github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441- github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb/go.mod h1:4JqpgFy01LaqG1yM2iFTzwX3ZgcAvW9WdstBZQgPHzU= github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 h1:3icYNFldKQbs6Qrfai2LE+tKbNcE4tfgPRELF30mnEA= github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 
h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 02a22943d6b..df5ffad26d9 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -444,7 +444,7 @@ require ( github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20250207205350-420ccacab78a // indirect github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index e0ee69f9e01..b987c1228e9 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1448,8 +1448,8 @@ github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 h1:hfMRj2ny6oNHd8w1rhJHdoX3YkoWJtCkBK6wTlCE4+c= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 587c30e4f31..850d608dfe5 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -435,7 +435,7 @@ require ( github.com/smartcontractkit/chainlink-framework/chains 
v0.0.0-20250207205350-420ccacab78a // indirect github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb // indirect github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index e3e3a07128c..65671427faa 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1433,8 +1433,8 @@ github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 h1:hfMRj2ny6oNHd8w1rhJHdoX3YkoWJtCkBK6wTlCE4+c= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= diff --git a/system-tests/lib/go.mod b/system-tests/lib/go.mod index 526d92473e4..32efa110bf9 100644 --- a/system-tests/lib/go.mod +++ b/system-tests/lib/go.mod @@ -347,7 +347,7 @@ require ( github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20250207205350-420ccacab78a // indirect github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb // indirect github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a // indirect diff --git a/system-tests/lib/go.sum b/system-tests/lib/go.sum index 83acbd61e21..64a9a9b9350 100644 --- a/system-tests/lib/go.sum +++ b/system-tests/lib/go.sum @@ -1142,8 +1142,8 @@ github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d github.com/smartcontractkit/chainlink-integrations/evm 
v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 h1:hfMRj2ny6oNHd8w1rhJHdoX3YkoWJtCkBK6wTlCE4+c= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= diff --git a/system-tests/tests/go.mod b/system-tests/tests/go.mod index 64c23415501..b7915ef440b 100644 --- a/system-tests/tests/go.mod +++ b/system-tests/tests/go.mod @@ -352,7 +352,7 @@ require ( github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb // indirect github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 // indirect github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a // indirect diff --git a/system-tests/tests/go.sum b/system-tests/tests/go.sum index 8ec9815675c..df065994470 100644 --- a/system-tests/tests/go.sum +++ b/system-tests/tests/go.sum @@ -1142,8 +1142,8 @@ github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02/go.mod h1:7DbPnG0E39eZaX1CXKxRiJ1NOWHwTZYDWR9ys3kZZuU= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 h1:hfMRj2ny6oNHd8w1rhJHdoX3YkoWJtCkBK6wTlCE4+c= github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 h1:ZBat8EBvE2LpSQR9U1gEbRV6PfAkiFdINmQ8nVnXIAQ= -github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 h1:xRgu/kMkxcY4LeDKMBhaXU4khgya7v2wyb4Sa5Nzb+Y= +github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 h1:L6KJ4kGv/yNNoCk8affk7Y1vAY0qglPMXC/hevV/IsA= github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6/go.mod 
h1:FRwzI3hGj4CJclNS733gfcffmqQ62ONCkbGi49s658w= github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 h1:c77Gi/APraqwbBO8fbd/5JY2wW+MSIpYg8Uma9MEZFE= From 5403123073f043520f6f1fbd7a2e07d47913193c Mon Sep 17 00:00:00 2001 From: Erik Burton Date: Wed, 26 Feb 2025 14:01:14 -0500 Subject: [PATCH 02/17] fix: goreleaser amd64 disk space issue (#16592) --- .../actions/goreleaser-build-sign-publish/action.yml | 10 ++++++++++ .github/workflows/build-publish-develop-pr.yml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/actions/goreleaser-build-sign-publish/action.yml b/.github/actions/goreleaser-build-sign-publish/action.yml index 0c7c05fe618..e63f22a1b70 100644 --- a/.github/actions/goreleaser-build-sign-publish/action.yml +++ b/.github/actions/goreleaser-build-sign-publish/action.yml @@ -37,6 +37,16 @@ inputs: runs: using: composite steps: + # See https://github.com/orgs/community/discussions/25678#discussioncomment-5242449 + - name: Delete unused tools to free up space + shell: bash + run: | + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/Python + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/Java_* + - # We need QEMU to test the cross architecture builds after they're built. name: Set up QEMU uses: docker/setup-qemu-action@4574d27a4764455b42196d70a065bc6853246a25 # v3.4.0 diff --git a/.github/workflows/build-publish-develop-pr.yml b/.github/workflows/build-publish-develop-pr.yml index 9f918541673..d49e539e35e 100644 --- a/.github/workflows/build-publish-develop-pr.yml +++ b/.github/workflows/build-publish-develop-pr.yml @@ -98,7 +98,7 @@ jobs: with: persist-credentials: false ref: ${{ env.CHECKOUT_REF }} - fetch-depth: 0 + fetch-depth: 1 - name: Setup Github Token id: token From d27cae599135636da62c9af40ddcbafc61b864cb Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Wed, 26 Feb 2025 21:15:47 +0100 Subject: [PATCH 03/17] [TT-1992] use CTF/lib that supports Docker running go-ethereum v1.15.0 (#16477) * use CTF/lib that supports Docker running go-ethereum v1.15.0 * use CTF versions that use go-ethereum v1.15.0 * make sure thet eth client used with Seth is ethclient not simulated.Backend * try another commit * use tagged CTF versions * add changeset * remove havoc.Schedule calls --- .changeset/swift-nails-yell.md | 5 ++ core/scripts/go.mod | 4 +- deployment/go.mod | 4 +- go.mod | 4 +- integration-tests/go.mod | 10 +-- integration-tests/go.sum | 20 ++--- integration-tests/load/go.mod | 10 +-- integration-tests/load/go.sum | 20 ++--- .../testsetups/automation_benchmark.go | 9 +- integration-tests/testsetups/ocr.go | 6 -- integration-tests/wrappers/contract_caller.go | 82 +++++++++++++++---- system-tests/lib/go.mod | 10 +-- system-tests/lib/go.sum | 12 +-- system-tests/tests/go.mod | 10 +-- system-tests/tests/go.sum | 12 +-- 15 files changed, 129 insertions(+), 89 deletions(-) create mode 100644 .changeset/swift-nails-yell.md diff --git a/.changeset/swift-nails-yell.md b/.changeset/swift-nails-yell.md new file mode 100644 index 00000000000..3c96000ff7f --- /dev/null +++ b/.changeset/swift-nails-yell.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +go.mod update in core/scripts #updated diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 72bbcfcf04b..36d15adffbd 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -1,8 +1,6 @@ module github.com/smartcontractkit/chainlink/core/scripts -go 1.24 - -toolchain go1.24.0 +go 1.24.0 // 
Make sure we're working with the latest chainlink libs replace github.com/smartcontractkit/chainlink/v2 => ../../ diff --git a/deployment/go.mod b/deployment/go.mod index b89112f0083..16510ba7b37 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -1,8 +1,6 @@ module github.com/smartcontractkit/chainlink/deployment -go 1.24 - -toolchain go1.24.0 +go 1.24.0 // Make sure we're working with the latest chainlink libs replace github.com/smartcontractkit/chainlink/v2 => ../ diff --git a/go.mod b/go.mod index c3b52661302..6ffb117780e 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/smartcontractkit/chainlink/v2 -go 1.24 - -toolchain go1.24.0 +go 1.24.0 require ( github.com/Depado/ginprom v1.8.0 diff --git a/integration-tests/go.mod b/integration-tests/go.mod index df5ffad26d9..6d7280b4740 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -48,12 +48,12 @@ require ( github.com/smartcontractkit/chainlink-common v0.4.2-0.20250221174903-e1e47fdb11b0 github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 - github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5-0.20250225210020-fc215b29321e - github.com/smartcontractkit/chainlink-testing-framework/lib v1.51.1-0.20250225210020-fc215b29321e - github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.1-0.20250225210020-fc215b29321e + github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5 + github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 + github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.51.0 github.com/smartcontractkit/chainlink-testing-framework/sentinel v0.1.2 - github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 - github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.6-0.20250225210020-fc215b29321e + github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 + github.com/smartcontractkit/chainlink-testing-framework/wasp v1.51.0 github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index b987c1228e9..cd175605d3a 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1458,18 +1458,18 @@ github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14 github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a/go.mod h1:aFm1QC/n99mVeBDtv0SE0co56+IECY6Y1fR3OfNYy3c= github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8-0.20250225210020-fc215b29321e h1:6poVaumg1KZYW/K8Aeip2VN2k9TnSjK6ujwfxJnIsoY= github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8-0.20250225210020-fc215b29321e/go.mod h1:tnyujVNgajqe67i2/0iwS4Y3mvbA30XBQWLEAArpwfw= -github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5-0.20250225210020-fc215b29321e h1:2LFrQLx5VoilcbmiDC9+0rENoNLNECEVYYj3c/VDpHs= -github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5-0.20250225210020-fc215b29321e/go.mod h1:SKBYQvtnl3OqOTr5aQyt9YbIckuNNn40LOJUCR0vlMo= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.51.1-0.20250225210020-fc215b29321e h1:8fgAWzBdPrJcqX/QJMxYB3Xoi3v0IZkML7pOiLyV/tQ= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.51.1-0.20250225210020-fc215b29321e/go.mod h1:jNxIJa9Fl/zM7rFahUFE8E55VGPC/2e6ilqVKoSbr8U= 
-github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.1-0.20250225210020-fc215b29321e h1:g1fOH4P+JLU70bpjQVL7viDU7EhB+JKr1SpvpIDce+g= -github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.1-0.20250225210020-fc215b29321e/go.mod h1:ym1yBKknQkGfgSQF7EGXSevDuex1YZbz+zgDwRvhY3U= +github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5 h1:S5HND0EDtlA+xp2E+mD11DlUTp2wD6uojwixye8ZB/k= +github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5/go.mod h1:SKBYQvtnl3OqOTr5aQyt9YbIckuNNn40LOJUCR0vlMo= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 h1:rNjLZrwY3TcrANHVz/JUm55vufzoeRogSlgjAH7plvU= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0/go.mod h1:jNxIJa9Fl/zM7rFahUFE8E55VGPC/2e6ilqVKoSbr8U= +github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.51.0 h1:+6L/PVxWsaYCr9jmxtKfyCcEJm1o6UaKrFJU9jAiZwA= +github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.51.0/go.mod h1:ym1yBKknQkGfgSQF7EGXSevDuex1YZbz+zgDwRvhY3U= github.com/smartcontractkit/chainlink-testing-framework/sentinel v0.1.2 h1:ihRlWrii5nr4RUuMu1hStTbwFvVuHUDoQQwXmCU5IdQ= github.com/smartcontractkit/chainlink-testing-framework/sentinel v0.1.2/go.mod h1:J1Za5EuI/vWDsQSIh6qbPXlVvuEhmHmnvLQBN0XVxqA= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 h1:Yf+n3T/fnUWcYyfe7bsygV4sWAkNo0QhN58APJFIKIc= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10/go.mod h1:05duR85P8YHuIfIkA7sn2bvrhKo/pDpFKV2rliYHNOo= -github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.6-0.20250225210020-fc215b29321e h1:Xuulg3EfQdsgO4j+IaMkffpmd0UbIcppB4wsIafKPsA= -github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.6-0.20250225210020-fc215b29321e/go.mod h1:eqV2n0vpqnY5N51je5/1vC/Qm8MMXVKvOXjLM+53Sog= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 h1:cH+/lRpm7VN1a/tX7HmJCtQfZjLRyw1khG7CEQS94jA= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0/go.mod h1:kHYJnZUqiPF7/xN5273prV+srrLJkS77GbBXHLKQpx0= +github.com/smartcontractkit/chainlink-testing-framework/wasp v1.51.0 h1:qaLw7J7oRRsj+lUzzIjGVlXAVNmkAEwjj7xTXe0hcAk= +github.com/smartcontractkit/chainlink-testing-framework/wasp v1.51.0/go.mod h1:eqV2n0vpqnY5N51je5/1vC/Qm8MMXVKvOXjLM+53Sog= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 h1:PKiqnVOTChlH4a4ljJKL3OKGRgYfIpJS4YD1daAIKks= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 850d608dfe5..9dbc79c8cbb 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -29,9 +29,9 @@ require ( github.com/smartcontractkit/chainlink-ccip v0.0.0-20250226083129-e596590f48f7 github.com/smartcontractkit/chainlink-common v0.4.2-0.20250221174903-e1e47fdb11b0 github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 - github.com/smartcontractkit/chainlink-testing-framework/lib v1.51.1-0.20250225210020-fc215b29321e - github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.14-0.20250225174253-0fe1e95e89b2 - github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.6-0.20250225210020-fc215b29321e + github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 + 
github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 + github.com/smartcontractkit/chainlink-testing-framework/wasp v1.51.0 github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20241009055228-33d0c0bf38de github.com/stretchr/testify v1.10.0 github.com/wiremock/go-wiremock v1.9.0 @@ -440,8 +440,8 @@ require ( github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a // indirect github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8-0.20250225210020-fc215b29321e // indirect - github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5-0.20250225210020-fc215b29321e // indirect - github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.1-0.20250225210020-fc215b29321e // indirect + github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5 // indirect + github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.51.0 // indirect github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 // indirect github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 // indirect github.com/smartcontractkit/mcms v0.12.2 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 65671427faa..37213c2e46c 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1443,16 +1443,16 @@ github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14 github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a/go.mod h1:aFm1QC/n99mVeBDtv0SE0co56+IECY6Y1fR3OfNYy3c= github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8-0.20250225210020-fc215b29321e h1:6poVaumg1KZYW/K8Aeip2VN2k9TnSjK6ujwfxJnIsoY= github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8-0.20250225210020-fc215b29321e/go.mod h1:tnyujVNgajqe67i2/0iwS4Y3mvbA30XBQWLEAArpwfw= -github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5-0.20250225210020-fc215b29321e h1:2LFrQLx5VoilcbmiDC9+0rENoNLNECEVYYj3c/VDpHs= -github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5-0.20250225210020-fc215b29321e/go.mod h1:SKBYQvtnl3OqOTr5aQyt9YbIckuNNn40LOJUCR0vlMo= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.51.1-0.20250225210020-fc215b29321e h1:8fgAWzBdPrJcqX/QJMxYB3Xoi3v0IZkML7pOiLyV/tQ= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.51.1-0.20250225210020-fc215b29321e/go.mod h1:jNxIJa9Fl/zM7rFahUFE8E55VGPC/2e6ilqVKoSbr8U= -github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.1-0.20250225210020-fc215b29321e h1:g1fOH4P+JLU70bpjQVL7viDU7EhB+JKr1SpvpIDce+g= -github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.1-0.20250225210020-fc215b29321e/go.mod h1:ym1yBKknQkGfgSQF7EGXSevDuex1YZbz+zgDwRvhY3U= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.14-0.20250225174253-0fe1e95e89b2 h1:DFP15nHtZntUnhAsHFxeE40jgHf5qgBX9TvzX49yRdU= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.14-0.20250225174253-0fe1e95e89b2/go.mod h1:kHYJnZUqiPF7/xN5273prV+srrLJkS77GbBXHLKQpx0= -github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.6-0.20250225210020-fc215b29321e h1:Xuulg3EfQdsgO4j+IaMkffpmd0UbIcppB4wsIafKPsA= -github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.6-0.20250225210020-fc215b29321e/go.mod h1:eqV2n0vpqnY5N51je5/1vC/Qm8MMXVKvOXjLM+53Sog= 
+github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5 h1:S5HND0EDtlA+xp2E+mD11DlUTp2wD6uojwixye8ZB/k= +github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5/go.mod h1:SKBYQvtnl3OqOTr5aQyt9YbIckuNNn40LOJUCR0vlMo= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 h1:rNjLZrwY3TcrANHVz/JUm55vufzoeRogSlgjAH7plvU= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0/go.mod h1:jNxIJa9Fl/zM7rFahUFE8E55VGPC/2e6ilqVKoSbr8U= +github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.51.0 h1:+6L/PVxWsaYCr9jmxtKfyCcEJm1o6UaKrFJU9jAiZwA= +github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.51.0/go.mod h1:ym1yBKknQkGfgSQF7EGXSevDuex1YZbz+zgDwRvhY3U= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 h1:cH+/lRpm7VN1a/tX7HmJCtQfZjLRyw1khG7CEQS94jA= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0/go.mod h1:kHYJnZUqiPF7/xN5273prV+srrLJkS77GbBXHLKQpx0= +github.com/smartcontractkit/chainlink-testing-framework/wasp v1.51.0 h1:qaLw7J7oRRsj+lUzzIjGVlXAVNmkAEwjj7xTXe0hcAk= +github.com/smartcontractkit/chainlink-testing-framework/wasp v1.51.0/go.mod h1:eqV2n0vpqnY5N51je5/1vC/Qm8MMXVKvOXjLM+53Sog= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 h1:PKiqnVOTChlH4a4ljJKL3OKGRgYfIpJS4YD1daAIKks= diff --git a/integration-tests/testsetups/automation_benchmark.go b/integration-tests/testsetups/automation_benchmark.go index 1d9675decb0..d54cf592b79 100644 --- a/integration-tests/testsetups/automation_benchmark.go +++ b/integration-tests/testsetups/automation_benchmark.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -230,10 +231,14 @@ func (k *KeeperBenchmarkTest) Run() { var startedObservations = atomic.Int32{} var finishedObservations = atomic.Int32{} + // since Seth can also be using simulated.Backend we need to make sure we are using ethclient.Client + sethAsEthClient, ok := k.chainClient.Client.(*ethclient.Client) + require.True(k.t, ok, "chainClient (Seth) client should be an ethclient.Client") + // We create as many channels as listening goroutines (1 per upkeep). In the background we will be fanning out // headers that we get from a single channel connected to EVM node to all upkeep-specific channels. 
headerCh := make(chan *blockchain.SafeEVMHeader, 10) - sub, err := k.chainClient.Client.Client().EthSubscribe(context.Background(), headerCh, "newHeads") + sub, err := sethAsEthClient.Client().EthSubscribe(context.Background(), headerCh, "newHeads") require.NoError(k.t, err, "Subscribing to new headers for upkeep observation shouldn't fail") totalNumberOfChannels := 0 @@ -279,7 +284,7 @@ func (k *KeeperBenchmarkTest) Run() { // we use infinite loop here on purposes, these nodes can be down for extended periods of time ¯\_(ツ)_/¯ RECONNECT: for { - sub, err = k.chainClient.Client.Client().EthSubscribe(context.Background(), headerCh, "newHeads") + sub, err = sethAsEthClient.Client().EthSubscribe(context.Background(), headerCh, "newHeads") if err == nil { break RECONNECT } diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go index 7a90c38fdd0..82a6a2b6c26 100644 --- a/integration-tests/testsetups/ocr.go +++ b/integration-tests/testsetups/ocr.go @@ -1314,9 +1314,3 @@ func (l ocrTestChaosListener) OnChaosEnded(_ havoc.Chaos) { func (l ocrTestChaosListener) OnChaosStatusUnknown(_ havoc.Chaos) { } - -func (l ocrTestChaosListener) OnScheduleCreated(_ havoc.Schedule) { -} - -func (l ocrTestChaosListener) OnScheduleDeleted(_ havoc.Schedule) { -} diff --git a/integration-tests/wrappers/contract_caller.go b/integration-tests/wrappers/contract_caller.go index f904e2fa1da..c256ea1cfd1 100644 --- a/integration-tests/wrappers/contract_caller.go +++ b/integration-tests/wrappers/contract_caller.go @@ -63,16 +63,20 @@ func MustNewRetryingWrappedContractBackend(sethClient *seth.Client, logger zerol } } -func (w *WrappedContractBackend) getGethClient() *ethclient.Client { +func (w *WrappedContractBackend) getGethClient() (*ethclient.Client, error) { if w.sethClient != nil { - return w.sethClient.Client + if asEthClient, ok := w.sethClient.Client.(*ethclient.Client); ok { + return asEthClient, nil + } + + return nil, fmt.Errorf("seth client is not an ethclient, but %T", w.sethClient.Client) } if w.evmClient != nil { - return w.evmClient.GetEthClient() + return w.evmClient.GetEthClient(), nil } - panic("No client found") + return nil, errors.New("no client found") } func (w *WrappedContractBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { @@ -81,7 +85,10 @@ func (w *WrappedContractBackend) CodeAt(ctx context.Context, contract common.Add } var fn = func() ([]byte, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.CodeAt(ctx, contract, blockNumber) } @@ -95,7 +102,10 @@ func (w *WrappedContractBackend) PendingCodeAt(ctx context.Context, contract com } var fn = func() ([]byte, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.PendingCodeAt(ctx, contract) } @@ -109,7 +119,10 @@ func (w *WrappedContractBackend) CodeAtHash(ctx context.Context, contract common } var fn = func() ([]byte, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.CodeAtHash(ctx, contract, blockHash) } @@ -123,7 +136,10 @@ func (w *WrappedContractBackend) CallContractAtHash(ctx context.Context, call et } var fn = func() ([]byte, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return 
client.CallContractAtHash(ctx, call, blockHash) } @@ -137,7 +153,10 @@ func (w *WrappedContractBackend) HeaderByNumber(ctx context.Context, number *big } var fn = func() (*types.Header, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.HeaderByNumber(ctx, number) } @@ -151,7 +170,10 @@ func (w *WrappedContractBackend) PendingNonceAt(ctx context.Context, account com } var fn = func() (uint64, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return 0, clientErr + } return client.PendingNonceAt(ctx, account) } @@ -165,7 +187,10 @@ func (w *WrappedContractBackend) SuggestGasPrice(ctx context.Context) (*big.Int, } var fn = func() (*big.Int, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.SuggestGasPrice(ctx) } @@ -179,7 +204,10 @@ func (w *WrappedContractBackend) SuggestGasTipCap(ctx context.Context) (*big.Int } var fn = func() (*big.Int, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.SuggestGasTipCap(ctx) } @@ -193,7 +221,10 @@ func (w *WrappedContractBackend) EstimateGas(ctx context.Context, call ethereum. } var fn = func() (uint64, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return 0, clientErr + } return client.EstimateGas(ctx, call) } @@ -206,7 +237,10 @@ func (w *WrappedContractBackend) SendTransaction(ctx context.Context, tx *types. return errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call SendTransaction") } - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return clientErr + } return client.SendTransaction(ctx, tx) } @@ -216,7 +250,10 @@ func (w *WrappedContractBackend) FilterLogs(ctx context.Context, query ethereum. } var fn = func() ([]types.Log, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.FilterLogs(ctx, query) } @@ -230,7 +267,10 @@ func (w *WrappedContractBackend) SubscribeFilterLogs(ctx context.Context, query } var fn = func() (ethereum.Subscription, error) { - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } return client.SubscribeFilterLogs(ctx, query, ch) } @@ -245,7 +285,10 @@ func (w *WrappedContractBackend) CallContract(ctx context.Context, msg ethereum. 
var fn = func() ([]byte, error) { var hex hexutil.Bytes - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } err := client.Client().CallContext(ctx, &hex, "eth_call", evmClient.ToBackwardCompatibleCallArg(msg), evmClient.ToBackwardCompatibleBlockNumArg(blockNumber)) if err != nil { return nil, err @@ -264,7 +307,10 @@ func (w *WrappedContractBackend) PendingCallContract(ctx context.Context, msg et var fn = func() ([]byte, error) { var hex hexutil.Bytes - client := w.getGethClient() + client, clientErr := w.getGethClient() + if clientErr != nil { + return nil, clientErr + } err := client.Client().CallContext(ctx, &hex, "eth_call", evmClient.ToBackwardCompatibleCallArg(msg), "pending") if err != nil { return nil, err diff --git a/system-tests/lib/go.mod b/system-tests/lib/go.mod index 32efa110bf9..458d8fd2054 100644 --- a/system-tests/lib/go.mod +++ b/system-tests/lib/go.mod @@ -1,8 +1,6 @@ module github.com/smartcontractkit/chainlink/system-tests/lib -go 1.24 - -toolchain go1.24.0 +go 1.24.0 require ( github.com/smartcontractkit/chainlink/deployment v0.0.0-20250221182743-098d1b0a763a @@ -21,9 +19,9 @@ require ( github.com/rs/zerolog v1.33.0 github.com/smartcontractkit/chainlink-common v0.4.2-0.20250221174903-e1e47fdb11b0 github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 - github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.4 - github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.23 - github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 + github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8 + github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 + github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 google.golang.org/grpc v1.70.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/system-tests/lib/go.sum b/system-tests/lib/go.sum index 64a9a9b9350..74810cf9921 100644 --- a/system-tests/lib/go.sum +++ b/system-tests/lib/go.sum @@ -1150,12 +1150,12 @@ github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32 github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112/go.mod h1:TcOliTQU6r59DwG4lo3U+mFM9WWyBHGuFkkxQpvSujo= github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a h1:C+XavZQ0rBOpOrh45LUhdOsvtI8OQ0XZKI5pi+GP6h4= github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a/go.mod h1:aFm1QC/n99mVeBDtv0SE0co56+IECY6Y1fR3OfNYy3c= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.4 h1:8R/xyOh/SaifOhhmOYNB3uv3sUzKeshILtVCqRgv7ck= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.4/go.mod h1:fAmPov5WZg2WjTRoUrz7fzcvD9b77ZOw5yPwJXI2IOE= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.23 h1:gji906uw6vkSPXUUBoRuFeeL/93l4YYZAQY4G03R1RM= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.23/go.mod h1:70JLBXQncNHyW63ik4PvPQGjQGZ1xK67MKrDanVAk2w= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 h1:Yf+n3T/fnUWcYyfe7bsygV4sWAkNo0QhN58APJFIKIc= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10/go.mod h1:05duR85P8YHuIfIkA7sn2bvrhKo/pDpFKV2rliYHNOo= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8 h1:WZkQt31AsUtnsTSBsH5UdOXrXTfulq0DnuVfHKLaBto= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8/go.mod h1:tnyujVNgajqe67i2/0iwS4Y3mvbA30XBQWLEAArpwfw= 
+github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 h1:rNjLZrwY3TcrANHVz/JUm55vufzoeRogSlgjAH7plvU= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0/go.mod h1:jNxIJa9Fl/zM7rFahUFE8E55VGPC/2e6ilqVKoSbr8U= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 h1:cH+/lRpm7VN1a/tX7HmJCtQfZjLRyw1khG7CEQS94jA= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0/go.mod h1:kHYJnZUqiPF7/xN5273prV+srrLJkS77GbBXHLKQpx0= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 h1:PKiqnVOTChlH4a4ljJKL3OKGRgYfIpJS4YD1daAIKks= diff --git a/system-tests/tests/go.mod b/system-tests/tests/go.mod index b7915ef440b..4d1c9863524 100644 --- a/system-tests/tests/go.mod +++ b/system-tests/tests/go.mod @@ -1,8 +1,6 @@ module github.com/smartcontractkit/chainlink/system-tests/tests -go 1.24 - -toolchain go1.24.0 +go 1.24.0 // Using a separate `require` here to avoid surrounding line changes // creating potential merge conflicts. @@ -26,9 +24,9 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.33.0 github.com/smartcontractkit/chain-selectors v1.0.40 - github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.4 - github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.23 - github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 + github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8 + github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 + github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 github.com/smartcontractkit/chainlink/system-tests/lib v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.10.0 ) diff --git a/system-tests/tests/go.sum b/system-tests/tests/go.sum index df065994470..c5bddf1be3a 100644 --- a/system-tests/tests/go.sum +++ b/system-tests/tests/go.sum @@ -1150,12 +1150,12 @@ github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32 github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112/go.mod h1:TcOliTQU6r59DwG4lo3U+mFM9WWyBHGuFkkxQpvSujo= github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a h1:C+XavZQ0rBOpOrh45LUhdOsvtI8OQ0XZKI5pi+GP6h4= github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a/go.mod h1:aFm1QC/n99mVeBDtv0SE0co56+IECY6Y1fR3OfNYy3c= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.4 h1:8R/xyOh/SaifOhhmOYNB3uv3sUzKeshILtVCqRgv7ck= -github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.4/go.mod h1:fAmPov5WZg2WjTRoUrz7fzcvD9b77ZOw5yPwJXI2IOE= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.23 h1:gji906uw6vkSPXUUBoRuFeeL/93l4YYZAQY4G03R1RM= -github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.23/go.mod h1:70JLBXQncNHyW63ik4PvPQGjQGZ1xK67MKrDanVAk2w= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 h1:Yf+n3T/fnUWcYyfe7bsygV4sWAkNo0QhN58APJFIKIc= -github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10/go.mod h1:05duR85P8YHuIfIkA7sn2bvrhKo/pDpFKV2rliYHNOo= +github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8 h1:WZkQt31AsUtnsTSBsH5UdOXrXTfulq0DnuVfHKLaBto= 
+github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.8/go.mod h1:tnyujVNgajqe67i2/0iwS4Y3mvbA30XBQWLEAArpwfw= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0 h1:rNjLZrwY3TcrANHVz/JUm55vufzoeRogSlgjAH7plvU= +github.com/smartcontractkit/chainlink-testing-framework/lib v1.52.0/go.mod h1:jNxIJa9Fl/zM7rFahUFE8E55VGPC/2e6ilqVKoSbr8U= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0 h1:cH+/lRpm7VN1a/tX7HmJCtQfZjLRyw1khG7CEQS94jA= +github.com/smartcontractkit/chainlink-testing-framework/seth v1.51.0/go.mod h1:kHYJnZUqiPF7/xN5273prV+srrLJkS77GbBXHLKQpx0= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20250220133800-f3b940c4f298 h1:PKiqnVOTChlH4a4ljJKL3OKGRgYfIpJS4YD1daAIKks= From 87b3664d31b89d22214a7267b99da7e604ec4aee Mon Sep 17 00:00:00 2001 From: "Simon B.Robert" Date: Wed, 26 Feb 2025 15:49:18 -0500 Subject: [PATCH 04/17] Add curse subject to RMNRemote view (#16563) * Add curse subject to RMNRemote view * Fix linting issues * Address PR feedback * Fix linting issues * Fix import rename --- deployment/ccip/changeset/globals/helpers.go | 24 ++++++++++ .../changeset/v1_6/cs_rmn_curse_uncurse.go | 47 +++++++------------ deployment/ccip/view/v1_6/rmnremote.go | 37 +++++++++++++-- deployment/ccip/view/v1_6/rmnremote_test.go | 42 +++++++++++++++++ .../ccip/ccip_cs_rmn_curse_uncurse_test.go | 5 +- 5 files changed, 118 insertions(+), 37 deletions(-) create mode 100644 deployment/ccip/changeset/globals/helpers.go create mode 100644 deployment/ccip/view/v1_6/rmnremote_test.go diff --git a/deployment/ccip/changeset/globals/helpers.go b/deployment/ccip/changeset/globals/helpers.go new file mode 100644 index 00000000000..5169cc7a732 --- /dev/null +++ b/deployment/ccip/changeset/globals/helpers.go @@ -0,0 +1,24 @@ +package globals + +import "encoding/binary" + +// GlobalCurseSubject as defined here: https://github.com/smartcontractkit/chainlink/blob/new-rmn-curse-changeset/contracts/src/v0.8/ccip/rmn/RMNRemote.sol#L15 +func GlobalCurseSubject() Subject { + return Subject{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} +} + +type Subject = [16]byte + +func SelectorToSubject(selector uint64) Subject { + var b Subject + binary.BigEndian.PutUint64(b[8:], selector) + return b +} + +func SubjectToSelector(subject [16]byte) uint64 { + if subject == GlobalCurseSubject() { + return 0 + } + + return binary.BigEndian.Uint64(subject[8:]) +} diff --git a/deployment/ccip/changeset/v1_6/cs_rmn_curse_uncurse.go b/deployment/ccip/changeset/v1_6/cs_rmn_curse_uncurse.go index 63ac89689ae..248a944dc39 100644 --- a/deployment/ccip/changeset/v1_6/cs_rmn_curse_uncurse.go +++ b/deployment/ccip/changeset/v1_6/cs_rmn_curse_uncurse.go @@ -1,12 +1,12 @@ package v1_6 import ( - "encoding/binary" "errors" "fmt" "github.com/smartcontractkit/chainlink/deployment" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/globals" commoncs "github.com/smartcontractkit/chainlink/deployment/common/changeset" ) @@ -15,16 +15,11 @@ var ( _ deployment.ChangeSet[RMNCurseConfig] = RMNUncurseChangeset ) -// GlobalCurseSubject as defined here: 
https://github.com/smartcontractkit/chainlink/blob/new-rmn-curse-changeset/contracts/src/v0.8/ccip/rmn/RMNRemote.sol#L15 -func GlobalCurseSubject() Subject { - return Subject{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} -} - // RMNCurseAction represent a curse action to be applied on a chain (ChainSelector) with a specific subject (SubjectToCurse) // The curse action will by applied by calling the Curse method on the RMNRemote contract on the chain (ChainSelector) type RMNCurseAction struct { ChainSelector uint64 - SubjectToCurse Subject + SubjectToCurse globals.Subject } // CurseAction is a function that returns a list of RMNCurseAction to be applied on a chain @@ -52,11 +47,11 @@ func (c RMNCurseConfig) Validate(e deployment.Environment) error { return errors.New("reason is required") } - validSubjects := map[Subject]struct{}{ - GlobalCurseSubject(): {}, + validSubjects := map[globals.Subject]struct{}{ + globals.GlobalCurseSubject(): {}, } for _, selector := range e.AllChainSelectors() { - validSubjects[SelectorToSubject(selector)] = struct{}{} + validSubjects[globals.SelectorToSubject(selector)] = struct{}{} } for _, curseAction := range c.CurseActions { @@ -85,14 +80,6 @@ func (c RMNCurseConfig) Validate(e deployment.Environment) error { return nil } -type Subject = [16]byte - -func SelectorToSubject(selector uint64) Subject { - var b Subject - binary.BigEndian.PutUint64(b[8:], selector) - return b -} - // CurseLaneOnlyOnSource curses a lane only on the source chain // This will prevent message from source to destination to be initiated // One noteworthy behaviour is that this means that message can be sent from destination to source but will not be executed on the source @@ -104,7 +91,7 @@ func CurseLaneOnlyOnSource(sourceSelector uint64, destinationSelector uint64) Cu return []RMNCurseAction{ { ChainSelector: sourceSelector, - SubjectToCurse: SelectorToSubject(destinationSelector), + SubjectToCurse: globals.SelectorToSubject(destinationSelector), }, } } @@ -118,7 +105,7 @@ func CurseGloballyOnlyOnChain(selector uint64) CurseAction { return []RMNCurseAction{ { ChainSelector: selector, - SubjectToCurse: GlobalCurseSubject(), + SubjectToCurse: globals.GlobalCurseSubject(), }, } } @@ -150,7 +137,7 @@ func CurseChain(chainSelector uint64) CurseAction { if otherChainSelector != chainSelector { curseActions = append(curseActions, RMNCurseAction{ ChainSelector: otherChainSelector, - SubjectToCurse: SelectorToSubject(chainSelector), + SubjectToCurse: globals.SelectorToSubject(chainSelector), }) } } @@ -162,24 +149,24 @@ func CurseChain(chainSelector uint64) CurseAction { } } -func groupRMNSubjectBySelector(rmnSubjects []RMNCurseAction, avoidCursingSelf bool, onlyKeepGlobal bool) map[uint64][]Subject { - grouped := make(map[uint64][]Subject) +func groupRMNSubjectBySelector(rmnSubjects []RMNCurseAction, avoidCursingSelf bool, onlyKeepGlobal bool) map[uint64][]globals.Subject { + grouped := make(map[uint64][]globals.Subject) for _, s := range rmnSubjects { // Skip self-curse if needed - if s.SubjectToCurse == SelectorToSubject(s.ChainSelector) && avoidCursingSelf { + if s.SubjectToCurse == globals.SelectorToSubject(s.ChainSelector) && avoidCursingSelf { continue } // Initialize slice for this chain if needed if _, ok := grouped[s.ChainSelector]; !ok { - grouped[s.ChainSelector] = []Subject{} + grouped[s.ChainSelector] = []globals.Subject{} } // If global is already set and we only keep global, skip - if onlyKeepGlobal && 
len(grouped[s.ChainSelector]) == 1 && grouped[s.ChainSelector][0] == GlobalCurseSubject() { + if onlyKeepGlobal && len(grouped[s.ChainSelector]) == 1 && grouped[s.ChainSelector][0] == globals.GlobalCurseSubject() { continue } // If subject is global and we only keep global, reset immediately - if s.SubjectToCurse == GlobalCurseSubject() && onlyKeepGlobal { - grouped[s.ChainSelector] = []Subject{GlobalCurseSubject()} + if s.SubjectToCurse == globals.GlobalCurseSubject() && onlyKeepGlobal { + grouped[s.ChainSelector] = []globals.Subject{globals.GlobalCurseSubject()} continue } // Ensure uniqueness @@ -238,7 +225,7 @@ func RMNCurseChangeset(e deployment.Environment, cfg RMNCurseConfig) (deployment } if curseSubjects, ok := grouped[selector]; ok { // Only curse the subjects that are not actually cursed - notAlreadyCursedSubjects := make([]Subject, 0) + notAlreadyCursedSubjects := make([]globals.Subject, 0) for _, subject := range curseSubjects { cursed, err := chain.RMNRemote.IsCursed(nil, subject) if err != nil { @@ -311,7 +298,7 @@ func RMNUncurseChangeset(e deployment.Environment, cfg RMNCurseConfig) (deployme if curseSubjects, ok := grouped[selector]; ok { // Only keep the subject that are actually cursed - actuallyCursedSubjects := make([]Subject, 0) + actuallyCursedSubjects := make([]globals.Subject, 0) for _, subject := range curseSubjects { cursed, err := chain.RMNRemote.IsCursed(nil, subject) if err != nil { diff --git a/deployment/ccip/view/v1_6/rmnremote.go b/deployment/ccip/view/v1_6/rmnremote.go index 36df620196d..236c0a1d6c4 100644 --- a/deployment/ccip/view/v1_6/rmnremote.go +++ b/deployment/ccip/view/v1_6/rmnremote.go @@ -1,14 +1,23 @@ package v1_6 import ( + "encoding/hex" + + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/globals" "github.com/smartcontractkit/chainlink/deployment/common/view/types" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/v1_6_0/rmn_remote" ) +type RMNRemoteCurseEntry struct { + Subject string `json:"subject"` + Selector uint64 `json:"selector"` +} + type RMNRemoteView struct { types.ContractMetaData - IsCursed bool `json:"isCursed"` - Config RMNRemoteVersionedConfig `json:"config,omitempty"` + IsCursed bool `json:"isCursed"` + Config RMNRemoteVersionedConfig `json:"config,omitempty"` + CursedSubjectEntries []RMNRemoteCurseEntry `json:"cursedSubjectEntries,omitempty"` } type RMNRemoteVersionedConfig struct { @@ -22,6 +31,17 @@ type RMNRemoteSigner struct { NodeIndex uint64 `json:"node_index"` } +func mapCurseSubjects(subjects [][16]byte) []RMNRemoteCurseEntry { + res := make([]RMNRemoteCurseEntry, 0, len(subjects)) + for _, subject := range subjects { + res = append(res, RMNRemoteCurseEntry{ + Subject: hex.EncodeToString(subject[:]), + Selector: globals.SubjectToSelector(subject), + }) + } + return res +} + func GenerateRMNRemoteView(rmnReader *rmn_remote.RMNRemote) (RMNRemoteView, error) { tv, err := types.NewContractMetaData(rmnReader, rmnReader.Address()) if err != nil { @@ -46,9 +66,16 @@ func GenerateRMNRemoteView(rmnReader *rmn_remote.RMNRemote) (RMNRemoteView, erro if err != nil { return RMNRemoteView{}, err } + + curseSubjects, err := rmnReader.GetCursedSubjects(nil) + if err != nil { + return RMNRemoteView{}, err + } + return RMNRemoteView{ - ContractMetaData: tv, - IsCursed: isCursed, - Config: rmnConfig, + ContractMetaData: tv, + IsCursed: isCursed, + Config: rmnConfig, + CursedSubjectEntries: mapCurseSubjects(curseSubjects), }, nil } diff --git a/deployment/ccip/view/v1_6/rmnremote_test.go 
b/deployment/ccip/view/v1_6/rmnremote_test.go new file mode 100644 index 00000000000..d4465f6c022 --- /dev/null +++ b/deployment/ccip/view/v1_6/rmnremote_test.go @@ -0,0 +1,42 @@ +package v1_6 + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/globals" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/v1_6_0/rmn_remote" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func Test_RMNRemote_Curse_View(t *testing.T) { + e := memory.NewMemoryEnvironment(t, logger.TestLogger(t), zapcore.InfoLevel, memory.MemoryEnvironmentConfig{ + Chains: 1, + }) + chain := e.Chains[e.AllChainSelectors()[0]] + _, tx, remote, err := rmn_remote.DeployRMNRemote(chain.DeployerKey, chain.Client, e.AllChainSelectors()[0], common.Address{}) + _, err = deployment.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + tx, err = remote.Curse(chain.DeployerKey, globals.GlobalCurseSubject()) + _, err = deployment.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + tx, err = remote.Curse(chain.DeployerKey, globals.SelectorToSubject(e.AllChainSelectors()[0])) + _, err = deployment.ConfirmIfNoError(chain, tx, err) + require.NoError(t, err) + + view, err := GenerateRMNRemoteView(remote) + require.NoError(t, err) + + require.True(t, view.IsCursed) + require.Len(t, view.CursedSubjectEntries, 2) + require.Equal(t, "01000000000000000000000000000001", view.CursedSubjectEntries[0].Subject) + require.Equal(t, uint64(0), view.CursedSubjectEntries[0].Selector) + require.Equal(t, e.AllChainSelectors()[0], view.CursedSubjectEntries[1].Selector) +} diff --git a/integration-tests/smoke/ccip/ccip_cs_rmn_curse_uncurse_test.go b/integration-tests/smoke/ccip/ccip_cs_rmn_curse_uncurse_test.go index c5457d2bee6..2fef0fdf39e 100644 --- a/integration-tests/smoke/ccip/ccip_cs_rmn_curse_uncurse_test.go +++ b/integration-tests/smoke/ccip/ccip_cs_rmn_curse_uncurse_test.go @@ -8,6 +8,7 @@ import ( "github.com/smartcontractkit/chainlink/deployment" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/globals" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/testhelpers" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/v1_6" commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset" @@ -369,9 +370,9 @@ func verifyTestCaseAssertions(t *testing.T, e *testhelpers.DeployedEnv, tc Curse require.NoError(t, err) for _, assertion := range tc.curseAssertions { - cursedSubject := v1_6.SelectorToSubject(mapIDToSelector(assertion.subject)) + cursedSubject := globals.SelectorToSubject(mapIDToSelector(assertion.subject)) if assertion.globalCurse { - cursedSubject = v1_6.GlobalCurseSubject() + cursedSubject = globals.GlobalCurseSubject() } isCursed, err := state.Chains[mapIDToSelector(assertion.chainID)].RMNRemote.IsCursed(nil, cursedSubject) From 5ead33246f2e6dad35eed8cb20df0b1b566a697d Mon Sep 17 00:00:00 2001 From: Erik Burton Date: Wed, 26 Feb 2025 16:53:40 -0500 Subject: [PATCH 05/17] fix: full fetch depth for nightly builds (#16597) --- .github/workflows/build-publish-develop-pr.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-publish-develop-pr.yml 
b/.github/workflows/build-publish-develop-pr.yml index d49e539e35e..9f4a0d8deef 100644 --- a/.github/workflows/build-publish-develop-pr.yml +++ b/.github/workflows/build-publish-develop-pr.yml @@ -98,7 +98,8 @@ jobs: with: persist-credentials: false ref: ${{ env.CHECKOUT_REF }} - fetch-depth: 1 + # nightly builds require a full clone for goreleaser to work + fetch-depth: ${{ needs.image-tag.outputs.release-type == 'nightly' && '0' || '1' }} - name: Setup Github Token id: token From b804a37d59b21ed0422a92f5d6ba9199e71a47eb Mon Sep 17 00:00:00 2001 From: Anindita Ghosh <88458927+AnieeG@users.noreply.github.com> Date: Wed, 26 Feb 2025 14:29:00 -0800 Subject: [PATCH 06/17] Ccip-5315 ocr params defaults (#16573) * changes * updates * fix lint * fix pointer error * go import * fix integration-in-memory pipeline * another fix * revert * use test configs * fox crib * add required config * all required config * another try * more fixes * fixes * CCIP-5315 Removing globally shared OCR params and relying on structs instead (#16585) * Replacing with Mergo * Replacing with Mergo * go.mod --------- Co-authored-by: Mateusz Sekara --- deployment/ccip/changeset/globals/config.go | 64 +++++--- deployment/ccip/changeset/globals/ocr3.go | 97 +++++++++++ .../ccip/changeset/globals/ocr3_test.go | 26 +++ .../changeset/testhelpers/test_environment.go | 47 +++--- deployment/ccip/changeset/v1_6/config.go | 153 ++++++++++++++++++ .../v1_6/cs_active_candidate_test.go | 13 +- .../ccip/changeset/v1_6/cs_ccip_home.go | 111 ++----------- .../ccip/changeset/v1_6/cs_ccip_home_test.go | 69 +++----- .../changeset/v1_6/cs_chain_contracts_test.go | 10 +- .../ccip/changeset/v1_6/cs_home_chain_test.go | 6 +- deployment/environment/crib/ccip_deployer.go | 20 ++- deployment/go.mod | 2 +- .../smoke/ccip/ccip_gas_price_updates_test.go | 7 +- .../ccip/ccip_token_price_updates_test.go | 7 +- 14 files changed, 413 insertions(+), 219 deletions(-) create mode 100644 deployment/ccip/changeset/globals/ocr3.go create mode 100644 deployment/ccip/changeset/globals/ocr3_test.go create mode 100644 deployment/ccip/changeset/v1_6/config.go diff --git a/deployment/ccip/changeset/globals/config.go b/deployment/ccip/changeset/globals/config.go index a0ca800f1d7..c190d44fda1 100644 --- a/deployment/ccip/changeset/globals/config.go +++ b/deployment/ccip/changeset/globals/config.go @@ -2,6 +2,10 @@ package globals import ( "time" + + "github.com/smartcontractkit/chainlink-ccip/pluginconfig" + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" ) type ConfigType string @@ -10,28 +14,16 @@ const ( ConfigTypeActive ConfigType = "active" ConfigTypeCandidate ConfigType = "candidate" // ========= Changeset Defaults ========= - PermissionLessExecutionThreshold = 8 * time.Hour - RemoteGasPriceBatchWriteFrequency = 30 * time.Minute - TokenPriceBatchWriteFrequency = 30 * time.Minute - BatchGasLimit = 6_500_000 - InflightCacheExpiry = 10 * time.Minute - RootSnoozeTime = 30 * time.Minute - BatchingStrategyID = 0 - DeltaProgress = 10 * time.Second - DeltaResend = 10 * time.Second - DeltaInitial = 20 * time.Second - DeltaRound = 2 * time.Second - DeltaGrace = 2 * time.Second - DeltaCertifiedCommitRequest = 10 * time.Second - DeltaStage = 10 * time.Second - Rmax = 50 - MaxDurationQuery = 500 * time.Millisecond - MaxDurationObservation = 5 * time.Second - MaxDurationShouldAcceptAttestedReport = 10 * time.Second - MaxDurationShouldTransmitAcceptedReport = 10 * time.Second - GasPriceDeviationPPB = 
1000 - DAGasPriceDeviationPPB = 0 - OptimisticConfirmations = 1 + PermissionLessExecutionThreshold = 8 * time.Hour + RemoteGasPriceBatchWriteFrequency = 30 * time.Minute + TokenPriceBatchWriteFrequency = 30 * time.Minute + BatchGasLimit = 6_500_000 + InflightCacheExpiry = 10 * time.Minute + RootSnoozeTime = 30 * time.Minute + BatchingStrategyID = 0 + GasPriceDeviationPPB = 1000 + DAGasPriceDeviationPPB = 0 + OptimisticConfirmations = 1 // ====================================== // ========= Onchain consts ========= @@ -40,3 +32,31 @@ const ( CCIPLockOrBurnV1RetBytes = 32 // ====================================== ) + +var ( + DefaultCommitOffChainCfg = pluginconfig.CommitOffchainConfig{ + RemoteGasPriceBatchWriteFrequency: *config.MustNewDuration(30 * time.Minute), + TokenPriceBatchWriteFrequency: *config.MustNewDuration(30 * time.Minute), + NewMsgScanBatchSize: merklemulti.MaxNumberTreeLeaves, + MaxReportTransmissionCheckAttempts: 5, + RMNSignaturesTimeout: 6900 * time.Millisecond, + RMNEnabled: true, + MaxMerkleTreeSize: merklemulti.MaxNumberTreeLeaves, + SignObservationPrefix: "chainlink ccip 1.6 rmn observation", + TransmissionDelayMultiplier: 1 * time.Minute, + InflightPriceCheckRetries: 10, + MerkleRootAsyncObserverDisabled: false, + MerkleRootAsyncObserverSyncFreq: 4 * time.Second, + MerkleRootAsyncObserverSyncTimeout: 12 * time.Second, + ChainFeeAsyncObserverSyncFreq: 10 * time.Second, + ChainFeeAsyncObserverSyncTimeout: 12 * time.Second, + } + DefaultExecuteOffChainCfg = pluginconfig.ExecuteOffchainConfig{ + BatchGasLimit: 6_500_000, // Building batches with 6.5m and transmit with 8m to account for overhead. Clarify with offchain + InflightCacheExpiry: *config.MustNewDuration(5 * time.Minute), + RootSnoozeTime: *config.MustNewDuration(5 * time.Minute), // does not work now + MessageVisibilityInterval: *config.MustNewDuration(8 * time.Hour), + BatchingStrategyID: 0, + TransmissionDelayMultiplier: 1 * time.Minute, // Clarify with offchain + } +) diff --git a/deployment/ccip/changeset/globals/ocr3.go b/deployment/ccip/changeset/globals/ocr3.go new file mode 100644 index 00000000000..903f9ab00ca --- /dev/null +++ b/deployment/ccip/changeset/globals/ocr3.go @@ -0,0 +1,97 @@ +package globals + +import ( + "fmt" + "time" + + "dario.cat/mergo" + + "github.com/smartcontractkit/chainlink/deployment/common/types" +) + +// Intention of this file is to be a single source of the truth for OCR3 parameters used by CCIP plugins. +// +// Assumptions: +// - Although, some values are similar between Commit and Execute, we should keep them separate, because +// these plugins have different requirements and characteristics. This way we can avoid misconfiguration +// by accidentally changing parameter for one plugin while adjusting it for the other +// - OCR3 parameters are chain agnostic and should be reused across different chains. There might be some use cases +// for overrides to accommodate specific chain characteristics (e.g. Ethereum). +// However, for most of the cases we should strive to rely on defaults under CommitOCRParams and ExecOCRParams. +// This makes the testing process much easier and increase our confidence that the configuration is safe to use. +// - The fewer overrides the better. Introducing new overrides should be done with caution and only if there's a strong +// justification for it. 
Moreover, it requires detailed chaos / load testing to ensure that the new parameters are safe to use +// and meet CCIP SLOs +// - Single params must not be stored under const or exposed outside of this file to limit the risk of +// accidental configuration or partial configuration +// - MaxDurations should be set on the latencies observed on various environments using p99 OCR3 latencies +// These values should be specific to the plugin type and should not depend on the chain family +// or the environment in which plugin runs +var ( + // CommitOCRParams represents the default OCR3 parameters for all chains (beside Ethereum, see CommitOCRParamsForEthereum). + // Most of the intervals here should be generic enough (and chain agnostic) to be reused across different chains. + CommitOCRParams = types.OCRParameters{ + DeltaProgress: 120 * time.Second, + DeltaResend: 30 * time.Second, + DeltaInitial: 20 * time.Second, + DeltaRound: 15 * time.Second, + DeltaGrace: 5 * time.Second, + DeltaCertifiedCommitRequest: 10 * time.Second, + // TransmissionDelayMultiplier overrides DeltaStage + DeltaStage: 25 * time.Second, + Rmax: 3, + MaxDurationQuery: 7 * time.Second, + MaxDurationObservation: 13 * time.Second, + MaxDurationShouldAcceptAttestedReport: 5 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + } + + // CommitOCRParamsForEthereum represents a dedicated set of OCR3 parameters for Ethereum. + // It's driven by the fact that Ethereum block time is slow (12 seconds) and chain is considered + // more expensive to other EVM compatible chains + CommitOCRParamsForEthereum = withOverrides( + CommitOCRParams, + types.OCRParameters{ + DeltaRound: 90 * time.Second, + DeltaStage: 60 * time.Second, + }, + ) +) + +var ( + // ExecOCRParams represents the default OCR3 parameters for all chains (beside Ethereum, see ExecOCRParamsForEthereum). + ExecOCRParams = types.OCRParameters{ + DeltaProgress: 100 * time.Second, + DeltaResend: 30 * time.Second, + DeltaInitial: 20 * time.Second, + DeltaRound: 15 * time.Second, + DeltaGrace: 5 * time.Second, + DeltaCertifiedCommitRequest: 10 * time.Second, + // TransmissionDelayMultiplier overrides DeltaStage + DeltaStage: 25 * time.Second, + Rmax: 3, + // MaxDurationQuery is set to very low value, because Execution plugin doesn't use Query + MaxDurationQuery: 200 * time.Millisecond, + MaxDurationObservation: 13 * time.Second, + MaxDurationShouldAcceptAttestedReport: 5 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + } + + // ExecOCRParamsForEthereum represents a dedicated set of OCR3 parameters for Ethereum. 
+ // Similarly to Commit, it's here to accommodate Ethereum specific characteristics + ExecOCRParamsForEthereum = withOverrides( + ExecOCRParams, + types.OCRParameters{ + DeltaRound: 90 * time.Second, + DeltaStage: 60 * time.Second, + }, + ) +) + +func withOverrides(base types.OCRParameters, overrides types.OCRParameters) types.OCRParameters { + outcome := base + if err := mergo.Merge(&outcome, overrides, mergo.WithOverride); err != nil { + panic(fmt.Sprintf("error while building an OCR config %v", err)) + } + return outcome +} diff --git a/deployment/ccip/changeset/globals/ocr3_test.go b/deployment/ccip/changeset/globals/ocr3_test.go new file mode 100644 index 00000000000..672a67095a2 --- /dev/null +++ b/deployment/ccip/changeset/globals/ocr3_test.go @@ -0,0 +1,26 @@ +package globals + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_MergeWithOverrides(t *testing.T) { + assert.Equal(t, ExecOCRParams.DeltaProgress, ExecOCRParamsForEthereum.DeltaProgress) + assert.Equal(t, ExecOCRParams.DeltaResend, ExecOCRParamsForEthereum.DeltaResend) + assert.Equal(t, ExecOCRParams.DeltaInitial, ExecOCRParamsForEthereum.DeltaInitial) + assert.Equal(t, ExecOCRParams.DeltaGrace, ExecOCRParamsForEthereum.DeltaGrace) + assert.Equal(t, ExecOCRParams.DeltaCertifiedCommitRequest, ExecOCRParamsForEthereum.DeltaCertifiedCommitRequest) + assert.Equal(t, ExecOCRParams.MaxDurationQuery, ExecOCRParamsForEthereum.MaxDurationQuery) + assert.Equal(t, ExecOCRParams.MaxDurationObservation, ExecOCRParamsForEthereum.MaxDurationObservation) + assert.Equal(t, ExecOCRParams.MaxDurationShouldAcceptAttestedReport, ExecOCRParamsForEthereum.MaxDurationShouldAcceptAttestedReport) + assert.Equal(t, ExecOCRParams.MaxDurationShouldTransmitAcceptedReport, ExecOCRParamsForEthereum.MaxDurationShouldTransmitAcceptedReport) + assert.Equal(t, ExecOCRParams.MaxDurationQuery, ExecOCRParamsForEthereum.MaxDurationQuery) + + assert.Equal(t, 90*time.Second, ExecOCRParamsForEthereum.DeltaRound) + assert.Equal(t, 60*time.Second, ExecOCRParamsForEthereum.DeltaStage) + assert.Equal(t, 200*time.Millisecond, ExecOCRParams.MaxDurationQuery) + assert.Equal(t, 200*time.Millisecond, ExecOCRParamsForEthereum.MaxDurationQuery) +} diff --git a/deployment/ccip/changeset/testhelpers/test_environment.go b/deployment/ccip/changeset/testhelpers/test_environment.go index cd8f009060c..e8dca38d4ca 100644 --- a/deployment/ccip/changeset/testhelpers/test_environment.go +++ b/deployment/ccip/changeset/testhelpers/test_environment.go @@ -63,7 +63,7 @@ type TestConfigs struct { IsUSDCAttestationMissing bool IsMultiCall3 bool IsStaticLink bool - OCRConfigOverride func(*v1_6.CCIPOCRParams) + OCRConfigOverride func(v1_6.CCIPOCRParams) v1_6.CCIPOCRParams RMNEnabled bool NumOfRMNNodes int LinkPrice *big.Int @@ -174,7 +174,7 @@ func WithRMNEnabled(numOfNode int) TestOps { } } -func WithOCRConfigOverride(override func(*v1_6.CCIPOCRParams)) TestOps { +func WithOCRConfigOverride(override func(v1_6.CCIPOCRParams) v1_6.CCIPOCRParams) TestOps { return func(testCfg *TestConfigs) { testCfg.OCRConfigOverride = override } @@ -662,7 +662,8 @@ func AddCCIPContractsToEnvironment(t *testing.T, allChains []uint64, tEnv TestEn require.NoError(t, err) // Build the per chain config. 
chainConfigs := make(map[uint64]v1_6.ChainConfig) - ocrConfigs := make(map[uint64]v1_6.CCIPOCRParams) + commitOCRConfigs := make(map[uint64]v1_6.CCIPOCRParams) + execOCRConfigs := make(map[uint64]v1_6.CCIPOCRParams) for _, chain := range evmChains { timelockContractsPerChain[chain] = &proposalutils.TimelockExecutionContracts{ Timelock: state.Chains[chain].Timelock, @@ -674,22 +675,23 @@ func AddCCIPContractsToEnvironment(t *testing.T, allChains []uint64, tEnv TestEn } else { linkTokenAddr = state.Chains[chain].LinkToken.Address() } - tokenInfo := tokenConfig.GetTokenInfo(e.Env.Logger, linkTokenAddr, state.Chains[chain].Weth9.Address()) - ocrOverride := tc.OCRConfigOverride - if tc.RMNEnabled { - ocrOverride = func(ocrParams *v1_6.CCIPOCRParams) { - if tc.OCRConfigOverride != nil { - tc.OCRConfigOverride(ocrParams) + ocrOverride := func(ocrParams v1_6.CCIPOCRParams) v1_6.CCIPOCRParams { + if tc.OCRConfigOverride != nil { + tc.OCRConfigOverride(ocrParams) + } + if tc.RMNEnabled { + if ocrParams.CommitOffChainConfig != nil { + ocrParams.CommitOffChainConfig.RMNEnabled = true + } + } else { + if ocrParams.CommitOffChainConfig != nil { + ocrParams.CommitOffChainConfig.RMNEnabled = false } - ocrParams.CommitOffChainConfig.RMNEnabled = true } + return ocrParams } - ocrParams := v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig(e.FeedChainSel, tokenInfo), - v1_6.WithDefaultExecuteOffChainConfig(tokenDataProviders), - v1_6.WithOCRParamOverride(ocrOverride), - ) - ocrConfigs[chain] = ocrParams + commitOCRConfigs[chain] = v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, e.FeedChainSel, tokenConfig.GetTokenInfo(e.Env.Logger, linkTokenAddr, state.Chains[chain].Weth9.Address()), ocrOverride) + execOCRConfigs[chain] = v1_6.DeriveOCRParamsForExec(v1_6.SimulationTest, tokenDataProviders, ocrOverride) chainConfigs[chain] = v1_6.ChainConfig{ Readers: nodeInfo.NonBootstraps().PeerIDs(), FChain: uint8(len(nodeInfo.NonBootstraps().PeerIDs()) / 3), @@ -703,13 +705,8 @@ func AddCCIPContractsToEnvironment(t *testing.T, allChains []uint64, tEnv TestEn for _, chain := range solChains { ocrOverride := tc.OCRConfigOverride - ocrParams := v1_6.DeriveCCIPOCRParams( - // TODO: tokenInfo is nil for solana - v1_6.WithDefaultCommitOffChainConfig(e.FeedChainSel, nil), - v1_6.WithDefaultExecuteOffChainConfig(tokenDataProviders), - v1_6.WithOCRParamOverride(ocrOverride), - ) - ocrConfigs[chain] = ocrParams + commitOCRConfigs[chain] = v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, e.FeedChainSel, nil, ocrOverride) + execOCRConfigs[chain] = v1_6.DeriveOCRParamsForExec(v1_6.SimulationTest, tokenDataProviders, ocrOverride) chainConfigs[chain] = v1_6.ChainConfig{ Readers: nodeInfo.NonBootstraps().PeerIDs(), // #nosec G115 - Overflow is not a concern in this test scenario @@ -750,7 +747,7 @@ func AddCCIPContractsToEnvironment(t *testing.T, allChains []uint64, tEnv TestEn MCMS: mcmsConfig, }, PluginInfo: v1_6.SetCandidatePluginInfo{ - OCRConfigPerRemoteChainSelector: ocrConfigs, + OCRConfigPerRemoteChainSelector: commitOCRConfigs, PluginType: types.PluginTypeCCIPCommit, }, }, @@ -767,7 +764,7 @@ func AddCCIPContractsToEnvironment(t *testing.T, allChains []uint64, tEnv TestEn }, PluginInfo: []v1_6.SetCandidatePluginInfo{ { - OCRConfigPerRemoteChainSelector: ocrConfigs, + OCRConfigPerRemoteChainSelector: execOCRConfigs, PluginType: types.PluginTypeCCIPExec, }, }, diff --git a/deployment/ccip/changeset/v1_6/config.go b/deployment/ccip/changeset/v1_6/config.go new file mode 100644 index 00000000000..7c2c4276862 --- 
/dev/null +++ b/deployment/ccip/changeset/v1_6/config.go @@ -0,0 +1,153 @@ +package v1_6 + +import ( + "time" + + chain_selectors "github.com/smartcontractkit/chain-selectors" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" + + "github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3" + "github.com/smartcontractkit/chainlink-ccip/pluginconfig" + + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/globals" + "github.com/smartcontractkit/chainlink/deployment/common/types" +) + +var ( + DefaultOCRParamsForCommitForNonETH = CCIPOCRParams{ + OCRParameters: globals.CommitOCRParams, + CommitOffChainConfig: &globals.DefaultCommitOffChainCfg, + } + + DefaultOCRParamsForCommitForETH = CCIPOCRParams{ + OCRParameters: globals.CommitOCRParamsForEthereum, + CommitOffChainConfig: &globals.DefaultCommitOffChainCfg, + } + + DefaultOCRParamsForExecForNonETH = CCIPOCRParams{ + OCRParameters: globals.ExecOCRParams, + ExecuteOffChainConfig: &globals.DefaultExecuteOffChainCfg, + } + + DefaultOCRParamsForExecForETH = CCIPOCRParams{ + OCRParameters: globals.ExecOCRParamsForEthereum, + ExecuteOffChainConfig: &globals.DefaultExecuteOffChainCfg, + } + + // Used for only testing with simulated chains + OcrParamsForTest = CCIPOCRParams{ + OCRParameters: types.OCRParameters{ + DeltaProgress: 10 * time.Second, + DeltaResend: 10 * time.Second, + DeltaInitial: 20 * time.Second, + DeltaRound: 2 * time.Second, + DeltaGrace: 2 * time.Second, + DeltaCertifiedCommitRequest: 10 * time.Second, + DeltaStage: 10 * time.Second, + Rmax: 50, + MaxDurationQuery: 10 * time.Second, + MaxDurationObservation: 10 * time.Second, + MaxDurationShouldAcceptAttestedReport: 10 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + }, + CommitOffChainConfig: &pluginconfig.CommitOffchainConfig{ + RemoteGasPriceBatchWriteFrequency: *config.MustNewDuration(globals.RemoteGasPriceBatchWriteFrequency), + TokenPriceBatchWriteFrequency: *config.MustNewDuration(globals.TokenPriceBatchWriteFrequency), + NewMsgScanBatchSize: merklemulti.MaxNumberTreeLeaves, + MaxReportTransmissionCheckAttempts: 5, + RMNEnabled: false, + RMNSignaturesTimeout: 30 * time.Minute, + MaxMerkleTreeSize: merklemulti.MaxNumberTreeLeaves, + SignObservationPrefix: "chainlink ccip 1.6 rmn observation", + MerkleRootAsyncObserverDisabled: false, + MerkleRootAsyncObserverSyncFreq: 4 * time.Second, + MerkleRootAsyncObserverSyncTimeout: 12 * time.Second, + ChainFeeAsyncObserverSyncFreq: 10 * time.Second, + ChainFeeAsyncObserverSyncTimeout: 12 * time.Second, + }, + ExecuteOffChainConfig: &pluginconfig.ExecuteOffchainConfig{ + BatchGasLimit: globals.BatchGasLimit, + InflightCacheExpiry: *config.MustNewDuration(globals.InflightCacheExpiry), + RootSnoozeTime: *config.MustNewDuration(globals.RootSnoozeTime), + MessageVisibilityInterval: *config.MustNewDuration(globals.PermissionLessExecutionThreshold), + BatchingStrategyID: globals.BatchingStrategyID, + }, + } +) + +type OCRConfigChainType int + +const ( + Default OCRConfigChainType = iota + 1 + Ethereum + // SimulationTest is kept only for backward compatibility. 
Tests probably should + // migrate to using Default or Ethereum + SimulationTest +) + +func DeriveOCRConfigTypeFromSelector(chainsel uint64) OCRConfigChainType { + switch chainsel { + case chain_selectors.ETHEREUM_TESTNET_SEPOLIA.Selector, + chain_selectors.ETHEREUM_TESTNET_HOLESKY.Selector, + chain_selectors.ETHEREUM_MAINNET.Selector: + return Ethereum + default: + return Default + } +} + +func (c OCRConfigChainType) CommitOCRParams() CCIPOCRParams { + switch c { + case Ethereum: + return DefaultOCRParamsForCommitForETH.Copy() + case Default: + return DefaultOCRParamsForCommitForNonETH.Copy() + case SimulationTest: + return OcrParamsForTest.Copy() + default: + panic("unknown OCRConfigChainType") + } +} + +func (c OCRConfigChainType) ExecuteOCRParams() CCIPOCRParams { + switch c { + case Ethereum: + return DefaultOCRParamsForExecForETH.Copy() + case Default: + return DefaultOCRParamsForExecForNonETH.Copy() + case SimulationTest: + return OcrParamsForTest.Copy() + default: + panic("unknown OCRConfigChainType") + } +} + +func DeriveOCRParamsForCommit( + ocrChainType OCRConfigChainType, + feedChain uint64, + feeTokenInfo map[ccipocr3.UnknownEncodedAddress]pluginconfig.TokenInfo, + override func(params CCIPOCRParams) CCIPOCRParams, +) CCIPOCRParams { + params := ocrChainType.CommitOCRParams() + params.CommitOffChainConfig.TokenInfo = feeTokenInfo + params.CommitOffChainConfig.PriceFeedChainSelector = ccipocr3.ChainSelector(feedChain) + if override == nil { + return params + } + return override(params) +} + +func DeriveOCRParamsForExec( + ocrChainType OCRConfigChainType, + observerConfig []pluginconfig.TokenDataObserverConfig, + override func(params CCIPOCRParams) CCIPOCRParams, +) CCIPOCRParams { + params := ocrChainType.ExecuteOCRParams() + params.ExecuteOffChainConfig.TokenDataObservers = observerConfig + if override == nil { + return params + } + return override(params) +} diff --git a/deployment/ccip/changeset/v1_6/cs_active_candidate_test.go b/deployment/ccip/changeset/v1_6/cs_active_candidate_test.go index 55326fd7111..f9f668c3906 100644 --- a/deployment/ccip/changeset/v1_6/cs_active_candidate_test.go +++ b/deployment/ccip/changeset/v1_6/cs_active_candidate_test.go @@ -210,21 +210,16 @@ func Test_ActiveCandidate(t *testing.T) { { // NOTE: this is technically not a new chain, but needed for validation. OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig(tenv.FeedChainSel, - tokenConfig.GetTokenInfo(logger.TestLogger(t), - state.Chains[dest].LinkToken.Address(), - state.Chains[dest].Weth9.Address())), - ), + dest: v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, tenv.FeedChainSel, tokenConfig.GetTokenInfo(logger.TestLogger(t), + state.Chains[dest].LinkToken.Address(), + state.Chains[dest].Weth9.Address()), nil), }, PluginType: types.PluginTypeCCIPCommit, }, { // NOTE: this is technically not a new chain, but needed for validation. 
OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultExecuteOffChainConfig(nil), - ), + dest: v1_6.DeriveOCRParamsForExec(v1_6.SimulationTest, nil, nil), }, PluginType: types.PluginTypeCCIPExec, }, diff --git a/deployment/ccip/changeset/v1_6/cs_ccip_home.go b/deployment/ccip/changeset/v1_6/cs_ccip_home.go index ca8fe58e13b..aebac81c54b 100644 --- a/deployment/ccip/changeset/v1_6/cs_ccip_home.go +++ b/deployment/ccip/changeset/v1_6/cs_ccip_home.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "math/big" - "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -17,17 +16,13 @@ import ( mcmsevmsdk "github.com/smartcontractkit/mcms/sdk/evm" mcmstypes "github.com/smartcontractkit/mcms/types" - "github.com/smartcontractkit/chainlink-common/pkg/config" - "github.com/smartcontractkit/chainlink-common/pkg/logger" - "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" - "github.com/smartcontractkit/chainlink-ccip/chainconfig" "github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3" "github.com/smartcontractkit/chainlink-ccip/pluginconfig" + "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/deployment" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" - "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/globals" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal" commoncs "github.com/smartcontractkit/chainlink/deployment/common/changeset" "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" @@ -124,7 +119,7 @@ func validateCommitOffchainConfig(c *pluginconfig.CommitOffchainConfig, selector tokenInfos = append(tokenInfos, onchainState.Weth9) symbol, decimal, err := findTokenInfo(tokenInfos, token) if err != nil { - return err + return fmt.Errorf("chain %d- %w", selector, err) } if decimal != tokenConfig.Decimals { return fmt.Errorf("token %s -address %s has %d decimals in provided token config, expected %d", @@ -173,6 +168,21 @@ type CCIPOCRParams struct { ExecuteOffChainConfig *pluginconfig.ExecuteOffchainConfig } +func (c CCIPOCRParams) Copy() CCIPOCRParams { + newC := CCIPOCRParams{ + OCRParameters: c.OCRParameters, + } + if c.CommitOffChainConfig != nil { + commit := *c.CommitOffChainConfig + newC.CommitOffChainConfig = &commit + } + if c.ExecuteOffChainConfig != nil { + exec := *c.ExecuteOffChainConfig + newC.ExecuteOffChainConfig = &exec + } + return newC +} + func (c CCIPOCRParams) Validate(e deployment.Environment, selector uint64, feedChainSel uint64, state changeset.CCIPOnChainState) error { if err := c.OCRParameters.Validate(); err != nil { return fmt.Errorf("invalid OCR parameters: %w", err) @@ -193,93 +203,6 @@ func (c CCIPOCRParams) Validate(e deployment.Environment, selector uint64, feedC return nil } -type CCIPOCROpts func(params *CCIPOCRParams) - -// WithOCRParamOverride can be used if you want to override the default OCR parameters with your custom function. -func WithOCRParamOverride(override func(params *CCIPOCRParams)) CCIPOCROpts { - return func(params *CCIPOCRParams) { - if override != nil { - override(params) - } - } -} - -// WithDefaultCommitOffChainConfig can be used to add token info to the existing commit off-chain config. If no commit off-chain config is set, it will be created with default values. 
-func WithDefaultCommitOffChainConfig(feedChainSel uint64, tokenInfo map[ccipocr3.UnknownEncodedAddress]pluginconfig.TokenInfo) CCIPOCROpts { - return func(params *CCIPOCRParams) { - if params.CommitOffChainConfig == nil { - params.CommitOffChainConfig = &pluginconfig.CommitOffchainConfig{ - RemoteGasPriceBatchWriteFrequency: *config.MustNewDuration(globals.RemoteGasPriceBatchWriteFrequency), - TokenPriceBatchWriteFrequency: *config.MustNewDuration(globals.TokenPriceBatchWriteFrequency), - TokenInfo: tokenInfo, - PriceFeedChainSelector: ccipocr3.ChainSelector(feedChainSel), - NewMsgScanBatchSize: merklemulti.MaxNumberTreeLeaves, - MaxReportTransmissionCheckAttempts: 5, - RMNEnabled: false, - RMNSignaturesTimeout: 30 * time.Minute, - MaxMerkleTreeSize: merklemulti.MaxNumberTreeLeaves, - SignObservationPrefix: "chainlink ccip 1.6 rmn observation", - MerkleRootAsyncObserverDisabled: false, - MerkleRootAsyncObserverSyncFreq: 4 * time.Second, - MerkleRootAsyncObserverSyncTimeout: 12 * time.Second, - ChainFeeAsyncObserverSyncFreq: 10 * time.Second, - ChainFeeAsyncObserverSyncTimeout: 12 * time.Second, - } - } else { - if params.CommitOffChainConfig.TokenInfo == nil { - params.CommitOffChainConfig.TokenInfo = make(map[ccipocr3.UnknownEncodedAddress]pluginconfig.TokenInfo) - } - for k, v := range tokenInfo { - params.CommitOffChainConfig.TokenInfo[k] = v - } - } - } -} - -// WithDefaultExecuteOffChainConfig can be used to add token data observers to the execute off-chain config. If no execute off-chain config is set, it will be created with default values. -func WithDefaultExecuteOffChainConfig(tokenDataObservers []pluginconfig.TokenDataObserverConfig) CCIPOCROpts { - return func(params *CCIPOCRParams) { - if params.ExecuteOffChainConfig == nil { - params.ExecuteOffChainConfig = &pluginconfig.ExecuteOffchainConfig{ - BatchGasLimit: globals.BatchGasLimit, - InflightCacheExpiry: *config.MustNewDuration(globals.InflightCacheExpiry), - RootSnoozeTime: *config.MustNewDuration(globals.RootSnoozeTime), - MessageVisibilityInterval: *config.MustNewDuration(globals.PermissionLessExecutionThreshold), - BatchingStrategyID: globals.BatchingStrategyID, - TokenDataObservers: tokenDataObservers, - } - } else if tokenDataObservers != nil { - params.ExecuteOffChainConfig.TokenDataObservers = append(params.ExecuteOffChainConfig.TokenDataObservers, tokenDataObservers...) - } - } -} - -// DeriveCCIPOCRParams derives the default OCR parameters for a chain, with the option to override them. -func DeriveCCIPOCRParams( - opts ...CCIPOCROpts, -) CCIPOCRParams { - params := CCIPOCRParams{ - OCRParameters: commontypes.OCRParameters{ - DeltaProgress: globals.DeltaProgress, - DeltaResend: globals.DeltaResend, - DeltaInitial: globals.DeltaInitial, - DeltaRound: globals.DeltaRound, - DeltaGrace: globals.DeltaGrace, - DeltaCertifiedCommitRequest: globals.DeltaCertifiedCommitRequest, - DeltaStage: globals.DeltaStage, - Rmax: globals.Rmax, - MaxDurationQuery: globals.MaxDurationQuery, - MaxDurationObservation: globals.MaxDurationObservation, - MaxDurationShouldAcceptAttestedReport: globals.MaxDurationShouldAcceptAttestedReport, - MaxDurationShouldTransmitAcceptedReport: globals.MaxDurationShouldTransmitAcceptedReport, - }, - } - for _, opt := range opts { - opt(¶ms) - } - return params -} - type PromoteCandidatePluginInfo struct { // RemoteChainSelectors is the chain selector of the DONs that we want to promote the candidate config of. // Note that each (chain, ccip capability version) pair has a unique DON ID. 
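For illustration only (not part of this patch): the hunk above removes the option-based DeriveCCIPOCRParams / WithDefault* helpers, and the new deployment/ccip/changeset/v1_6/config.go added earlier in this commit replaces them with chain-type-aware derivation. The minimal Go sketch below shows how a caller might use that replacement API. The buildOCRParams wrapper, the choice of Sepolia as the feed chain, and toggling RMNEnabled in the override are assumptions made up for the example; DeriveOCRConfigTypeFromSelector, DeriveOCRParamsForCommit and DeriveOCRParamsForExec are the functions introduced by this commit.

package main

import (
	"fmt"

	chain_selectors "github.com/smartcontractkit/chain-selectors"

	"github.com/smartcontractkit/chainlink/deployment/ccip/changeset/v1_6"
)

// buildOCRParams derives the commit and exec OCR parameter sets for one remote chain,
// picking the Ethereum-specific defaults when the destination selector is an Ethereum chain.
// Hypothetical helper, written only to illustrate the new API surface.
func buildOCRParams(destSelector, feedChainSel uint64, rmnEnabled bool) (v1_6.CCIPOCRParams, v1_6.CCIPOCRParams) {
	chainType := v1_6.DeriveOCRConfigTypeFromSelector(destSelector)

	// Commit: no fee-token info here (nil); the override adjusts a single off-chain field
	// on top of the copied defaults, the same way test_environment.go toggles RMN.
	commit := v1_6.DeriveOCRParamsForCommit(chainType, feedChainSel, nil, func(p v1_6.CCIPOCRParams) v1_6.CCIPOCRParams {
		p.CommitOffChainConfig.RMNEnabled = rmnEnabled
		return p
	})

	// Exec: no token data observers and no override, so the defaults are used unchanged.
	exec := v1_6.DeriveOCRParamsForExec(chainType, nil, nil)
	return commit, exec
}

func main() {
	// Placeholder selectors for the sketch: Ethereum mainnet as destination, Sepolia as feed chain.
	commit, exec := buildOCRParams(
		chain_selectors.ETHEREUM_MAINNET.Selector,
		chain_selectors.ETHEREUM_TESTNET_SEPOLIA.Selector,
		false,
	)
	fmt.Println(commit.OCRParameters.DeltaRound, exec.OCRParameters.DeltaRound)
}

Because CCIPOCRParams is passed and returned by value and the per-chain-type defaults are taken via Copy(), each remote chain ends up with its own parameter structs rather than mutating shared globals, which is what the "Removing globally shared OCR params and relying on structs instead" part of this commit is after.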
diff --git a/deployment/ccip/changeset/v1_6/cs_ccip_home_test.go b/deployment/ccip/changeset/v1_6/cs_ccip_home_test.go index 254c69e37c1..33a01dd8990 100644 --- a/deployment/ccip/changeset/v1_6/cs_ccip_home_test.go +++ b/deployment/ccip/changeset/v1_6/cs_ccip_home_test.go @@ -71,10 +71,8 @@ func TestInvalidOCR3Params(t *testing.T) { require.NoError(t, err) nodes, err := deployment.NodeInfo(e.Env.NodeIDs, e.Env.Offchain) require.NoError(t, err) - params := v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig(e.FeedChainSel, nil), - v1_6.WithDefaultExecuteOffChainConfig(nil), - ) + params := v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, e.FeedChainSel, nil, nil) + // tweak params to have invalid config // make DeltaRound greater than DeltaProgress params.OCRParameters.DeltaRound = params.OCRParameters.DeltaProgress + time.Duration(1) @@ -87,7 +85,7 @@ func TestInvalidOCR3Params(t *testing.T) { state.Chains[e.HomeChainSel].RMNHome.Address(), params.OCRParameters, params.CommitOffChainConfig, - params.ExecuteOffChainConfig, + &globals.DefaultExecuteOffChainCfg, ) require.Errorf(t, err, "expected error") pattern := `DeltaRound \(\d+\.\d+s\) must be less than DeltaProgress \(\d+s\)` @@ -270,31 +268,24 @@ func Test_SetCandidate(t *testing.T) { PluginInfo: []v1_6.SetCandidatePluginInfo{ { OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig( - tenv.FeedChainSel, - tokenConfig.GetTokenInfo(logger.TestLogger(t), - state.Chains[dest].LinkToken.Address(), - state.Chains[dest].Weth9.Address())), - ), + dest: v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, tenv.FeedChainSel, tokenConfig.GetTokenInfo(logger.TestLogger(t), + state.Chains[dest].LinkToken.Address(), + state.Chains[dest].Weth9.Address()), nil), }, PluginType: types.PluginTypeCCIPCommit, }, { OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultExecuteOffChainConfig(nil), - // change the default config to make MessageVisibilityInterval != PermissionLessExecutionThresholdSeconds - v1_6.WithOCRParamOverride(func(params *v1_6.CCIPOCRParams) { - dCfg, err := state.Chains[dest].OffRamp.GetDynamicConfig(&bind.CallOpts{ - Context: ctx, - }) - require.NoError(t, err) - params.ExecuteOffChainConfig.MessageVisibilityInterval = - *config.MustNewDuration( - time.Duration(dCfg.PermissionLessExecutionThresholdSeconds + uint32(time.Second))) - }), - ), + dest: v1_6.DeriveOCRParamsForExec(v1_6.SimulationTest, nil, func(params v1_6.CCIPOCRParams) v1_6.CCIPOCRParams { + dCfg, err := state.Chains[dest].OffRamp.GetDynamicConfig(&bind.CallOpts{ + Context: ctx, + }) + require.NoError(t, err) + params.ExecuteOffChainConfig.MessageVisibilityInterval = + *config.MustNewDuration( + time.Duration(dCfg.PermissionLessExecutionThresholdSeconds + uint32(time.Second))) + return params + }), }, PluginType: types.PluginTypeCCIPExec, }, @@ -324,21 +315,15 @@ func Test_SetCandidate(t *testing.T) { PluginInfo: []v1_6.SetCandidatePluginInfo{ { OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig( - tenv.FeedChainSel, - tokenConfig.GetTokenInfo(logger.TestLogger(t), - state.Chains[dest].LinkToken.Address(), - state.Chains[dest].Weth9.Address())), - ), + dest: v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, tenv.FeedChainSel, tokenConfig.GetTokenInfo(logger.TestLogger(t), + state.Chains[dest].LinkToken.Address(), + 
state.Chains[dest].Weth9.Address()), nil), }, PluginType: types.PluginTypeCCIPCommit, }, { OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultExecuteOffChainConfig(nil), - ), + dest: v1_6.DeriveOCRParamsForExec(v1_6.SimulationTest, nil, nil), }, PluginType: types.PluginTypeCCIPExec, }, @@ -441,21 +426,15 @@ func Test_RevokeCandidate(t *testing.T) { PluginInfo: []v1_6.SetCandidatePluginInfo{ { OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig( - tenv.FeedChainSel, - tokenConfig.GetTokenInfo(logger.TestLogger(t), - state.Chains[dest].LinkToken.Address(), - state.Chains[dest].Weth9.Address())), - ), + dest: v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, tenv.FeedChainSel, tokenConfig.GetTokenInfo(logger.TestLogger(t), + state.Chains[dest].LinkToken.Address(), + state.Chains[dest].Weth9.Address()), nil), }, PluginType: types.PluginTypeCCIPCommit, }, { OCRConfigPerRemoteChainSelector: map[uint64]v1_6.CCIPOCRParams{ - dest: v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultExecuteOffChainConfig(nil), - ), + dest: v1_6.DeriveOCRParamsForExec(v1_6.SimulationTest, nil, nil), }, PluginType: types.PluginTypeCCIPExec, }, diff --git a/deployment/ccip/changeset/v1_6/cs_chain_contracts_test.go b/deployment/ccip/changeset/v1_6/cs_chain_contracts_test.go index b8430355012..a067bb45250 100644 --- a/deployment/ccip/changeset/v1_6/cs_chain_contracts_test.go +++ b/deployment/ccip/changeset/v1_6/cs_chain_contracts_test.go @@ -859,13 +859,9 @@ func TestSetOCR3ConfigValidations(t *testing.T) { // Build the per chain config. wrongChainConfigs := make(map[uint64]v1_6.ChainConfig) - ocrConfigs := make(map[uint64]v1_6.CCIPOCRParams) + commitOCRConfigs := make(map[uint64]v1_6.CCIPOCRParams) for _, chain := range allChains { - ocrParams := v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig(e.FeedChainSel, nil), - v1_6.WithDefaultExecuteOffChainConfig(nil), - ) - ocrConfigs[chain] = ocrParams + commitOCRConfigs[chain] = v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, e.FeedChainSel, nil, nil) // set wrong chain config with incorrect value of FChain wrongChainConfigs[chain] = v1_6.ChainConfig{ Readers: envNodes.NonBootstraps().PeerIDs(), @@ -898,7 +894,7 @@ func TestSetOCR3ConfigValidations(t *testing.T) { FeedChainSelector: e.FeedChainSel, }, PluginInfo: v1_6.SetCandidatePluginInfo{ - OCRConfigPerRemoteChainSelector: ocrConfigs, + OCRConfigPerRemoteChainSelector: commitOCRConfigs, PluginType: types.PluginTypeCCIPCommit, }, }, diff --git a/deployment/ccip/changeset/v1_6/cs_home_chain_test.go b/deployment/ccip/changeset/v1_6/cs_home_chain_test.go index cbcccb52d86..d63cec2b501 100644 --- a/deployment/ccip/changeset/v1_6/cs_home_chain_test.go +++ b/deployment/ccip/changeset/v1_6/cs_home_chain_test.go @@ -207,10 +207,6 @@ func TestAddDonAfterRemoveDons(t *testing.T) { allChains := e.Env.AllChainSelectors() homeChain := s.Chains[e.HomeChainSel] ocrConfigs := make(map[uint64]v1_6.CCIPOCRParams) - ocrParams := v1_6.DeriveCCIPOCRParams( - v1_6.WithDefaultCommitOffChainConfig(e.FeedChainSel, nil), - v1_6.WithDefaultExecuteOffChainConfig(nil), - ) // Remove a don donsBefore, err := homeChain.CapabilityRegistry.GetDONs(nil) require.NoError(t, err) @@ -254,7 +250,7 @@ func TestAddDonAfterRemoveDons(t *testing.T) { break } } - ocrConfigs[donRemovedForChain] = ocrParams + ocrConfigs[donRemovedForChain] = v1_6.DeriveOCRParamsForCommit(v1_6.SimulationTest, e.FeedChainSel, 
nil, nil) // try to add the another don e.Env, err = commoncs.Apply(t, e.Env, nil, commoncs.Configure( diff --git a/deployment/environment/crib/ccip_deployer.go b/deployment/environment/crib/ccip_deployer.go index 1d1ef80167d..598b2285a8f 100644 --- a/deployment/environment/crib/ccip_deployer.go +++ b/deployment/environment/crib/ccip_deployer.go @@ -530,11 +530,17 @@ func setupLanes(e *deployment.Environment, state changeset.CCIPOnChainState) (de func mustOCR(e *deployment.Environment, homeChainSel uint64, feedChainSel uint64, newDons bool) (deployment.Environment, error) { chainSelectors := e.AllChainSelectors() - var ocrConfigPerSelector = make(map[uint64]v1_6.CCIPOCRParams) + var commitOCRConfigPerSelector = make(map[uint64]v1_6.CCIPOCRParams) + var execOCRConfigPerSelector = make(map[uint64]v1_6.CCIPOCRParams) + // Should be configured in the future based on the load test scenario + // chainType := v1_6.Default + + // TODO Passing SimulationTest to reduce number of changes in the CRIB (load test setup) + // @Austin please flip it back to Default once we reach a stable state + chainType := v1_6.SimulationTest for selector := range e.Chains { - ocrConfigPerSelector[selector] = v1_6.DeriveCCIPOCRParams(v1_6.WithDefaultCommitOffChainConfig(feedChainSel, nil), - v1_6.WithDefaultExecuteOffChainConfig(nil), - ) + commitOCRConfigPerSelector[selector] = v1_6.DeriveOCRParamsForCommit(chainType, feedChainSel, nil, nil) + execOCRConfigPerSelector[selector] = v1_6.DeriveOCRParamsForExec(chainType, nil, nil) } var commitChangeset commonchangeset.ConfiguredChangeSet @@ -548,7 +554,7 @@ func mustOCR(e *deployment.Environment, homeChainSel uint64, feedChainSel uint64 FeedChainSelector: feedChainSel, }, PluginInfo: v1_6.SetCandidatePluginInfo{ - OCRConfigPerRemoteChainSelector: ocrConfigPerSelector, + OCRConfigPerRemoteChainSelector: commitOCRConfigPerSelector, PluginType: types.PluginTypeCCIPCommit, }, }, @@ -564,7 +570,7 @@ func mustOCR(e *deployment.Environment, homeChainSel uint64, feedChainSel uint64 }, PluginInfo: []v1_6.SetCandidatePluginInfo{ { - OCRConfigPerRemoteChainSelector: ocrConfigPerSelector, + OCRConfigPerRemoteChainSelector: commitOCRConfigPerSelector, PluginType: types.PluginTypeCCIPCommit, }, }, @@ -584,7 +590,7 @@ func mustOCR(e *deployment.Environment, homeChainSel uint64, feedChainSel uint64 }, PluginInfo: []v1_6.SetCandidatePluginInfo{ { - OCRConfigPerRemoteChainSelector: ocrConfigPerSelector, + OCRConfigPerRemoteChainSelector: execOCRConfigPerSelector, PluginType: types.PluginTypeCCIPExec, }, }, diff --git a/deployment/go.mod b/deployment/go.mod index 16510ba7b37..bbaf6492c22 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -10,6 +10,7 @@ replace github.com/smartcontractkit/chainlink/v2 => ../ require github.com/smartcontractkit/chainlink/v2 v2.0.0-20250221182743-098d1b0a763a require ( + dario.cat/mergo v1.0.1 github.com/Khan/genqlient v0.7.0 github.com/Masterminds/semver/v3 v3.3.1 github.com/aptos-labs/aptos-go-sdk v1.5.0 @@ -56,7 +57,6 @@ require ( require ( cosmossdk.io/errors v1.0.1 // indirect cosmossdk.io/math v1.3.0 // indirect - dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect diff --git a/integration-tests/smoke/ccip/ccip_gas_price_updates_test.go b/integration-tests/smoke/ccip/ccip_gas_price_updates_test.go index 96e99ec4929..5f49c15e4a4 100644 --- 
a/integration-tests/smoke/ccip/ccip_gas_price_updates_test.go +++ b/integration-tests/smoke/ccip/ccip_gas_price_updates_test.go @@ -29,8 +29,11 @@ func Test_CCIPGasPriceUpdates(t *testing.T) { var gasPriceExpiry = 5 * time.Second e, _, _ := testsetups.NewIntegrationEnvironment(t, - testhelpers.WithOCRConfigOverride(func(params *v1_6.CCIPOCRParams) { - params.CommitOffChainConfig.RemoteGasPriceBatchWriteFrequency = *config.MustNewDuration(gasPriceExpiry) + testhelpers.WithOCRConfigOverride(func(params v1_6.CCIPOCRParams) v1_6.CCIPOCRParams { + if params.CommitOffChainConfig != nil { + params.CommitOffChainConfig.RemoteGasPriceBatchWriteFrequency = *config.MustNewDuration(gasPriceExpiry) + } + return params }), ) state, err := changeset.LoadOnchainState(e.Env) diff --git a/integration-tests/smoke/ccip/ccip_token_price_updates_test.go b/integration-tests/smoke/ccip/ccip_token_price_updates_test.go index 12ada106764..f299b36b35c 100644 --- a/integration-tests/smoke/ccip/ccip_token_price_updates_test.go +++ b/integration-tests/smoke/ccip/ccip_token_price_updates_test.go @@ -31,8 +31,11 @@ func Test_CCIPTokenPriceUpdates(t *testing.T) { var tokenPriceExpiry = 5 * time.Second e, _, _ := testsetups.NewIntegrationEnvironment(t, - testhelpers.WithOCRConfigOverride(func(params *v1_6.CCIPOCRParams) { - params.CommitOffChainConfig.TokenPriceBatchWriteFrequency = *config.MustNewDuration(tokenPriceExpiry) + testhelpers.WithOCRConfigOverride(func(params v1_6.CCIPOCRParams) v1_6.CCIPOCRParams { + if params.CommitOffChainConfig != nil { + params.CommitOffChainConfig.TokenPriceBatchWriteFrequency = *config.MustNewDuration(tokenPriceExpiry) + } + return params })) state, err := changeset.LoadOnchainState(e.Env) require.NoError(t, err) From 7c5efbe6b2aa19367e1c32053546fd494bb96839 Mon Sep 17 00:00:00 2001 From: Balamurali Gopalswami <167726375+b-gopalswami@users.noreply.github.com> Date: Wed, 26 Feb 2025 18:07:47 -0500 Subject: [PATCH 07/17] Adding Test Router view to state (#16590) lint fix review comments remove test router --- deployment/ccip/changeset/state.go | 9 ++++++++- deployment/ccip/view/v1_2/router.go | 4 +++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/deployment/ccip/changeset/state.go b/deployment/ccip/changeset/state.go index 81981d81b07..d66f2f76996 100644 --- a/deployment/ccip/changeset/state.go +++ b/deployment/ccip/changeset/state.go @@ -236,12 +236,19 @@ func (c CCIPChainState) LinkTokenAddress() (common.Address, error) { func (c CCIPChainState) GenerateView() (view.ChainView, error) { chainView := view.NewChain() if c.Router != nil { - routerView, err := v1_2.GenerateRouterView(c.Router) + routerView, err := v1_2.GenerateRouterView(c.Router, false) if err != nil { return chainView, errors.Wrapf(err, "failed to generate router view for router %s", c.Router.Address().String()) } chainView.Router[c.Router.Address().Hex()] = routerView } + if c.TestRouter != nil { + testRouterView, err := v1_2.GenerateRouterView(c.TestRouter, true) + if err != nil { + return chainView, errors.Wrapf(err, "failed to generate router view for test router %s", c.TestRouter.Address().String()) + } + chainView.Router[c.TestRouter.Address().Hex()] = testRouterView + } if c.TokenAdminRegistry != nil { taView, err := viewv1_5.GenerateTokenAdminRegistryView(c.TokenAdminRegistry) if err != nil { diff --git a/deployment/ccip/view/v1_2/router.go b/deployment/ccip/view/v1_2/router.go index 53bcc1b6e1c..a0d3c63d0ca 100644 --- a/deployment/ccip/view/v1_2/router.go +++ b/deployment/ccip/view/v1_2/router.go 
@@ -11,13 +11,14 @@ import ( type RouterView struct { types.ContractMetaData + IsTestRouter bool `json:"isTestRouter,omitempty"` WrappedNative common.Address `json:"wrappedNative,omitempty"` ARMProxy common.Address `json:"armProxy,omitempty"` OnRamps map[uint64]common.Address `json:"onRamps,omitempty"` // Map of DestinationChainSelectors to OnRamp Addresses OffRamps map[uint64]common.Address `json:"offRamps,omitempty"` // Map of SourceChainSelectors to a list of OffRamp Addresses } -func GenerateRouterView(r *router.Router) (RouterView, error) { +func GenerateRouterView(r *router.Router, isTestRouter bool) (RouterView, error) { meta, err := types.NewContractMetaData(r, r.Address()) if err != nil { return RouterView{}, fmt.Errorf("view error to get router metadata: %w", err) @@ -48,6 +49,7 @@ func GenerateRouterView(r *router.Router) (RouterView, error) { } return RouterView{ ContractMetaData: meta, + IsTestRouter: isTestRouter, WrappedNative: wrappedNative, ARMProxy: armProxy, OnRamps: onRamps, From b97deaf6f792b1e7cdc98f854bd6f5c8d866f933 Mon Sep 17 00:00:00 2001 From: Anindita Ghosh <88458927+AnieeG@users.noreply.github.com> Date: Wed, 26 Feb 2025 15:50:49 -0800 Subject: [PATCH 08/17] Fix mcms state in CCIP load chain state (#16599) * fix loachain state for new mcms * more validation * fix lint * add comment --- deployment/ccip/changeset/state.go | 11 +++++++++-- deployment/ccip/changeset/state_test.go | 21 +++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/deployment/ccip/changeset/state.go b/deployment/ccip/changeset/state.go index d66f2f76996..0cddf415b4e 100644 --- a/deployment/ccip/changeset/state.go +++ b/deployment/ccip/changeset/state.go @@ -725,8 +725,7 @@ func LoadChainState(ctx context.Context, chain deployment.Chain, addresses map[s state.ABIByAddress[address] = gethwrappers.CallProxyABI case deployment.NewTypeAndVersion(commontypes.ProposerManyChainMultisig, deployment.Version1_0_0).String(), deployment.NewTypeAndVersion(commontypes.CancellerManyChainMultisig, deployment.Version1_0_0).String(), - deployment.NewTypeAndVersion(commontypes.BypasserManyChainMultisig, deployment.Version1_0_0).String(), - deployment.NewTypeAndVersion(commontypes.ManyChainMultisig, deployment.Version1_0_0).String(): + deployment.NewTypeAndVersion(commontypes.BypasserManyChainMultisig, deployment.Version1_0_0).String(): state.ABIByAddress[address] = gethwrappers.ManyChainMultiSigABI case deployment.NewTypeAndVersion(commontypes.LinkToken, deployment.Version1_0_0).String(): state.ABIByAddress[address] = link_token.LinkTokenABI @@ -1050,6 +1049,14 @@ func LoadChainState(ctx context.Context, chain deployment.Chain, addresses map[s state.MockRMN = mockRMN state.ABIByAddress[address] = mock_rmn_contract.MockRMNContractABI default: + // ManyChainMultiSig 1.0.0 can have any of these labels, it can have either 1,2 or 3 of these - + // bypasser, proposer and canceller + // if you try to compare tvStr.String() you will have to compare all combinations of labels + // so we will compare the type and version only + if tvStr.Type == commontypes.ManyChainMultisig && tvStr.Version == deployment.Version1_0_0 { + state.ABIByAddress[address] = gethwrappers.ManyChainMultiSigABI + continue + } return state, fmt.Errorf("unknown contract %s", tvStr) } } diff --git a/deployment/ccip/changeset/state_test.go b/deployment/ccip/changeset/state_test.go index 0bde8bc48ab..9a4c0728144 100644 --- a/deployment/ccip/changeset/state_test.go +++ b/deployment/ccip/changeset/state_test.go @@ -5,8 +5,12 
@@ import ( "github.com/stretchr/testify/require" + "github.com/smartcontractkit/chainlink-integrations/evm/utils" + + "github.com/smartcontractkit/chainlink/deployment" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/testhelpers" + "github.com/smartcontractkit/chainlink/deployment/common/types" ) func TestSmokeState(t *testing.T) { @@ -17,4 +21,21 @@ func TestSmokeState(t *testing.T) { require.NoError(t, err) } +func TestMCMSState(t *testing.T) { + tenv, _ := testhelpers.NewMemoryEnvironment(t, testhelpers.WithNoJobsAndContracts()) + addressbook := deployment.NewMemoryAddressBook() + newTv := deployment.NewTypeAndVersion(types.ManyChainMultisig, deployment.Version1_0_0) + newTv.AddLabel(types.BypasserRole.String()) + newTv.AddLabel(types.CancellerRole.String()) + newTv.AddLabel(types.ProposerRole.String()) + addr := utils.RandomAddress() + require.NoError(t, addressbook.Save(tenv.HomeChainSel, addr.String(), newTv)) + require.NoError(t, tenv.Env.ExistingAddresses.Merge(addressbook)) + state, err := changeset.LoadOnchainState(tenv.Env) + require.NoError(t, err) + require.Equal(t, addr.String(), state.Chains[tenv.HomeChainSel].BypasserMcm.Address().String()) + require.Equal(t, addr.String(), state.Chains[tenv.HomeChainSel].ProposerMcm.Address().String()) + require.Equal(t, addr.String(), state.Chains[tenv.HomeChainSel].CancellerMcm.Address().String()) +} + // TODO: add solana state test From 1efe8076e02f6d531bfcc91b0f5668a151079096 Mon Sep 17 00:00:00 2001 From: Vyzaldy Sanchez Date: Wed, 26 Feb 2025 21:28:03 -0400 Subject: [PATCH 09/17] feat(CRE-86): Generate `ForwarderView` for `KeystoneView` (#16488) * Sends extra config + chainlink-common bump * Fixes CI * Bumps chainlink-common * Fixes CI * Fixes CI * Fixes CI * gomods tidy * Renames import * Renames Accept/Transmit fields into ShouldAccept/ShouldTransmit * bumps `chainlink-common` * bumps deps * bumps deps * bumps deps * bumps deps * bumps deps * bumps deps * bumps deps * Fixes test * gomods tidy * Fixes test * Generates OCR3 config view (#16395) * Generates OCR3 config view * WIP - parses the OCR3 config event into readable values * CRE-226: mv ContractSet; cleanup used compatibility * implement continue-on-error view * Loads readable `OracleConfig` into view * Fixes lint * Fixes view implementation * Adds test validating the `OracleConfig` view generation * Tests cleanup * Lint cleanup --------- Co-authored-by: krehermann <16602512+krehermann@users.noreply.github.com> * Fixes merge + gomods tidy * gomods tidy * Fixes test * Renames view field * Renames view field * Hex encodes view signers field values * Fixes merge conflicts issues * gomods tidy * Fixes merge conflicts issues * Fixes lint * Prevents break on 0 `RequestTimeout` * Prevents break on 0 `RequestTimeout` * gomods tidy * git merge conflicts * fixes lint * Generates `ForwarderView` for `KeystoneView` * Fixes lint * Refactors `ForwarderView` generation * Fixes lint * gomods tidy * gomods tidy * gomods tidy * Fixes merge conflicts issues * Fixes merge conflicts issues * bumps chainlink-common + fixes(?) 
* gomods tidy * gomods tidy * remove unused file * fixes conflicts * Updates comment * gomods tidy * gomods tidy * Renames property correctly * Test * Test * Properly comments on the implementation of `GenerateForwarderView()` * Fixes lint * Fixes test * Passes down parent context * Generates as a slice * Adds `txHash` and `blockNumber` to the view --------- Co-authored-by: krehermann <16602512+krehermann@users.noreply.github.com> --- deployment/keystone/changeset/state.go | 16 ++++- deployment/keystone/changeset/view.go | 2 +- .../keystone/changeset/view_contracts.go | 71 ++++++++++++++++++- deployment/keystone/changeset/view_test.go | 24 ++++++- 4 files changed, 103 insertions(+), 10 deletions(-) diff --git a/deployment/keystone/changeset/state.go b/deployment/keystone/changeset/state.go index be7f63b7410..3cad434525d 100644 --- a/deployment/keystone/changeset/state.go +++ b/deployment/keystone/changeset/state.go @@ -1,6 +1,7 @@ package changeset import ( + "context" "errors" "fmt" @@ -68,8 +69,8 @@ func (cs ContractSet) TransferableContracts() []common.Address { } // View is a view of the keystone chain -// It is best effort and logs errors -func (cs ContractSet) View(lggr logger.Logger) (KeystoneChainView, error) { +// It is best-effort and logs errors +func (cs ContractSet) View(ctx context.Context, lggr logger.Logger) (KeystoneChainView, error) { out := NewKeystoneChainView() var allErrs error if cs.CapabilitiesRegistry != nil { @@ -85,7 +86,7 @@ func (cs ContractSet) View(lggr logger.Logger) (KeystoneChainView, error) { for addr, ocr3Cap := range cs.OCR3 { oc := *ocr3Cap addrCopy := addr - ocrView, err := GenerateOCR3ConfigView(oc) + ocrView, err := GenerateOCR3ConfigView(ctx, oc) if err != nil { allErrs = errors.Join(allErrs, err) // don't block view on single OCR3 not being configured @@ -109,6 +110,15 @@ func (cs ContractSet) View(lggr logger.Logger) (KeystoneChainView, error) { out.WorkflowRegistry[cs.WorkflowRegistry.Address().String()] = wrView } + if cs.Forwarder != nil { + fwrView, fwrErr := GenerateForwarderView(ctx, cs.Forwarder) + if fwrErr != nil { + allErrs = errors.Join(allErrs, fwrErr) + lggr.Errorf("failed to generate forwarder view: %v", fwrErr) + } + out.Forwarders[cs.Forwarder.Address().String()] = fwrView + } + return out, allErrs } diff --git a/deployment/keystone/changeset/view.go b/deployment/keystone/changeset/view.go index 0a5667781ed..40d2ad961f0 100644 --- a/deployment/keystone/changeset/view.go +++ b/deployment/keystone/changeset/view.go @@ -40,7 +40,7 @@ func ViewKeystone(e deployment.Environment) (json.Marshaler, error) { viewErrs = errors.Join(viewErrs, err2) continue } - v, err := contracts.View(e.Logger) + v, err := contracts.View(e.GetContext(), e.Logger) if err != nil { err2 := fmt.Errorf("failed to view chain %s: %w", chainName, err) lggr.Error(err2) diff --git a/deployment/keystone/changeset/view_contracts.go b/deployment/keystone/changeset/view_contracts.go index a5868f4d74e..952f4eed851 100644 --- a/deployment/keystone/changeset/view_contracts.go +++ b/deployment/keystone/changeset/view_contracts.go @@ -1,9 +1,11 @@ package changeset import ( + "context" "encoding/hex" "encoding/json" "errors" + "fmt" "math" "time" @@ -15,6 +17,7 @@ import ( capocr3types "github.com/smartcontractkit/chainlink-common/pkg/capabilities/consensus/ocr3/types" + forwarder "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/forwarder_1_0_0" ocr3_capability 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/ocr3_capability_1_0_0" "github.com/smartcontractkit/chainlink/deployment/common/view" @@ -26,6 +29,7 @@ type KeystoneChainView struct { // OCRContracts is a map of OCR3 contract addresses to their configuration view OCRContracts map[string]OCR3ConfigView `json:"ocrContracts,omitempty"` WorkflowRegistry map[string]common_v1_0.WorkflowRegistryView `json:"workflowRegistry,omitempty"` + Forwarders map[string][]ForwarderView `json:"forwarders,omitempty"` } type OCR3ConfigView struct { @@ -37,9 +41,21 @@ type OCR3ConfigView struct { OffchainConfig OracleConfig `json:"offchainConfig"` } -var ErrOCR3NotConfigured = errors.New("OCR3 not configured") +type ForwarderView struct { + DonID uint32 `json:"donId"` + ConfigVersion uint32 `json:"configVersion"` + F uint8 `json:"f"` + Signers []string `json:"signers"` + TxHash string `json:"txHash,omitempty"` + BlockNumber uint64 `json:"blockNumber,omitempty"` +} + +var ( + ErrOCR3NotConfigured = errors.New("OCR3 not configured") + ErrForwarderNotConfigured = errors.New("forwarder not configured") +) -func GenerateOCR3ConfigView(ocr3Cap ocr3_capability.OCR3Capability) (OCR3ConfigView, error) { +func GenerateOCR3ConfigView(ctx context.Context, ocr3Cap ocr3_capability.OCR3Capability) (OCR3ConfigView, error) { details, err := ocr3Cap.LatestConfigDetails(nil) if err != nil { return OCR3ConfigView{}, err @@ -49,7 +65,7 @@ func GenerateOCR3ConfigView(ocr3Cap ocr3_capability.OCR3Capability) (OCR3ConfigV configIterator, err := ocr3Cap.FilterConfigSet(&bind.FilterOpts{ Start: blockNumber, End: &blockNumber, - Context: nil, + Context: ctx, }) if err != nil { return OCR3ConfigView{}, err @@ -135,6 +151,54 @@ func GenerateOCR3ConfigView(ocr3Cap ocr3_capability.OCR3Capability) (OCR3ConfigV }, nil } +func GenerateForwarderView(ctx context.Context, f *forwarder.KeystoneForwarder) ([]ForwarderView, error) { + // This could be effectively done with 2 other approaches: + // 1. Fetching the transaction receipt of the contract deployment, getting the deployment block number, + // and extracting the config from the logs, but we don't have access to the transaction hash needed for this. + // 2. Using `CodeAt()` to find the block number in which the contract was created, and use that. + // We would have to go from block number 0 to find it, which in the end is similar what's done here. + configIterator, err := f.FilterConfigSet(&bind.FilterOpts{ + Start: 0, + End: nil, + Context: ctx, + }, nil, nil) + if err != nil { + return nil, fmt.Errorf("error filtering ConfigSet events: %w", err) + } + + configSets := make([]*forwarder.KeystoneForwarderConfigSet, 0) + for configIterator.Next() { + // We wait for the iterator to receive an event + if configIterator.Event == nil { + // We cannot return an error, since we are capturing all `SetConfig` events, so if there's a nil event, + // we ignore it. 
+ continue + } + configSets = append(configSets, configIterator.Event) + } + if len(configSets) == 0 { + return nil, ErrForwarderNotConfigured + } + + var forwarderViews []ForwarderView + for _, configSet := range configSets { + var readableSigners []string + for _, s := range configSet.Signers { + readableSigners = append(readableSigners, s.String()) + } + forwarderViews = append(forwarderViews, ForwarderView{ + DonID: configSet.DonId, + ConfigVersion: configSet.ConfigVersion, + F: configSet.F, + Signers: readableSigners, + TxHash: configSet.Raw.TxHash.String(), + BlockNumber: configSet.Raw.BlockNumber, + }) + } + + return forwarderViews, nil +} + func millisecondsToUint32(dur time.Duration) uint32 { ms := dur.Milliseconds() if ms > int64(math.MaxUint32) { @@ -149,6 +213,7 @@ func NewKeystoneChainView() KeystoneChainView { CapabilityRegistry: make(map[string]common_v1_0.CapabilityRegistryView), OCRContracts: make(map[string]OCR3ConfigView), WorkflowRegistry: make(map[string]common_v1_0.WorkflowRegistryView), + Forwarders: make(map[string][]ForwarderView), } } diff --git a/deployment/keystone/changeset/view_test.go b/deployment/keystone/changeset/view_test.go index aa21ba4619b..3917924f295 100644 --- a/deployment/keystone/changeset/view_test.go +++ b/deployment/keystone/changeset/view_test.go @@ -3,6 +3,7 @@ package changeset_test import ( "bytes" "encoding/json" + "fmt" "testing" "time" @@ -56,12 +57,20 @@ func TestKeystoneView(t *testing.T) { addrs, err := env.Env.ExistingAddresses.AddressesForChain(registryChain) require.NoError(t, err) - var newOCR3Addr string + var newOCR3Addr, newForwarderAddr string for addr, tv := range addrs { - if tv.Type == internal.OCR3Capability { - newOCR3Addr = addr + if newForwarderAddr != "" && newOCR3Addr != "" { break } + switch tv.Type { + case internal.KeystoneForwarder: + newForwarderAddr = addr + continue + case internal.OCR3Capability: + newOCR3Addr = addr + default: + continue + } } t.Run("successfully generates a view of the keystone state", func(t *testing.T) { @@ -98,6 +107,15 @@ func TestKeystoneView(t *testing.T) { viewOCR3Config, ok := viewChain.OCRContracts[newOCR3Addr] require.True(t, ok) require.Equal(t, oracleConfig, viewOCR3Config.OffchainConfig) + viewForwarders, ok := viewChain.Forwarders[newForwarderAddr] + require.True(t, ok) + require.Len(t, viewForwarders, 1) + require.Equal(t, uint32(1), viewForwarders[0].DonID) + require.Equal(t, uint8(1), viewForwarders[0].F) + require.Equal(t, uint32(1), viewForwarders[0].ConfigVersion) + require.Len(t, viewForwarders[0].Signers, 4) + + fmt.Printf("%+v\n", outView.Chains[chainName].Forwarders) }) t.Run("fails to generate a view of the keystone state with OCR3 not configured", func(t *testing.T) { From b923007175608a10317f5672db6a286a9b873ffb Mon Sep 17 00:00:00 2001 From: krehermann <16602512+krehermann@users.noreply.github.com> Date: Wed, 26 Feb 2025 19:59:13 -0700 Subject: [PATCH 10/17] feat(CRE-329): high fidelity offchain job service client (#16527) * feat(deployment): high fidelty offchain client * fix tests * cleanup --- core/services/feeds/service.go | 2 +- core/services/feeds/service_test.go | 2 +- deployment/ccip/changeset/v1_5/cs_jobspec.go | 2 +- .../changeset/jd_register_nodes_test.go | 14 +- deployment/environment/memory/job_client.go | 333 ----------- .../environment/memory/job_service_client.go | 551 ++++++++++++++++++ .../memory/job_service_client_test.go | 341 +++++++++++ deployment/environment/memory/node.go | 64 ++ .../environment/memory/node_service_client.go | 120 ++++ 
.../environment/memory/offchain_client.go | 96 +++ deployment/go.mod | 2 +- 11 files changed, 1185 insertions(+), 342 deletions(-) delete mode 100644 deployment/environment/memory/job_client.go create mode 100644 deployment/environment/memory/job_service_client.go create mode 100644 deployment/environment/memory/job_service_client_test.go create mode 100644 deployment/environment/memory/node_service_client.go create mode 100644 deployment/environment/memory/offchain_client.go diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go index 06c964130cf..b789749a384 100644 --- a/core/services/feeds/service.go +++ b/core/services/feeds/service.go @@ -705,7 +705,7 @@ func (s *service) ProposeJob(ctx context.Context, args *ProposeJobArgs) (int64, if exists { // note: CLO auto-increments the version number on re-proposal, so this should never happen - return 0, errors.New("proposed job spec version already exists") + return 0, fmt.Errorf("external job id %s: version conflict: version %d already exists at job proposal id %d %v", args.RemoteUUID, args.Version, existing.ID, *existing) } } diff --git a/core/services/feeds/service_test.go b/core/services/feeds/service_test.go index 1cf14b00ef5..bc95a1d061e 100644 --- a/core/services/feeds/service_test.go +++ b/core/services/feeds/service_test.go @@ -1285,7 +1285,7 @@ func Test_Service_ProposeJob(t *testing.T) { svc.orm.On("ExistsSpecByJobProposalIDAndVersion", mock.Anything, jpFluxMonitor.ID, argsFluxMonitor.Version).Return(true, nil) }, args: argsFluxMonitor, - wantErr: "proposed job spec version already exists", + wantErr: "version conflict", }, { name: "upsert error", diff --git a/deployment/ccip/changeset/v1_5/cs_jobspec.go b/deployment/ccip/changeset/v1_5/cs_jobspec.go index 882d78c6ff2..de6182c2742 100644 --- a/deployment/ccip/changeset/v1_5/cs_jobspec.go +++ b/deployment/ccip/changeset/v1_5/cs_jobspec.go @@ -90,7 +90,7 @@ func JobSpecsForLanesChangeset(env deployment.Environment, c JobSpecsForLanesCon // JOBID will be empty if the proposal failed. 
return deployment.ChangesetOutput{ Jobs: Jobs, - }, fmt.Errorf("failed to propose job: %w", err) + }, fmt.Errorf("failed to propose job %s: %w", job, err) } Jobs[len(Jobs)-1].JobID = res.Proposal.JobId } diff --git a/deployment/data-streams/changeset/jd_register_nodes_test.go b/deployment/data-streams/changeset/jd_register_nodes_test.go index c0b539a1f15..731d013a6b6 100644 --- a/deployment/data-streams/changeset/jd_register_nodes_test.go +++ b/deployment/data-streams/changeset/jd_register_nodes_test.go @@ -6,6 +6,8 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" + "github.com/smartcontractkit/chainlink-integrations/evm/testutils" + nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" "github.com/smartcontractkit/chainlink/deployment" "github.com/smartcontractkit/chainlink/deployment/common/changeset" "github.com/smartcontractkit/chainlink/deployment/environment/memory" @@ -14,19 +16,21 @@ import ( func TestRegisterNodesWithJD(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) lggr := logger.TestLogger(t) e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{Chains: 1, Nodes: 1}) - nodeP2pKey := e.NodeIDs[0] - jobClient, ok := e.Offchain.(*memory.JobClient) require.True(t, ok, "expected Offchain to be of type *memory.JobClient") - require.Lenf(t, jobClient.Nodes, 1, "expected exactly 1 node") + + resp, err := jobClient.ListNodes(ctx, &nodev1.ListNodesRequest{}) + require.NoError(t, err) + require.Lenf(t, resp.Nodes, 1, "expected exactly 1 node") require.Emptyf(t, jobClient.RegisteredNodes, "no registered nodes expected") - csaKey := jobClient.Nodes[nodeP2pKey].Keys.CSA.PublicKeyString() + csaKey := resp.Nodes[0].GetPublicKey() - e, err := changeset.Apply(t, e, nil, + e, err = changeset.Apply(t, e, nil, changeset.Configure( deployment.CreateLegacyChangeSet(RegisterNodesWithJD), RegisterNodesInput{ diff --git a/deployment/environment/memory/job_client.go b/deployment/environment/memory/job_client.go deleted file mode 100644 index 2b8adec6a14..00000000000 --- a/deployment/environment/memory/job_client.go +++ /dev/null @@ -1,333 +0,0 @@ -package memory - -import ( - "context" - "errors" - "fmt" - "slices" - "strings" - - "github.com/pelletier/go-toml/v2" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/timestamppb" - - csav1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/csa" - jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" - nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" - "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" - - "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/validate" - ocr2validate "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" - "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap" -) - -type JobClient struct { - Nodes map[string]Node - RegisteredNodes map[string]Node -} - -func (j JobClient) BatchProposeJob(ctx context.Context, in *jobv1.BatchProposeJobRequest, opts ...grpc.CallOption) (*jobv1.BatchProposeJobResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) UpdateJob(ctx context.Context, in *jobv1.UpdateJobRequest, opts ...grpc.CallOption) (*jobv1.UpdateJobResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) DisableNode(ctx context.Context, in *nodev1.DisableNodeRequest, opts ...grpc.CallOption) 
(*nodev1.DisableNodeResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) EnableNode(ctx context.Context, in *nodev1.EnableNodeRequest, opts ...grpc.CallOption) (*nodev1.EnableNodeResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j *JobClient) RegisterNode(ctx context.Context, in *nodev1.RegisterNodeRequest, opts ...grpc.CallOption) (*nodev1.RegisterNodeResponse, error) { - if in == nil || in.GetPublicKey() == "" { - return nil, errors.New("public key is required") - } - - if _, exists := j.RegisteredNodes[in.GetPublicKey()]; exists { - return nil, fmt.Errorf("node with Public Key %s is already registered", in.GetPublicKey()) - } - - var foundNode *Node - for _, node := range j.Nodes { - if node.Keys.CSA.ID() == in.GetPublicKey() { - foundNode = &node - break - } - } - - if foundNode == nil { - return nil, fmt.Errorf("node with Public Key %s is not known", in.GetPublicKey()) - } - - j.RegisteredNodes[in.GetPublicKey()] = *foundNode - - return &nodev1.RegisterNodeResponse{ - Node: &nodev1.Node{ - Id: in.GetPublicKey(), - PublicKey: in.GetPublicKey(), - IsEnabled: true, - IsConnected: true, - Labels: in.Labels, - }, - }, nil -} - -func (j JobClient) UpdateNode(ctx context.Context, in *nodev1.UpdateNodeRequest, opts ...grpc.CallOption) (*nodev1.UpdateNodeResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) GetKeypair(ctx context.Context, in *csav1.GetKeypairRequest, opts ...grpc.CallOption) (*csav1.GetKeypairResponse, error) { - // TODO implement me - panic("implement me") -} - -func (j JobClient) ListKeypairs(ctx context.Context, in *csav1.ListKeypairsRequest, opts ...grpc.CallOption) (*csav1.ListKeypairsResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) GetNode(ctx context.Context, in *nodev1.GetNodeRequest, opts ...grpc.CallOption) (*nodev1.GetNodeResponse, error) { - n, ok := j.Nodes[in.Id] - if !ok { - return nil, errors.New("node not found") - } - return &nodev1.GetNodeResponse{ - Node: &nodev1.Node{ - Id: in.Id, - PublicKey: n.Keys.CSA.PublicKeyString(), - IsEnabled: true, - IsConnected: true, - }, - }, nil -} - -func (j JobClient) ListNodes(ctx context.Context, in *nodev1.ListNodesRequest, opts ...grpc.CallOption) (*nodev1.ListNodesResponse, error) { - var nodes []*nodev1.Node - for id, n := range j.Nodes { - node := &nodev1.Node{ - Id: id, - PublicKey: n.Keys.CSA.ID(), - IsEnabled: true, - IsConnected: true, - Labels: []*ptypes.Label{ - { - Key: "p2p_id", - Value: ptr(n.Keys.PeerID.String()), - }, - }, - } - if ApplyNodeFilter(in.Filter, node) { - nodes = append(nodes, node) - } - } - return &nodev1.ListNodesResponse{ - Nodes: nodes, - }, nil -} - -func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNodeChainConfigsRequest, opts ...grpc.CallOption) (*nodev1.ListNodeChainConfigsResponse, error) { - if in.Filter == nil { - return nil, errors.New("filter is required") - } - if len(in.Filter.NodeIds) != 1 { - return nil, errors.New("only one node id is supported") - } - n, ok := j.Nodes[in.Filter.NodeIds[0]] - if !ok { - return nil, fmt.Errorf("node id not found: %s", in.Filter.NodeIds[0]) - } - chainConfigs, err := n.JDChainConfigs() - if err != nil { - return nil, err - } - return &nodev1.ListNodeChainConfigsResponse{ - ChainConfigs: chainConfigs, - }, nil -} - -func (j JobClient) GetJob(ctx context.Context, in *jobv1.GetJobRequest, opts ...grpc.CallOption) (*jobv1.GetJobResponse, 
error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) GetProposal(ctx context.Context, in *jobv1.GetProposalRequest, opts ...grpc.CallOption) (*jobv1.GetProposalResponse, error) { - // we are using proposal id as job id - // refer to ListJobs and ProposeJobs for the assignment of proposal id - for _, node := range j.Nodes { - jobs, _, err := node.App.JobORM().FindJobs(ctx, 0, 1000) - if err != nil { - return nil, err - } - for _, job := range jobs { - if job.ExternalJobID.String() == in.Id { - specBytes, err := toml.Marshal(job.CCIPSpec) - if err != nil { - return nil, fmt.Errorf("failed to marshal job spec: %w", err) - } - return &jobv1.GetProposalResponse{ - Proposal: &jobv1.Proposal{ - Id: job.ExternalJobID.String(), - Status: jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, - Spec: string(specBytes), - JobId: job.ExternalJobID.String(), - }, - }, nil - } - } - } - return nil, fmt.Errorf("job not found: %s", in.Id) -} - -func (j JobClient) ListJobs(ctx context.Context, in *jobv1.ListJobsRequest, opts ...grpc.CallOption) (*jobv1.ListJobsResponse, error) { - jobResponse := make([]*jobv1.Job, 0) - for _, req := range in.Filter.NodeIds { - if _, ok := j.Nodes[req]; !ok { - return nil, fmt.Errorf("node not found: %s", req) - } - n := j.Nodes[req] - jobs, _, err := n.App.JobORM().FindJobs(ctx, 0, 1000) - if err != nil { - return nil, err - } - for _, job := range jobs { - jobResponse = append(jobResponse, &jobv1.Job{ - Id: string(job.ID), - Uuid: job.ExternalJobID.String(), - NodeId: req, - // based on the current implementation, there is only one proposal per job - // see ProposeJobs for ProposalId assignment - ProposalIds: []string{job.ExternalJobID.String()}, - CreatedAt: timestamppb.New(job.CreatedAt), - UpdatedAt: timestamppb.New(job.CreatedAt), - }) - } - } - return &jobv1.ListJobsResponse{ - Jobs: jobResponse, - }, nil -} - -func (j JobClient) ListProposals(ctx context.Context, in *jobv1.ListProposalsRequest, opts ...grpc.CallOption) (*jobv1.ListProposalsResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) ProposeJob(ctx context.Context, in *jobv1.ProposeJobRequest, opts ...grpc.CallOption) (*jobv1.ProposeJobResponse, error) { - n := j.Nodes[in.NodeId] - // TODO: Use FMS - jb, err := validate.ValidatedCCIPSpec(in.Spec) - if err != nil { - if !strings.Contains(err.Error(), "the only supported type is currently 'ccip'") { - return nil, err - } - // check if it's offchainreporting2 job - jb, err = ocr2validate.ValidatedOracleSpecToml( - ctx, - n.App.GetConfig().OCR2(), - n.App.GetConfig().Insecure(), - in.Spec, - nil, // not required for validation - ) - if err != nil { - if !strings.Contains(err.Error(), "the only supported type is currently 'offchainreporting2'") { - return nil, err - } - // check if it's bootstrap job - jb, err = ocrbootstrap.ValidatedBootstrapSpecToml(in.Spec) - if err != nil { - return nil, fmt.Errorf("failed to validate job spec only ccip, bootstrap and offchainreporting2 are supported: %w", err) - } - } - } - err = n.App.AddJobV2(ctx, &jb) - if err != nil { - return nil, err - } - return &jobv1.ProposeJobResponse{Proposal: &jobv1.Proposal{ - // make the proposal id the same as the job id for further reference - // if you are changing this make sure to change the GetProposal and ListJobs method implementation - Id: jb.ExternalJobID.String(), - // Auto approve for now - Status: jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, - DeliveryStatus: 
jobv1.ProposalDeliveryStatus_PROPOSAL_DELIVERY_STATUS_DELIVERED, - Spec: in.Spec, - JobId: jb.ExternalJobID.String(), - CreatedAt: nil, - UpdatedAt: nil, - AckedAt: nil, - ResponseReceivedAt: nil, - }}, nil -} - -func (j JobClient) RevokeJob(ctx context.Context, in *jobv1.RevokeJobRequest, opts ...grpc.CallOption) (*jobv1.RevokeJobResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) DeleteJob(ctx context.Context, in *jobv1.DeleteJobRequest, opts ...grpc.CallOption) (*jobv1.DeleteJobResponse, error) { - // TODO CCIP-3108 implement me - panic("implement me") -} - -func (j JobClient) ReplayLogs(selectorToBlock map[uint64]uint64) error { - for _, node := range j.Nodes { - if err := node.ReplayLogs(selectorToBlock); err != nil { - return err - } - } - return nil -} - -func NewMemoryJobClient(nodesByPeerID map[string]Node) *JobClient { - return &JobClient{nodesByPeerID, make(map[string]Node)} -} - -func ApplyNodeFilter(filter *nodev1.ListNodesRequest_Filter, node *nodev1.Node) bool { - if filter == nil { - return true - } - if len(filter.Ids) > 0 { - idx := slices.IndexFunc(filter.Ids, func(id string) bool { - return node.Id == id - }) - if idx < 0 { - return false - } - } - for _, selector := range filter.Selectors { - idx := slices.IndexFunc(node.Labels, func(label *ptypes.Label) bool { - return label.Key == selector.Key - }) - if idx < 0 { - return false - } - label := node.Labels[idx] - - switch selector.Op { - case ptypes.SelectorOp_IN: - values := strings.Split(*selector.Value, ",") - found := slices.Contains(values, *label.Value) - if !found { - return false - } - case ptypes.SelectorOp_EQ: - if *label.Value != *selector.Value { - return false - } - case ptypes.SelectorOp_EXIST: - // do nothing - default: - panic("unimplemented selector") - } - } - return true -} diff --git a/deployment/environment/memory/job_service_client.go b/deployment/environment/memory/job_service_client.go new file mode 100644 index 00000000000..ef2c52b4944 --- /dev/null +++ b/deployment/environment/memory/job_service_client.go @@ -0,0 +1,551 @@ +package memory + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/google/uuid" + "github.com/pelletier/go-toml/v2" + "google.golang.org/grpc" + + jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" + "github.com/smartcontractkit/chainlink/v2/core/services/feeds" + "github.com/smartcontractkit/chainlink/v2/core/services/job" +) + +type JobServiceClient struct { + jobStore + proposalStore + nodeStore +} + +func NewJobServiceClient(ns nodeStore) *JobServiceClient { + return &JobServiceClient{ + jobStore: newMapJobStore(), + proposalStore: newMapProposalStore(), + nodeStore: ns, + } +} + +func (j *JobServiceClient) BatchProposeJob(ctx context.Context, in *jobv1.BatchProposeJobRequest, opts ...grpc.CallOption) (*jobv1.BatchProposeJobResponse, error) { + targets := make(map[string]Node) + for _, nodeID := range in.NodeIds { + node, err := j.nodeStore.get(nodeID) + if err != nil { + return nil, fmt.Errorf("node not found: %s", nodeID) + } + targets[nodeID] = *node + } + if len(targets) == 0 { + return nil, errors.New("no nodes found") + } + out := &jobv1.BatchProposeJobResponse{ + SuccessResponses: make(map[string]*jobv1.ProposeJobResponse), + FailedResponses: make(map[string]*jobv1.ProposeJobFailure), + } + var totalErr error + for id := range targets { + singleReq := &jobv1.ProposeJobRequest{ + NodeId: id, + Spec: in.Spec, + Labels: in.Labels, + } + resp, err := j.ProposeJob(ctx, singleReq) 
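+		// note: resp is also stored in SuccessResponses below when ProposeJob
+		// returns an error (resp is nil in that case); FailedResponses carries the error.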
+ if err != nil { + out.FailedResponses[id] = &jobv1.ProposeJobFailure{ + ErrorMessage: err.Error(), + } + totalErr = errors.Join(totalErr, fmt.Errorf("failed to propose job for node %s: %w", id, err)) + } + out.SuccessResponses[id] = resp + } + return out, totalErr +} + +func (j *JobServiceClient) UpdateJob(ctx context.Context, in *jobv1.UpdateJobRequest, opts ...grpc.CallOption) (*jobv1.UpdateJobResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +func (j *JobServiceClient) GetJob(ctx context.Context, in *jobv1.GetJobRequest, opts ...grpc.CallOption) (*jobv1.GetJobResponse, error) { + // implementation detail that job id and uuid is the same + jb, err := j.jobStore.get(in.GetId()) + if err != nil { + return nil, fmt.Errorf("failed to get job: %w", err) + } + // TODO CCIP-3108 implement me + return &jobv1.GetJobResponse{ + Job: jb, + }, nil +} + +func (j *JobServiceClient) GetProposal(ctx context.Context, in *jobv1.GetProposalRequest, opts ...grpc.CallOption) (*jobv1.GetProposalResponse, error) { + p, err := j.proposalStore.get(in.Id) + if err != nil { + return nil, fmt.Errorf("failed to get proposal: %w", err) + } + return &jobv1.GetProposalResponse{ + Proposal: p, + }, nil +} + +func (j *JobServiceClient) ListJobs(ctx context.Context, in *jobv1.ListJobsRequest, opts ...grpc.CallOption) (*jobv1.ListJobsResponse, error) { + jbs, err := j.jobStore.list(in.Filter) + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %w", err) + } + + return &jobv1.ListJobsResponse{ + Jobs: jbs, + }, nil +} + +func (j *JobServiceClient) ListProposals(ctx context.Context, in *jobv1.ListProposalsRequest, opts ...grpc.CallOption) (*jobv1.ListProposalsResponse, error) { + proposals, err := j.proposalStore.list(in.Filter) + if err != nil { + return nil, fmt.Errorf("failed to list proposals: %w", err) + } + return &jobv1.ListProposalsResponse{ + Proposals: proposals, + }, nil +} + +// ProposeJob is used to propose a job to the node +// It auto approves the job +func (j *JobServiceClient) ProposeJob(ctx context.Context, in *jobv1.ProposeJobRequest, opts ...grpc.CallOption) (*jobv1.ProposeJobResponse, error) { + n, err := j.nodeStore.get(in.NodeId) + if err != nil { + return nil, fmt.Errorf("node not found: %w", err) + } + _, err = job.ValidateSpec(in.Spec) + if err != nil { + return nil, fmt.Errorf("failed to validate job spec: %w", err) + } + var extractor ExternalJobIDExtractor + err = toml.Unmarshal([]byte(in.Spec), &extractor) + if err != nil { + return nil, fmt.Errorf("failed to load job spec: %w", err) + } + if extractor.ExternalJobID == "" { + return nil, errors.New("externalJobID is required") + } + + // must auto increment the version to avoid collision on the node side + proposals, err := j.proposalStore.list(&jobv1.ListProposalsRequest_Filter{ + JobIds: []string{extractor.ExternalJobID}, + }) + if err != nil { + return nil, fmt.Errorf("failed to list proposals: %w", err) + } + proposalVersion := int32(len(proposals) + 1) //nolint:gosec // G115 + appProposalID, err := n.App.GetFeedsService().ProposeJob(ctx, &feeds.ProposeJobArgs{ + FeedsManagerID: 1, + Spec: in.Spec, + RemoteUUID: uuid.MustParse(extractor.ExternalJobID), + Version: proposalVersion, + }) + if err != nil { + return nil, fmt.Errorf("failed to propose job: %w", err) + } + fmt.Printf("proposed job uuid %s with id, spec, version: %d\n%s\n%d\n", extractor.ExternalJobID, appProposalID, in.Spec, len(proposals)+1) + // auto approve for now + proposedSpec, err := 
n.App.GetFeedsService().ListSpecsByJobProposalIDs(ctx, []int64{appProposalID}) + if err != nil { + return nil, fmt.Errorf("failed to list specs: %w", err) + } + // possible to have multiple specs for the same job proposal id; take the last one + if len(proposedSpec) == 0 { + return nil, fmt.Errorf("no specs found for job proposal id: %d", appProposalID) + } + err = n.App.GetFeedsService().ApproveSpec(ctx, proposedSpec[len(proposedSpec)-1].ID, true) + if err != nil { + return nil, fmt.Errorf("failed to approve job: %w", err) + } + + storeProposalID := uuid.Must(uuid.NewRandom()).String() + p := &jobv1.ProposeJobResponse{Proposal: &jobv1.Proposal{ + // make the proposal id the same as the job id for further reference + // if you are changing this make sure to change the GetProposal and ListJobs method implementation + Id: storeProposalID, + Revision: int64(proposalVersion), + // Auto approve for now + Status: jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, + DeliveryStatus: jobv1.ProposalDeliveryStatus_PROPOSAL_DELIVERY_STATUS_DELIVERED, + Spec: in.Spec, + JobId: extractor.ExternalJobID, + CreatedAt: nil, + UpdatedAt: nil, + AckedAt: nil, + ResponseReceivedAt: nil, + }} + + // save the proposal and job + { + var ( + storeErr error // used to cleanup if we fail to save the job + job *jobv1.Job + ) + + storeErr = j.proposalStore.put(storeProposalID, p.Proposal) + if err != nil { + return nil, fmt.Errorf("failed to save proposal: %w", err) + } + defer func() { + // cleanup if we fail to save the job + if storeErr != nil { + j.proposalStore.delete(storeProposalID) //nolint:errcheck // ignore error nothing to do + } + }() + + job, storeErr = j.jobStore.get(extractor.ExternalJobID) + if storeErr != nil && !errors.Is(storeErr, errNoExist) { + return nil, fmt.Errorf("failed to get job: %w", storeErr) + } + if errors.Is(storeErr, errNoExist) { + job = &jobv1.Job{ + Id: extractor.ExternalJobID, + Uuid: extractor.ExternalJobID, + NodeId: in.NodeId, + ProposalIds: []string{storeProposalID}, + Labels: in.Labels, + } + } else { + job.ProposalIds = append(job.ProposalIds, storeProposalID) + } + storeErr = j.jobStore.put(extractor.ExternalJobID, job) + if storeErr != nil { + return nil, fmt.Errorf("failed to save job: %w", storeErr) + } + } + return p, nil +} + +func (j *JobServiceClient) RevokeJob(ctx context.Context, in *jobv1.RevokeJobRequest, opts ...grpc.CallOption) (*jobv1.RevokeJobResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +func (j *JobServiceClient) DeleteJob(ctx context.Context, in *jobv1.DeleteJobRequest, opts ...grpc.CallOption) (*jobv1.DeleteJobResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +type ExternalJobIDExtractor struct { + ExternalJobID string `toml:"externalJobID"` +} + +var errNoExist = errors.New("does not exist") + +// proposalStore is an interface for storing job proposals. +type proposalStore interface { + put(proposalID string, proposal *jobv1.Proposal) error + get(proposalID string) (*jobv1.Proposal, error) + list(filter *jobv1.ListProposalsRequest_Filter) ([]*jobv1.Proposal, error) + delete(proposalID string) error +} + +// jobStore is an interface for storing jobs. +type jobStore interface { + put(jobID string, job *jobv1.Job) error + get(jobID string) (*jobv1.Job, error) + list(filter *jobv1.ListJobsRequest_Filter) ([]*jobv1.Job, error) + delete(jobID string) error +} + +// nodeStore is an interface for storing nodes. 
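+// The map-backed implementation further below (mapNodeStore) guards its map with a mutex.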
+type nodeStore interface { + put(nodeID string, node *Node) error + get(nodeID string) (*Node, error) + list() []*Node + asMap() map[string]*Node + delete(nodeID string) error +} + +var _ jobStore = &mapJobStore{} + +type mapJobStore struct { + mu sync.Mutex + jobs map[string]*jobv1.Job + nodesToJobIDs map[string][]string + uuidToJobIDs map[string][]string +} + +func newMapJobStore() *mapJobStore { + return &mapJobStore{ + jobs: make(map[string]*jobv1.Job), + nodesToJobIDs: make(map[string][]string), + uuidToJobIDs: make(map[string][]string), + } +} + +func (m *mapJobStore) put(jobID string, job *jobv1.Job) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.jobs == nil { + m.jobs = make(map[string]*jobv1.Job) + m.nodesToJobIDs = make(map[string][]string) + m.uuidToJobIDs = make(map[string][]string) + } + m.jobs[jobID] = job + if _, ok := m.nodesToJobIDs[job.NodeId]; !ok { + m.nodesToJobIDs[job.NodeId] = make([]string, 0) + } + m.nodesToJobIDs[job.NodeId] = append(m.nodesToJobIDs[job.NodeId], jobID) + if _, ok := m.uuidToJobIDs[job.Uuid]; !ok { + m.uuidToJobIDs[job.Uuid] = make([]string, 0) + } + m.uuidToJobIDs[job.Uuid] = append(m.uuidToJobIDs[job.Uuid], jobID) + return nil +} + +func (m *mapJobStore) get(jobID string) (*jobv1.Job, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.jobs == nil { + return nil, fmt.Errorf("%w: job not found: %s", errNoExist, jobID) + } + job, ok := m.jobs[jobID] + if !ok { + return nil, fmt.Errorf("%w: job not found: %s", errNoExist, jobID) + } + return job, nil +} + +func (m *mapJobStore) list(filter *jobv1.ListJobsRequest_Filter) ([]*jobv1.Job, error) { + if filter != nil && filter.NodeIds != nil && filter.Uuids != nil && filter.Ids != nil { + return nil, errors.New("only one of NodeIds, Uuids or Ids can be set") + } + m.mu.Lock() + defer m.mu.Unlock() + if m.jobs == nil { + return []*jobv1.Job{}, nil + } + + jobs := make([]*jobv1.Job, 0, len(m.jobs)) + + if filter == nil || (filter.NodeIds == nil && filter.Uuids == nil && filter.Ids == nil) { + for _, job := range m.jobs { + jobs = append(jobs, job) + } + return jobs, nil + } + + wantedJobIDs := make(map[string]struct{}) + // use node ids to construct wanted job ids + switch { + case filter.NodeIds != nil: + for _, nodeID := range filter.NodeIds { + jobIDs, ok := m.nodesToJobIDs[nodeID] + if !ok { + continue + } + for _, jobID := range jobIDs { + wantedJobIDs[jobID] = struct{}{} + } + } + case filter.Uuids != nil: + for _, uuid := range filter.Uuids { + jobIDs, ok := m.uuidToJobIDs[uuid] + if !ok { + continue + } + for _, jobID := range jobIDs { + wantedJobIDs[jobID] = struct{}{} + } + } + case filter.Ids != nil: + for _, jobID := range filter.Ids { + wantedJobIDs[jobID] = struct{}{} + } + default: + panic("this should never happen because of the nil filter check") + } + + for _, job := range m.jobs { + if _, ok := wantedJobIDs[job.Id]; ok { + jobs = append(jobs, job) + } + } + return jobs, nil +} + +func (m *mapJobStore) delete(jobID string) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.jobs == nil { + return fmt.Errorf("job not found: %s", jobID) + } + job, ok := m.jobs[jobID] + if !ok { + return nil + } + delete(m.jobs, jobID) + delete(m.nodesToJobIDs, job.NodeId) + delete(m.uuidToJobIDs, job.Uuid) + return nil +} + +var _ proposalStore = &mapProposalStore{} + +type mapProposalStore struct { + mu sync.Mutex + proposals map[string]*jobv1.Proposal + jobIdToProposalId map[string]string +} + +func newMapProposalStore() *mapProposalStore { + return &mapProposalStore{ + proposals: 
make(map[string]*jobv1.Proposal), + jobIdToProposalId: make(map[string]string), + } +} + +func (m *mapProposalStore) put(proposalID string, proposal *jobv1.Proposal) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.proposals == nil { + m.proposals = make(map[string]*jobv1.Proposal) + } + if m.jobIdToProposalId == nil { + m.jobIdToProposalId = make(map[string]string) + } + m.proposals[proposalID] = proposal + m.jobIdToProposalId[proposal.JobId] = proposalID + return nil +} +func (m *mapProposalStore) get(proposalID string) (*jobv1.Proposal, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.proposals == nil { + return nil, fmt.Errorf("proposal not found: %s", proposalID) + } + proposal, ok := m.proposals[proposalID] + if !ok { + return nil, fmt.Errorf("%w: proposal not found: %s", errNoExist, proposalID) + } + return proposal, nil +} +func (m *mapProposalStore) list(filter *jobv1.ListProposalsRequest_Filter) ([]*jobv1.Proposal, error) { + if filter != nil && filter.GetIds() != nil && filter.GetJobIds() != nil { + return nil, errors.New("only one of Ids or JobIds can be set") + } + m.mu.Lock() + defer m.mu.Unlock() + if m.proposals == nil { + return nil, nil + } + proposals := make([]*jobv1.Proposal, 0) + // all proposals + if filter == nil || (filter.GetIds() == nil && filter.GetJobIds() == nil) { + for _, proposal := range m.proposals { + proposals = append(proposals, proposal) + } + return proposals, nil + } + + // can't both be nil at this point + wantedProposalIDs := filter.GetIds() + if wantedProposalIDs == nil { + wantedProposalIDs = make([]string, 0) + for _, jobId := range filter.GetJobIds() { + proposalID, ok := m.jobIdToProposalId[jobId] + if !ok { + continue + } + wantedProposalIDs = append(wantedProposalIDs, proposalID) + } + } + + for _, want := range wantedProposalIDs { + p, ok := m.proposals[want] + if !ok { + continue + } + proposals = append(proposals, p) + } + return proposals, nil +} +func (m *mapProposalStore) delete(proposalID string) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.proposals == nil { + return fmt.Errorf("proposal not found: %s", proposalID) + } + + delete(m.proposals, proposalID) + return nil +} + +var _ nodeStore = &mapNodeStore{} + +type mapNodeStore struct { + mu sync.Mutex + nodes map[string]*Node +} + +func newMapNodeStore(n map[string]*Node) *mapNodeStore { + return &mapNodeStore{ + nodes: n, + } +} +func (m *mapNodeStore) put(nodeID string, node *Node) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.nodes == nil { + m.nodes = make(map[string]*Node) + } + m.nodes[nodeID] = node + return nil +} +func (m *mapNodeStore) get(nodeID string) (*Node, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.nodes == nil { + return nil, fmt.Errorf("node not found: %s", nodeID) + } + node, ok := m.nodes[nodeID] + if !ok { + return nil, fmt.Errorf("%w: node not found: %s", errNoExist, nodeID) + } + return node, nil +} +func (m *mapNodeStore) list() []*Node { + m.mu.Lock() + defer m.mu.Unlock() + if m.nodes == nil { + return nil + } + nodes := make([]*Node, 0) + for _, node := range m.nodes { + nodes = append(nodes, node) + } + return nodes +} +func (m *mapNodeStore) delete(nodeID string) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.nodes == nil { + return fmt.Errorf("node not found: %s", nodeID) + } + _, ok := m.nodes[nodeID] + if !ok { + return nil + } + delete(m.nodes, nodeID) + return nil +} + +func (m *mapNodeStore) asMap() map[string]*Node { + m.mu.Lock() + defer m.mu.Unlock() + if m.nodes == nil { + return nil + } + nodes := 
make(map[string]*Node) + for k, v := range m.nodes { + nodes[k] = v + } + return nodes +} diff --git a/deployment/environment/memory/job_service_client_test.go b/deployment/environment/memory/job_service_client_test.go new file mode 100644 index 00000000000..4f8d096010d --- /dev/null +++ b/deployment/environment/memory/job_service_client_test.go @@ -0,0 +1,341 @@ +package memory_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/sdk/freeport" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink-integrations/evm/testutils" + jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestJobClientProposeJob(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + chains, _ := memory.NewMemoryChains(t, 1, 1) + ports := freeport.GetN(t, 1) + testNode := memory.NewNode(t, ports[0], chains, nil, zapcore.DebugLevel, false, deployment.CapabilityRegistryConfig{}) + + // Set up the JobClient with a mock node + nodeID := "node-1" + nodes := map[string]memory.Node{ + nodeID: *testNode, + } + jobClient := memory.NewMemoryJobClient(nodes) + + type testCase struct { + name string + req *jobv1.ProposeJobRequest + checkErr func(t *testing.T, err error) + checkResp func(t *testing.T, resp *jobv1.ProposeJobResponse) + } + cases := []testCase{ + { + name: "valid request", + req: &jobv1.ProposeJobRequest{ + NodeId: "node-1", + Spec: testJobProposalTOML(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466"), + }, + checkResp: func(t *testing.T, resp *jobv1.ProposeJobResponse) { + assert.NotNil(t, resp) + assert.Equal(t, int64(1), resp.Proposal.Revision) + assert.Equal(t, jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, resp.Proposal.Status) + assert.Equal(t, jobv1.ProposalDeliveryStatus_PROPOSAL_DELIVERY_STATUS_DELIVERED, resp.Proposal.DeliveryStatus) + assert.Equal(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466", resp.Proposal.JobId) + assert.Equal(t, testJobProposalTOML(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466"), resp.Proposal.Spec) + }, + }, + { + name: "idempotent request bumps version", + req: &jobv1.ProposeJobRequest{ + NodeId: "node-1", + Spec: testJobProposalTOML(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466"), + }, + // the feeds service doesn't allow duplicate job names + checkResp: func(t *testing.T, resp *jobv1.ProposeJobResponse) { + assert.NotNil(t, resp) + assert.Equal(t, int64(2), resp.Proposal.Revision) + assert.Equal(t, jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, resp.Proposal.Status) + assert.Equal(t, jobv1.ProposalDeliveryStatus_PROPOSAL_DELIVERY_STATUS_DELIVERED, resp.Proposal.DeliveryStatus) + assert.Equal(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466", resp.Proposal.JobId) + assert.Equal(t, testJobProposalTOML(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466"), resp.Proposal.Spec) + }, + }, + { + name: "another request", + req: &jobv1.ProposeJobRequest{ + NodeId: "node-1", + Spec: testJobProposalTOML(t, "11115211-ab79-4c31-ba1c-0997b72aaaaa"), + }, + checkResp: func(t *testing.T, resp *jobv1.ProposeJobResponse) { + assert.NotNil(t, resp) + assert.Equal(t, int64(1), resp.Proposal.Revision) + assert.Equal(t, jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, resp.Proposal.Status) + assert.Equal(t, jobv1.ProposalDeliveryStatus_PROPOSAL_DELIVERY_STATUS_DELIVERED, 
resp.Proposal.DeliveryStatus) + assert.Equal(t, "11115211-ab79-4c31-ba1c-0997b72aaaaa", resp.Proposal.JobId) + assert.Equal(t, testJobProposalTOML(t, "11115211-ab79-4c31-ba1c-0997b72aaaaa"), resp.Proposal.Spec) + }, + }, + { + name: "node does not exist", + req: &jobv1.ProposeJobRequest{ + NodeId: "node-2", + Spec: testJobProposalTOML(t, "f1ac5211-ab79-4c31-ba1c-0997b72db466"), + }, + checkErr: func(t *testing.T, err error) { + require.Error(t, err) + assert.Contains(t, err.Error(), "node not found") + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + // Call the ProposeJob method + resp, err := jobClient.ProposeJob(ctx, c.req) + if c.checkErr != nil { + c.checkErr(t, err) + return + } + require.NoError(t, err) + c.checkResp(t, resp) + }) + } +} + +func TestJobClientJobAPI(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + chains, _ := memory.NewMemoryChains(t, 1, 1) + ports := freeport.GetN(t, 1) + testNode := memory.NewNode(t, ports[0], chains, nil, zapcore.DebugLevel, false, deployment.CapabilityRegistryConfig{}) + + // Set up the JobClient with a mock node + nodeID := "node-1" + externalJobID := "f1ac5211-ab79-4c31-ba1c-0997b72db466" + + jobSpecToml := testJobProposalTOML(t, externalJobID) + nodes := map[string]memory.Node{ + nodeID: *testNode, + } + jobClient := memory.NewMemoryJobClient(nodes) + + // Create a mock request + req := &jobv1.ProposeJobRequest{ + NodeId: nodeID, + Spec: jobSpecToml, + Labels: []*ptypes.Label{ + { + Key: "label-key", + Value: ptr("label-value"), + }, + }, + } + + // Call the ProposeJob method + resp, err := jobClient.ProposeJob(ctx, req) + + // Validate the response + require.NoError(t, err) + assert.NotNil(t, resp) + assert.Equal(t, jobv1.ProposalStatus_PROPOSAL_STATUS_APPROVED, resp.Proposal.Status) + assert.Equal(t, jobv1.ProposalDeliveryStatus_PROPOSAL_DELIVERY_STATUS_DELIVERED, resp.Proposal.DeliveryStatus) + assert.Equal(t, jobSpecToml, resp.Proposal.Spec) + assert.Equal(t, externalJobID, resp.Proposal.JobId) + + expectedProposalID := resp.Proposal.Id + expectedProposal := resp.Proposal + + t.Run("GetJob", func(t *testing.T) { + t.Run("existing job", func(t *testing.T) { + // Create a mock request + getReq := &jobv1.GetJobRequest{ + IdOneof: &jobv1.GetJobRequest_Id{Id: externalJobID}, + } + + getResp, err := jobClient.GetJob(ctx, getReq) + require.NoError(t, err) + assert.NotNil(t, getResp) + assert.Equal(t, externalJobID, getResp.Job.Id) + }) + + t.Run("non-existing job", func(t *testing.T) { + // Create a mock request + getReq := &jobv1.GetJobRequest{ + IdOneof: &jobv1.GetJobRequest_Id{Id: "non-existing-job"}, + } + + getResp, err := jobClient.GetJob(ctx, getReq) + require.Error(t, err) + assert.Nil(t, getResp) + }) + }) + + t.Run("ListJobs", func(t *testing.T) { + type listCase struct { + name string + req *jobv1.ListJobsRequest + checkErr func(t *testing.T, err error) + checkResp func(t *testing.T, resp *jobv1.ListJobsResponse) + } + cases := []listCase{ + { + name: "no filters", + req: &jobv1.ListJobsRequest{}, + checkResp: func(t *testing.T, resp *jobv1.ListJobsResponse) { + assert.NotNil(t, resp) + assert.Len(t, resp.Jobs, 1) + assert.Equal(t, externalJobID, resp.Jobs[0].Id) + }, + }, + { + name: "with id filter", + req: &jobv1.ListJobsRequest{ + Filter: &jobv1.ListJobsRequest_Filter{ + Ids: []string{externalJobID}, + }, + }, + checkResp: func(t *testing.T, resp *jobv1.ListJobsResponse) { + assert.NotNil(t, resp) + assert.Len(t, resp.Jobs, 1) + assert.Equal(t, externalJobID, resp.Jobs[0].Id) + }, + }, + 
{ + name: "non-existing job id", + req: &jobv1.ListJobsRequest{ + Filter: &jobv1.ListJobsRequest_Filter{ + Ids: []string{"non-existing-job-id"}, + }, + }, + checkResp: func(t *testing.T, resp *jobv1.ListJobsResponse) { + require.NotNil(t, resp) + assert.Empty(t, resp.Jobs) + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + // Call the ListJobs method + listResp, err := jobClient.ListJobs(ctx, c.req) + if c.checkErr != nil { + c.checkErr(t, err) + return + } + require.NoError(t, err) + c.checkResp(t, listResp) + }) + } + }) + + t.Run("GetProposal", func(t *testing.T) { + t.Run("existing proposal", func(t *testing.T) { + // Create a mock request + getReq := &jobv1.GetProposalRequest{ + Id: expectedProposalID, + } + + getResp, err := jobClient.GetProposal(ctx, getReq) + require.NoError(t, err) + assert.NotNil(t, getResp) + assert.Equal(t, expectedProposal, getResp.Proposal) + }) + + t.Run("non-existing proposal", func(t *testing.T) { + // Create a mock request + getReq := &jobv1.GetProposalRequest{ + Id: "non-existing-job", + } + + getResp, err := jobClient.GetProposal(ctx, getReq) + require.Error(t, err) + assert.Nil(t, getResp) + }) + }) + + t.Run("ListProposals", func(t *testing.T) { + type listCase struct { + name string + req *jobv1.ListProposalsRequest + checkErr func(t *testing.T, err error) + checkResp func(t *testing.T, resp *jobv1.ListProposalsResponse) + } + cases := []listCase{ + + { + name: "no filters", + req: &jobv1.ListProposalsRequest{}, + checkResp: func(t *testing.T, resp *jobv1.ListProposalsResponse) { + assert.NotNil(t, resp) + assert.Len(t, resp.Proposals, 1) + assert.Equal(t, expectedProposalID, resp.Proposals[0].Id) + assert.Equal(t, expectedProposal, resp.Proposals[0]) + }, + }, + { + name: "with id filter", + req: &jobv1.ListProposalsRequest{ + Filter: &jobv1.ListProposalsRequest_Filter{ + Ids: []string{expectedProposalID}, + }, + }, + checkResp: func(t *testing.T, resp *jobv1.ListProposalsResponse) { + assert.NotNil(t, resp) + assert.Len(t, resp.Proposals, 1) + assert.Equal(t, expectedProposalID, resp.Proposals[0].Id) + assert.Equal(t, expectedProposal, resp.Proposals[0]) + }, + }, + + { + name: "non-existing job id", + req: &jobv1.ListProposalsRequest{ + Filter: &jobv1.ListProposalsRequest_Filter{ + Ids: []string{"non-existing-job-id"}, + }, + }, + checkResp: func(t *testing.T, resp *jobv1.ListProposalsResponse) { + require.NotNil(t, resp) + assert.Empty(t, resp.Proposals, "expected no proposals %v", resp.Proposals) + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + listResp, err := jobClient.ListProposals(ctx, c.req) + if c.checkErr != nil { + c.checkErr(t, err) + return + } + require.NoError(t, err) + c.checkResp(t, listResp) + }) + } + }) +} + +func ptr(s string) *string { + return &s +} + +// need some non-ocr job type to avoid the ocr validation and the p2pwrapper check +func testJobProposalTOML(t *testing.T, externalJobId string) string { + tomlString := ` +type = "standardcapabilities" +schemaVersion = 1 +externalJobID = "%s" +name = "hacking-%s" +forwardingAllowed = false +command = "/home/capabilities/nowhere" +config = "" +` + return fmt.Sprintf(tomlString, externalJobId, externalJobId) +} diff --git a/deployment/environment/memory/node.go b/deployment/environment/memory/node.go index d4a950f431f..4593024c71a 100644 --- a/deployment/environment/memory/node.go +++ b/deployment/environment/memory/node.go @@ -2,6 +2,9 @@ package memory import ( "context" + "crypto/rand" + + "encoding/hex" "fmt" 
"math/big" "net" @@ -15,6 +18,7 @@ import ( "github.com/ethereum/go-ethereum/common" gethtypes "github.com/ethereum/go-ethereum/core/types" chainsel "github.com/smartcontractkit/chain-selectors" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" "golang.org/x/exp/maps" @@ -35,6 +39,7 @@ import ( "github.com/smartcontractkit/chainlink-integrations/evm/assets" "github.com/smartcontractkit/chainlink-integrations/evm/client" v2toml "github.com/smartcontractkit/chainlink-integrations/evm/config/toml" + "github.com/smartcontractkit/chainlink-integrations/evm/testutils" evmutils "github.com/smartcontractkit/chainlink-integrations/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/capabilities" "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" @@ -49,8 +54,13 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/workflowkey" "github.com/smartcontractkit/chainlink/v2/core/services/relay" "github.com/smartcontractkit/chainlink/v2/core/utils" + "github.com/smartcontractkit/chainlink/v2/core/utils/crypto" "github.com/smartcontractkit/chainlink/v2/core/utils/testutils/heavyweight" "github.com/smartcontractkit/chainlink/v2/plugins" + + pb "github.com/smartcontractkit/chainlink-protos/orchestrator/feedsmanager" + feeds2 "github.com/smartcontractkit/chainlink/v2/core/services/feeds" + feedsMocks "github.com/smartcontractkit/chainlink/v2/core/services/feeds/mocks" ) type Node struct { @@ -380,6 +390,9 @@ func NewNode( }) keys := CreateKeys(t, app, chains, solchains) + // JD + + setupJD(t, app) return &Node{ App: app, Chains: slices.Concat( @@ -603,3 +616,54 @@ func (e KeystoreSim) Eth() keystore.Eth { func (e KeystoreSim) CSA() keystore.CSA { return e.csa } + +func setupJD(t *testing.T, app chainlink.Application) { + secret := randomBytes32(t) + pkey, err := crypto.PublicKeyFromHex(hex.EncodeToString(secret)) + require.NoError(t, err) + m := feeds2.RegisterManagerParams{ + Name: "In memory env test", + URI: "http://dev.null:8080", + PublicKey: *pkey, + } + f := app.GetFeedsService() + connManager := feedsMocks.NewConnectionsManager(t) + connManager.On("Connect", mock.Anything).Maybe() + connManager.On("GetClient", mock.Anything).Maybe().Return(noopFeedsClient{}, nil) + connManager.On("Close").Maybe().Return() + connManager.On("IsConnected", mock.Anything).Maybe().Return(true) + f.Unsafe_SetConnectionsManager(connManager) + + _, err = f.RegisterManager(testutils.Context(t), m) + require.NoError(t, err) +} + +func randomBytes32(t *testing.T) []byte { + t.Helper() + b := make([]byte, 32) + _, err := rand.Read(b) + require.NoError(t, err) + return b +} + +type noopFeedsClient struct{} + +func (n noopFeedsClient) ApprovedJob(context.Context, *pb.ApprovedJobRequest) (*pb.ApprovedJobResponse, error) { + return &pb.ApprovedJobResponse{}, nil +} + +func (n noopFeedsClient) Healthcheck(context.Context, *pb.HealthcheckRequest) (*pb.HealthcheckResponse, error) { + return &pb.HealthcheckResponse{}, nil +} + +func (n noopFeedsClient) UpdateNode(context.Context, *pb.UpdateNodeRequest) (*pb.UpdateNodeResponse, error) { + return &pb.UpdateNodeResponse{}, nil +} + +func (n noopFeedsClient) RejectedJob(context.Context, *pb.RejectedJobRequest) (*pb.RejectedJobResponse, error) { + return &pb.RejectedJobResponse{}, nil +} + +func (n noopFeedsClient) CancelledJob(context.Context, *pb.CancelledJobRequest) (*pb.CancelledJobResponse, error) { + return &pb.CancelledJobResponse{}, nil +} diff --git 
a/deployment/environment/memory/node_service_client.go b/deployment/environment/memory/node_service_client.go new file mode 100644 index 00000000000..d11ce8fe5b8 --- /dev/null +++ b/deployment/environment/memory/node_service_client.go @@ -0,0 +1,120 @@ +package memory + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc" + + nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" +) + +func (j JobClient) EnableNode(ctx context.Context, in *nodev1.EnableNodeRequest, opts ...grpc.CallOption) (*nodev1.EnableNodeResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +func (j JobClient) DisableNode(ctx context.Context, in *nodev1.DisableNodeRequest, opts ...grpc.CallOption) (*nodev1.DisableNodeResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +func (j *JobClient) RegisterNode(ctx context.Context, in *nodev1.RegisterNodeRequest, opts ...grpc.CallOption) (*nodev1.RegisterNodeResponse, error) { + if in == nil || in.GetPublicKey() == "" { + return nil, errors.New("public key is required") + } + + if _, exists := j.RegisteredNodes[in.GetPublicKey()]; exists { + return nil, fmt.Errorf("node with Public Key %s is already registered", in.GetPublicKey()) + } + + var foundNode *Node + for _, node := range j.nodeStore.list() { + if node.Keys.CSA.ID() == in.GetPublicKey() { + foundNode = node + break + } + } + + if foundNode == nil { + return nil, fmt.Errorf("node with Public Key %s is not known", in.GetPublicKey()) + } + + j.RegisteredNodes[in.GetPublicKey()] = *foundNode + + return &nodev1.RegisterNodeResponse{ + Node: &nodev1.Node{ + Id: in.GetPublicKey(), + PublicKey: in.GetPublicKey(), + IsEnabled: true, + IsConnected: true, + Labels: in.Labels, + }, + }, nil +} + +func (j JobClient) UpdateNode(ctx context.Context, in *nodev1.UpdateNodeRequest, opts ...grpc.CallOption) (*nodev1.UpdateNodeResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +func (j JobClient) GetNode(ctx context.Context, in *nodev1.GetNodeRequest, opts ...grpc.CallOption) (*nodev1.GetNodeResponse, error) { + n, err := j.nodeStore.get(in.Id) + if err != nil { + return nil, err + } + return &nodev1.GetNodeResponse{ + Node: &nodev1.Node{ + Id: in.Id, + PublicKey: n.Keys.CSA.PublicKeyString(), + IsEnabled: true, + IsConnected: true, + }, + }, nil +} + +func (j JobClient) ListNodes(ctx context.Context, in *nodev1.ListNodesRequest, opts ...grpc.CallOption) (*nodev1.ListNodesResponse, error) { + var nodes []*nodev1.Node + for id, n := range j.nodeStore.asMap() { + node := &nodev1.Node{ + Id: id, + PublicKey: n.Keys.CSA.ID(), + IsEnabled: true, + IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: ptr(n.Keys.PeerID.String()), + }, + }, + } + if ApplyNodeFilter(in.Filter, node) { + nodes = append(nodes, node) + } + } + return &nodev1.ListNodesResponse{ + Nodes: nodes, + }, nil +} + +func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNodeChainConfigsRequest, opts ...grpc.CallOption) (*nodev1.ListNodeChainConfigsResponse, error) { + if in.Filter == nil { + return nil, errors.New("filter is required") + } + if len(in.Filter.NodeIds) != 1 { + return nil, errors.New("only one node id is supported") + } + n, err := j.nodeStore.get(in.Filter.NodeIds[0]) // j.Nodes[in.Filter.NodeIds[0]] + if err != nil { + return nil, fmt.Errorf("node id not found: %s", in.Filter.NodeIds[0]) + } + chainConfigs, err 
:= n.JDChainConfigs() + if err != nil { + return nil, err + } + return &nodev1.ListNodeChainConfigsResponse{ + ChainConfigs: chainConfigs, + }, nil +} diff --git a/deployment/environment/memory/offchain_client.go b/deployment/environment/memory/offchain_client.go new file mode 100644 index 00000000000..6e40a1a6498 --- /dev/null +++ b/deployment/environment/memory/offchain_client.go @@ -0,0 +1,96 @@ +package memory + +import ( + "context" + "slices" + "strings" + + "google.golang.org/grpc" + + csav1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/csa" + nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" + "github.com/smartcontractkit/chainlink/deployment" +) + +var _ deployment.OffchainClient = &JobClient{} + +type JobClient struct { + RegisteredNodes map[string]Node + nodeStore + *JobServiceClient +} + +func NewMemoryJobClient(nodesByPeerID map[string]Node) *JobClient { + m := make(map[string]*Node) + for id, node := range nodesByPeerID { + m[id] = &node + } + ns := newMapNodeStore(m) + return &JobClient{ + // Nodes: nodesByPeerID, + RegisteredNodes: make(map[string]Node), + JobServiceClient: NewJobServiceClient(ns), + nodeStore: ns, + } +} + +func (j JobClient) GetKeypair(ctx context.Context, in *csav1.GetKeypairRequest, opts ...grpc.CallOption) (*csav1.GetKeypairResponse, error) { + // TODO implement me + panic("implement me") +} + +func (j JobClient) ListKeypairs(ctx context.Context, in *csav1.ListKeypairsRequest, opts ...grpc.CallOption) (*csav1.ListKeypairsResponse, error) { + // TODO CCIP-3108 implement me + panic("implement me") +} + +func (j JobClient) ReplayLogs(selectorToBlock map[uint64]uint64) error { + for _, node := range j.nodeStore.list() { + if err := node.ReplayLogs(selectorToBlock); err != nil { + return err + } + } + return nil +} + +func ApplyNodeFilter(filter *nodev1.ListNodesRequest_Filter, node *nodev1.Node) bool { + if filter == nil { + return true + } + if len(filter.Ids) > 0 { + idx := slices.IndexFunc(filter.Ids, func(id string) bool { + return node.Id == id + }) + if idx < 0 { + return false + } + } + for _, selector := range filter.Selectors { + idx := slices.IndexFunc(node.Labels, func(label *ptypes.Label) bool { + return label.Key == selector.Key + }) + if idx < 0 { + return false + } + label := node.Labels[idx] + + switch selector.Op { + case ptypes.SelectorOp_IN: + values := strings.Split(*selector.Value, ",") + found := slices.Contains(values, *label.Value) + if !found { + return false + } + case ptypes.SelectorOp_EQ: + if *label.Value != *selector.Value { + return false + } + case ptypes.SelectorOp_EXIST: + // do nothing + default: + panic("unimplemented selector") + } + } + return true +} diff --git a/deployment/go.mod b/deployment/go.mod index bbaf6492c22..00ffe0cba27 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -37,6 +37,7 @@ require ( github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20250211162441-3d6cea220efb github.com/smartcontractkit/chainlink-integrations/evm v0.0.0-20250213145514-41d874782c02 github.com/smartcontractkit/chainlink-protos/job-distributor v0.9.0 + github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 github.com/smartcontractkit/chainlink-solana v1.1.2-0.20250213203720-e15b1333a14a github.com/smartcontractkit/chainlink-testing-framework/framework v0.5.3 github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.22 @@ -356,7 +357,6 @@ require ( 
github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20250224190032-809e4b8cf29e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20250207205350-420ccacab78a // indirect - github.com/smartcontractkit/chainlink-protos/orchestrator v0.5.0 // indirect github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/svr v0.0.0-20250123084029-58cce9b32112 // indirect github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.10 // indirect From 30bbf70796a26d8b757cce161fa7db4e97dd44da Mon Sep 17 00:00:00 2001 From: Balamurali Gopalswami <167726375+b-gopalswami@users.noreply.github.com> Date: Wed, 26 Feb 2025 22:33:12 -0500 Subject: [PATCH 11/17] CCIP-2573: Adding Test router setup (#16503) * CCIP-2573: Adding Test router setup add validation * Adding USDCTokenPools view * lint fix * fix side-effects * Fix state view * lint fix * review comments * additonal check --- .../v1_5_1/cs_deploy_usdc_token_pools.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/deployment/ccip/changeset/v1_5_1/cs_deploy_usdc_token_pools.go b/deployment/ccip/changeset/v1_5_1/cs_deploy_usdc_token_pools.go index 68ae0f08a40..2708a2c25a3 100644 --- a/deployment/ccip/changeset/v1_5_1/cs_deploy_usdc_token_pools.go +++ b/deployment/ccip/changeset/v1_5_1/cs_deploy_usdc_token_pools.go @@ -80,7 +80,8 @@ func (i DeployUSDCTokenPoolInput) Validate(ctx context.Context, chain deployment // DeployUSDCTokenPoolContractsConfig defines the USDC token pool contracts that need to be deployed on each chain. type DeployUSDCTokenPoolContractsConfig struct { // USDCPools defines the per-chain configuration of each new USDC pool. 
- USDCPools map[uint64]DeployUSDCTokenPoolInput + USDCPools map[uint64]DeployUSDCTokenPoolInput + IsTestRouter bool } func (c DeployUSDCTokenPoolContractsConfig) Validate(env deployment.Environment) error { @@ -101,9 +102,12 @@ func (c DeployUSDCTokenPoolContractsConfig) Validate(env deployment.Environment) if !ok { return fmt.Errorf("chain with selector %d does not exist in state", chainSelector) } - if chainState.Router == nil { + if !c.IsTestRouter && chainState.Router == nil { return fmt.Errorf("missing router on %s", chain) } + if c.IsTestRouter && chainState.TestRouter == nil { + return fmt.Errorf("missing test router on %s", chain) + } if chainState.RMNProxy == nil { return fmt.Errorf("missing rmnProxy on %s", chain) } @@ -130,12 +134,15 @@ func DeployUSDCTokenPoolContractsChangeset(env deployment.Environment, c DeployU for chainSelector, poolConfig := range c.USDCPools { chain := env.Chains[chainSelector] chainState := state.Chains[chainSelector] - + router := chainState.Router + if c.IsTestRouter { + router = chainState.TestRouter + } _, err := deployment.DeployContract(env.Logger, chain, newAddresses, func(chain deployment.Chain) deployment.ContractDeploy[*usdc_token_pool.USDCTokenPool] { poolAddress, tx, usdcTokenPool, err := usdc_token_pool.DeployUSDCTokenPool( chain.DeployerKey, chain.Client, poolConfig.TokenMessenger, poolConfig.TokenAddress, - poolConfig.AllowList, chainState.RMNProxy.Address(), chainState.Router.Address(), + poolConfig.AllowList, chainState.RMNProxy.Address(), router.Address(), ) return deployment.ContractDeploy[*usdc_token_pool.USDCTokenPool]{ Address: poolAddress, From 3f33b49e4f6388466ad3d80a620d621b8a2ea6b9 Mon Sep 17 00:00:00 2001 From: Will Winder Date: Thu, 27 Feb 2025 01:38:10 -0500 Subject: [PATCH 12/17] [CCIP-5233] CCIP: price-only commit report method override. (#16422) * Option to send price only commit reports to a different method. * Update test compilation. * Lint * PR feedback. * Add named params. * Add missing import. --- .../ccip/ocrimpls/contract_transmitter.go | 139 ++++++++++-------- .../ocrimpls/contract_transmitter_test.go | 5 +- .../capabilities/ccip/oraclecreator/plugin.go | 5 + 3 files changed, 87 insertions(+), 62 deletions(-) diff --git a/core/capabilities/ccip/ocrimpls/contract_transmitter.go b/core/capabilities/ccip/ocrimpls/contract_transmitter.go index 1766ae39865..4d413083261 100644 --- a/core/capabilities/ccip/ocrimpls/contract_transmitter.go +++ b/core/capabilities/ccip/ocrimpls/contract_transmitter.go @@ -18,59 +18,76 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" ) +// ToCalldataFunc is a function that takes in the OCR3 report and signature data and processes them. +// It returns the contract name, method name, and arguments for the on-chain contract call. +// The ReportWithInfo bytes field is also decoded according to the implementation of this function, +// the commit and execute plugins have different representations for this data. type ToCalldataFunc func( rawReportCtx [2][32]byte, report ocr3types.ReportWithInfo[[]byte], rs, ss [][32]byte, vs [32]byte, -) (any, error) +) (contract string, method string, args any, err error) + +// NewToCommitCalldataFunc returns a ToCalldataFunc that is used to generate the calldata for the commit method. +// Multiple methods are accepted in order to allow for different methods to be called based on the report data. +// The Solana on-chain contract has two methods, one for the default commit and one for the price-only commit. 
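// A minimal sketch of the intended selection behaviour (the consts.Method* names are the
// ones wired up in later hunks of this patch; the call itself is illustrative):
//
//	toCalldata := NewToCommitCalldataFunc(consts.MethodCommit, consts.MethodCommitPriceOnly)
//	contract, method, args, err := toCalldata(rawReportCtx, reportWithInfo, rs, ss, vs)
//
// contract is always consts.ContractNameOffRamp; method falls back to the default commit
// method unless the decoded CommitReportInfo carries token prices and no merkle roots, in
// which case the price-only method is chosen. Passing an empty priceOnlyMethod disables
// the override entirely.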
+func NewToCommitCalldataFunc(defaultMethod, priceOnlyMethod string) ToCalldataFunc { + return func( + rawReportCtx [2][32]byte, + report ocr3types.ReportWithInfo[[]byte], + rs, ss [][32]byte, + vs [32]byte, + ) (contract string, method string, args any, err error) { + // Note that the name of the struct field is very important, since the encoder used + // by the chainwriter uses mapstructure, which will use the struct field name to map + // to the argument name in the function call. + // If, for whatever reason, we want to change the field name, make sure to add a `mapstructure:""` tag + // for that field. + var info ccipocr3.CommitReportInfo + if len(report.Info) != 0 { + var err error + info, err = ccipocr3.DecodeCommitReportInfo(report.Info) + if err != nil { + return "", "", nil, err + } + } -func ToCommitCalldata( - rawReportCtx [2][32]byte, - report ocr3types.ReportWithInfo[[]byte], - rs, ss [][32]byte, - vs [32]byte, -) (any, error) { - // Note that the name of the struct field is very important, since the encoder used - // by the chainwriter uses mapstructure, which will use the struct field name to map - // to the argument name in the function call. - // If, for whatever reason, we want to change the field name, make sure to add a `mapstructure:""` tag - // for that field. - var info ccipocr3.CommitReportInfo - if len(report.Info) != 0 { - var err error - info, err = ccipocr3.DecodeCommitReportInfo(report.Info) - if err != nil { - return nil, err + method = defaultMethod + if priceOnlyMethod != "" && len(info.MerkleRoots) == 0 && len(info.TokenPrices) > 0 { + method = priceOnlyMethod } - } - // WARNING: Be careful if you change the data types. - // Using a different type e.g. `type Foo [32]byte` instead of `[32]byte` - // will trigger undefined chainWriter behavior, e.g. transactions submitted with wrong arguments. - return struct { - ReportContext [2][32]byte - Report []byte - Rs [][32]byte - Ss [][32]byte - RawVs [32]byte - Info ccipocr3.CommitReportInfo - }{ - ReportContext: rawReportCtx, - Report: report.Report, - Rs: rs, - Ss: ss, - RawVs: vs, - Info: info, - }, nil + // WARNING: Be careful if you change the data types. + // Using a different type e.g. `type Foo [32]byte` instead of `[32]byte` + // will trigger undefined chainWriter behavior, e.g. transactions submitted with wrong arguments. + return consts.ContractNameOffRamp, + method, + struct { + ReportContext [2][32]byte + Report []byte + Rs [][32]byte + Ss [][32]byte + RawVs [32]byte + Info ccipocr3.CommitReportInfo + }{ + ReportContext: rawReportCtx, + Report: report.Report, + Rs: rs, + Ss: ss, + RawVs: vs, + Info: info, + }, nil + } } +// ToExecCalldata is a ToCalldataFunc that is used to generate the calldata for the execute method. func ToExecCalldata( rawReportCtx [2][32]byte, report ocr3types.ReportWithInfo[[]byte], _, _ [][32]byte, _ [32]byte, -) (any, error) { +) (contract string, method string, args any, err error) { // Note that the name of the struct field is very important, since the encoder used // by the chainwriter uses mapstructure, which will use the struct field name to map // to the argument name in the function call. 
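// In practice the `mapstructure:""` escape hatch mentioned above would look like this
// (type and field names are illustrative only):
//
//	type execCalldata struct {
//		Ctx [2][32]byte `mapstructure:"ReportContext"`
//	}
//
// i.e. the tag, not the Go field name, becomes the argument name seen by the chainwriter.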
@@ -85,19 +102,21 @@ func ToExecCalldata( var err error info, err = ccipocr3.DecodeExecuteReportInfo(report.Info) if err != nil { - return nil, err + return "", "", nil, err } } - return struct { - ReportContext [2][32]byte - Report []byte - Info ccipocr3.ExecuteReportInfo - }{ - ReportContext: rawReportCtx, - Report: report.Report, - Info: info, - }, nil + return consts.ContractNameOffRamp, + consts.MethodExecute, + struct { + ReportContext [2][32]byte + Report []byte + Info ccipocr3.ExecuteReportInfo + }{ + ReportContext: rawReportCtx, + Report: report.Report, + Info: info, + }, nil } var _ ocr3types.ContractTransmitter[[]byte] = &ccipTransmitter{} @@ -105,8 +124,6 @@ var _ ocr3types.ContractTransmitter[[]byte] = &ccipTransmitter{} type ccipTransmitter struct { cw commontypes.ContractWriter fromAccount ocrtypes.Account - contractName string - method string offrampAddress string toCalldataFn ToCalldataFunc } @@ -119,13 +136,18 @@ func XXXNewContractTransmitterTestsOnly( offrampAddress string, toCalldataFn ToCalldataFunc, ) ocr3types.ContractTransmitter[[]byte] { + wrappedToCalldataFunc := func(rawReportCtx [2][32]byte, + report ocr3types.ReportWithInfo[[]byte], + rs, ss [][32]byte, + vs [32]byte) (string, string, any, error) { + _, _, args, err := toCalldataFn(rawReportCtx, report, rs, ss, vs) + return contractName, method, args, err + } return &ccipTransmitter{ cw: cw, fromAccount: fromAccount, - contractName: contractName, - method: method, offrampAddress: offrampAddress, - toCalldataFn: toCalldataFn, + toCalldataFn: wrappedToCalldataFunc, } } @@ -133,14 +155,13 @@ func NewCommitContractTransmitter( cw commontypes.ContractWriter, fromAccount ocrtypes.Account, offrampAddress string, + defaultMethod, priceOnlyMethod string, ) ocr3types.ContractTransmitter[[]byte] { return &ccipTransmitter{ cw: cw, fromAccount: fromAccount, - contractName: consts.ContractNameOffRamp, - method: consts.MethodCommit, offrampAddress: offrampAddress, - toCalldataFn: ToCommitCalldata, + toCalldataFn: NewToCommitCalldataFunc(defaultMethod, priceOnlyMethod), } } @@ -152,8 +173,6 @@ func NewExecContractTransmitter( return &ccipTransmitter{ cw: cw, fromAccount: fromAccount, - contractName: consts.ContractNameOffRamp, - method: consts.MethodExecute, offrampAddress: offrampAddress, toCalldataFn: ToExecCalldata, } @@ -198,7 +217,7 @@ func (c *ccipTransmitter) Transmit( } // chain writer takes in the raw calldata and packs it on its own. 
- args, err := c.toCalldataFn(rawReportCtx, reportWithInfo, rs, ss, vs) + contract, method, args, err := c.toCalldataFn(rawReportCtx, reportWithInfo, rs, ss, vs) if err != nil { return fmt.Errorf("failed to generate call data: %w", err) } @@ -211,7 +230,7 @@ func (c *ccipTransmitter) Transmit( return fmt.Errorf("failed to generate UUID: %w", err) } zero := big.NewInt(0) - if err := c.cw.SubmitTransaction(ctx, c.contractName, c.method, args, fmt.Sprintf("%s-%s-%s", c.contractName, c.offrampAddress, txID.String()), c.offrampAddress, &meta, zero); err != nil { + if err := c.cw.SubmitTransaction(ctx, contract, method, args, fmt.Sprintf("%s-%s-%s", contract, c.offrampAddress, txID.String()), c.offrampAddress, &meta, zero); err != nil { return fmt.Errorf("failed to submit transaction thru chainwriter: %w", err) } diff --git a/core/capabilities/ccip/ocrimpls/contract_transmitter_test.go b/core/capabilities/ccip/ocrimpls/contract_transmitter_test.go index 53042e475e4..ddfcfb52b05 100644 --- a/core/capabilities/ccip/ocrimpls/contract_transmitter_test.go +++ b/core/capabilities/ccip/ocrimpls/contract_transmitter_test.go @@ -21,14 +21,15 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/smartcontractkit/chainlink-integrations/evm/heads" + "github.com/smartcontractkit/chainlink-ccip/pkg/consts" "github.com/smartcontractkit/chainlink-integrations/evm/assets" "github.com/smartcontractkit/chainlink-integrations/evm/client" evmconfig "github.com/smartcontractkit/chainlink-integrations/evm/config" "github.com/smartcontractkit/chainlink-integrations/evm/config/chaintype" "github.com/smartcontractkit/chainlink-integrations/evm/config/toml" "github.com/smartcontractkit/chainlink-integrations/evm/gas" + "github.com/smartcontractkit/chainlink-integrations/evm/heads" "github.com/smartcontractkit/chainlink-integrations/evm/keystore" "github.com/smartcontractkit/chainlink-integrations/evm/logpoller" evmtestutils "github.com/smartcontractkit/chainlink-integrations/evm/testutils" @@ -316,7 +317,7 @@ func newTestUniverse(t *testing.T, ks *keyringsAndSigners[[]byte]) *testUniverse contractName, methodTransmitWithSignatures, ocr3HelperAddr.Hex(), - ocrimpls.ToCommitCalldata, + ocrimpls.NewToCommitCalldataFunc(consts.MethodCommit, ""), ) transmitterWithoutSigs := ocrimpls.XXXNewContractTransmitterTestsOnly( chainWriter, diff --git a/core/capabilities/ccip/oraclecreator/plugin.go b/core/capabilities/ccip/oraclecreator/plugin.go index e11d913ed52..bdb71f9e3d5 100644 --- a/core/capabilities/ccip/oraclecreator/plugin.go +++ b/core/capabilities/ccip/oraclecreator/plugin.go @@ -79,6 +79,7 @@ var plugins = map[string]plugin{ TokenDataEncoder: ccipsolana.NewSolanaTokenDataEncoder(), GasEstimateProvider: ccipsolana.NewGasEstimateProvider(), RMNCrypto: func(lggr logger.Logger) cciptypes.RMNCrypto { return nil }, + PriceOnlyCommitFn: consts.MethodCommitPriceOnly, }, } @@ -94,6 +95,8 @@ type plugin struct { TokenDataEncoder cciptypes.TokenDataEncoder GasEstimateProvider cciptypes.EstimateProvider RMNCrypto func(lggr logger.Logger) cciptypes.RMNCrypto + // PriceOnlyCommitFn optional method override for price only commit reports. 
+ PriceOnlyCommitFn string } // pluginOracleCreator creates oracles that reference plugins running @@ -354,6 +357,8 @@ func (i *pluginOracleCreator) createFactoryAndTransmitter( transmitter = ocrimpls.NewCommitContractTransmitter(destChainWriter, ocrtypes.Account(destFromAccounts[0]), offrampAddrStr, + consts.MethodCommit, + plugins[chainFamily].PriceOnlyCommitFn, ) } else if config.Config.PluginType == uint8(cctypes.PluginTypeCCIPExec) { factory = execocr3.NewExecutePluginFactory( From 88a2f8f5cf9ed283fd366501c9f40a3a899e3cb8 Mon Sep 17 00:00:00 2001 From: Yashvardhan Nevatia Date: Thu, 27 Feb 2025 09:01:39 +0000 Subject: [PATCH 13/17] disable in CI only (#16591) Co-authored-by: Terry Tata --- .../changeset/solana/cs_deploy_chain_test.go | 223 +++++++++--------- 1 file changed, 113 insertions(+), 110 deletions(-) diff --git a/deployment/ccip/changeset/solana/cs_deploy_chain_test.go b/deployment/ccip/changeset/solana/cs_deploy_chain_test.go index 55803335097..9d377838abe 100644 --- a/deployment/ccip/changeset/solana/cs_deploy_chain_test.go +++ b/deployment/ccip/changeset/solana/cs_deploy_chain_test.go @@ -143,130 +143,133 @@ func TestDeployChainContractsChangesetSolana(t *testing.T) { }) require.NoError(t, err) testhelpers.ValidateSolanaState(t, e, solChainSelectors) - timelockSignerPDA, _ := testhelpers.TransferOwnershipSolana(t, &e, solChainSelectors[0], true, true, true, true) - upgradeAuthority := timelockSignerPDA - state, err := changeset.LoadOnchainStateSolana(e) - require.NoError(t, err) + // Expensive to run in CI + if !ci { + timelockSignerPDA, _ := testhelpers.TransferOwnershipSolana(t, &e, solChainSelectors[0], true, true, true, true) + upgradeAuthority := timelockSignerPDA + state, err := changeset.LoadOnchainStateSolana(e) + require.NoError(t, err) - e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ - commonchangeset.Configure( - deployment.CreateLegacyChangeSet(ccipChangesetSolana.DeployChainContractsChangeset), - ccipChangesetSolana.DeployChainContractsConfig{ - HomeChainSelector: homeChainSel, - ContractParamsPerChain: map[uint64]ccipChangesetSolana.ChainContractParams{ - solChainSelectors[0]: { - FeeQuoterParams: ccipChangesetSolana.FeeQuoterParams{ - DefaultMaxFeeJuelsPerMsg: solBinary.Uint128{Lo: 300000000, Hi: 0, Endianness: nil}, - }, - OffRampParams: ccipChangesetSolana.OffRampParams{ - EnableExecutionAfter: int64(globals.PermissionLessExecutionThreshold.Seconds()), + e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.DeployChainContractsChangeset), + ccipChangesetSolana.DeployChainContractsConfig{ + HomeChainSelector: homeChainSel, + ContractParamsPerChain: map[uint64]ccipChangesetSolana.ChainContractParams{ + solChainSelectors[0]: { + FeeQuoterParams: ccipChangesetSolana.FeeQuoterParams{ + DefaultMaxFeeJuelsPerMsg: solBinary.Uint128{Lo: 300000000, Hi: 0, Endianness: nil}, + }, + OffRampParams: ccipChangesetSolana.OffRampParams{ + EnableExecutionAfter: int64(globals.PermissionLessExecutionThreshold.Seconds()), + }, }, }, + NewUpgradeAuthority: &upgradeAuthority, }, - NewUpgradeAuthority: &upgradeAuthority, - }, - ), - commonchangeset.Configure( - deployment.CreateLegacyChangeSet(ccipChangesetSolana.BuildSolanaChangeset), - ccipChangesetSolana.BuildSolanaConfig{ - ChainSelector: solChainSelectors[0], - GitCommitSha: "0863d8fed5fbada9f352f33c405e1753cbb7d72c", - DestinationDir: 
e.SolChains[solChainSelectors[0]].ProgramsPath, - CleanDestinationDir: true, - CleanGitDir: true, - UpgradeKeys: map[deployment.ContractType]string{ - cs.Router: state.SolChains[solChainSelectors[0]].Router.String(), - cs.FeeQuoter: state.SolChains[solChainSelectors[0]].FeeQuoter.String(), + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.BuildSolanaChangeset), + ccipChangesetSolana.BuildSolanaConfig{ + ChainSelector: solChainSelectors[0], + GitCommitSha: "0863d8fed5fbada9f352f33c405e1753cbb7d72c", + DestinationDir: e.SolChains[solChainSelectors[0]].ProgramsPath, + CleanDestinationDir: true, + CleanGitDir: true, + UpgradeKeys: map[deployment.ContractType]string{ + cs.Router: state.SolChains[solChainSelectors[0]].Router.String(), + cs.FeeQuoter: state.SolChains[solChainSelectors[0]].FeeQuoter.String(), + }, }, - }, - ), - commonchangeset.Configure( - deployment.CreateLegacyChangeSet(ccipChangesetSolana.DeployChainContractsChangeset), - ccipChangesetSolana.DeployChainContractsConfig{ - HomeChainSelector: homeChainSel, - ContractParamsPerChain: map[uint64]ccipChangesetSolana.ChainContractParams{ - solChainSelectors[0]: { - FeeQuoterParams: ccipChangesetSolana.FeeQuoterParams{ - DefaultMaxFeeJuelsPerMsg: solBinary.Uint128{Lo: 300000000, Hi: 0, Endianness: nil}, - }, - OffRampParams: ccipChangesetSolana.OffRampParams{ - EnableExecutionAfter: int64(globals.PermissionLessExecutionThreshold.Seconds()), + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.DeployChainContractsChangeset), + ccipChangesetSolana.DeployChainContractsConfig{ + HomeChainSelector: homeChainSel, + ContractParamsPerChain: map[uint64]ccipChangesetSolana.ChainContractParams{ + solChainSelectors[0]: { + FeeQuoterParams: ccipChangesetSolana.FeeQuoterParams{ + DefaultMaxFeeJuelsPerMsg: solBinary.Uint128{Lo: 300000000, Hi: 0, Endianness: nil}, + }, + OffRampParams: ccipChangesetSolana.OffRampParams{ + EnableExecutionAfter: int64(globals.PermissionLessExecutionThreshold.Seconds()), + }, }, }, - }, - UpgradeConfig: ccipChangesetSolana.UpgradeConfig{ - NewFeeQuoterVersion: &deployment.Version1_1_0, - NewRouterVersion: &deployment.Version1_1_0, - UpgradeAuthority: upgradeAuthority, - SpillAddress: upgradeAuthority, - MCMS: &ccipChangeset.MCMSConfig{ - MinDelay: 1 * time.Second, + UpgradeConfig: ccipChangesetSolana.UpgradeConfig{ + NewFeeQuoterVersion: &deployment.Version1_1_0, + NewRouterVersion: &deployment.Version1_1_0, + UpgradeAuthority: upgradeAuthority, + SpillAddress: upgradeAuthority, + MCMS: &ccipChangeset.MCMSConfig{ + MinDelay: 1 * time.Second, + }, }, }, - }, - ), - }) - require.NoError(t, err) - testhelpers.ValidateSolanaState(t, e, solChainSelectors) - state, err = changeset.LoadOnchainStateSolana(e) - require.NoError(t, err) - oldOffRampAddress := state.SolChains[solChainSelectors[0]].OffRamp - // add a second offramp address - e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ - commonchangeset.Configure( - deployment.CreateLegacyChangeSet(ccipChangesetSolana.DeployChainContractsChangeset), - ccipChangesetSolana.DeployChainContractsConfig{ - HomeChainSelector: homeChainSel, - ContractParamsPerChain: map[uint64]ccipChangesetSolana.ChainContractParams{ - solChainSelectors[0]: { - FeeQuoterParams: ccipChangesetSolana.FeeQuoterParams{ - DefaultMaxFeeJuelsPerMsg: solBinary.Uint128{Lo: 300000000, Hi: 0, Endianness: nil}, - }, - OffRampParams: ccipChangesetSolana.OffRampParams{ - EnableExecutionAfter: 
int64(globals.PermissionLessExecutionThreshold.Seconds()), + ), + }) + require.NoError(t, err) + testhelpers.ValidateSolanaState(t, e, solChainSelectors) + state, err = changeset.LoadOnchainStateSolana(e) + require.NoError(t, err) + oldOffRampAddress := state.SolChains[solChainSelectors[0]].OffRamp + // add a second offramp address + e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.DeployChainContractsChangeset), + ccipChangesetSolana.DeployChainContractsConfig{ + HomeChainSelector: homeChainSel, + ContractParamsPerChain: map[uint64]ccipChangesetSolana.ChainContractParams{ + solChainSelectors[0]: { + FeeQuoterParams: ccipChangesetSolana.FeeQuoterParams{ + DefaultMaxFeeJuelsPerMsg: solBinary.Uint128{Lo: 300000000, Hi: 0, Endianness: nil}, + }, + OffRampParams: ccipChangesetSolana.OffRampParams{ + EnableExecutionAfter: int64(globals.PermissionLessExecutionThreshold.Seconds()), + }, }, }, - }, - UpgradeConfig: ccipChangesetSolana.UpgradeConfig{ - NewOffRampVersion: &deployment.Version1_1_0, - UpgradeAuthority: upgradeAuthority, - SpillAddress: upgradeAuthority, - MCMS: &ccipChangeset.MCMSConfig{ - MinDelay: 1 * time.Second, + UpgradeConfig: ccipChangesetSolana.UpgradeConfig{ + NewOffRampVersion: &deployment.Version1_1_0, + UpgradeAuthority: upgradeAuthority, + SpillAddress: upgradeAuthority, + MCMS: &ccipChangeset.MCMSConfig{ + MinDelay: 1 * time.Second, + }, }, }, - }, - ), - }) - require.NoError(t, err) - // verify the offramp address is different - state, err = changeset.LoadOnchainStateSolana(e) - require.NoError(t, err) - newOffRampAddress := state.SolChains[solChainSelectors[0]].OffRamp - require.NotEqual(t, oldOffRampAddress, newOffRampAddress) + ), + }) + require.NoError(t, err) + // verify the offramp address is different + state, err = changeset.LoadOnchainStateSolana(e) + require.NoError(t, err) + newOffRampAddress := state.SolChains[solChainSelectors[0]].OffRamp + require.NotEqual(t, oldOffRampAddress, newOffRampAddress) - // Verify router and fee quoter upgraded in place - // and offramp had 2nd address added - addresses, err := e.ExistingAddresses.AddressesForChain(solChainSelectors[0]) - require.NoError(t, err) - numRouters := 0 - numFeeQuoters := 0 - numOffRamps := 0 - for _, address := range addresses { - if address.Type == ccipChangeset.Router { - numRouters++ - } - if address.Type == ccipChangeset.FeeQuoter { - numFeeQuoters++ - } - if address.Type == ccipChangeset.OffRamp { - numOffRamps++ + // Verify router and fee quoter upgraded in place + // and offramp had 2nd address added + addresses, err := e.ExistingAddresses.AddressesForChain(solChainSelectors[0]) + require.NoError(t, err) + numRouters := 0 + numFeeQuoters := 0 + numOffRamps := 0 + for _, address := range addresses { + if address.Type == ccipChangeset.Router { + numRouters++ + } + if address.Type == ccipChangeset.FeeQuoter { + numFeeQuoters++ + } + if address.Type == ccipChangeset.OffRamp { + numOffRamps++ + } } + require.Equal(t, 1, numRouters) + require.Equal(t, 1, numFeeQuoters) + require.Equal(t, 2, numOffRamps) + require.NoError(t, err) + // solana verification + testhelpers.ValidateSolanaState(t, e, solChainSelectors) } - require.Equal(t, 1, numRouters) - require.Equal(t, 1, numFeeQuoters) - require.Equal(t, 2, numOffRamps) - require.NoError(t, err) - // solana verification - testhelpers.ValidateSolanaState(t, e, solChainSelectors) } From e4d6134a5e34d9180c57b9207d0c715232038aae Mon Sep 
17 00:00:00 2001 From: karen-stepanyan <91897037+karen-stepanyan@users.noreply.github.com> Date: Thu, 27 Feb 2025 14:22:40 +0400 Subject: [PATCH 14/17] Added data feeds deployment changesets (#16323) * add data feeds deployment changesets * fix lint issues pt1 * fix lint issues pt2 * fix major lint issues * fix goimports * replace if/else with switch in state * replace mcms with mcmsv2 * create BuildMCMProposal func * add acceptOwnership changeset * lint * add tests for mcms, accept_ownership changeset * fix lint issues * use changesetv2 * add test cases for mcms * fix lint issues * fix cache_deploy test * add mcms support to confirm/propose aggregator changesets. buildproposal batch * fix lint issues * remove custom wrapper for legacy changeset * add more complex changesets * fix lint * minor changes * fix lint * fix typo * update buildproposal env --- .../data-feeds/changeset/accept_ownership.go | 56 ++++++ .../changeset/accept_ownership_test.go | 65 +++++++ .../changeset/confirm_aggregator.go | 69 ++++++++ .../changeset/confirm_aggregator_test.go | 118 +++++++++++++ deployment/data-feeds/changeset/deploy.go | 78 +++++++++ .../changeset/deploy_aggregator_proxy.go | 70 ++++++++ .../changeset/deploy_aggregator_proxy_test.go | 52 ++++++ .../data-feeds/changeset/deploy_cache.go | 44 +++++ .../data-feeds/changeset/deploy_cache_test.go | 43 +++++ .../changeset/import_to_addressbook.go | 58 +++++++ .../changeset/import_to_addressbook_test.go | 48 ++++++ .../data-feeds/changeset/migrate_feeds.go | 101 +++++++++++ .../changeset/migrate_feeds_test.go | 79 +++++++++ .../changeset/new_feed_with_proxy.go | 144 ++++++++++++++++ .../changeset/new_feed_with_proxy_test.go | 111 ++++++++++++ deployment/data-feeds/changeset/proposal.go | 64 +++++++ .../changeset/propose_aggregator.go | 68 ++++++++ .../changeset/propose_aggregator_test.go | 101 +++++++++++ .../changeset/remove_dataid_proxy_mapping.go | 69 ++++++++ .../remove_dataid_proxy_mapping_test.go | 144 ++++++++++++++++ .../data-feeds/changeset/remove_feed.go | 92 ++++++++++ .../changeset/remove_feed_config.go | 64 +++++++ .../changeset/remove_feed_config_test.go | 160 +++++++++++++++++ .../data-feeds/changeset/remove_feed_test.go | 162 ++++++++++++++++++ .../data-feeds/changeset/set_feed_admin.go | 65 +++++++ .../changeset/set_feed_admin_test.go | 98 +++++++++++ .../data-feeds/changeset/set_feed_config.go | 73 ++++++++ .../changeset/set_feed_config_test.go | 138 +++++++++++++++ deployment/data-feeds/changeset/state.go | 143 ++++++++++++++++ .../changeset/testdata/import_addresses.json | 18 ++ .../changeset/testdata/migrate_feeds.json | 20 +++ .../data-feeds/changeset/types/types.go | 133 ++++++++++++++ .../changeset/update_data_id_proxy.go | 73 ++++++++ .../changeset/update_data_id_proxy_test.go | 122 +++++++++++++ deployment/data-feeds/changeset/validation.go | 45 +++++ deployment/data-feeds/changeset/view.go | 26 +++ deployment/data-feeds/shared/utils.go | 50 ++++++ .../data-feeds/view/v1_0/cache_contract.go | 28 +++ .../data-feeds/view/v1_0/proxy_contract.go | 48 ++++++ deployment/data-feeds/view/view.go | 31 ++++ 40 files changed, 3171 insertions(+) create mode 100644 deployment/data-feeds/changeset/accept_ownership.go create mode 100644 deployment/data-feeds/changeset/accept_ownership_test.go create mode 100644 deployment/data-feeds/changeset/confirm_aggregator.go create mode 100644 deployment/data-feeds/changeset/confirm_aggregator_test.go create mode 100644 deployment/data-feeds/changeset/deploy.go create mode 100644 
deployment/data-feeds/changeset/deploy_aggregator_proxy.go create mode 100644 deployment/data-feeds/changeset/deploy_aggregator_proxy_test.go create mode 100644 deployment/data-feeds/changeset/deploy_cache.go create mode 100644 deployment/data-feeds/changeset/deploy_cache_test.go create mode 100644 deployment/data-feeds/changeset/import_to_addressbook.go create mode 100644 deployment/data-feeds/changeset/import_to_addressbook_test.go create mode 100644 deployment/data-feeds/changeset/migrate_feeds.go create mode 100644 deployment/data-feeds/changeset/migrate_feeds_test.go create mode 100644 deployment/data-feeds/changeset/new_feed_with_proxy.go create mode 100644 deployment/data-feeds/changeset/new_feed_with_proxy_test.go create mode 100644 deployment/data-feeds/changeset/proposal.go create mode 100644 deployment/data-feeds/changeset/propose_aggregator.go create mode 100644 deployment/data-feeds/changeset/propose_aggregator_test.go create mode 100644 deployment/data-feeds/changeset/remove_dataid_proxy_mapping.go create mode 100644 deployment/data-feeds/changeset/remove_dataid_proxy_mapping_test.go create mode 100644 deployment/data-feeds/changeset/remove_feed.go create mode 100644 deployment/data-feeds/changeset/remove_feed_config.go create mode 100644 deployment/data-feeds/changeset/remove_feed_config_test.go create mode 100644 deployment/data-feeds/changeset/remove_feed_test.go create mode 100644 deployment/data-feeds/changeset/set_feed_admin.go create mode 100644 deployment/data-feeds/changeset/set_feed_admin_test.go create mode 100644 deployment/data-feeds/changeset/set_feed_config.go create mode 100644 deployment/data-feeds/changeset/set_feed_config_test.go create mode 100644 deployment/data-feeds/changeset/state.go create mode 100644 deployment/data-feeds/changeset/testdata/import_addresses.json create mode 100644 deployment/data-feeds/changeset/testdata/migrate_feeds.json create mode 100644 deployment/data-feeds/changeset/types/types.go create mode 100644 deployment/data-feeds/changeset/update_data_id_proxy.go create mode 100644 deployment/data-feeds/changeset/update_data_id_proxy_test.go create mode 100644 deployment/data-feeds/changeset/validation.go create mode 100644 deployment/data-feeds/changeset/view.go create mode 100644 deployment/data-feeds/shared/utils.go create mode 100644 deployment/data-feeds/view/v1_0/cache_contract.go create mode 100644 deployment/data-feeds/view/v1_0/proxy_contract.go create mode 100644 deployment/data-feeds/view/view.go diff --git a/deployment/data-feeds/changeset/accept_ownership.go b/deployment/data-feeds/changeset/accept_ownership.go new file mode 100644 index 00000000000..726d782404a --- /dev/null +++ b/deployment/data-feeds/changeset/accept_ownership.go @@ -0,0 +1,56 @@ +package changeset + +import ( + "errors" + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// AcceptOwnershipChangeset is a changeset that will create an MCM proposal to accept the ownership of a contract. +// Returns an MSM proposal to accept the ownership of a contract. Doesn't return a new addressbook. +// Once proposal is executed, new owned contract can be imported into the addressbook. 
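// A minimal usage sketch, mirroring the accompanying test (variable names are
// illustrative; ownership of the contract must already have been transferred to the
// timelock before this changeset is applied):
//
//	commonChangesets.Configure(
//		AcceptOwnershipChangeset,
//		types.AcceptOwnershipConfig{
//			ChainSelector:   chainSelector,
//			ContractAddress: contractAddress,
//			McmsConfig:      &types.MCMSConfig{MinDelay: 1},
//		},
//	)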
+var AcceptOwnershipChangeset = deployment.CreateChangeSet(acceptOwnershipLogic, acceptOwnershipPrecondition) + +func acceptOwnershipLogic(env deployment.Environment, c types.AcceptOwnershipConfig) (deployment.ChangesetOutput, error) { + chain := env.Chains[c.ChainSelector] + + _, contract, err := commonChangesets.LoadOwnableContract(c.ContractAddress, chain.Client) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to load the contract %w", err) + } + + tx, err := contract.AcceptOwnership(deployment.SimTransactOpts()) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to create accept transfer ownership tx %w", err) + } + + proposal, err := BuildMCMProposals(env, "accept ownership to timelock", c.ChainSelector, []ProposalData{ + { + contract: c.ContractAddress.Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil +} + +func acceptOwnershipPrecondition(env deployment.Environment, c types.AcceptOwnershipConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if c.McmsConfig == nil { + return errors.New("mcms config is required") + } + + return ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector) +} diff --git a/deployment/data-feeds/changeset/accept_ownership_test.go b/deployment/data-feeds/changeset/accept_ownership_test.go new file mode 100644 index 00000000000..60ac25d9d24 --- /dev/null +++ b/deployment/data-feeds/changeset/accept_ownership_test.go @@ -0,0 +1,65 @@ +package changeset + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + + "github.com/smartcontractkit/chainlink/deployment" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestAcceptOwnership(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + chain := env.Chains[chainSelector] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + cache, _ := DeployCache(chain, []string{}) + tx, _ := cache.Contract.TransferOwnership(chain.DeployerKey, common.HexToAddress(timeLockAddress)) + _, err = chain.Confirm(tx) + require.NoError(t, err) + + _, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + AcceptOwnershipChangeset, + types.AcceptOwnershipConfig{ + ChainSelector: 
chainSelector, + ContractAddress: cache.Contract.Address(), + McmsConfig: &types.MCMSConfig{ + MinDelay: 1, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/confirm_aggregator.go b/deployment/data-feeds/changeset/confirm_aggregator.go new file mode 100644 index 00000000000..e4cc4cb919a --- /dev/null +++ b/deployment/data-feeds/changeset/confirm_aggregator.go @@ -0,0 +1,69 @@ +package changeset + +import ( + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + proxy "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/aggregator_proxy" +) + +// ConfirmAggregatorChangeset is a changeset that confirms a proposed aggregator on deployed AggregatorProxy contract +// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. +var ConfirmAggregatorChangeset = deployment.CreateChangeSet(confirmAggregatorLogic, confirmAggregatorPrecondition) + +func confirmAggregatorLogic(env deployment.Environment, c types.ProposeConfirmAggregatorConfig) (deployment.ChangesetOutput, error) { + chain := env.Chains[c.ChainSelector] + + aggregatorProxy, err := proxy.NewAggregatorProxy(c.ProxyAddress, chain.Client) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to load AggregatorProxy: %w", err) + } + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := aggregatorProxy.ConfirmAggregator(txOpt, c.NewAggregatorAddress) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to execute ConfirmAggregator: %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to confirm a new aggregator", c.ChainSelector, []ProposalData{ + { + contract: aggregatorProxy.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func confirmAggregatorPrecondition(env deployment.Environment, c types.ProposeConfirmAggregatorConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return nil +} diff --git a/deployment/data-feeds/changeset/confirm_aggregator_test.go b/deployment/data-feeds/changeset/confirm_aggregator_test.go new file mode 100644 index 00000000000..2dcace3dbf3 --- /dev/null +++ b/deployment/data-feeds/changeset/confirm_aggregator_test.go @@ -0,0 +1,118 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + commonChangesets 
"github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestConfirmAggregator(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + // without MCMS + newEnv, err := commonChangesets.Apply(t, env, nil, + // Deploy cache and aggregator proxy + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + changeset.DeployAggregatorProxyChangeset, + types.DeployAggregatorProxyConfig{ + ChainsToDeploy: []uint64{chainSelector}, + AccessController: []common.Address{common.HexToAddress("0x")}, + }, + ), + ) + require.NoError(t, err) + + proxyAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "AggregatorProxy") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Propose and confirm new Aggregator + commonChangesets.Configure( + changeset.ProposeAggregatorChangeset, + types.ProposeConfirmAggregatorConfig{ + ChainSelector: chainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress("0x123"), + }, + ), + commonChangesets.Configure( + changeset.ConfirmAggregatorChangeset, + types.ProposeConfirmAggregatorConfig{ + ChainSelector: chainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress("0x123"), + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + // with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // propose new Aggregator + commonChangesets.Configure( + changeset.ProposeAggregatorChangeset, + types.ProposeConfirmAggregatorConfig{ + ChainSelector: chainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress("0x124"), + }, + ), + // transfer proxy ownership to timelock + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(proxyAddress)}, + }, + MinDelay: 0, + }, + ), + // confirm from timelock + commonChangesets.Configure( + changeset.ConfirmAggregatorChangeset, + types.ProposeConfirmAggregatorConfig{ + ChainSelector: chainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress("0x124"), + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/deploy.go b/deployment/data-feeds/changeset/deploy.go new file mode 100644 index 00000000000..b30f3acd555 --- /dev/null +++ b/deployment/data-feeds/changeset/deploy.go @@ -0,0 +1,78 @@ +package changeset + +import ( + "fmt" + + 
"github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + proxy "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/aggregator_proxy" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" +) + +func DeployCache(chain deployment.Chain, labels []string) (*types.DeployCacheResponse, error) { + cacheAddr, tx, cacheContract, err := cache.DeployDataFeedsCache(chain.DeployerKey, chain.Client) + if err != nil { + return nil, fmt.Errorf("failed to deploy DataFeedsCache: %w", err) + } + + _, err = chain.Confirm(tx) + if err != nil { + return nil, fmt.Errorf("failed to confirm DataFeedsCache: %w", err) + } + + tvStr, err := cacheContract.TypeAndVersion(&bind.CallOpts{}) + if err != nil { + return nil, fmt.Errorf("failed to get type and version: %w", err) + } + + tv, err := deployment.TypeAndVersionFromString(tvStr) + if err != nil { + return nil, fmt.Errorf("failed to parse type and version from %s: %w", tvStr, err) + } + + for _, label := range labels { + tv.Labels.Add(label) + } + + resp := &types.DeployCacheResponse{ + Address: cacheAddr, + Tx: tx.Hash(), + Tv: tv, + Contract: cacheContract, + } + return resp, nil +} + +func DeployAggregatorProxy(chain deployment.Chain, aggregator common.Address, accessController common.Address, labels []string) (*types.DeployProxyResponse, error) { + proxyAddr, tx, proxyContract, err := proxy.DeployAggregatorProxy(chain.DeployerKey, chain.Client, aggregator, accessController) + if err != nil { + return nil, fmt.Errorf("failed to deploy AggregatorProxy: %w", err) + } + + _, err = chain.Confirm(tx) + if err != nil { + return nil, fmt.Errorf("failed to confirm AggregatorProxy: %w", err) + } + + // AggregatorProxy contract doesn't implement typeAndVersion interface, so we have to set it manually + tvStr := "AggregatorProxy 1.0.0" + tv, err := deployment.TypeAndVersionFromString(tvStr) + if err != nil { + return nil, fmt.Errorf("failed to parse type and version from %s: %w", tvStr, err) + } + + for _, label := range labels { + tv.Labels.Add(label) + } + + resp := &types.DeployProxyResponse{ + Address: proxyAddr, + Tx: tx.Hash(), + Tv: tv, + Contract: proxyContract, + } + return resp, nil +} diff --git a/deployment/data-feeds/changeset/deploy_aggregator_proxy.go b/deployment/data-feeds/changeset/deploy_aggregator_proxy.go new file mode 100644 index 00000000000..7b555d63d9e --- /dev/null +++ b/deployment/data-feeds/changeset/deploy_aggregator_proxy.go @@ -0,0 +1,70 @@ +package changeset + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// DeployAggregatorProxyChangeset deploys an AggregatorProxy contract on the given chains. It uses the address of DataFeedsCache contract +// from addressbook to set it in the AggregatorProxy constructor. Returns a new addressbook with deploy AggregatorProxy contract addresses. 
+var DeployAggregatorProxyChangeset = deployment.CreateChangeSet(deployAggregatorProxyLogic, deployAggregatorProxyPrecondition) + +func deployAggregatorProxyLogic(env deployment.Environment, c types.DeployAggregatorProxyConfig) (deployment.ChangesetOutput, error) { + lggr := env.Logger + ab := deployment.NewMemoryAddressBook() + + for index, chainSelector := range c.ChainsToDeploy { + chain := env.Chains[chainSelector] + addressMap, _ := env.ExistingAddresses.AddressesForChain(chainSelector) + + var dataFeedsCacheAddress string + cacheTV := deployment.NewTypeAndVersion(DataFeedsCache, deployment.Version1_0_0) + cacheTV.Labels.Add("data-feeds") + for addr, tv := range addressMap { + if tv.String() == cacheTV.String() { + dataFeedsCacheAddress = addr + } + } + + if dataFeedsCacheAddress == "" { + return deployment.ChangesetOutput{}, fmt.Errorf("DataFeedsCache contract address not found in addressbook for chain %d", chainSelector) + } + + proxyResponse, err := DeployAggregatorProxy(chain, common.HexToAddress(dataFeedsCacheAddress), c.AccessController[index], c.Labels) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to deploy AggregatorProxy: %w", err) + } + + lggr.Infof("Deployed %s chain selector %d addr %s", proxyResponse.Tv.String(), chain.Selector, proxyResponse.Address.String()) + + err = ab.Save(chain.Selector, proxyResponse.Address.String(), proxyResponse.Tv) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to save AggregatorProxy: %w", err) + } + } + return deployment.ChangesetOutput{AddressBook: ab}, nil +} + +func deployAggregatorProxyPrecondition(env deployment.Environment, c types.DeployAggregatorProxyConfig) error { + if len(c.AccessController) != len(c.ChainsToDeploy) { + return errors.New("AccessController addresses must be provided for each chain to deploy") + } + + for _, chainSelector := range c.ChainsToDeploy { + _, ok := env.Chains[chainSelector] + if !ok { + return errors.New("chain not found in environment") + } + _, err := env.ExistingAddresses.AddressesForChain(chainSelector) + if err != nil { + return fmt.Errorf("failed to get addessbook for chain %d: %w", chainSelector, err) + } + } + + return nil +} diff --git a/deployment/data-feeds/changeset/deploy_aggregator_proxy_test.go b/deployment/data-feeds/changeset/deploy_aggregator_proxy_test.go new file mode 100644 index 00000000000..23c062e1de2 --- /dev/null +++ b/deployment/data-feeds/changeset/deploy_aggregator_proxy_test.go @@ -0,0 +1,52 @@ +package changeset + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestAggregatorProxy(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 2, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + resp, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + DeployAggregatorProxyChangeset, + 
types.DeployAggregatorProxyConfig{ + ChainsToDeploy: []uint64{chainSelector}, + AccessController: []common.Address{common.HexToAddress("0x")}, + }, + ), + ) + + require.NoError(t, err) + require.NotNil(t, resp) + + addrs, err := resp.ExistingAddresses.AddressesForChain(chainSelector) + require.NoError(t, err) + require.Len(t, addrs, 2) // AggregatorProxy and DataFeedsCache +} diff --git a/deployment/data-feeds/changeset/deploy_cache.go b/deployment/data-feeds/changeset/deploy_cache.go new file mode 100644 index 00000000000..38bc5619f5d --- /dev/null +++ b/deployment/data-feeds/changeset/deploy_cache.go @@ -0,0 +1,44 @@ +package changeset + +import ( + "errors" + "fmt" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// DeployCacheChangeset deploys the DataFeedsCache contract to the specified chains +// Returns a new addressbook with deployed DataFeedsCache contracts +var DeployCacheChangeset = deployment.CreateChangeSet(deployCacheLogic, deployCachePrecondition) + +func deployCacheLogic(env deployment.Environment, c types.DeployConfig) (deployment.ChangesetOutput, error) { + lggr := env.Logger + ab := deployment.NewMemoryAddressBook() + for _, chainSelector := range c.ChainsToDeploy { + chain := env.Chains[chainSelector] + cacheResponse, err := DeployCache(chain, c.Labels) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to deploy DataFeedsCache: %w", err) + } + lggr.Infof("Deployed %s chain selector %d addr %s", cacheResponse.Tv.String(), chain.Selector, cacheResponse.Address.String()) + + err = ab.Save(chain.Selector, cacheResponse.Address.String(), cacheResponse.Tv) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to save DataFeedsCache: %w", err) + } + } + + return deployment.ChangesetOutput{AddressBook: ab}, nil +} + +func deployCachePrecondition(env deployment.Environment, c types.DeployConfig) error { + for _, chainSelector := range c.ChainsToDeploy { + _, ok := env.Chains[chainSelector] + if !ok { + return errors.New("chain not found in environment") + } + } + + return nil +} diff --git a/deployment/data-feeds/changeset/deploy_cache_test.go b/deployment/data-feeds/changeset/deploy_cache_test.go new file mode 100644 index 00000000000..83c0442973e --- /dev/null +++ b/deployment/data-feeds/changeset/deploy_cache_test.go @@ -0,0 +1,43 @@ +package changeset_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestDeployCache(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 2, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + resp, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + }, + ), + ) + require.NoError(t, err) + require.NotNil(t, resp) + + addrs, err := resp.ExistingAddresses.AddressesForChain(chainSelector) + require.NoError(t, err) + 
require.Len(t, addrs, 1) +} diff --git a/deployment/data-feeds/changeset/import_to_addressbook.go b/deployment/data-feeds/changeset/import_to_addressbook.go new file mode 100644 index 00000000000..08628568ee9 --- /dev/null +++ b/deployment/data-feeds/changeset/import_to_addressbook.go @@ -0,0 +1,58 @@ +package changeset + +import ( + "errors" + "fmt" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" +) + +// ImportToAddressbookChangeset is a changeset that reads already deployed contract addresses from input file +// and saves them to the address book. Returns a new addressbook with the imported addresses. +var ImportToAddressbookChangeset = deployment.CreateChangeSet(importToAddressbookLogic, importToAddressbookPrecondition) + +type AddressesSchema struct { + Address string `json:"address"` + TypeAndVersion deployment.TypeAndVersion `json:"typeAndVersion"` + Label string `json:"label"` +} + +func importToAddressbookLogic(env deployment.Environment, c types.ImportToAddressbookConfig) (deployment.ChangesetOutput, error) { + ab := deployment.NewMemoryAddressBook() + + addresses, _ := shared.LoadJSON[[]*AddressesSchema](c.InputFileName, c.InputFS) + + for _, address := range addresses { + address.TypeAndVersion.AddLabel(address.Label) + err := ab.Save( + c.ChainSelector, + address.Address, + address.TypeAndVersion, + ) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to save address %s: %w", address.Address, err) + } + } + + return deployment.ChangesetOutput{AddressBook: ab}, nil +} + +func importToAddressbookPrecondition(env deployment.Environment, c types.ImportToAddressbookConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if c.InputFileName == "" { + return errors.New("input file name is required") + } + + _, err := shared.LoadJSON[[]*AddressesSchema](c.InputFileName, c.InputFS) + if err != nil { + return fmt.Errorf("failed to load addresses input file: %w", err) + } + + return nil +} diff --git a/deployment/data-feeds/changeset/import_to_addressbook_test.go b/deployment/data-feeds/changeset/import_to_addressbook_test.go new file mode 100644 index 00000000000..6df865f7c31 --- /dev/null +++ b/deployment/data-feeds/changeset/import_to_addressbook_test.go @@ -0,0 +1,48 @@ +package changeset_test + +import ( + "embed" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +//go:embed testdata/* +var testFS embed.FS + +func TestImportToAddressbook(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + resp, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.ImportToAddressbookChangeset, + types.ImportToAddressbookConfig{ + ChainSelector: chainSelector, + InputFileName: 
"testdata/import_addresses.json", + InputFS: testFS, + }, + ), + ) + + require.NoError(t, err) + require.NotNil(t, resp) + tv, _ := resp.ExistingAddresses.AddressesForChain(chainSelector) + require.Len(t, tv, 2) +} diff --git a/deployment/data-feeds/changeset/migrate_feeds.go b/deployment/data-feeds/changeset/migrate_feeds.go new file mode 100644 index 00000000000..922c29df846 --- /dev/null +++ b/deployment/data-feeds/changeset/migrate_feeds.go @@ -0,0 +1,101 @@ +package changeset + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" +) + +// MigrateFeedsChangeset Migrates feeds to DataFeedsCache contract. +// 1. It reads the existing Aggregator Proxy contract addresses from the input file and saves them to the address book. +// 2. It reads the data ids and descriptions from the input file and sets the feed config on the DataFeedsCache contract. +// Returns a new addressbook with the deployed AggregatorProxy addresses. +var MigrateFeedsChangeset = deployment.CreateChangeSet(migrateFeedsLogic, migrateFeedsPrecondition) + +type MigrationSchema struct { + Address string `json:"address"` + TypeAndVersion deployment.TypeAndVersion `json:"typeAndVersion"` + FeedID string `json:"feedId"` // without 0x prefix + Description string `json:"description"` +} + +func migrateFeedsLogic(env deployment.Environment, c types.MigrationConfig) (deployment.ChangesetOutput, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[c.ChainSelector] + chainState := state.Chains[c.ChainSelector] + contract := chainState.DataFeedsCache[c.CacheAddress] + ab := deployment.NewMemoryAddressBook() + + proxies, _ := shared.LoadJSON[[]*MigrationSchema](c.InputFileName, c.InputFS) + + dataIDs := make([][16]byte, len(proxies)) + addresses := make([]common.Address, len(proxies)) + descriptions := make([]string, len(proxies)) + for i, proxy := range proxies { + dataIDBytes16, err := shared.ConvertHexToBytes16(proxy.FeedID) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("cannot convert hex to bytes %s: %w", proxy.FeedID, err) + } + + dataIDs[i] = dataIDBytes16 + addresses[i] = common.HexToAddress(proxy.Address) + descriptions[i] = proxy.Description + + proxy.TypeAndVersion.AddLabel(proxy.Description) + err = ab.Save( + c.ChainSelector, + proxy.Address, + proxy.TypeAndVersion, + ) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to save address %s: %w", proxy.Address, err) + } + } + + // Set the feed config + tx, err := contract.SetDecimalFeedConfigs(chain.DeployerKey, dataIDs, descriptions, c.WorkflowMetadata) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to set feed config %w", err) + } + + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + // Set the proxy to dataId mapping + tx, err = contract.UpdateDataIdMappingsForProxies(chain.DeployerKey, addresses, dataIDs) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to update feed proxy mapping %w", err) + } + + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{AddressBook: ab}, 
nil +} + +func migrateFeedsPrecondition(env deployment.Environment, c types.MigrationConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + _, err := shared.LoadJSON[[]*MigrationSchema](c.InputFileName, c.InputFS) + if err != nil { + return fmt.Errorf("failed to load addresses input file: %w", err) + } + + if len(c.WorkflowMetadata) == 0 { + return errors.New("workflow metadata is required") + } + + return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress) +} diff --git a/deployment/data-feeds/changeset/migrate_feeds_test.go b/deployment/data-feeds/changeset/migrate_feeds_test.go new file mode 100644 index 00000000000..47f772a348d --- /dev/null +++ b/deployment/data-feeds/changeset/migrate_feeds_test.go @@ -0,0 +1,79 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" +) + +func TestMigrateFeeds(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + resp, err := commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(env.Chains[chainSelector].DeployerKey.From.Hex()), + IsAdmin: true, + }, + ), + commonChangesets.Configure( + changeset.MigrateFeedsChangeset, + types.MigrationConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + InputFileName: "testdata/migrate_feeds.json", + InputFS: testFS, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + }, + ), + ) + require.NoError(t, err) + require.NotNil(t, resp) + addresses, err := resp.ExistingAddresses.AddressesForChain(chainSelector) + require.NoError(t, err) + require.Len(t, addresses, 3) // DataFeedsCache and two migrated proxies +} diff --git a/deployment/data-feeds/changeset/new_feed_with_proxy.go b/deployment/data-feeds/changeset/new_feed_with_proxy.go new file mode 100644 index 00000000000..a56278db53a --- /dev/null 
+++ b/deployment/data-feeds/changeset/new_feed_with_proxy.go @@ -0,0 +1,144 @@ +package changeset + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/common/changeset" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// NewFeedWithProxyChangeset configures a new feed with a proxy +// 1. Deploys AggregatorProxy contract for given chainselector +// 2. Proposes and confirms DataFeedsCache contract as an aggregator on AggregatorProxy +// 3. Creates an MCMS proposal to transfer the ownership of AggregatorProxy contract to timelock +// 4. Creates a proposal to set a feed config on DataFeedsCache contract +// 5. Creates a proposal to set a feed proxy mapping on DataFeedsCache contract +// Returns a new addressbook with the new AggregatorProxy contract address and 3 MCMS proposals +var NewFeedWithProxyChangeset = deployment.CreateChangeSet(newFeedWithProxyLogic, newFeedWithProxyPrecondition) + +func newFeedWithProxyLogic(env deployment.Environment, c types.NewFeedWithProxyConfig) (deployment.ChangesetOutput, error) { + chain := env.Chains[c.ChainSelector] + state, _ := LoadOnchainState(env) + chainState := state.Chains[c.ChainSelector] + + // Deploy AggregatorProxy contract with deployer key + proxyConfig := types.DeployAggregatorProxyConfig{ + ChainsToDeploy: []uint64{c.ChainSelector}, + AccessController: []common.Address{c.AccessController}, + Labels: c.Labels, + } + newEnv, err := DeployAggregatorProxyChangeset.Apply(env, proxyConfig) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to execute DeployAggregatorProxyChangeset: %w", err) + } + + proxyAddress, err := deployment.SearchAddressBook(newEnv.AddressBook, c.ChainSelector, "AggregatorProxy") + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("AggregatorProxy not present in addressbook: %w", err) + } + + addressMap, _ := env.ExistingAddresses.AddressesForChain(c.ChainSelector) + var dataFeedsCacheAddress string + cacheTV := deployment.NewTypeAndVersion(DataFeedsCache, deployment.Version1_0_0) + cacheTV.Labels.Add("data-feeds") + for addr, tv := range addressMap { + if tv.String() == cacheTV.String() { + dataFeedsCacheAddress = addr + } + } + + dataFeedsCache := chainState.DataFeedsCache[common.HexToAddress(dataFeedsCacheAddress)] + if dataFeedsCache == nil { + return deployment.ChangesetOutput{}, errors.New("DataFeedsCache contract not found in onchain state") + } + + // Propose and confirm DataFeedsCache contract as an aggregator on AggregatorProxy + proposeAggregatorConfig := types.ProposeConfirmAggregatorConfig{ + ChainSelector: c.ChainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress(dataFeedsCacheAddress), + } + + _, err = ProposeAggregatorChangeset.Apply(env, proposeAggregatorConfig) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to execute ProposeAggregatorChangeset: %w", err) + } + + _, err = ConfirmAggregatorChangeset.Apply(env, proposeAggregatorConfig) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to execute ConfirmAggregatorChangeset: %w", err) + } + + // Create an MCMS proposal to transfer the ownership of AggregatorProxy contract to timelock and set the feed configs + // We don't use the 
existing changesets so that we can batch the transactions into a single MCMS proposal + + // transfer proxy ownership + timelockAddr, _ := deployment.SearchAddressBook(env.ExistingAddresses, c.ChainSelector, commonTypes.RBACTimelock) + _, proxyContract, err := changeset.LoadOwnableContract(common.HexToAddress(proxyAddress), chain.Client) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to load proxy contract %w", err) + } + tx, err := proxyContract.TransferOwnership(chain.DeployerKey, common.HexToAddress(timelockAddr)) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to create transfer ownership tx %w", err) + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + // accept proxy ownership proposal + acceptProxyOwnerShipTx, err := proxyContract.AcceptOwnership(deployment.SimTransactOpts()) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to create accept transfer ownership tx %w", err) + } + + // set feed config proposal + setFeedConfigTx, err := dataFeedsCache.SetDecimalFeedConfigs(deployment.SimTransactOpts(), [][16]byte{c.DataID}, []string{c.Description}, c.WorkflowMetadata) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to set feed config %w", err) + } + + // set feed proxy mapping proposal + setProxyMappingTx, err := dataFeedsCache.UpdateDataIdMappingsForProxies(deployment.SimTransactOpts(), []common.Address{common.HexToAddress(proxyAddress)}, [][16]byte{c.DataID}) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to set proxy-dataId mapping %w", err) + } + + txs := []ProposalData{ + { + contract: proxyContract.Address().Hex(), + tx: acceptProxyOwnerShipTx, + }, + { + contract: dataFeedsCache.Address().Hex(), + tx: setFeedConfigTx, + }, + { + contract: dataFeedsCache.Address().Hex(), + tx: setProxyMappingTx, + }, + } + + proposals, err := BuildMCMProposals(env, "accept AggregatorProxy ownership to timelock. 
set feed config and proxy mapping on cache", c.ChainSelector, txs, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + + return deployment.ChangesetOutput{AddressBook: newEnv.AddressBook, MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposals}}, nil +} + +func newFeedWithProxyPrecondition(env deployment.Environment, c types.NewFeedWithProxyConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + return ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector) +} diff --git a/deployment/data-feeds/changeset/new_feed_with_proxy_test.go b/deployment/data-feeds/changeset/new_feed_with_proxy_test.go new file mode 100644 index 00000000000..b1ccc85206c --- /dev/null +++ b/deployment/data-feeds/changeset/new_feed_with_proxy_test.go @@ -0,0 +1,111 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestNewFeedWithProxy(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Set the admin to the timelock + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(timeLockAddress), + IsAdmin: true, + }, + ), + // Transfer cache ownership to MCMS + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + 
}, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + dataid, _ := shared.ConvertHexToBytes16("01bb0467f50003040000000000000000") + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.NewFeedWithProxyChangeset, + types.NewFeedWithProxyConfig{ + ChainSelector: chainSelector, + AccessController: common.HexToAddress("0x00"), + DataID: dataid, + Description: "test2", + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) + + addrs, err := newEnv.ExistingAddresses.AddressesForChain(chainSelector) + require.NoError(t, err) + // AggregatorProxy, DataFeedsCache, CallProxy, RBACTimelock, ProposerManyChainMultiSig, BypasserManyChainMultiSig, CancellerManyChainMultiSig + require.Len(t, addrs, 7) +} diff --git a/deployment/data-feeds/changeset/proposal.go b/deployment/data-feeds/changeset/proposal.go new file mode 100644 index 00000000000..d896768b976 --- /dev/null +++ b/deployment/data-feeds/changeset/proposal.go @@ -0,0 +1,64 @@ +package changeset + +import ( + "encoding/json" + "time" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + mcmslib "github.com/smartcontractkit/mcms" + "github.com/smartcontractkit/mcms/sdk" + "github.com/smartcontractkit/mcms/sdk/evm" + mcmstypes "github.com/smartcontractkit/mcms/types" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" +) + +type ProposalData struct { + contract string + tx *gethTypes.Transaction +} + +func BuildMCMProposals(env deployment.Environment, description string, chainSelector uint64, pd []ProposalData, minDelay time.Duration) (*mcmslib.TimelockProposal, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[chainSelector] + chainState := state.Chains[chainSelector] + + var transactions []mcmstypes.Transaction + for _, proposal := range pd { + transactions = append(transactions, mcmstypes.Transaction{ + To: proposal.contract, + Data: proposal.tx.Data(), + AdditionalFields: json.RawMessage(`{"value": 0}`), + }) + } + + ops := &mcmstypes.BatchOperation{ + ChainSelector: mcmstypes.ChainSelector(chainSelector), + Transactions: transactions, + } + + timelocksPerChain := map[uint64]string{ + chainSelector: chainState.Timelock.Address().Hex(), + } + proposerMCMSes := map[uint64]string{ + chainSelector: chainState.ProposerMcm.Address().Hex(), + } + + inspectorPerChain := map[uint64]sdk.Inspector{} + inspectorPerChain[chainSelector] = evm.NewInspector(chain.Client) + + proposal, err := proposalutils.BuildProposalFromBatchesV2( + env, + timelocksPerChain, + proposerMCMSes, + inspectorPerChain, + []mcmstypes.BatchOperation{*ops}, + description, + minDelay, + ) + if err != nil { + return nil, err + } + return proposal, err +} diff --git a/deployment/data-feeds/changeset/propose_aggregator.go b/deployment/data-feeds/changeset/propose_aggregator.go new file mode 100644 index 00000000000..72a1ddc0325 --- /dev/null +++ b/deployment/data-feeds/changeset/propose_aggregator.go @@ -0,0 +1,68 @@ +package changeset + +import ( + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + 
proxy "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/aggregator_proxy" +) + +// ProposeAggregatorChangeset is a changeset that proposes a new aggregator on existing AggregatorProxy contract +// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. +var ProposeAggregatorChangeset = deployment.CreateChangeSet(proposeAggregatorLogic, proposeAggregatorPrecondition) + +func proposeAggregatorLogic(env deployment.Environment, c types.ProposeConfirmAggregatorConfig) (deployment.ChangesetOutput, error) { + chain := env.Chains[c.ChainSelector] + + aggregatorProxy, err := proxy.NewAggregatorProxy(c.ProxyAddress, chain.Client) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to load AggregatorProxy: %w", err) + } + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := aggregatorProxy.ProposeAggregator(txOpt, c.NewAggregatorAddress) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to execute ProposeAggregator: %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to propose a new aggregator", c.ChainSelector, []ProposalData{ + { + contract: aggregatorProxy.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func proposeAggregatorPrecondition(env deployment.Environment, c types.ProposeConfirmAggregatorConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return nil +} diff --git a/deployment/data-feeds/changeset/propose_aggregator_test.go b/deployment/data-feeds/changeset/propose_aggregator_test.go new file mode 100644 index 00000000000..b450173be1f --- /dev/null +++ b/deployment/data-feeds/changeset/propose_aggregator_test.go @@ -0,0 +1,101 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestProposeAggregator(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + // without MCMS + 
newEnv, err := commonChangesets.Apply(t, env, nil, + // Deploy cache and aggregator proxy + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + changeset.DeployAggregatorProxyChangeset, + types.DeployAggregatorProxyConfig{ + ChainsToDeploy: []uint64{chainSelector}, + AccessController: []common.Address{common.HexToAddress("0x")}, + }, + ), + ) + require.NoError(t, err) + + proxyAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "AggregatorProxy") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Propose a new aggregator + commonChangesets.Configure( + changeset.ProposeAggregatorChangeset, + types.ProposeConfirmAggregatorConfig{ + ChainSelector: chainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress("0x123"), + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + // with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // transfer proxy ownership to timelock + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(proxyAddress)}, + }, + MinDelay: 0, + }, + ), + commonChangesets.Configure( + changeset.ProposeAggregatorChangeset, + types.ProposeConfirmAggregatorConfig{ + ChainSelector: chainSelector, + ProxyAddress: common.HexToAddress(proxyAddress), + NewAggregatorAddress: common.HexToAddress("0x123"), + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/remove_dataid_proxy_mapping.go b/deployment/data-feeds/changeset/remove_dataid_proxy_mapping.go new file mode 100644 index 00000000000..525bb053080 --- /dev/null +++ b/deployment/data-feeds/changeset/remove_dataid_proxy_mapping.go @@ -0,0 +1,69 @@ +package changeset + +import ( + "errors" + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// RemoveFeedProxyMappingChangeset is a changeset that only removes a feed-aggregator proxy mapping from DataFeedsCache contract. +// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. 
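+//
+// A minimal usage sketch (illustrative only; the proxy address and zero MinDelay are
+// placeholders taken from the tests, and cacheAddress is assumed to be an already
+// deployed DataFeedsCache address):
+//
+//	out, err := RemoveFeedProxyMappingChangeset.Apply(env, types.RemoveFeedProxyConfig{
+//		ChainSelector:  chainSelector,
+//		CacheAddress:   common.HexToAddress(cacheAddress),
+//		ProxyAddresses: []common.Address{common.HexToAddress("0x11")},
+//		McmsConfig:     &types.MCMSConfig{MinDelay: 0},
+//	})
+//	// With McmsConfig set, out.MCMSTimelockProposals carries the timelock proposal
+//	// instead of executing the removal onchain.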
+var RemoveFeedProxyMappingChangeset = deployment.CreateChangeSet(removeFeedProxyMappingLogic, removeFeedFeedProxyMappingPrecondition) + +func removeFeedProxyMappingLogic(env deployment.Environment, c types.RemoveFeedProxyConfig) (deployment.ChangesetOutput, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[c.ChainSelector] + chainState := state.Chains[c.ChainSelector] + contract := chainState.DataFeedsCache[c.CacheAddress] + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := contract.RemoveDataIdMappingsForProxies(txOpt, c.ProxyAddresses) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to remove feed proxy mapping %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to remove a feed proxy mapping from cache", c.ChainSelector, []ProposalData{ + { + contract: contract.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func removeFeedFeedProxyMappingPrecondition(env deployment.Environment, c types.RemoveFeedProxyConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if len(c.ProxyAddresses) == 0 { + return errors.New("proxy addresses must not be empty") + } + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress) +} diff --git a/deployment/data-feeds/changeset/remove_dataid_proxy_mapping_test.go b/deployment/data-feeds/changeset/remove_dataid_proxy_mapping_test.go new file mode 100644 index 00000000000..e01040dd633 --- /dev/null +++ b/deployment/data-feeds/changeset/remove_dataid_proxy_mapping_test.go @@ -0,0 +1,144 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestRemoveFeedProxyMapping(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + 
Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + dataid, _ := shared.ConvertHexToBytes16("01bb0467f50003040000000000000000") + + // without MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // set the feed admin, only admin can perform set/remove operations + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(env.Chains[chainSelector].DeployerKey.From.Hex()), + IsAdmin: true, + }, + ), + // set the feed proxy mapping + commonChangesets.Configure( + changeset.UpdateDataIDProxyChangeset, + types.UpdateDataIDProxyConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + ProxyAddresses: []common.Address{common.HexToAddress("0x11")}, + DataIDs: [][16]byte{dataid}, + }, + ), + // remove the feed proxy mapping + commonChangesets.Configure( + changeset.RemoveFeedProxyMappingChangeset, + types.RemoveFeedProxyConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + ProxyAddresses: []common.Address{common.HexToAddress("0x11")}, + }, + ), + ) + require.NoError(t, err) + + // with MCMS + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Set the admin to the timelock + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(timeLockAddress), + IsAdmin: true, + }, + ), + // Transfer cache ownership to MCMS + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + }, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + // Set and remove the feed config with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.UpdateDataIDProxyChangeset, + types.UpdateDataIDProxyConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + ProxyAddresses: []common.Address{common.HexToAddress("0x11")}, + DataIDs: [][16]byte{dataid}, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + commonChangesets.Configure( + changeset.RemoveFeedProxyMappingChangeset, + types.RemoveFeedProxyConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + ProxyAddresses: []common.Address{common.HexToAddress("0x11")}, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/remove_feed.go b/deployment/data-feeds/changeset/remove_feed.go new file mode 100644 index 00000000000..6b140c2d717 --- /dev/null +++ b/deployment/data-feeds/changeset/remove_feed.go @@ -0,0 +1,92 @@ +package changeset + +import 
(
+	"errors"
+	"fmt"
+
+	mcmslib "github.com/smartcontractkit/mcms"
+
+	"github.com/smartcontractkit/chainlink/deployment"
+	"github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types"
+)
+
+// RemoveFeedChangeset is a changeset that removes a feed configuration and aggregator proxy mapping from DataFeedsCache contract.
+// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transactions with the deployer key.
+var RemoveFeedChangeset = deployment.CreateChangeSet(removeFeedLogic, removeFeedPrecondition)
+
+func removeFeedLogic(env deployment.Environment, c types.RemoveFeedConfig) (deployment.ChangesetOutput, error) {
+	state, _ := LoadOnchainState(env)
+	chain := env.Chains[c.ChainSelector]
+	chainState := state.Chains[c.ChainSelector]
+	contract := chainState.DataFeedsCache[c.CacheAddress]
+
+	txOpt := chain.DeployerKey
+	if c.McmsConfig != nil {
+		txOpt = deployment.SimTransactOpts()
+	}
+
+	// remove the feed config
+	removeConfigTx, err := contract.RemoveFeedConfigs(txOpt, c.DataIDs)
+	if err != nil {
+		return deployment.ChangesetOutput{}, fmt.Errorf("failed to remove feed config: %w", err)
+	}
+
+	if c.McmsConfig == nil {
+		_, err = chain.Confirm(removeConfigTx)
+		if err != nil {
+			return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", removeConfigTx.Hash().String(), err)
+		}
+	}
+
+	// remove from proxy mapping
+	removeProxyMappingTx, err := contract.RemoveDataIdMappingsForProxies(txOpt, c.ProxyAddresses)
+	if err != nil {
+		return deployment.ChangesetOutput{}, fmt.Errorf("failed to remove proxy mapping: %w", err)
+	}
+
+	if c.McmsConfig == nil {
+		_, err = chain.Confirm(removeProxyMappingTx)
+		if err != nil {
+			return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", removeProxyMappingTx.Hash().String(), err)
+		}
+		return deployment.ChangesetOutput{}, nil
+	}
+
+	txs := []ProposalData{
+		{
+			contract: contract.Address().Hex(),
+			tx:       removeConfigTx,
+		},
+		{
+			contract: contract.Address().Hex(),
+			tx:       removeProxyMappingTx,
+		},
+	}
+	proposal, err := BuildMCMProposals(env, "proposal to remove a feed from cache", c.ChainSelector, txs, c.McmsConfig.MinDelay)
+	if err != nil {
+		return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err)
+	}
+	return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil
+}
+
+func removeFeedPrecondition(env deployment.Environment, c types.RemoveFeedConfig) error {
+	_, ok := env.Chains[c.ChainSelector]
+	if !ok {
+		return fmt.Errorf("chain not found in env %d", c.ChainSelector)
+	}
+
+	if (len(c.DataIDs) == 0) || (len(c.ProxyAddresses) == 0) {
+		return errors.New("dataIDs and proxy addresses must not be empty")
+	}
+	if len(c.DataIDs) != len(c.ProxyAddresses) {
+		return errors.New("dataIDs and proxy addresses must have the same length")
+	}
+
+	if c.McmsConfig != nil {
+		if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil {
+			return err
+		}
+	}
+
+	return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress)
+}
diff --git a/deployment/data-feeds/changeset/remove_feed_config.go b/deployment/data-feeds/changeset/remove_feed_config.go
new file mode 100644
index 00000000000..eb3b813f2f1
--- /dev/null
+++ b/deployment/data-feeds/changeset/remove_feed_config.go
@@ -0,0 +1,64 @@
+package changeset
+
+import (
+	"errors"
+	"fmt"
+
+	mcmslib "github.com/smartcontractkit/mcms"
+
+	"github.com/smartcontractkit/chainlink/deployment"
+	
"github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// RemoveFeedConfigChangeset is a changeset that only removes a feed configuration from DataFeedsCache contract. +// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. +var RemoveFeedConfigChangeset = deployment.CreateChangeSet(removeFeedConfigLogic, removeFeedConfigPrecondition) + +func removeFeedConfigLogic(env deployment.Environment, c types.RemoveFeedConfigCSConfig) (deployment.ChangesetOutput, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[c.ChainSelector] + chainState := state.Chains[c.ChainSelector] + contract := chainState.DataFeedsCache[c.CacheAddress] + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := contract.RemoveFeedConfigs(txOpt, c.DataIDs) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to remove feed config %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to remove a feed config from cache", c.ChainSelector, []ProposalData{ + { + contract: contract.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func removeFeedConfigPrecondition(env deployment.Environment, c types.RemoveFeedConfigCSConfig) error { + if len(c.DataIDs) == 0 { + return errors.New("dataIDs must not be empty") + } + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress) +} diff --git a/deployment/data-feeds/changeset/remove_feed_config_test.go b/deployment/data-feeds/changeset/remove_feed_config_test.go new file mode 100644 index 00000000000..9223e831b8b --- /dev/null +++ b/deployment/data-feeds/changeset/remove_feed_config_test.go @@ -0,0 +1,160 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestRemoveFeedConfig(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector 
:= env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + dataid, _ := shared.ConvertHexToBytes16("01bb0467f50003040000000000000000") + + // without MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // set the feed admin, only admin can perform set/remove operations + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(env.Chains[chainSelector].DeployerKey.From.Hex()), + IsAdmin: true, + }, + ), + // set the feed config + commonChangesets.Configure( + changeset.SetFeedConfigChangeset, + types.SetFeedDecimalConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + Descriptions: []string{"test"}, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + }, + ), + // remove the feed config + commonChangesets.Configure( + changeset.RemoveFeedConfigChangeset, + types.RemoveFeedConfigCSConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + }, + ), + ) + require.NoError(t, err) + + // with MCMS + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Set the admin to the timelock + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(timeLockAddress), + IsAdmin: true, + }, + ), + // Transfer cache ownership to MCMS + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + }, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + // Set and remove the feed config with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedConfigChangeset, + types.SetFeedDecimalConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + Descriptions: []string{"test2"}, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), 
+ commonChangesets.Configure( + changeset.RemoveFeedConfigChangeset, + types.RemoveFeedConfigCSConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/remove_feed_test.go b/deployment/data-feeds/changeset/remove_feed_test.go new file mode 100644 index 00000000000..6f611af876e --- /dev/null +++ b/deployment/data-feeds/changeset/remove_feed_test.go @@ -0,0 +1,162 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestRemoveFeed(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + dataid, _ := shared.ConvertHexToBytes16("01bb0467f50003040000000000000000") + + // without MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // set the feed admin, only admin can perform set/remove operations + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(env.Chains[chainSelector].DeployerKey.From.Hex()), + IsAdmin: true, + }, + ), + // set the feed config + commonChangesets.Configure( + changeset.SetFeedConfigChangeset, + types.SetFeedDecimalConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + Descriptions: []string{"test"}, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + }, + ), + // remove the feed config + commonChangesets.Configure( + changeset.RemoveFeedChangeset, + 
types.RemoveFeedConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + ProxyAddresses: []common.Address{common.HexToAddress("0x123")}, + }, + ), + ) + require.NoError(t, err) + + // with MCMS + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Set the admin to the timelock + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(timeLockAddress), + IsAdmin: true, + }, + ), + // Transfer cache ownership to MCMS + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + }, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + // Set and remove the feed config with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedConfigChangeset, + types.SetFeedDecimalConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + Descriptions: []string{"test2"}, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + commonChangesets.Configure( + changeset.RemoveFeedChangeset, + types.RemoveFeedConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + ProxyAddresses: []common.Address{common.HexToAddress("0x123")}, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/set_feed_admin.go b/deployment/data-feeds/changeset/set_feed_admin.go new file mode 100644 index 00000000000..7ef083c8e3e --- /dev/null +++ b/deployment/data-feeds/changeset/set_feed_admin.go @@ -0,0 +1,65 @@ +package changeset + +import ( + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// SetFeedAdminChangeset is a changeset that sets/removes an admin on DataFeedsCache contract. +// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. 
+var SetFeedAdminChangeset = deployment.CreateChangeSet(setFeedAdminLogic, setFeedAdminPrecondition) + +func setFeedAdminLogic(env deployment.Environment, c types.SetFeedAdminConfig) (deployment.ChangesetOutput, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[c.ChainSelector] + chainState := state.Chains[c.ChainSelector] + contract := chainState.DataFeedsCache[c.CacheAddress] + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := contract.SetFeedAdmin(txOpt, c.AdminAddress, c.IsAdmin) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to set feed admin %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to set feed admin on a cache", c.ChainSelector, []ProposalData{ + { + contract: contract.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func setFeedAdminPrecondition(env deployment.Environment, c types.SetFeedAdminConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress) +} diff --git a/deployment/data-feeds/changeset/set_feed_admin_test.go b/deployment/data-feeds/changeset/set_feed_admin_test.go new file mode 100644 index 00000000000..320feffc81c --- /dev/null +++ b/deployment/data-feeds/changeset/set_feed_admin_test.go @@ -0,0 +1,98 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestSetCacheAdmin(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector := env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + 
cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + // without MCMS + resp, err := commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress("0x123"), + IsAdmin: true, + }, + ), + ) + require.NoError(t, err) + require.NotNil(t, resp) + + // with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + }, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + resp, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress("0x123"), + IsAdmin: true, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) + require.NotNil(t, resp) +} diff --git a/deployment/data-feeds/changeset/set_feed_config.go b/deployment/data-feeds/changeset/set_feed_config.go new file mode 100644 index 00000000000..ea83d24d119 --- /dev/null +++ b/deployment/data-feeds/changeset/set_feed_config.go @@ -0,0 +1,73 @@ +package changeset + +import ( + "errors" + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// SetFeedConfigChangeset is a changeset that sets a feed configuration on DataFeedsCache contract. +// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. 
+var SetFeedConfigChangeset = deployment.CreateChangeSet(setFeedConfigLogic, setFeedConfigPrecondition) + +func setFeedConfigLogic(env deployment.Environment, c types.SetFeedDecimalConfig) (deployment.ChangesetOutput, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[c.ChainSelector] + chainState := state.Chains[c.ChainSelector] + contract := chainState.DataFeedsCache[c.CacheAddress] + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := contract.SetDecimalFeedConfigs(txOpt, c.DataIDs, c.Descriptions, c.WorkflowMetadata) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to set feed config %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to set feed config on a cache", c.ChainSelector, []ProposalData{ + { + contract: contract.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func setFeedConfigPrecondition(env deployment.Environment, c types.SetFeedDecimalConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if (len(c.DataIDs) == 0) || (len(c.Descriptions) == 0) || (len(c.WorkflowMetadata) == 0) { + return errors.New("dataIDs, descriptions and workflowMetadata must not be empty") + } + if len(c.DataIDs) != len(c.Descriptions) { + return errors.New("dataIDs and descriptions must have the same length") + } + + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress) +} diff --git a/deployment/data-feeds/changeset/set_feed_config_test.go b/deployment/data-feeds/changeset/set_feed_config_test.go new file mode 100644 index 00000000000..fcc9b542d0e --- /dev/null +++ b/deployment/data-feeds/changeset/set_feed_config_test.go @@ -0,0 +1,138 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestSetFeedConfig(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + chainSelector 
:= env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + dataid, _ := shared.ConvertHexToBytes16("01bb0467f50003040000000000000000") + + // without MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(env.Chains[chainSelector].DeployerKey.From.Hex()), + IsAdmin: true, + }, + ), + commonChangesets.Configure( + changeset.SetFeedConfigChangeset, + types.SetFeedDecimalConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + Descriptions: []string{"test"}, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + }, + ), + ) + require.NoError(t, err) + + // with MCMS + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Set the admin to the timelock + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(timeLockAddress), + IsAdmin: true, + }, + ), + // Transfer cache ownership to MCMS + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + }, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + // Set the feed config with MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedConfigChangeset, + types.SetFeedDecimalConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + DataIDs: [][16]byte{dataid}, + Descriptions: []string{"test2"}, + WorkflowMetadata: []cache.DataFeedsCacheWorkflowMetadata{ + cache.DataFeedsCacheWorkflowMetadata{ + AllowedSender: common.HexToAddress("0x22"), + AllowedWorkflowOwner: common.HexToAddress("0x33"), + AllowedWorkflowName: shared.HashedWorkflowName("test"), + }, + }, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/state.go b/deployment/data-feeds/changeset/state.go new file mode 100644 index 00000000000..358c41cef9c --- /dev/null +++ b/deployment/data-feeds/changeset/state.go @@ -0,0 +1,143 @@ +package changeset + +import ( + "fmt" + "strconv" + "strings" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment" + commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/view" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/view/v1_0" + proxy "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/aggregator_proxy" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" +) + +var ( + DataFeedsCache deployment.ContractType = "DataFeedsCache" +) + +type DataFeedsChainState struct { + commonchangeset.MCMSWithTimelockState + DataFeedsCache map[common.Address]*cache.DataFeedsCache + AggregatorProxy map[common.Address]*proxy.AggregatorProxy +} + +type DataFeedsOnChainState struct { + Chains map[uint64]DataFeedsChainState +} + +func LoadOnchainState(e deployment.Environment) (DataFeedsOnChainState, error) { + state := DataFeedsOnChainState{ + Chains: make(map[uint64]DataFeedsChainState), + } + for chainSelector, chain := range e.Chains { + addresses, err := e.ExistingAddresses.AddressesForChain(chainSelector) + if err != nil { + // Chain not found in address book, initialize empty + if !errors.Is(err, deployment.ErrChainNotFound) { + return state, err + } + addresses = make(map[string]deployment.TypeAndVersion) + } + chainState, err := LoadChainState(e.Logger, chain, addresses) + if err != nil { + return state, err + } + state.Chains[chainSelector] = *chainState + } + return state, nil +} + +// LoadChainState Loads all state for a chain into state +func LoadChainState(logger logger.Logger, chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*DataFeedsChainState, error) { + var state DataFeedsChainState + + mcmsWithTimelock, err := commonchangeset.MaybeLoadMCMSWithTimelockChainState(chain, addresses) + if err != nil { + return nil, fmt.Errorf("failed to load mcms contract: %w", err) + } + state.MCMSWithTimelockState = *mcmsWithTimelock + + dfCacheTV := deployment.NewTypeAndVersion(DataFeedsCache, deployment.Version1_0_0) + dfCacheTV.Labels.Add("data-feeds") + + devPlatformCacheTV := deployment.NewTypeAndVersion(DataFeedsCache, deployment.Version1_0_0) + devPlatformCacheTV.Labels.Add("dev-platform") + + state.DataFeedsCache = make(map[common.Address]*cache.DataFeedsCache) + state.AggregatorProxy = make(map[common.Address]*proxy.AggregatorProxy) + + for address, tv := range addresses { + switch { + case tv.String() == dfCacheTV.String() || tv.String() == devPlatformCacheTV.String(): + contract, err := cache.NewDataFeedsCache(common.HexToAddress(address), chain.Client) + if err != nil { + return &state, err + } + state.DataFeedsCache[common.HexToAddress(address)] = contract + case strings.Contains(tv.String(), "AggregatorProxy"): + contract, err := proxy.NewAggregatorProxy(common.HexToAddress(address), chain.Client) + if err != nil { + return &state, err + } + state.AggregatorProxy[common.HexToAddress(address)] = contract + default: + logger.Warnw("unknown contract type", "type", tv.Type) + } + } + return &state, nil +} + +func (s DataFeedsOnChainState) View(chains []uint64) (map[string]view.ChainView, error) { + m := make(map[string]view.ChainView) + for _, chainSelector := range chains { + chainInfo, err := deployment.ChainInfo(chainSelector) + if err != nil { + return m, err + } + if _, ok := s.Chains[chainSelector]; !ok { + 
return m, fmt.Errorf("chain not supported %d", chainSelector) + } + chainState := s.Chains[chainSelector] + chainView, err := chainState.GenerateView() + if err != nil { + return m, err + } + name := chainInfo.ChainName + if chainInfo.ChainName == "" { + name = strconv.FormatUint(chainSelector, 10) + } + m[name] = chainView + } + return m, nil +} + +func (c DataFeedsChainState) GenerateView() (view.ChainView, error) { + chainView := view.NewChain() + if c.DataFeedsCache != nil { + for _, cache := range c.DataFeedsCache { + fmt.Println(cache.Address().Hex()) + cacheView, err := v1_0.GenerateDataFeedsCacheView(cache) + if err != nil { + return chainView, errors.Wrapf(err, "failed to generate cache view %s", cache.Address().String()) + } + chainView.DataFeedsCache[cache.Address().Hex()] = cacheView + } + } + if c.AggregatorProxy != nil { + for _, proxy := range c.AggregatorProxy { + proxyView, err := v1_0.GenerateAggregatorProxyView(proxy) + if err != nil { + return chainView, errors.Wrapf(err, "failed to generate proxy view %s", proxy.Address().String()) + } + chainView.AggregatorProxy[proxy.Address().Hex()] = proxyView + } + } + return chainView, nil +} diff --git a/deployment/data-feeds/changeset/testdata/import_addresses.json b/deployment/data-feeds/changeset/testdata/import_addresses.json new file mode 100644 index 00000000000..5dafd6b24bd --- /dev/null +++ b/deployment/data-feeds/changeset/testdata/import_addresses.json @@ -0,0 +1,18 @@ +[ + { + "address": "0x33442400910b7B03316fe47eF8fC7bEd54Bca407", + "description": "TEST / USD", + "typeAndVersion": { + "type": "AggregatorProxy", + "version": "1.0.0" + } + }, + { + "address": "0x43442400910b7B03316fe47eF8fC7bEd54Bca407", + "description": "LINK / USD", + "typeAndVersion": { + "type": "AggregatorProxy", + "version": "1.0.0" + } + } +] \ No newline at end of file diff --git a/deployment/data-feeds/changeset/testdata/migrate_feeds.json b/deployment/data-feeds/changeset/testdata/migrate_feeds.json new file mode 100644 index 00000000000..83fca1cb0b1 --- /dev/null +++ b/deployment/data-feeds/changeset/testdata/migrate_feeds.json @@ -0,0 +1,20 @@ +[ + { + "address": "0x33442400910b7B03316fe47eF8fC7bEd54Bca407", + "feedId": "01bb0467f50003040000000000000000", + "description": "TEST / USD", + "typeAndVersion": { + "type": "AggregatorProxy", + "version": "1.0.0" + } + }, + { + "address": "0x43442400910b7B03316fe47eF8fC7bEd54Bca407", + "feedId": "01b40467f50003040000000000000000", + "description": "LINK / USD", + "typeAndVersion": { + "type": "AggregatorProxy", + "version": "1.0.0" + } + } +] \ No newline at end of file diff --git a/deployment/data-feeds/changeset/types/types.go b/deployment/data-feeds/changeset/types/types.go new file mode 100644 index 00000000000..ff5b396f6c5 --- /dev/null +++ b/deployment/data-feeds/changeset/types/types.go @@ -0,0 +1,133 @@ +package types + +import ( + "embed" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/deployment" + proxy "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/aggregator_proxy" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" +) + +type MCMSConfig struct { + MinDelay time.Duration // delay for timelock worker to execute the transfers. 
+} + +type AddressType string + +type DeployCacheResponse struct { + Address common.Address + Tx common.Hash + Tv deployment.TypeAndVersion + Contract *cache.DataFeedsCache +} + +type DeployConfig struct { + ChainsToDeploy []uint64 // Chain Selectors + Labels []string // Labels for the cache, applies to all chains +} + +type DeployAggregatorProxyConfig struct { + ChainsToDeploy []uint64 // Chain Selectors + AccessController []common.Address // AccessController addresses per chain + Labels []string // Labels for the cache, applies to all chains +} + +type DeployBundleAggregatorProxyConfig struct { + ChainsToDeploy []uint64 // Chain Selectors + MCMSAddressesPath string // Path to the MCMS addresses JSON file, per chain + InputFS embed.FS // Filesystem to read MCMS addresses JSON file +} + +type DeployProxyResponse struct { + Address common.Address + Tx common.Hash + Tv deployment.TypeAndVersion + Contract *proxy.AggregatorProxy +} + +type SetFeedAdminConfig struct { + ChainSelector uint64 + CacheAddress common.Address + AdminAddress common.Address + IsAdmin bool + McmsConfig *MCMSConfig +} + +type ProposeConfirmAggregatorConfig struct { + ChainSelector uint64 + ProxyAddress common.Address + NewAggregatorAddress common.Address + McmsConfig *MCMSConfig +} + +type SetFeedDecimalConfig struct { + ChainSelector uint64 + CacheAddress common.Address + DataIDs [][16]byte // without the 0x prefix + Descriptions []string + WorkflowMetadata []cache.DataFeedsCacheWorkflowMetadata + McmsConfig *MCMSConfig +} + +type RemoveFeedConfig struct { + ChainSelector uint64 + CacheAddress common.Address + ProxyAddresses []common.Address + DataIDs [][16]byte // without the 0x prefix + McmsConfig *MCMSConfig +} + +type RemoveFeedConfigCSConfig struct { + ChainSelector uint64 + CacheAddress common.Address + DataIDs [][16]byte // without the 0x prefix + McmsConfig *MCMSConfig +} + +type UpdateDataIDProxyConfig struct { + ChainSelector uint64 + CacheAddress common.Address + ProxyAddresses []common.Address + DataIDs [][16]byte + McmsConfig *MCMSConfig +} + +type RemoveFeedProxyConfig struct { + ChainSelector uint64 + CacheAddress common.Address + ProxyAddresses []common.Address + McmsConfig *MCMSConfig +} + +type ImportToAddressbookConfig struct { + InputFileName string + ChainSelector uint64 + InputFS embed.FS +} + +type MigrationConfig struct { + InputFileName string + CacheAddress common.Address + ChainSelector uint64 + InputFS embed.FS + WorkflowMetadata []cache.DataFeedsCacheWorkflowMetadata +} + +type AcceptOwnershipConfig struct { + ContractAddress common.Address + ChainSelector uint64 + McmsConfig *MCMSConfig +} + +type NewFeedWithProxyConfig struct { + ChainSelector uint64 + AccessController common.Address + Labels []string // labels for AggregatorProxy + DataID [16]byte // without the 0x prefix + Description string + WorkflowMetadata []cache.DataFeedsCacheWorkflowMetadata + McmsConfig *MCMSConfig +} diff --git a/deployment/data-feeds/changeset/update_data_id_proxy.go b/deployment/data-feeds/changeset/update_data_id_proxy.go new file mode 100644 index 00000000000..95115e3ca06 --- /dev/null +++ b/deployment/data-feeds/changeset/update_data_id_proxy.go @@ -0,0 +1,73 @@ +package changeset + +import ( + "errors" + "fmt" + + mcmslib "github.com/smartcontractkit/mcms" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" +) + +// UpdateDataIDProxyChangeset is a changeset that updates the proxy-dataId mapping on DataFeedsCache contract. 
+// This changeset may return a timelock proposal if the MCMS config is provided, otherwise it will execute the transaction with the deployer key. +var UpdateDataIDProxyChangeset = deployment.CreateChangeSet(updateDataIDProxyLogic, updateDataIDProxyPrecondition) + +func updateDataIDProxyLogic(env deployment.Environment, c types.UpdateDataIDProxyConfig) (deployment.ChangesetOutput, error) { + state, _ := LoadOnchainState(env) + chain := env.Chains[c.ChainSelector] + chainState := state.Chains[c.ChainSelector] + contract := chainState.DataFeedsCache[c.CacheAddress] + + txOpt := chain.DeployerKey + if c.McmsConfig != nil { + txOpt = deployment.SimTransactOpts() + } + + tx, err := contract.UpdateDataIdMappingsForProxies(txOpt, c.ProxyAddresses, c.DataIDs) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to set proxy-dataId mapping %w", err) + } + + if c.McmsConfig != nil { + proposal, err := BuildMCMProposals(env, "proposal to update proxy-dataId mapping on a cache", c.ChainSelector, []ProposalData{ + { + contract: contract.Address().Hex(), + tx: tx, + }, + }, c.McmsConfig.MinDelay) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{MCMSTimelockProposals: []mcmslib.TimelockProposal{*proposal}}, nil + } + _, err = chain.Confirm(tx) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm transaction: %s, %w", tx.Hash().String(), err) + } + + return deployment.ChangesetOutput{}, nil +} + +func updateDataIDProxyPrecondition(env deployment.Environment, c types.UpdateDataIDProxyConfig) error { + _, ok := env.Chains[c.ChainSelector] + if !ok { + return fmt.Errorf("chain not found in env %d", c.ChainSelector) + } + + if len(c.ProxyAddresses) == 0 || len(c.DataIDs) == 0 { + return errors.New("empty proxies or dataIds") + } + if len(c.DataIDs) != len(c.ProxyAddresses) { + return errors.New("dataIds and proxies length mismatch") + } + + if c.McmsConfig != nil { + if err := ValidateMCMSAddresses(env.ExistingAddresses, c.ChainSelector); err != nil { + return err + } + } + + return ValidateCacheForChain(env, c.ChainSelector, c.CacheAddress) +} diff --git a/deployment/data-feeds/changeset/update_data_id_proxy_test.go b/deployment/data-feeds/changeset/update_data_id_proxy_test.go new file mode 100644 index 00000000000..779db3a37be --- /dev/null +++ b/deployment/data-feeds/changeset/update_data_id_proxy_test.go @@ -0,0 +1,122 @@ +package changeset_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + commonChangesets "github.com/smartcontractkit/chainlink/deployment/common/changeset" + "github.com/smartcontractkit/chainlink/deployment/common/proposalutils" + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" + "github.com/smartcontractkit/chainlink/deployment/data-feeds/shared" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" +) + +func TestUpdateDataIDProxyMap(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + cfg := memory.MemoryEnvironmentConfig{ + Nodes: 1, + Chains: 1, + } + env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg) + + 
chainSelector := env.AllChainSelectors()[0] + + newEnv, err := commonChangesets.Apply(t, env, nil, + commonChangesets.Configure( + changeset.DeployCacheChangeset, + types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, + }, + ), + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.DeployMCMSWithTimelockV2), + map[uint64]commonTypes.MCMSWithTimelockConfigV2{ + chainSelector: proposalutils.SingleGroupTimelockConfigV2(t), + }, + ), + ) + require.NoError(t, err) + + cacheAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "DataFeedsCache") + require.NoError(t, err) + + dataID, _ := shared.ConvertHexToBytes16("01bb0467f50003040000000000000000") + + // without MCMS + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(env.Chains[chainSelector].DeployerKey.From.Hex()), + IsAdmin: true, + }, + ), + commonChangesets.Configure( + changeset.UpdateDataIDProxyChangeset, + types.UpdateDataIDProxyConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + ProxyAddresses: []common.Address{common.HexToAddress("0x11")}, + DataIDs: [][16]byte{dataID}, + }, + ), + ) + require.NoError(t, err) + + // with MCMS + timeLockAddress, err := deployment.SearchAddressBook(newEnv.ExistingAddresses, chainSelector, "RBACTimelock") + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + // Set the admin to the timelock + commonChangesets.Configure( + changeset.SetFeedAdminChangeset, + types.SetFeedAdminConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + AdminAddress: common.HexToAddress(timeLockAddress), + IsAdmin: true, + }, + ), + // Transfer cache ownership to MCMS + commonChangesets.Configure( + deployment.CreateLegacyChangeSet(commonChangesets.TransferToMCMSWithTimelockV2), + commonChangesets.TransferToMCMSWithTimelockConfig{ + ContractsByChain: map[uint64][]common.Address{ + chainSelector: {common.HexToAddress(cacheAddress)}, + }, + MinDelay: 0, + }, + ), + ) + require.NoError(t, err) + + newEnv, err = commonChangesets.Apply(t, newEnv, nil, + commonChangesets.Configure( + changeset.UpdateDataIDProxyChangeset, + types.UpdateDataIDProxyConfig{ + ChainSelector: chainSelector, + CacheAddress: common.HexToAddress(cacheAddress), + ProxyAddresses: []common.Address{common.HexToAddress("0x11")}, + DataIDs: [][16]byte{dataID}, + McmsConfig: &types.MCMSConfig{ + MinDelay: 0, + }, + }, + ), + ) + require.NoError(t, err) +} diff --git a/deployment/data-feeds/changeset/validation.go b/deployment/data-feeds/changeset/validation.go new file mode 100644 index 00000000000..ac9c7a758cd --- /dev/null +++ b/deployment/data-feeds/changeset/validation.go @@ -0,0 +1,45 @@ +package changeset + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + commonTypes "github.com/smartcontractkit/chainlink/deployment/common/types" + + "github.com/smartcontractkit/chainlink/deployment" +) + +func ValidateCacheForChain(env deployment.Environment, chainSelector uint64, cacheAddress common.Address) error { + state, err := LoadOnchainState(env) + if err != nil { + return fmt.Errorf("failed to load on chain state %w", err) + } + _, ok := env.Chains[chainSelector] + if !ok { + return errors.New("chain not found in 
environment") + } + chainState, ok := state.Chains[chainSelector] + if !ok { + return errors.New("chain not found in on chain state") + } + if chainState.DataFeedsCache == nil { + return errors.New("DataFeedsCache not found in on chain state") + } + _, ok = chainState.DataFeedsCache[cacheAddress] + if !ok { + return errors.New("contract not found in on chain state") + } + return nil +} + +func ValidateMCMSAddresses(ab deployment.AddressBook, chainSelector uint64) error { + if _, err := deployment.SearchAddressBook(ab, chainSelector, commonTypes.RBACTimelock); err != nil { + return fmt.Errorf("timelock not present on the chain %w", err) + } + if _, err := deployment.SearchAddressBook(ab, chainSelector, commonTypes.ProposerManyChainMultisig); err != nil { + return fmt.Errorf("mcms proposer not present on the chain %w", err) + } + return nil +} diff --git a/deployment/data-feeds/changeset/view.go b/deployment/data-feeds/changeset/view.go new file mode 100644 index 00000000000..15348e1f8e1 --- /dev/null +++ b/deployment/data-feeds/changeset/view.go @@ -0,0 +1,26 @@ +package changeset + +import ( + "encoding/json" + "fmt" + + "github.com/smartcontractkit/chainlink/deployment" + dfView "github.com/smartcontractkit/chainlink/deployment/data-feeds/view" +) + +var _ deployment.ViewState = ViewDataFeeds + +func ViewDataFeeds(e deployment.Environment) (json.Marshaler, error) { + state, err := LoadOnchainState(e) + fmt.Println(state) + if err != nil { + return nil, err + } + chainView, err := state.View(e.AllChainSelectors()) + if err != nil { + return nil, err + } + return dfView.DataFeedsView{ + Chains: chainView, + }, nil +} diff --git a/deployment/data-feeds/shared/utils.go b/deployment/data-feeds/shared/utils.go new file mode 100644 index 00000000000..b3549065f3f --- /dev/null +++ b/deployment/data-feeds/shared/utils.go @@ -0,0 +1,50 @@ +package shared + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io/fs" +) + +func LoadJSON[T any](pth string, fs fs.ReadFileFS) (T, error) { + var dflt T + f, err := fs.ReadFile(pth) + if err != nil { + return dflt, fmt.Errorf("failed to read %s: %w", pth, err) + } + var v T + err = json.Unmarshal(f, &v) + if err != nil { + return dflt, fmt.Errorf("failed to unmarshal JSON: %w", err) + } + return v, nil +} + +func ConvertHexToBytes16(hexStr string) ([16]byte, error) { + decodedBytes, err := hex.DecodeString(hexStr) + if err != nil { + return [16]byte{}, fmt.Errorf("failed to decode hex string: %w", err) + } + + var result [16]byte + copy(result[:], decodedBytes[:16]) + + return result, nil +} + +func HashedWorkflowName(name string) [10]byte { + // Compute SHA-256 hash of the input string + hash := sha256.Sum256([]byte(name)) + + // Encode as hex to ensure UTF8 + var hashBytes = hash[:] + resultHex := hex.EncodeToString(hashBytes) + + // Truncate to 10 bytes + var truncated [10]byte + copy(truncated[:], []byte(resultHex)[:10]) + + return truncated +} diff --git a/deployment/data-feeds/view/v1_0/cache_contract.go b/deployment/data-feeds/view/v1_0/cache_contract.go new file mode 100644 index 00000000000..5fd06c046c4 --- /dev/null +++ b/deployment/data-feeds/view/v1_0/cache_contract.go @@ -0,0 +1,28 @@ +package v1_0 + +import ( + "errors" + "fmt" + + "github.com/smartcontractkit/chainlink/deployment/common/view/types" + cache "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/data_feeds_cache" +) + +type CacheView struct { + types.ContractMetaData +} + +// GenerateDataFeedsCacheView generates a CacheView from a 
DataFeedsCache contract. +func GenerateDataFeedsCacheView(cache *cache.DataFeedsCache) (CacheView, error) { + if cache == nil { + return CacheView{}, errors.New("cannot generate view for nil DataFeedsCache") + } + meta, err := types.NewContractMetaData(cache, cache.Address()) + if err != nil { + return CacheView{}, fmt.Errorf("failed to generate contract metadata for DataFeedsCache: %w", err) + } + + return CacheView{ + ContractMetaData: meta, + }, nil +} diff --git a/deployment/data-feeds/view/v1_0/proxy_contract.go b/deployment/data-feeds/view/v1_0/proxy_contract.go new file mode 100644 index 00000000000..96d4cb25f79 --- /dev/null +++ b/deployment/data-feeds/view/v1_0/proxy_contract.go @@ -0,0 +1,48 @@ +package v1_0 + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + proxy "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/data-feeds/generated/aggregator_proxy" +) + +type ProxyView struct { + TypeAndVersion string `json:"typeAndVersion,omitempty"` + Address common.Address `json:"address,omitempty"` + Owner common.Address `json:"owner,omitempty"` + Description string `json:"description,omitempty"` + Aggregator common.Address `json:"aggregator,omitempty"` +} + +// GenerateAggregatorProxyView generates a ProxyView from a AggregatorProxy contract. +func GenerateAggregatorProxyView(proxy *proxy.AggregatorProxy) (ProxyView, error) { + if proxy == nil { + return ProxyView{}, errors.New("cannot generate view for nil AggregatorProxy") + } + + description, err := proxy.Description(nil) + if err != nil { + return ProxyView{}, fmt.Errorf("failed to get description for AggregatorProxy: %w", err) + } + + owner, err := proxy.Owner(nil) + if err != nil { + return ProxyView{}, fmt.Errorf("failed to get owner for AggregatorProxy: %w", err) + } + + aggregator, err := proxy.Aggregator(nil) + if err != nil { + return ProxyView{}, fmt.Errorf("failed to get aggregator for AggregatorProxy: %w", err) + } + + return ProxyView{ + Address: proxy.Address(), + Owner: owner, + Description: description, + TypeAndVersion: "AggregatorProxy 1.0.0", + Aggregator: aggregator, + }, nil +} diff --git a/deployment/data-feeds/view/view.go b/deployment/data-feeds/view/view.go new file mode 100644 index 00000000000..ef5ae9926c9 --- /dev/null +++ b/deployment/data-feeds/view/view.go @@ -0,0 +1,31 @@ +package view + +import ( + "encoding/json" + + "github.com/smartcontractkit/chainlink/deployment/data-feeds/view/v1_0" +) + +type ChainView struct { + // v1.0 + DataFeedsCache map[string]v1_0.CacheView `json:"dataFeedsCache,omitempty"` + AggregatorProxy map[string]v1_0.ProxyView `json:"aggregatorProxy,omitempty"` +} + +func NewChain() ChainView { + return ChainView{ + // v1.0 + DataFeedsCache: make(map[string]v1_0.CacheView), + AggregatorProxy: make(map[string]v1_0.ProxyView), + } +} + +type DataFeedsView struct { + Chains map[string]ChainView `json:"chains,omitempty"` +} + +func (v DataFeedsView) MarshalJSON() ([]byte, error) { + // Alias to avoid recursive calls + type Alias DataFeedsView + return json.MarshalIndent(&struct{ Alias }{Alias: Alias(v)}, "", " ") +} From f4ad216eb2826da25304f488e8363a4eb7dc46d5 Mon Sep 17 00:00:00 2001 From: Makram Date: Thu, 27 Feb 2025 12:53:04 +0200 Subject: [PATCH 15/17] core/capabilities/ccip/launcher: fix not a member updateDON (#16604) --- core/capabilities/ccip/launcher/launcher.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/capabilities/ccip/launcher/launcher.go b/core/capabilities/ccip/launcher/launcher.go index 
5e4aef934a0..4de56e1c788 100644 --- a/core/capabilities/ccip/launcher/launcher.go +++ b/core/capabilities/ccip/launcher/launcher.go @@ -223,6 +223,10 @@ func (l *launcher) processUpdate(ctx context.Context, updated map[registrysyncer if err != nil { return err } + if len(newPlugins) == 0 { + // not a member of this DON. + continue + } err = newPlugins.TransitionFrom(prevPlugins) if err != nil { @@ -314,7 +318,8 @@ func updateDON( latestConfigs []ccipreader.OCR3ConfigWithMeta, ) (pluginRegistry, error) { if !isMemberOfDON(don, p2pID) { - lggr.Infow("Not a member of this DON, skipping", "donId", don.ID, "p2pId", p2pID.String()) + lggr.Infow("Not a member of this DON, skipping", "donID", don.ID, "p2pID", p2pID.String()) + return nil, nil } newP := make(pluginRegistry) @@ -349,7 +354,7 @@ func createDON( configs []ccipreader.OCR3ConfigWithMeta, ) (pluginRegistry, error) { if !isMemberOfDON(don, p2pID) && oracleCreator.Type() == cctypes.OracleTypePlugin { - lggr.Infow("Not a member of this DON and not a bootstrap node either, skipping", "donId", don.ID, "p2pId", p2pID.String()) + lggr.Infow("Not a member of this DON and not a bootstrap node either, skipping", "donID", don.ID, "p2pID", p2pID.String()) return nil, nil } p := make(pluginRegistry) From 56c2182623d35995c034b765c41e4154df690331 Mon Sep 17 00:00:00 2001 From: tt-cll <141346969+tt-cll@users.noreply.github.com> Date: Thu, 27 Feb 2025 07:48:52 -0500 Subject: [PATCH 16/17] add solana token pool config update ixns (#16593) * add token pool ixns * liquidity * lint * cr comments --- .../changeset/solana/cs_chain_contracts.go | 2 + .../solana/cs_chain_contracts_test.go | 93 --- .../ccip/changeset/solana/cs_token_pool.go | 623 ++++++++++++++++-- .../changeset/solana/cs_token_pool_test.go | 305 +++++++++ ...ransfer_ccip_to_mcms_with_timelock_test.go | 1 - .../changeset/testhelpers/test_helpers.go | 1 - 6 files changed, 878 insertions(+), 147 deletions(-) create mode 100644 deployment/ccip/changeset/solana/cs_token_pool_test.go diff --git a/deployment/ccip/changeset/solana/cs_chain_contracts.go b/deployment/ccip/changeset/solana/cs_chain_contracts.go index a4cc5c11d9f..6f280d33565 100644 --- a/deployment/ccip/changeset/solana/cs_chain_contracts.go +++ b/deployment/ccip/changeset/solana/cs_chain_contracts.go @@ -33,6 +33,8 @@ type MCMSConfigSolana struct { RouterOwnedByTimelock bool FeeQuoterOwnedByTimelock bool OffRampOwnedByTimelock bool + // Assumes whatever token pool we're operating on + TokenPoolPDAOwnedByTimelock bool } // HELPER FUNCTIONS diff --git a/deployment/ccip/changeset/solana/cs_chain_contracts_test.go b/deployment/ccip/changeset/solana/cs_chain_contracts_test.go index 8a4fc0579d8..5d9ecefaba1 100644 --- a/deployment/ccip/changeset/solana/cs_chain_contracts_test.go +++ b/deployment/ccip/changeset/solana/cs_chain_contracts_test.go @@ -8,14 +8,11 @@ import ( "github.com/gagliardetto/solana-go" "github.com/stretchr/testify/require" - solBaseTokenPool "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/base_token_pool" solOffRamp "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/ccip_offramp" solRouter "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/ccip_router" solFeeQuoter "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/fee_quoter" - solTestTokenPool "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/test_token_pool" solCommonUtil "github.com/smartcontractkit/chainlink-ccip/chains/solana/utils/common" solState 
"github.com/smartcontractkit/chainlink-ccip/chains/solana/utils/state" - solTokenUtil "github.com/smartcontractkit/chainlink-ccip/chains/solana/utils/tokens" "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext" @@ -228,96 +225,6 @@ func TestDeployCCIPContracts(t *testing.T) { testhelpers.DeployCCIPContractsTest(t, 1) } -func TestAddTokenPool(t *testing.T) { - t.Parallel() - ctx := testcontext.Get(t) - tenv, _ := testhelpers.NewMemoryEnvironment(t, testhelpers.WithSolChains(1)) - - evmChain := tenv.Env.AllChainSelectors()[0] - solChain := tenv.Env.AllChainSelectorsSolana()[0] - e, newTokenAddress, err := deployToken(t, tenv.Env, solChain) - require.NoError(t, err) - state, err := ccipChangeset.LoadOnchainStateSolana(e) - require.NoError(t, err) - remoteConfig := solBaseTokenPool.RemoteConfig{ - PoolAddresses: []solTestTokenPool.RemoteAddress{{Address: []byte{1, 2, 3}}}, - TokenAddress: solTestTokenPool.RemoteAddress{Address: []byte{4, 5, 6}}, - Decimals: 9, - } - inboundConfig := solBaseTokenPool.RateLimitConfig{ - Enabled: true, - Capacity: uint64(1000), - Rate: 1, - } - outboundConfig := solBaseTokenPool.RateLimitConfig{ - Enabled: false, - Capacity: 0, - Rate: 0, - } - - tokenMap := map[deployment.ContractType]solana.PublicKey{ - ccipChangeset.SPL2022Tokens: newTokenAddress, - ccipChangeset.SPLTokens: state.SolChains[solChain].WSOL, - } - - type poolTestType struct { - poolType solTestTokenPool.PoolType - poolAddress solana.PublicKey - } - testCases := []poolTestType{ - { - poolType: solTestTokenPool.BurnAndMint_PoolType, - poolAddress: state.SolChains[solChain].BurnMintTokenPool, - }, - { - poolType: solTestTokenPool.LockAndRelease_PoolType, - poolAddress: state.SolChains[solChain].LockReleaseTokenPool, - }, - } - for _, testCase := range testCases { - for _, tokenAddress := range tokenMap { - e, err = commonchangeset.Apply(t, e, nil, - commonchangeset.Configure( - deployment.CreateLegacyChangeSet(ccipChangesetSolana.AddTokenPool), - ccipChangesetSolana.TokenPoolConfig{ - ChainSelector: solChain, - TokenPubKey: tokenAddress.String(), - PoolType: testCase.poolType, - // this works for testing, but if we really want some other authority we need to pass in a private key for signing purposes - Authority: tenv.Env.SolChains[solChain].DeployerKey.PublicKey().String(), - }, - ), - commonchangeset.Configure( - deployment.CreateLegacyChangeSet(ccipChangesetSolana.SetupTokenPoolForRemoteChain), - ccipChangesetSolana.RemoteChainTokenPoolConfig{ - SolChainSelector: solChain, - RemoteChainSelector: evmChain, - SolTokenPubKey: tokenAddress.String(), - RemoteConfig: remoteConfig, - InboundRateLimit: inboundConfig, - OutboundRateLimit: outboundConfig, - PoolType: testCase.poolType, - }, - ), - ) - require.NoError(t, err) - // test AddTokenPool results - configAccount := solTestTokenPool.State{} - poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenAddress, testCase.poolAddress) - err = e.SolChains[solChain].GetAccountDataBorshInto(ctx, poolConfigPDA, &configAccount) - require.NoError(t, err) - require.Equal(t, tokenAddress, configAccount.Config.Mint) - // test SetupTokenPoolForRemoteChain results - remoteChainConfigPDA, _, _ := solTokenUtil.TokenPoolChainConfigPDA(evmChain, tokenAddress, testCase.poolAddress) - var remoteChainConfigAccount solTestTokenPool.ChainConfig - err = e.SolChains[solChain].GetAccountDataBorshInto(ctx, remoteChainConfigPDA, &remoteChainConfigAccount) - require.NoError(t, err) - require.Equal(t, uint8(9), 
remoteChainConfigAccount.Base.Remote.Decimals) - } - } - -} - func TestBilling(t *testing.T) { t.Parallel() tests := []struct { diff --git a/deployment/ccip/changeset/solana/cs_token_pool.go b/deployment/ccip/changeset/solana/cs_token_pool.go index 7d85ecfa4fb..9e2f9bcc324 100644 --- a/deployment/ccip/changeset/solana/cs_token_pool.go +++ b/deployment/ccip/changeset/solana/cs_token_pool.go @@ -6,6 +6,9 @@ import ( "github.com/gagliardetto/solana-go" + "github.com/smartcontractkit/mcms" + mcmsTypes "github.com/smartcontractkit/mcms/types" + solBaseTokenPool "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/base_token_pool" solRouter "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/ccip_router" solBurnMintTokenPool "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/example_burnmint_token_pool" @@ -41,7 +44,6 @@ func validatePoolDeployment(s ccipChangeset.SolCCIPChainState, poolType solTestT type TokenPoolConfig struct { ChainSelector uint64 PoolType solTestTokenPool.PoolType - Authority string TokenPubKey string } @@ -93,7 +95,6 @@ func AddTokenPool(e deployment.Environment, cfg TokenPoolConfig) (deployment.Cha chain := e.SolChains[cfg.ChainSelector] state, _ := ccipChangeset.LoadOnchainState(e) chainState := state.SolChains[cfg.ChainSelector] - authorityPubKey := solana.MustPublicKeyFromBase58(cfg.Authority) tokenPubKey := solana.MustPublicKeyFromBase58(cfg.TokenPubKey) tokenPool := solana.PublicKey{} @@ -130,7 +131,7 @@ func AddTokenPool(e deployment.Environment, cfg TokenPoolConfig) (deployment.Cha chainState.Router, poolConfigPDA, tokenPubKey, - authorityPubKey, // this is assumed to be chain.DeployerKey for now (owner of token pool) + chain.DeployerKey.PublicKey(), // a token pool will only ever be added by the deployer key. solana.SystemProgramID, ).ValidateAndBuild() case solTestTokenPool.LockAndRelease_PoolType: @@ -139,7 +140,7 @@ func AddTokenPool(e deployment.Environment, cfg TokenPoolConfig) (deployment.Cha chainState.Router, poolConfigPDA, tokenPubKey, - authorityPubKey, // this is assumed to be chain.DeployerKey for now (owner of token pool) + chain.DeployerKey.PublicKey(), // a token pool will only ever be added by the deployer key. 
solana.SystemProgramID, ).ValidateAndBuild() default: @@ -157,7 +158,7 @@ func AddTokenPool(e deployment.Environment, cfg TokenPoolConfig) (deployment.Cha tokenprogramID, poolSigner, tokenPubKey, - authorityPubKey, + chain.DeployerKey.PublicKey(), ) if err != nil { return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) @@ -185,6 +186,8 @@ type RemoteChainTokenPoolConfig struct { RemoteConfig solBaseTokenPool.RemoteConfig InboundRateLimit solBaseTokenPool.RateLimitConfig OutboundRateLimit solBaseTokenPool.RateLimitConfig + MCMSSolana *MCMSConfigSolana + IsUpdate bool } func (cfg RemoteChainTokenPoolConfig) Validate(e deployment.Environment) error { @@ -231,8 +234,12 @@ func (cfg RemoteChainTokenPoolConfig) Validate(e deployment.Environment) error { if err != nil { return fmt.Errorf("failed to get token pool remote chain config pda (remoteSelector: %d, mint: %s, pool: %s): %w", cfg.RemoteChainSelector, tokenPubKey.String(), tokenPool.String(), err) } - if err := chain.GetAccountDataBorshInto(context.Background(), remoteChainConfigPDA, &remoteChainConfigAccount); err == nil { + err = chain.GetAccountDataBorshInto(context.Background(), remoteChainConfigPDA, &remoteChainConfigAccount) + + if !cfg.IsUpdate && err == nil { return fmt.Errorf("remote chain config already exists for (remoteSelector: %d, mint: %s, pool: %s, type: %s)", cfg.RemoteChainSelector, tokenPubKey.String(), tokenPool.String(), cfg.PoolType) + } else if cfg.IsUpdate && err != nil { + return fmt.Errorf("remote chain config not found for (remoteSelector: %d, mint: %s, pool: %s, type: %s): %w", cfg.RemoteChainSelector, tokenPubKey.String(), tokenPool.String(), cfg.PoolType, err) } return nil } @@ -278,17 +285,35 @@ func getInstructionsForBurnMint( poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, chainState.BurnMintTokenPool) remoteChainConfigPDA, _, _ := solTokenUtil.TokenPoolChainConfigPDA(cfg.RemoteChainSelector, tokenPubKey, chainState.BurnMintTokenPool) solBurnMintTokenPool.SetProgramID(chainState.BurnMintTokenPool) - ixConfigure, err := solBurnMintTokenPool.NewInitChainRemoteConfigInstruction( - cfg.RemoteChainSelector, - tokenPubKey, - cfg.RemoteConfig, - poolConfigPDA, - remoteChainConfigPDA, - chain.DeployerKey.PublicKey(), - solana.SystemProgramID, - ).ValidateAndBuild() - if err != nil { - return nil, fmt.Errorf("failed to generate instructions: %w", err) + ixns := make([]solana.Instruction, 0) + if !cfg.IsUpdate { + ixConfigure, err := solBurnMintTokenPool.NewInitChainRemoteConfigInstruction( + cfg.RemoteChainSelector, + tokenPubKey, + cfg.RemoteConfig, + poolConfigPDA, + remoteChainConfigPDA, + chain.DeployerKey.PublicKey(), + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return nil, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ixConfigure) + } else { + ixConfigure, err := solBurnMintTokenPool.NewEditChainRemoteConfigInstruction( + cfg.RemoteChainSelector, + tokenPubKey, + cfg.RemoteConfig, + poolConfigPDA, + remoteChainConfigPDA, + chain.DeployerKey.PublicKey(), + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return nil, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ixConfigure) } ixRates, err := solBurnMintTokenPool.NewSetChainRateLimitInstruction( cfg.RemoteChainSelector, @@ -303,19 +328,23 @@ func getInstructionsForBurnMint( if err != nil { return nil, fmt.Errorf("failed to generate instructions: %w", err) } - ixAppend, err := 
solBurnMintTokenPool.NewAppendRemotePoolAddressesInstruction( - cfg.RemoteChainSelector, - tokenPubKey, - cfg.RemoteConfig.PoolAddresses, // i dont know why this is a list (is it for different types of pool of the same token?) - poolConfigPDA, - remoteChainConfigPDA, - chain.DeployerKey.PublicKey(), - solana.SystemProgramID, - ).ValidateAndBuild() - if err != nil { - return nil, fmt.Errorf("failed to generate instructions: %w", err) + ixns = append(ixns, ixRates) + if len(cfg.RemoteConfig.PoolAddresses) > 0 { + ixAppend, err := solBurnMintTokenPool.NewAppendRemotePoolAddressesInstruction( + cfg.RemoteChainSelector, + tokenPubKey, + cfg.RemoteConfig.PoolAddresses, // i dont know why this is a list (is it for different types of pool of the same token?) + poolConfigPDA, + remoteChainConfigPDA, + chain.DeployerKey.PublicKey(), + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return nil, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ixAppend) } - return []solana.Instruction{ixConfigure, ixRates, ixAppend}, nil + return ixns, nil } func getInstructionsForLockRelease( @@ -327,17 +356,35 @@ func getInstructionsForLockRelease( poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, chainState.LockReleaseTokenPool) remoteChainConfigPDA, _, _ := solTokenUtil.TokenPoolChainConfigPDA(cfg.RemoteChainSelector, tokenPubKey, chainState.LockReleaseTokenPool) solLockReleaseTokenPool.SetProgramID(chainState.LockReleaseTokenPool) - ixConfigure, err := solLockReleaseTokenPool.NewInitChainRemoteConfigInstruction( - cfg.RemoteChainSelector, - tokenPubKey, - cfg.RemoteConfig, - poolConfigPDA, - remoteChainConfigPDA, - chain.DeployerKey.PublicKey(), - solana.SystemProgramID, - ).ValidateAndBuild() - if err != nil { - return nil, fmt.Errorf("failed to generate instructions: %w", err) + ixns := make([]solana.Instruction, 0) + if !cfg.IsUpdate { + ixConfigure, err := solLockReleaseTokenPool.NewInitChainRemoteConfigInstruction( + cfg.RemoteChainSelector, + tokenPubKey, + cfg.RemoteConfig, + poolConfigPDA, + remoteChainConfigPDA, + chain.DeployerKey.PublicKey(), + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return nil, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ixConfigure) + } else { + ixConfigure, err := solLockReleaseTokenPool.NewEditChainRemoteConfigInstruction( + cfg.RemoteChainSelector, + tokenPubKey, + cfg.RemoteConfig, + poolConfigPDA, + remoteChainConfigPDA, + chain.DeployerKey.PublicKey(), + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return nil, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ixConfigure) } ixRates, err := solLockReleaseTokenPool.NewSetChainRateLimitInstruction( cfg.RemoteChainSelector, @@ -352,19 +399,23 @@ func getInstructionsForLockRelease( if err != nil { return nil, fmt.Errorf("failed to generate instructions: %w", err) } - ixAppend, err := solLockReleaseTokenPool.NewAppendRemotePoolAddressesInstruction( - cfg.RemoteChainSelector, - tokenPubKey, - cfg.RemoteConfig.PoolAddresses, // i dont know why this is a list (is it for different types of pool of the same token?) 
- poolConfigPDA, - remoteChainConfigPDA, - chain.DeployerKey.PublicKey(), - solana.SystemProgramID, - ).ValidateAndBuild() - if err != nil { - return nil, fmt.Errorf("failed to generate instructions: %w", err) + ixns = append(ixns, ixRates) + if len(cfg.RemoteConfig.PoolAddresses) > 0 { + ixAppend, err := solLockReleaseTokenPool.NewAppendRemotePoolAddressesInstruction( + cfg.RemoteChainSelector, + tokenPubKey, + cfg.RemoteConfig.PoolAddresses, // i dont know why this is a list (is it for different types of pool of the same token?) + poolConfigPDA, + remoteChainConfigPDA, + chain.DeployerKey.PublicKey(), + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return nil, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ixAppend) } - return []solana.Instruction{ixConfigure, ixRates, ixAppend}, nil + return ixns, nil } // ADD TOKEN POOL LOOKUP TABLE @@ -518,3 +569,471 @@ func SetPool(e deployment.Environment, cfg SetPoolConfig) (deployment.ChangesetO e.Logger.Infow("Set pool config", "token_pubkey", tokenPubKey.String()) return deployment.ChangesetOutput{}, nil } + +type ConfigureTokenPoolAllowListConfig struct { + SolChainSelector uint64 + SolTokenPubKey string + PoolType solTestTokenPool.PoolType + Accounts []solana.PublicKey + Enabled bool + MCMSSolana *MCMSConfigSolana +} + +func (cfg ConfigureTokenPoolAllowListConfig) Validate(e deployment.Environment) error { + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.SolTokenPubKey) + if err := commonValidation(e, cfg.SolChainSelector, tokenPubKey); err != nil { + return err + } + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.SolChainSelector] + chain := e.SolChains[cfg.SolChainSelector] + + if err := validatePoolDeployment(chainState, cfg.PoolType, cfg.SolChainSelector); err != nil { + return err + } + + var tokenPool solana.PublicKey + var poolConfigAccount interface{} + + switch cfg.PoolType { + case solTestTokenPool.BurnAndMint_PoolType: + tokenPool = chainState.BurnMintTokenPool + poolConfigAccount = solBurnMintTokenPool.State{} + case solTestTokenPool.LockAndRelease_PoolType: + tokenPool = chainState.LockReleaseTokenPool + poolConfigAccount = solLockReleaseTokenPool.State{} + default: + return fmt.Errorf("invalid pool type: %s", cfg.PoolType) + } + + // check if pool config exists + poolConfigPDA, err := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, tokenPool) + if err != nil { + return fmt.Errorf("failed to get token pool config address (mint: %s, pool: %s): %w", tokenPubKey.String(), tokenPool.String(), err) + } + if err := chain.GetAccountDataBorshInto(context.Background(), poolConfigPDA, &poolConfigAccount); err != nil { + return fmt.Errorf("token pool config not found (mint: %s, pool: %s, type: %s): %w", tokenPubKey.String(), tokenPool.String(), cfg.PoolType, err) + } + return nil +} + +func ConfigureTokenPoolAllowList(e deployment.Environment, cfg ConfigureTokenPoolAllowListConfig) (deployment.ChangesetOutput, error) { + if err := cfg.Validate(e); err != nil { + return deployment.ChangesetOutput{}, err + } + + chain := e.SolChains[cfg.SolChainSelector] + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.SolChainSelector] + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.SolTokenPubKey) + + var ix solana.Instruction + var err error + tokenPoolUsingMcms := cfg.MCMSSolana != nil && cfg.MCMSSolana.TokenPoolPDAOwnedByTimelock + // validate ownership + var authority solana.PublicKey + var programID solana.PublicKey + var 
contractType deployment.ContractType + if tokenPoolUsingMcms { + authority, err = FetchTimelockSigner(e, cfg.SolChainSelector) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to fetch timelock signer: %w", err) + } + } else { + authority = chain.DeployerKey.PublicKey() + } + switch cfg.PoolType { + case solTestTokenPool.BurnAndMint_PoolType: + poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, chainState.BurnMintTokenPool) + solBurnMintTokenPool.SetProgramID(chainState.BurnMintTokenPool) + programID = chainState.BurnMintTokenPool + contractType = ccipChangeset.BurnMintTokenPool + ix, err = solBurnMintTokenPool.NewConfigureAllowListInstruction( + cfg.Accounts, + cfg.Enabled, + poolConfigPDA, + authority, + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + case solTestTokenPool.LockAndRelease_PoolType: + poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, chainState.LockReleaseTokenPool) + solLockReleaseTokenPool.SetProgramID(chainState.LockReleaseTokenPool) + programID = chainState.LockReleaseTokenPool + contractType = ccipChangeset.LockReleaseTokenPool + ix, err = solLockReleaseTokenPool.NewConfigureAllowListInstruction( + cfg.Accounts, + cfg.Enabled, + poolConfigPDA, + authority, + solana.SystemProgramID, + ).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + default: + return deployment.ChangesetOutput{}, fmt.Errorf("invalid pool type: %s", cfg.PoolType) + } + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + if tokenPoolUsingMcms { + tx, err := BuildMCMSTxn(ix, programID.String(), contractType) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to create transaction: %w", err) + } + proposal, err := BuildProposalsForTxns( + e, cfg.SolChainSelector, "proposal to ConfigureTokenPoolAllowList in Solana", cfg.MCMSSolana.MCMS.MinDelay, []mcmsTypes.Transaction{*tx}) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{ + MCMSTimelockProposals: []mcms.TimelockProposal{*proposal}, + }, nil + } + + err = chain.Confirm([]solana.Instruction{ix}) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm instructions: %w", err) + } + e.Logger.Infow("Configured token pool allowlist", "token_pubkey", tokenPubKey.String()) + return deployment.ChangesetOutput{}, nil +} + +type RemoveFromAllowListConfig struct { + SolChainSelector uint64 + SolTokenPubKey string + PoolType solTestTokenPool.PoolType + Accounts []solana.PublicKey + MCMSSolana *MCMSConfigSolana +} + +func (cfg RemoveFromAllowListConfig) Validate(e deployment.Environment) error { + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.SolTokenPubKey) + if err := commonValidation(e, cfg.SolChainSelector, tokenPubKey); err != nil { + return err + } + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.SolChainSelector] + chain := e.SolChains[cfg.SolChainSelector] + + if err := validatePoolDeployment(chainState, cfg.PoolType, cfg.SolChainSelector); err != nil { + return err + } + + var tokenPool solana.PublicKey + var poolConfigAccount interface{} + + switch cfg.PoolType { + case solTestTokenPool.BurnAndMint_PoolType: + tokenPool = 
chainState.BurnMintTokenPool + poolConfigAccount = solBurnMintTokenPool.State{} + case solTestTokenPool.LockAndRelease_PoolType: + tokenPool = chainState.LockReleaseTokenPool + poolConfigAccount = solLockReleaseTokenPool.State{} + default: + return fmt.Errorf("invalid pool type: %s", cfg.PoolType) + } + + // check if pool config exists + poolConfigPDA, err := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, tokenPool) + if err != nil { + return fmt.Errorf("failed to get token pool config address (mint: %s, pool: %s): %w", tokenPubKey.String(), tokenPool.String(), err) + } + if err := chain.GetAccountDataBorshInto(context.Background(), poolConfigPDA, &poolConfigAccount); err != nil { + return fmt.Errorf("token pool config not found (mint: %s, pool: %s, type: %s): %w", tokenPubKey.String(), tokenPool.String(), cfg.PoolType, err) + } + return nil +} + +func RemoveFromTokenPoolAllowList(e deployment.Environment, cfg RemoveFromAllowListConfig) (deployment.ChangesetOutput, error) { + if err := cfg.Validate(e); err != nil { + return deployment.ChangesetOutput{}, err + } + + chain := e.SolChains[cfg.SolChainSelector] + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.SolChainSelector] + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.SolTokenPubKey) + + var ix solana.Instruction + var err error + tokenPoolUsingMcms := cfg.MCMSSolana != nil && cfg.MCMSSolana.TokenPoolPDAOwnedByTimelock + // validate ownership + var authority solana.PublicKey + var programID solana.PublicKey + var contractType deployment.ContractType + if tokenPoolUsingMcms { + authority, err = FetchTimelockSigner(e, cfg.SolChainSelector) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to fetch timelock signer: %w", err) + } + } else { + authority = chain.DeployerKey.PublicKey() + } + switch cfg.PoolType { + case solTestTokenPool.BurnAndMint_PoolType: + poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, chainState.BurnMintTokenPool) + solBurnMintTokenPool.SetProgramID(chainState.BurnMintTokenPool) + programID = chainState.BurnMintTokenPool + contractType = ccipChangeset.BurnMintTokenPool + ix, err = solBurnMintTokenPool.NewRemoveFromAllowListInstruction( + cfg.Accounts, + poolConfigPDA, + authority, + ).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + case solTestTokenPool.LockAndRelease_PoolType: + poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, chainState.LockReleaseTokenPool) + solLockReleaseTokenPool.SetProgramID(chainState.LockReleaseTokenPool) + programID = chainState.LockReleaseTokenPool + contractType = ccipChangeset.LockReleaseTokenPool + ix, err = solLockReleaseTokenPool.NewRemoveFromAllowListInstruction( + cfg.Accounts, + poolConfigPDA, + authority, + ).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + default: + return deployment.ChangesetOutput{}, fmt.Errorf("invalid pool type: %s", cfg.PoolType) + } + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + if tokenPoolUsingMcms { + tx, err := BuildMCMSTxn(ix, programID.String(), contractType) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to create transaction: %w", err) + } + proposal, err := BuildProposalsForTxns( + e, cfg.SolChainSelector, "proposal to RemoveFromTokenPoolAllowList in Solana", 
cfg.MCMSSolana.MCMS.MinDelay, []mcmsTypes.Transaction{*tx}) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{ + MCMSTimelockProposals: []mcms.TimelockProposal{*proposal}, + }, nil + } + + err = chain.Confirm([]solana.Instruction{ix}) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm instructions: %w", err) + } + e.Logger.Infow("Configured token pool allowlist", "token_pubkey", tokenPubKey.String()) + return deployment.ChangesetOutput{}, nil +} + +type LockReleaseLiquidityOpsConfig struct { + SolChainSelector uint64 + SolTokenPubKey string + SetCfg *SetLiquidityConfig + LiquidityCfg *LiquidityConfig + MCMSSolana *MCMSConfigSolana +} + +type SetLiquidityConfig struct { + Enabled bool +} +type LiquidityOperation int + +const ( + Provide LiquidityOperation = iota + Withdraw +) + +type LiquidityConfig struct { + Amount uint64 + RemoteTokenAccount solana.PublicKey + Type LiquidityOperation +} + +func (cfg LockReleaseLiquidityOpsConfig) Validate(e deployment.Environment) error { + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.SolTokenPubKey) + if err := commonValidation(e, cfg.SolChainSelector, tokenPubKey); err != nil { + return err + } + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.SolChainSelector] + chain := e.SolChains[cfg.SolChainSelector] + + if err := validatePoolDeployment(chainState, solTestTokenPool.LockAndRelease_PoolType, cfg.SolChainSelector); err != nil { + return err + } + + tokenPool := chainState.LockReleaseTokenPool + poolConfigAccount := solLockReleaseTokenPool.State{} + + // check if pool config exists + poolConfigPDA, err := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, tokenPool) + if err != nil { + return fmt.Errorf("failed to get token pool config address (mint: %s, pool: %s): %w", tokenPubKey.String(), tokenPool.String(), err) + } + if err := chain.GetAccountDataBorshInto(context.Background(), poolConfigPDA, &poolConfigAccount); err != nil { + return fmt.Errorf("token pool config not found (mint: %s, pool: %s, type: %s): %w", tokenPubKey.String(), tokenPool.String(), tokenPool, err) + } + return nil +} + +func LockReleaseLiquidityOps(e deployment.Environment, cfg LockReleaseLiquidityOpsConfig) (deployment.ChangesetOutput, error) { + if err := cfg.Validate(e); err != nil { + return deployment.ChangesetOutput{}, err + } + + chain := e.SolChains[cfg.SolChainSelector] + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.SolChainSelector] + tokenPool := chainState.LockReleaseTokenPool + + var err error + tokenPoolUsingMcms := cfg.MCMSSolana != nil && cfg.MCMSSolana.TokenPoolPDAOwnedByTimelock + // validate ownership + var authority solana.PublicKey + + solLockReleaseTokenPool.SetProgramID(tokenPool) + programID := tokenPool + contractType := ccipChangeset.LockReleaseTokenPool + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.SolTokenPubKey) + poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenPubKey, tokenPool) + if tokenPoolUsingMcms { + authority, err = FetchTimelockSigner(e, cfg.SolChainSelector) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to fetch timelock signer: %w", err) + } + } else { + authority = chain.DeployerKey.PublicKey() + } + ixns := make([]solana.Instruction, 0) + if cfg.SetCfg != nil { + ix, err := solLockReleaseTokenPool.NewSetCanAcceptLiquidityInstruction( + cfg.SetCfg.Enabled, + poolConfigPDA, + authority, + 
).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ix) + } + if cfg.LiquidityCfg != nil { + tokenProgram, _ := chainState.TokenToTokenProgram(tokenPubKey) + poolSigner, _ := solTokenUtil.TokenPoolSignerAddress(tokenPubKey, tokenPool) + poolConfigAccount := solLockReleaseTokenPool.State{} + _ = chain.GetAccountDataBorshInto(context.Background(), poolConfigPDA, &poolConfigAccount) + switch cfg.LiquidityCfg.Type { + case Provide: + ix, err := solLockReleaseTokenPool.NewProvideLiquidityInstruction( + cfg.LiquidityCfg.Amount, + poolConfigPDA, + tokenProgram, + tokenPubKey, + poolSigner, + poolConfigAccount.Config.PoolTokenAccount, + cfg.LiquidityCfg.RemoteTokenAccount, + authority, + ).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ix) + case Withdraw: + ix, err := solLockReleaseTokenPool.NewWithdrawLiquidityInstruction( + cfg.LiquidityCfg.Amount, + poolConfigPDA, + tokenProgram, + tokenPubKey, + poolSigner, + poolConfigAccount.Config.PoolTokenAccount, + cfg.LiquidityCfg.RemoteTokenAccount, + authority, + ).ValidateAndBuild() + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to generate instructions: %w", err) + } + ixns = append(ixns, ix) + } + } + + if tokenPoolUsingMcms { + txns := make([]mcmsTypes.Transaction, 0) + for _, ixn := range ixns { + tx, err := BuildMCMSTxn(ixn, programID.String(), contractType) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to create transaction: %w", err) + } + txns = append(txns, *tx) + } + proposal, err := BuildProposalsForTxns( + e, cfg.SolChainSelector, "proposal to RemoveFromTokenPoolAllowList in Solana", cfg.MCMSSolana.MCMS.MinDelay, txns) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal: %w", err) + } + return deployment.ChangesetOutput{ + MCMSTimelockProposals: []mcms.TimelockProposal{*proposal}, + }, nil + } + + err = chain.Confirm(ixns) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to confirm instructions: %w", err) + } + return deployment.ChangesetOutput{}, nil +} + +type TokenApproveCheckedConfig struct { + Amount uint64 + Decimals uint8 + ChainSelector uint64 + TokenPubKey string + PoolType solTestTokenPool.PoolType + SourceATA solana.PublicKey +} + +func TokenApproveChecked(e deployment.Environment, cfg TokenApproveCheckedConfig) (deployment.ChangesetOutput, error) { + chain := e.SolChains[cfg.ChainSelector] + state, _ := ccipChangeset.LoadOnchainState(e) + chainState := state.SolChains[cfg.ChainSelector] + + tokenPubKey := solana.MustPublicKeyFromBase58(cfg.TokenPubKey) + tokenPool := solana.PublicKey{} + + if cfg.PoolType == solTestTokenPool.BurnAndMint_PoolType { + tokenPool = chainState.BurnMintTokenPool + solBurnMintTokenPool.SetProgramID(tokenPool) + } else if cfg.PoolType == solTestTokenPool.LockAndRelease_PoolType { + tokenPool = chainState.LockReleaseTokenPool + solLockReleaseTokenPool.SetProgramID(tokenPool) + } + + // verified + tokenprogramID, _ := chainState.TokenToTokenProgram(tokenPubKey) + poolSigner, _ := solTokenUtil.TokenPoolSignerAddress(tokenPubKey, tokenPool) + + ix, err := solTokenUtil.TokenApproveChecked( + cfg.Amount, + cfg.Decimals, + tokenprogramID, + cfg.SourceATA, + tokenPubKey, + poolSigner, + chain.DeployerKey.PublicKey(), + solana.PublicKeySlice{}, + ) + if err != nil { + 
return deployment.ChangesetOutput{}, err + } + + // confirm instructions + if err = chain.Confirm([]solana.Instruction{ix}); err != nil { + e.Logger.Errorw("Failed to confirm instructions for TokenApproveChecked", "chain", chain.String(), "err", err) + return deployment.ChangesetOutput{}, err + } + e.Logger.Infow("TokenApproveChecked on", "chain", cfg.ChainSelector, "for token", tokenPubKey.String()) + + return deployment.ChangesetOutput{}, nil +} diff --git a/deployment/ccip/changeset/solana/cs_token_pool_test.go b/deployment/ccip/changeset/solana/cs_token_pool_test.go new file mode 100644 index 00000000000..9026bbdd0c6 --- /dev/null +++ b/deployment/ccip/changeset/solana/cs_token_pool_test.go @@ -0,0 +1,305 @@ +package solana_test + +import ( + "testing" + "time" + + "github.com/gagliardetto/solana-go" + solRpc "github.com/gagliardetto/solana-go/rpc" + "github.com/stretchr/testify/require" + + solBaseTokenPool "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/base_token_pool" + solTestTokenPool "github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings/test_token_pool" + solTokenUtil "github.com/smartcontractkit/chainlink-ccip/chains/solana/utils/tokens" + + "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext" + + ccipChangeset "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" + ccipChangesetSolana "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/solana" + changeset_solana "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/solana" + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/testhelpers" + + "github.com/smartcontractkit/chainlink/deployment" + commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset" +) + +func TestAddTokenPool(t *testing.T) { + t.Parallel() + doTestTokenPool(t, false) +} + +// func TestAddTokenPoolMcms(t *testing.T) { +// t.Parallel() +// doTestTokenPool(t, true) +// } + +func doTestTokenPool(t *testing.T, mcms bool) { + ctx := testcontext.Get(t) + tenv, _ := testhelpers.NewMemoryEnvironment(t, testhelpers.WithSolChains(1)) + + evmChain := tenv.Env.AllChainSelectors()[0] + solChain := tenv.Env.AllChainSelectorsSolana()[0] + e, newTokenAddress, err := deployToken(t, tenv.Env, solChain) + require.NoError(t, err) + state, err := ccipChangeset.LoadOnchainStateSolana(e) + require.NoError(t, err) + // MintTo does not support native tokens + deployerKey := e.SolChains[solChain].DeployerKey.PublicKey() + testUser, _ := solana.NewRandomPrivateKey() + testUserPubKey := testUser.PublicKey() + e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ + commonchangeset.Configure( + // deployer creates ATA for itself and testUser + deployment.CreateLegacyChangeSet(ccipChangesetSolana.CreateSolanaTokenATA), + ccipChangesetSolana.CreateSolanaTokenATAConfig{ + ChainSelector: solChain, + TokenPubkey: newTokenAddress, + TokenProgram: ccipChangeset.SPL2022Tokens, + ATAList: []string{deployerKey.String(), testUserPubKey.String()}, + }, + ), + commonchangeset.Configure( + // deployer mints token to itself and testUser + deployment.CreateLegacyChangeSet(changeset_solana.MintSolanaToken), + ccipChangesetSolana.MintSolanaTokenConfig{ + ChainSelector: solChain, + TokenPubkey: newTokenAddress.String(), + AmountToAddress: map[string]uint64{ + deployerKey.String(): uint64(1000), + testUserPubKey.String(): uint64(1000), + }, + }, + ), + }, + ) + require.NoError(t, err) + testUserATA, _, err := 
solTokenUtil.FindAssociatedTokenAddress(solana.Token2022ProgramID, newTokenAddress, testUserPubKey) + require.NoError(t, err) + deployerATA, _, err := solTokenUtil.FindAssociatedTokenAddress( + solana.Token2022ProgramID, + newTokenAddress, + e.SolChains[solChain].DeployerKey.PublicKey(), + ) + require.NoError(t, err) + mcmsConfigured := false + remoteConfig := solBaseTokenPool.RemoteConfig{ + PoolAddresses: []solTestTokenPool.RemoteAddress{{Address: []byte{1, 2, 3}}}, + TokenAddress: solTestTokenPool.RemoteAddress{Address: []byte{4, 5, 6}}, + Decimals: 9, + } + inboundConfig := solBaseTokenPool.RateLimitConfig{ + Enabled: true, + Capacity: uint64(1000), + Rate: 1, + } + outboundConfig := solBaseTokenPool.RateLimitConfig{ + Enabled: false, + Capacity: 0, + Rate: 0, + } + + tokenMap := map[deployment.ContractType]solana.PublicKey{ + ccipChangeset.SPL2022Tokens: newTokenAddress, + ccipChangeset.SPLTokens: state.SolChains[solChain].WSOL, + } + + type poolTestType struct { + poolType solTestTokenPool.PoolType + poolAddress solana.PublicKey + mcms bool + } + testCases := []poolTestType{ + { + poolType: solTestTokenPool.BurnAndMint_PoolType, + poolAddress: state.SolChains[solChain].BurnMintTokenPool, + }, + { + poolType: solTestTokenPool.LockAndRelease_PoolType, + poolAddress: state.SolChains[solChain].LockReleaseTokenPool, + }, + } + for _, testCase := range testCases { + for _, tokenAddress := range tokenMap { + e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.AddTokenPool), + ccipChangesetSolana.TokenPoolConfig{ + ChainSelector: solChain, + TokenPubKey: tokenAddress.String(), + PoolType: testCase.poolType, + }, + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.SetupTokenPoolForRemoteChain), + ccipChangesetSolana.RemoteChainTokenPoolConfig{ + SolChainSelector: solChain, + RemoteChainSelector: evmChain, + SolTokenPubKey: tokenAddress.String(), + RemoteConfig: remoteConfig, + InboundRateLimit: inboundConfig, + OutboundRateLimit: outboundConfig, + PoolType: testCase.poolType, + }, + ), + }, + ) + require.NoError(t, err) + // test AddTokenPool results + configAccount := solTestTokenPool.State{} + poolConfigPDA, _ := solTokenUtil.TokenPoolConfigAddress(tokenAddress, testCase.poolAddress) + err = e.SolChains[solChain].GetAccountDataBorshInto(ctx, poolConfigPDA, &configAccount) + require.NoError(t, err) + require.Equal(t, tokenAddress, configAccount.Config.Mint) + // test SetupTokenPoolForRemoteChain results + remoteChainConfigPDA, _, _ := solTokenUtil.TokenPoolChainConfigPDA(evmChain, tokenAddress, testCase.poolAddress) + var remoteChainConfigAccount solTestTokenPool.ChainConfig + err = e.SolChains[solChain].GetAccountDataBorshInto(ctx, remoteChainConfigPDA, &remoteChainConfigAccount) + require.NoError(t, err) + require.Equal(t, uint8(9), remoteChainConfigAccount.Base.Remote.Decimals) + + var mcmsConfig *ccipChangesetSolana.MCMSConfigSolana + if testCase.mcms && !mcmsConfigured { + _, _ = testhelpers.TransferOwnershipSolana(t, &e, solChain, true, true, true, true) + mcmsConfig = &ccipChangesetSolana.MCMSConfigSolana{ + MCMS: &ccipChangeset.MCMSConfig{ + MinDelay: 1 * time.Second, + }, + RouterOwnedByTimelock: true, + FeeQuoterOwnedByTimelock: true, + OffRampOwnedByTimelock: true, + } + require.NotNil(t, mcmsConfig) + mcmsConfigured = true + } + + allowedAccount1, _ := solana.NewRandomPrivateKey() + allowedAccount2, _ := 
solana.NewRandomPrivateKey() + + newRemoteConfig := solBaseTokenPool.RemoteConfig{ + PoolAddresses: []solTestTokenPool.RemoteAddress{{Address: []byte{7, 8, 9}}}, + TokenAddress: solTestTokenPool.RemoteAddress{Address: []byte{10, 11, 12}}, + Decimals: 9, + } + newOutboundConfig := solBaseTokenPool.RateLimitConfig{ + Enabled: true, + Capacity: uint64(1000), + Rate: 1, + } + newInboundConfig := solBaseTokenPool.RateLimitConfig{ + Enabled: false, + Capacity: 0, + Rate: 0, + } + + e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.ConfigureTokenPoolAllowList), + ccipChangesetSolana.ConfigureTokenPoolAllowListConfig{ + SolChainSelector: solChain, + SolTokenPubKey: tokenAddress.String(), + PoolType: testCase.poolType, + Accounts: []solana.PublicKey{allowedAccount1.PublicKey(), allowedAccount2.PublicKey()}, + Enabled: true, + MCMSSolana: mcmsConfig, + }, + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.RemoveFromTokenPoolAllowList), + ccipChangesetSolana.RemoveFromAllowListConfig{ + SolChainSelector: solChain, + SolTokenPubKey: tokenAddress.String(), + PoolType: testCase.poolType, + Accounts: []solana.PublicKey{allowedAccount1.PublicKey(), allowedAccount2.PublicKey()}, + MCMSSolana: mcmsConfig, + }, + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.SetupTokenPoolForRemoteChain), + ccipChangesetSolana.RemoteChainTokenPoolConfig{ + SolChainSelector: solChain, + RemoteChainSelector: evmChain, + SolTokenPubKey: tokenAddress.String(), + RemoteConfig: newRemoteConfig, + InboundRateLimit: newInboundConfig, + OutboundRateLimit: newOutboundConfig, + PoolType: testCase.poolType, + MCMSSolana: mcmsConfig, + IsUpdate: true, + }, + ), + }, + ) + require.NoError(t, err) + if testCase.poolType == solTestTokenPool.LockAndRelease_PoolType && tokenAddress == newTokenAddress { + e, err = commonchangeset.ApplyChangesetsV2(t, e, []commonchangeset.ConfiguredChangeSet{ + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.TokenApproveChecked), + ccipChangesetSolana.TokenApproveCheckedConfig{ + Amount: 100, + Decimals: 9, + ChainSelector: solChain, + TokenPubKey: tokenAddress.String(), + PoolType: testCase.poolType, + SourceATA: deployerATA, + }, + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.LockReleaseLiquidityOps), + ccipChangesetSolana.LockReleaseLiquidityOpsConfig{ + SolChainSelector: solChain, + SolTokenPubKey: tokenAddress.String(), + SetCfg: &ccipChangesetSolana.SetLiquidityConfig{ + Enabled: true, + }, + MCMSSolana: mcmsConfig, + }, + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.LockReleaseLiquidityOps), + ccipChangesetSolana.LockReleaseLiquidityOpsConfig{ + SolChainSelector: solChain, + SolTokenPubKey: tokenAddress.String(), + LiquidityCfg: &ccipChangesetSolana.LiquidityConfig{ + Amount: 100, + RemoteTokenAccount: deployerATA, + Type: ccipChangesetSolana.Provide, + }, + MCMSSolana: mcmsConfig, + }, + ), + commonchangeset.Configure( + deployment.CreateLegacyChangeSet(ccipChangesetSolana.LockReleaseLiquidityOps), + ccipChangesetSolana.LockReleaseLiquidityOpsConfig{ + SolChainSelector: solChain, + SolTokenPubKey: tokenAddress.String(), + LiquidityCfg: &ccipChangesetSolana.LiquidityConfig{ + Amount: 50, + RemoteTokenAccount: testUserATA, + Type: ccipChangesetSolana.Withdraw, + }, + MCMSSolana: mcmsConfig, + 
}, + ), + }, + ) + require.NoError(t, err) + outDec, outVal, err := solTokenUtil.TokenBalance(e.GetContext(), e.SolChains[solChain].Client, deployerATA, solRpc.CommitmentConfirmed) + require.NoError(t, err) + require.Equal(t, int(900), outVal) + require.Equal(t, 9, int(outDec)) + + outDec, outVal, err = solTokenUtil.TokenBalance(e.GetContext(), e.SolChains[solChain].Client, testUserATA, solRpc.CommitmentConfirmed) + require.NoError(t, err) + require.Equal(t, int(1050), outVal) + require.Equal(t, 9, int(outDec)) + + err = e.SolChains[solChain].GetAccountDataBorshInto(ctx, poolConfigPDA, &configAccount) + require.NoError(t, err) + outDec, outVal, err = solTokenUtil.TokenBalance(e.GetContext(), e.SolChains[solChain].Client, configAccount.Config.PoolTokenAccount, solRpc.CommitmentConfirmed) + require.NoError(t, err) + require.Equal(t, int(50), outVal) + require.Equal(t, 9, int(outDec)) + } + } + } +} diff --git a/deployment/ccip/changeset/solana/transfer_ccip_to_mcms_with_timelock_test.go b/deployment/ccip/changeset/solana/transfer_ccip_to_mcms_with_timelock_test.go index 986a06d90f8..b0c7e5e5721 100644 --- a/deployment/ccip/changeset/solana/transfer_ccip_to_mcms_with_timelock_test.go +++ b/deployment/ccip/changeset/solana/transfer_ccip_to_mcms_with_timelock_test.go @@ -297,7 +297,6 @@ func prepareEnvironmentForOwnershipTransfer(t *testing.T) (deployment.Environmen ChainSelector: solChain1, TokenPubKey: tokenAddress.String(), PoolType: test_token_pool.LockAndRelease_PoolType, - Authority: e.SolChains[solChain1].DeployerKey.PublicKey().String(), }, ), }) diff --git a/deployment/ccip/changeset/testhelpers/test_helpers.go b/deployment/ccip/changeset/testhelpers/test_helpers.go index 45e3a2a8b88..98c85afb0cc 100644 --- a/deployment/ccip/changeset/testhelpers/test_helpers.go +++ b/deployment/ccip/changeset/testhelpers/test_helpers.go @@ -909,7 +909,6 @@ func DeployTransferableTokenSolana( ChainSelector: solChainSel, TokenPubKey: solTokenAddress.String(), PoolType: solTestTokenPool.BurnAndMint_PoolType, - Authority: solDeployerKey.String(), }, ), ) From 6c0229e340f9da6a8de6319cb0b2cd715579c7db Mon Sep 17 00:00:00 2001 From: Ryan <80392855+RayXpub@users.noreply.github.com> Date: Thu, 27 Feb 2025 17:12:42 +0400 Subject: [PATCH 17/17] chore: add comment to abigen build script (#16430) --- tools/bin/build_abigen | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bin/build_abigen b/tools/bin/build_abigen index 5f0781ca782..af0e2d01e96 100755 --- a/tools/bin/build_abigen +++ b/tools/bin/build_abigen @@ -32,6 +32,7 @@ TMPDIR="$(mktemp -d)" pushd "$TMPDIR" +# We do not use go install here as we don't want the behavior to implicitly depend on user-configured variables like $PATH git clone --depth=1 --single-branch --branch "$GETH_VERSION" "$GETH_REPO_URL" cd go-ethereum/cmd/abigen go build -ldflags="-s -w" # necessary on MacOS for code signing (see https://github.com/confluentinc/confluent-kafka-go/issues/1092#issuecomment-2373681430)
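
For context, the alternative that the new comment in build_abigen rules out would be a `go install` of the pinned geth version. The following is a minimal sketch of that form, not part of the patch; it assumes `$GETH_VERSION` resolves to a tagged go-ethereum release, as it does where the variable is set earlier in the script.

# Hypothetical alternative, not what build_abigen does: fetch and build abigen
# with `go install` at the pinned version instead of cloning and building in a tmpdir.
go install "github.com/ethereum/go-ethereum/cmd/abigen@${GETH_VERSION}"
# The resulting binary lands in $GOBIN (or $(go env GOPATH)/bin), so which abigen
# gets picked up later depends on the user's $PATH and Go environment, which is
# exactly the implicit, per-user behavior the clone-and-build approach avoids.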