diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index accad317d1a..6d48a429f57 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -1741,42 +1741,30 @@ func (builder *FlowAccessNodeBuilder) initPublicLibp2pNode(networkKey crypto.Pri return nil, fmt.Errorf("could not create connection manager: %w", err) } - params := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: builder.Logger, - MetricsConfig: &p2pbuilderconfig.MetricsConfig{ - HeroCacheFactory: builder.HeroCacheMetricsFactory(), - Metrics: networkMetrics, - }, - NetworkingType: network.PublicNetwork, - Address: bindAddress, - NetworkKey: networkKey, - SporkId: builder.SporkID, - IdProvider: builder.IdentityProvider, - ResourceManagerParams: &builder.FlowConfig.NetworkConfig.ResourceManager, - RpcInspectorParams: &builder.FlowConfig.NetworkConfig.GossipSub.RpcInspector, - PeerManagerParams: &p2pbuilderconfig.PeerManagerConfig{ + libp2pNode, err := p2pbuilder.NewNodeBuilder(builder.Logger, &builder.FlowConfig.NetworkConfig.GossipSub, &p2pbuilderconfig.MetricsConfig{ + HeroCacheFactory: builder.HeroCacheMetricsFactory(), + Metrics: networkMetrics, + }, + network.PublicNetwork, + bindAddress, + networkKey, + builder.SporkID, + builder.IdentityProvider, + &builder.FlowConfig.NetworkConfig.ResourceManager, + &p2pbuilderconfig.PeerManagerConfig{ // TODO: eventually, we need pruning enabled even on public network. However, it needs a modified version of // the peer manager that also operate on the public identities. ConnectionPruning: connection.PruningDisabled, UpdateInterval: builder.FlowConfig.NetworkConfig.PeerUpdateInterval, ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), }, - SubscriptionProviderParams: &builder.FlowConfig.NetworkConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: &p2p.DisallowListCacheConfig{ + &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, - UnicastConfig: &p2pbuilderconfig.UnicastConfig{ - Unicast: builder.FlowConfig.NetworkConfig.Unicast, - RateLimiterDistributor: builder.UnicastRateLimiterDistributor, - }, - GossipSubCfg: &builder.FlowConfig.NetworkConfig.GossipSub, - } - nodeBuilder, err := p2pbuilder.NewNodeBuilder(params) - if err != nil { - return nil, fmt.Errorf("could not create libp2p node builder: %w", err) - } - libp2pNode, err := nodeBuilder. + &p2pbuilderconfig.UnicastConfig{ + Unicast: builder.FlowConfig.NetworkConfig.Unicast, + }). SetBasicResolver(builder.Resolver). SetSubscriptionFilter(subscription.NewRoleBasedFilter(flow.RoleAccess, builder.IdentityProvider)). SetConnectionManager(connManager). @@ -1784,6 +1772,7 @@ func (builder *FlowAccessNodeBuilder) initPublicLibp2pNode(networkKey crypto.Pri return dht.NewDHT(ctx, h, protocols.FlowPublicDHTProtocolID(builder.SporkID), builder.Logger, networkMetrics, dht.AsServer()) }). 
Build() + if err != nil { return nil, fmt.Errorf("could not build libp2p node for staked access node: %w", err) } diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index f795cb50bcc..ff9eb063560 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -742,36 +742,27 @@ func (builder *ObserverServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr pis = append(pis, pi) } - params := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: builder.Logger, - MetricsConfig: &p2pbuilderconfig.MetricsConfig{ + node, err := p2pbuilder.NewNodeBuilder( + builder.Logger, + &builder.FlowConfig.NetworkConfig.GossipSub, + &p2pbuilderconfig.MetricsConfig{ HeroCacheFactory: builder.HeroCacheMetricsFactory(), Metrics: builder.Metrics.Network, }, - NetworkingType: network.PublicNetwork, - Address: builder.BaseConfig.BindAddr, - NetworkKey: networkKey, - SporkId: builder.SporkID, - IdProvider: builder.IdentityProvider, - ResourceManagerParams: &builder.FlowConfig.NetworkConfig.ResourceManager, - RpcInspectorParams: &builder.FlowConfig.NetworkConfig.GossipSub.RpcInspector, - PeerManagerParams: p2pbuilderconfig.PeerManagerDisableConfig(), - SubscriptionProviderParams: &builder.FlowConfig.NetworkConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: &p2p.DisallowListCacheConfig{ + network.PublicNetwork, + builder.BaseConfig.BindAddr, + networkKey, + builder.SporkID, + builder.IdentityProvider, + &builder.FlowConfig.NetworkConfig.ResourceManager, + p2pbuilderconfig.PeerManagerDisableConfig(), // disable peer manager for observer node. + &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, - UnicastConfig: &p2pbuilderconfig.UnicastConfig{ - Unicast: builder.FlowConfig.NetworkConfig.Unicast, - RateLimiterDistributor: builder.UnicastRateLimiterDistributor, - }, - GossipSubCfg: &builder.FlowConfig.NetworkConfig.GossipSub, - } - nodeBuilder, err := p2pbuilder.NewNodeBuilder(params) - if err != nil { - return nil, fmt.Errorf("could not create libp2p node builder: %w", err) - } - libp2pNode, err := nodeBuilder. + &p2pbuilderconfig.UnicastConfig{ + Unicast: builder.FlowConfig.NetworkConfig.Unicast, + }). 
SetSubscriptionFilter( subscription.NewRoleBasedFilter( subscription.UnstakedRole, builder.IdentityProvider, @@ -791,7 +782,7 @@ func (builder *ObserverServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr return nil, fmt.Errorf("could not initialize libp2p node for observer: %w", err) } - builder.LibP2PNode = libp2pNode + builder.LibP2PNode = node return builder.LibP2PNode, nil } diff --git a/cmd/scaffold.go b/cmd/scaffold.go index e9c47b9a927..64ffcf20c94 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -380,34 +380,28 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { if err != nil { return nil, fmt.Errorf("could not determine dht activation status: %w", err) } - - params := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: fnb.Logger, - MetricsConfig: &p2pbuilderconfig.MetricsConfig{ + builder, err := p2pbuilder.DefaultNodeBuilder(fnb.Logger, + myAddr, + network.PrivateNetwork, + fnb.NetworkKey, + fnb.SporkID, + fnb.IdentityProvider, + &p2pbuilderconfig.MetricsConfig{ Metrics: fnb.Metrics.Network, HeroCacheFactory: fnb.HeroCacheMetricsFactory(), }, - NetworkingType: network.PrivateNetwork, - Address: myAddr, - NetworkKey: fnb.NetworkKey, - SporkId: fnb.SporkID, - IdProvider: fnb.IdentityProvider, - ResourceManagerParams: &fnb.FlowConfig.NetworkConfig.ResourceManager, - RpcInspectorParams: &fnb.FlowConfig.NetworkConfig.GossipSub.RpcInspector, - PeerManagerParams: peerManagerCfg, - SubscriptionProviderParams: &fnb.FlowConfig.NetworkConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: &p2p.DisallowListCacheConfig{ - MaxSize: fnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: metrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork), - }, - UnicastConfig: uniCfg, - GossipSubCfg: &fnb.FlowConfig.NetworkConfig.GossipSub, - } - builder, err := p2pbuilder.DefaultNodeBuilder(params, fnb.Resolver, fnb.BaseConfig.NodeRole, connGaterCfg, + peerManagerCfg, + &fnb.FlowConfig.NetworkConfig.GossipSub, + &fnb.FlowConfig.NetworkConfig.ResourceManager, + uniCfg, &fnb.FlowConfig.NetworkConfig.ConnectionManager, + &p2p.DisallowListCacheConfig{ + MaxSize: fnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, + Metrics: metrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork), + }, dhtActivationStatus) if err != nil { return nil, fmt.Errorf("could not create libp2p node builder: %w", err) diff --git a/config/config_test.go b/config/config_test.go index 74676b9c407..fe0f076bd03 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -23,31 +23,14 @@ func TestBindPFlags(t *testing.T) { t.Run("should override config values when any flag is set", func(t *testing.T) { c := defaultConfig(t) flags := testFlagSet(c) - err := flags.Set("networking-connection-pruning", "false") require.NoError(t, err) - err = flags.Set("gossipsub-scoring-parameters-scoring-registry-misbehaviour-penalties-graft", "-10") - require.NoError(t, err) - err = flags.Set("gossipsub-scoring-parameters-scoring-registry-misbehaviour-penalties-prune", "-5") - require.NoError(t, err) - err = flags.Set("gossipsub-scoring-parameters-scoring-registry-misbehaviour-penalties-ihave", "-2") - require.NoError(t, err) - err = flags.Set("gossipsub-scoring-parameters-scoring-registry-misbehaviour-penalties-iwant", "-.9") - require.NoError(t, err) - err = flags.Set("gossipsub-scoring-parameters-scoring-registry-misbehaviour-penalties-publish", "-.1") - require.NoError(t, err) - require.NoError(t, flags.Parse(nil)) 
configFileUsed, err := BindPFlags(c, flags) require.NoError(t, err) require.False(t, configFileUsed) require.False(t, c.NetworkConfig.NetworkConnectionPruning) - require.Equal(t, c.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.GraftMisbehaviour, float64(-10)) - require.Equal(t, c.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.PruneMisbehaviour, float64(-5)) - require.Equal(t, c.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.IHaveMisbehaviour, float64(-2)) - require.Equal(t, c.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.IWantMisbehaviour, float64(-.9)) - require.Equal(t, c.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.PublishMisbehaviour, float64(-.1)) }) t.Run("should return an error if flags are not parsed", func(t *testing.T) { c := defaultConfig(t) diff --git a/config/default-config.yml b/config/default-config.yml index c91cec0b007..620b3c8361c 100644 --- a/config/default-config.yml +++ b/config/default-config.yml @@ -512,15 +512,15 @@ network-config: skip-decay-threshold: -0.1 misbehaviour-penalties: # The penalty applied to the application specific penalty when a peer conducts a graft misbehaviour. - graft: -0.01 + graft: -10 # The penalty applied to the application specific penalty when a peer conducts a prune misbehaviour. - prune: -0.01 + prune: -10 # The penalty applied to the application specific penalty when a peer conducts a iHave misbehaviour. - ihave: -0.01 + ihave: -10 # The penalty applied to the application specific penalty when a peer conducts a iWant misbehaviour. - iwant: -0.01 + iwant: -10 # The penalty applied to the application specific penalty when a peer conducts a rpc publish message misbehaviour. - publish: -0.01 + publish: -10 # The factor used to reduce the penalty for control message misbehaviours on cluster prefixed topics. This allows a more lenient punishment for nodes # that fall behind and may need to request old data. 
cluster-prefixed-reduction-factor: 0.2 diff --git a/follower/follower_builder.go b/follower/follower_builder.go index e77f8d6a228..443f815ad81 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -572,36 +572,27 @@ func (builder *FollowerServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr pis = append(pis, pi) } - params := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: builder.Logger, - MetricsConfig: &p2pbuilderconfig.MetricsConfig{ + node, err := p2pbuilder.NewNodeBuilder( + builder.Logger, + &builder.FlowConfig.NetworkConfig.GossipSub, + &p2pbuilderconfig.MetricsConfig{ HeroCacheFactory: builder.HeroCacheMetricsFactory(), Metrics: builder.Metrics.Network, }, - NetworkingType: network.PublicNetwork, - Address: builder.BaseConfig.BindAddr, - NetworkKey: networkKey, - SporkId: builder.SporkID, - IdProvider: builder.IdentityProvider, - ResourceManagerParams: &builder.FlowConfig.NetworkConfig.ResourceManager, - RpcInspectorParams: &builder.FlowConfig.NetworkConfig.GossipSub.RpcInspector, - PeerManagerParams: p2pbuilderconfig.PeerManagerDisableConfig(), - SubscriptionProviderParams: &builder.FlowConfig.NetworkConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: &p2p.DisallowListCacheConfig{ + network.PublicNetwork, + builder.BaseConfig.BindAddr, + networkKey, + builder.SporkID, + builder.IdentityProvider, + &builder.FlowConfig.NetworkConfig.ResourceManager, + p2pbuilderconfig.PeerManagerDisableConfig(), // disable peer manager for follower + &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, - UnicastConfig: &p2pbuilderconfig.UnicastConfig{ - Unicast: builder.FlowConfig.NetworkConfig.Unicast, - RateLimiterDistributor: builder.UnicastRateLimiterDistributor, - }, - GossipSubCfg: &builder.FlowConfig.NetworkConfig.GossipSub, - } - nodeBuilder, err := p2pbuilder.NewNodeBuilder(params) - if err != nil { - return nil, fmt.Errorf("could not create libp2p node builder: %w", err) - } - libp2pNode, err := nodeBuilder. + &p2pbuilderconfig.UnicastConfig{ + Unicast: builder.FlowConfig.NetworkConfig.Unicast, + }). 
SetSubscriptionFilter( subscription.NewRoleBasedFilter( subscription.UnstakedRole, builder.IdentityProvider, @@ -619,7 +610,7 @@ func (builder *FollowerServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr return nil, fmt.Errorf("could not build public libp2p node: %w", err) } - builder.LibP2PNode = libp2pNode + builder.LibP2PNode = node return builder.LibP2PNode, nil } diff --git a/insecure/corruptlibp2p/libp2p_node_factory.go b/insecure/corruptlibp2p/libp2p_node_factory.go index 35d316941d4..8772497fef2 100644 --- a/insecure/corruptlibp2p/libp2p_node_factory.go +++ b/insecure/corruptlibp2p/libp2p_node_factory.go @@ -80,28 +80,25 @@ func InitCorruptLibp2pNode( if err != nil { return nil, fmt.Errorf("could not get dht system activation status: %w", err) } - params := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: log, - MetricsConfig: metCfg, - NetworkingType: network.PrivateNetwork, - Address: address, - NetworkKey: flowKey, - SporkId: sporkId, - IdProvider: idProvider, - ResourceManagerParams: &netConfig.ResourceManager, - RpcInspectorParams: &netConfig.GossipSub.RpcInspector, - PeerManagerParams: peerManagerCfg, - SubscriptionProviderParams: &netConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: disallowListCacheCfg, - UnicastConfig: uniCfg, - GossipSubCfg: &netConfig.GossipSub, - } - builder, err := p2pbuilder.DefaultNodeBuilder(params, + builder, err := p2pbuilder.DefaultNodeBuilder( + log, + address, + network.PrivateNetwork, + flowKey, + sporkId, + idProvider, + metCfg, resolver, role, connGaterCfg, + peerManagerCfg, + &netConfig.GossipSub, + &netConfig.ResourceManager, + uniCfg, &netConfig.ConnectionManager, + disallowListCacheCfg, dhtActivationStatus) + if err != nil { return nil, fmt.Errorf("could not create corrupt libp2p node builder: %w", err) } diff --git a/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go b/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go index 44f5751353d..6a3e168f0f4 100644 --- a/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go +++ b/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go @@ -55,33 +55,15 @@ func TestValidationInspector_InvalidTopicId_Detection(t *testing.T) { invIHaveNotifCount := atomic.NewUint64(0) done := make(chan struct{}) expectedNumOfTotalNotif := 9 - - idProvider := mock.NewIdentityProvider(t) - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) - - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create unknown topic - unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)) - // create malformed topic - malformedTopic := channels.Topic(unittest.RandomStringFixture(t, 100)) - // a topics spork ID is considered invalid if it does not match the current spork ID - invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) - allTopics := []string{unknownTopic.String(), malformedTopic.String(), invalidSporkIDTopic.String()} - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). 
- Run(func(args mockery.Arguments) { + // ensure expected notifications are disseminated with expected error + inspectDisseminatedNotifyFunc := func(spammer *corruptlibp2p.GossipSubRouterSpammer) func(args mockery.Arguments) { + return func(args mockery.Arguments) { count.Inc() notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Contains(t, allTopics, notification.Errors[0].Topic()) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, channels.IsInvalidTopicErr(notification.Errors[0].Err)) + require.True(t, channels.IsInvalidTopicErr(notification.Error)) switch notification.MsgType { case p2pmsg.CtrlMsgGraft: invGraftNotifCount.Inc() @@ -90,13 +72,23 @@ func TestValidationInspector_InvalidTopicId_Detection(t *testing.T) { case p2pmsg.CtrlMsgIHave: invIHaveNotifCount.Inc() default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) + require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) } if count.Load() == uint64(expectedNumOfTotalNotif) { close(done) } - }). - Return(nil) + } + } + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) + p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) + withExpectedNotificationDissemination(expectedNumOfTotalNotif, inspectDisseminatedNotifyFunc)(distributor, spammer) meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() @@ -125,8 +117,15 @@ func TestValidationInspector_InvalidTopicId_Detection(t *testing.T) { idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + // create unknown topic + unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)) + // create malformed topic + malformedTopic := channels.Topic(unittest.RandomStringFixture(t, 100)) + // a topics spork ID is considered invalid if it does not match the current spork ID + invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation - topicProvider.UpdateTopics(allTopics) + topicProvider.UpdateTopics([]string{unknownTopic.String(), malformedTopic.String(), invalidSporkIDTopic.String()}) validationInspector.Start(signalerCtx) nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} @@ -136,7 +135,6 @@ func TestValidationInspector_InvalidTopicId_Detection(t *testing.T) { // prepare to spam - generate control messages graftCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, unknownTopic.String())) - 
graftCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, malformedTopic.String())) graftCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, invalidSporkIDTopic.String())) @@ -192,27 +190,13 @@ func TestValidationInspector_DuplicateTopicId_Detection(t *testing.T) { invGraftNotifCount := atomic.NewUint64(0) invPruneNotifCount := atomic.NewUint64(0) invIHaveNotifCount := atomic.NewUint64(0) - - idProvider := mock.NewIdentityProvider(t) - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) - - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - // a topics spork ID is considered invalid if it does not match the current spork ID - duplicateTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). - Run(func(args mockery.Arguments) { + inspectDisseminatedNotifyFunc := func(spammer *corruptlibp2p.GossipSubRouterSpammer) func(args mockery.Arguments) { + return func(args mockery.Arguments) { count.Inc() notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Equal(t, duplicateTopic.String(), notification.Errors[0].Topic()) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") - require.True(t, validation.IsDuplicateTopicErr(notification.Errors[0].Err)) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.True(t, validation.IsDuplicateTopicErr(notification.Error)) require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) switch notification.MsgType { case p2pmsg.CtrlMsgGraft: @@ -222,15 +206,24 @@ func TestValidationInspector_DuplicateTopicId_Detection(t *testing.T) { case p2pmsg.CtrlMsgIHave: invIHaveNotifCount.Inc() default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) + require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) } if count.Load() == int64(expectedNumOfTotalNotif) { close(done) } - }). 
- Return(nil) + } + } + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) + p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) + withExpectedNotificationDissemination(expectedNumOfTotalNotif, inspectDisseminatedNotifyFunc)(distributor, spammer) meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ @@ -259,6 +252,8 @@ func TestValidationInspector_DuplicateTopicId_Detection(t *testing.T) { idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + // a topics spork ID is considered invalid if it does not match the current spork ID + duplicateTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation topicProvider.UpdateTopics([]string{duplicateTopic.String()}) @@ -300,6 +295,24 @@ func TestValidationInspector_IHaveDuplicateMessageId_Detection(t *testing.T) { done := make(chan struct{}) expectedNumOfTotalNotif := 1 invIHaveNotifCount := atomic.NewUint64(0) + inspectDisseminatedNotifyFunc := func(spammer *corruptlibp2p.GossipSubRouterSpammer) func(args mockery.Arguments) { + return func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.True(t, validation.IsDuplicateTopicErr(notification.Error)) + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, + notification.MsgType == p2pmsg.CtrlMsgIHave, + fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + invIHaveNotifCount.Inc() + + if count.Load() == int64(expectedNumOfTotalNotif) { + close(done) + } + } + } idProvider := mock.NewIdentityProvider(t) spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) @@ -309,7 +322,7 @@ func TestValidationInspector_IHaveDuplicateMessageId_Detection(t *testing.T) { distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - + withExpectedNotificationDissemination(expectedNumOfTotalNotif, inspectDisseminatedNotifyFunc)(distributor, spammer) meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() @@ -347,6 +360,9 @@ func TestValidationInspector_IHaveDuplicateMessageId_Detection(t *testing.T) { validationInspector.Start(signalerCtx) nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) // generate 2 control messages with iHaves for different topics ihaveCtlMsgs1 := spammer.GenerateCtlMessages(1, p2ptest.WithIHave(1, 1, pushBlocks.String())) @@ -357,32 +373,6 @@ func 
TestValidationInspector_IHaveDuplicateMessageId_Detection(t *testing.T) { // duplicate message ids across different topics is valid ihaveCtlMsgs2[0].Ihave[0].MessageIDs[0] = ihaveCtlMsgs1[0].Ihave[0].MessageIDs[0] - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). - Run(func(args mockery.Arguments) { - count.Inc() - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Contains(t, ihaveCtlMsgs1[0].Ihave[0].MessageIDs, notification.Errors[0].MessageID()) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") - require.True(t, validation.IsDuplicateMessageIDErr(notification.Errors[0].Err)) - require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, - notification.MsgType == p2pmsg.CtrlMsgIHave, - fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - invIHaveNotifCount.Inc() - - if count.Load() == int64(expectedNumOfTotalNotif) { - close(done) - } - }). - Return(nil) - - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopComponents(t, cancel, nodes, validationInspector) - // start spamming the victim peer spammer.SpamControlMessage(t, victimNode, ihaveCtlMsgs1) spammer.SpamControlMessage(t, victimNode, ihaveCtlMsgs2) @@ -407,7 +397,7 @@ func TestValidationInspector_UnknownClusterId_Detection(t *testing.T) { // SafetyThreshold < messageCount < HardThreshold ensures that the RPC message will be further inspected and topic IDs will be checked // restricting the message count to 1 allows us to only aggregate a single error when the error is logged in the inspector. - messageCount := 1 + messageCount := 10 controlMessageCount := int64(1) count := atomic.NewInt64(0) @@ -415,42 +405,37 @@ func TestValidationInspector_UnknownClusterId_Detection(t *testing.T) { expectedNumOfTotalNotif := 2 invGraftNotifCount := atomic.NewUint64(0) invPruneNotifCount := atomic.NewUint64(0) - - idProvider := mock.NewIdentityProvider(t) - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - // setup cluster prefixed topic with an invalid cluster ID - unknownClusterID := channels.Topic(channels.SyncCluster("unknown-cluster-ID")) - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). 
- Run(func(args mockery.Arguments) { + inspectDisseminatedNotifyFunc := func(spammer *corruptlibp2p.GossipSubRouterSpammer) func(args mockery.Arguments) { + return func(args mockery.Arguments) { count.Inc() notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Equal(t, unknownClusterID.String(), notification.Errors[0].Topic()) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgTopicTypeClusterPrefixed) + require.Equal(t, notification.TopicType, p2p.CtrlMsgTopicTypeClusterPrefixed) require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, channels.IsUnknownClusterIDErr(notification.Errors[0].Err)) + require.True(t, channels.IsUnknownClusterIDErr(notification.Error)) switch notification.MsgType { case p2pmsg.CtrlMsgGraft: invGraftNotifCount.Inc() case p2pmsg.CtrlMsgPrune: invPruneNotifCount.Inc() default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) + require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) } if count.Load() == int64(expectedNumOfTotalNotif) { close(done) } - }). - Return(nil) + } + } + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) + p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) + withExpectedNotificationDissemination(expectedNumOfTotalNotif, inspectDisseminatedNotifyFunc)(distributor, spammer) meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ @@ -479,6 +464,8 @@ func TestValidationInspector_UnknownClusterId_Detection(t *testing.T) { idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Times(4) + // setup cluster prefixed topic with an invalid cluster ID + unknownClusterID := channels.Topic(channels.SyncCluster("unknown-cluster-ID")) // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation topicProvider.UpdateTopics([]string{unknownClusterID.String()}) @@ -516,23 +503,20 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Graft_Detection(t *testing.T inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation inspectorConfig.ClusterPrefixedMessage.HardThreshold = 5 inspectorConfig.NumberOfWorkers = 1 - controlMessageCount := int64(6) + controlMessageCount := int64(10) count := atomic.NewInt64(0) - invGraftNotifCount := atomic.NewInt64(0) - logHookDone := make(chan struct{}) - notificationDone := make(chan struct{}) + done := make(chan struct{}) expectedNumOfLogs := 5 - expectedNumOfTotalNotif := 1 + hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { if level == zerolog.WarnLevel { - if message == "failed to validate cluster prefixed control message with cluster prefixed topic active cluster ids not set" { + if message == "active cluster ids not set" { count.Inc() } - - if count.Load() == int64(expectedNumOfLogs) { - close(logHookDone) - } + } + if count.Load() == int64(expectedNumOfLogs) { + 
close(done) } }) logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) @@ -545,27 +529,6 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Graft_Detection(t *testing.T distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). - Run(func(args mockery.Arguments) { - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, validation.IsErrActiveClusterIDsNotSet(notification.Errors[0].Err)) - switch notification.MsgType { - case p2pmsg.CtrlMsgGraft: - invGraftNotifCount.Inc() - default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - } - - if invGraftNotifCount.Load() == int64(expectedNumOfTotalNotif) { - close(notificationDone) - } - }). - Return(nil) - meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() @@ -596,7 +559,6 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Graft_Detection(t *testing.T idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() // we expect controlMessageCount plus 1 extra call, this is due to messages that are exchanged when the nodes startup inspectorIdProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Times(int(controlMessageCount + 1)) - clusterPrefixedTopic := randomClusterPrefixedTopic() // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation @@ -615,8 +577,7 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Graft_Detection(t *testing.T // start spamming the victim peer spammer.SpamControlMessage(t, victimNode, ctlMsgs) - unittest.RequireCloseBefore(t, logHookDone, 2*time.Second, "failed to observe warning logs on time") - unittest.RequireCloseBefore(t, notificationDone, 2*time.Second, "failed to observe invalid control message notification on time") + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") } // TestValidationInspector_ActiveClusterIdsNotSet_Prune_Detection ensures that an error is returned only after the cluster prefixed topics received for a peer exceed the configured @@ -630,23 +591,19 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Prune_Detection(t *testing.T inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation inspectorConfig.ClusterPrefixedMessage.HardThreshold = 5 inspectorConfig.NumberOfWorkers = 1 - controlMessageCount := int64(6) + controlMessageCount := int64(10) count := atomic.NewInt64(0) - invPruneNotifCount := atomic.NewInt64(0) - logHookDone := make(chan struct{}) - notificationDone := make(chan struct{}) + done := make(chan struct{}) expectedNumOfLogs := 5 - expectedNumOfTotalNotif := 1 hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { if level == zerolog.WarnLevel { - if message == "failed to validate cluster prefixed control message with cluster prefixed topic active cluster ids not set" { + if message == "active cluster ids not set" { count.Inc() } - - if count.Load() == int64(expectedNumOfLogs) { - close(logHookDone) - } + } + if count.Load() == int64(expectedNumOfLogs) { + close(done) } }) logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) @@ 
-658,27 +615,6 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Prune_Detection(t *testing.T distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). - Run(func(args mockery.Arguments) { - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, validation.IsErrActiveClusterIDsNotSet(notification.Errors[0].Err)) - switch notification.MsgType { - case p2pmsg.CtrlMsgPrune: - invPruneNotifCount.Inc() - default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - } - - if invPruneNotifCount.Load() == int64(expectedNumOfTotalNotif) { - close(notificationDone) - } - }). - Return(nil) - meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() inspectorIdProvider := mock.NewIdentityProvider(t) @@ -726,8 +662,7 @@ func TestValidationInspector_ActiveClusterIdsNotSet_Prune_Detection(t *testing.T // start spamming the victim peer spammer.SpamControlMessage(t, victimNode, ctlMsgs) - unittest.RequireCloseBefore(t, logHookDone, 2*time.Second, "failed to observe warning logs on time") - unittest.RequireCloseBefore(t, notificationDone, 2*time.Second, "failed to observe invalid control message notification on time") + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") } // TestValidationInspector_Unstaked_Node_Detection ensures that RPC control message inspector disseminates an invalid control message notification when an unstaked peer @@ -743,7 +678,25 @@ func TestValidationInspector_UnstakedNode_Detection(t *testing.T) { inspectorConfig.ClusterPrefixedMessage.HardThreshold = 0 inspectorConfig.NumberOfWorkers = 1 + // SafetyThreshold < messageCount < HardThreshold ensures that the RPC message will be further inspected and topic IDs will be checked + // restricting the message count to 1 allows us to only aggregate a single error when the error is logged in the inspector. + messageCount := 10 + controlMessageCount := int64(1) + + count := atomic.NewInt64(0) done := make(chan struct{}) + expectedNumOfLogs := 2 + hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { + if level == zerolog.WarnLevel { + if message == "control message received from unstaked peer" { + count.Inc() + } + } + if count.Load() == int64(expectedNumOfLogs) { + close(done) + } + }) + logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) idProvider := mock.NewIdentityProvider(t) spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) @@ -752,42 +705,12 @@ func TestValidationInspector_UnstakedNode_Detection(t *testing.T) { distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - - invGraftNotifCount := atomic.NewInt64(0) - invPruneNotifCount := atomic.NewInt64(0) - invIHaveNotifCount := atomic.NewInt64(0) - expectedNumOfTotalNotif := 3 - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). 
- Run(func(args mockery.Arguments) { - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, validation.IsErrUnstakedPeer(notification.Errors[0].Err)) - switch notification.MsgType { - case p2pmsg.CtrlMsgGraft: - invGraftNotifCount.Inc() - case p2pmsg.CtrlMsgPrune: - invPruneNotifCount.Inc() - case p2pmsg.CtrlMsgIHave: - invIHaveNotifCount.Inc() - default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - } - - if (invGraftNotifCount.Load() + invPruneNotifCount.Load() + invIHaveNotifCount.Load()) == int64(expectedNumOfTotalNotif) { - close(done) - } - }). - Return(nil) - p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) meshTracer := meshTracerFixture(flowConfig, idProvider) topicProvider := newMockUpdatableTopicProvider() inspectorIdProvider := mock.NewIdentityProvider(t) validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ - Logger: unittest.Logger(), + Logger: logger, SporkID: sporkID, Config: &inspectorConfig, Distributor: distributor, @@ -810,9 +733,8 @@ func TestValidationInspector_UnstakedNode_Detection(t *testing.T) { internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() - controlMessageCount := 3 - // we expect controlMessageCount plus 1 extra call, this is due to messages that are exchanged when the nodes startup - inspectorIdProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(nil, false).Times(controlMessageCount + 1) + // we expect 2 calls from notification inspection plus 1 extra call, this is due to messages that are exchanged when the nodes startup + inspectorIdProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(nil, false).Times(3) // setup cluster prefixed topic with an invalid cluster ID clusterID := flow.ChainID("known-cluster-id") @@ -830,14 +752,12 @@ func TestValidationInspector_UnstakedNode_Detection(t *testing.T) { defer stopComponents(t, cancel, nodes, validationInspector) // prepare to spam - generate control messages - grafts := spammer.GenerateCtlMessages(1, p2ptest.WithGraft(1, clusterIDTopic.String())) - prunes := spammer.GenerateCtlMessages(1, p2ptest.WithPrune(1, clusterIDTopic.String())) - ihaves := spammer.GenerateCtlMessages(1, p2ptest.WithIHave(1, 100, clusterIDTopic.String())) + graftCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, clusterIDTopic.String())) + pruneCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(messageCount, clusterIDTopic.String())) // start spamming the victim peer - spammer.SpamControlMessage(t, victimNode, grafts) - spammer.SpamControlMessage(t, victimNode, prunes) - spammer.SpamControlMessage(t, victimNode, ihaves) + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsDuplicateTopic) + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsDuplicateTopic) unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") } @@ -864,10 +784,13 @@ func TestValidationInspector_InspectIWants_CacheMissThreshold(t *testing.T) { return func(args mockery.Arguments) { 
notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.True(t, notification.MsgType == p2pmsg.CtrlMsgIWant, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - require.True(t, validation.IsIWantCacheMissThresholdErr(notification.Errors[0].Err)) + require.True(t, + notification.MsgType == p2pmsg.CtrlMsgIWant, + fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + require.True(t, validation.IsIWantCacheMissThresholdErr(notification.Error)) + cacheMissThresholdNotifCount.Inc() if cacheMissThresholdNotifCount.Load() == 1 { close(done) @@ -995,16 +918,18 @@ func TestValidationInspector_InspectRpcPublishMessages(t *testing.T) { return func(args mockery.Arguments) { notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - require.Equal(t, p2pmsg.RpcPublishMessage, notification.MsgType, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - require.True(t, validation.IsInvalidRpcPublishMessagesErr(notification.Errors[0].Err)) + require.True(t, + notification.MsgType == p2pmsg.RpcPublishMessage, + fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + require.True(t, validation.IsInvalidRpcPublishMessagesErr(notification.Error)) require.Contains(t, - notification.Errors[0].Err.Error(), + notification.Error.Error(), fmt.Sprintf("%d error(s) encountered", len(invalidPublishMsgs)), fmt.Sprintf("expected %d errors, an error for each invalid pubsub message", len(invalidPublishMsgs))) - require.Contains(t, notification.Errors[0].Err.Error(), fmt.Sprintf("received rpc publish message from unstaked peer: %s", unknownPeerID)) - require.Contains(t, notification.Errors[0].Err.Error(), fmt.Sprintf("received rpc publish message from ejected peer: %s", ejectedIdentityPeerID)) + require.Contains(t, notification.Error.Error(), fmt.Sprintf("received rpc publish message from unstaked peer: %s", unknownPeerID)) + require.Contains(t, notification.Error.Error(), fmt.Sprintf("received rpc publish message from ejected peer: %s", ejectedIdentityPeerID)) notificationCount.Inc() if notificationCount.Load() == 1 { close(done) @@ -1081,156 +1006,6 @@ func TestValidationInspector_InspectRpcPublishMessages(t *testing.T) { require.Equal(t, uint64(1), notificationCount.Load()) } -// TestValidationInspector_MultiErrorNotification ensures that control message validation captures all errors for invalid control messages for a single control message type. 
RPC validation is performed -// on each control message type, during this validation a sample of the control messages for that type are inspected. This test ensures that the entire sample is inspected and all errors are captured in -// the p2p.InvCtrlMsgErrs field of the invalid control message notification. -func TestValidationInspector_MultiErrorNotification(t *testing.T) { - t.Parallel() - role := flow.RoleConsensus - sporkID := unittest.IdentifierFixture() - flowConfig, err := config.DefaultConfig() - require.NoError(t, err) - inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation - - inspectorConfig.NumberOfWorkers = 1 - controlMessageCount := int64(1) - invGraftCount := atomic.NewUint64(0) - invPruneCount := atomic.NewUint64(0) - invIHaveCount := atomic.NewUint64(0) - invIWantCount := atomic.NewUint64(0) - done := make(chan struct{}) - expectedNumOfTotalNotif := 4 - - requireExpectedNotifErr := func(notification *p2p.InvCtrlMsgNotif) { - for _, invErr := range notification.Errors { - require.True(t, channels.IsInvalidTopicErr(invErr.Err)) - } - } - - idProvider := mock.NewIdentityProvider(t) - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) - - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - p2ptest.MockInspectorNotificationDistributorReadyDoneAware(distributor) - topicProvider := newMockUpdatableTopicProvider() - distributor. - On("Distribute", mockery.Anything). - Times(expectedNumOfTotalNotif). - Run(func(args mockery.Arguments) { - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) - switch notification.MsgType { - case p2pmsg.CtrlMsgGraft: - requireExpectedNotifErr(notification) - require.Len(t, notification.Errors, 3) - invGraftCount.Inc() - case p2pmsg.CtrlMsgPrune: - requireExpectedNotifErr(notification) - require.Len(t, notification.Errors, 3) - invPruneCount.Inc() - case p2pmsg.CtrlMsgIHave: - requireExpectedNotifErr(notification) - require.Len(t, notification.Errors, 3) - invIHaveCount.Inc() - case p2pmsg.CtrlMsgIWant: - require.Len(t, notification.Errors, 1) - require.True(t, validation.IsIWantDuplicateMsgIDThresholdErr(notification.Errors[0].Err)) - invIWantCount.Inc() - default: - require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Errors[0].Err)) - } - - if invGraftCount.Load()+invPruneCount.Load()+invIHaveCount.Load()+invIWantCount.Load() == uint64(expectedNumOfTotalNotif) { - close(done) - } - }). 
- Return(nil) - - meshTracer := meshTracerFixture(flowConfig, idProvider) - - inspectorMetrics := mock.NewGossipSubRpcValidationInspectorMetrics(t) - inspectorMetrics.On("AsyncProcessingStarted") - inspectorMetrics.On("OnIHaveMessageIDsReceived", mockery.AnythingOfType("string"), mockery.AnythingOfType("int")) - inspectorMetrics.On("OnIWantMessageIDsReceived", mockery.AnythingOfType("int")) - inspectorMetrics.On("OnIncomingRpcReceived", mockery.AnythingOfType("int"), mockery.AnythingOfType("int"), mockery.AnythingOfType("int"), mockery.AnythingOfType("int"), mockery.AnythingOfType("int")) - inspectorMetrics.On("AsyncProcessingFinished", mockery.AnythingOfType("time.Duration")) - inspectorMetrics.On("InvalidControlMessageNotificationError", mockery.AnythingOfType("p2pmsg.ControlMessageType"), mockery.AnythingOfType("float64")).Run(func(args mockery.Arguments) { - count, ok := args.Get(1).(float64) - require.True(t, ok) - // count is expected to be 3 for graft, prune and ihave messages that have been configured with 3 invalid topics. - // 1 for iwant control message with a single duplicate message ID error. - require.True(t, count == 3 || count == 1) - }) - validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ - Logger: unittest.Logger(), - SporkID: sporkID, - Config: &inspectorConfig, - Distributor: distributor, - IdProvider: idProvider, - HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), - InspectorMetrics: inspectorMetrics, - RpcTracker: meshTracer, - NetworkingType: network.PrivateNetwork, - TopicOracle: func() p2p.TopicProvider { - return topicProvider - }, - }) - require.NoError(t, err) - - corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) - victimNode, victimIdentity := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - idProvider, - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), - corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc)), - ) - idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() - idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() - - // create unknown topic - unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)) - // create malformed topic - malformedTopic := channels.Topic(channels.TestNetworkChannel) - // a topics spork ID is considered invalid if it does not match the current spork ID - invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) - // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation - topicProvider.UpdateTopics([]string{unknownTopic.String(), malformedTopic.String(), invalidSporkIDTopic.String()}) - - validationInspector.Start(signalerCtx) - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopComponents(t, cancel, nodes, validationInspector) - - // prepare to spam - generate control messages - grafts := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGrafts(unknownTopic.String(), malformedTopic.String(), invalidSporkIDTopic.String())) - prunes := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrunes(unknownTopic.String(), malformedTopic.String(), invalidSporkIDTopic.String())) - ihaves := 
spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIHaves(100, unknownTopic.String(), malformedTopic.String(), invalidSporkIDTopic.String())) - iwants := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIWantMessageIds("duplicate_message_id", "duplicate_message_id")) - - // spam the victim peer with invalid graft messages - spammer.SpamControlMessage(t, victimNode, grafts) - // spam the victim peer with invalid prune messages - spammer.SpamControlMessage(t, victimNode, prunes) - // spam the victim peer with invalid ihaves messages - spammer.SpamControlMessage(t, victimNode, ihaves) - // spam the victim peer with invalid iwants messages - spammer.SpamControlMessage(t, victimNode, iwants) - unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") - - require.Equal(t, uint64(1), invGraftCount.Load()) - require.Equal(t, uint64(1), invPruneCount.Load()) - require.Equal(t, uint64(1), invIWantCount.Load()) - require.Equal(t, uint64(1), invIHaveCount.Load()) -} - // TestGossipSubSpamMitigationIntegration tests that the spam mitigation feature of GossipSub is working as expected. // The test puts toghether the spam detection (through the GossipSubInspector) and the spam mitigation (through the // scoring system) and ensures that the mitigation is triggered when the spam detection detects spam. @@ -1278,8 +1053,8 @@ func TestGossipSubSpamMitigationIntegration(t *testing.T) { } }) - spamRpcCount := 1000 // total number of individual rpc messages to send - spamCtrlMsgCount := int64(500) // total number of control messages to send on each RPC + spamRpcCount := 100 // total number of individual rpc messages to send + spamCtrlMsgCount := int64(100) // total number of control messages to send on each RPC // unknownTopic is an unknown topic to the victim node but shaped like a valid topic (i.e., it has the correct prefix and spork ID). unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)) diff --git a/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go b/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go index f44dc41faab..8d6cfec2cef 100644 --- a/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go +++ b/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go @@ -215,11 +215,6 @@ func TestGossipSubIHaveBrokenPromises_Above_Threshold(t *testing.T) { // score tracer interval is set to 500 milliseconds to speed up the test, it should be shorter than the heartbeat interval (1 second) of gossipsub to catch the score updates in time. conf.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 500 * time.Millisecond - // relaxing the scoring parameters to fit the test scenario. - conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyDecay = 0.99 - conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyThreshold = 10 - conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyWeight = -1 - ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. @@ -229,6 +224,12 @@ func TestGossipSubIHaveBrokenPromises_Above_Threshold(t *testing.T) { // the node would be penalized for invalid message delivery way sooner than it can mount an ihave broken-promises spam attack. 
blockTopicOverrideParams.InvalidMessageDeliveriesWeight = 0.0 blockTopicOverrideParams.InvalidMessageDeliveriesDecay = 0.0 + + // relaxing the scoring parameters to fit the test scenario. + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyDecay = 0.99 + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyThreshold = 10 + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyWeight = -1 + victimNode, victimIdentity := p2ptest.NodeFixture( t, sporkId, diff --git a/module/metrics.go b/module/metrics.go index 6a478e74685..9713e022430 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -255,9 +255,6 @@ type GossipSubRpcValidationInspectorMetrics interface { // the number of inspect message requests being processed asynchronously by the rpc validation inspector workers. AsyncProcessingFinished(duration time.Duration) - // InvalidControlMessageNotificationError tracks the number of errors in each invalid control message notification per msg type. - InvalidControlMessageNotificationError(msgType p2pmsg.ControlMessageType, count float64) - // OnIHaveControlMessageIdsTruncated tracks the number of times message ids on an iHave message were truncated. // Note that this function is called only when the message ids are truncated from an iHave message, not when the iHave message itself is truncated. // This is different from the OnControlMessagesTruncated function which is called when a slice of control messages truncated from an RPC with all their message ids. diff --git a/module/metrics/gossipsub_rpc_validation_inspector.go b/module/metrics/gossipsub_rpc_validation_inspector.go index c67d8209ae4..69706fa0083 100644 --- a/module/metrics/gossipsub_rpc_validation_inspector.go +++ b/module/metrics/gossipsub_rpc_validation_inspector.go @@ -20,7 +20,6 @@ type GossipSubRpcValidationInspectorMetrics struct { prefix string rpcCtrlMsgInAsyncPreProcessingGauge prometheus.Gauge rpcCtrlMsgAsyncProcessingTimeHistogram prometheus.Histogram - invalidCtrlMessageErrorCount *prometheus.CounterVec rpcCtrlMsgTruncation prometheus.HistogramVec receivedIWantMsgCount prometheus.Counter receivedIWantMsgIDsHistogram prometheus.Histogram @@ -36,9 +35,7 @@ var _ module.GossipSubRpcValidationInspectorMetrics = (*GossipSubRpcValidationIn // NewGossipSubRPCValidationInspectorMetrics returns a new *GossipSubRpcValidationInspectorMetrics. 
func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValidationInspectorMetrics { - gc := &GossipSubRpcValidationInspectorMetrics{ - prefix: prefix, - } + gc := &GossipSubRpcValidationInspectorMetrics{prefix: prefix} gc.rpcCtrlMsgInAsyncPreProcessingGauge = promauto.NewGauge( prometheus.GaugeOpts{ Namespace: namespaceNetwork, @@ -57,15 +54,6 @@ func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValid }, ) - gc.invalidCtrlMessageErrorCount = promauto.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, - Name: gc.prefix + "rpc_invalid_control_message_error_total", - Help: "the number of invalid control message errors for a specific msg type", - }, []string{LabelCtrlMsgType}, - ) - gc.rpcCtrlMsgTruncation = *promauto.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, @@ -145,12 +133,7 @@ func (c *GossipSubRpcValidationInspectorMetrics) AsyncProcessingFinished(duratio c.rpcCtrlMsgAsyncProcessingTimeHistogram.Observe(duration.Seconds()) } -// InvalidControlMessageNotificationError tracks the number of errors in each invalid control message notification over time per msg type. -func (c *GossipSubRpcValidationInspectorMetrics) InvalidControlMessageNotificationError(msgType p2pmsg.ControlMessageType, count float64) { - c.invalidCtrlMessageErrorCount.WithLabelValues(msgType.String()).Add(count) -} - -// OnControlMessagesTruncated tracks the number of times a control message was truncated. +// OnControlMessageIDsTruncated tracks the number of times a control message was truncated. // Args: // // messageType: the type of the control message that was truncated diff --git a/module/metrics/labels.go b/module/metrics/labels.go index f698919c303..d9c46ff9704 100644 --- a/module/metrics/labels.go +++ b/module/metrics/labels.go @@ -23,7 +23,6 @@ const ( LabelStatusCode = "code" LabelMethod = "method" LabelService = "service" - LabelCtrlMsgType = "ctlmsgtype" LabelRejectionReason = "rejection_reason" ) diff --git a/module/metrics/noop.go b/module/metrics/noop.go index fc9b9891b7a..b2d2f8d08aa 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -318,9 +318,7 @@ func (nc *NoopCollector) OnBehaviourPenaltyUpdated(f float64) func (nc *NoopCollector) OnIPColocationFactorUpdated(f float64) {} func (nc *NoopCollector) OnAppSpecificScoreUpdated(f float64) {} func (nc *NoopCollector) OnOverallPeerScoreUpdated(f float64) {} - -func (nc *NoopCollector) InvalidControlMessageNotificationError(p2pmsg.ControlMessageType, float64) {} -func (nc *NoopCollector) OnIHaveControlMessageIdsTruncated(diff int) {} +func (nc *NoopCollector) OnIHaveControlMessageIdsTruncated(diff int) {} func (nc *NoopCollector) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { } func (nc *NoopCollector) OnIncomingRpcReceived(iHaveCount, iWantCount, graftCount, pruneCount, msgCount int) { diff --git a/module/mock/gossip_sub_metrics.go b/module/mock/gossip_sub_metrics.go index 1e34580c19d..f0fbdec5cfd 100644 --- a/module/mock/gossip_sub_metrics.go +++ b/module/mock/gossip_sub_metrics.go @@ -26,11 +26,6 @@ func (_m *GossipSubMetrics) AsyncProcessingStarted() { _m.Called() } -// InvalidControlMessageNotificationError provides a mock function with given fields: msgType, count -func (_m *GossipSubMetrics) InvalidControlMessageNotificationError(msgType p2pmsg.ControlMessageType, count float64) { - _m.Called(msgType, count) -} - // OnAppSpecificScoreUpdated provides a mock 
function with given fields: _a0 func (_m *GossipSubMetrics) OnAppSpecificScoreUpdated(_a0 float64) { _m.Called(_a0) diff --git a/module/mock/gossip_sub_rpc_validation_inspector_metrics.go b/module/mock/gossip_sub_rpc_validation_inspector_metrics.go index 96917654fc7..0a10b099844 100644 --- a/module/mock/gossip_sub_rpc_validation_inspector_metrics.go +++ b/module/mock/gossip_sub_rpc_validation_inspector_metrics.go @@ -25,11 +25,6 @@ func (_m *GossipSubRpcValidationInspectorMetrics) AsyncProcessingStarted() { _m.Called() } -// InvalidControlMessageNotificationError provides a mock function with given fields: msgType, count -func (_m *GossipSubRpcValidationInspectorMetrics) InvalidControlMessageNotificationError(msgType p2pmsg.ControlMessageType, count float64) { - _m.Called(msgType, count) -} - // OnControlMessagesTruncated provides a mock function with given fields: messageType, diff func (_m *GossipSubRpcValidationInspectorMetrics) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { _m.Called(messageType, diff) diff --git a/module/mock/lib_p2_p_metrics.go b/module/mock/lib_p2_p_metrics.go index 69b671b1653..d555414cd6a 100644 --- a/module/mock/lib_p2_p_metrics.go +++ b/module/mock/lib_p2_p_metrics.go @@ -112,11 +112,6 @@ func (_m *LibP2PMetrics) InboundConnections(connectionCount uint) { _m.Called(connectionCount) } -// InvalidControlMessageNotificationError provides a mock function with given fields: msgType, count -func (_m *LibP2PMetrics) InvalidControlMessageNotificationError(msgType p2pmsg.ControlMessageType, count float64) { - _m.Called(msgType, count) -} - // OnAppSpecificScoreUpdated provides a mock function with given fields: _a0 func (_m *LibP2PMetrics) OnAppSpecificScoreUpdated(_a0 float64) { _m.Called(_a0) diff --git a/module/mock/network_metrics.go b/module/mock/network_metrics.go index 3d04c9e42c7..cc941222bc8 100644 --- a/module/mock/network_metrics.go +++ b/module/mock/network_metrics.go @@ -122,11 +122,6 @@ func (_m *NetworkMetrics) InboundMessageReceived(sizeBytes int, topic string, _a _m.Called(sizeBytes, topic, _a2, messageType) } -// InvalidControlMessageNotificationError provides a mock function with given fields: msgType, count -func (_m *NetworkMetrics) InvalidControlMessageNotificationError(msgType p2pmsg.ControlMessageType, count float64) { - _m.Called(msgType, count) -} - // MessageAdded provides a mock function with given fields: priority func (_m *NetworkMetrics) MessageAdded(priority int) { _m.Called(priority) diff --git a/network/internal/p2pfixtures/fixtures.go b/network/internal/p2pfixtures/fixtures.go index d32f74cffef..1897ccc9a10 100644 --- a/network/internal/p2pfixtures/fixtures.go +++ b/network/internal/p2pfixtures/fixtures.go @@ -99,33 +99,27 @@ func CreateNode(t *testing.T, networkKey crypto.PrivateKey, sporkID flow.Identif defaultFlowConfig, err := config.DefaultConfig() require.NoError(t, err) - params := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: logger, - MetricsConfig: &p2pbuilderconfig.MetricsConfig{ + builder := p2pbuilder.NewNodeBuilder( + logger, + &defaultFlowConfig.NetworkConfig.GossipSub, + &p2pbuilderconfig.MetricsConfig{ HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(), Metrics: metrics.NewNoopCollector(), }, - NetworkingType: flownet.PrivateNetwork, - Address: unittest.DefaultAddress, - NetworkKey: networkKey, - SporkId: sporkID, - IdProvider: idProvider, - ResourceManagerParams: &defaultFlowConfig.NetworkConfig.ResourceManager, - RpcInspectorParams: 
&defaultFlowConfig.NetworkConfig.GossipSub.RpcInspector, - PeerManagerParams: p2pbuilderconfig.PeerManagerDisableConfig(), - SubscriptionProviderParams: &defaultFlowConfig.NetworkConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: &p2p.DisallowListCacheConfig{ + flownet.PrivateNetwork, + unittest.DefaultAddress, + networkKey, + sporkID, + idProvider, + &defaultFlowConfig.NetworkConfig.ResourceManager, + p2pbuilderconfig.PeerManagerDisableConfig(), + &p2p.DisallowListCacheConfig{ MaxSize: uint32(1000), Metrics: metrics.NewNoopCollector(), }, - UnicastConfig: &p2pbuilderconfig.UnicastConfig{ + &p2pbuilderconfig.UnicastConfig{ Unicast: defaultFlowConfig.NetworkConfig.Unicast, - }, - GossipSubCfg: &defaultFlowConfig.NetworkConfig.GossipSub, - } - builder, err := p2pbuilder.NewNodeBuilder(params) - require.NoError(t, err) - builder. + }). SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) { return p2pdht.NewDHT(c, h, protocols.FlowDHTProtocolID(sporkID), zerolog.Nop(), metrics.NewNoopCollector()) }). diff --git a/network/netconf/flags.go b/network/netconf/flags.go index 818a606f353..7c8f8ca5d90 100644 --- a/network/netconf/flags.go +++ b/network/netconf/flags.go @@ -36,11 +36,10 @@ const ( fileDescriptorsLimit = "fd" memoryLimitBytes = "memory-bytes" - alspDisabled = "alsp-disable-penalty" - alspSpamRecordCacheSize = "alsp-spam-record-cache-size" - alspSpamRecordQueueSize = "alsp-spam-report-queue-size" - alspHearBeatInterval = "alsp-heart-beat-interval" - + alspDisabled = "alsp-disable-penalty" + alspSpamRecordCacheSize = "alsp-spam-record-cache-size" + alspSpamRecordQueueSize = "alsp-spam-report-queue-size" + alspHearBeatInterval = "alsp-heart-beat-interval" alspSyncEngineBatchRequestBaseProb = "alsp-sync-engine-batch-request-base-prob" alspSyncEngineRangeRequestBaseProb = "alsp-sync-engine-range-request-base-prob" alspSyncEngineSyncRequestProb = "alsp-sync-engine-sync-request-prob" diff --git a/network/p2p/builder/libp2pNodeBuilder.go b/network/p2p/builder/libp2pNodeBuilder.go index 855a615104a..19ef19afa6b 100644 --- a/network/p2p/builder/libp2pNodeBuilder.go +++ b/network/p2p/builder/libp2pNodeBuilder.go @@ -6,7 +6,6 @@ import ( "fmt" "net" - "github.com/go-playground/validator/v10" "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/config" @@ -74,49 +73,39 @@ type LibP2PNodeBuilder struct { networkingType flownet.NetworkingType // whether the node is running in private (staked) or public (unstaked) network } -// LibP2PNodeBuilderConfig parameters required to create a new *LibP2PNodeBuilder with NewNodeBuilder. 
-type LibP2PNodeBuilderConfig struct { - Logger zerolog.Logger `validate:"required"` - MetricsConfig *p2pbuilderconfig.MetricsConfig `validate:"required"` - NetworkingType flownet.NetworkingType `validate:"required"` - Address string `validate:"required"` - NetworkKey fcrypto.PrivateKey `validate:"required"` - SporkId flow.Identifier `validate:"required"` - IdProvider module.IdentityProvider `validate:"required"` - ResourceManagerParams *p2pconfig.ResourceManagerConfig `validate:"required"` - RpcInspectorParams *p2pconfig.RpcInspectorParameters `validate:"required"` - PeerManagerParams *p2pbuilderconfig.PeerManagerConfig `validate:"required"` - SubscriptionProviderParams *p2pconfig.SubscriptionProviderParameters `validate:"required"` - DisallowListCacheCfg *p2p.DisallowListCacheConfig `validate:"required"` - UnicastConfig *p2pbuilderconfig.UnicastConfig `validate:"required"` - GossipSubCfg *p2pconfig.GossipSubParameters `validate:"required"` -} - -func NewNodeBuilder(params *LibP2PNodeBuilderConfig) (*LibP2PNodeBuilder, error) { - err := validator.New().Struct(params) - if err != nil { - return nil, fmt.Errorf("libp2p node builder params validation failed: %w", err) - } +func NewNodeBuilder( + logger zerolog.Logger, + gossipSubCfg *p2pconfig.GossipSubParameters, + metricsConfig *p2pbuilderconfig.MetricsConfig, + networkingType flownet.NetworkingType, + address string, + networkKey fcrypto.PrivateKey, + sporkId flow.Identifier, + idProvider module.IdentityProvider, + rCfg *p2pconfig.ResourceManagerConfig, + peerManagerConfig *p2pbuilderconfig.PeerManagerConfig, + disallowListCacheCfg *p2p.DisallowListCacheConfig, + unicastConfig *p2pbuilderconfig.UnicastConfig, +) *LibP2PNodeBuilder { return &LibP2PNodeBuilder{ - logger: params.Logger, - sporkId: params.SporkId, - address: params.Address, - networkKey: params.NetworkKey, + logger: logger, + sporkId: sporkId, + address: address, + networkKey: networkKey, createNode: func(cfg *p2p.NodeConfig) (p2p.LibP2PNode, error) { return p2pnode.NewNode(cfg) }, - metricsConfig: params.MetricsConfig, - resourceManagerCfg: params.ResourceManagerParams, - disallowListCacheCfg: params.DisallowListCacheCfg, - networkingType: params.NetworkingType, - gossipSubBuilder: gossipsubbuilder.NewGossipSubBuilder(params.Logger, - params.MetricsConfig, - params.GossipSubCfg, - params.NetworkingType, - params.SporkId, - params.IdProvider, - ), - peerManagerConfig: params.PeerManagerParams, - unicastConfig: params.UnicastConfig, - }, nil + metricsConfig: metricsConfig, + resourceManagerCfg: rCfg, + disallowListCacheCfg: disallowListCacheCfg, + networkingType: networkingType, + gossipSubBuilder: gossipsubbuilder.NewGossipSubBuilder(logger, + metricsConfig, + gossipSubCfg, + networkingType, + sporkId, + idProvider), + peerManagerConfig: peerManagerConfig, + unicastConfig: unicastConfig, + } } var _ p2p.NodeBuilder = &LibP2PNodeBuilder{} @@ -403,34 +392,52 @@ func defaultLibP2POptions(address string, key fcrypto.PrivateKey) ([]config.Opti } // DefaultNodeBuilder returns a node builder. 
-func DefaultNodeBuilder(params *LibP2PNodeBuilderConfig, +func DefaultNodeBuilder( + logger zerolog.Logger, + address string, + networkingType flownet.NetworkingType, + flowKey fcrypto.PrivateKey, + sporkId flow.Identifier, + idProvider module.IdentityProvider, + metricsCfg *p2pbuilderconfig.MetricsConfig, resolver madns.BasicResolver, role string, connGaterCfg *p2pbuilderconfig.ConnectionGaterConfig, + peerManagerCfg *p2pbuilderconfig.PeerManagerConfig, + gossipCfg *p2pconfig.GossipSubParameters, + rCfg *p2pconfig.ResourceManagerConfig, + uniCfg *p2pbuilderconfig.UnicastConfig, connMgrConfig *netconf.ConnectionManager, + disallowListCacheCfg *p2p.DisallowListCacheConfig, dhtSystemActivation DhtSystemActivation, ) (p2p.NodeBuilder, error) { - connManager, err := connection.NewConnManager(params.Logger, params.MetricsConfig.Metrics, connMgrConfig) + connManager, err := connection.NewConnManager(logger, metricsCfg.Metrics, connMgrConfig) if err != nil { return nil, fmt.Errorf("could not create connection manager: %w", err) } // set the default connection gater peer filters for both InterceptPeerDial and InterceptSecured callbacks - peerFilter := notEjectedPeerFilter(params.IdProvider) + peerFilter := notEjectedPeerFilter(idProvider) peerFilters := []p2p.PeerFilter{peerFilter} connGater := connection.NewConnGater( - params.Logger, - params.IdProvider, + logger, + idProvider, connection.WithOnInterceptPeerDialFilters(append(peerFilters, connGaterCfg.InterceptPeerDialFilters...)), connection.WithOnInterceptSecuredFilters(append(peerFilters, connGaterCfg.InterceptSecuredFilters...))) - builder, err := NewNodeBuilder(params) - if err != nil { - return nil, fmt.Errorf("could not create libp2p node builder: %w", err) - } - builder. + builder := NewNodeBuilder(logger, + gossipCfg, + metricsCfg, + networkingType, + address, + flowKey, + sporkId, + idProvider, + rCfg, peerManagerCfg, + disallowListCacheCfg, + uniCfg). SetBasicResolver(resolver). SetConnectionManager(connManager). SetConnectionGater(connGater) @@ -440,12 +447,12 @@ func DefaultNodeBuilder(params *LibP2PNodeBuilderConfig, if err != nil { return nil, fmt.Errorf("could not parse role: %w", err) } - builder.SetSubscriptionFilter(subscription.NewRoleBasedFilter(r, params.IdProvider)) + builder.SetSubscriptionFilter(subscription.NewRoleBasedFilter(r, idProvider)) if dhtSystemActivation == DhtSystemEnabled { builder.SetRoutingSystem( func(ctx context.Context, host host.Host) (routing.Routing, error) { - return dht.NewDHT(ctx, host, protocols.FlowDHTProtocolID(params.SporkId), params.Logger, params.MetricsConfig.Metrics, dht.AsServer()) + return dht.NewDHT(ctx, host, protocols.FlowDHTProtocolID(sporkId), logger, metricsCfg.Metrics, dht.AsServer()) }) } } diff --git a/network/p2p/config/peer_scoring.go b/network/p2p/config/peer_scoring.go index 92bfc6bd911..25b70e77687 100644 --- a/network/p2p/config/peer_scoring.go +++ b/network/p2p/config/peer_scoring.go @@ -34,7 +34,7 @@ type InternalGossipSubScoreParams struct { // DecayInterval is the decay interval for the overall score of a peer at the GossipSub scoring // system. We set it to 1 minute so that it is not too short so that a malicious node can recover from a penalty // and is not too long so that a well-behaved node can't recover from a penalty. 
- DecayInterval time.Duration `validate:"gt=0" mapstructure:"decay-interval"` + DecayInterval time.Duration `validate:"gte=1m" mapstructure:"decay-interval"` // DecayToZero is the decay to zero for the overall score of a peer at the GossipSub scoring system. // It defines the maximum value below which a peer scoring counter is reset to zero. // This is to prevent the counter from decaying to a very small value. diff --git a/network/p2p/consumers.go b/network/p2p/consumers.go index 43302e70024..f079a3864af 100644 --- a/network/p2p/consumers.go +++ b/network/p2p/consumers.go @@ -1,9 +1,6 @@ package p2p import ( - "fmt" - - "github.com/hashicorp/go-multierror" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" @@ -11,109 +8,6 @@ import ( p2pmsg "github.com/onflow/flow-go/network/p2p/message" ) -// InvCtrlMsgErrs list of InvCtrlMsgErr -type InvCtrlMsgErrs []*InvCtrlMsgErr - -func (i InvCtrlMsgErrs) Error() error { - var errs *multierror.Error - for _, err := range i { - switch { - case err.Topic() != "": - errs = multierror.Append(errs, fmt.Errorf("%w (topic: %s)", err.Err, err.Topic())) - case err.MessageID() != "": - errs = multierror.Append(errs, fmt.Errorf("%w (messageID: %s)", err.Err, err.MessageID())) - default: - errs = multierror.Append(errs, err.Err) - } - } - return errs.ErrorOrNil() -} - -func (i InvCtrlMsgErrs) Len() int { - return len(i) -} - -// InvCtrlMsgErr represents an error that occurred during control message inspection. -// It encapsulates the error itself along with additional metadata, including the control message type, -// the associated topic or message ID. -type InvCtrlMsgErr struct { - // Err holds the underlying error. - Err error - - // ctrlMsgTopicType specifies the type of control message. - ctrlMsgTopicType CtrlMsgTopicType - - // topic is the topic associated with the error. - topic string - - // messageID is the message ID associated with an error during iWant validation. - messageID string -} - -// SetTopic sets the topic of the error. -func (e *InvCtrlMsgErr) SetTopic(topic string) { - e.topic = topic -} - -// SetMessageID sets the provided messageID. -func (e *InvCtrlMsgErr) SetMessageID(messageID string) { - e.messageID = messageID -} - -// MessageID returns the messageID of the error. -func (e *InvCtrlMsgErr) MessageID() string { - return e.messageID -} - -// Topic returns the topi of the error. -func (e *InvCtrlMsgErr) Topic() string { - return e.topic -} - -// CtrlMsgTopicType returns the CtrlMsgTopicType of the error. -func (e *InvCtrlMsgErr) CtrlMsgTopicType() CtrlMsgTopicType { - return e.ctrlMsgTopicType -} - -// NewInvCtrlMsgErr returns a new InvCtrlMsgErr. -// Args: -// - err: the error. -// - ctrlMsgTopicType: the control message topic type. -// Returns: -// - *InvCtrlMsgErr: the invalid control message error. -func NewInvCtrlMsgErr(err error, ctrlMsgTopicType CtrlMsgTopicType) *InvCtrlMsgErr { - return &InvCtrlMsgErr{ - Err: err, - ctrlMsgTopicType: ctrlMsgTopicType, - } -} - -// InvCtrlMsgNotif is the notification sent to the consumer when an invalid control message is received. -// It models the information that is available to the consumer about a misbehaving peer. -type InvCtrlMsgNotif struct { - // PeerID is the ID of the peer that sent the invalid control message. - PeerID peer.ID - // Errors the errors that occurred during validation. - Errors InvCtrlMsgErrs - // MsgType the control message type. 
- MsgType p2pmsg.ControlMessageType -} - -// NewInvalidControlMessageNotification returns a new *InvCtrlMsgNotif. -// Args: -// - peerID: peer ID of the sender. -// - ctlMsgType: the control message type. -// - errs: validation errors that occurred. -// Returns: -// - *InvCtrlMsgNotif: the invalid control message notification. -func NewInvalidControlMessageNotification(peerID peer.ID, ctlMsgType p2pmsg.ControlMessageType, errs InvCtrlMsgErrs) *InvCtrlMsgNotif { - return &InvCtrlMsgNotif{ - PeerID: peerID, - Errors: errs, - MsgType: ctlMsgType, - } -} - // GossipSubInspectorNotifDistributor is the interface for the distributor that distributes gossip sub inspector notifications. // It is used to distribute notifications to the consumers in an asynchronous manner and non-blocking manner. // The implementation should guarantee that all registered consumers are called upon distribution of a new event. @@ -151,6 +45,44 @@ func (t CtrlMsgTopicType) String() string { } } +// InvCtrlMsgNotif is the notification sent to the consumer when an invalid control message is received. +// It models the information that is available to the consumer about a misbehaving peer. +type InvCtrlMsgNotif struct { + // PeerID is the ID of the peer that sent the invalid control message. + PeerID peer.ID + // Error the error that occurred during validation. + Error error + // MsgType the control message type. + MsgType p2pmsg.ControlMessageType + // Count the number of errors. + Count uint64 + // TopicType reports whether the error occurred on a cluster-prefixed topic within the control message. + // Notifications must be explicitly marked as cluster-prefixed or not because the penalty applied to the GossipSub score + // for an error on a cluster-prefixed topic is more lenient than the penalty applied to a non-cluster-prefixed topic. + // This distinction ensures that nodes engaged in cluster-prefixed topic communication are not penalized too harshly, + // as such communication is vital to the progress of the chain. + TopicType CtrlMsgTopicType +} + +// NewInvalidControlMessageNotification returns a new *InvCtrlMsgNotif. +// Args: +// - peerID: peer id of the offender. +// - ctlMsgType: the control message type of the rpc message that caused the error. +// - err: the error that occurred. +// - count: the number of occurrences of the error. +// +// Returns: +// - *InvCtrlMsgNotif: invalid control message notification. +func NewInvalidControlMessageNotification(peerID peer.ID, ctlMsgType p2pmsg.ControlMessageType, err error, count uint64, topicType CtrlMsgTopicType) *InvCtrlMsgNotif { + return &InvCtrlMsgNotif{ + PeerID: peerID, + Error: err, + MsgType: ctlMsgType, + Count: count, + TopicType: topicType, + } +} + // GossipSubInvCtrlMsgNotifConsumer is the interface for the consumer that consumes gossipsub inspector notifications. // It is used to consume notifications in an asynchronous manner. // The implementation must be concurrency safe, but can be blocking. This is due to the fact that the consumer is called diff --git a/network/p2p/distributor/gossipsub_inspector_test.go b/network/p2p/distributor/gossipsub_inspector_test.go index d6bb2e18750..43d26d8fc26 100644 --- a/network/p2p/distributor/gossipsub_inspector_test.go +++ b/network/p2p/distributor/gossipsub_inspector_test.go @@ -92,12 +92,10 @@ func invalidControlMessageNotificationListFixture(t *testing.T, n int) []*p2p.In return list } -// invalidControlMessageNotificationFixture creates an invalid control message notification fixture with a random message type.
func invalidControlMessageNotificationFixture(t *testing.T) *p2p.InvCtrlMsgNotif { - msgType := []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgGraft, p2pmsg.CtrlMsgPrune, p2pmsg.CtrlMsgIHave, p2pmsg.CtrlMsgIWant}[rand.Intn(4)] return &p2p.InvCtrlMsgNotif{ PeerID: unittest.PeerIdFixture(t), - MsgType: msgType, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid control message"), p2p.CtrlMsgTopicTypeClusterPrefixed)}, + MsgType: []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgGraft, p2pmsg.CtrlMsgPrune, p2pmsg.CtrlMsgIHave, p2pmsg.CtrlMsgIWant}[rand.Intn(4)], + Error: fmt.Errorf("this is an error"), } } diff --git a/network/p2p/inspector/validation/control_message_validation_inspector.go b/network/p2p/inspector/validation/control_message_validation_inspector.go index e4034521109..5556edf685d 100644 --- a/network/p2p/inspector/validation/control_message_validation_inspector.go +++ b/network/p2p/inspector/validation/control_message_validation_inspector.go @@ -252,40 +252,41 @@ func (c *ControlMsgValidationInspector) processInspectRPCReq(req *InspectRPCRequ defer func() { c.metrics.AsyncProcessingFinished(time.Since(start)) }() + activeClusterIDS := c.tracker.GetActiveClusterIds() for _, ctrlMsgType := range p2pmsg.ControlMessageTypes() { switch ctrlMsgType { case p2pmsg.CtrlMsgGraft: - errs := c.inspectGraftMessages(req.Peer, req.rpc.GetControl().GetGraft(), activeClusterIDS) - if errs != nil { - c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgGraft, errs) + err, topicType := c.inspectGraftMessages(req.Peer, req.rpc.GetControl().GetGraft(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgGraft, err, 1, topicType) return nil } case p2pmsg.CtrlMsgPrune: - errs := c.inspectPruneMessages(req.Peer, req.rpc.GetControl().GetPrune(), activeClusterIDS) - if errs != nil { - c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgPrune, errs) + err, topicType := c.inspectPruneMessages(req.Peer, req.rpc.GetControl().GetPrune(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgPrune, err, 1, topicType) return nil } case p2pmsg.CtrlMsgIWant: - errs := c.inspectIWantMessages(req.Peer, req.rpc.GetControl().GetIwant()) - if errs != nil { - c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgIWant, errs) + err := c.inspectIWantMessages(req.Peer, req.rpc.GetControl().GetIwant()) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgIWant, err, 1, p2p.CtrlMsgNonClusterTopicType) return nil } case p2pmsg.CtrlMsgIHave: - errs := c.inspectIHaveMessages(req.Peer, req.rpc.GetControl().GetIhave(), activeClusterIDS) - if errs != nil { - c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgIHave, errs) + err, topicType := c.inspectIHaveMessages(req.Peer, req.rpc.GetControl().GetIhave(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgIHave, err, 1, topicType) return nil } } } // inspect rpc publish messages after all control message validation has passed - errs := c.inspectRpcPublishMessages(req.Peer, req.rpc.GetPublish(), activeClusterIDS) - if errs != nil { - c.logAndDistributeAsyncInspectErrs(req, p2pmsg.RpcPublishMessage, errs) + err, errCount := c.inspectRpcPublishMessages(req.Peer, req.rpc.GetPublish(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.RpcPublishMessage, err, errCount, p2p.CtrlMsgNonClusterTopicType) return nil } @@ -321,43 +322,23 @@ func (c *ControlMsgValidationInspector) 
checkPubsubMessageSender(message *pubsub // - grafts: the list of grafts to inspect. // - activeClusterIDS: the list of active cluster ids. // Returns: -// - p2p.InvCtrlMsgErrs: list of errors that occured during inspection -func (c *ControlMsgValidationInspector) inspectGraftMessages(from peer.ID, grafts []*pubsub_pb.ControlGraft, activeClusterIDS flow.ChainIDList) p2p.InvCtrlMsgErrs { - lg := c.logger.With(). - Str("remote_peer_id", p2plogging.PeerId(from)). - Int("sample_size", len(grafts)). - Logger() +// - DuplicateTopicErr: if there are any duplicate topics in the list of grafts +// - error: if any error occurs while sampling or validating topics, all returned errors are benign and should not cause the node to crash. +// - p2p.CtrlMsgTopicType: the type of the topic that failed validation; p2p.CtrlMsgNonClusterTopicType if no error occurred. +func (c *ControlMsgValidationInspector) inspectGraftMessages(from peer.ID, grafts []*pubsub_pb.ControlGraft, activeClusterIDS flow.ChainIDList) (error, p2p.CtrlMsgTopicType) { tracker := make(duplicateStrTracker) - errs := make(p2p.InvCtrlMsgErrs, 0) for _, graft := range grafts { topic := channels.Topic(graft.GetTopicID()) if tracker.isDuplicate(topic.String()) { - // Duplicate topic errors are marked with p2p.CtrlMsgNonClusterTopicType since the topic type is unknown until validation completes. - // This approach ensures that penalties for duplicate topic errors are not reduced. - invErr := p2p.NewInvCtrlMsgErr(NewDuplicateTopicErr(topic.String(), p2pmsg.CtrlMsgGraft), p2p.CtrlMsgNonClusterTopicType) - invErr.SetTopic(topic.String()) - errs = append(errs, invErr) - continue + return NewDuplicateTopicErr(topic.String(), p2pmsg.CtrlMsgGraft), p2p.CtrlMsgNonClusterTopicType } tracker.set(topic.String()) - err, topicType := c.validateTopic(from, topic, activeClusterIDS) + err, ctrlMsgType := c.validateTopic(from, topic, activeClusterIDS) if err != nil { - invErr := p2p.NewInvCtrlMsgErr(err, topicType) - invErr.SetTopic(topic.String()) - errs = append(errs, invErr) + return err, ctrlMsgType } } - - lg = lg.With().Int("error_count", errs.Len()).Logger() - if errs.Len() > 0 { - lg.Debug(). - Bool(logging.KeySuspicious, true). - Msg("graft control message validation failed") - return errs - } - - lg.Debug().Msg("graft control message validation complete") - return nil + return nil, p2p.CtrlMsgNonClusterTopicType } // inspectPruneMessages performs topic validation on all prunes in the control message using the provided validateTopic func while tracking duplicates. @@ -366,43 +347,24 @@ func (c *ControlMsgValidationInspector) inspectGraftMessages(from peer.ID, graft // - prunes: the list of prunes to inspect. // - activeClusterIDS: the list of active cluster ids. // Returns: -// - p2p.InvCtrlMsgErrs: list of errors that occured during inspection +// - DuplicateTopicErr: if there are any duplicate topics found +// in the list of prunes. +// - error: if any error occurs while sampling or validating topics, all returned errors are benign and should not cause the node to crash. +// - p2p.CtrlMsgTopicType: the type of the topic that failed validation; p2p.CtrlMsgNonClusterTopicType if no error occurred.
+func (c *ControlMsgValidationInspector) inspectPruneMessages(from peer.ID, prunes []*pubsub_pb.ControlPrune, activeClusterIDS flow.ChainIDList) (error, p2p.CtrlMsgTopicType) { tracker := make(duplicateStrTracker) - errs := make(p2p.InvCtrlMsgErrs, 0) for _, prune := range prunes { topic := channels.Topic(prune.GetTopicID()) if tracker.isDuplicate(topic.String()) { - // Duplicate topic errors are marked with p2p.CtrlMsgNonClusterTopicType since the topic type is unknown until validation completes. - // This approach ensures that penalties for duplicate topic errors are not reduced. - invErr := p2p.NewInvCtrlMsgErr(NewDuplicateTopicErr(topic.String(), p2pmsg.CtrlMsgGraft), p2p.CtrlMsgNonClusterTopicType) - invErr.SetTopic(topic.String()) - errs = append(errs, invErr) - continue + return NewDuplicateTopicErr(topic.String(), p2pmsg.CtrlMsgPrune), p2p.CtrlMsgNonClusterTopicType } tracker.set(topic.String()) - err, topicType := c.validateTopic(from, topic, activeClusterIDS) + err, ctrlMsgType := c.validateTopic(from, topic, activeClusterIDS) if err != nil { - invErr := p2p.NewInvCtrlMsgErr(err, topicType) - invErr.SetTopic(topic.String()) - errs = append(errs, invErr) + return err, ctrlMsgType } } - - lg = lg.With().Int("error_count", errs.Len()).Logger() - if errs.Len() > 0 { - lg.Debug(). - Bool(logging.KeySuspicious, true). - Msg("prune control message validation failed") - return errs - } - - lg.Debug().Msg("prune control message validation complete") - return nil + return nil, p2p.CtrlMsgNonClusterTopicType } // inspectIHaveMessages performs topic validation on all ihaves in the control message using the provided validateTopic func while tracking duplicates. @@ -411,10 +373,13 @@ func (c *ControlMsgValidationInspector) inspectPruneMessages(from peer.ID, prune // - iHaves: the list of iHaves to inspect. // - activeClusterIDS: the list of active cluster ids. // Returns: -// - p2p.InvCtrlMsgErrs: list of errors that occured during inspection +// - DuplicateTopicErr: if there are any duplicate topics found in the list of iHaves +// or any duplicate message ids found inside a single iHave. +// - error: if any error occurs while sampling or validating topics, all returned errors are benign and should not cause the node to crash. +// - p2p.CtrlMsgTopicType: the type of the topic that failed validation; p2p.CtrlMsgNonClusterTopicType if no error occurred. +func (c *ControlMsgValidationInspector) inspectIHaveMessages(from peer.ID, ihaves []*pubsub_pb.ControlIHave, activeClusterIDS flow.ChainIDList) (error, p2p.CtrlMsgTopicType) { if len(ihaves) == 0 { - return nil + return nil, p2p.CtrlMsgNonClusterTopicType } lg := c.logger.With(). Str("peer_id", p2plogging.PeerId(from)). @@ -423,51 +388,30 @@ func (c *ControlMsgValidationInspector) inspectIHaveMessages(from peer.ID, ihave Logger() duplicateTopicTracker := make(duplicateStrTracker) duplicateMessageIDTracker := make(duplicateStrTracker) - errs := make(p2p.InvCtrlMsgErrs, 0) totalMessageIds := 0 for _, ihave := range ihaves { messageIds := ihave.GetMessageIDs() topic := ihave.GetTopicID() if duplicateTopicTracker.isDuplicate(topic) { - // Duplicate topic errors are marked with p2p.CtrlMsgNonClusterTopicType since the topic type is unknown until validation completes.
- invErr := p2p.NewInvCtrlMsgErr(NewDuplicateTopicErr(topic, p2pmsg.CtrlMsgGraft), p2p.CtrlMsgNonClusterTopicType) - invErr.SetTopic(topic) - errs = append(errs, invErr) - continue + return NewDuplicateTopicErr(topic, p2pmsg.CtrlMsgIHave), p2p.CtrlMsgNonClusterTopicType } duplicateTopicTracker.set(topic) - err, topicType := c.validateTopic(from, channels.Topic(topic), activeClusterIDS) + err, ctrlMsgType := c.validateTopic(from, channels.Topic(topic), activeClusterIDS) if err != nil { - invErr := p2p.NewInvCtrlMsgErr(err, topicType) - invErr.SetTopic(topic) - errs = append(errs, invErr) + return err, ctrlMsgType } + for _, messageID := range messageIds { if duplicateMessageIDTracker.isDuplicate(messageID) { - invErr := p2p.NewInvCtrlMsgErr(NewDuplicateMessageIDErr(messageID, p2pmsg.CtrlMsgIHave), p2p.CtrlMsgNonClusterTopicType) - invErr.SetMessageID(messageID) - errs = append(errs, invErr) - continue + return NewDuplicateTopicErr(messageID, p2pmsg.CtrlMsgIHave), p2p.CtrlMsgNonClusterTopicType } duplicateMessageIDTracker.set(messageID) } } - - lg = lg.With(). + lg.Debug(). Int("total_message_ids", totalMessageIds). - Int("error_count", errs.Len()). - Logger() - - if errs.Len() > 0 { - lg.Debug(). - Bool(logging.KeySuspicious, true). - Msg("ihave control message validation failed") - return errs - } - - lg.Debug().Msg("ihave control message validation complete") - return nil + Msg("ihave control message validation complete") + return nil, p2p.CtrlMsgNonClusterTopicType } // inspectIWantMessages inspects RPC iWant control messages. This func will sample the iWants and perform validation on each iWant in the sample. @@ -481,7 +425,7 @@ func (c *ControlMsgValidationInspector) inspectIHaveMessages(from peer.ID, ihave // Returns: // - DuplicateTopicErr: if there are any duplicate message ids found in any of the iWants. // - IWantCacheMissThresholdErr: if the rate of cache misses exceeds the configured allowed threshold. 
-func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWants []*pubsub_pb.ControlIWant) p2p.InvCtrlMsgErrs { +func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWants []*pubsub_pb.ControlIWant) error { if len(iWants) == 0 { return nil } @@ -493,7 +437,6 @@ func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWant Logger() sampleSize := uint(len(iWants)) tracker := make(duplicateStrTracker) - errs := make(p2p.InvCtrlMsgErrs, 0) cacheMisses := 0 allowedCacheMissesThreshold := float64(sampleSize) * c.config.IWant.CacheMissThreshold duplicates := 0 @@ -515,15 +458,7 @@ func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWant if tracker.isDuplicate(messageID) { duplicates++ if float64(duplicates) > allowedDuplicatesThreshold { - invErr := p2p.NewInvCtrlMsgErr( - NewIWantDuplicateMsgIDThresholdErr( - duplicates, messageIDCount, - c.config.IWant.DuplicateMsgIDThreshold), - p2p.CtrlMsgNonClusterTopicType, - ) - invErr.SetMessageID(messageID) - errs = append(errs, invErr) - continue + return NewIWantDuplicateMsgIDThresholdErr(duplicates, messageIDCount, c.config.IWant.DuplicateMsgIDThreshold) } } // check cache miss threshold @@ -531,23 +466,7 @@ func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWant cacheMisses++ if checkCacheMisses { if float64(cacheMisses) > allowedCacheMissesThreshold { - invErr := p2p.NewInvCtrlMsgErr( - NewIWantCacheMissThresholdErr( - cacheMisses, - messageIDCount, - c.config.IWant.CacheMissThreshold), - p2p.CtrlMsgNonClusterTopicType, - ) - invErr.SetMessageID(messageID) - errs = append(errs, invErr) - lg.Debug(). - Bool(logging.KeySuspicious, true). - Int("total_message_ids", totalMessageIds). - Int("cache_misses", cacheMisses). - Int("duplicates", duplicates). - Int("error_count", errs.Len()). - Msg("iwant control message validation failed cache miss threshold exceeded") - return errs + return NewIWantCacheMissThresholdErr(cacheMisses, messageIDCount, c.config.IWant.CacheMissThreshold) } } } @@ -556,20 +475,12 @@ func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWant } } - lg = lg.With(). + lg.Debug(). Int("total_message_ids", totalMessageIds). Int("cache_misses", cacheMisses). Int("duplicates", duplicates). - Int("error_count", errs.Len()). - Logger() + Msg("iwant control message validation complete") - if errs.Len() > 0 { - lg.Debug(). - Bool(logging.KeySuspicious, true). - Msg("iwant control message validation failed") - return errs - } - lg.Debug().Msg("iwant control message validation completed") return nil } @@ -583,21 +494,17 @@ func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWant // - messages: rpc publish messages. // - activeClusterIDS: the list of active cluster ids. // Returns: -// - InvalidRpcPublishMessagesErr: if the amount of invalid messages exceeds the configured RpcMessageErrorThreshold. -func (c *ControlMsgValidationInspector) inspectRpcPublishMessages(from peer.ID, messages []*pubsub_pb.Message, activeClusterIDS flow.ChainIDList) p2p.InvCtrlMsgErrs { +// - InvalidRpcPublishMessagesErr: if the amount of invalid messages exceeds the configured RPCMessageErrorThreshold. 
+// - int: the number of invalid pubsub messages +func (c *ControlMsgValidationInspector) inspectRpcPublishMessages(from peer.ID, messages []*pubsub_pb.Message, activeClusterIDS flow.ChainIDList) (error, uint64) { totalMessages := len(messages) if totalMessages == 0 { - return nil + return nil, 0 } sampleSize := c.config.MessageMaxSampleSize if sampleSize > totalMessages { sampleSize = totalMessages } - lg := c.logger.With(). - Str("peer_id", p2plogging.PeerId(from)). - Int("sample_size", sampleSize). - Int("max_sample_size", c.config.IHave.MaxSampleSize). - Logger() c.performSample(p2pmsg.RpcPublishMessage, uint(totalMessages), uint(sampleSize), func(i, j uint) { messages[i], messages[j] = messages[j], messages[i] }) @@ -611,27 +518,12 @@ func (c *ControlMsgValidationInspector) inspectRpcPublishMessages(from peer.ID, } return false } - checkErrThreshold := func(errs *multierror.Error, invCtrlMsgErrs p2p.InvCtrlMsgErrs) p2p.InvCtrlMsgErrs { - // capture error when we exceed the error threshold - if errs != nil && errs.Len() > c.config.MessageErrorThreshold { - invErr := p2p.NewInvCtrlMsgErr( - NewInvalidRpcPublishMessagesErr( - errs.ErrorOrNil(), - errs.Len()), - p2p.CtrlMsgNonClusterTopicType, - ) - invCtrlMsgErrs = append(invCtrlMsgErrs, invErr) - } - return invCtrlMsgErrs - } - invCtrlMsgErrs := make(p2p.InvCtrlMsgErrs, 0) var errs *multierror.Error for _, message := range messages[:sampleSize] { if c.networkingType == network.PrivateNetwork { err := c.checkPubsubMessageSender(message) if err != nil { errs = multierror.Append(errs, err) - invCtrlMsgErrs = checkErrThreshold(errs, invCtrlMsgErrs) continue } } @@ -641,26 +533,22 @@ func (c *ControlMsgValidationInspector) inspectRpcPublishMessages(from peer.ID, // cluster prefix status is unnecessary when the error threshold is exceeded. err, _ := c.validateTopic(from, topic, activeClusterIDS) if err != nil { + // we can skip checking for subscription of topic that failed validation and continue errs = multierror.Append(errs, err) - } else if !hasSubscription(topic.String()) { + continue + } + + if !hasSubscription(topic.String()) { errs = multierror.Append(errs, fmt.Errorf("subscription for topic %s not found", topic)) } - // capture error when we exceed the error threshold - invCtrlMsgErrs = checkErrThreshold(errs, invCtrlMsgErrs) } - lg = lg.With(). - Int("error_count", invCtrlMsgErrs.Len()). - Logger() - if invCtrlMsgErrs.Len() > 0 { - lg.Debug(). - Bool(logging.KeySuspicious, true). - Msg("rpc publish messages validation failed") - return invCtrlMsgErrs + // return an error when we exceed the error threshold + if errs != nil && errs.Len() > c.config.MessageErrorThreshold { + return NewInvalidRpcPublishMessagesErr(errs.ErrorOrNil(), errs.Len()), uint64(errs.Len()) } - lg.Debug().Msg("rpc publish messages validation completed") - return nil + return nil, 0 } // truncateRPC truncates the RPC by truncating each control message type using the configured max sample size values. @@ -894,11 +782,6 @@ func (c *ControlMsgValidationInspector) validateClusterPrefixedTopic(from peer.I // only staked nodes are expected to participate on cluster prefixed topics nodeID, err := c.getFlowIdentifier(from) if err != nil { - lg.Warn(). - Bool(logging.KeySuspicious, true). - Bool(logging.KeyNetworkingSecurity, true). - Err(err). - Msg("cluster prefixed control message received from unstaked peer") return err } if len(activeClusterIds) == 0 { @@ -914,7 +797,7 @@ func (c *ControlMsgValidationInspector) validateClusterPrefixedTopic(from peer.I lg.Warn(). 
Err(err). Str("topic", topic.String()). - Msg("failed to validate cluster prefixed control message with cluster prefixed topic active cluster ids not set") + Msg("failed to validate cluster prefixed control message with cluster pre-fixed topic active cluster ids not set") return nil } @@ -975,24 +858,34 @@ func (c *ControlMsgValidationInspector) checkClusterPrefixHardThreshold(nodeID f // Args: // - req: inspect rpc request that failed validation. // - ctlMsgType: the control message type of the rpc message that caused the error. -// - errs: the errors that occurred. -func (c *ControlMsgValidationInspector) logAndDistributeAsyncInspectErrs(req *InspectRPCRequest, ctlMsgType p2pmsg.ControlMessageType, errs p2p.InvCtrlMsgErrs) { +// - err: the error that occurred. +// - count: the number of occurrences of the error. +// - topicType: indicates whether the error occurred on a cluster-prefixed topic. +func (c *ControlMsgValidationInspector) logAndDistributeAsyncInspectErrs(req *InspectRPCRequest, ctlMsgType p2pmsg.ControlMessageType, err error, count uint64, topicType p2p.CtrlMsgTopicType) { lg := c.logger.With(). - Err(errs.Error()). + Err(err). Str("control_message_type", ctlMsgType.String()). Bool(logging.KeySuspicious, true). Bool(logging.KeyNetworkingSecurity, true). - Int("error_count", errs.Len()). + Str("topic_type", topicType.String()). + Uint64("error_count", count). Str("peer_id", p2plogging.PeerId(req.Peer)). Logger() - err := c.distributor.Distribute(p2p.NewInvalidControlMessageNotification(req.Peer, ctlMsgType, errs)) - if err != nil { - c.logAndThrowError(fmt.Errorf("failed to distribute invalid control message notification: %w", err)) - } - c.metrics.InvalidControlMessageNotificationError(ctlMsgType, float64(errs.Len())) - - lg.Error().Err(errs.Error()).Msg("rpc control message async inspection failed invalid control message notification disseminated") + switch { + case IsErrActiveClusterIDsNotSet(err): + lg.Warn().Msg("active cluster ids not set") + case IsErrUnstakedPeer(err): + lg.Warn().Msg("control message received from unstaked peer") + default: + distErr := c.distributor.Distribute(p2p.NewInvalidControlMessageNotification(req.Peer, ctlMsgType, err, count, topicType)) + if distErr != nil { + lg.Error(). + Err(distErr). + Msg("failed to distribute invalid control message notification") + } + lg.Error().Msg("rpc control message async inspection failed") + } } // logAndThrowError logs and throws irrecoverable errors on the context.
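The inspector changes above replace the multi-error InvCtrlMsgErrs notification with a single-error notification carrying an error count and a topic type, and logAndDistributeAsyncInspectErrs now skips dissemination for active-cluster-ids-not-set and unstaked-peer errors. Before the test changes below, here is a minimal, illustrative sketch (not part of this diff) of how a downstream consumer might react to the reshaped notification. The consumer type, constructor, and penalty constants are hypothetical; the OnInvalidControlMessageNotification method name is assumed from the GossipSubInvCtrlMsgNotifConsumer interface referenced above, and only the p2p.InvCtrlMsgNotif fields and p2p.CtrlMsgTopicTypeClusterPrefixed come from the changes in this patch.

package example

import (
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/onflow/flow-go/network/p2p"
)

// Hypothetical penalty values, for illustration only; the real values live in the
// scoring configuration and are not shown in this diff.
const (
	defaultErrPenalty         = -10.0
	clusterPrefixedErrPenalty = -1.0 // more lenient, per the InvCtrlMsgNotif.TopicType documentation
)

// sketchConsumer is a hypothetical consumer of invalid control message notifications.
type sketchConsumer struct {
	penalties map[peer.ID]float64 // accumulated penalty per peer
}

func newSketchConsumer() *sketchConsumer {
	return &sketchConsumer{penalties: make(map[peer.ID]float64)}
}

// OnInvalidControlMessageNotification penalizes the sender of the invalid control message,
// scaling the penalty by the reported error count and relaxing it for cluster-prefixed topics.
func (c *sketchConsumer) OnInvalidControlMessageNotification(notif *p2p.InvCtrlMsgNotif) {
	penalty := defaultErrPenalty
	if notif.TopicType == p2p.CtrlMsgTopicTypeClusterPrefixed {
		penalty = clusterPrefixedErrPenalty
	}
	// Count reports how many offending items were found (e.g. invalid publish messages),
	// so the penalty is applied once per occurrence.
	c.penalties[notif.PeerID] += penalty * float64(notif.Count)
}

With this shape, a consumer receives exactly one error per notification together with its count and topic type, instead of unpacking an InvCtrlMsgErrs slice per control message type.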
diff --git a/network/p2p/inspector/validation/control_message_validation_inspector_test.go b/network/p2p/inspector/validation/control_message_validation_inspector_test.go index 3e4d3bf74f6..f9268224dee 100644 --- a/network/p2p/inspector/validation/control_message_validation_inspector_test.go +++ b/network/p2p/inspector/validation/control_message_validation_inspector_test.go @@ -109,8 +109,7 @@ func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { shouldNotBeTruncated := len(graftsLessThanMaxSampleSize.GetControl().GetGraft()) == 50 return shouldBeTruncated && shouldNotBeTruncated }, time.Second, 500*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + stopInspector(t, cancel, inspector) }) t.Run("truncatePruneMessages should truncate prune messages as expected", func(t *testing.T) { @@ -140,8 +139,7 @@ func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { shouldNotBeTruncated := len(prunesLessThanMaxSampleSize.GetControl().GetPrune()) == 50 return shouldBeTruncated && shouldNotBeTruncated }, time.Second, 500*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + stopInspector(t, cancel, inspector) }) t.Run("truncateIHaveMessages should truncate iHave messages as expected", func(t *testing.T) { @@ -171,8 +169,7 @@ func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { shouldNotBeTruncated := len(iHavesLessThanMaxSampleSize.GetControl().GetIhave()) == 50 return shouldBeTruncated && shouldNotBeTruncated }, time.Second, 500*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + stopInspector(t, cancel, inspector) }) t.Run("truncateIHaveMessageIds should truncate iHave message ids as expected", func(t *testing.T) { @@ -208,8 +205,7 @@ func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { } return true }, time.Second, 500*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + stopInspector(t, cancel, inspector) }) t.Run("truncateIWantMessages should truncate iWant messages as expected", func(t *testing.T) { @@ -237,8 +233,7 @@ func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { shouldNotBeTruncated := len(iWantsLessThanMaxSampleSize.GetControl().GetIwant()) == 50 return shouldBeTruncated && shouldNotBeTruncated }, time.Second, 500*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + stopInspector(t, cancel, inspector) }) t.Run("truncateIWantMessageIds should truncate iWant message ids as expected", func(t *testing.T) { @@ -272,8 +267,7 @@ func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { } return true }, time.Second, 500*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + stopInspector(t, cancel, inspector) }) } @@ -321,16 +315,9 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { from := unittest.PeerIdFixture(t) require.NoError(t, inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - return true - } - } - return false - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 
500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("processInspectRPCReq should disseminate invalid control message notification for control messages with duplicate topics", func(t *testing.T) { @@ -347,15 +334,13 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { duplicateTopicGraftsRpc := unittest.P2PRPCFixture(unittest.WithGrafts(grafts...)) duplicateTopicPrunesRpc := unittest.P2PRPCFixture(unittest.WithPrunes(prunes...)) duplicateTopicIHavesRpc := unittest.P2PRPCFixture(unittest.WithIHaves(ihaves...)) - expectedNotifications := 3 - distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(expectedNotifications).Run(func(args mock.Arguments) { + distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(func(args mock.Arguments) { notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Equal(t, duplicateTopic, notification.Errors[0].Topic()) - require.Equal(t, notification.Errors[0].CtrlMsgTopicType(), p2p.CtrlMsgNonClusterTopicType, "expected p2p.CtrlMsgNonClusterTopicType notification type, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "expected p2p.CtrlMsgNonClusterTopicType notification type, no RPC with cluster prefixed topic sent in this test") require.Equal(t, from, notification.PeerID) require.Contains(t, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgGraft, p2pmsg.CtrlMsgPrune, p2pmsg.CtrlMsgIHave}, notification.MsgType) - require.True(t, validation.IsDuplicateTopicErr(notification.Errors[0].Err)) + require.True(t, validation.IsDuplicateTopicErr(notification.Error)) }) inspector.Start(signalerCtx) @@ -363,17 +348,9 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { require.NoError(t, inspector.Inspect(from, duplicateTopicGraftsRpc)) require.NoError(t, inspector.Inspect(from, duplicateTopicPrunesRpc)) require.NoError(t, inspector.Inspect(from, duplicateTopicIHavesRpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > expectedNotifications - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectGraftMessages should disseminate invalid control message notification for invalid graft messages as expected", func(t *testing.T) { @@ -381,8 +358,7 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { // create unknown topic unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) // avoid unknown topics errors - allTopics := []string{unknownTopic, malformedTopic, invalidSporkIDTopic} - topicProviderOracle.UpdateTopics(allTopics) + topicProviderOracle.UpdateTopics([]string{unknownTopic, malformedTopic, invalidSporkIDTopic}) unknownTopicGraft := unittest.P2PRPCGraftFixture(&unknownTopic) malformedTopicGraft := unittest.P2PRPCGraftFixture(&malformedTopic) invalidSporkIDTopicGraft := unittest.P2PRPCGraftFixture(&invalidSporkIDTopic) @@ -392,26 +368,17 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { 
invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(invalidSporkIDTopicGraft)) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgGraft}, channels.IsInvalidTopicErr, p2p.CtrlMsgNonClusterTopicType, allTopics, nil) - expectedNotifications := 3 - distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(expectedNotifications).Run(checkNotification) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgGraft, channels.IsInvalidTopicErr, p2p.CtrlMsgNonClusterTopicType) + distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, unknownTopicReq)) require.NoError(t, inspector.Inspect(from, malformedTopicReq)) require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > expectedNotifications - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectPruneMessages should disseminate invalid control message notification for invalid prune messages as expected", func(t *testing.T) { @@ -422,34 +389,23 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { malformedTopicPrune := unittest.P2PRPCPruneFixture(&malformedTopic) invalidSporkIDTopicPrune := unittest.P2PRPCPruneFixture(&invalidSporkIDTopic) // avoid unknown topics errors - allTopics := []string{unknownTopic, malformedTopic, invalidSporkIDTopic} - topicProviderOracle.UpdateTopics(allTopics) + topicProviderOracle.UpdateTopics([]string{unknownTopic, malformedTopic, invalidSporkIDTopic}) unknownTopicRpc := unittest.P2PRPCFixture(unittest.WithPrunes(unknownTopicPrune)) malformedTopicRpc := unittest.P2PRPCFixture(unittest.WithPrunes(malformedTopicPrune)) invalidSporkIDTopicRpc := unittest.P2PRPCFixture(unittest.WithPrunes(invalidSporkIDTopicPrune)) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgPrune}, channels.IsInvalidTopicErr, p2p.CtrlMsgNonClusterTopicType, allTopics, nil) - expectedNotifications := 3 - distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(expectedNotifications).Run(checkNotification) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgPrune, channels.IsInvalidTopicErr, p2p.CtrlMsgNonClusterTopicType) + distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, unknownTopicRpc)) require.NoError(t, inspector.Inspect(from, malformedTopicRpc)) require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicRpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > expectedNotifications - }, time.Second, 100*time.Millisecond) - - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is 
processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectIHaveMessages should disseminate invalid control message notification for iHave messages with invalid topics as expected", func(t *testing.T) { @@ -457,8 +413,7 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { // create unknown topic unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) // avoid unknown topics errors - allTopics := []string{unknownTopic, malformedTopic, invalidSporkIDTopic} - topicProviderOracle.UpdateTopics(allTopics) + topicProviderOracle.UpdateTopics([]string{unknownTopic, malformedTopic, invalidSporkIDTopic}) unknownTopicIhave := unittest.P2PRPCIHaveFixture(&unknownTopic, unittest.IdentifierListFixture(5).Strings()...) malformedTopicIhave := unittest.P2PRPCIHaveFixture(&malformedTopic, unittest.IdentifierListFixture(5).Strings()...) invalidSporkIDTopicIhave := unittest.P2PRPCIHaveFixture(&invalidSporkIDTopic, unittest.IdentifierListFixture(5).Strings()...) @@ -468,56 +423,36 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { invalidSporkIDTopicRpc := unittest.P2PRPCFixture(unittest.WithIHaves(invalidSporkIDTopicIhave)) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgIHave}, channels.IsInvalidTopicErr, p2p.CtrlMsgNonClusterTopicType, allTopics, nil) - expectedNotifications := 3 - distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(expectedNotifications).Run(checkNotification) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIHave, channels.IsInvalidTopicErr, p2p.CtrlMsgNonClusterTopicType) + distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, unknownTopicRpc)) require.NoError(t, inspector.Inspect(from, malformedTopicRpc)) require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicRpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > expectedNotifications - }, time.Second, 100*time.Millisecond) - - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectIHaveMessages should disseminate invalid control message notification for iHave messages with duplicate message ids as expected", func(t *testing.T) { inspector, signalerCtx, cancel, distributor, _, sporkID, _, topicProviderOracle := inspectorFixture(t) validTopic := fmt.Sprintf("%s/%s", channels.PushBlocks.String(), sporkID) // avoid unknown topics errors - allTopics := []string{validTopic} - topicProviderOracle.UpdateTopics(allTopics) + topicProviderOracle.UpdateTopics([]string{validTopic}) duplicateMsgID := unittest.IdentifierFixture() msgIds := flow.IdentifierList{duplicateMsgID, duplicateMsgID, duplicateMsgID} duplicateMsgIDIHave := unittest.P2PRPCIHaveFixture(&validTopic, append(msgIds, unittest.IdentifierListFixture(5)...).Strings()...) 
duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIHaves(duplicateMsgIDIHave)) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgIHave}, validation.IsDuplicateMessageIDErr, p2p.CtrlMsgNonClusterTopicType, nil, msgIds.Strings()) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIHave, validation.IsDuplicateTopicErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectIWantMessages should disseminate invalid control message notification for iWant messages when duplicate message ids exceeds the allowed threshold", func(t *testing.T) { @@ -531,7 +466,7 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIWants(duplicateMsgIDIWant)) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgIWant}, validation.IsIWantDuplicateMsgIDThresholdErr, p2p.CtrlMsgNonClusterTopicType, nil, msgIds) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIWant, validation.IsIWantDuplicateMsgIDThresholdErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Run(func(args mock.Arguments) { @@ -543,17 +478,9 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectIWantMessages should disseminate invalid control message notification for iWant messages when cache misses exceeds allowed threshold", func(t *testing.T) { @@ -567,7 +494,7 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixtures(cacheMissCheckSize+1, 100)...)) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgIWant}, validation.IsIWantCacheMissThresholdErr, p2p.CtrlMsgNonClusterTopicType, nil, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIWant, validation.IsIWantCacheMissThresholdErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", 
mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() // return false each time to eventually force a notification to be disseminated when the cache miss count finally exceeds the 90% threshold @@ -588,54 +515,41 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) - t.Run("inspectIWantMessages should not disseminate invalid control message notification for iWant messages when cache misses exceeds allowed threshold if cache miss check size not exceeded", func(t *testing.T) { - inspector, signalerCtx, cancel, distributor, rpcTracker, _, _, _ := inspectorFixture(t, func(params *validation.InspectorParams) { - // if size of iwants not greater than 10 cache misses will not be checked - params.Config.IWant.CacheMissCheckSize = 10 - // set high cache miss threshold to ensure we only disseminate notification when it is exceeded - params.Config.IWant.CacheMissThreshold = .9 - }) - // oracle must be set even though iWant messages do not have topic IDs - defer distributor.AssertNotCalled(t, "Distribute") + t.Run("inspectIWantMessages should not disseminate invalid control message notification for iWant messages when cache misses exceeds allowed threshold if cache miss check size not exceeded", + func(t *testing.T) { + inspector, signalerCtx, cancel, distributor, rpcTracker, _, _, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + // if size of iwants not greater than 10 cache misses will not be checked + params.Config.IWant.CacheMissCheckSize = 10 + // set high cache miss threshold to ensure we only disseminate notification when it is exceeded + params.Config.IWant.CacheMissThreshold = .9 + }) + // oracle must be set even though iWant messages do not have topic IDs + defer distributor.AssertNotCalled(t, "Distribute") + + msgIds := unittest.IdentifierListFixture(100).Strings() + inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixture(msgIds...))) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + // return false each time to eventually force a notification to be disseminated when the cache miss count finally exceeds the 90% threshold + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(false).Run(func(args mock.Arguments) { + id, ok := args[0].(string) + require.True(t, ok) + require.Contains(t, msgIds, id) + }) + + from := unittest.PeerIdFixture(t) + inspector.Start(signalerCtx) - msgIds := unittest.IdentifierListFixture(100).Strings() - inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixture(msgIds...))) - rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() - // return false each time to eventually force a notification to be disseminated when the cache miss count finally exceeds the 90% threshold - rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(false).Run(func(args mock.Arguments) { - id, ok := 
args[0].(string) - require.True(t, ok) - require.Contains(t, msgIds, id) + require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) - from := unittest.PeerIdFixture(t) - inspector.Start(signalerCtx) - - require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) - require.Never(t, func() bool { - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - return true - } - } - return false - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") - }) t.Run("inspectRpcPublishMessages should disseminate invalid control message notification when invalid pubsub messages count greater than configured RpcMessageErrorThreshold", func(t *testing.T) { errThreshold := 500 inspector, signalerCtx, cancel, distributor, _, sporkID, _, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { @@ -666,48 +580,31 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { // set topic oracle to return list of topics to avoid hasSubscription errors and force topic validation topicProviderOracle.UpdateTopics(topics) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.RpcPublishMessage}, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType, nil, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectRpcPublishMessages should disseminate invalid control message notification when subscription missing for topic", func(t *testing.T) { errThreshold := 500 inspector, signalerCtx, cancel, distributor, _, sporkID, _, _ := inspectorFixture(t, func(params *validation.InspectorParams) { params.Config.MessageErrorThreshold = errThreshold }) - topic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID) - pubsubMsgs := unittest.GossipSubMessageFixtures(errThreshold+1, topic) + pubsubMsgs := unittest.GossipSubMessageFixtures(errThreshold+1, fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID)) from := unittest.PeerIdFixture(t) rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...)) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.RpcPublishMessage}, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType, nil, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, 
inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectRpcPublishMessages should disseminate invalid control message notification when publish messages contain no topic", func(t *testing.T) { @@ -724,24 +621,16 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { } // set topic oracle to return list of topics excluding first topic sent from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.RpcPublishMessage}, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType, topics, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectRpcPublishMessages should not inspect pubsub message sender on public networks", func(t *testing.T) { - inspector, signalerCtx, cancel, distributor, _, sporkID, idProvider, topicProviderOracle := inspectorFixture(t) + inspector, signalerCtx, cancel, _, _, sporkID, idProvider, topicProviderOracle := inspectorFixture(t) from := unittest.PeerIdFixture(t) defer idProvider.AssertNotCalled(t, "ByPeerID", from) topic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID) @@ -750,16 +639,9 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...)) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - return true - } - } - return false - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectRpcPublishMessages should disseminate invalid control message notification when message is from unstaked peer", func(t *testing.T) { inspector, signalerCtx, cancel, distributor, _, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { @@ -773,21 +655,13 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { pubsubMsgs := unittest.GossipSubMessageFixtures(501, topic, unittest.WithFrom(from)) idProvider.On("ByPeerID", from).Return(nil, false).Times(501) rpc := 
unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...)) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.RpcPublishMessage}, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType, nil, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("inspectRpcPublishMessages should disseminate invalid control message notification when message is from ejected peer", func(t *testing.T) { inspector, signalerCtx, cancel, distributor, _, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { @@ -802,106 +676,13 @@ func TestControlMessageValidationInspector_processInspectRPCReq(t *testing.T) { pubsubMsgs := unittest.GossipSubMessageFixtures(501, topic, unittest.WithFrom(from)) idProvider.On("ByPeerID", from).Return(id, true).Times(501) rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...)) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.RpcPublishMessage}, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType, nil, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, rpc)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") - }) - t.Run("rpc inspection should disseminate invalid control message notification with multiple errors when a control message contains multiple errors", func(t *testing.T) { - inspector, signalerCtx, cancel, distributor, rpcTracker, sporkID, _, topicProviderOracle := inspectorFixture(t) - - // create unknown topic - unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) - validTopic := fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID) - // avoid unknown topics errors - topicProviderOracle.UpdateTopics([]string{unknownTopic, malformedTopic, invalidSporkIDTopic, validTopic}) - duplicateMessageID := unittest.GenerateRandomStringWithLen(10) - invalidGrafts := []*pubsub_pb.ControlGraft{unittest.P2PRPCGraftFixture(&unknownTopic), unittest.P2PRPCGraftFixture(&malformedTopic), unittest.P2PRPCGraftFixture(&invalidSporkIDTopic)} - invalidPrunes := []*pubsub_pb.ControlPrune{unittest.P2PRPCPruneFixture(&unknownTopic), unittest.P2PRPCPruneFixture(&malformedTopic), 
unittest.P2PRPCPruneFixture(&invalidSporkIDTopic)} - invalidIHaves := []*pubsub_pb.ControlIHave{ - unittest.P2PRPCIHaveFixture(&unknownTopic, unittest.GenerateRandomStringsWithLen(10, 10)...), - unittest.P2PRPCIHaveFixture(&malformedTopic, unittest.GenerateRandomStringsWithLen(10, 10)...), - unittest.P2PRPCIHaveFixture(&invalidSporkIDTopic, unittest.GenerateRandomStringsWithLen(10, 10)...), - unittest.P2PRPCIHaveFixture(&validTopic, duplicateMessageID, duplicateMessageID)} - invalidIWants := []*pubsub_pb.ControlIWant{unittest.P2PRPCIWantFixture(duplicateMessageID, duplicateMessageID)} - - graftsReq := unittest.P2PRPCFixture(unittest.WithGrafts(invalidGrafts...)) - prunesReq := unittest.P2PRPCFixture(unittest.WithPrunes(invalidPrunes...)) - ihavesReq := unittest.P2PRPCFixture(unittest.WithIHaves(invalidIHaves...)) - iwantsReq := unittest.P2PRPCFixture(unittest.WithIWants(invalidIWants...)) - - from := unittest.PeerIdFixture(t) - ensureIsInvalidTopicErr := func(errors p2p.InvCtrlMsgErrs) { - for _, err := range errors { - require.True(t, channels.IsInvalidTopicErr(err.Err)) - } - } - expectedNotifications := 4 - distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(expectedNotifications).Run(func(args mock.Arguments) { - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, from, notification.PeerID) - switch notification.MsgType { - case p2pmsg.CtrlMsgGraft: - require.Equal(t, len(invalidGrafts), notification.Errors.Len()) - ensureIsInvalidTopicErr(notification.Errors) - case p2pmsg.CtrlMsgPrune: - require.Equal(t, len(invalidPrunes), notification.Errors.Len()) - ensureIsInvalidTopicErr(notification.Errors) - case p2pmsg.CtrlMsgIHave: - require.Equal(t, len(invalidIHaves), notification.Errors.Len()) - for _, err := range notification.Errors { - switch { - case channels.IsInvalidTopicErr(err.Err): - continue - case validation.IsDuplicateMessageIDErr(err.Err): - continue - default: - require.Fail(t, fmt.Sprintf("unexpected error encountered %s for iHave control message", err.Err)) - } - } - case p2pmsg.CtrlMsgIWant: - require.Equal(t, len(invalidIWants), notification.Errors.Len()) - require.True(t, validation.IsIWantDuplicateMsgIDThresholdErr(notification.Errors[0].Err)) - default: - require.Fail(t, fmt.Sprintf("unexpected control message type: %s", notification.MsgType)) - } - }) - rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() - rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Run(func(args mock.Arguments) { - id, ok := args[0].(string) - require.True(t, ok) - require.Contains(t, duplicateMessageID, id) - }) - inspector.Start(signalerCtx) - unittest.RequireCloseBefore(t, inspector.Ready(), 1*time.Second, "inspector did not start in time") - - require.NoError(t, inspector.Inspect(from, graftsReq)) - require.NoError(t, inspector.Inspect(from, prunesReq)) - require.NoError(t, inspector.Inspect(from, ihavesReq)) - require.NoError(t, inspector.Inspect(from, iwantsReq)) - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > expectedNotifications - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) } @@ -919,16 +700,9 @@ func 
TestNewControlMsgValidationInspector_validateClusterPrefixedTopic(t *testin inspector.ActiveClustersChanged(flow.ChainIDList{clusterID, flow.ChainID(unittest.IdentifierFixture().String()), flow.ChainID(unittest.IdentifierFixture().String())}) inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) - require.Never(t, func() bool { - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - return true - } - } - return false - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("validateClusterPrefixedTopic should not return error if cluster prefixed hard threshold not exceeded for unknown cluster ids", func(t *testing.T) { @@ -945,51 +719,27 @@ func TestNewControlMsgValidationInspector_validateClusterPrefixedTopic(t *testin idProvider.On("ByPeerID", from).Return(id, true).Once() inspector.Start(signalerCtx) require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) - require.Never(t, func() bool { - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - return true - } - } - return false - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("validateClusterPrefixedTopic should return an error when sender is unstaked", func(t *testing.T) { inspector, signalerCtx, cancel, distributor, _, sporkID, idProvider, topicProviderOracle := inspectorFixture(t) + defer distributor.AssertNotCalled(t, "Distribute") clusterID := flow.ChainID(unittest.IdentifierFixture().String()) clusterPrefixedTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.SyncCluster(clusterID), sporkID)).String() topicProviderOracle.UpdateTopics([]string{clusterPrefixedTopic}) from := unittest.PeerIdFixture(t) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgGraft, p2pmsg.CtrlMsgPrune, p2pmsg.CtrlMsgIHave}, validation.IsErrUnstakedPeer, p2p.CtrlMsgTopicTypeClusterPrefixed, []string{clusterPrefixedTopic}, nil) - expectedNotifications := 3 - distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(expectedNotifications).Run(checkNotification) - idProvider.On("ByPeerID", from).Return(nil, false).Times(3) - grafts := unittest.P2PRPCGraftFixtures(clusterPrefixedTopic) - prunes := unittest.P2PRPCPruneFixtures(clusterPrefixedTopic) - ihaves := unittest.P2PRPCIHaveFixtures(50, clusterPrefixedTopic) - graftRpc := unittest.P2PRPCFixture(unittest.WithGrafts(grafts...)) - pruneRpc := unittest.P2PRPCFixture(unittest.WithPrunes(prunes...)) - ihaveRpc := unittest.P2PRPCFixture(unittest.WithIHaves(ihaves...)) - + idProvider.On("ByPeerID", from).Return(nil, false).Once() + inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&clusterPrefixedTopic))) inspector.ActiveClustersChanged(flow.ChainIDList{flow.ChainID(unittest.IdentifierFixture().String())}) inspector.Start(signalerCtx) - require.NoError(t, inspector.Inspect(from, graftRpc)) - require.NoError(t, inspector.Inspect(from, pruneRpc)) - require.NoError(t, inspector.Inspect(from, ihaveRpc)) - require.Never(t, func() bool { - calls 
:= 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > expectedNotifications - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) t.Run("validateClusterPrefixedTopic should return error if cluster prefixed hard threshold exceeded for unknown cluster ids", func(t *testing.T) { @@ -1003,7 +753,7 @@ func TestNewControlMsgValidationInspector_validateClusterPrefixedTopic(t *testin from := unittest.PeerIdFixture(t) identity := unittest.IdentityFixture() idProvider.On("ByPeerID", from).Return(identity, true).Times(11) - checkNotification := checkNotificationFunc(t, from, []p2pmsg.ControlMessageType{p2pmsg.CtrlMsgGraft}, channels.IsUnknownClusterIDErr, p2p.CtrlMsgTopicTypeClusterPrefixed, []string{clusterPrefixedTopic}, nil) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgGraft, channels.IsUnknownClusterIDErr, p2p.CtrlMsgTopicTypeClusterPrefixed) inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&clusterPrefixedTopic))) inspector.ActiveClustersChanged(flow.ChainIDList{flow.ChainID(unittest.IdentifierFixture().String())}) distributor.On("Distribute", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) @@ -1011,17 +761,9 @@ func TestNewControlMsgValidationInspector_validateClusterPrefixedTopic(t *testin for i := 0; i < 11; i++ { require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) } - require.Never(t, func() bool { - calls := 0 - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - calls++ - } - } - return calls > 1 - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) }) } @@ -1043,16 +785,9 @@ func TestControlMessageValidationInspector_ActiveClustersChanged(t *testing.T) { rpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&topic))) require.NoError(t, inspector.Inspect(from, rpc)) } - require.Never(t, func() bool { - for _, call := range distributor.Calls { - if call.Method == "Distribute" { - return true - } - } - return false - }, time.Second, 100*time.Millisecond) - cancel() - unittest.RequireCloseBefore(t, inspector.Done(), 500*time.Millisecond, "inspector did not stop") + // sleep for 1 second to ensure rpc's is processed + time.Sleep(time.Second) + stopInspector(t, cancel, inspector) } // invalidTopics returns 3 invalid topics. @@ -1070,23 +805,18 @@ func invalidTopics(t *testing.T, sporkID flow.Identifier) (string, string, strin } // checkNotificationFunc returns util func used to ensure invalid control message notification disseminated contains expected information. 
-func checkNotificationFunc(t *testing.T, expectedPeerID peer.ID, expectedMsgType []p2pmsg.ControlMessageType, isExpectedErr func(err error) bool, topicType p2p.CtrlMsgTopicType, topics []string, messageIds []string) func(args mock.Arguments) { +func checkNotificationFunc(t *testing.T, + expectedPeerID peer.ID, + expectedMsgType p2pmsg.ControlMessageType, + isExpectedErr func(err error) bool, + topicType p2p.CtrlMsgTopicType) func(args mock.Arguments) { return func(args mock.Arguments) { notification, ok := args[0].(*p2p.InvCtrlMsgNotif) require.True(t, ok) - require.Equal(t, topicType, notification.Errors[0].CtrlMsgTopicType()) - for _, err := range notification.Errors { - if topics != nil { - require.Contains(t, topics, err.Topic()) - } - - if messageIds != nil { - require.Contains(t, messageIds, err.MessageID()) - } - } + require.Equal(t, topicType, notification.TopicType) require.Equal(t, expectedPeerID, notification.PeerID) - require.Contains(t, expectedMsgType, notification.MsgType) - require.True(t, isExpectedErr(notification.Errors[0].Err)) + require.Equal(t, expectedMsgType, notification.MsgType) + require.True(t, isExpectedErr(notification.Error)) } } @@ -1130,6 +860,7 @@ func inspectorFixture(t *testing.T, opts ...func(params *validation.InspectorPar return validationInspector, signalerCtx, cancel, distributor, rpcTracker, sporkID, idProvider, topicProviderOracle } -func defaultTopicOracle() []string { - return []string{} +func stopInspector(t *testing.T, cancel context.CancelFunc, inspector *validation.ControlMsgValidationInspector) { + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") } diff --git a/network/p2p/scoring/registry.go b/network/p2p/scoring/registry.go index b1a3262ccba..4b56de3754e 100644 --- a/network/p2p/scoring/registry.go +++ b/network/p2p/scoring/registry.go @@ -246,6 +246,7 @@ func (r *GossipSubAppSpecificScoreRegistry) AppSpecificScoreFunc() func(peer.ID) // - float64: the application specific score of the peer. func (r *GossipSubAppSpecificScoreRegistry) computeAppSpecificScore(pid peer.ID) float64 { appSpecificScore := float64(0) + lg := r.logger.With().Str("peer_id", p2plogging.PeerId(pid)).Logger() // (1) spam penalty: the penalty is applied to the application specific penalty when a peer conducts a spamming misbehaviour. spamRecord, err, spamRecordExists := r.spamScoreCache.Get(pid) @@ -291,6 +292,7 @@ func (r *GossipSubAppSpecificScoreRegistry) computeAppSpecificScore(pid peer.ID) lg.Trace(). Float64("total_app_specific_score", appSpecificScore). Msg("application specific score computed") + return appSpecificScore } @@ -368,7 +370,7 @@ func (r *GossipSubAppSpecificScoreRegistry) OnInvalidControlMessageNotification( // we use mutex to ensure the method is concurrency safe. lg := r.logger.With(). - Err(notification.Errors.Error()). + Err(notification.Error). Str("peer_id", p2plogging.PeerId(notification.PeerID)). 
Str("misbehavior_type", notification.MsgType.String()).Logger() @@ -379,45 +381,29 @@ func (r *GossipSubAppSpecificScoreRegistry) OnInvalidControlMessageNotification( } record, err := r.spamScoreCache.Adjust(notification.PeerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { - basePenalty := float64(0) + penalty := 0.0 switch notification.MsgType { case p2pmsg.CtrlMsgGraft: - basePenalty += r.penalty.GraftMisbehaviour + penalty += r.penalty.GraftMisbehaviour case p2pmsg.CtrlMsgPrune: - basePenalty += r.penalty.PruneMisbehaviour + penalty += r.penalty.PruneMisbehaviour case p2pmsg.CtrlMsgIHave: - basePenalty += r.penalty.IHaveMisbehaviour + penalty += r.penalty.IHaveMisbehaviour case p2pmsg.CtrlMsgIWant: - basePenalty += r.penalty.IWantMisbehaviour + penalty += r.penalty.IWantMisbehaviour case p2pmsg.RpcPublishMessage: - basePenalty += r.penalty.PublishMisbehaviour + penalty += r.penalty.PublishMisbehaviour default: // the error is considered fatal as it means that we have an unsupported misbehaviour type, we should crash the node to prevent routing attack vulnerability. lg.Fatal().Str("misbehavior_type", notification.MsgType.String()).Msg("unknown misbehaviour type") } - // apply the base penalty to the application specific penalty. - appliedPenalty := float64(0) - for _, err := range notification.Errors { - errPenalty := basePenalty - // reduce penalty for cluster prefixed topics allowing nodes that are potentially behind to catch up - if err.CtrlMsgTopicType() == p2p.CtrlMsgTopicTypeClusterPrefixed { - errPenalty *= r.penalty.ClusterPrefixedReductionFactor - } - appliedPenalty += errPenalty + // reduce penalty for cluster prefixed topics allowing nodes that are potentially behind to catch up + if notification.TopicType == p2p.CtrlMsgTopicTypeClusterPrefixed { + penalty *= r.penalty.ClusterPrefixedReductionFactor } - record.Penalty += appliedPenalty - - lg.Debug(). - Err(notification.Errors.Error()). - Str("control_message_type", notification.MsgType.String()). - Bool(logging.KeySuspicious, true). - Bool(logging.KeyNetworkingSecurity, true). - Int("error_count", notification.Errors.Len()). - Float64("applied_penalty", appliedPenalty). - Float64("app_specific_score", record.Penalty). - Msg("processed invalid control message notification, applied misbehaviour penalty and updated application specific penalty ") + record.Penalty += penalty return record }) @@ -429,7 +415,6 @@ func (r *GossipSubAppSpecificScoreRegistry) OnInvalidControlMessageNotification( lg.Debug(). Float64("spam_record_penalty", record.Penalty). Msg("applied misbehaviour penalty and updated application specific penalty") - } // afterSilencePeriod returns true if registry silence period is over, false otherwise. 
diff --git a/network/p2p/scoring/registry_test.go b/network/p2p/scoring/registry_test.go index 74d76e8d788..ce5a522c17c 100644 --- a/network/p2p/scoring/registry_test.go +++ b/network/p2p/scoring/registry_test.go @@ -165,7 +165,6 @@ func testScoreRegistryPeerWithSpamRecord(t *testing.T, messageType p2pmsg.Contro reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: messageType, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) // the penalty should now be updated in the spamRecords @@ -265,12 +264,13 @@ func testScoreRegistrySpamRecordWithUnknownIdentity(t *testing.T, messageType p2 return scoreOptParameters.UnknownIdentityPenalty == score }, 5*time.Second, 100*time.Millisecond) + // queryTime := time.Now() // report a misbehavior for the peer id. reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: messageType, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) + // the penalty should now be updated. record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords. require.True(t, ok) @@ -376,8 +376,8 @@ func testScoreRegistrySpamRecordWithSubscriptionPenalty(t *testing.T, messageTyp reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: messageType, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) + // the penalty should now be updated. record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords. assert.True(t, ok) @@ -420,7 +420,6 @@ func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { scoring.InitAppScoreRecordStateFunc(cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor), withStakedIdentities(peerID), withValidSubscriptions(peerID)) - ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) reg.Start(signalerCtx) @@ -432,7 +431,6 @@ func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgPrune, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. @@ -440,7 +438,6 @@ func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. @@ -448,7 +445,6 @@ func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgIHave, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. 
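The decay test above reports one misbehaviour per control message type and sleeps a second between reports so the spam penalty can decay in the cache. A minimal sketch of the decay the test waits for, assuming a simple multiplicative factor per tick (the registry's actual factor and tick interval come from its configuration, not from this sketch):

package main

import "fmt"

// decayOnce applies a single decay tick: the (negative) spam penalty is multiplied
// by the decay factor, moving it back toward zero.
func decayOnce(penalty, decayFactor float64) float64 {
	return penalty * decayFactor
}

func main() {
	penalty := -10.0
	for i := 0; i < 5; i++ {
		penalty = decayOnce(penalty, 0.8) // 0.8 is an assumed factor, for illustration only
		fmt.Printf("after tick %d: %.3f\n", i+1, penalty)
	}
}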
@@ -456,7 +452,6 @@ func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgIWant, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. @@ -464,7 +459,6 @@ func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.RpcPublishMessage, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. @@ -529,7 +523,6 @@ func TestScoreRegistry_SpamPenaltyDecayToZero(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) // decays happen every second, so we wait for 1 second to make sure the penalty is updated. @@ -604,7 +597,6 @@ func TestScoreRegistry_PersistingUnknownIdentityPenalty(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) // decays happen every second, so we wait for 1 second to make sure the penalty is updated. @@ -680,8 +672,8 @@ func TestScoreRegistry_PersistingInvalidSubscriptionPenalty(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType)}, }) + // with reported spam, the app specific score should be the default invalid subscription penalty + the spam penalty. 
require.Eventually(t, func() bool { score := reg.AppSpecificScoreFunc()(peerID) @@ -755,15 +747,9 @@ func TestScoreRegistry_TestSpamRecordDecayAdjustment(t *testing.T) { prevDecay := scoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor tolerance := 0.1 require.Eventually(t, func() bool { - errCount := 500 - errs := make(p2p.InvCtrlMsgErrs, errCount) - for i := 0; i < errCount; i++ { - errs[i] = p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid prune"), p2p.CtrlMsgNonClusterTopicType) - } reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peer1, MsgType: p2pmsg.CtrlMsgPrune, - Errors: errs, }) record, err, ok := spamRecords.Get(peer1) require.NoError(t, err) @@ -777,7 +763,6 @@ func TestScoreRegistry_TestSpamRecordDecayAdjustment(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peer2, MsgType: p2pmsg.CtrlMsgPrune, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid prune"), p2p.CtrlMsgNonClusterTopicType)}, }) // reduce penalty and increase Decay to scoring.MinimumSpamPenaltyDecayFactor record, err := spamRecords.Adjust(peer2, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { @@ -806,7 +791,6 @@ func TestScoreRegistry_TestSpamRecordDecayAdjustment(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peer2, MsgType: p2pmsg.CtrlMsgPrune, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid prune"), p2p.CtrlMsgNonClusterTopicType)}, }) record, err, ok := spamRecords.Get(peer1) require.NoError(t, err) @@ -871,17 +855,17 @@ func TestPeerSpamPenaltyClusterPrefixed(t *testing.T) { go func() { defer wg.Done() reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ - PeerID: peerID, - MsgType: ctlMsgType, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf(fmt.Sprintf("invalid %s", ctlMsgType)), p2p.CtrlMsgNonClusterTopicType)}, + PeerID: peerID, + MsgType: ctlMsgType, + TopicType: p2p.CtrlMsgNonClusterTopicType, }) }() go func() { defer wg.Done() reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ - PeerID: peerID, - MsgType: ctlMsgType, - Errors: p2p.InvCtrlMsgErrs{p2p.NewInvCtrlMsgErr(fmt.Errorf(fmt.Sprintf("invalid %s", ctlMsgType)), p2p.CtrlMsgTopicTypeClusterPrefixed)}, + PeerID: peerID, + MsgType: ctlMsgType, + TopicType: p2p.CtrlMsgTopicTypeClusterPrefixed, }) }() unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") @@ -911,145 +895,6 @@ func TestPeerSpamPenaltyClusterPrefixed(t *testing.T) { unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry") } -// TestInvalidControlMessageMultiErrorScoreCalculation tests that invalid control message penalties are calculated as expected when notifications -// contain multiple errors with multiple different severity levels. -func TestInvalidControlMessageMultiErrorScoreCalculation(t *testing.T) { - peerIds := unittest.PeerIdFixtures(t, 5) - cfg, err := config.DefaultConfig() - require.NoError(t, err) - // refresh cached app-specific score every 100 milliseconds to speed up the test. 
- cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond - maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor - reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t, - cfg.NetworkConfig.GossipSub.ScoringParameters, - scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor), - withStakedIdentities(peerIds...), - withValidSubscriptions(peerIds...)) - - // starts the registry. - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - reg.Start(signalerCtx) - unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "failed to start GossipSubAppSpecificScoreRegistry") - - for _, peerID := range peerIds { - require.Eventually(t, func() bool { - // initially, the spamRecords should not have the peer id. - assert.False(t, spamRecords.Has(peerID)) - // since the peer id does not have a spam record, the app specific score should be the max app specific reward, which - // is the default reward for a staked peer that has valid subscriptions. - score := reg.AppSpecificScoreFunc()(peerID) - return score == cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward - }, 5*time.Second, 500*time.Millisecond) - } - - type testCase struct { - notification *p2p.InvCtrlMsgNotif - expectedPenalty float64 - } - penaltyValues := penaltyValueFixtures() - testCases := []*testCase{ - // single error with, with random severity - { - notification: &p2p.InvCtrlMsgNotif{ - PeerID: peerIds[0], - MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType), - }, - }, - expectedPenalty: penaltyValues.GraftMisbehaviour, - }, - // multiple errors with, with same severity - { - notification: &p2p.InvCtrlMsgNotif{ - PeerID: peerIds[1], - MsgType: p2pmsg.CtrlMsgPrune, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid prune"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid prune"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid prune"), p2p.CtrlMsgNonClusterTopicType), - }, - }, - expectedPenalty: penaltyValues.PruneMisbehaviour * 3, - }, - // multiple errors with, with random severity's - { - notification: &p2p.InvCtrlMsgNotif{ - PeerID: peerIds[2], - MsgType: p2pmsg.CtrlMsgIHave, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid ihave"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid ihave"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid ihave"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid ihave"), p2p.CtrlMsgNonClusterTopicType), - }, - }, - expectedPenalty: penaltyValues.IHaveMisbehaviour + - penaltyValues.IHaveMisbehaviour + - penaltyValues.IHaveMisbehaviour + - penaltyValues.IHaveMisbehaviour, - }, - // multiple errors with, with random severity's iwant - { - notification: &p2p.InvCtrlMsgNotif{ - PeerID: peerIds[3], - MsgType: p2pmsg.CtrlMsgIWant, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgNonClusterTopicType), - 
p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgNonClusterTopicType), - }, - }, - expectedPenalty: penaltyValues.IWantMisbehaviour + - penaltyValues.IWantMisbehaviour + - penaltyValues.IWantMisbehaviour + - penaltyValues.IWantMisbehaviour, - }, - // multiple errors with mixed cluster prefixed and non cluster prefixed, with random severity's iwant - { - notification: &p2p.InvCtrlMsgNotif{ - PeerID: peerIds[4], - MsgType: p2pmsg.CtrlMsgIWant, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgNonClusterTopicType), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgTopicTypeClusterPrefixed), - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid iwant"), p2p.CtrlMsgTopicTypeClusterPrefixed), - }, - }, - expectedPenalty: penaltyValues.IWantMisbehaviour + - penaltyValues.IWantMisbehaviour + - (penaltyValues.IWantMisbehaviour * penaltyValues.ClusterPrefixedReductionFactor) + - (penaltyValues.IWantMisbehaviour * penaltyValues.ClusterPrefixedReductionFactor), - }, - } - - for _, tCase := range testCases { - // report a misbehavior for the peer id. - reg.OnInvalidControlMessageNotification(tCase.notification) - // the penalty should now be updated in the spamRecords - record, err, ok := spamRecords.Get(tCase.notification.PeerID) // get the record from the spamRecords. - require.True(t, ok) - require.NoError(t, err) - unittest.RequireNumericallyClose(t, tCase.expectedPenalty, record.Penalty, 10e-3) - require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state. - - require.Eventually(t, func() bool { - // this peer has a spam record, with no subscription penalty. Hence, the app specific score should only be the spam penalty, - // and the peer should be deprived of the default reward for its valid staked role. - score := reg.AppSpecificScoreFunc()(tCase.notification.PeerID) - tolerance := 10e-2 - return math.Abs(tCase.expectedPenalty-score)/tCase.expectedPenalty < tolerance - }, 5*time.Second, 500*time.Millisecond) - } - - // stop the registry. - cancel() - unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry") -} - // TestScoringRegistrySilencePeriod ensures that the scoring registry does not penalize nodes during the silence period, and // starts to penalize nodes only after the silence period is over. func TestScoringRegistrySilencePeriod(t *testing.T) { @@ -1103,9 +948,6 @@ func TestScoringRegistrySilencePeriod(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType), - }, }) expectedNumOfSilencedNotif++ // spam records should not be created during the silence period @@ -1130,16 +972,13 @@ func TestScoringRegistrySilencePeriod(t *testing.T) { reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, MsgType: p2pmsg.CtrlMsgGraft, - Errors: p2p.InvCtrlMsgErrs{ - p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid graft"), p2p.CtrlMsgNonClusterTopicType), - }, }) // the penalty should now be applied and spam records created. 
record, err, ok := spamRecords.Get(peerID) assert.True(t, ok) assert.NoError(t, err) expectedPenalty := penaltyValueFixtures().GraftMisbehaviour - unittest.RequireNumericallyClose(t, expectedPenalty, record.Penalty, 10e-3) + assert.Less(t, math.Abs(expectedPenalty-record.Penalty), 10e-3) assert.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state. require.Eventually(t, func() bool { diff --git a/network/p2p/scoring/scoring_test.go b/network/p2p/scoring/scoring_test.go index 793d8c6dec5..cee819d3c85 100644 --- a/network/p2p/scoring/scoring_test.go +++ b/network/p2p/scoring/scoring_test.go @@ -144,13 +144,14 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) { p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { return unittest.ProposalFixture() }) + // simulates node2 spamming node1 with invalid gossipsub control messages until node2 gets dissallow listed. // since the decay will start lower than .99 and will only be incremented by default .01, we need to spam a lot of messages so that the node gets disallow listed for i := 0; i < 750; i++ { inspectorSuite1.consumer.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: node2.ID(), MsgType: p2pmsg.ControlMessageTypes()[rand.Intn(len(p2pmsg.ControlMessageTypes()))], - Errors: randomInvCtlMsgErrs(300), + Error: fmt.Errorf("invalid control message"), }) } @@ -170,16 +171,3 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) { return unittest.ProposalFixture() }) } - -// randomInvCtlMsgErrs returns n amount of invalid control message errors. Each error returned is set to p2p.CtrlMsgNonClusterTopicType by default. -// Args: -// - int: the number of errors to create. -// Returns: -// - p2p.InvCtrlMsgErrs: list of errors. 
-func randomInvCtlMsgErrs(n int) p2p.InvCtrlMsgErrs { - errs := make(p2p.InvCtrlMsgErrs, n) - for i := 0; i < n; i++ { - errs[i] = p2p.NewInvCtrlMsgErr(fmt.Errorf("invalid control message"), p2p.CtrlMsgNonClusterTopicType) - } - return errs -} diff --git a/network/p2p/test/fixtures.go b/network/p2p/test/fixtures.go index 85dcabee38d..50fb4cb80ca 100644 --- a/network/p2p/test/fixtures.go +++ b/network/p2p/test/fixtures.go @@ -120,31 +120,26 @@ func NodeFixture(t *testing.T, connManager, err := connection.NewConnManager(logger, parameters.MetricsCfg.Metrics, ¶meters.FlowConfig.NetworkConfig.ConnectionManager) require.NoError(t, err) - libP2PNodeBuilderParams := &p2pbuilder.LibP2PNodeBuilderConfig{ - Logger: logger, - MetricsConfig: parameters.MetricsCfg, - NetworkingType: parameters.NetworkingType, - Address: parameters.Address, - NetworkKey: parameters.Key, - SporkId: sporkID, - IdProvider: parameters.IdProvider, - ResourceManagerParams: ¶meters.FlowConfig.NetworkConfig.ResourceManager, - RpcInspectorParams: ¶meters.FlowConfig.NetworkConfig.GossipSub.RpcInspector, - PeerManagerParams: parameters.PeerManagerConfig, - SubscriptionProviderParams: ¶meters.FlowConfig.NetworkConfig.GossipSub.SubscriptionProvider, - DisallowListCacheCfg: &p2p.DisallowListCacheConfig{ + + builder := p2pbuilder.NewNodeBuilder( + logger, + ¶meters.FlowConfig.NetworkConfig.GossipSub, + parameters.MetricsCfg, + parameters.NetworkingType, + parameters.Address, + parameters.Key, + sporkID, + parameters.IdProvider, + ¶meters.FlowConfig.NetworkConfig.ResourceManager, + parameters.PeerManagerConfig, + &p2p.DisallowListCacheConfig{ MaxSize: uint32(1000), Metrics: metrics.NewNoopCollector(), }, - UnicastConfig: &p2pbuilderconfig.UnicastConfig{ + &p2pbuilderconfig.UnicastConfig{ Unicast: parameters.FlowConfig.NetworkConfig.Unicast, RateLimiterDistributor: parameters.UnicastRateLimiterDistributor, - }, - GossipSubCfg: ¶meters.FlowConfig.NetworkConfig.GossipSub, - } - builder, err := p2pbuilder.NewNodeBuilder(libP2PNodeBuilderParams) - require.NoError(t, err) - builder. + }). SetConnectionManager(connManager). SetResourceManager(parameters.ResourceManager) @@ -916,15 +911,6 @@ func GossipSubRpcFixture(t *testing.T, msgCnt int, opts ...GossipSubCtrlOption) type GossipSubCtrlOption func(*pb.ControlMessage) // GossipSubCtrlFixture returns a ControlMessage with the given options. -// -// Args: -// -// opts: variable number of GossipSubCtrlOption functions to configure the ControlMessage. -// -// Returns: -// A ControlMessage configured with the specified options. -// Example: GossipSubCtrlFixture(WithGraft(2, "exampleTopic"), WithPrune("topic1", "topic2")) will return a ControlMessage -// with configured GRAFT and PRUNE messages. func GossipSubCtrlFixture(opts ...GossipSubCtrlOption) *pb.ControlMessage { msg := &pb.ControlMessage{} for _, opt := range opts { @@ -934,16 +920,6 @@ func GossipSubCtrlFixture(opts ...GossipSubCtrlOption) *pb.ControlMessage { } // WithIHave adds iHave control messages of the given size and number to the control message. -// -// Args: -// -// msgCount: number of iHave messages to add. -// msgIDCount: number of message IDs to add to each iHave message. -// topicId: topic ID for the iHave messages. -// -// Returns: -// A GossipSubCtrlOption that adds iHave messages to the control message. -// Example: WithIHave(2, 3, "exampleTopic") will add 2 iHave messages, each with 3 message IDs and the specified topic ID. 
func WithIHave(msgCount, msgIDCount int, topicId string) GossipSubCtrlOption { return func(msg *pb.ControlMessage) { iHaves := make([]*pb.ControlIHave, msgCount) @@ -957,46 +933,6 @@ func WithIHave(msgCount, msgIDCount int, topicId string) GossipSubCtrlOption { } } -// WithIHaves adds iHave control messages for each topic ID with m number of message IDs. -// -// Args: -// -// m: number of message IDs to add to each iHave message. -// topicIds: variable number of topic IDs for which iHave messages will be added. -// -// Returns: -// A GossipSubCtrlOption that adds iHave messages to the control message. -// Example: WithIHaves(3, "topic1", "topic2") will add iHave messages for each specified topic ID, each with 3 message IDs. -func WithIHaves(m int, topicIds ...string) GossipSubCtrlOption { - return func(msg *pb.ControlMessage) { - iHaves := make([]*pb.ControlIHave, len(topicIds)) - for i, topicId := range topicIds { - topicId := topicId - iHaves[i] = &pb.ControlIHave{ - TopicID: &topicId, - MessageIDs: GossipSubMessageIdsFixture(m), - } - } - msg.Ihave = iHaves - } -} - -// WithIWantMessageIds adds an iWant control message to the list of iWants with the provided message ids. -// Args: -// -// msgIds: list of message ids. -// -// Returns: -// A GossipSubCtrlOption that adds iWant messages to the control message. -// Example: WithIWantMessageIds("message_id_1", "message_id_2") will add one iWant message with 2 message IDs. -func WithIWantMessageIds(messageIds ...string) GossipSubCtrlOption { - return func(msg *pb.ControlMessage) { - msg.Iwant = append(msg.Iwant, &pb.ControlIWant{ - MessageIDs: messageIds, - }) - } -} - // WithIWant adds iWant control messages of the given size and number to the control message. // The message IDs are generated randomly. // Args: @@ -1019,17 +955,7 @@ func WithIWant(iWantCount int, msgIdsPerIWant int) GossipSubCtrlOption { } } -// WithGraft adds Graft control messages to the control message. -// Graft messages are added based on the specified number and topic ID. -// -// Args: -// -// msgCount: number of Graft messages to add. -// topicId: topic ID for the Graft messages. -// -// Returns: -// A GossipSubCtrlOption that adds Graft messages to the control message. -// Example: WithGraft(2, "exampleTopic") will add 2 Graft messages, each with the specified topic ID. +// WithGraft adds GRAFT control messages with given topicID to the control message. func WithGraft(msgCount int, topicId string) GossipSubCtrlOption { return func(msg *pb.ControlMessage) { grafts := make([]*pb.ControlGraft, msgCount) @@ -1042,60 +968,7 @@ func WithGraft(msgCount int, topicId string) GossipSubCtrlOption { } } -// WithGrafts adds GRAFT control messages for each topic ID to the control message. -// -// Args: -// -// topicIds: variable number of topic IDs for which GRAFT messages will be added. -// -// Returns: -// A GossipSubCtrlOption that adds GRAFT messages to the control message. -// Example: WithGrafts("topic1", "topic2", "topic3") will add one GRAFT messages for each specified topic ID. -func WithGrafts(topicIds ...string) GossipSubCtrlOption { - return func(msg *pb.ControlMessage) { - grafts := make([]*pb.ControlGraft, len(topicIds)) - for i, topicId := range topicIds { - topicId := topicId - grafts[i] = &pb.ControlGraft{ - TopicID: &topicId, - } - } - msg.Graft = grafts - } -} - -// WithPrunes adds PRUNE control messages for each topic ID to the control message. -// -// Args: -// -// topicIds: variable number of topic IDs for which PRUNE messages will be added. 
-// -// Returns: -// A GossipSubCtrlOption that adds PRUNE messages to the control message. -// Example: WithPrunes("topic1", "topic2", "topic3") will add one PRUNE messages for each specified topic ID. -func WithPrunes(topicIds ...string) GossipSubCtrlOption { - return func(msg *pb.ControlMessage) { - prunes := make([]*pb.ControlPrune, len(topicIds)) - for i, topicId := range topicIds { - topicId := topicId - prunes[i] = &pb.ControlPrune{ - TopicID: &topicId, - } - } - msg.Prune = prunes - } -} - -// WithPrune adds PRUNE control messages with the given topic ID to the control message. -// -// Args: -// -// msgCount: number of PRUNE messages to add. -// topicId: topic ID for the PRUNE messages. -// -// Returns: -// A GossipSubCtrlOption that adds PRUNE messages to the control message. -// Example: WithPrune(2, "exampleTopic") will add 2 PRUNE messages, each with the specified topic ID. +// WithPrune adds PRUNE control messages with given topicID to the control message. func WithPrune(msgCount int, topicId string) GossipSubCtrlOption { return func(msg *pb.ControlMessage) { prunes := make([]*pb.ControlPrune, msgCount) diff --git a/utils/unittest/unittest.go b/utils/unittest/unittest.go index 403bd170121..9fba23ccd69 100644 --- a/utils/unittest/unittest.go +++ b/utils/unittest/unittest.go @@ -453,15 +453,6 @@ func GenerateRandomStringWithLen(commentLen uint) string { return string(bytes) } -// GenerateRandomStringsWithLen returns a list of random strings of the provided length. -func GenerateRandomStringsWithLen(n, commentLen uint) []string { - strs := make([]string, n) - for i := uint(0); i < n; i++ { - strs[i] = GenerateRandomStringWithLen(commentLen) - } - return strs -} - // PeerIdFixture creates a random and unique peer ID (libp2p node ID). func PeerIdFixture(tb testing.TB) peer.ID { peerID, err := peerIDFixture()
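For reference, the fixture helpers whose doc comments are trimmed above (GossipSubCtrlFixture, WithGraft, WithPrune, and the removed WithGrafts/WithPrunes variants) all share the same functional-option shape. A minimal sketch of that pattern with local stand-in types rather than the pubsub protobuf messages:

package fixtures

// ControlGraft and ControlMessage stand in for the pubsub protobuf types.
type ControlGraft struct{ TopicID *string }

type ControlMessage struct{ Graft []*ControlGraft }

// CtrlOption mutates a ControlMessage under construction.
type CtrlOption func(*ControlMessage)

// WithGraftSketch appends msgCount GRAFT entries that all reference topicID.
func WithGraftSketch(msgCount int, topicID string) CtrlOption {
	return func(msg *ControlMessage) {
		for i := 0; i < msgCount; i++ {
			topic := topicID // copy so each entry holds its own pointer
			msg.Graft = append(msg.Graft, &ControlGraft{TopicID: &topic})
		}
	}
}

// CtrlFixtureSketch applies the options to an empty control message.
func CtrlFixtureSketch(opts ...CtrlOption) *ControlMessage {
	msg := &ControlMessage{}
	for _, opt := range opts {
		opt(msg)
	}
	return msg
}

Usage mirrors the real fixtures: CtrlFixtureSketch(WithGraftSketch(2, "example-topic")) yields a control message with two GRAFT entries for the given topic.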