From 973234dd090b342b04dde39c8b43c5160e67d5a4 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Tue, 7 Jan 2025 16:12:28 +0800 Subject: [PATCH] *: remove api mode concept (#8952) ref tikv/pd#8477 Signed-off-by: Ryan Leung --- client/client.go | 26 +-- client/clients/tso/client.go | 2 +- client/clients/tso/dispatcher_test.go | 2 +- client/http/client.go | 10 +- client/inner_client.go | 36 ++-- client/keyspace_client.go | 8 +- client/meta_storage_client.go | 8 +- client/resource_manager_client.go | 4 +- ...discovery.go => mock_service_discovery.go} | 50 ++--- ...vice_discovery.go => service_discovery.go} | 204 +++++++++--------- ...very_test.go => service_discovery_test.go} | 20 +- .../servicediscovery/tso_service_discovery.go | 26 +-- cmd/pd-server/main.go | 14 +- pkg/mcs/registry/registry.go | 8 +- pkg/mcs/scheduling/server/cluster.go | 18 +- pkg/mcs/scheduling/server/config/config.go | 6 +- pkg/mcs/scheduling/server/config/watcher.go | 4 +- pkg/mcs/scheduling/server/grpc_service.go | 2 +- pkg/mcs/scheduling/server/meta/watcher.go | 4 +- pkg/mcs/scheduling/server/rule/watcher.go | 4 +- pkg/mcs/scheduling/server/server.go | 8 +- pkg/mcs/utils/constant/constant.go | 4 +- pkg/member/election_leader.go | 2 +- pkg/schedule/coordinator.go | 2 +- .../schedulers/scheduler_controller.go | 2 +- pkg/tso/keyspace_group_manager.go | 2 +- pkg/utils/apiutil/serverapi/middleware.go | 4 +- server/api/admin.go | 2 +- server/api/server.go | 4 +- server/apiv2/handlers/micro_service.go | 4 +- server/cluster/cluster.go | 22 +- server/config/config.go | 2 +- server/grpc_service.go | 4 +- server/server.go | 22 +- server/server_test.go | 4 +- tests/cluster.go | 44 ++-- tests/integrations/client/client_test.go | 2 +- .../mcs/discovery/register_test.go | 8 +- .../mcs/keyspace/tso_keyspace_group_test.go | 2 +- tests/integrations/mcs/members/member_test.go | 2 +- tests/integrations/mcs/scheduling/api_test.go | 46 ++-- .../mcs/scheduling/config_test.go | 14 +- .../integrations/mcs/scheduling/meta_test.go | 2 +- .../integrations/mcs/scheduling/rule_test.go | 4 +- .../mcs/scheduling/server_test.go | 38 ++-- tests/integrations/mcs/tso/api_test.go | 8 +- .../mcs/tso/keyspace_group_manager_test.go | 14 +- tests/integrations/mcs/tso/proxy_test.go | 2 +- tests/integrations/mcs/tso/server_test.go | 52 ++--- tests/integrations/tso/client_test.go | 6 +- tests/integrations/tso/consistency_test.go | 2 +- tests/integrations/tso/server_test.go | 2 +- tests/server/api/scheduler_test.go | 2 +- .../apiv2/handlers/tso_keyspace_group_test.go | 2 +- tests/server/server_test.go | 2 +- tests/testutil.go | 28 +-- tools/pd-ctl/pdctl/command/config_command.go | 24 +-- tools/pd-ctl/tests/config/config_test.go | 54 ++--- .../tests/keyspace/keyspace_group_test.go | 14 +- tools/pd-ctl/tests/keyspace/keyspace_test.go | 4 +- tools/pd-simulator/simulator/drive.go | 2 +- 61 files changed, 455 insertions(+), 469 deletions(-) rename client/servicediscovery/{mock_pd_service_discovery.go => mock_service_discovery.go} (57%) rename client/servicediscovery/{pd_service_discovery.go => service_discovery.go} (81%) rename client/servicediscovery/{pd_service_discovery_test.go => service_discovery_test.go} (96%) diff --git a/client/client.go b/client/client.go index 7aa28cbc0cd..e3d3f4e5b14 100644 --- a/client/client.go +++ b/client/client.go @@ -360,8 +360,8 @@ func newClientWithKeyspaceName( c := &client{ callerComponent: adjustCallerComponent(callerComponent), inner: &innerClient{ - // Create a PD service discovery with null keyspace id, then query the real id 
with the keyspace name, - // finally update the keyspace id to the PD service discovery for the following interactions. + // Create a service discovery with null keyspace id, then query the real id with the keyspace name, + // finally update the keyspace id to the service discovery for the following interactions. keyspaceID: constants.NullKeyspaceID, updateTokenConnectionCh: make(chan struct{}, 1), ctx: clientCtx, @@ -384,7 +384,7 @@ func newClientWithKeyspaceName( } c.inner.keyspaceID = keyspaceMeta.GetId() // c.keyspaceID is the source of truth for keyspace id. - c.inner.pdSvcDiscovery.SetKeyspaceID(c.inner.keyspaceID) + c.inner.serviceDiscovery.SetKeyspaceID(c.inner.keyspaceID) return nil } @@ -412,17 +412,17 @@ func (c *client) ResetTSOClient() { // GetClusterID returns the ClusterID. func (c *client) GetClusterID(context.Context) uint64 { - return c.inner.pdSvcDiscovery.GetClusterID() + return c.inner.serviceDiscovery.GetClusterID() } // GetLeaderURL returns the leader URL. func (c *client) GetLeaderURL() string { - return c.inner.pdSvcDiscovery.GetServingURL() + return c.inner.serviceDiscovery.GetServingURL() } // GetServiceDiscovery returns the client-side service discovery object func (c *client) GetServiceDiscovery() sd.ServiceDiscovery { - return c.inner.pdSvcDiscovery + return c.inner.serviceDiscovery } // UpdateOption updates the client option. @@ -438,7 +438,7 @@ func (c *client) UpdateOption(option opt.DynamicOption, value any) error { } case opt.EnableTSOFollowerProxy: if c.inner.getServiceMode() != pdpb.ServiceMode_PD_SVC_MODE { - return errors.New("[pd] tso follower proxy is only supported in PD service mode") + return errors.New("[pd] tso follower proxy is only supported in PD mode") } enable, ok := value.(bool) if !ok { @@ -485,7 +485,7 @@ func (c *client) GetAllMembers(ctx context.Context) ([]*pdpb.Member, error) { // getClientAndContext returns the leader pd client and the original context. If leader is unhealthy, it returns // follower pd client and the context which holds forward information. func (c *client) getClientAndContext(ctx context.Context) (pdpb.PDClient, context.Context) { - serviceClient := c.inner.pdSvcDiscovery.GetServiceClient() + serviceClient := c.inner.serviceDiscovery.GetServiceClient() if serviceClient == nil || serviceClient.GetClientConn() == nil { return nil, ctx } @@ -526,7 +526,7 @@ func (c *client) GetLocalTS(ctx context.Context, _ string) (physical int64, logi // GetMinTS implements the TSOClient interface. func (c *client) GetMinTS(ctx context.Context) (physical int64, logical int64, err error) { - // Handle compatibility issue in case of PD/API server doesn't support GetMinTS API. + // Handle compatibility issue in case of PD/PD service doesn't support GetMinTS API. 
serviceMode := c.inner.getServiceMode() switch serviceMode { case pdpb.ServiceMode_UNKNOWN_SVC_MODE: @@ -598,7 +598,7 @@ func (c *client) GetRegionFromMember(ctx context.Context, key []byte, memberURLs var resp *pdpb.GetRegionResponse for _, url := range memberURLs { - conn, err := c.inner.pdSvcDiscovery.GetOrCreateGRPCConn(url) + conn, err := c.inner.serviceDiscovery.GetOrCreateGRPCConn(url) if err != nil { log.Error("[pd] can't get grpc connection", zap.String("member-URL", url), errs.ZapError(err)) continue @@ -619,7 +619,7 @@ func (c *client) GetRegionFromMember(ctx context.Context, key []byte, memberURLs if resp == nil { metrics.CmdFailedDurationGetRegion.Observe(time.Since(start).Seconds()) - c.inner.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.inner.serviceDiscovery.ScheduleCheckMemberChanged() errorMsg := fmt.Sprintf("[pd] can't get region info from member URLs: %+v", memberURLs) return nil, errors.WithStack(errors.New(errorMsg)) } @@ -1150,7 +1150,7 @@ func (c *client) SplitRegions(ctx context.Context, splitKeys [][]byte, opts ...o func (c *client) requestHeader() *pdpb.RequestHeader { return &pdpb.RequestHeader{ - ClusterId: c.inner.pdSvcDiscovery.GetClusterID(), + ClusterId: c.inner.serviceDiscovery.GetClusterID(), CallerId: string(caller.GetCallerID()), CallerComponent: string(c.callerComponent), } @@ -1334,7 +1334,7 @@ func (c *client) respForErr(observer prometheus.Observer, start time.Time, err e if err != nil || header.GetError() != nil { observer.Observe(time.Since(start).Seconds()) if err != nil { - c.inner.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.inner.serviceDiscovery.ScheduleCheckMemberChanged() return errors.WithStack(err) } return errors.WithStack(errors.New(header.GetError().String())) diff --git a/client/clients/tso/client.go b/client/clients/tso/client.go index d24dba52394..7bc768ee21b 100644 --- a/client/clients/tso/client.go +++ b/client/clients/tso/client.go @@ -563,7 +563,7 @@ func (c *Cli) DispatchRequest(request *Request) (bool, error) { // Client is closed, no need to retry. return false, request.clientCtx.Err() case <-c.ctx.Done(): - // tsoClient is closed due to the PD service mode switch, which is retryable. + // tsoClient is closed due to the service mode switch, which is retryable. return true, c.ctx.Err() default: // This failpoint will increase the possibility that the request is sent to a closed dispatcher. diff --git a/client/clients/tso/dispatcher_test.go b/client/clients/tso/dispatcher_test.go index 7e5554c7c7b..6d8f94791e6 100644 --- a/client/clients/tso/dispatcher_test.go +++ b/client/clients/tso/dispatcher_test.go @@ -53,7 +53,7 @@ func (m *mockTSOServiceProvider) getOption() *opt.Option { } func (*mockTSOServiceProvider) getServiceDiscovery() sd.ServiceDiscovery { - return sd.NewMockPDServiceDiscovery([]string{mockStreamURL}, nil) + return sd.NewMockServiceDiscovery([]string{mockStreamURL}, nil) } func (m *mockTSOServiceProvider) getConnectionCtxMgr() *cctx.Manager[*tsoStream] { diff --git a/client/http/client.go b/client/http/client.go index fa9801cf764..b7109166a30 100644 --- a/client/http/client.go +++ b/client/http/client.go @@ -245,7 +245,7 @@ func (ci *clientInner) doRequest( if readErr != nil { logFields = append(logFields, zap.NamedError("read-body-error", err)) } else { - // API server will return a JSON body containing the detailed error message + // PD service will return a JSON body containing the detailed error message // when the status code is not `http.StatusOK` 200. 
bs = bytes.TrimSpace(bs) logFields = append(logFields, zap.ByteString("body", bs)) @@ -304,7 +304,7 @@ func WithMetrics( } } -// NewClientWithServiceDiscovery creates a PD HTTP client with the given PD service discovery. +// NewClientWithServiceDiscovery creates a PD HTTP client with the given service discovery. func NewClientWithServiceDiscovery( source string, sd sd.ServiceDiscovery, @@ -332,7 +332,7 @@ func NewClient( for _, opt := range opts { opt(c) } - sd := sd.NewDefaultPDServiceDiscovery(ctx, cancel, pdAddrs, c.inner.tlsConf) + sd := sd.NewDefaultServiceDiscovery(ctx, cancel, pdAddrs, c.inner.tlsConf) if err := sd.Init(); err != nil { log.Error("[pd] init service discovery failed", zap.String("source", source), zap.Strings("pd-addrs", pdAddrs), zap.Error(err)) @@ -420,7 +420,7 @@ func NewHTTPClientWithRequestChecker(checker requestChecker) *http.Client { } } -// newClientWithMockServiceDiscovery creates a new PD HTTP client with a mock PD service discovery. +// newClientWithMockServiceDiscovery creates a new PD HTTP client with a mock service discovery. func newClientWithMockServiceDiscovery( source string, pdAddrs []string, @@ -432,7 +432,7 @@ func newClientWithMockServiceDiscovery( for _, opt := range opts { opt(c) } - sd := sd.NewMockPDServiceDiscovery(pdAddrs, c.inner.tlsConf) + sd := sd.NewMockServiceDiscovery(pdAddrs, c.inner.tlsConf) if err := sd.Init(); err != nil { log.Error("[pd] init mock service discovery failed", zap.String("source", source), zap.Strings("pd-addrs", pdAddrs), zap.Error(err)) diff --git a/client/inner_client.go b/client/inner_client.go index 404cbcf0b80..8379b6a51a9 100644 --- a/client/inner_client.go +++ b/client/inner_client.go @@ -26,10 +26,10 @@ const ( ) type innerClient struct { - keyspaceID uint32 - svrUrls []string - pdSvcDiscovery sd.ServiceDiscovery - tokenDispatcher *tokenDispatcher + keyspaceID uint32 + svrUrls []string + serviceDiscovery sd.ServiceDiscovery + tokenDispatcher *tokenDispatcher // For service mode switching. serviceModeKeeper @@ -45,13 +45,13 @@ type innerClient struct { } func (c *innerClient) init(updateKeyspaceIDCb sd.UpdateKeyspaceIDFunc) error { - c.pdSvcDiscovery = sd.NewPDServiceDiscovery( + c.serviceDiscovery = sd.NewServiceDiscovery( c.ctx, c.cancel, &c.wg, c.setServiceMode, updateKeyspaceIDCb, c.keyspaceID, c.svrUrls, c.tlsCfg, c.option) if err := c.setup(); err != nil { c.cancel() - if c.pdSvcDiscovery != nil { - c.pdSvcDiscovery.Close() + if c.serviceDiscovery != nil { + c.serviceDiscovery.Close() } return err } @@ -92,10 +92,10 @@ func (c *innerClient) resetTSOClientLocked(mode pdpb.ServiceMode) { switch mode { case pdpb.ServiceMode_PD_SVC_MODE: newTSOCli = tso.NewClient(c.ctx, c.option, - c.pdSvcDiscovery, &tso.PDStreamBuilderFactory{}) + c.serviceDiscovery, &tso.PDStreamBuilderFactory{}) case pdpb.ServiceMode_API_SVC_MODE: newTSOSvcDiscovery = sd.NewTSOServiceDiscovery( - c.ctx, c, c.pdSvcDiscovery, + c.ctx, c, c.serviceDiscovery, c.keyspaceID, c.tlsCfg, c.option) // At this point, the keyspace group isn't known yet. Starts from the default keyspace group, // and will be updated later. @@ -119,12 +119,12 @@ func (c *innerClient) resetTSOClientLocked(mode pdpb.ServiceMode) { oldTSOClient.Close() // Replace the old TSO service discovery if needed. 
oldTSOSvcDiscovery := c.tsoSvcDiscovery - // If newTSOSvcDiscovery is nil, that's expected, as it means we are switching to PD service mode and + // If newTSOSvcDiscovery is nil, that's expected, as it means we are switching to PD mode and // no tso microservice discovery is needed. c.tsoSvcDiscovery = newTSOSvcDiscovery // Close the old TSO service discovery safely after both the old client and service discovery are replaced. if oldTSOSvcDiscovery != nil { - // We are switching from API service mode to PD service mode, so delete the old tso microservice discovery. + // We are switching from PD service mode to PD mode, so delete the old tso microservice discovery. oldTSOSvcDiscovery.Close() } } @@ -153,7 +153,7 @@ func (c *innerClient) close() { c.wg.Wait() c.serviceModeKeeper.close() - c.pdSvcDiscovery.Close() + c.serviceDiscovery.Close() if c.tokenDispatcher != nil { tokenErr := errors.WithStack(errs.ErrClosing) @@ -169,12 +169,12 @@ func (c *innerClient) setup() error { } // Init the client base. - if err := c.pdSvcDiscovery.Init(); err != nil { + if err := c.serviceDiscovery.Init(); err != nil { return err } // Register callbacks - c.pdSvcDiscovery.AddServingURLSwitchedCallback(c.scheduleUpdateTokenConnection) + c.serviceDiscovery.AddServingURLSwitchedCallback(c.scheduleUpdateTokenConnection) // Create dispatchers c.createTokenDispatcher() @@ -186,12 +186,12 @@ func (c *innerClient) setup() error { func (c *innerClient) getRegionAPIClientAndContext(ctx context.Context, allowFollower bool) (sd.ServiceClient, context.Context) { var serviceClient sd.ServiceClient if allowFollower { - serviceClient = c.pdSvcDiscovery.GetServiceClientByKind(sd.UniversalAPIKind) + serviceClient = c.serviceDiscovery.GetServiceClientByKind(sd.UniversalAPIKind) if serviceClient != nil { return serviceClient, serviceClient.BuildGRPCTargetContext(ctx, !allowFollower) } } - serviceClient = c.pdSvcDiscovery.GetServiceClient() + serviceClient = c.serviceDiscovery.GetServiceClient() if serviceClient == nil || serviceClient.GetClientConn() == nil { return nil, ctx } @@ -201,12 +201,12 @@ func (c *innerClient) getRegionAPIClientAndContext(ctx context.Context, allowFol // gRPCErrorHandler is used to handle the gRPC error returned by the resource manager service. func (c *innerClient) gRPCErrorHandler(err error) { if errs.IsLeaderChange(err) { - c.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.serviceDiscovery.ScheduleCheckMemberChanged() } } func (c *innerClient) getOrCreateGRPCConn() (*grpc.ClientConn, error) { - cc, err := c.pdSvcDiscovery.GetOrCreateGRPCConn(c.pdSvcDiscovery.GetServingURL()) + cc, err := c.serviceDiscovery.GetOrCreateGRPCConn(c.serviceDiscovery.GetServingURL()) if err != nil { return nil, err } diff --git a/client/keyspace_client.go b/client/keyspace_client.go index 84bc29054eb..507279e906c 100644 --- a/client/keyspace_client.go +++ b/client/keyspace_client.go @@ -41,7 +41,7 @@ type KeyspaceClient interface { // keyspaceClient returns the KeyspaceClient from current PD leader. 
func (c *client) keyspaceClient() keyspacepb.KeyspaceClient { - if client := c.inner.pdSvcDiscovery.GetServingEndpointClientConn(); client != nil { + if client := c.inner.serviceDiscovery.GetServingEndpointClientConn(); client != nil { return keyspacepb.NewKeyspaceClient(client) } return nil @@ -70,7 +70,7 @@ func (c *client) LoadKeyspace(ctx context.Context, name string) (*keyspacepb.Key if err != nil { metrics.CmdFailedDurationLoadKeyspace.Observe(time.Since(start).Seconds()) - c.inner.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.inner.serviceDiscovery.ScheduleCheckMemberChanged() return nil, err } @@ -115,7 +115,7 @@ func (c *client) UpdateKeyspaceState(ctx context.Context, id uint32, state keysp if err != nil { metrics.CmdFailedDurationUpdateKeyspaceState.Observe(time.Since(start).Seconds()) - c.inner.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.inner.serviceDiscovery.ScheduleCheckMemberChanged() return nil, err } @@ -159,7 +159,7 @@ func (c *client) GetAllKeyspaces(ctx context.Context, startID uint32, limit uint if err != nil { metrics.CmdDurationGetAllKeyspaces.Observe(time.Since(start).Seconds()) - c.inner.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.inner.serviceDiscovery.ScheduleCheckMemberChanged() return nil, err } diff --git a/client/meta_storage_client.go b/client/meta_storage_client.go index fbabd60debd..7652884720d 100644 --- a/client/meta_storage_client.go +++ b/client/meta_storage_client.go @@ -33,7 +33,7 @@ import ( // metaStorageClient gets the meta storage client from current PD leader. func (c *innerClient) metaStorageClient() meta_storagepb.MetaStorageClient { - if client := c.pdSvcDiscovery.GetServingEndpointClientConn(); client != nil { + if client := c.serviceDiscovery.GetServingEndpointClientConn(); client != nil { return meta_storagepb.NewMetaStorageClient(client) } return nil @@ -74,7 +74,7 @@ func (c *innerClient) Put(ctx context.Context, key, value []byte, opts ...opt.Me Lease: options.Lease, PrevKv: options.PrevKv, } - ctx = grpcutil.BuildForwardContext(ctx, c.pdSvcDiscovery.GetServingURL()) + ctx = grpcutil.BuildForwardContext(ctx, c.serviceDiscovery.GetServingURL()) cli := c.metaStorageClient() if cli == nil { cancel() @@ -113,7 +113,7 @@ func (c *innerClient) Get(ctx context.Context, key []byte, opts ...opt.MetaStora Limit: options.Limit, Revision: options.Revision, } - ctx = grpcutil.BuildForwardContext(ctx, c.pdSvcDiscovery.GetServingURL()) + ctx = grpcutil.BuildForwardContext(ctx, c.serviceDiscovery.GetServingURL()) cli := c.metaStorageClient() if cli == nil { cancel() @@ -179,7 +179,7 @@ func (c *innerClient) respForMetaStorageErr(observer prometheus.Observer, start if err != nil || header.GetError() != nil { observer.Observe(time.Since(start).Seconds()) if err != nil { - c.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.serviceDiscovery.ScheduleCheckMemberChanged() return errors.WithStack(err) } return errors.WithStack(errors.New(header.GetError().String())) diff --git a/client/resource_manager_client.go b/client/resource_manager_client.go index 0c481631b93..3e4cd1a3cc8 100644 --- a/client/resource_manager_client.go +++ b/client/resource_manager_client.go @@ -331,7 +331,7 @@ func (c *innerClient) handleResourceTokenDispatcher(dispatcherCtx context.Contex // If the stream is still nil, return an error. 
if stream == nil { firstRequest.done <- errors.Errorf("failed to get the stream connection") - c.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.serviceDiscovery.ScheduleCheckMemberChanged() connection.reset() continue } @@ -343,7 +343,7 @@ func (c *innerClient) handleResourceTokenDispatcher(dispatcherCtx context.Contex default: } if err = c.processTokenRequests(stream, firstRequest); err != nil { - c.pdSvcDiscovery.ScheduleCheckMemberChanged() + c.serviceDiscovery.ScheduleCheckMemberChanged() connection.reset() log.Info("[resource_manager] token request error", zap.Error(err)) } diff --git a/client/servicediscovery/mock_pd_service_discovery.go b/client/servicediscovery/mock_service_discovery.go similarity index 57% rename from client/servicediscovery/mock_pd_service_discovery.go rename to client/servicediscovery/mock_service_discovery.go index 87b74ae2136..6ca649f4575 100644 --- a/client/servicediscovery/mock_pd_service_discovery.go +++ b/client/servicediscovery/mock_service_discovery.go @@ -21,24 +21,24 @@ import ( "google.golang.org/grpc" ) -var _ ServiceDiscovery = (*mockPDServiceDiscovery)(nil) +var _ ServiceDiscovery = (*mockServiceDiscovery)(nil) -type mockPDServiceDiscovery struct { +type mockServiceDiscovery struct { urls []string tlsCfg *tls.Config clients []ServiceClient } -// NewMockPDServiceDiscovery creates a mock PD service discovery. -func NewMockPDServiceDiscovery(urls []string, tlsCfg *tls.Config) *mockPDServiceDiscovery { - return &mockPDServiceDiscovery{ +// NewMockServiceDiscovery creates a mock service discovery. +func NewMockServiceDiscovery(urls []string, tlsCfg *tls.Config) *mockServiceDiscovery { + return &mockServiceDiscovery{ urls: urls, tlsCfg: tlsCfg, } } // Init directly creates the service clients with the given URLs. -func (m *mockPDServiceDiscovery) Init() error { +func (m *mockServiceDiscovery) Init() error { m.clients = make([]ServiceClient, 0, len(m.urls)) for _, url := range m.urls { m.clients = append(m.clients, newPDServiceClient(url, m.urls[0], nil, false)) @@ -47,61 +47,61 @@ func (m *mockPDServiceDiscovery) Init() error { } // Close clears the service clients. -func (m *mockPDServiceDiscovery) Close() { +func (m *mockServiceDiscovery) Close() { clear(m.clients) } -// GetAllServiceClients returns all service clients init in the mock PD service discovery. -func (m *mockPDServiceDiscovery) GetAllServiceClients() []ServiceClient { +// GetAllServiceClients returns all service clients init in the mock service discovery. +func (m *mockServiceDiscovery) GetAllServiceClients() []ServiceClient { return m.clients } // GetClusterID implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetClusterID() uint64 { return 0 } +func (*mockServiceDiscovery) GetClusterID() uint64 { return 0 } // GetKeyspaceID implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetKeyspaceID() uint32 { return 0 } +func (*mockServiceDiscovery) GetKeyspaceID() uint32 { return 0 } // SetKeyspaceID implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) SetKeyspaceID(uint32) {} +func (*mockServiceDiscovery) SetKeyspaceID(uint32) {} // GetKeyspaceGroupID implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetKeyspaceGroupID() uint32 { return 0 } +func (*mockServiceDiscovery) GetKeyspaceGroupID() uint32 { return 0 } // GetServiceURLs implements the ServiceDiscovery interface. 
-func (*mockPDServiceDiscovery) GetServiceURLs() []string { return nil } +func (*mockServiceDiscovery) GetServiceURLs() []string { return nil } // GetServingEndpointClientConn implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { return nil } +func (*mockServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { return nil } // GetClientConns implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetClientConns() *sync.Map { return nil } +func (*mockServiceDiscovery) GetClientConns() *sync.Map { return nil } // GetServingURL implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetServingURL() string { return "" } +func (*mockServiceDiscovery) GetServingURL() string { return "" } // GetBackupURLs implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetBackupURLs() []string { return nil } +func (*mockServiceDiscovery) GetBackupURLs() []string { return nil } // GetServiceClient implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetServiceClient() ServiceClient { return nil } +func (*mockServiceDiscovery) GetServiceClient() ServiceClient { return nil } // GetServiceClientByKind implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetServiceClientByKind(APIKind) ServiceClient { return nil } +func (*mockServiceDiscovery) GetServiceClientByKind(APIKind) ServiceClient { return nil } // GetOrCreateGRPCConn implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) GetOrCreateGRPCConn(string) (*grpc.ClientConn, error) { +func (*mockServiceDiscovery) GetOrCreateGRPCConn(string) (*grpc.ClientConn, error) { return nil, nil } // ScheduleCheckMemberChanged implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) ScheduleCheckMemberChanged() {} +func (*mockServiceDiscovery) ScheduleCheckMemberChanged() {} // CheckMemberChanged implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) CheckMemberChanged() error { return nil } +func (*mockServiceDiscovery) CheckMemberChanged() error { return nil } // AddServingURLSwitchedCallback implements the ServiceDiscovery interface. -func (*mockPDServiceDiscovery) AddServingURLSwitchedCallback(...func()) {} +func (*mockServiceDiscovery) AddServingURLSwitchedCallback(...func()) {} // AddServiceURLsSwitchedCallback implements the ServiceDiscovery interface. 
-func (*mockPDServiceDiscovery) AddServiceURLsSwitchedCallback(...func()) {} +func (*mockServiceDiscovery) AddServiceURLsSwitchedCallback(...func()) {} diff --git a/client/servicediscovery/pd_service_discovery.go b/client/servicediscovery/service_discovery.go similarity index 81% rename from client/servicediscovery/pd_service_discovery.go rename to client/servicediscovery/service_discovery.go index 931f950c6d1..bef80e28a37 100644 --- a/client/servicediscovery/pd_service_discovery.go +++ b/client/servicediscovery/service_discovery.go @@ -157,11 +157,11 @@ type ServiceClient interface { } var ( - _ ServiceClient = (*pdServiceClient)(nil) - _ ServiceClient = (*pdServiceAPIClient)(nil) + _ ServiceClient = (*serviceClient)(nil) + _ ServiceClient = (*serviceAPIClient)(nil) ) -type pdServiceClient struct { +type serviceClient struct { url string conn *grpc.ClientConn isLeader bool @@ -171,10 +171,10 @@ type pdServiceClient struct { } // NOTE: In the current implementation, the URL passed in is bound to have a scheme, -// because it is processed in `newPDServiceDiscovery`, and the url returned by etcd member owns the scheme. +// because it is processed in `newServiceDiscovery`, and the url returned by etcd member owns the scheme. // When testing, the URL is also bound to have a scheme. func newPDServiceClient(url, leaderURL string, conn *grpc.ClientConn, isLeader bool) ServiceClient { - cli := &pdServiceClient{ + cli := &serviceClient{ url: url, conn: conn, isLeader: isLeader, @@ -187,7 +187,7 @@ func newPDServiceClient(url, leaderURL string, conn *grpc.ClientConn, isLeader b } // GetURL implements ServiceClient. -func (c *pdServiceClient) GetURL() string { +func (c *serviceClient) GetURL() string { if c == nil { return "" } @@ -195,7 +195,7 @@ func (c *pdServiceClient) GetURL() string { } // BuildGRPCTargetContext implements ServiceClient. -func (c *pdServiceClient) BuildGRPCTargetContext(ctx context.Context, toLeader bool) context.Context { +func (c *serviceClient) BuildGRPCTargetContext(ctx context.Context, toLeader bool) context.Context { if c == nil || c.isLeader { return ctx } @@ -206,7 +206,7 @@ func (c *pdServiceClient) BuildGRPCTargetContext(ctx context.Context, toLeader b } // IsConnectedToLeader implements ServiceClient. -func (c *pdServiceClient) IsConnectedToLeader() bool { +func (c *serviceClient) IsConnectedToLeader() bool { if c == nil { return false } @@ -214,14 +214,14 @@ func (c *pdServiceClient) IsConnectedToLeader() bool { } // Available implements ServiceClient. -func (c *pdServiceClient) Available() bool { +func (c *serviceClient) Available() bool { if c == nil { return false } return !c.networkFailure.Load() } -func (c *pdServiceClient) checkNetworkAvailable(ctx context.Context) { +func (c *serviceClient) checkNetworkAvailable(ctx context.Context) { if c == nil || c.conn == nil { return } @@ -242,7 +242,7 @@ func (c *pdServiceClient) checkNetworkAvailable(ctx context.Context) { } // GetClientConn implements ServiceClient. -func (c *pdServiceClient) GetClientConn() *grpc.ClientConn { +func (c *serviceClient) GetClientConn() *grpc.ClientConn { if c == nil { return nil } @@ -250,7 +250,7 @@ func (c *pdServiceClient) GetClientConn() *grpc.ClientConn { } // NeedRetry implements ServiceClient. 
-func (c *pdServiceClient) NeedRetry(pdErr *pdpb.Error, err error) bool { +func (c *serviceClient) NeedRetry(pdErr *pdpb.Error, err error) bool { if c.IsConnectedToLeader() { return false } @@ -267,9 +267,9 @@ func regionAPIErrorFn(pdErr *pdpb.Error) bool { return pdErr.GetType() == pdpb.ErrorType_REGION_NOT_FOUND } -// pdServiceAPIClient is a specific API client for PD service. -// It extends the pdServiceClient and adds additional fields for managing availability -type pdServiceAPIClient struct { +// serviceAPIClient is a specific API client for service. +// It extends the serviceClient and adds additional fields for managing availability +type serviceAPIClient struct { ServiceClient fn errFn @@ -278,19 +278,19 @@ type pdServiceAPIClient struct { } func newPDServiceAPIClient(client ServiceClient, f errFn) ServiceClient { - return &pdServiceAPIClient{ + return &serviceAPIClient{ ServiceClient: client, fn: f, } } // Available implements ServiceClient. -func (c *pdServiceAPIClient) Available() bool { +func (c *serviceAPIClient) Available() bool { return c.ServiceClient.Available() && !c.unavailable.Load() } // markAsAvailable is used to try to mark the client as available if unavailable status is expired. -func (c *pdServiceAPIClient) markAsAvailable() { +func (c *serviceAPIClient) markAsAvailable() { if !c.unavailable.Load() { return } @@ -301,7 +301,7 @@ func (c *pdServiceAPIClient) markAsAvailable() { } // NeedRetry implements ServiceClient. -func (c *pdServiceAPIClient) NeedRetry(pdErr *pdpb.Error, err error) bool { +func (c *serviceAPIClient) NeedRetry(pdErr *pdpb.Error, err error) bool { if c.IsConnectedToLeader() { return false } @@ -317,43 +317,43 @@ func (c *pdServiceAPIClient) NeedRetry(pdErr *pdpb.Error, err error) bool { return true } -// pdServiceBalancerNode is a balancer node for PD service. -// It extends the pdServiceClient and adds additional fields for the next polling client in the chain. -type pdServiceBalancerNode struct { - *pdServiceAPIClient - next *pdServiceBalancerNode +// serviceBalancerNode is a balancer node for PD. +// It extends the serviceClient and adds additional fields for the next polling client in the chain. +type serviceBalancerNode struct { + *serviceAPIClient + next *serviceBalancerNode } -// pdServiceBalancer is a load balancer for PD service clients. -// It is used to balance the request to all servers and manage the connections to multiple PD service nodes. -type pdServiceBalancer struct { +// serviceBalancer is a load balancer for clients. +// It is used to balance the request to all servers and manage the connections to multiple nodes. 
+type serviceBalancer struct { mu sync.Mutex - now *pdServiceBalancerNode + now *serviceBalancerNode totalNode int errFn errFn } -func newPDServiceBalancer(fn errFn) *pdServiceBalancer { - return &pdServiceBalancer{ +func newServiceBalancer(fn errFn) *serviceBalancer { + return &serviceBalancer{ errFn: fn, } } -func (c *pdServiceBalancer) set(clients []ServiceClient) { +func (c *serviceBalancer) set(clients []ServiceClient) { c.mu.Lock() defer c.mu.Unlock() if len(clients) == 0 { return } c.totalNode = len(clients) - head := &pdServiceBalancerNode{ - pdServiceAPIClient: newPDServiceAPIClient(clients[c.totalNode-1], c.errFn).(*pdServiceAPIClient), + head := &serviceBalancerNode{ + serviceAPIClient: newPDServiceAPIClient(clients[c.totalNode-1], c.errFn).(*serviceAPIClient), } head.next = head last := head for i := c.totalNode - 2; i >= 0; i-- { - next := &pdServiceBalancerNode{ - pdServiceAPIClient: newPDServiceAPIClient(clients[i], c.errFn).(*pdServiceAPIClient), - next: head, + next := &serviceBalancerNode{ + serviceAPIClient: newPDServiceAPIClient(clients[i], c.errFn).(*serviceAPIClient), + next: head, } head = next last.next = head @@ -361,7 +361,7 @@ func (c *pdServiceBalancer) set(clients []ServiceClient) { c.now = head } -func (c *pdServiceBalancer) check() { +func (c *serviceBalancer) check() { c.mu.Lock() defer c.mu.Unlock() for range c.totalNode { @@ -370,11 +370,11 @@ func (c *pdServiceBalancer) check() { } } -func (c *pdServiceBalancer) next() { +func (c *serviceBalancer) next() { c.now = c.now.next } -func (c *pdServiceBalancer) get() (ret ServiceClient) { +func (c *serviceBalancer) get() (ret ServiceClient) { c.mu.Lock() defer c.mu.Unlock() i := 0 @@ -403,22 +403,22 @@ type TSOEventSource interface { } var ( - _ ServiceDiscovery = (*pdServiceDiscovery)(nil) - _ TSOEventSource = (*pdServiceDiscovery)(nil) + _ ServiceDiscovery = (*serviceDiscovery)(nil) + _ TSOEventSource = (*serviceDiscovery)(nil) ) -// pdServiceDiscovery is the service discovery client of PD/API service which is quorum based -type pdServiceDiscovery struct { +// serviceDiscovery is the service discovery client of PD/PD service which is quorum based +type serviceDiscovery struct { isInitialized bool urls atomic.Value // Store as []string // PD leader - leader atomic.Value // Store as pdServiceClient + leader atomic.Value // Store as serviceClient // PD follower - followers sync.Map // Store as map[string]pdServiceClient + followers sync.Map // Store as map[string]serviceClient // PD leader and PD followers - all atomic.Value // Store as []pdServiceClient - apiCandidateNodes [apiKindCount]*pdServiceBalancer + all atomic.Value // Store as []serviceClient + apiCandidateNodes [apiKindCount]*serviceBalancer // PD follower URLs. Only for tso. followerURLs atomic.Value // Store as []string @@ -450,17 +450,17 @@ type pdServiceDiscovery struct { option *opt.Option } -// NewDefaultPDServiceDiscovery returns a new default PD service discovery-based client. -func NewDefaultPDServiceDiscovery( +// NewDefaultServiceDiscovery returns a new default service discovery-based client. 
+func NewDefaultServiceDiscovery( ctx context.Context, cancel context.CancelFunc, urls []string, tlsCfg *tls.Config, ) ServiceDiscovery { var wg sync.WaitGroup - return NewPDServiceDiscovery(ctx, cancel, &wg, nil, nil, constants.DefaultKeyspaceID, urls, tlsCfg, opt.NewOption()) + return NewServiceDiscovery(ctx, cancel, &wg, nil, nil, constants.DefaultKeyspaceID, urls, tlsCfg, opt.NewOption()) } -// NewPDServiceDiscovery returns a new PD service discovery-based client. -func NewPDServiceDiscovery( +// NewServiceDiscovery returns a new service discovery-based client. +func NewServiceDiscovery( ctx context.Context, cancel context.CancelFunc, wg *sync.WaitGroup, serviceModeUpdateCb func(pdpb.ServiceMode), @@ -468,12 +468,12 @@ func NewPDServiceDiscovery( keyspaceID uint32, urls []string, tlsCfg *tls.Config, option *opt.Option, ) ServiceDiscovery { - pdsd := &pdServiceDiscovery{ + pdsd := &serviceDiscovery{ checkMembershipCh: make(chan struct{}, 1), ctx: ctx, cancel: cancel, wg: wg, - apiCandidateNodes: [apiKindCount]*pdServiceBalancer{newPDServiceBalancer(emptyErrorFn), newPDServiceBalancer(regionAPIErrorFn)}, + apiCandidateNodes: [apiKindCount]*serviceBalancer{newServiceBalancer(emptyErrorFn), newServiceBalancer(regionAPIErrorFn)}, serviceModeUpdateCb: serviceModeUpdateCb, updateKeyspaceIDFunc: updateKeyspaceIDFunc, keyspaceID: keyspaceID, @@ -485,8 +485,8 @@ func NewPDServiceDiscovery( return pdsd } -// Init initializes the PD service discovery. -func (c *pdServiceDiscovery) Init() error { +// Init initializes the service discovery. +func (c *serviceDiscovery) Init() error { if c.isInitialized { return nil } @@ -523,7 +523,7 @@ func (c *pdServiceDiscovery) Init() error { return nil } -func (c *pdServiceDiscovery) initRetry(f func() error) error { +func (c *serviceDiscovery) initRetry(f func() error) error { var err error ticker := time.NewTicker(time.Second) defer ticker.Stop() @@ -540,7 +540,7 @@ func (c *pdServiceDiscovery) initRetry(f func() error) error { return errors.WithStack(err) } -func (c *pdServiceDiscovery) updateMemberLoop() { +func (c *serviceDiscovery) updateMemberLoop() { defer c.wg.Done() ctx, cancel := context.WithCancel(c.ctx) @@ -564,7 +564,7 @@ func (c *pdServiceDiscovery) updateMemberLoop() { } } -func (c *pdServiceDiscovery) updateServiceModeLoop() { +func (c *serviceDiscovery) updateServiceModeLoop() { defer c.wg.Done() failpoint.Inject("skipUpdateServiceMode", func() { failpoint.Return() @@ -596,7 +596,7 @@ func (c *pdServiceDiscovery) updateServiceModeLoop() { } } -func (c *pdServiceDiscovery) memberHealthCheckLoop() { +func (c *serviceDiscovery) memberHealthCheckLoop() { defer c.wg.Done() memberCheckLoopCtx, memberCheckLoopCancel := context.WithCancel(c.ctx) @@ -616,19 +616,19 @@ func (c *pdServiceDiscovery) memberHealthCheckLoop() { } } -func (c *pdServiceDiscovery) checkLeaderHealth(ctx context.Context) { +func (c *serviceDiscovery) checkLeaderHealth(ctx context.Context) { ctx, cancel := context.WithTimeout(ctx, c.option.Timeout) defer cancel() leader := c.getLeaderServiceClient() leader.checkNetworkAvailable(ctx) } -func (c *pdServiceDiscovery) checkFollowerHealth(ctx context.Context) { +func (c *serviceDiscovery) checkFollowerHealth(ctx context.Context) { c.followers.Range(func(_, value any) bool { // To ensure that the leader's healthy check is not delayed, shorten the duration. 
ctx, cancel := context.WithTimeout(ctx, MemberHealthCheckInterval/3) defer cancel() - serviceClient := value.(*pdServiceClient) + serviceClient := value.(*serviceClient) serviceClient.checkNetworkAvailable(ctx) return true }) @@ -638,12 +638,12 @@ func (c *pdServiceDiscovery) checkFollowerHealth(ctx context.Context) { } // Close releases all resources. -func (c *pdServiceDiscovery) Close() { +func (c *serviceDiscovery) Close() { if c == nil { return } c.closeOnce.Do(func() { - log.Info("[pd] close pd service discovery client") + log.Info("[pd] close service discovery client") c.clientConns.Range(func(key, cc any) bool { if err := cc.(*grpc.ClientConn).Close(); err != nil { log.Error("[pd] failed to close grpc clientConn", errs.ZapError(errs.ErrCloseGRPCConn, err)) @@ -655,28 +655,28 @@ func (c *pdServiceDiscovery) Close() { } // GetClusterID returns the ClusterID. -func (c *pdServiceDiscovery) GetClusterID() uint64 { +func (c *serviceDiscovery) GetClusterID() uint64 { return c.clusterID } // GetKeyspaceID returns the ID of the keyspace -func (c *pdServiceDiscovery) GetKeyspaceID() uint32 { +func (c *serviceDiscovery) GetKeyspaceID() uint32 { return c.keyspaceID } // SetKeyspaceID sets the ID of the keyspace -func (c *pdServiceDiscovery) SetKeyspaceID(keyspaceID uint32) { +func (c *serviceDiscovery) SetKeyspaceID(keyspaceID uint32) { c.keyspaceID = keyspaceID } // GetKeyspaceGroupID returns the ID of the keyspace group -func (*pdServiceDiscovery) GetKeyspaceGroupID() uint32 { - // PD/API service only supports the default keyspace group +func (*serviceDiscovery) GetKeyspaceGroupID() uint32 { + // PD only supports the default keyspace group return constants.DefaultKeyspaceGroupID } // DiscoverMicroservice discovers the microservice with the specified type and returns the server urls. -func (c *pdServiceDiscovery) discoverMicroservice(svcType serviceType) (urls []string, err error) { +func (c *serviceDiscovery) discoverMicroservice(svcType serviceType) (urls []string, err error) { switch svcType { case apiService: urls = c.GetServiceURLs() @@ -703,14 +703,14 @@ func (c *pdServiceDiscovery) discoverMicroservice(svcType serviceType) (urls []s // GetServiceURLs returns the URLs of the servers. // For testing use. It should only be called when the client is closed. -func (c *pdServiceDiscovery) GetServiceURLs() []string { +func (c *serviceDiscovery) GetServiceURLs() []string { return c.urls.Load().([]string) } // GetServingEndpointClientConn returns the grpc client connection of the serving endpoint // which is the leader in a quorum-based cluster or the primary in a primary/secondary // configured cluster. -func (c *pdServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { +func (c *serviceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { if cc, ok := c.clientConns.Load(c.getLeaderURL()); ok { return cc.(*grpc.ClientConn) } @@ -718,32 +718,32 @@ func (c *pdServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { } // GetClientConns returns the mapping {URL -> a gRPC connection} -func (c *pdServiceDiscovery) GetClientConns() *sync.Map { +func (c *serviceDiscovery) GetClientConns() *sync.Map { return &c.clientConns } // GetServingURL returns the leader url -func (c *pdServiceDiscovery) GetServingURL() string { +func (c *serviceDiscovery) GetServingURL() string { return c.getLeaderURL() } // GetBackupURLs gets the URLs of the current reachable followers // in a quorum-based cluster. Used for tso currently. 
-func (c *pdServiceDiscovery) GetBackupURLs() []string { +func (c *serviceDiscovery) GetBackupURLs() []string { return c.getFollowerURLs() } // getLeaderServiceClient returns the leader ServiceClient. -func (c *pdServiceDiscovery) getLeaderServiceClient() *pdServiceClient { +func (c *serviceDiscovery) getLeaderServiceClient() *serviceClient { leader := c.leader.Load() if leader == nil { return nil } - return leader.(*pdServiceClient) + return leader.(*serviceClient) } // GetServiceClientByKind returns ServiceClient of the specific kind. -func (c *pdServiceDiscovery) GetServiceClientByKind(kind APIKind) ServiceClient { +func (c *serviceDiscovery) GetServiceClientByKind(kind APIKind) ServiceClient { client := c.apiCandidateNodes[kind].get() if client == nil { return nil @@ -752,7 +752,7 @@ func (c *pdServiceDiscovery) GetServiceClientByKind(kind APIKind) ServiceClient } // GetServiceClient returns the leader/primary ServiceClient if it is healthy. -func (c *pdServiceDiscovery) GetServiceClient() ServiceClient { +func (c *serviceDiscovery) GetServiceClient() ServiceClient { leaderClient := c.getLeaderServiceClient() if c.option.EnableForwarding && !leaderClient.Available() { if followerClient := c.GetServiceClientByKind(ForwardAPIKind); followerClient != nil { @@ -767,7 +767,7 @@ func (c *pdServiceDiscovery) GetServiceClient() ServiceClient { } // GetAllServiceClients implements ServiceDiscovery -func (c *pdServiceDiscovery) GetAllServiceClients() []ServiceClient { +func (c *serviceDiscovery) GetAllServiceClients() []ServiceClient { all := c.all.Load() if all == nil { return nil @@ -778,7 +778,7 @@ func (c *pdServiceDiscovery) GetAllServiceClients() []ServiceClient { // ScheduleCheckMemberChanged is used to check if there is any membership // change among the leader and the followers. -func (c *pdServiceDiscovery) ScheduleCheckMemberChanged() { +func (c *serviceDiscovery) ScheduleCheckMemberChanged() { select { case c.checkMembershipCh <- struct{}{}: default: @@ -787,24 +787,24 @@ func (c *pdServiceDiscovery) ScheduleCheckMemberChanged() { // CheckMemberChanged Immediately check if there is any membership change among the leader/followers in a // quorum-based cluster or among the primary/secondaries in a primary/secondary configured cluster. -func (c *pdServiceDiscovery) CheckMemberChanged() error { +func (c *serviceDiscovery) CheckMemberChanged() error { return c.updateMember() } // AddServingURLSwitchedCallback adds callbacks which will be called // when the leader is switched. -func (c *pdServiceDiscovery) AddServingURLSwitchedCallback(callbacks ...func()) { +func (c *serviceDiscovery) AddServingURLSwitchedCallback(callbacks ...func()) { c.leaderSwitchedCbs = append(c.leaderSwitchedCbs, callbacks...) } // AddServiceURLsSwitchedCallback adds callbacks which will be called when // any leader/follower is changed. -func (c *pdServiceDiscovery) AddServiceURLsSwitchedCallback(callbacks ...func()) { +func (c *serviceDiscovery) AddServiceURLsSwitchedCallback(callbacks ...func()) { c.membersChangedCbs = append(c.membersChangedCbs, callbacks...) } // SetTSOLeaderURLUpdatedCallback adds a callback which will be called when the TSO leader is updated. 
-func (c *pdServiceDiscovery) SetTSOLeaderURLUpdatedCallback(callback tsoLeaderURLUpdatedFunc) { +func (c *serviceDiscovery) SetTSOLeaderURLUpdatedCallback(callback tsoLeaderURLUpdatedFunc) { url := c.getLeaderURL() if len(url) > 0 { if err := callback(url); err != nil { @@ -815,12 +815,12 @@ func (c *pdServiceDiscovery) SetTSOLeaderURLUpdatedCallback(callback tsoLeaderUR } // getLeaderURL returns the leader URL. -func (c *pdServiceDiscovery) getLeaderURL() string { +func (c *serviceDiscovery) getLeaderURL() string { return c.getLeaderServiceClient().GetURL() } // getFollowerURLs returns the follower URLs. -func (c *pdServiceDiscovery) getFollowerURLs() []string { +func (c *serviceDiscovery) getFollowerURLs() []string { followerURLs := c.followerURLs.Load() if followerURLs == nil { return []string{} @@ -828,7 +828,7 @@ func (c *pdServiceDiscovery) getFollowerURLs() []string { return followerURLs.([]string) } -func (c *pdServiceDiscovery) initClusterID() error { +func (c *serviceDiscovery) initClusterID() error { ctx, cancel := context.WithCancel(c.ctx) defer cancel() clusterID := uint64(0) @@ -855,7 +855,7 @@ func (c *pdServiceDiscovery) initClusterID() error { return nil } -func (c *pdServiceDiscovery) checkServiceModeChanged() error { +func (c *serviceDiscovery) checkServiceModeChanged() error { leaderURL := c.getLeaderURL() if len(leaderURL) == 0 { return errors.New("no leader found") @@ -883,7 +883,7 @@ func (c *pdServiceDiscovery) checkServiceModeChanged() error { return nil } -func (c *pdServiceDiscovery) updateMember() error { +func (c *serviceDiscovery) updateMember() error { for _, url := range c.GetServiceURLs() { members, err := c.getMembers(c.ctx, url, UpdateMemberTimeout) // Check the cluster ID. @@ -916,7 +916,7 @@ func (c *pdServiceDiscovery) updateMember() error { return errs.ErrClientGetMember.FastGenByArgs() } -func (c *pdServiceDiscovery) getClusterInfo(ctx context.Context, url string, timeout time.Duration) (*pdpb.GetClusterInfoResponse, error) { +func (c *serviceDiscovery) getClusterInfo(ctx context.Context, url string, timeout time.Duration) (*pdpb.GetClusterInfoResponse, error) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() cc, err := c.GetOrCreateGRPCConn(url) @@ -935,7 +935,7 @@ func (c *pdServiceDiscovery) getClusterInfo(ctx context.Context, url string, tim return clusterInfo, nil } -func (c *pdServiceDiscovery) getMembers(ctx context.Context, url string, timeout time.Duration) (*pdpb.GetMembersResponse, error) { +func (c *serviceDiscovery) getMembers(ctx context.Context, url string, timeout time.Duration) (*pdpb.GetMembersResponse, error) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() cc, err := c.GetOrCreateGRPCConn(url) @@ -954,7 +954,7 @@ func (c *pdServiceDiscovery) getMembers(ctx context.Context, url string, timeout return members, nil } -func (c *pdServiceDiscovery) updateURLs(members []*pdpb.Member) { +func (c *serviceDiscovery) updateURLs(members []*pdpb.Member) { urls := make([]string, 0, len(members)) for _, m := range members { urls = append(urls, m.GetClientUrls()...) 
@@ -974,7 +974,7 @@ func (c *pdServiceDiscovery) updateURLs(members []*pdpb.Member) { log.Info("[pd] update member urls", zap.Strings("old-urls", oldURLs), zap.Strings("new-urls", urls)) } -func (c *pdServiceDiscovery) switchLeader(url string) (bool, error) { +func (c *serviceDiscovery) switchLeader(url string) (bool, error) { oldLeader := c.getLeaderServiceClient() if url == oldLeader.GetURL() && oldLeader.GetClientConn() != nil { return false, nil @@ -999,10 +999,10 @@ func (c *pdServiceDiscovery) switchLeader(url string) (bool, error) { return true, err } -func (c *pdServiceDiscovery) updateFollowers(members []*pdpb.Member, leaderID uint64, leaderURL string) (changed bool) { - followers := make(map[string]*pdServiceClient) +func (c *serviceDiscovery) updateFollowers(members []*pdpb.Member, leaderID uint64, leaderURL string) (changed bool) { + followers := make(map[string]*serviceClient) c.followers.Range(func(key, value any) bool { - followers[key.(string)] = value.(*pdServiceClient) + followers[key.(string)] = value.(*serviceClient) return true }) var followerURLs []string @@ -1015,7 +1015,7 @@ func (c *pdServiceDiscovery) updateFollowers(members []*pdpb.Member, leaderID ui // FIXME: How to safely compare urls(also for leader)? For now, only allows one client url. url := tlsutil.PickMatchedURL(member.GetClientUrls(), c.tlsCfg) if client, ok := c.followers.Load(url); ok { - if client.(*pdServiceClient).GetClientConn() == nil { + if client.(*serviceClient).GetClientConn() == nil { conn, err := c.GetOrCreateGRPCConn(url) if err != nil || conn == nil { log.Warn("[pd] failed to connect follower", zap.String("follower", url), errs.ZapError(err)) @@ -1048,7 +1048,7 @@ func (c *pdServiceDiscovery) updateFollowers(members []*pdpb.Member, leaderID ui return } -func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader *pdpb.Member) error { +func (c *serviceDiscovery) updateServiceClient(members []*pdpb.Member, leader *pdpb.Member) error { // FIXME: How to safely compare leader urls? For now, only allows one client url. leaderURL := tlsutil.PickMatchedURL(leader.GetClientUrls(), c.tlsCfg) leaderChanged, err := c.switchLeader(leaderURL) @@ -1064,7 +1064,7 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader clients = append(clients, leaderClient) } c.followers.Range(func(_, value any) bool { - clients = append(clients, value.(*pdServiceClient)) + clients = append(clients, value.(*serviceClient)) return true }) c.all.Store(clients) @@ -1076,6 +1076,6 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader } // GetOrCreateGRPCConn returns the corresponding grpc client connection of the given URL. -func (c *pdServiceDiscovery) GetOrCreateGRPCConn(url string) (*grpc.ClientConn, error) { +func (c *serviceDiscovery) GetOrCreateGRPCConn(url string) (*grpc.ClientConn, error) { return grpcutil.GetOrCreateGRPCConn(c.ctx, &c.clientConns, url, c.tlsCfg, c.option.GRPCDialOptions...) 
} diff --git a/client/servicediscovery/pd_service_discovery_test.go b/client/servicediscovery/service_discovery_test.go similarity index 96% rename from client/servicediscovery/pd_service_discovery_test.go rename to client/servicediscovery/service_discovery_test.go index dc0a0bd4511..0a678718fdc 100644 --- a/client/servicediscovery/pd_service_discovery_test.go +++ b/client/servicediscovery/service_discovery_test.go @@ -193,14 +193,14 @@ func (suite *serviceClientTestSuite) TestServiceClient() { re.True(leader.IsConnectedToLeader()) re.NoError(failpoint.Enable("github.com/tikv/pd/client/servicediscovery/unreachableNetwork1", "return(true)")) - follower.(*pdServiceClient).checkNetworkAvailable(suite.ctx) - leader.(*pdServiceClient).checkNetworkAvailable(suite.ctx) + follower.(*serviceClient).checkNetworkAvailable(suite.ctx) + leader.(*serviceClient).checkNetworkAvailable(suite.ctx) re.False(follower.Available()) re.False(leader.Available()) re.NoError(failpoint.Disable("github.com/tikv/pd/client/servicediscovery/unreachableNetwork1")) - follower.(*pdServiceClient).checkNetworkAvailable(suite.ctx) - leader.(*pdServiceClient).checkNetworkAvailable(suite.ctx) + follower.(*serviceClient).checkNetworkAvailable(suite.ctx) + leader.(*serviceClient).checkNetworkAvailable(suite.ctx) re.True(follower.Available()) re.True(leader.Available()) @@ -259,11 +259,11 @@ func (suite *serviceClientTestSuite) TestServiceClient() { re.False(leaderAPIClient.NeedRetry(pdErr2, nil)) re.False(followerAPIClient.Available()) re.True(leaderAPIClient.Available()) - followerAPIClient.(*pdServiceAPIClient).markAsAvailable() - leaderAPIClient.(*pdServiceAPIClient).markAsAvailable() + followerAPIClient.(*serviceAPIClient).markAsAvailable() + leaderAPIClient.(*serviceAPIClient).markAsAvailable() re.False(followerAPIClient.Available()) time.Sleep(time.Millisecond * 100) - followerAPIClient.(*pdServiceAPIClient).markAsAvailable() + followerAPIClient.(*serviceAPIClient).markAsAvailable() re.True(followerAPIClient.Available()) re.True(followerAPIClient.NeedRetry(nil, err)) @@ -278,7 +278,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() { re := suite.Require() follower := suite.followerClient leader := suite.leaderClient - b := &pdServiceBalancer{} + b := &serviceBalancer{} b.set([]ServiceClient{leader, follower}) re.Equal(2, b.totalNode) @@ -400,7 +400,7 @@ func TestUpdateURLs(t *testing.T) { } return } - cli := &pdServiceDiscovery{option: opt.NewOption()} + cli := &serviceDiscovery{option: opt.NewOption()} cli.urls.Store([]string{}) cli.updateURLs(members[1:]) re.Equal(getURLs([]*pdpb.Member{members[1], members[3], members[2]}), cli.GetServiceURLs()) @@ -421,7 +421,7 @@ func TestGRPCDialOption(t *testing.T) { start := time.Now() ctx, cancel := context.WithTimeout(context.TODO(), 500*time.Millisecond) defer cancel() - cli := &pdServiceDiscovery{ + cli := &serviceDiscovery{ checkMembershipCh: make(chan struct{}, 1), ctx: ctx, cancel: cancel, diff --git a/client/servicediscovery/tso_service_discovery.go b/client/servicediscovery/tso_service_discovery.go index 1d2130db804..7734fd23107 100644 --- a/client/servicediscovery/tso_service_discovery.go +++ b/client/servicediscovery/tso_service_discovery.go @@ -126,10 +126,10 @@ func (t *tsoServerDiscovery) resetFailure() { // tsoServiceDiscovery is the service discovery client of the independent TSO service type tsoServiceDiscovery struct { - metacli metastorage.Client - apiSvcDiscovery ServiceDiscovery - clusterID uint64 - keyspaceID atomic.Uint32 + metacli 
metastorage.Client + serviceDiscovery ServiceDiscovery + clusterID uint64 + keyspaceID atomic.Uint32 // defaultDiscoveryKey is the etcd path used for discovering the serving endpoints of // the default keyspace group @@ -161,7 +161,7 @@ type tsoServiceDiscovery struct { // NewTSOServiceDiscovery returns a new client-side service discovery for the independent TSO service. func NewTSOServiceDiscovery( - ctx context.Context, metacli metastorage.Client, apiSvcDiscovery ServiceDiscovery, + ctx context.Context, metacli metastorage.Client, serviceDiscovery ServiceDiscovery, keyspaceID uint32, tlsCfg *tls.Config, option *opt.Option, ) ServiceDiscovery { ctx, cancel := context.WithCancel(ctx) @@ -169,8 +169,8 @@ func NewTSOServiceDiscovery( ctx: ctx, cancel: cancel, metacli: metacli, - apiSvcDiscovery: apiSvcDiscovery, - clusterID: apiSvcDiscovery.GetClusterID(), + serviceDiscovery: serviceDiscovery, + clusterID: serviceDiscovery.GetClusterID(), tlsCfg: tlsCfg, option: option, checkMembershipCh: make(chan struct{}, 1), @@ -351,7 +351,7 @@ func (c *tsoServiceDiscovery) ScheduleCheckMemberChanged() { // CheckMemberChanged Immediately check if there is any membership change among the primary/secondaries in // a primary/secondary configured cluster. func (c *tsoServiceDiscovery) CheckMemberChanged() error { - if err := c.apiSvcDiscovery.CheckMemberChanged(); err != nil { + if err := c.serviceDiscovery.CheckMemberChanged(); err != nil { log.Warn("[tso] failed to check member changed", errs.ZapError(err)) } if err := c.retry(tsoQueryRetryMaxTimes, tsoQueryRetryInterval, c.updateMember); err != nil { @@ -382,17 +382,17 @@ func (c *tsoServiceDiscovery) SetTSOLeaderURLUpdatedCallback(callback tsoLeaderU // GetServiceClient implements ServiceDiscovery func (c *tsoServiceDiscovery) GetServiceClient() ServiceClient { - return c.apiSvcDiscovery.GetServiceClient() + return c.serviceDiscovery.GetServiceClient() } // GetServiceClientByKind implements ServiceDiscovery func (c *tsoServiceDiscovery) GetServiceClientByKind(kind APIKind) ServiceClient { - return c.apiSvcDiscovery.GetServiceClientByKind(kind) + return c.serviceDiscovery.GetServiceClientByKind(kind) } // GetAllServiceClients implements ServiceDiscovery func (c *tsoServiceDiscovery) GetAllServiceClients() []ServiceClient { - return c.apiSvcDiscovery.GetAllServiceClients() + return c.serviceDiscovery.GetAllServiceClients() } // getPrimaryURL returns the primary URL. @@ -425,7 +425,7 @@ func (c *tsoServiceDiscovery) afterPrimarySwitched(oldPrimary, newPrimary string func (c *tsoServiceDiscovery) updateMember() error { // The keyspace membership or the primary serving URL of the keyspace group, to which this // keyspace belongs, might have been changed. We need to query tso servers to get the latest info. 
- tsoServerURL, err := c.getTSOServer(c.apiSvcDiscovery) + tsoServerURL, err := c.getTSOServer(c.serviceDiscovery) if err != nil { log.Error("[tso] failed to get tso server", errs.ZapError(err)) return err @@ -589,7 +589,7 @@ func (c *tsoServiceDiscovery) getTSOServer(sd ServiceDiscovery) (string, error) ) t := c.tsoServerDiscovery if len(t.urls) == 0 || t.failureCount == len(t.urls) { - urls, err = sd.(*pdServiceDiscovery).discoverMicroservice(tsoService) + urls, err = sd.(*serviceDiscovery).discoverMicroservice(tsoService) if err != nil { return "", err } diff --git a/cmd/pd-server/main.go b/cmd/pd-server/main.go index 165bcd2a12f..24ca46e7d5e 100644 --- a/cmd/pd-server/main.go +++ b/cmd/pd-server/main.go @@ -82,7 +82,7 @@ func NewServiceCommand() *cobra.Command { } cmd.AddCommand(NewTSOServiceCommand()) cmd.AddCommand(NewSchedulingServiceCommand()) - cmd.AddCommand(NewAPIServiceCommand()) + cmd.AddCommand(NewPDServiceCommand()) return cmd } @@ -128,12 +128,12 @@ func NewSchedulingServiceCommand() *cobra.Command { return cmd } -// NewAPIServiceCommand returns the API service command. -func NewAPIServiceCommand() *cobra.Command { +// NewPDServiceCommand returns the PD service command. +func NewPDServiceCommand() *cobra.Command { cmd := &cobra.Command{ Use: apiMode, - Short: "Run the API service", - Run: createAPIServerWrapper, + Short: "Run the PD service", + Run: createPDServiceWrapper, } addFlags(cmd) return cmd @@ -160,7 +160,7 @@ func addFlags(cmd *cobra.Command) { cmd.Flags().BoolP("force-new-cluster", "", false, "force to create a new one-member cluster") } -func createAPIServerWrapper(cmd *cobra.Command, args []string) { +func createPDServiceWrapper(cmd *cobra.Command, args []string) { start(cmd, args, cmd.CalledAs()) } @@ -219,7 +219,7 @@ func start(cmd *cobra.Command, args []string, services ...string) { defer log.Sync() memory.InitMemoryHook() if len(services) != 0 { - versioninfo.Log(server.APIServiceMode) + versioninfo.Log(server.PDServiceMode) } else { versioninfo.Log(server.PDMode) } diff --git a/pkg/mcs/registry/registry.go b/pkg/mcs/registry/registry.go index 6a01f091e52..2ffa04b1bf9 100644 --- a/pkg/mcs/registry/registry.go +++ b/pkg/mcs/registry/registry.go @@ -85,18 +85,18 @@ func (r *ServiceRegistry) InstallAllRESTHandler(srv bs.Server, h map[string]http serviceName := createServiceName(prefix, name) if l, ok := r.services[serviceName]; ok { if err := l.RegisterRESTHandler(h); err != nil { - log.Error("register restful API service failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) + log.Error("register restful PD service failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) } else { - log.Info("restful API service already registered", zap.String("prefix", prefix), zap.String("service-name", name)) + log.Info("restful PD service already registered", zap.String("prefix", prefix), zap.String("service-name", name)) } continue } l := builder(srv) r.services[serviceName] = l if err := l.RegisterRESTHandler(h); err != nil { - log.Error("register restful API service failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) + log.Error("register restful PD service failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) } else { - log.Info("restful API service registered successfully", zap.String("prefix", prefix), zap.String("service-name", name)) + log.Info("restful PD service registered successfully", zap.String("prefix", prefix), 
zap.String("service-name", name)) } } } diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 5c7166fba09..6f80572673c 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -55,7 +55,7 @@ type Cluster struct { storage storage.Storage coordinator *schedule.Coordinator checkMembershipCh chan struct{} - apiServerLeader atomic.Value + pdLeader atomic.Value running atomic.Bool // heartbeatRunner is used to process the subtree update task asynchronously. @@ -227,7 +227,7 @@ func (c *Cluster) GetStoreConfig() sc.StoreConfigProvider { return c.persistConf // AllocID allocates a new ID. func (c *Cluster) AllocID() (uint64, error) { - client, err := c.getAPIServerLeaderClient() + client, err := c.getPDLeaderClient() if err != nil { return 0, err } @@ -241,11 +241,11 @@ func (c *Cluster) AllocID() (uint64, error) { return resp.GetId(), nil } -func (c *Cluster) getAPIServerLeaderClient() (pdpb.PDClient, error) { - cli := c.apiServerLeader.Load() +func (c *Cluster) getPDLeaderClient() (pdpb.PDClient, error) { + cli := c.pdLeader.Load() if cli == nil { c.triggerMembershipCheck() - return nil, errors.New("API server leader is not found") + return nil, errors.New("PD leader is not found") } return cli.(pdpb.PDClient), nil } @@ -257,10 +257,10 @@ func (c *Cluster) triggerMembershipCheck() { } } -// SwitchAPIServerLeader switches the API server leader. -func (c *Cluster) SwitchAPIServerLeader(new pdpb.PDClient) bool { - old := c.apiServerLeader.Load() - return c.apiServerLeader.CompareAndSwap(old, new) +// SwitchPDServiceLeader switches the PD service leader. +func (c *Cluster) SwitchPDServiceLeader(new pdpb.PDClient) bool { + old := c.pdLeader.Load() + return c.pdLeader.CompareAndSwap(old, new) } func trySend(notifier chan struct{}) { diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index 784d1f45a82..413a6c601cc 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -243,7 +243,7 @@ func NewPersistConfig(cfg *Config, ttl *cache.TTLString) *PersistConfig { o.SetClusterVersion(&cfg.ClusterVersion) o.schedule.Store(&cfg.Schedule) o.replication.Store(&cfg.Replication) - // storeConfig will be fetched from TiKV by PD API server, + // storeConfig will be fetched from TiKV by PD service, // so we just set an empty value here first. o.storeConfig.Store(&sc.StoreConfig{}) o.ttl = ttl @@ -748,11 +748,11 @@ func (o *PersistConfig) IsRaftKV2() bool { // TODO: implement the following methods // AddSchedulerCfg adds the scheduler configurations. -// This method is a no-op since we only use configurations derived from one-way synchronization from API server now. +// This method is a no-op since we only use configurations derived from one-way synchronization from PD service now. func (*PersistConfig) AddSchedulerCfg(types.CheckerSchedulerType, []string) {} // RemoveSchedulerCfg removes the scheduler configurations. -// This method is a no-op since we only use configurations derived from one-way synchronization from API server now. +// This method is a no-op since we only use configurations derived from one-way synchronization from PD service now. func (*PersistConfig) RemoveSchedulerCfg(types.CheckerSchedulerType) {} // CheckLabelProperty checks if the label property is satisfied. 
diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index f499a0d7d50..9db2d47d0f4 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -36,7 +36,7 @@ import ( "github.com/tikv/pd/pkg/utils/keypath" ) -// Watcher is used to watch the PD API server for any configuration changes. +// Watcher is used to watch the PD service for any configuration changes. type Watcher struct { wg sync.WaitGroup ctx context.Context @@ -76,7 +76,7 @@ type persistedConfig struct { Store sc.StoreConfig `json:"store"` } -// NewWatcher creates a new watcher to watch the config meta change from PD API server. +// NewWatcher creates a new watcher to watch the config meta change from PD service. func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go index 3d1183bf734..bd2cc40c21d 100644 --- a/pkg/mcs/scheduling/server/grpc_service.go +++ b/pkg/mcs/scheduling/server/grpc_service.go @@ -159,7 +159,7 @@ func (s *Service) RegionHeartbeat(stream schedulingpb.Scheduling_RegionHeartbeat region := core.RegionFromHeartbeat(request, 0) err = c.HandleRegionHeartbeat(region) if err != nil { - // TODO: if we need to send the error back to API server. + // TODO: if we need to send the error back to PD service. log.Error("failed handle region heartbeat", zap.Error(err)) continue } diff --git a/pkg/mcs/scheduling/server/meta/watcher.go b/pkg/mcs/scheduling/server/meta/watcher.go index 27fe6687f3d..c51f10027d7 100644 --- a/pkg/mcs/scheduling/server/meta/watcher.go +++ b/pkg/mcs/scheduling/server/meta/watcher.go @@ -33,7 +33,7 @@ import ( "github.com/tikv/pd/pkg/utils/keypath" ) -// Watcher is used to watch the PD API server for any meta changes. +// Watcher is used to watch the PD service for any meta changes. type Watcher struct { wg sync.WaitGroup ctx context.Context @@ -48,7 +48,7 @@ type Watcher struct { storeWatcher *etcdutil.LoopWatcher } -// NewWatcher creates a new watcher to watch the meta change from PD API server. +// NewWatcher creates a new watcher to watch the meta change from PD service. func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go index cc6480a0cb4..014a3abc2be 100644 --- a/pkg/mcs/scheduling/server/rule/watcher.go +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/utils/keypath" ) -// Watcher is used to watch the PD API server for any Placement Rule changes. +// Watcher is used to watch the PD service for any Placement Rule changes. type Watcher struct { ctx context.Context cancel context.CancelFunc @@ -74,7 +74,7 @@ type Watcher struct { patch *placement.RuleConfigPatch } -// NewWatcher creates a new watcher to watch the Placement Rule change from PD API server. +// NewWatcher creates a new watcher to watch the Placement Rule change from PD service. func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index 8c9972d5eec..80156c1e26b 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -110,7 +110,7 @@ type Server struct { hbStreams *hbstream.HeartbeatStreams storage *endpoint.StorageEndpoint - // for watching the PD API server meta info updates that are related to the scheduling. 
+ // for watching the PD service meta info updates that are related to the scheduling. configWatcher *config.Watcher ruleWatcher *rule.Watcher metaWatcher *meta.Watcher @@ -169,10 +169,10 @@ func (s *Server) startServerLoop() { s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(s.Context()) s.serverLoopWg.Add(2) go s.primaryElectionLoop() - go s.updateAPIServerMemberLoop() + go s.updatePDServiceMemberLoop() } -func (s *Server) updateAPIServerMemberLoop() { +func (s *Server) updatePDServiceMemberLoop() { defer logutil.LogPanic() defer s.serverLoopWg.Done() @@ -220,7 +220,7 @@ func (s *Server) updateAPIServerMemberLoop() { // double check break } - if s.cluster.SwitchAPIServerLeader(pdpb.NewPDClient(cc)) { + if s.cluster.SwitchPDServiceLeader(pdpb.NewPDClient(cc)) { if status.Leader != curLeader { log.Info("switch leader", zap.String("leader-id", fmt.Sprintf("%x", ep.ID)), zap.String("endpoint", ep.ClientURLs[0])) } diff --git a/pkg/mcs/utils/constant/constant.go b/pkg/mcs/utils/constant/constant.go index 87fcf29f678..6f684bdb977 100644 --- a/pkg/mcs/utils/constant/constant.go +++ b/pkg/mcs/utils/constant/constant.go @@ -57,8 +57,8 @@ const ( // MicroserviceRootPath is the root path of microservice in etcd. MicroserviceRootPath = "/ms" - // APIServiceName is the name of api server. - APIServiceName = "api" + // PDServiceName is the name of pd server. + PDServiceName = "pd" // TSOServiceName is the name of tso server. TSOServiceName = "tso" // SchedulingServiceName is the name of scheduling server. diff --git a/pkg/member/election_leader.go b/pkg/member/election_leader.go index 81afc5dbd0a..2e5769d7dc4 100644 --- a/pkg/member/election_leader.go +++ b/pkg/member/election_leader.go @@ -21,7 +21,7 @@ import ( ) // ElectionLeader defines the common interface of the leader, which is the pdpb.Member -// for in PD/API service or the tsopb.Participant in the micro services. +// for in PD/PD service or the tsopb.Participant in the micro services. type ElectionLeader interface { // GetListenUrls returns the listen urls GetListenUrls() []string diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index e792560cb37..80299bf1e25 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go @@ -389,7 +389,7 @@ func (c *Coordinator) LoadPlugin(pluginPath string, ch chan string) { return } log.Info("create scheduler", zap.String("scheduler-name", s.GetName())) - // TODO: handle the plugin in API service mode. + // TODO: handle the plugin in PD service mode. if err = c.schedulers.AddScheduler(s); err != nil { log.Error("can't add scheduler", zap.String("scheduler-name", s.GetName()), errs.ZapError(err)) return diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go index 28973631570..5f461d326c5 100644 --- a/pkg/schedule/schedulers/scheduler_controller.go +++ b/pkg/schedule/schedulers/scheduler_controller.go @@ -55,7 +55,7 @@ type Controller struct { // and used in the PD leader service mode now. schedulers map[string]*ScheduleController // schedulerHandlers is used to manage the HTTP handlers of schedulers, - // which will only be initialized and used in the API service mode now. + // which will only be initialized and used in the PD service mode now. 
schedulerHandlers map[string]http.Handler opController *operator.Controller } diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index 149c68029be..9793939fa17 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -334,7 +334,7 @@ type KeyspaceGroupManager struct { // Value: discover.ServiceRegistryEntry tsoServiceKey string // legacySvcRootPath defines the legacy root path for all etcd paths which derives from - // the PD/API service. It's in the format of "/pd/{cluster_id}". + // the PD/PD service. It's in the format of "/pd/{cluster_id}". // The main paths for different usages include: // 1. The path, used by the default keyspace group, for LoadTimestamp/SaveTimestamp in the // storage endpoint. diff --git a/pkg/utils/apiutil/serverapi/middleware.go b/pkg/utils/apiutil/serverapi/middleware.go index 85b958a5554..823deed64ea 100644 --- a/pkg/utils/apiutil/serverapi/middleware.go +++ b/pkg/utils/apiutil/serverapi/middleware.go @@ -116,7 +116,7 @@ func MicroserviceRedirectRule(matchPath, targetPath, targetServiceName string, } func (h *redirector) matchMicroServiceRedirectRules(r *http.Request) (bool, string) { - if !h.s.IsAPIServiceMode() { + if !h.s.IsPDServiceMode() { return false, "" } if len(h.microserviceRedirectRules) == 0 { @@ -223,7 +223,7 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http clientUrls = leader.GetClientUrls() r.Header.Set(apiutil.PDRedirectorHeader, h.s.Name()) } else { - // Prevent more than one redirection among PD/API servers. + // Prevent more than one redirection among PD/PD service. log.Error("redirect but server is not leader", zap.String("from", name), zap.String("server", h.s.Name()), errs.ZapError(errs.ErrRedirectToNotLeader)) http.Error(w, errs.ErrRedirectToNotLeader.FastGenByArgs().Error(), http.StatusInternalServerError) return diff --git a/server/api/admin.go b/server/api/admin.go index d2be53cf40e..561f4ec4bff 100644 --- a/server/api/admin.go +++ b/server/api/admin.go @@ -254,5 +254,5 @@ func (h *adminHandler) deleteRegionCacheInSchedulingServer(id ...uint64) error { } func buildMsg(err error) string { - return fmt.Sprintf("This operation was executed in API server but needs to be re-executed on scheduling server due to the following error: %s", err.Error()) + return fmt.Sprintf("This operation was executed in PD service but needs to be re-executed on scheduling server due to the following error: %s", err.Error()) } diff --git a/server/api/server.go b/server/api/server.go index 1a744635e2d..8e352b6a36e 100644 --- a/server/api/server.go +++ b/server/api/server.go @@ -63,7 +63,7 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP // Following requests are **not** redirected: // "/schedulers", http.MethodPost // "/schedulers/{name}", http.MethodDelete - // Because the writing of all the config of the scheduling service is in the API server, + // Because the writing of all the config of the scheduling service is in the PD service, // we should not post and delete the scheduler directly in the scheduling service. 
router.PathPrefix(apiPrefix).Handler(negroni.New( serverapi.NewRuntimeServiceValidator(svr, group), @@ -153,7 +153,7 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP scheapi.APIPathPrefix+"/config/placement-rule", constant.SchedulingServiceName, []string{http.MethodGet}), - // because the writing of all the meta information of the scheduling service is in the API server, + // because the writing of all the meta information of the scheduling service is in the PD service, // we should not post and delete the scheduler directly in the scheduling service. serverapi.MicroserviceRedirectRule( prefix+"/schedulers", diff --git a/server/apiv2/handlers/micro_service.go b/server/apiv2/handlers/micro_service.go index c7fa0dc94f2..b4d3d6bbe89 100644 --- a/server/apiv2/handlers/micro_service.go +++ b/server/apiv2/handlers/micro_service.go @@ -39,7 +39,7 @@ func RegisterMicroService(r *gin.RouterGroup) { // @Router /ms/members/{service} [get] func GetMembers(c *gin.Context) { svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) - if !svr.IsAPIServiceMode() { + if !svr.IsPDServiceMode() { c.AbortWithStatusJSON(http.StatusNotFound, "not support micro service") return } @@ -65,7 +65,7 @@ func GetMembers(c *gin.Context) { // @Router /ms/primary/{service} [get] func GetPrimary(c *gin.Context) { svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) - if !svr.IsAPIServiceMode() { + if !svr.IsPDServiceMode() { c.AbortWithStatusJSON(http.StatusNotFound, "not support micro service") return } diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index d2f3855d14e..e482b7b8b68 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -131,7 +131,7 @@ type Server interface { GetMembers() ([]*pdpb.Member, error) ReplicateFileToMember(ctx context.Context, member *pdpb.Member, name string, data []byte) error GetKeyspaceGroupManager() *keyspace.GroupManager - IsAPIServiceMode() bool + IsPDServiceMode() bool GetSafePointV2Manager() *gc.SafePointV2Manager } @@ -156,12 +156,12 @@ type RaftCluster struct { etcdClient *clientv3.Client httpClient *http.Client - running bool - isAPIServiceMode bool - meta *metapb.Cluster - storage storage.Storage - minResolvedTS atomic.Value // Store as uint64 - externalTS atomic.Value // Store as uint64 + running bool + isPDServiceMode bool + meta *metapb.Cluster + storage storage.Storage + minResolvedTS atomic.Value // Store as uint64 + externalTS atomic.Value // Store as uint64 // Keep the previous store limit settings when removing a store. prevStoreLimit map[uint64]map[storelimit.Type]float64 @@ -325,7 +325,7 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) { log.Warn("raft cluster has already been started") return nil } - c.isAPIServiceMode = s.IsAPIServiceMode() + c.isPDServiceMode = s.IsPDServiceMode() err = c.InitCluster(s.GetAllocator(), s.GetPersistOptions(), s.GetHBStreams(), s.GetKeyspaceGroupManager()) if err != nil { return err @@ -376,7 +376,7 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) { c.loadExternalTS() c.loadMinResolvedTS() - if c.isAPIServiceMode { + if c.isPDServiceMode { // bootstrap keyspace group manager after starting other parts successfully. // This order avoids a stuck goroutine in keyspaceGroupManager when it fails to create raftcluster. 
err = c.keyspaceGroupManager.Bootstrap(c.ctx) @@ -404,7 +404,7 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) { } func (c *RaftCluster) checkSchedulingService() { - if c.isAPIServiceMode { + if c.isPDServiceMode { servers, err := discovery.Discover(c.etcdClient, constant.SchedulingServiceName) if c.opt.GetMicroServiceConfig().IsSchedulingFallbackEnabled() && (err != nil || len(servers) == 0) { c.startSchedulingJobs(c, c.hbstreams) @@ -425,7 +425,7 @@ func (c *RaftCluster) checkSchedulingService() { // checkTSOService checks the TSO service. func (c *RaftCluster) checkTSOService() { - if c.isAPIServiceMode { + if c.isPDServiceMode { if c.opt.GetMicroServiceConfig().IsTSODynamicSwitchingEnabled() { servers, err := discovery.Discover(c.etcdClient, constant.TSOServiceName) if err != nil || len(servers) == 0 { diff --git a/server/config/config.go b/server/config/config.go index 282b5264fe9..69cd76409bc 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -860,7 +860,7 @@ func (c *MicroServiceConfig) Clone() *MicroServiceConfig { return &cfg } -// IsSchedulingFallbackEnabled returns whether to enable scheduling service fallback to api service. +// IsSchedulingFallbackEnabled returns whether to enable scheduling service fallback to PD service. func (c *MicroServiceConfig) IsSchedulingFallbackEnabled() bool { return c.EnableSchedulingFallback } diff --git a/server/grpc_service.go b/server/grpc_service.go index d3fc5c58d7f..118b3d84748 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -271,8 +271,8 @@ func (s *GrpcServer) GetClusterInfo(context.Context, *pdpb.GetClusterInfoRequest }, nil } -// GetMinTS implements gRPC PDServer. In PD service mode, it simply returns a timestamp. -// In API service mode, it queries all tso servers and gets the minimum timestamp across +// GetMinTS implements gRPC PDServer. In PD mode, it simply returns a timestamp. +// In PD service mode, it queries all tso servers and gets the minimum timestamp across // all keyspace groups. func (s *GrpcServer) GetMinTS( ctx context.Context, request *pdpb.GetMinTSRequest, diff --git a/server/server.go b/server/server.go index 94250128fe3..8de2a5c57f3 100644 --- a/server/server.go +++ b/server/server.go @@ -101,8 +101,8 @@ const ( // PDMode represents that server is in PD mode. PDMode = "PD" - // APIServiceMode represents that server is in API service mode. - APIServiceMode = "API Service" + // PDServiceMode represents that server is in PD service mode which is in microservice architecture. + PDServiceMode = "PD Service" // maxRetryTimesGetServicePrimary is the max retry times for getting primary addr. 
// Note: it need to be less than client.defaultPDTimeout @@ -243,7 +243,7 @@ type HandlerBuilder func(context.Context, *Server) (http.Handler, apiutil.APISer func CreateServer(ctx context.Context, cfg *config.Config, services []string, legacyServiceBuilders ...HandlerBuilder) (*Server, error) { var mode string if len(services) != 0 { - mode = APIServiceMode + mode = PDServiceMode } else { mode = PDMode } @@ -478,7 +478,7 @@ func (s *Server) startServer(ctx context.Context) error { Member: s.member.MemberValue(), Step: keyspace.AllocStep, }) - if s.IsAPIServiceMode() { + if s.IsPDServiceMode() { s.keyspaceGroupManager = keyspace.NewKeyspaceGroupManager(s.ctx, s.storage, s.client) } s.keyspaceManager = keyspace.NewKeyspaceManager(s.ctx, s.storage, s.cluster, keyspaceIDAllocator, &s.cfg.Keyspace, s.keyspaceGroupManager) @@ -530,7 +530,7 @@ func (s *Server) Close() { s.cgMonitor.StopMonitor() s.stopServerLoop() - if s.IsAPIServiceMode() { + if s.IsPDServiceMode() { s.keyspaceGroupManager.Close() } @@ -641,7 +641,7 @@ func (s *Server) startServerLoop(ctx context.Context) { go s.etcdLeaderLoop() go s.serverMetricsLoop() go s.encryptionKeyManagerLoop() - if s.IsAPIServiceMode() { + if s.IsPDServiceMode() { s.initTSOPrimaryWatcher() s.initSchedulingPrimaryWatcher() } @@ -788,9 +788,9 @@ func (s *Server) stopRaftCluster() { s.cluster.Stop() } -// IsAPIServiceMode return whether the server is in API service mode. -func (s *Server) IsAPIServiceMode() bool { - return s.mode == APIServiceMode +// IsPDServiceMode return whether the server is in PD service mode. +func (s *Server) IsPDServiceMode() bool { + return s.mode == PDServiceMode } // GetAddr returns the server urls for clients. @@ -1390,7 +1390,7 @@ func (s *Server) GetRaftCluster() *cluster.RaftCluster { // IsServiceIndependent returns whether the service is independent. 
func (s *Server) IsServiceIndependent(name string) bool { - if s.mode == APIServiceMode && !s.IsClosed() { + if s.mode == PDServiceMode && !s.IsClosed() { if name == constant.TSOServiceName && !s.GetMicroServiceConfig().IsTSODynamicSwitchingEnabled() { return true } @@ -1667,7 +1667,7 @@ func (s *Server) campaignLeader() { log.Info(fmt.Sprintf("start to campaign %s leader", s.mode), zap.String("campaign-leader-name", s.Name())) if err := s.member.CampaignLeader(s.ctx, s.cfg.LeaderLease); err != nil { if err.Error() == errs.ErrEtcdTxnConflict.Error() { - log.Info(fmt.Sprintf("campaign %s leader meets error due to txn conflict, another PD/API server may campaign successfully", s.mode), + log.Info(fmt.Sprintf("campaign %s leader meets error due to txn conflict, another PD/PD service may campaign successfully", s.mode), zap.String("campaign-leader-name", s.Name())) } else { log.Error(fmt.Sprintf("campaign %s leader meets error due to etcd error", s.mode), diff --git a/server/server_test.go b/server/server_test.go index 23da2078cb2..28839b89389 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -266,13 +266,13 @@ func TestAPIService(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() mockHandler := CreateMockHandler(re, "127.0.0.1") - svr, err := CreateServer(ctx, cfg, []string{constant.APIServiceName}, mockHandler) + svr, err := CreateServer(ctx, cfg, []string{constant.PDServiceName}, mockHandler) re.NoError(err) defer svr.Close() err = svr.Run() re.NoError(err) MustWaitLeader(re, []*Server{svr}) - re.True(svr.IsAPIServiceMode()) + re.True(svr.IsPDServiceMode()) } func TestIsPathInDirectory(t *testing.T) { diff --git a/tests/cluster.go b/tests/cluster.go index a4f445155e1..4189b43902a 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -79,16 +79,7 @@ type TestServer struct { var zapLogOnce sync.Once // NewTestServer creates a new TestServer. -func NewTestServer(ctx context.Context, cfg *config.Config) (*TestServer, error) { - return createTestServer(ctx, cfg, nil) -} - -// NewTestAPIServer creates a new TestServer. -func NewTestAPIServer(ctx context.Context, cfg *config.Config) (*TestServer, error) { - return createTestServer(ctx, cfg, []string{constant.APIServiceName}) -} - -func createTestServer(ctx context.Context, cfg *config.Config, services []string) (*TestServer, error) { +func NewTestServer(ctx context.Context, cfg *config.Config, services []string) (*TestServer, error) { // disable the heartbeat async runner in test cfg.Schedule.EnableHeartbeatConcurrentRunner = false err := logutil.SetupLogger(cfg.Log, &cfg.Logger, &cfg.LogProps, cfg.Security.RedactInfoLog) @@ -435,15 +426,15 @@ type ConfigOption func(conf *config.Config, serverName string) // NewTestCluster creates a new TestCluster. func NewTestCluster(ctx context.Context, initialServerCount int, opts ...ConfigOption) (*TestCluster, error) { - return createTestCluster(ctx, initialServerCount, false, opts...) + return createTestCluster(ctx, initialServerCount, nil, opts...) } -// NewTestAPICluster creates a new TestCluster with API service. -func NewTestAPICluster(ctx context.Context, initialServerCount int, opts ...ConfigOption) (*TestCluster, error) { - return createTestCluster(ctx, initialServerCount, true, opts...) +// NewTestPDServiceCluster creates a new TestCluster with PD service. 
+func NewTestPDServiceCluster(ctx context.Context, initialServerCount int, opts ...ConfigOption) (*TestCluster, error) { + return createTestCluster(ctx, initialServerCount, []string{constant.PDServiceName}, opts...) } -func createTestCluster(ctx context.Context, initialServerCount int, isAPIServiceMode bool, opts ...ConfigOption) (*TestCluster, error) { +func createTestCluster(ctx context.Context, initialServerCount int, services []string, opts ...ConfigOption) (*TestCluster, error) { schedulers.Register() config := newClusterConfig(initialServerCount) servers := make(map[string]*TestServer) @@ -452,12 +443,7 @@ func createTestCluster(ctx context.Context, initialServerCount int, isAPIService if err != nil { return nil, err } - var s *TestServer - if isAPIServiceMode { - s, err = NewTestAPIServer(ctx, serverConf) - } else { - s, err = NewTestServer(ctx, serverConf) - } + s, err := NewTestServer(ctx, serverConf, services) if err != nil { return nil, err } @@ -481,7 +467,7 @@ func RestartTestAPICluster(ctx context.Context, cluster *TestCluster) (*TestClus } func restartTestCluster( - ctx context.Context, cluster *TestCluster, isAPIServiceMode bool, + ctx context.Context, cluster *TestCluster, isPDServiceMode bool, ) (newTestCluster *TestCluster, err error) { schedulers.Register() newTestCluster = &TestCluster{ @@ -508,10 +494,10 @@ func restartTestCluster( newServer *TestServer serverErr error ) - if isAPIServiceMode { - newServer, serverErr = NewTestAPIServer(ctx, serverCfg) + if isPDServiceMode { + newServer, serverErr = NewTestServer(ctx, serverCfg, []string{constant.PDServiceName}) } else { - newServer, serverErr = NewTestServer(ctx, serverCfg) + newServer, serverErr = NewTestServer(ctx, serverCfg, nil) } serverMap.Store(serverName, newServer) errorMap.Store(serverName, serverErr) @@ -735,7 +721,7 @@ func (c *TestCluster) Join(ctx context.Context, opts ...ConfigOption) (*TestServ if err != nil { return nil, err } - s, err := NewTestServer(ctx, conf) + s, err := NewTestServer(ctx, conf, nil) if err != nil { return nil, err } @@ -743,13 +729,13 @@ func (c *TestCluster) Join(ctx context.Context, opts ...ConfigOption) (*TestServ return s, nil } -// JoinAPIServer is used to add a new TestAPIServer into the cluster. -func (c *TestCluster) JoinAPIServer(ctx context.Context, opts ...ConfigOption) (*TestServer, error) { +// JoinPDServer is used to add a new TestServer into the cluster. +func (c *TestCluster) JoinPDServer(ctx context.Context, opts ...ConfigOption) (*TestServer, error) { conf, err := c.config.join().Generate(opts...) 
if err != nil { return nil, err } - s, err := NewTestAPIServer(ctx, conf) + s, err := NewTestServer(ctx, conf, []string{constant.PDServiceName}) if err != nil { return nil, err } diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index 397e1079af3..ab3874a33a7 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -361,7 +361,7 @@ func TestTSOFollowerProxyWithTSOService(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/client/servicediscovery/fastUpdateServiceMode", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestAPICluster(ctx, 1) + cluster, err := tests.NewTestPDServiceCluster(ctx, 1) re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go index da6fa158307..eb8933e10d8 100644 --- a/tests/integrations/mcs/discovery/register_test.go +++ b/tests/integrations/mcs/discovery/register_test.go @@ -54,7 +54,7 @@ func (suite *serverRegisterTestSuite) SetupSuite() { re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -84,7 +84,7 @@ func (suite *serverRegisterTestSuite) checkServerRegister(serviceName string) { addr := s.GetAddr() client := suite.pdLeader.GetEtcdClient() - // test API server discovery + // test PD service discovery endpoints, err := discovery.Discover(client, serviceName) re.NoError(err) @@ -98,7 +98,7 @@ func (suite *serverRegisterTestSuite) checkServerRegister(serviceName string) { re.True(exist) re.Equal(expectedPrimary, primary) - // test API server discovery after unregister + // test PD service discovery after unregister cleanup() endpoints, err = discovery.Discover(client, serviceName) re.NoError(err) @@ -140,7 +140,7 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin delete(serverMap, primary) expectedPrimary = tests.WaitForPrimaryServing(re, serverMap) - // test API server discovery + // test PD service discovery client := suite.pdLeader.GetEtcdClient() endpoints, err := discovery.Discover(client, serviceName) re.NoError(err) diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index 44347b4757d..b31d919324d 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -60,7 +60,7 @@ func (suite *keyspaceGroupTestSuite) SetupTest() { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) suite.ctx = ctx - cluster, err := tests.NewTestAPICluster(suite.ctx, 1) + cluster, err := tests.NewTestPDServiceCluster(suite.ctx, 1) suite.cluster = cluster re.NoError(err) re.NoError(cluster.RunInitialServers()) diff --git a/tests/integrations/mcs/members/member_test.go b/tests/integrations/mcs/members/member_test.go index 28275849073..7e83ea570b9 100644 --- a/tests/integrations/mcs/members/member_test.go +++ b/tests/integrations/mcs/members/member_test.go @@ -64,7 +64,7 @@ func (suite *memberTestSuite) SetupTest() { 
re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) suite.ctx = ctx - cluster, err := tests.NewTestAPICluster(suite.ctx, 1) + cluster, err := tests.NewTestPDServiceCluster(suite.ctx, 1) suite.cluster = cluster re.NoError(err) re.NoError(cluster.RunInitialServers()) diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index abace06bb78..9b6b3d95145 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -56,7 +56,7 @@ func (suite *apiTestSuite) TearDownSuite() { } func (suite *apiTestSuite) TestGetCheckerByName() { - suite.env.RunTestInAPIMode(suite.checkGetCheckerByName) + suite.env.RunTestInPDServiceMode(suite.checkGetCheckerByName) } func (suite *apiTestSuite) checkGetCheckerByName(cluster *tests.TestCluster) { @@ -102,7 +102,7 @@ func (suite *apiTestSuite) checkGetCheckerByName(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestAPIForward() { - suite.env.RunTestInAPIMode(suite.checkAPIForward) + suite.env.RunTestInPDServiceMode(suite.checkAPIForward) } func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { @@ -378,7 +378,7 @@ func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestConfig() { - suite.env.RunTestInAPIMode(suite.checkConfig) + suite.env.RunTestInPDServiceMode(suite.checkConfig) } func (suite *apiTestSuite) checkConfig(cluster *tests.TestCluster) { @@ -401,7 +401,7 @@ func (suite *apiTestSuite) checkConfig(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestConfigForward() { - suite.env.RunTestInAPIMode(suite.checkConfigForward) + suite.env.RunTestInPDServiceMode(suite.checkConfigForward) } func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { @@ -413,7 +413,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { urlPrefix := fmt.Sprintf("%s/pd/api/v1/config", addr) // Test config forward - // Expect to get same config in scheduling server and api server + // Expect to get same config in scheduling server and PD service testutil.Eventually(re, func() bool { testutil.ReadGetJSON(re, tests.TestDialClient, urlPrefix, &cfg) re.Equal(cfg["schedule"].(map[string]any)["leader-schedule-limit"], @@ -421,8 +421,8 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { return cfg["replication"].(map[string]any)["max-replicas"] == float64(opts.GetReplicationConfig().MaxReplicas) }) - // Test to change config in api server - // Expect to get new config in scheduling server and api server + // Test to change config in PD service + // Expect to get new config in scheduling server and PD service reqData, err := json.Marshal(map[string]any{ "max-replicas": 4, }) @@ -436,7 +436,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { }) // Test to change config only in scheduling server - // Expect to get new config in scheduling server but not old config in api server + // Expect to get new config in scheduling server but not old config in PD service scheCfg := opts.GetScheduleConfig().Clone() scheCfg.LeaderScheduleLimit = 100 opts.SetScheduleConfig(scheCfg) @@ -452,7 +452,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestAdminRegionCache() { - suite.env.RunTestInAPIMode(suite.checkAdminRegionCache) + 
suite.env.RunTestInPDServiceMode(suite.checkAdminRegionCache) } func (suite *apiTestSuite) checkAdminRegionCache(cluster *tests.TestCluster) { @@ -479,7 +479,7 @@ func (suite *apiTestSuite) checkAdminRegionCache(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestAdminRegionCacheForward() { - suite.env.RunTestInAPIMode(suite.checkAdminRegionCacheForward) + suite.env.RunTestInPDServiceMode(suite.checkAdminRegionCacheForward) } func (suite *apiTestSuite) checkAdminRegionCacheForward(cluster *tests.TestCluster) { @@ -491,22 +491,22 @@ func (suite *apiTestSuite) checkAdminRegionCacheForward(cluster *tests.TestClust r3 := core.NewTestRegionInfo(30, 1, []byte("c"), []byte(""), core.SetRegionConfVer(100), core.SetRegionVersion(100)) tests.MustPutRegionInfo(re, cluster, r3) - apiServer := cluster.GetLeaderServer().GetServer() + pdServer := cluster.GetLeaderServer().GetServer() schedulingServer := cluster.GetSchedulingPrimaryServer() re.Equal(3, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - re.Equal(3, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{})) + re.Equal(3, pdServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{})) addr := cluster.GetLeaderServer().GetAddr() urlPrefix := fmt.Sprintf("%s/pd/api/v1/admin/cache/region", addr) err := testutil.CheckDelete(tests.TestDialClient, fmt.Sprintf("%s/%s", urlPrefix, "30"), testutil.StatusOK(re)) re.NoError(err) re.Equal(2, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - re.Equal(2, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{})) + re.Equal(2, pdServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{})) err = testutil.CheckDelete(tests.TestDialClient, urlPrefix+"s", testutil.StatusOK(re)) re.NoError(err) re.Equal(0, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - re.Equal(0, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{})) + re.Equal(0, pdServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{})) } func (suite *apiTestSuite) TestFollowerForward() { @@ -520,7 +520,7 @@ func (suite *apiTestSuite) checkFollowerForward(cluster *tests.TestCluster) { leaderAddr := cluster.GetLeaderServer().GetAddr() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - follower, err := cluster.JoinAPIServer(ctx) + follower, err := cluster.JoinPDServer(ctx) re.NoError(err) re.NoError(follower.Run()) re.NotEmpty(cluster.WaitLeader()) @@ -558,7 +558,7 @@ func (suite *apiTestSuite) checkFollowerForward(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestMetrics() { - suite.env.RunTestInAPIMode(suite.checkMetrics) + suite.env.RunTestInPDServiceMode(suite.checkMetrics) } func (suite *apiTestSuite) checkMetrics(cluster *tests.TestCluster) { @@ -577,7 +577,7 @@ func (suite *apiTestSuite) checkMetrics(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestStatus() { - suite.env.RunTestInAPIMode(suite.checkStatus) + suite.env.RunTestInPDServiceMode(suite.checkStatus) } func (suite *apiTestSuite) checkStatus(cluster *tests.TestCluster) { @@ -600,7 +600,7 @@ func (suite *apiTestSuite) checkStatus(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestStores() { - suite.env.RunTestInAPIMode(suite.checkStores) + suite.env.RunTestInPDServiceMode(suite.checkStores) } func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { @@ -647,8 +647,8 @@ func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { tests.MustPutStore(re, cluster, store) } // Test /stores - apiServerAddr := 
cluster.GetLeaderServer().GetAddr() - urlPrefix := fmt.Sprintf("%s/pd/api/v1/stores", apiServerAddr) + pdServiceAddr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/stores", pdServiceAddr) var resp map[string]any err := testutil.ReadGetJSON(re, tests.TestDialClient, urlPrefix, &resp) re.NoError(err) @@ -682,7 +682,7 @@ func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestRegions() { - suite.env.RunTestInAPIMode(suite.checkRegions) + suite.env.RunTestInPDServiceMode(suite.checkRegions) } func (suite *apiTestSuite) checkRegions(cluster *tests.TestCluster) { @@ -691,8 +691,8 @@ func (suite *apiTestSuite) checkRegions(cluster *tests.TestCluster) { tests.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d")) tests.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f")) // Test /regions - apiServerAddr := cluster.GetLeaderServer().GetAddr() - urlPrefix := fmt.Sprintf("%s/pd/api/v1/regions", apiServerAddr) + pdServiceAddr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/regions", pdServiceAddr) var resp map[string]any err := testutil.ReadGetJSON(re, tests.TestDialClient, urlPrefix, &resp) re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/config_test.go b/tests/integrations/mcs/scheduling/config_test.go index d7d200814bb..6c770d3e4c1 100644 --- a/tests/integrations/mcs/scheduling/config_test.go +++ b/tests/integrations/mcs/scheduling/config_test.go @@ -62,7 +62,7 @@ func (suite *configTestSuite) SetupSuite() { schedulers.Register() var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) @@ -132,7 +132,7 @@ func (suite *configTestSuite) TestConfigWatch() { watcher.Close() } -// Manually trigger the config persistence in the PD API server side. +// Manually trigger the config persistence in the PD service side. func persistConfig(re *require.Assertions, pdLeaderServer *tests.TestServer) { err := pdLeaderServer.GetPersistOptions().Persist(pdLeaderServer.GetServer().GetStorage()) re.NoError(err) @@ -152,19 +152,19 @@ func (suite *configTestSuite) TestSchedulerConfigWatch() { ) re.NoError(err) // Get all default scheduler names. - var namesFromAPIServer []string + var namesFromPDService []string testutil.Eventually(re, func() bool { - namesFromAPIServer, _, _ = suite.pdLeaderServer.GetRaftCluster().GetStorage().LoadAllSchedulerConfigs() - return len(namesFromAPIServer) == len(sc.DefaultSchedulers) + namesFromPDService, _, _ = suite.pdLeaderServer.GetRaftCluster().GetStorage().LoadAllSchedulerConfigs() + return len(namesFromPDService) == len(sc.DefaultSchedulers) }) // Check all default schedulers' configs. var namesFromSchedulingServer []string testutil.Eventually(re, func() bool { namesFromSchedulingServer, _, err = storage.LoadAllSchedulerConfigs() re.NoError(err) - return len(namesFromSchedulingServer) == len(namesFromAPIServer) + return len(namesFromSchedulingServer) == len(namesFromPDService) }) - re.Equal(namesFromAPIServer, namesFromSchedulingServer) + re.Equal(namesFromPDService, namesFromSchedulingServer) // Add a new scheduler. 
api.MustAddScheduler(re, suite.pdLeaderServer.GetAddr(), types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, diff --git a/tests/integrations/mcs/scheduling/meta_test.go b/tests/integrations/mcs/scheduling/meta_test.go index 4e0d5249fdb..8df576b82ca 100644 --- a/tests/integrations/mcs/scheduling/meta_test.go +++ b/tests/integrations/mcs/scheduling/meta_test.go @@ -53,7 +53,7 @@ func (suite *metaTestSuite) SetupSuite() { re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/rule_test.go b/tests/integrations/mcs/scheduling/rule_test.go index 880dfddbb16..706c5784831 100644 --- a/tests/integrations/mcs/scheduling/rule_test.go +++ b/tests/integrations/mcs/scheduling/rule_test.go @@ -54,7 +54,7 @@ func (suite *ruleTestSuite) SetupSuite() { var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) @@ -97,7 +97,7 @@ func (suite *ruleTestSuite) TestRuleWatch() { re.Equal(placement.DefaultGroupID, ruleGroups[0].ID) re.Equal(0, ruleGroups[0].Index) re.False(ruleGroups[0].Override) - // Set a new rule via the PD API server. + // Set a new rule via the PD service. apiRuleManager := suite.pdLeaderServer.GetRaftCluster().GetRuleManager() rule := &placement.Rule{ GroupID: "2", diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go index 3401fb880cb..9a3d33d1dcf 100644 --- a/tests/integrations/mcs/scheduling/server_test.go +++ b/tests/integrations/mcs/scheduling/server_test.go @@ -66,7 +66,7 @@ func (suite *serverTestSuite) SetupSuite() { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/changeRunCollectWaitTime", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -220,7 +220,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { // Change back to the default value. conf.EnableSchedulingFallback = true leaderServer.SetMicroServiceConfig(*conf) - // API server will execute scheduling jobs since there is no scheduling server. + // PD service will execute scheduling jobs since there is no scheduling server. testutil.Eventually(re, func() bool { return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -229,7 +229,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { re.NoError(err) defer tc.Destroy() tc.WaitForPrimaryServing(re) - // After scheduling server is started, API server will not execute scheduling jobs. + // After scheduling server is started, PD service will not execute scheduling jobs. 
testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -238,7 +238,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { return tc.GetPrimaryServer().GetCluster().IsBackgroundJobsRunning() }) tc.GetPrimaryServer().Close() - // Stop scheduling server. API server will execute scheduling jobs again. + // Stop scheduling server. PD service will execute scheduling jobs again. testutil.Eventually(re, func() bool { return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -246,7 +246,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { re.NoError(err) defer tc1.Destroy() tc1.WaitForPrimaryServing(re) - // After scheduling server is started, API server will not execute scheduling jobs. + // After scheduling server is started, PD service will not execute scheduling jobs. testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -259,21 +259,21 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { re := suite.Require() - // API server will execute scheduling jobs since there is no scheduling server. + // PD service will execute scheduling jobs since there is no scheduling server. testutil.Eventually(re, func() bool { re.NotNil(suite.pdLeader.GetServer()) re.NotNil(suite.pdLeader.GetServer().GetRaftCluster()) return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) leaderServer := suite.pdLeader.GetServer() - // After Disabling scheduling service fallback, the API server will stop scheduling. + // After Disabling scheduling service fallback, the PD service will stop scheduling. conf := leaderServer.GetMicroServiceConfig().Clone() conf.EnableSchedulingFallback = false leaderServer.SetMicroServiceConfig(*conf) testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) - // Enable scheduling service fallback again, the API server will restart scheduling. + // Enable scheduling service fallback again, the PD service will restart scheduling. conf.EnableSchedulingFallback = true leaderServer.SetMicroServiceConfig(*conf) testutil.Eventually(re, func() bool { @@ -284,7 +284,7 @@ func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { re.NoError(err) defer tc.Destroy() tc.WaitForPrimaryServing(re) - // After scheduling server is started, API server will not execute scheduling jobs. + // After scheduling server is started, PD service will not execute scheduling jobs. testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -292,7 +292,7 @@ func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { testutil.Eventually(re, func() bool { return tc.GetPrimaryServer().GetCluster().IsBackgroundJobsRunning() }) - // Disable scheduling service fallback and stop scheduling server. API server won't execute scheduling jobs again. + // Disable scheduling service fallback and stop scheduling server. PD service won't execute scheduling jobs again. 
conf.EnableSchedulingFallback = false leaderServer.SetMicroServiceConfig(*conf) tc.GetPrimaryServer().Close() @@ -310,14 +310,14 @@ func (suite *serverTestSuite) TestSchedulerSync() { tc.WaitForPrimaryServing(re) schedulersController := tc.GetPrimaryServer().GetCluster().GetCoordinator().GetSchedulersController() checkEvictLeaderSchedulerExist(re, schedulersController, false) - // Add a new evict-leader-scheduler through the API server. + // Add a new evict-leader-scheduler through the PD service. api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) // Check if the evict-leader-scheduler is added. checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) - // Add a store_id to the evict-leader-scheduler through the API server. + // Add a store_id to the evict-leader-scheduler through the PD service. err = suite.pdLeader.GetServer().GetRaftCluster().PutMetaStore( &metapb.Store{ Id: 2, @@ -334,18 +334,18 @@ func (suite *serverTestSuite) TestSchedulerSync() { }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1, 2}) - // Delete a store_id from the evict-leader-scheduler through the API server. + // Delete a store_id from the evict-leader-scheduler through the PD service. api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{2}) - // Add a store_id to the evict-leader-scheduler through the API server by the scheduler handler. + // Add a store_id to the evict-leader-scheduler through the PD service by the scheduler handler. api.MustCallSchedulerConfigAPI(re, http.MethodPost, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"config"}, map[string]any{ "name": types.EvictLeaderScheduler.String(), "store_id": 1, }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1, 2}) - // Delete a store_id from the evict-leader-scheduler through the API server by the scheduler handler. + // Delete a store_id from the evict-leader-scheduler through the PD service by the scheduler handler. api.MustCallSchedulerConfigAPI(re, http.MethodDelete, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"delete", "2"}, nil) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) @@ -354,7 +354,7 @@ func (suite *serverTestSuite) TestSchedulerSync() { // Check if the scheduler is removed. checkEvictLeaderSchedulerExist(re, schedulersController, false) - // Delete the evict-leader-scheduler through the API server by removing the last store_id. + // Delete the evict-leader-scheduler through the PD service by removing the last store_id. api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) @@ -363,7 +363,7 @@ func (suite *serverTestSuite) TestSchedulerSync() { api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) checkEvictLeaderSchedulerExist(re, schedulersController, false) - // Delete the evict-leader-scheduler through the API server. + // Delete the evict-leader-scheduler through the PD service. 
api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) @@ -551,7 +551,7 @@ func (suite *serverTestSuite) TestStoreLimit() { leaderServer.GetRaftCluster().SetStoreLimit(1, storelimit.RemovePeer, 60) leaderServer.GetRaftCluster().SetStoreLimit(2, storelimit.AddPeer, 60) leaderServer.GetRaftCluster().SetStoreLimit(2, storelimit.RemovePeer, 60) - // There is a time window between setting store limit in API service side and capturing the change in scheduling service. + // There is a time window between setting store limit in PD service side and capturing the change in scheduling service. waitSyncFinish(re, tc, storelimit.AddPeer, 60) for i := uint64(1); i <= 5; i++ { op := operator.NewTestOperator(2, &metapb.RegionEpoch{}, operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 100}) @@ -636,7 +636,7 @@ func (suite *multipleServerTestSuite) SetupSuite() { re := suite.Require() re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 2) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 2) re.NoError(err) err = suite.cluster.RunInitialServers() diff --git a/tests/integrations/mcs/tso/api_test.go b/tests/integrations/mcs/tso/api_test.go index 91614530ef1..dceb5ccdf7c 100644 --- a/tests/integrations/mcs/tso/api_test.go +++ b/tests/integrations/mcs/tso/api_test.go @@ -62,7 +62,7 @@ func (suite *tsoAPITestSuite) SetupTest() { var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.pdCluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.pdCluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.pdCluster.RunInitialServers() re.NoError(err) @@ -137,7 +137,7 @@ func TestTSOServerStartFirst(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - apiCluster, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + apiCluster, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = []string{"k1", "k2"} }) defer apiCluster.Destroy() @@ -200,7 +200,7 @@ func TestForwardOnlyTSONoScheduling(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := tests.NewTestAPICluster(ctx, 1) + tc, err := tests.NewTestPDServiceCluster(ctx, 1) defer tc.Destroy() re.NoError(err) err = tc.RunInitialServers() @@ -227,7 +227,7 @@ func TestForwardOnlyTSONoScheduling(t *testing.T) { testutil.StatusOK(re), testutil.StringContain(re, "Reset ts successfully"), testutil.WithHeader(re, apiutil.XForwardedToMicroServiceHeader, "true")) re.NoError(err) - // If close tso server, it should try forward to tso server, but return error in api mode. + // If close tso server, it should try forward to tso server, but return error in pd service mode. 
ttc.Destroy() err = testutil.CheckPostJSON(tests.TestDialClient, fmt.Sprintf("%s/%s", urlPrefix, "admin/reset-ts"), input, testutil.Status(re, http.StatusInternalServerError), testutil.StringContain(re, "[PD:apiutil:ErrRedirect]redirect failed")) diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go index 2c19f6588e5..ecbc0295845 100644 --- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go +++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go @@ -82,7 +82,7 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) SetupSuite() { var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) @@ -537,8 +537,8 @@ func TestTwiceSplitKeyspaceGroup(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/fastGroupSplitPatroller", `return(true)`)) - // Init api server config but not start. - tc, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + // Init PD service config but not start. + tc, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = []string{ "keyspace_a", "keyspace_b", } @@ -546,7 +546,7 @@ func TestTwiceSplitKeyspaceGroup(t *testing.T) { re.NoError(err) pdAddr := tc.GetConfig().GetClientURL() - // Start api server and tso server. + // Start PD service and tso server. err = tc.RunInitialServers() re.NoError(err) defer tc.Destroy() @@ -734,8 +734,8 @@ func TestGetTSOImmediately(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/fastGroupSplitPatroller", `return(true)`)) - // Init api server config but not start. - tc, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + // Init PD service config but not start. + tc, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = []string{ "keyspace_a", "keyspace_b", } @@ -743,7 +743,7 @@ func TestGetTSOImmediately(t *testing.T) { re.NoError(err) pdAddr := tc.GetConfig().GetClientURL() - // Start api server and tso server. + // Start PD service and tso server. 
err = tc.RunInitialServers() re.NoError(err) defer tc.Destroy() diff --git a/tests/integrations/mcs/tso/proxy_test.go b/tests/integrations/mcs/tso/proxy_test.go index b564076c1f0..50583ebbbb4 100644 --- a/tests/integrations/mcs/tso/proxy_test.go +++ b/tests/integrations/mcs/tso/proxy_test.go @@ -62,7 +62,7 @@ func (s *tsoProxyTestSuite) SetupSuite() { var err error s.ctx, s.cancel = context.WithCancel(context.Background()) // Create an API cluster with 1 server - s.apiCluster, err = tests.NewTestAPICluster(s.ctx, 1) + s.apiCluster, err = tests.NewTestPDServiceCluster(s.ctx, 1) re.NoError(err) err = s.apiCluster.RunInitialServers() re.NoError(err) diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go index 09a199c2d52..7416a314949 100644 --- a/tests/integrations/mcs/tso/server_test.go +++ b/tests/integrations/mcs/tso/server_test.go @@ -75,7 +75,7 @@ func (suite *tsoServerTestSuite) SetupSuite() { re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -156,19 +156,19 @@ func (suite *tsoServerTestSuite) TestParticipantStartWithAdvertiseListenAddr() { func TestTSOPath(t *testing.T) { re := require.New(t) - checkTSOPath(re, true /*isAPIServiceMode*/) - checkTSOPath(re, false /*isAPIServiceMode*/) + checkTSOPath(re, true /*isPDServiceMode*/) + checkTSOPath(re, false /*isPDServiceMode*/) } -func checkTSOPath(re *require.Assertions, isAPIServiceMode bool) { +func checkTSOPath(re *require.Assertions, isPDServiceMode bool) { var ( cluster *tests.TestCluster err error ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if isAPIServiceMode { - cluster, err = tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + if isPDServiceMode { + cluster, err = tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.MicroService.EnableTSODynamicSwitching = false }) } else { @@ -184,7 +184,7 @@ func checkTSOPath(re *require.Assertions, isAPIServiceMode bool) { re.NoError(pdLeader.BootstrapCluster()) backendEndpoints := pdLeader.GetAddr() client := pdLeader.GetEtcdClient() - if isAPIServiceMode { + if isPDServiceMode { re.Equal(0, getEtcdTimestampKeyNum(re, client)) } else { re.Equal(1, getEtcdTimestampKeyNum(re, client)) @@ -217,7 +217,7 @@ func getEtcdTimestampKeyNum(re *require.Assertions, client *clientv3.Client) int return count } -type APIServerForward struct { +type PDServiceForward struct { re *require.Assertions ctx context.Context cancel context.CancelFunc @@ -227,13 +227,13 @@ type APIServerForward struct { pdClient pd.Client } -func NewAPIServerForward(re *require.Assertions) APIServerForward { - suite := APIServerForward{ +func NewPDServiceForward(re *require.Assertions) PDServiceForward { + suite := PDServiceForward{ re: re, } var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 3) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 3) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -254,7 +254,7 @@ func NewAPIServerForward(re *require.Assertions) APIServerForward { return suite } -func (suite *APIServerForward) ShutDown() { +func (suite *PDServiceForward) ShutDown() { suite.pdClient.Close() re := suite.re @@ -273,7 +273,7 @@ func (suite *APIServerForward) ShutDown() { 
func TestForwardTSORelated(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() leaderServer := suite.cluster.GetLeaderServer().GetServer() cfg := leaderServer.GetMicroServiceConfig().Clone() @@ -290,7 +290,7 @@ func TestForwardTSORelated(t *testing.T) { func TestForwardTSOWhenPrimaryChanged(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() tc, err := tests.NewTestTSOCluster(suite.ctx, 2, suite.backendEndpoints) @@ -330,7 +330,7 @@ func TestForwardTSOWhenPrimaryChanged(t *testing.T) { func TestResignTSOPrimaryForward(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() // TODO: test random kill primary with 3 nodes tc, err := tests.NewTestTSOCluster(suite.ctx, 2, suite.backendEndpoints) @@ -356,7 +356,7 @@ func TestResignTSOPrimaryForward(t *testing.T) { func TestResignAPIPrimaryForward(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() tc, err := tests.NewTestTSOCluster(suite.ctx, 2, suite.backendEndpoints) @@ -380,7 +380,7 @@ func TestResignAPIPrimaryForward(t *testing.T) { func TestForwardTSOUnexpectedToFollower1(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() suite.checkForwardTSOUnexpectedToFollower(func() { // unary call will retry internally @@ -393,7 +393,7 @@ func TestForwardTSOUnexpectedToFollower1(t *testing.T) { func TestForwardTSOUnexpectedToFollower2(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() suite.checkForwardTSOUnexpectedToFollower(func() { // unary call will retry internally @@ -407,7 +407,7 @@ func TestForwardTSOUnexpectedToFollower2(t *testing.T) { func TestForwardTSOUnexpectedToFollower3(t *testing.T) { re := require.New(t) - suite := NewAPIServerForward(re) + suite := NewPDServiceForward(re) defer suite.ShutDown() suite.checkForwardTSOUnexpectedToFollower(func() { _, _, err := suite.pdClient.GetTS(suite.ctx) @@ -415,7 +415,7 @@ func TestForwardTSOUnexpectedToFollower3(t *testing.T) { }) } -func (suite *APIServerForward) checkForwardTSOUnexpectedToFollower(checkTSO func()) { +func (suite *PDServiceForward) checkForwardTSOUnexpectedToFollower(checkTSO func()) { re := suite.re tc, err := tests.NewTestTSOCluster(suite.ctx, 2, suite.backendEndpoints) re.NoError(err) @@ -451,7 +451,7 @@ func (suite *APIServerForward) checkForwardTSOUnexpectedToFollower(checkTSO func tc.Destroy() } -func (suite *APIServerForward) addRegions() { +func (suite *PDServiceForward) addRegions() { leader := suite.cluster.GetServer(suite.cluster.WaitLeader()) rc := leader.GetServer().GetRaftCluster() for i := range 3 { @@ -465,7 +465,7 @@ func (suite *APIServerForward) addRegions() { } } -func (suite *APIServerForward) checkUnavailableTSO(re *require.Assertions) { +func (suite *PDServiceForward) checkUnavailableTSO(re *require.Assertions) { _, _, err := suite.pdClient.GetTS(suite.ctx) re.Error(err) // try to update gc safe point @@ -476,7 +476,7 @@ func (suite *APIServerForward) checkUnavailableTSO(re *require.Assertions) { re.Error(err) } -func (suite *APIServerForward) checkAvailableTSO(re *require.Assertions) { +func (suite *PDServiceForward) checkAvailableTSO(re *require.Assertions) { 
mcs.WaitForTSOServiceAvailable(suite.ctx, re, suite.pdClient) // try to get ts _, _, err := suite.pdClient.GetTS(suite.ctx) @@ -512,7 +512,7 @@ func (suite *CommonTestSuite) SetupSuite() { var err error re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -576,7 +576,7 @@ func (suite *CommonTestSuite) TestBootstrapDefaultKeyspaceGroup() { } check() - s, err := suite.cluster.JoinAPIServer(suite.ctx) + s, err := suite.cluster.JoinPDServer(suite.ctx) re.NoError(err) re.NoError(s.Run()) @@ -598,7 +598,7 @@ func TestTSOServiceSwitch(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := tests.NewTestAPICluster(ctx, 1, + tc, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.MicroService.EnableTSODynamicSwitching = true }, diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go index 2cda9f8734f..a06e44ed4ab 100644 --- a/tests/integrations/tso/client_test.go +++ b/tests/integrations/tso/client_test.go @@ -98,7 +98,7 @@ func (suite *tsoClientTestSuite) SetupSuite() { if suite.legacy { suite.cluster, err = tests.NewTestCluster(suite.ctx, serverCount) } else { - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, serverCount, func(conf *config.Config, _ string) { + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, serverCount, func(conf *config.Config, _ string) { conf.MicroService.EnableTSODynamicSwitching = false }) } @@ -510,7 +510,7 @@ func TestMixedTSODeployment(t *testing.T) { re.NotNil(leaderServer) backendEndpoints := leaderServer.GetAddr() - apiSvr, err := cluster.JoinAPIServer(ctx) + apiSvr, err := cluster.JoinPDServer(ctx) re.NoError(err) err = apiSvr.Run() re.NoError(err) @@ -544,7 +544,7 @@ func TestUpgradingAPIandTSOClusters(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) // Create an API cluster which has 3 servers - apiCluster, err := tests.NewTestAPICluster(ctx, 3) + apiCluster, err := tests.NewTestPDServiceCluster(ctx, 3) re.NoError(err) err = apiCluster.RunInitialServers() re.NoError(err) diff --git a/tests/integrations/tso/consistency_test.go b/tests/integrations/tso/consistency_test.go index 147f41a4591..b29ae696f26 100644 --- a/tests/integrations/tso/consistency_test.go +++ b/tests/integrations/tso/consistency_test.go @@ -76,7 +76,7 @@ func (suite *tsoConsistencyTestSuite) SetupSuite() { if suite.legacy { suite.cluster, err = tests.NewTestCluster(suite.ctx, serverCount) } else { - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, serverCount) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, serverCount) } re.NoError(err) err = suite.cluster.RunInitialServers() diff --git a/tests/integrations/tso/server_test.go b/tests/integrations/tso/server_test.go index f03db197b35..1428dbcd1a6 100644 --- a/tests/integrations/tso/server_test.go +++ b/tests/integrations/tso/server_test.go @@ -74,7 +74,7 @@ func (suite *tsoServerTestSuite) SetupSuite() { if suite.legacy { suite.cluster, err = tests.NewTestCluster(suite.ctx, serverCount) } else { - suite.cluster, err = tests.NewTestAPICluster(suite.ctx, serverCount) + suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, serverCount) } re.NoError(err) err = suite.cluster.RunInitialServers() diff --git a/tests/server/api/scheduler_test.go 
b/tests/server/api/scheduler_test.go index 1f76c469cfd..d1d5b06ceb4 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -55,7 +55,7 @@ func TestPDSchedulingTestSuite(t *testing.T) { func TestAPISchedulingTestSuite(t *testing.T) { suite.Run(t, &scheduleTestSuite{ - runMode: tests.APIMode, + runMode: tests.PDServiceMode, }) } diff --git a/tests/server/apiv2/handlers/tso_keyspace_group_test.go b/tests/server/apiv2/handlers/tso_keyspace_group_test.go index 796fd514eef..851df9b5fd1 100644 --- a/tests/server/apiv2/handlers/tso_keyspace_group_test.go +++ b/tests/server/apiv2/handlers/tso_keyspace_group_test.go @@ -42,7 +42,7 @@ func TestKeyspaceGroupTestSuite(t *testing.T) { func (suite *keyspaceGroupTestSuite) SetupTest() { re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) - cluster, err := tests.NewTestAPICluster(suite.ctx, 1) + cluster, err := tests.NewTestPDServiceCluster(suite.ctx, 1) suite.cluster = cluster re.NoError(err) re.NoError(cluster.RunInitialServers()) diff --git a/tests/server/server_test.go b/tests/server/server_test.go index 77cd7aa5158..06623b6f092 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -67,7 +67,7 @@ func TestUpdateAdvertiseUrls(t *testing.T) { for _, conf := range cluster.GetConfig().InitialServers { serverConf, err := conf.Generate() re.NoError(err) - s, err := tests.NewTestServer(ctx, serverConf) + s, err := tests.NewTestServer(ctx, serverConf, nil) re.NoError(err) cluster.GetServers()[conf.Name] = s } diff --git a/tests/testutil.go b/tests/testutil.go index 5e99b3dbeda..4bbfa8155b4 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -279,8 +279,8 @@ const ( Both SchedulerMode = iota // PDMode represents PD mode. PDMode - // APIMode represents API mode. - APIMode + // PDServiceMode represents PD service mode. + PDServiceMode ) // SchedulingTestEnvironment is used for test purpose. @@ -308,11 +308,11 @@ func (s *SchedulingTestEnvironment) RunTestBasedOnMode(test func(*TestCluster)) switch s.RunMode { case PDMode: s.RunTestInPDMode(test) - case APIMode: - s.RunTestInAPIMode(test) + case PDServiceMode: + s.RunTestInPDServiceMode(test) default: s.RunTestInPDMode(test) - s.RunTestInAPIMode(test) + s.RunTestInPDServiceMode(test) } } @@ -339,8 +339,8 @@ func getTestName() string { return "" } -// RunTestInAPIMode is to run test in api mode. -func (s *SchedulingTestEnvironment) RunTestInAPIMode(test func(*TestCluster)) { +// RunTestInPDServiceMode is to run test in pd service mode. +func (s *SchedulingTestEnvironment) RunTestInPDServiceMode(test func(*TestCluster)) { re := require.New(s.t) re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember", `return(true)`)) @@ -348,11 +348,11 @@ func (s *SchedulingTestEnvironment) RunTestInAPIMode(test func(*TestCluster)) { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember")) re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) }() - s.t.Logf("start test %s in api mode", getTestName()) - if _, ok := s.clusters[APIMode]; !ok { - s.startCluster(APIMode) + s.t.Logf("start test %s in pd service mode", getTestName()) + if _, ok := s.clusters[PDServiceMode]; !ok { + s.startCluster(PDServiceMode) } - test(s.clusters[APIMode]) + test(s.clusters[PDServiceMode]) } // Cleanup is to cleanup the environment.
@@ -379,8 +379,8 @@ func (s *SchedulingTestEnvironment) startCluster(m SchedulerMode) { leaderServer := cluster.GetServer(cluster.GetLeader()) re.NoError(leaderServer.BootstrapCluster()) s.clusters[PDMode] = cluster - case APIMode: - cluster, err := NewTestAPICluster(ctx, 1, s.opts...) + case PDServiceMode: + cluster, err := NewTestPDServiceCluster(ctx, 1, s.opts...) re.NoError(err) err = cluster.RunInitialServers() re.NoError(err) @@ -398,7 +398,7 @@ func (s *SchedulingTestEnvironment) startCluster(m SchedulerMode) { testutil.Eventually(re, func() bool { return cluster.GetLeaderServer().GetServer().IsServiceIndependent(constant.SchedulingServiceName) }) - s.clusters[APIMode] = cluster + s.clusters[PDServiceMode] = cluster } } diff --git a/tools/pd-ctl/pdctl/command/config_command.go b/tools/pd-ctl/pdctl/command/config_command.go index 2e9903db550..f89c63bc51c 100644 --- a/tools/pd-ctl/pdctl/command/config_command.go +++ b/tools/pd-ctl/pdctl/command/config_command.go @@ -49,8 +49,8 @@ const ( ruleBundlePrefix = "pd/api/v1/config/placement-rule" pdServerPrefix = "pd/api/v1/config/pd-server" serviceMiddlewareConfigPrefix = "pd/api/v1/service-middleware/config" - // flagFromAPIServer has no influence for pd mode, but it is useful for us to debug in api mode. - flagFromAPIServer = "from_api_server" + // flagFromPDService has no influence for pd mode, but it is useful for us to debug in pd service mode. + flagFromPDService = "from_pd_service" ) // NewConfigCommand return a config subcommand of rootCmd @@ -81,7 +81,7 @@ func NewShowConfigCommand() *cobra.Command { sc.AddCommand(newShowReplicationModeCommand()) sc.AddCommand(NewShowServerConfigCommand()) sc.AddCommand(NewShowServiceMiddlewareConfigCommand()) - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") return sc } @@ -92,7 +92,7 @@ func NewShowAllConfigCommand() *cobra.Command { Short: "show all config of PD", Run: showAllConfigCommandFunc, } - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") return sc } @@ -103,7 +103,7 @@ func NewShowScheduleConfigCommand() *cobra.Command { Short: "show schedule config of PD", Run: showScheduleConfigCommandFunc, } - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") return sc } @@ -114,7 +114,7 @@ func NewShowReplicationConfigCommand() *cobra.Command { Short: "show replication config of PD", Run: showReplicationConfigCommandFunc, } - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") return sc } @@ -528,7 +528,7 @@ func NewPlacementRulesCommand() *cobra.Command { show.Flags().String("id", "", "rule id") show.Flags().String("region", "", "region id") show.Flags().Bool("detail", false, "detailed match info for region") - show.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + show.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") load := &cobra.Command{ Use: "load", Short: "load placement rules to a file", @@ -538,7 +538,7 @@ func 
NewPlacementRulesCommand() *cobra.Command { load.Flags().String("id", "", "rule id") load.Flags().String("region", "", "region id") load.Flags().String("out", "rules.json", "the filename contains rules") - load.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + load.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") save := &cobra.Command{ Use: "save", Short: "save rules from file", @@ -554,7 +554,7 @@ func NewPlacementRulesCommand() *cobra.Command { Short: "show rule group configuration(s)", Run: showRuleGroupFunc, } - ruleGroupShow.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + ruleGroupShow.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") ruleGroupSet := &cobra.Command{ Use: "set ", Short: "update rule group configuration", @@ -577,7 +577,7 @@ func NewPlacementRulesCommand() *cobra.Command { Run: getRuleBundle, } ruleBundleGet.Flags().String("out", "", "the output file") - ruleBundleGet.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + ruleBundleGet.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") ruleBundleSet := &cobra.Command{ Use: "set", Short: "set rule group config and its rules from file", @@ -596,7 +596,7 @@ func NewPlacementRulesCommand() *cobra.Command { Run: loadRuleBundle, } ruleBundleLoad.Flags().String("out", "rules.json", "the output file") - ruleBundleLoad.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") + ruleBundleLoad.Flags().Bool(flagFromPDService, false, "read data from PD service rather than micro service") ruleBundleSave := &cobra.Command{ Use: "save", Short: "save all group configs and rules from file", @@ -895,7 +895,7 @@ func saveRuleBundle(cmd *cobra.Command, _ []string) { func buildHeader(cmd *cobra.Command) http.Header { header := http.Header{} - forbiddenRedirectToMicroService, err := cmd.Flags().GetBool(flagFromAPIServer) + forbiddenRedirectToMicroService, err := cmd.Flags().GetBool(flagFromPDService) if err == nil && forbiddenRedirectToMicroService { header.Add(apiutil.XForbiddenForwardToMicroServiceHeader, "true") } diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index b6c58fe2bc6..39820a6c7b7 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -383,9 +383,9 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu f.Close() defer os.RemoveAll(fname) - checkScheduleConfig := func(scheduleCfg *sc.ScheduleConfig, isFromAPIServer bool) { + checkScheduleConfig := func(scheduleCfg *sc.ScheduleConfig, isFromPDService bool) { if schedulingServer := cluster.GetSchedulingPrimaryServer(); schedulingServer != nil { - if isFromAPIServer { + if isFromPDService { re.Equal(scheduleCfg.LeaderScheduleLimit, leaderServer.GetPersistOptions().GetLeaderScheduleLimit()) re.NotEqual(scheduleCfg.LeaderScheduleLimit, schedulingServer.GetPersistConfig().GetLeaderScheduleLimit()) } else { @@ -397,9 +397,9 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu } } - checkReplicateConfig := func(replicationCfg *sc.ReplicationConfig, isFromAPIServer bool) { + checkReplicateConfig := func(replicationCfg *sc.ReplicationConfig, isFromPDService bool) { if schedulingServer := cluster.GetSchedulingPrimaryServer(); 
schedulingServer != nil { - if isFromAPIServer { + if isFromPDService { re.Equal(replicationCfg.MaxReplicas, uint64(leaderServer.GetPersistOptions().GetMaxReplicas())) re.NotEqual(int(replicationCfg.MaxReplicas), schedulingServer.GetPersistConfig().GetMaxReplicas()) } else { @@ -411,11 +411,11 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu } } - checkRules := func(rules []*placement.Rule, isFromAPIServer bool) { + checkRules := func(rules []*placement.Rule, isFromPDService bool) { apiRules := leaderServer.GetRaftCluster().GetRuleManager().GetAllRules() if schedulingServer := cluster.GetSchedulingPrimaryServer(); schedulingServer != nil { schedulingRules := schedulingServer.GetCluster().GetRuleManager().GetAllRules() - if isFromAPIServer { + if isFromPDService { re.Len(apiRules, len(rules)) re.NotEqual(len(schedulingRules), len(rules)) } else { @@ -427,11 +427,11 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu } } - checkGroup := func(group placement.RuleGroup, isFromAPIServer bool) { + checkGroup := func(group placement.RuleGroup, isFromPDService bool) { apiGroup := leaderServer.GetRaftCluster().GetRuleManager().GetRuleGroup(placement.DefaultGroupID) if schedulingServer := cluster.GetSchedulingPrimaryServer(); schedulingServer != nil { schedulingGroup := schedulingServer.GetCluster().GetRuleManager().GetRuleGroup(placement.DefaultGroupID) - if isFromAPIServer { + if isFromPDService { re.Equal(apiGroup.Index, group.Index) re.NotEqual(schedulingGroup.Index, group.Index) } else { @@ -444,28 +444,28 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu } testConfig := func(options ...string) { - for _, isFromAPIServer := range []bool{true, false} { + for _, isFromPDService := range []bool{true, false} { cmd := ctl.GetRootCmd() args := []string{"-u", pdAddr, "config", "show"} args = append(args, options...) - if isFromAPIServer { - args = append(args, "--from_api_server") + if isFromPDService { + args = append(args, "--from_pd_service") } output, err := tests.ExecuteCommand(cmd, args...) re.NoError(err) if len(options) == 0 || options[0] == "all" { cfg := config.Config{} re.NoError(json.Unmarshal(output, &cfg)) - checkReplicateConfig(&cfg.Replication, isFromAPIServer) - checkScheduleConfig(&cfg.Schedule, isFromAPIServer) + checkReplicateConfig(&cfg.Replication, isFromPDService) + checkScheduleConfig(&cfg.Schedule, isFromPDService) } else if options[0] == "replication" { replicationCfg := &sc.ReplicationConfig{} re.NoError(json.Unmarshal(output, replicationCfg)) - checkReplicateConfig(replicationCfg, isFromAPIServer) + checkReplicateConfig(replicationCfg, isFromPDService) } else if options[0] == "schedule" { scheduleCfg := &sc.ScheduleConfig{} re.NoError(json.Unmarshal(output, scheduleCfg)) - checkScheduleConfig(scheduleCfg, isFromAPIServer) + checkScheduleConfig(scheduleCfg, isFromPDService) } else { re.Fail("no implement") } @@ -473,37 +473,37 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu } testRules := func(options ...string) { - for _, isFromAPIServer := range []bool{true, false} { + for _, isFromPDService := range []bool{true, false} { cmd := ctl.GetRootCmd() args := []string{"-u", pdAddr, "config", "placement-rules"} args = append(args, options...) - if isFromAPIServer { - args = append(args, "--from_api_server") + if isFromPDService { + args = append(args, "--from_pd_service") } output, err := tests.ExecuteCommand(cmd, args...) 
re.NoError(err) if options[0] == "show" { var rules []*placement.Rule re.NoError(json.Unmarshal(output, &rules)) - checkRules(rules, isFromAPIServer) + checkRules(rules, isFromPDService) } else if options[0] == "load" { var rules []*placement.Rule b, _ := os.ReadFile(fname) re.NoError(json.Unmarshal(b, &rules)) - checkRules(rules, isFromAPIServer) + checkRules(rules, isFromPDService) } else if options[0] == "rule-group" { var group placement.RuleGroup re.NoError(json.Unmarshal(output, &group), string(output)) - checkGroup(group, isFromAPIServer) + checkGroup(group, isFromPDService) } else if options[0] == "rule-bundle" && options[1] == "get" { var bundle placement.GroupBundle re.NoError(json.Unmarshal(output, &bundle), string(output)) - checkRules(bundle.Rules, isFromAPIServer) + checkRules(bundle.Rules, isFromPDService) } else if options[0] == "rule-bundle" && options[1] == "load" { var bundles []placement.GroupBundle b, _ := os.ReadFile(fname) re.NoError(json.Unmarshal(b, &bundles), string(output)) - checkRules(bundles[0].Rules, isFromAPIServer) + checkRules(bundles[0].Rules, isFromPDService) } else { re.Fail("no implement") } @@ -522,13 +522,13 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu re.Equal(uint64(233), sche.GetPersistConfig().GetLeaderScheduleLimit()) re.Equal(7, sche.GetPersistConfig().GetMaxReplicas()) } - // show config from api server rather than scheduling server + // show config from PD service rather than scheduling server testConfig() - // show all config from api server rather than scheduling server + // show all config from PD service rather than scheduling server testConfig("all") - // show replication config from api server rather than scheduling server + // show replication config from PD service rather than scheduling server testConfig("replication") - // show schedule config from api server rather than scheduling server + // show schedule config from PD service rather than scheduling server testConfig("schedule") // Test Rule diff --git a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go index fca00f2fd3c..fff95856931 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go @@ -41,7 +41,7 @@ func TestKeyspaceGroup(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := pdTests.NewTestAPICluster(ctx, 1) + tc, err := pdTests.NewTestPDServiceCluster(ctx, 1) re.NoError(err) defer tc.Destroy() err = tc.RunInitialServers() @@ -102,7 +102,7 @@ func TestSplitKeyspaceGroup(t *testing.T) { for i := range 129 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -157,7 +157,7 @@ func TestExternalAllocNodeWhenStart(t *testing.T) { for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -197,7 +197,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) { for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := 
pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -301,7 +301,7 @@ func TestMergeKeyspaceGroup(t *testing.T) { for i := range 129 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -420,7 +420,7 @@ func TestKeyspaceGroupState(t *testing.T) { for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -511,7 +511,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) { for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) diff --git a/tools/pd-ctl/tests/keyspace/keyspace_test.go b/tools/pd-ctl/tests/keyspace/keyspace_test.go index 23a1148cd66..6a523ced7b8 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_test.go @@ -49,7 +49,7 @@ func TestKeyspace(t *testing.T) { for i := 1; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { + tc, err := pdTests.NewTestPDServiceCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -155,7 +155,7 @@ func (suite *keyspaceTestSuite) SetupTest() { suite.ctx, suite.cancel = context.WithCancel(context.Background()) re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)")) - tc, err := pdTests.NewTestAPICluster(suite.ctx, 1) + tc, err := pdTests.NewTestPDServiceCluster(suite.ctx, 1) re.NoError(err) re.NoError(tc.RunInitialServers()) tc.WaitLeader() diff --git a/tools/pd-simulator/simulator/drive.go b/tools/pd-simulator/simulator/drive.go index 0d81a2af1ab..d8a6094760e 100644 --- a/tools/pd-simulator/simulator/drive.go +++ b/tools/pd-simulator/simulator/drive.go @@ -165,7 +165,7 @@ func (d *Driver) allocID() error { func (d *Driver) updateNodesClient() error { urls := strings.Split(d.pdAddr, ",") ctx, cancel := context.WithCancel(context.Background()) - SD = sd.NewDefaultPDServiceDiscovery(ctx, cancel, urls, nil) + SD = sd.NewDefaultServiceDiscovery(ctx, cancel, urls, nil) if err := SD.Init(); err != nil { return err }