diff --git a/pkg/core/storelimit/store_limit_scenes.go b/pkg/core/storelimit/store_limit_scenes.go
deleted file mode 100644
index c1af28541ad..00000000000
--- a/pkg/core/storelimit/store_limit_scenes.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 TiKV Project Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storelimit
-
-// Scene defines the store limit for different scenes.
-// Idle/Low/Normal/High indicate the load of the cluster; they are defined
-// in cluster.State. See the details there for how the load is calculated.
-// The values here define the store limit for each load. For example,
-// Idle = 60 means the store limit is set to 60 when the cluster is idle.
-//
-// NOTE: This type is exported via the HTTP API. Please pay more attention when modifying it.
-type Scene struct {
-	Idle   int
-	Low    int
-	Normal int
-	High   int
-}
-
-// DefaultScene returns a Scene object with default values
-func DefaultScene(limitType Type) *Scene {
-	defaultScene := &Scene{
-		Idle:   100,
-		Low:    50,
-		Normal: 32,
-		High:   12,
-	}
-
-	// change this if different limit types need different default scenes
-	switch limitType {
-	case AddPeer:
-		return defaultScene
-	case RemovePeer:
-		return defaultScene
-	default:
-		return nil
-	}
-}
diff --git a/server/api/router.go b/server/api/router.go
index 0e129706b43..6ea52304b63 100644
--- a/server/api/router.go
+++ b/server/api/router.go
@@ -227,8 +227,6 @@ func createRouter(prefix string, svr *server.Server) *mux.Router {
 	registerFunc(clusterRouter, "/stores/remove-tombstone", storesHandler.RemoveTombStone, setMethods(http.MethodDelete), setAuditBackend(localLog, prometheus))
 	registerFunc(clusterRouter, "/stores/limit", storesHandler.GetAllStoresLimit, setMethods(http.MethodGet), setAuditBackend(prometheus))
 	registerFunc(clusterRouter, "/stores/limit", storesHandler.SetAllStoresLimit, setMethods(http.MethodPost), setAuditBackend(localLog, prometheus))
-	registerFunc(clusterRouter, "/stores/limit/scene", storesHandler.SetStoreLimitScene, setMethods(http.MethodPost), setAuditBackend(localLog, prometheus))
-	registerFunc(clusterRouter, "/stores/limit/scene", storesHandler.GetStoreLimitScene, setMethods(http.MethodGet), setAuditBackend(prometheus))
 	registerFunc(clusterRouter, "/stores/progress", storesHandler.GetStoresProgress, setMethods(http.MethodGet), setAuditBackend(prometheus))
 	registerFunc(clusterRouter, "/stores/check", storesHandler.GetStoresByState, setMethods(http.MethodGet), setAuditBackend(prometheus))
diff --git a/server/api/store.go b/server/api/store.go
index 4bf4af1496b..1342645cdb9 100644
--- a/server/api/store.go
+++ b/server/api/store.go
@@ -528,46 +528,6 @@ func (h *storesHandler) GetAllStoresLimit(w http.ResponseWriter, r *http.Request
 	h.rd.JSON(w, http.StatusOK, limits)
 }
 
-// @Tags store
-// @Summary Set limit scene in the cluster.
-// @Accept json
-// @Param body body storelimit.Scene true "Store limit scene"
-// @Produce json
-// @Success 200 {string} string "Set store limit scene successfully."
-// @Failure 400 {string} string "The input is invalid."
-// @Failure 500 {string} string "PD server failed to proceed the request."
-// @Router /stores/limit/scene [post]
-func (h *storesHandler) SetStoreLimitScene(w http.ResponseWriter, r *http.Request) {
-	typeName := r.URL.Query().Get("type")
-	typeValue, err := parseStoreLimitType(typeName)
-	if err != nil {
-		h.rd.JSON(w, http.StatusBadRequest, err.Error())
-		return
-	}
-	scene := h.Handler.GetStoreLimitScene(typeValue)
-	if err := apiutil.ReadJSONRespondError(h.rd, w, r.Body, &scene); err != nil {
-		return
-	}
-	h.Handler.SetStoreLimitScene(scene, typeValue)
-	h.rd.JSON(w, http.StatusOK, "Set store limit scene successfully.")
-}
-
-// @Tags store
-// @Summary Get limit scene in the cluster.
-// @Produce json
-// @Success 200 {string} string "Get store limit scene successfully."
-// @Router /stores/limit/scene [get]
-func (h *storesHandler) GetStoreLimitScene(w http.ResponseWriter, r *http.Request) {
-	typeName := r.URL.Query().Get("type")
-	typeValue, err := parseStoreLimitType(typeName)
-	if err != nil {
-		h.rd.JSON(w, http.StatusBadRequest, err.Error())
-		return
-	}
-	scene := h.Handler.GetStoreLimitScene(typeValue)
-	h.rd.JSON(w, http.StatusOK, scene)
-}
-
 // Progress contains status about a progress.
 type Progress struct {
 	Action string `json:"action"`
diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go
index c7b4d9ae23b..6200e57f387 100644
--- a/server/cluster/cluster.go
+++ b/server/cluster/cluster.go
@@ -165,9 +165,8 @@ type RaftCluster struct {
 	prevStoreLimit map[uint64]map[storelimit.Type]float64
 
 	// The fields below are all read-only; we cannot update them after the raft cluster starts.
-	id      id.Allocator
-	opt     *config.PersistOptions
-	limiter *StoreLimiter
+	id  id.Allocator
+	opt *config.PersistOptions
 	*schedulingController
 	ruleManager   *placement.RuleManager
 	regionLabeler *labeler.RegionLabeler
@@ -354,7 +353,6 @@ func (c *RaftCluster) Start(s Server) error {
 	if err != nil {
 		return err
 	}
-	c.limiter = NewStoreLimiter(s.GetPersistOptions())
 
 	c.loadExternalTS()
 	c.loadMinResolvedTS()
@@ -2177,11 +2175,6 @@ func (c *RaftCluster) putRegion(region *core.RegionInfo) error {
 	return nil
 }
 
-// GetStoreLimiter returns the dynamic adjusting limiter
-func (c *RaftCluster) GetStoreLimiter() *StoreLimiter {
-	return c.limiter
-}
-
 // GetStoreLimitByType returns the store limit for a given store ID and type.
 func (c *RaftCluster) GetStoreLimitByType(storeID uint64, typ storelimit.Type) float64 {
 	return c.opt.GetStoreLimitByType(storeID, typ)
diff --git a/server/cluster/cluster_stat.go b/server/cluster/cluster_stat.go
deleted file mode 100644
index ccc066c6e9f..00000000000
--- a/server/cluster/cluster_stat.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2019 TiKV Project Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cluster
-
-import (
-	"strings"
-	"time"
-
-	"github.com/pingcap/kvproto/pkg/pdpb"
-	"github.com/pingcap/log"
-	"github.com/tikv/pd/pkg/movingaverage"
-	"github.com/tikv/pd/pkg/slice"
-	"github.com/tikv/pd/pkg/utils/syncutil"
-	"go.uber.org/zap"
-)
-
-// Cluster State Statistics
-//
-// The goal of cluster state statistics is to estimate the load state
-// of a cluster over a given time duration. The basic idea is to collect
-// the load information from every store over the same duration and
-// calculate the load for the whole cluster.
-//
-// For now we only support CPU as the measurement of the load. The CPU
-// information is reported by each store with a heartbeat message, which is
-// sent to PD every interval (10s). There is no synchronization between the
-// stores, so they cannot send heartbeat messages at exactly the same time,
-// and the collected information has a time shift.
-//
-// The diagram below demonstrates the time shift. "|" indicates the latest
-// heartbeat.
-//
-// S1 ------------------------------|---------------------->
-// S2 ---------------------------|------------------------->
-// S3 ---------------------------------|------------------->
-//
-// The max time shift between 2 stores is 2*interval, which is 20s here, and
-// this is also the max time shift for the whole cluster. We assume that the
-// time each store starts to heartbeat is randomized, so the average time
-// shift of the cluster is 10s. This is acceptable for statistics.
-//
-// Implementation
-//
-// Keep a 5-minute history of statistics for each store. The history is
-// stored in a circular array which evicts the oldest entry in FIFO order.
-// All the stores' histories combine into the cluster's history, so we can
-// calculate any load value within the last 5 minutes. The calculation is
-// simple: iterate each store's history from the latest entry with the same
-// step and compute the average CPU usage for the cluster.
-//
-// For example, to calculate the average load of the cluster within 3
-// minutes: start from the tail of the circular array (which stores the
-// history) and go backward 18 steps to collect the statistics recorded in
-// that window, then calculate the average CPU usage for each store. The
-// average of all the stores' CPU usage is the CPU usage of the whole
-// cluster.
-//
-
-// LoadState indicates the load of a cluster or store
-type LoadState int
-
-// Supported LoadStates; None means no state has been determined yet
-const (
-	LoadStateNone LoadState = iota
-	LoadStateIdle
-	LoadStateLow
-	LoadStateNormal
-	LoadStateHigh
-)
-
-// String returns the string representation of a LoadState
-func (s LoadState) String() string {
-	switch s {
-	case LoadStateIdle:
-		return "idle"
-	case LoadStateLow:
-		return "low"
-	case LoadStateNormal:
-		return "normal"
-	case LoadStateHigh:
-		return "high"
-	}
-	return "none"
-}
-
-// ThreadsCollected filters the threads taken into account in the
-// calculation of CPU usage.
-var ThreadsCollected = []string{"grpc-server-"}
-
-// NumberOfEntries is the max number of StatEntries preserved; it is the
-// history of a store's heartbeats. The interval of store heartbeats from
-// TiKV is 10s, so preserving 30 entries per store covers about 5 minutes.
-const NumberOfEntries = 30
-
-// StaleEntriesTimeout is the time before an entry is deleted as stale.
-// It is about 30 entries * 10s.
-const StaleEntriesTimeout = 300 * time.Second
-
-// StatEntry is an entry of store statistics
-type StatEntry pdpb.StoreStats
-
-// CPUEntries saves a history of store statistics
-type CPUEntries struct {
-	cpu     movingaverage.MovingAvg
-	updated time.Time
-}
-
-// NewCPUEntries returns a CPUEntries with a fixed size
-func NewCPUEntries(size int) *CPUEntries {
-	return &CPUEntries{
-		cpu: movingaverage.NewMedianFilter(size),
-	}
-}
-
-// Append appends a StatEntry; it accepts an optional list of thread name
-// prefixes as a filter of CPU usage
-func (s *CPUEntries) Append(stat *StatEntry, threads ...string) bool {
-	usages := stat.CpuUsages
-	// all gRPC fields are optional, so we must check the empty value
-	if usages == nil {
-		return false
-	}
-
-	cpu := float64(0)
-	appended := 0
-	for _, usage := range usages {
-		name := usage.GetKey()
-		value := usage.GetValue()
-		if threads != nil && slice.NoneOf(threads, func(i int) bool {
-			return strings.HasPrefix(name, threads[i])
-		}) {
-			continue
-		}
-		cpu += float64(value)
-		appended++
-	}
-	if appended > 0 {
-		s.cpu.Add(cpu / float64(appended))
-		s.updated = time.Now()
-		return true
-	}
-	return false
-}
-
-// CPU returns the cpu usage
-func (s *CPUEntries) CPU() float64 {
-	return s.cpu.Get()
-}
-
-// StatEntries saves the CPU statistics for each store in the cluster
-type StatEntries struct {
-	m     syncutil.RWMutex
-	stats map[uint64]*CPUEntries
-	size  int   // number of entries to keep for each store
-	total int64 // total number of StatEntries appended
-	ttl   time.Duration
-}
-
-// NewStatEntries returns a statistics object for the cluster
-func NewStatEntries(size int) *StatEntries {
-	return &StatEntries{
-		stats: make(map[uint64]*CPUEntries),
-		size:  size,
-		ttl:   StaleEntriesTimeout,
-	}
-}
-
-// Append appends a store's StatEntry
-func (cst *StatEntries) Append(stat *StatEntry) bool {
-	cst.m.Lock()
-	defer cst.m.Unlock()
-
-	cst.total++
-
-	// append the entry
-	storeID := stat.StoreId
-	entries, ok := cst.stats[storeID]
-	if !ok {
-		entries = NewCPUEntries(cst.size)
-		cst.stats[storeID] = entries
-	}
-
-	return entries.Append(stat, ThreadsCollected...)
-}
-
-// CPU returns the cpu usage of the cluster
-func (cst *StatEntries) CPU(excludes ...uint64) float64 {
-	cst.m.Lock()
-	defer cst.m.Unlock()
-
-	// no entries have been collected
-	if cst.total == 0 {
-		return 0
-	}
-
-	sum := 0.0
-	for sid, stat := range cst.stats {
-		if slice.Contains(excludes, sid) {
-			continue
-		}
-		if time.Since(stat.updated) > cst.ttl {
-			delete(cst.stats, sid)
-			continue
-		}
-		sum += stat.CPU()
-	}
-	if len(cst.stats) == 0 {
-		return 0.0
-	}
-	return sum / float64(len(cst.stats))
-}
-
-// State collects information from store heartbeats
-// and calculates the load state of the cluster
-type State struct {
-	cst *StatEntries
-}
-
-// NewState returns a State object which collects information from store
-// heartbeats and gives the current load state of the cluster
-func NewState() *State {
-	return &State{
-		cst: NewStatEntries(NumberOfEntries),
-	}
-}
-
-// State returns the state of the cluster; excludes is the list of store IDs
-// to be excluded
-func (cs *State) State(excludes ...uint64) LoadState {
-	// Return LoadStateNone if not enough heartbeats have been
-	// collected.
-	if cs.cst.total < NumberOfEntries {
-		return LoadStateNone
-	}
-
-	// The CPU usage is in fact collected from the grpc-server threads, so it
-	// is not the CPU usage of the whole TiKV process. The boundaries are
-	// empirical values.
-	// TODO: we may get a more accurate state with the information of the
-	// number of CPU cores
-	cpu := cs.cst.CPU(excludes...)
-	log.Debug("calculated cpu", zap.Float64("usage", cpu))
-	clusterStateCPUGauge.Set(cpu)
-	switch {
-	case cpu < 5:
-		return LoadStateIdle
-	case cpu >= 5 && cpu < 10:
-		return LoadStateLow
-	case cpu >= 10 && cpu < 30:
-		return LoadStateNormal
-	case cpu >= 30:
-		return LoadStateHigh
-	}
-	return LoadStateNone
-}
-
-// Collect collects statistics from a store heartbeat
-func (cs *State) Collect(stat *StatEntry) {
-	cs.cst.Append(stat)
-}
diff --git a/server/cluster/cluster_stat_test.go b/server/cluster/cluster_stat_test.go
deleted file mode 100644
index 01d937334f0..00000000000
--- a/server/cluster/cluster_stat_test.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2019 TiKV Project Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cluster
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/pingcap/kvproto/pkg/pdpb"
-	"github.com/stretchr/testify/require"
-)
-
-func cpu(usage int64) []*pdpb.RecordPair {
-	n := 10
-	name := "cpu"
-	pairs := make([]*pdpb.RecordPair, n)
-	for i := range n {
-		pairs[i] = &pdpb.RecordPair{
-			Key:   fmt.Sprintf("%s:%d", name, i),
-			Value: uint64(usage),
-		}
-	}
-	return pairs
-}
-
-func TestCPUEntriesAppend(t *testing.T) {
-	re := require.New(t)
-	N := 10
-
-	checkAppend := func(appended bool, usage int64, threads ...string) {
-		entries := NewCPUEntries(N)
-		re.NotNil(entries)
-		for range N {
-			entry := &StatEntry{
-				CpuUsages: cpu(usage),
-			}
-			re.Equal(appended, entries.Append(entry, threads...))
-		}
-		re.Equal(float64(usage), entries.cpu.Get())
-	}
-
-	checkAppend(true, 20)
-	checkAppend(true, 20, "cpu")
-	checkAppend(false, 0, "cup")
-}
-
-func TestCPUEntriesCPU(t *testing.T) {
-	re := require.New(t)
-	N := 10
-	entries := NewCPUEntries(N)
-	re.NotNil(entries)
-
-	usages := cpu(20)
-	for range N {
-		entry := &StatEntry{
-			CpuUsages: usages,
-		}
-		entries.Append(entry)
-	}
-	re.Equal(float64(20), entries.CPU())
-}
-
-func TestStatEntriesAppend(t *testing.T) {
-	re := require.New(t)
-	N := 10
-	cst := NewStatEntries(N)
-	re.NotNil(cst)
-	ThreadsCollected = []string{"cpu:"}
-
-	// fill 2*N entries, 2 entries for each store
-	for i := range 2 * N {
-		entry := &StatEntry{
-			StoreId:   uint64(i % N),
-			CpuUsages: cpu(20),
-		}
-		re.True(cst.Append(entry))
-	}
-
-	// use i as the store ID
-	for i := range N {
-		re.Equal(float64(20), cst.stats[uint64(i)].CPU())
-	}
-}
-
-func TestStatEntriesCPU(t *testing.T) {
-	re := require.New(t)
-	N := 10
-	cst := NewStatEntries(N)
-	re.NotNil(cst)
-
-	// the average cpu usage is 20%
-	usages := cpu(20)
-	ThreadsCollected = []string{"cpu:"}
-
-	// 2 entries per store
-	for i := range 2 * N {
-		entry := &StatEntry{
-			StoreId:   uint64(i % N),
-			CpuUsages: usages,
-		}
-		re.True(cst.Append(entry))
-	}
-
-	re.Equal(int64(2*N), cst.total)
-	// the cpu usage of the whole cluster is 20%
-	re.Equal(float64(20), cst.CPU())
-}
-
-func TestStatEntriesCPUStale(t *testing.T) {
-	re := require.New(t)
-	N := 10
-	cst := NewStatEntries(N)
-	// make all entries stale immediately
-	cst.ttl = 0
-
-	usages := cpu(20)
-	ThreadsCollected = []string{"cpu:"}
-	for i := range 2 * N {
-		entry := &StatEntry{
-			StoreId:   uint64(i % N),
-			CpuUsages: usages,
-		}
-		cst.Append(entry)
-	}
-	re.Equal(float64(0), cst.CPU())
-}
-
-func TestStatEntriesState(t *testing.T) {
-	re := require.New(t)
-	Load := func(usage int64) *State {
-		cst := NewStatEntries(10)
-		re.NotNil(cst)
-
-		usages := cpu(usage)
-		ThreadsCollected = []string{"cpu:"}
-
-		for range NumberOfEntries {
-			entry := &StatEntry{
-				StoreId:   0,
-				CpuUsages: usages,
-			}
-			cst.Append(entry)
-		}
-		return &State{cst}
-	}
-	re.Equal(LoadStateIdle, Load(0).State())
-	re.Equal(LoadStateLow, Load(5).State())
-	re.Equal(LoadStateNormal, Load(10).State())
-	re.Equal(LoadStateHigh, Load(30).State())
-}
diff --git a/server/cluster/store_limiter.go b/server/cluster/store_limiter.go
deleted file mode 100644
index b40bcef2eed..00000000000
--- a/server/cluster/store_limiter.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 TiKV Project Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cluster
-
-import (
-	"github.com/tikv/pd/pkg/core/storelimit"
-	sc "github.com/tikv/pd/pkg/schedule/config"
-	"github.com/tikv/pd/pkg/utils/syncutil"
-)
-
-// StoreLimiter adjusts the store limit dynamically
-type StoreLimiter struct {
-	m       syncutil.RWMutex
-	opt     sc.ConfProvider
-	scene   map[storelimit.Type]*storelimit.Scene
-	state   *State
-	current LoadState
-}
-
-// NewStoreLimiter builds a store limiter object using the given config provider
-func NewStoreLimiter(opt sc.ConfProvider) *StoreLimiter {
-	defaultScene := map[storelimit.Type]*storelimit.Scene{
-		storelimit.AddPeer:    storelimit.DefaultScene(storelimit.AddPeer),
-		storelimit.RemovePeer: storelimit.DefaultScene(storelimit.RemovePeer),
-	}
-
-	return &StoreLimiter{
-		opt:     opt,
-		state:   NewState(),
-		scene:   defaultScene,
-		current: LoadStateNone,
-	}
-}
-
-// ReplaceStoreLimitScene replaces the store limit values for different scenes
-func (s *StoreLimiter) ReplaceStoreLimitScene(scene *storelimit.Scene, limitType storelimit.Type) {
-	s.m.Lock()
-	defer s.m.Unlock()
-	if s.scene == nil {
-		s.scene = make(map[storelimit.Type]*storelimit.Scene)
-	}
-	s.scene[limitType] = scene
-}
-
-// StoreLimitScene returns the current limit values for different scenes
-func (s *StoreLimiter) StoreLimitScene(limitType storelimit.Type) *storelimit.Scene {
-	s.m.RLock()
-	defer s.m.RUnlock()
-	return s.scene[limitType]
-}
diff --git a/server/cluster/store_limiter_test.go b/server/cluster/store_limiter_test.go
deleted file mode 100644
index 5d03d7cb0f1..00000000000
--- a/server/cluster/store_limiter_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 TiKV Project Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cluster
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"github.com/tikv/pd/pkg/core/storelimit"
-	"github.com/tikv/pd/pkg/mock/mockconfig"
-)
-
-func TestStoreLimitScene(t *testing.T) {
-	re := require.New(t)
-
-	limiter := NewStoreLimiter(mockconfig.NewTestOptions())
-	re.Equal(storelimit.DefaultScene(storelimit.AddPeer), limiter.scene[storelimit.AddPeer])
-	re.Equal(storelimit.DefaultScene(storelimit.RemovePeer), limiter.scene[storelimit.RemovePeer])
-}
-
-func TestReplaceStoreLimitScene(t *testing.T) {
-	re := require.New(t)
-
-	limiter := NewStoreLimiter(mockconfig.NewTestOptions())
-
-	sceneAddPeer := &storelimit.Scene{Idle: 4, Low: 3, Normal: 2, High: 1}
-	limiter.ReplaceStoreLimitScene(sceneAddPeer, storelimit.AddPeer)
-
-	re.Equal(sceneAddPeer, limiter.scene[storelimit.AddPeer])
-
-	sceneRemovePeer := &storelimit.Scene{Idle: 5, Low: 4, Normal: 3, High: 2}
-	limiter.ReplaceStoreLimitScene(sceneRemovePeer, storelimit.RemovePeer)
-}
diff --git a/server/handler.go b/server/handler.go
index 1464c768aed..0789db9d35d 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -333,24 +333,6 @@ func (h *Handler) ResetTS(ts uint64, ignoreSmaller, skipUpperBoundCheck bool, _
 	return tsoAllocator.SetTSO(ts, ignoreSmaller, skipUpperBoundCheck)
 }
 
-// SetStoreLimitScene sets the limit values for different scenes
-func (h *Handler) SetStoreLimitScene(scene *storelimit.Scene, limitType storelimit.Type) {
-	rc := h.s.GetRaftCluster()
-	if rc == nil {
-		return
-	}
-	rc.GetStoreLimiter().ReplaceStoreLimitScene(scene, limitType)
-}
-
-// GetStoreLimitScene returns the limit values for different scenes
-func (h *Handler) GetStoreLimitScene(limitType storelimit.Type) *storelimit.Scene {
-	rc := h.s.GetRaftCluster()
-	if rc == nil {
-		return nil
-	}
-	return rc.GetStoreLimiter().StoreLimitScene(limitType)
-}
-
 // GetProgressByID returns the progress details for a given store ID.
 func (h *Handler) GetProgressByID(storeID string) (action string, p, ls, cs float64, err error) {
 	return h.s.GetRaftCluster().GetProgressByID(storeID)
diff --git a/tools/pd-ctl/pdctl/command/store_command.go b/tools/pd-ctl/pdctl/command/store_command.go
index 14492d7c75d..0ad56e0ac72 100644
--- a/tools/pd-ctl/pdctl/command/store_command.go
+++ b/tools/pd-ctl/pdctl/command/store_command.go
@@ -51,7 +51,6 @@ func NewStoreCommand() *cobra.Command {
 	s.AddCommand(NewSetStoreWeightCommand())
 	s.AddCommand(NewStoreLimitCommand())
 	s.AddCommand(NewRemoveTombStoneCommand())
-	s.AddCommand(NewStoreLimitSceneCommand())
 	s.AddCommand(NewStoreCheckCommand())
 	s.Flags().String("jq", "", "jq query")
 	s.Flags().StringSlice("state", nil, "state filter")
@@ -229,56 +228,6 @@ func NewSetAllLimitCommand() *cobra.Command {
 	}
 }
 
-// NewStoreLimitSceneCommand returns a limit-scene command for store command
-func NewStoreLimitSceneCommand() *cobra.Command {
-	return &cobra.Command{
-		Use:   "limit-scene [<scene>]|[<scene> <rate>]",
-		Short: "show or set the limit value for a scene",
-		Long:  "show or set the limit value for a scene, <type> can be 'add-peer'(default) or 'remove-peer'",
-		Run:   storeLimitSceneCommandFunc,
-	}
-}
-
-func storeLimitSceneCommandFunc(cmd *cobra.Command, args []string) {
-	var resp string
-	var err error
-	prefix := fmt.Sprintf("%s/limit/scene", storesPrefix)
-
-	switch len(args) {
-	case 0, 1:
-		// show all limit values
-		if len(args) == 1 {
-			prefix += fmt.Sprintf("?type=%v", args[0])
-		}
-		resp, err = doRequest(cmd, prefix, http.MethodGet, http.Header{})
-		if err != nil {
-			cmd.Println(err)
-			return
-		}
-		cmd.Println(resp)
-	case 2, 3:
-		// set limit value for a scene
-		scene := args[0]
-		if scene != "idle" &&
-			scene != "low" &&
-			scene != "normal" &&
-			scene != "high" {
-			cmd.Println("invalid scene")
-			return
-		}
-
-		rate, err := strconv.Atoi(args[1])
-		if err != nil {
-			cmd.Println(err)
-			return
-		}
-		if len(args) == 3 {
-			prefix = path.Join(prefix, fmt.Sprintf("?type=%s", args[2]))
-		}
-		postJSON(cmd, prefix, map[string]any{scene: rate})
-	}
-}
-
 func convertToStoreInfo(content string) string {
 	store := &response.StoreInfo{}
 	err := json.Unmarshal([]byte(content), store)
diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go
index 2f68827a816..6f704a25e8c 100644
--- a/tools/pd-ctl/tests/store/store_test.go
+++ b/tools/pd-ctl/tests/store/store_test.go
@@ -477,38 +477,6 @@ func TestStore(t *testing.T) {
 	output, err = tests.ExecuteCommand(cmd, args...)
 	re.NoError(err)
 	re.NotContains(string(output), "PANIC")
-	// store limit-scene
-	args = []string{"-u", pdAddr, "store", "limit-scene"}
-	output, err = tests.ExecuteCommand(cmd, args...)
-	re.NoError(err)
-	scene := &storelimit.Scene{}
-	err = json.Unmarshal(output, scene)
-	re.NoError(err)
-	re.Equal(storelimit.DefaultScene(storelimit.AddPeer), scene)
-
-	// store limit-scene
-	args = []string{"-u", pdAddr, "store", "limit-scene", "idle", "200"}
-	_, err = tests.ExecuteCommand(cmd, args...)
-	re.NoError(err)
-	args = []string{"-u", pdAddr, "store", "limit-scene"}
-	scene = &storelimit.Scene{}
-	output, err = tests.ExecuteCommand(cmd, args...)
-	re.NoError(err)
-	err = json.Unmarshal(output, scene)
-	re.NoError(err)
-	re.Equal(200, scene.Idle)
-
-	// store limit-scene
-	args = []string{"-u", pdAddr, "store", "limit-scene", "idle", "100", "remove-peer"}
-	_, err = tests.ExecuteCommand(cmd, args...)
-	re.NoError(err)
-	args = []string{"-u", pdAddr, "store", "limit-scene", "remove-peer"}
-	scene = &storelimit.Scene{}
-	output, err = tests.ExecuteCommand(cmd, args...)
-	re.NoError(err)
-	err = json.Unmarshal(output, scene)
-	re.NoError(err)
-	re.Equal(100, scene.Idle)
 
 	// store limit all 201 is invalid for all
 	args = []string{"-u", pdAddr, "store", "limit", "all", "201"}
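
A note for reviewers on the feature being removed: in the code deleted above, the per-scene values are only ever stored and served back over the HTTP API and pd-ctl; nothing feeds the computed LoadState into the actual store limits. The sketch below shows the mapping the feature implied — a minimal illustration assuming the Scene and LoadState types deleted above, written as if inside package cluster; sceneLimit is a hypothetical helper, not part of PD:

func sceneLimit(scene *storelimit.Scene, state LoadState) int {
	// Pick the configured store limit for the current cluster load.
	switch state {
	case LoadStateIdle:
		return scene.Idle
	case LoadStateLow:
		return scene.Low
	case LoadStateNormal:
		return scene.Normal
	case LoadStateHigh:
		return scene.High
	default:
		// LoadStateNone: not enough heartbeats collected; leave limits alone.
		return 0
	}
}

With the default scene, an idle cluster would map to a limit of 100 and a heavily loaded one to 12. Since no caller performed this mapping, dropping the whole chain (Scene, State, StoreLimiter, and their endpoints) should not affect scheduling behavior, only the now-removed API surface.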