diff --git a/pruner/params.go b/pruner/params.go
index f50ac488e5..ec3c824caf 100644
--- a/pruner/params.go
+++ b/pruner/params.go
@@ -10,11 +10,15 @@ type Params struct {
 	// gcCycle is the frequency at which the pruning Service
 	// runs the ticker. If set to 0, the Service will not run.
 	gcCycle time.Duration
+	// maxPruneablePerGC sets an upper limit to how many blocks
+	// can be pruned during a GC cycle.
+	maxPruneablePerGC uint64
 }
 
 func DefaultParams() Params {
 	return Params{
-		gcCycle: time.Minute * 30,
+		gcCycle:           time.Minute * 1,
+		maxPruneablePerGC: 50000,
 	}
 }
 
@@ -39,3 +43,11 @@ func WithDisabledGC() Option {
 func WithPrunerMetrics(s *Service) error {
 	return s.WithMetrics()
 }
+
+// WithMaxPruneablePerGC sets the upper limit for how many blocks can
+// be pruned per GC cycle.
+func WithMaxPruneablePerGC(maxPruneable uint64) Option {
+	return func(p *Params) {
+		p.maxPruneablePerGC = maxPruneable
+	}
+}
diff --git a/pruner/service.go b/pruner/service.go
index 3426e1ed02..a274812ac0 100644
--- a/pruner/service.go
+++ b/pruner/service.go
@@ -32,7 +32,9 @@ type Service struct {
 	ds         datastore.Datastore
 	checkpoint *checkpoint
 
-	// TODO @renaynay @distractedmind: how would this impact a node that enables pruning after being an archival node?
+	// TODO @renaynay: how would this impact a node that enables pruning after being an archival node?
+	// e.g. Node has already 600k+ blocks stored, how long will it take to clean up all blocks outside
+	// of pruning window?
 	maxPruneablePerGC uint64
 	numBlocksInWindow uint64
 
@@ -66,8 +68,7 @@ func NewService(
 		checkpoint:        &checkpoint{FailedHeaders: map[uint64]string{}},
 		ds:                namespace.Wrap(ds, storePrefix),
 		numBlocksInWindow: numBlocksInWindow,
-		// TODO @distractedmind: make this configurable?
-		maxPruneablePerGC: numBlocksInWindow * 2,
+		maxPruneablePerGC: params.maxPruneablePerGC,
 		doneCh:            make(chan struct{}),
 		params:            params,
 	}
@@ -119,7 +120,7 @@ func (s *Service) prune() {
 			return
 		case <-ticker.C:
 			headers, err := s.findPruneableHeaders(s.ctx)
-			if err != nil {
+			if err != nil || len(headers) == 0 {
 				// TODO @renaynay: record errors properly
 				log.Errorw("failed to find prune-able blocks", "error", err)
 				continue
 			}
@@ -129,15 +130,16 @@
 
 			// TODO @renaynay: make deadline a param ? / configurable?
 			pruneCtx, cancel := context.WithDeadline(s.ctx, time.Now().Add(time.Minute))
+
+			log.Debugw("pruning headers", "from", headers[0].Height(), "to",
+				headers[len(headers)-1].Height())
 			for _, eh := range headers {
-				log.Debugw("pruning block", "height", eh.Height())
 				err = s.pruner.Prune(pruneCtx, eh)
 				if err != nil {
-					// TODO: @distractedm1nd: updatecheckpoint should be called on the last NON-ERRORED header
 					log.Errorw("failed to prune block", "height", eh.Height(), "err", err)
 					failed[eh.Height()] = err
 				} else {
-					lastPrunedHeader = eh // TODO @renaynay: make prettier
+					lastPrunedHeader = eh // TODO @renaynay: make prettier, updatecheckpoint should be called on the last NON-ERRORED header
 				}
 				s.metrics.observePrune(pruneCtx, err != nil)
 			}
@@ -149,6 +151,7 @@
 				continue
 			}
 
+			log.Debugw("retrying failed headers", "amount", len(s.checkpoint.FailedHeaders))
 			s.retryFailed(s.ctx) // TODO @renaynay: persist the results of this to disk
 		}
 	}