From 8fd6fb0ac15501c73eb154f71ad181b201c4890b Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Mon, 21 Oct 2024 14:21:33 -0400
Subject: [PATCH 01/29] save debug changes

---
 src/llfs/page_allocator_state.cpp | 18 +++++++++++-------
 src/llfs/page_allocator_state.hpp |  2 +-
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/src/llfs/page_allocator_state.cpp b/src/llfs/page_allocator_state.cpp
index 578fc772..c8d0e492 100644
--- a/src/llfs/page_allocator_state.cpp
+++ b/src/llfs/page_allocator_state.cpp
@@ -458,11 +458,14 @@ PageAllocatorState::ProposalStatus PageAllocatorState::propose(PackedPageAllocat
   // If this is a valid proposal that will cause state change, go through and change the deltas to
   // the new ref count values.
   //
+  LOG(INFO) << "propose (start): txn-ref-count= " << txn->ref_counts.size();
   if (status == ProposalStatus::kValid) {
     for (PackedPageRefCount& prc : txn->ref_counts) {
-      prc.ref_count = this->calculate_new_ref_count(prc);
+      LOG(INFO) << "prc loop: " << prc;
+      prc.ref_count = this->calculate_new_ref_count(prc, *user_index);
     }
   }
+  LOG(INFO) << "propose (end): txn-ref-count= " << txn->ref_counts.size();
 
   return status;
 }
@@ -556,23 +559,23 @@ namespace {
 void run_ref_count_update_sanity_checks(const PageIdFactory& id_factory,
                                         const PackedPageRefCount& delta,
                                         const PageAllocatorRefCount& obj, i32 old_count,
-                                        i32 new_count)
+                                        i32 new_count, const u32 index)
 {
   const auto debug_info = [&](std::ostream& out) {
     const page_generation_int delta_generation = id_factory.get_generation(delta.page_id.unpack());
 
     out << "(" << BATT_INSPECT(delta) << BATT_INSPECT(obj) << BATT_INSPECT(old_count)
-        << BATT_INSPECT(new_count) << ")" << BATT_INSPECT(delta_generation);
+        << BATT_INSPECT(new_count) << ")" << BATT_INSPECT(delta_generation) << BATT_INSPECT(index);
   };
 
-  LLFS_VLOG(2) << debug_info;
+  LLFS_VLOG(2) << "rrcusc: " << debug_info;
 
   BATT_CHECK_GE(old_count, 0) << "ref counts must be non-negative" << debug_info;
   BATT_CHECK_GE(new_count, 0) << "ref counts must be non-negative" << debug_info;
 
   if (old_count == new_count) {
     BATT_CHECK_EQ(delta.ref_count, 0)
-        << "delta was non-zero but count did not change" << debug_info;
+        << "delta was non-zero but count did not change (bbora)" << debug_info;
     return;
   }
 
@@ -639,7 +642,8 @@ void run_ref_count_update_sanity_checks(const PageIdFactory& id_factory,
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-i32 PageAllocatorState::calculate_new_ref_count(const PackedPageRefCount& delta) const
+i32 PageAllocatorState::calculate_new_ref_count(const PackedPageRefCount& delta,
+                                                const u32 index) const
 {
   const PageId page_id = delta.page_id.unpack();
   const page_id_int physical_page = this->page_ids_.get_physical_page(page_id);
@@ -662,7 +666,7 @@ i32 PageAllocatorState::calculate_new_ref_count(const PackedPageRefCount& delta)
     new_count = old_count + delta.ref_count;
   }
 
-  run_ref_count_update_sanity_checks(this->page_ids_, delta, obj, old_count, new_count);
+  run_ref_count_update_sanity_checks(this->page_ids_, delta, obj, old_count, new_count, index);
 
   return new_count;
 }
diff --git a/src/llfs/page_allocator_state.hpp b/src/llfs/page_allocator_state.hpp
index c1398919..4c1e867a 100644
--- a/src/llfs/page_allocator_state.hpp
+++ b/src/llfs/page_allocator_state.hpp
@@ -242,7 +242,7 @@ class PageAllocatorState : public PageAllocatorStateNoLock
 
   // Returns the new ref count that will result from applying the delta to the passed obj.
   //
-  i32 calculate_new_ref_count(const PackedPageRefCount& delta) const;
+  i32 calculate_new_ref_count(const PackedPageRefCount& delta, const u32 index) const;
 
   /** \brief If the given ref count object has a positive ref count but *is* in the free pool, then
    * this function removes it; otherwise if the object has a zero ref count but is *not* in the free

From f94af8da8115ecafd246fcf6f02264f5eac4329f Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 18 Dec 2024 09:49:30 -0500
Subject: [PATCH 02/29] ref count changes

---
 src/llfs/committable_page_cache_job.cpp     | 19 +++--
 src/llfs/page_recycler.cpp                  | 78 +++++++++++++++++----
 src/llfs/page_recycler.hpp                  | 12 +++-
 src/llfs/page_recycler.test.cpp             |  4 +-
 src/llfs/page_recycler_events.hpp           |  8 ++-
 src/llfs/page_recycler_recovery_visitor.cpp | 15 ++++
 src/llfs/page_recycler_recovery_visitor.hpp |  4 ++
 src/llfs/volume_trimmer.cpp                 |  5 +-
 8 files changed, 121 insertions(+), 24 deletions(-)

diff --git a/src/llfs/committable_page_cache_job.cpp b/src/llfs/committable_page_cache_job.cpp
index 71709437..46358269 100644
--- a/src/llfs/committable_page_cache_job.cpp
+++ b/src/llfs/committable_page_cache_job.cpp
@@ -185,7 +185,8 @@ Status commit(std::unique_ptr<PageCacheJob> job, const JobCommitParams& params,
 Status commit(CommittablePageCacheJob committable_job, const JobCommitParams& params, u64 callers,
               slot_offset_type prev_caller_slot, batt::Watch<slot_offset_type>* durable_caller_slot)
 {
-  return committable_job.commit_impl(params, callers, prev_caller_slot, durable_caller_slot);
+  auto stat = committable_job.commit_impl(params, callers, prev_caller_slot, durable_caller_slot);
+  return stat;
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -211,7 +212,12 @@ Status CommittablePageCacheJob::commit_impl(const JobCommitParams& params, u64 c
   const PageCacheJob* job = this->job_.get();
   BATT_CHECK_NOT_NULLPTR(job);
 
-  LLFS_VLOG(1) << "commit(PageCacheJob): entered";
+  if (durable_caller_slot) {
+    LLFS_VLOG(1) << "commit(PageCacheJob): entered" << BATT_INSPECT(prev_caller_slot)
+                 << BATT_INSPECT(*durable_caller_slot);
+  } else {
+    LLFS_VLOG(1) << "commit(PageCacheJob): entered" << BATT_INSPECT(prev_caller_slot);
+  }
 
   // Make sure the job is pruned!
   //
@@ -429,7 +435,7 @@ auto CommittablePageCacheJob::start_ref_count_updates(const JobCommitParams& par
                                                       PageRefCountUpdates& updates, u64 /*callers*/)
     -> StatusOr<DeadPages>
 {
-  LLFS_VLOG(1) << "commit(PageCacheJob): updating ref counts";
+  LLFS_VLOG(1) << "commit(PageCacheJob): updating ref counts" << BATT_INSPECT(params.caller_slot);
 
   DeadPages dead_pages;
 
@@ -579,10 +585,11 @@ Status CommittablePageCacheJob::recycle_dead_pages(const JobCommitParams& params
 
   BATT_ASSIGN_OK_RESULT(
       slot_offset_type recycler_sync_point,
-      params.recycler.recycle_pages(as_slice(dead_pages.ids), params.recycle_grant,
-                                    params.recycle_depth + 1));
+      params.recycler.recycle_pages(as_slice(dead_pages.ids), params.caller_slot,
+                                    params.recycle_grant, params.recycle_depth + 1));
 
-  LLFS_VLOG(1) << "commit(PageCacheJob): waiting for PageRecycler sync point";
+  LLFS_VLOG(1) << "commit(PageCacheJob): waiting for PageRecycler sync point"
+               << BATT_INSPECT(params.caller_slot);
 
   return params.recycler.await_flush(recycler_sync_point);
   //
diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 5132b334..0616f905 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -107,6 +107,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 {
   initialize_status_codes();
 
+  LLFS_VLOG(1) << "PageRecycler:recover() start" << BATT_INSPECT(name);
   PageRecyclerRecoveryVisitor visitor{default_options};
 
   // Read the log, scanning its contents.
@@ -153,9 +154,11 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 
   state->bulk_load(as_slice(visitor.recovered_pages()));
 
-  return std::unique_ptr<PageRecycler>{new PageRecycler(scheduler, std::string{name}, page_deleter,
-                                                        std::move(*recovered_log),
-                                                        std::move(latest_batch), std::move(state))};
+  LLFS_VLOG(1) << "PageRecycler:recover() end" << BATT_INSPECT(name);
+
+  return std::unique_ptr<PageRecycler>{
+      new PageRecycler(scheduler, std::string{name}, page_deleter, std::move(*recovered_log),
+                       std::move(latest_batch), std::move(state), visitor.largest_offset())};
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -163,7 +166,8 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& name,
                            PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                            Optional<Batch>&& recovered_batch,
-                           std::unique_ptr<PageRecycler::State>&& state) noexcept
+                           std::unique_ptr<PageRecycler::State>&& state,
+                           u64 largest_offset_as_unique_identifier_init) noexcept
     : scheduler_{scheduler}
     , name_{name}
     , page_deleter_{page_deleter}
@@ -177,6 +181,7 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
     , recycle_task_{}
     , metrics_{}
     , prepared_batch_{std::move(recovered_batch)}
+    , largest_offset_as_unique_identifier_{largest_offset_as_unique_identifier_init}
 {
   const PageRecyclerOptions& options = this->state_.no_lock().options;
 
@@ -202,6 +207,34 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
 #undef ADD_METRIC_
 }
 
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+/** \brief This infrastructure is to collect metrics for PageRecycler submodule.
+ * This metric collection is currently used by test suit to assess execution behavior of internal
+ * flows. This is static metric infrastructure so that any user level code could access it.
+ *
+ */
+auto PageRecycler::metrics_export() -> MetricsExported&
+{
+  static MetricsExported& metrics_ = [&]() -> MetricsExported& {
+    static MetricsExported metrics_;
+
+    LOG(INFO) << "Registering PageRecycler metrics...";
+    const auto metric_name = [](std::string_view property) {
+      return batt::to_string("PageRecycler_", property);
+    };
+
+#define ADD_METRIC_(n) global_metric_registry().add(metric_name(#n), metrics_.n)
+
+    ADD_METRIC_(page_id_deletion_reissue);
+
+#undef ADD_METRIC_
+
+    return metrics_;
+  }();
+
+  return metrics_;
+}
+
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 PageRecycler::~PageRecycler() noexcept
@@ -270,20 +303,33 @@ void PageRecycler::join()
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>& page_ids,
-                                                       batt::Grant* grant, i32 depth)
+StatusOr<slot_offset_type> PageRecycler::recycle_pages(
+    const Slice<const PageId>& page_ids, llfs::slot_offset_type offset_as_unique_identifier,
+    batt::Grant* grant, i32 depth)
 {
   BATT_CHECK_GE(depth, 0);
 
   LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids) << "["
                << page_ids.size() << "]"
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
-               << this->name_;
+               << this->name_ << BATT_INSPECT(offset_as_unique_identifier);
 
   if (page_ids.empty()) {
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
+  // Check to see if we have already seen this or a newer request.
+  //
+  if (this->largest_offset_as_unique_identifier_ >= offset_as_unique_identifier) {
+    this->metrics_export().page_id_deletion_reissue.fetch_add(1);
+
+    return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
+  }
+
+  // Update the largest unique identifier.
+  //
+  this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
+
   Optional<slot_offset_type> sync_point = None;
 
   if (depth == 0) {
@@ -297,6 +343,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>
 
     const PageRecyclerOptions& options = this->state_.no_lock().options;
 
+    LLFS_VLOG(1) << "recycle_pages entered grant==NULL case";
     for (PageId page_id : page_ids) {
       StatusOr<batt::Grant> local_grant = [&] {
         const usize needed_size = options.insert_grant_size();
@@ -319,8 +366,9 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>
       BATT_REQUIRE_OK(local_grant);
       {
         auto locked_state = this->state_.lock();
-        StatusOr<slot_offset_type> append_slot =
-            this->insert_to_log(*local_grant, page_id, depth, locked_state);
+        // Writing to recycler log.
+        StatusOr<slot_offset_type> append_slot = this->insert_to_log(
+            *local_grant, page_id, depth, offset_as_unique_identifier, locked_state);
         BATT_REQUIRE_OK(append_slot);
 
         clamp_min_slot(&sync_point, *append_slot);
@@ -329,10 +377,13 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>
   } else {
     BATT_CHECK_LT(depth, (i32)kMaxPageRefDepth) << BATT_INSPECT_RANGE(page_ids);
 
+    LLFS_VLOG(1) << "recycle_pages entered the valid grant case";
+
     auto locked_state = this->state_.lock();
     for (PageId page_id : page_ids) {
+      // Writing to recycler log.
       StatusOr<slot_offset_type> append_slot =
-          this->insert_to_log(*grant, page_id, depth, locked_state);
+          this->insert_to_log(*grant, page_id, depth, offset_as_unique_identifier, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
       clamp_min_slot(&sync_point, *append_slot);
@@ -348,13 +399,13 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>
 StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id, batt::Grant* grant, i32 depth)
 {
   std::array<PageId, 1> page_ids{page_id};
-  return this->recycle_pages(batt::as_slice(page_ids), grant, depth);
+  return this->recycle_pages(batt::as_slice(page_ids), 0, grant, depth);
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 StatusOr<slot_offset_type> PageRecycler::insert_to_log(
-    batt::Grant& grant, PageId page_id, i32 depth,
+    batt::Grant& grant, PageId page_id, i32 depth, slot_offset_type offset_as_unique_identifier,
     batt::Mutex<std::unique_ptr<State>>::Lock& locked_state)
 {
   BATT_CHECK(locked_state.is_held());
@@ -370,6 +421,7 @@ StatusOr<slot_offset_type> PageRecycler::insert_to_log(
               .refresh_slot = None,
               .batch_slot = None,
               .depth = depth,
+              .offset_as_unique_identifier = offset_as_unique_identifier,
           },
           [&](const batt::SmallVecBase<PageToRecycle*>& to_append) -> StatusOr<slot_offset_type> {
             if (to_append.empty()) {
@@ -387,7 +439,7 @@ StatusOr<slot_offset_type> PageRecycler::insert_to_log(
               BATT_REQUIRE_OK(append_slot);
               item->refresh_slot = append_slot->lower_bound;
               last_slot = slot_max(last_slot, append_slot->upper_bound);
-              LLFS_VLOG(1) << "Write " << item << " to the log; last_slot=" << last_slot;
+              LLFS_VLOG(1) << "Write " << *item << " to the log; last_slot=" << last_slot;
             }
             BATT_CHECK_NE(this->slot_writer_.slot_offset(), current_slot);
 
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index c0ba8306..529bbc61 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -55,6 +55,11 @@ class PageRecycler
     CountMetric<u64> page_drop_error_count{0};
   };
 
+  struct MetricsExported {
+    CountMetric<u32> page_id_deletion_reissue{0};
+  };
+  static MetricsExported& metrics_export();
+
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
   static PageCount default_max_buffered_page_count(const PageRecyclerOptions& options);
@@ -105,6 +110,7 @@ class PageRecycler
   // necessarily flushed (see `await_flush`).
   //
   StatusOr<slot_offset_type> recycle_pages(const Slice<const PageId>& page_ids,
+                                           llfs::slot_offset_type offset,
                                            batt::Grant* grant = nullptr, i32 depth = 0);
 
   // Schedule a single page to be recycled.  \see recycle_pages
@@ -198,7 +204,8 @@ class PageRecycler
   explicit PageRecycler(batt::TaskScheduler& scheduler, const std::string& name,
                         PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                         Optional<Batch>&& recovered_batch,
-                        std::unique_ptr<PageRecycler::State>&& state) noexcept;
+                        std::unique_ptr<PageRecycler::State>&& state,
+                        u64 largest_offset_as_unique_identifier_init) noexcept;
 
   void start_recycle_task();
 
@@ -209,6 +216,7 @@ class PageRecycler
   void refresh_grants();
 
   StatusOr<slot_offset_type> insert_to_log(batt::Grant& grant, PageId page_id, i32 depth,
+                                           slot_offset_type offset_as_unique_identifier,
                                            batt::Mutex<std::unique_ptr<State>>::Lock& locked_state);
 
   StatusOr<Batch> prepare_batch(std::vector<PageToRecycle>&& to_recycle);
@@ -250,6 +258,8 @@ class PageRecycler
   Optional<Batch> prepared_batch_;
 
   Optional<slot_offset_type> latest_batch_upper_bound_;
+
+  slot_offset_type largest_offset_as_unique_identifier_;
 };
 
 inline std::ostream& operator<<(std::ostream& out, const PageRecycler::Batch& t)
diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index 41268edd..36eb5b18 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -366,7 +366,7 @@ class FakePageDeleter : public PageDeleter
       // Recursively recycle any newly dead pages.  If we try to recycle the same page multiple
       // times, that is OK, since PageIds are never reused.
       //
-      result = this->test_->recycler_->recycle_pages(as_slice(dead_pages),  //
+      result = this->test_->recycler_->recycle_pages(as_slice(dead_pages), caller_slot,  //
                                                      &recycle_grant, depth + 1);
       BATT_REQUIRE_OK(result);
 
@@ -500,7 +500,7 @@ void PageRecyclerTest::run_crash_recovery_test()
         const std::array<PageId, 1> to_recycle = {root_id};
 
         BATT_DEBUG_INFO("Test - recycle_pages");
-        StatusOr<slot_offset_type> recycle_status = recycler.recycle_pages(to_recycle);
+        StatusOr<slot_offset_type> recycle_status = recycler.recycle_pages(to_recycle, 0);
         if (!recycle_status.ok()) {
           failed = true;
           break;
diff --git a/src/llfs/page_recycler_events.hpp b/src/llfs/page_recycler_events.hpp
index 8fe0e8b0..60a79bbd 100644
--- a/src/llfs/page_recycler_events.hpp
+++ b/src/llfs/page_recycler_events.hpp
@@ -36,6 +36,8 @@ struct PageToRecycle {
   //
   i32 depth;
 
+  slot_offset_type offset_as_unique_identifier;
+
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
   static PageToRecycle make_invalid()
@@ -45,6 +47,7 @@ struct PageToRecycle {
         .refresh_slot = None,
         .batch_slot = None,
         .depth = 0,
+        .offset_as_unique_identifier = 0,
     };
   }
 
@@ -129,12 +132,13 @@ struct PackedPageToRecycle {
 
   little_page_id_int page_id;
   little_u64 batch_slot;
+  u64 offset_as_unique_identifier;
   little_i32 depth;
   u8 flags;
   u8 reserved_[3];
 };
 
-BATT_STATIC_ASSERT_EQ(24, sizeof(PackedPageToRecycle));
+BATT_STATIC_ASSERT_EQ(32, sizeof(PackedPageToRecycle));
 
 inline std::size_t packed_sizeof(const PackedPageToRecycle&)
 {
@@ -161,6 +165,7 @@ inline bool pack_object_to(const PageToRecycle& from, PackedPageToRecycle* to, D
   } else {
     to->batch_slot = 0;
   }
+  to->offset_as_unique_identifier = from.offset_as_unique_identifier;
   return true;
 }
 
@@ -176,6 +181,7 @@ inline StatusOr<PageToRecycle> unpack_object(const PackedPageToRecycle& packed,
         return None;
       }(),
       .depth = packed.depth,
+      .offset_as_unique_identifier = packed.offset_as_unique_identifier,
   };
 }
 
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index 8bd3ecd7..2d687686 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -29,6 +29,7 @@ namespace llfs {
     , latest_batch_{}
     , recycler_uuid_{boost::uuids::random_generator{}()}
     , latest_info_refresh_slot_{}
+    , largest_offset_as_unique_identifier_{0}
 {
   initialize_status_codes();
   LLFS_VLOG(1) << "PageRecyclerRecoveryVisitor()";
@@ -111,6 +112,13 @@ Optional<SlotRange> PageRecyclerRecoveryVisitor::latest_info_refresh_slot() cons
   return this->latest_info_refresh_slot_;
 }
 
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+//
+u64 PageRecyclerRecoveryVisitor::largest_offset() const
+{
+  return this->largest_offset_as_unique_identifier_;
+}
+
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
@@ -140,6 +148,13 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
 
     existing_record.batch_slot = recovered.batch_slot;
   }
+
+  // Save the largest unique offset identifier.
+  //
+  if (this->largest_offset_as_unique_identifier_ < recovered.offset_as_unique_identifier) {
+    this->largest_offset_as_unique_identifier_ = recovered.offset_as_unique_identifier;
+  }
+
   PageToRecycle& to_recycle = iter->second;
   to_recycle.refresh_slot = slot.offset.lower_bound;
 
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index a274e939..5d81d160 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -45,6 +45,8 @@ class PageRecyclerRecoveryVisitor
 
   Optional<SlotRange> latest_info_refresh_slot() const;
 
+  u64 largest_offset() const;
+
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
   Status operator()(const SlotParse&, const PageToRecycle& to_recycle);
@@ -79,6 +81,8 @@ class PageRecyclerRecoveryVisitor
   /** \brief The most recent slot at which recycler info was refreshed.
    */
   Optional<SlotRange> latest_info_refresh_slot_;
+
+  u64 largest_offset_as_unique_identifier_;
 };
 
 }  // namespace llfs
diff --git a/src/llfs/volume_trimmer.cpp b/src/llfs/volume_trimmer.cpp
index 72b3e64c..04ff3a2f 100644
--- a/src/llfs/volume_trimmer.cpp
+++ b/src/llfs/volume_trimmer.cpp
@@ -136,6 +136,7 @@ const usize kTrimEventGrantSize =
 
     // Finish the partially completed trim.
     //
+    LLFS_VLOG(1) << "finishing partial trim...";
     BATT_REQUIRE_OK(trim_volume_log(trimmer_uuid, slot_writer, grant, std::move(*trim_event_info),
                                     std::move(*trimmed_region_info), metadata_refresher, drop_roots,
                                     batt::LogLevel::kWarning));
@@ -143,6 +144,7 @@ const usize kTrimEventGrantSize =
 
   // The trim state is now clean!  Create and return the VolumeTrimmer.
   //
+  LLFS_VLOG(1) << "creating the VolumeTrimmer object...";
   return std::make_unique<VolumeTrimmer>(trimmer_uuid, std::move(name), trim_control, trim_delay,
                                          std::move(log_reader), slot_writer, std::move(drop_roots),
                                          metadata_refresher);
@@ -404,7 +406,8 @@ Status trim_volume_log(const boost::uuids::uuid& trimmer_uuid,
                        VolumeMetadataRefresher& metadata_refresher,
                        const VolumeDropRootsFn& drop_roots, batt::LogLevel error_log_level)
 {
-  LLFS_VLOG(1) << "trim_volume_log()" << BATT_INSPECT(trimmed_region.slot_range);
+  LLFS_VLOG(1) << "trim_volume_log()" << BATT_INSPECT(trimmed_region.slot_range)
+               << BATT_INSPECT(trim_event);
 
   auto on_scope_exit = batt::finally([&] {
     BATT_CHECK_LE(grant.size(), kTrimEventGrantSize);

From f6d43607aa110433998ab21e53b19c33f68ceb3f Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 18 Dec 2024 11:04:53 -0500
Subject: [PATCH 03/29] removed death-test

---
 script                            |  2 +-
 src/llfs/page_allocator_state.cpp |  6 ++---
 src/llfs/volume.test.cpp          | 38 ++++++++++++++-----------------
 3 files changed, 21 insertions(+), 25 deletions(-)

diff --git a/script b/script
index 27ed1304..6d266c2d 160000
--- a/script
+++ b/script
@@ -1 +1 @@
-Subproject commit 27ed13046d0d81974032a4d830565870cb07965a
+Subproject commit 6d266c2de572dde8fcf70085b8fa960084aae663
diff --git a/src/llfs/page_allocator_state.cpp b/src/llfs/page_allocator_state.cpp
index c8d0e492..9ba8ddc3 100644
--- a/src/llfs/page_allocator_state.cpp
+++ b/src/llfs/page_allocator_state.cpp
@@ -458,14 +458,14 @@ PageAllocatorState::ProposalStatus PageAllocatorState::propose(PackedPageAllocat
   // If this is a valid proposal that will cause state change, go through and change the deltas to
   // the new ref count values.
   //
-  LOG(INFO) << "propose (start): txn-ref-count= " << txn->ref_counts.size();
+  LLFS_VLOG(1) << "propose (start): txn-ref-count= " << txn->ref_counts.size();
   if (status == ProposalStatus::kValid) {
     for (PackedPageRefCount& prc : txn->ref_counts) {
-      LOG(INFO) << "prc loop: " << prc;
+      LLFS_VLOG(1) << "reference count: " << prc;
       prc.ref_count = this->calculate_new_ref_count(prc, *user_index);
     }
   }
-  LOG(INFO) << "propose (end): txn-ref-count= " << txn->ref_counts.size();
+  LLFS_VLOG(1) << "propose (end): txn-ref-count= " << txn->ref_counts.size();
 
   return status;
 }
diff --git a/src/llfs/volume.test.cpp b/src/llfs/volume.test.cpp
index d82dd110..aece1356 100644
--- a/src/llfs/volume.test.cpp
+++ b/src/llfs/volume.test.cpp
@@ -2137,11 +2137,9 @@ TEST_F(VolumeSimTest, DeadPageRefCountSimulation)
   }
 }
 
-using DeathTestVolumeSimTest = VolumeSimTest;
-
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-TEST_F(DeathTestVolumeSimTest, DeadPageRefCountVariantSimulation)
+TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
 {
   const u32 max_seeds = batt::getenv_as<u32>("MAX_SEEDS").value_or(1000);
 
@@ -2149,8 +2147,6 @@ TEST_F(DeathTestVolumeSimTest, DeadPageRefCountVariantSimulation)
   // 50 and 40 respectively. To create a perfectly failing test use 50 and 40 as the pre_halt and
   // post_halt values respectively.
 
-  LOG(INFO) << "Starting DeadPageRefCountVariant test for " << max_seeds << " iterations...";
-
   // We will setup a two ranges to pick yield counts from.
   std::vector<std::pair<usize, usize>> ranges{{30, 50}, {30, 100}};
   std::mt19937 rng{0};
@@ -2158,22 +2154,22 @@ TEST_F(DeathTestVolumeSimTest, DeadPageRefCountVariantSimulation)
   std::uniform_int_distribution<usize> pick_value2{ranges[1].first, ranges[1].second};
   usize yield_pre_halt = 0, yield_post_halt = 0;
 
-  auto main_test_block = [&]() {
-    for (u32 current_seed = 0; current_seed < max_seeds; ++current_seed) {
-      LOG_EVERY_N(INFO, 100) << BATT_INSPECT(current_seed) << BATT_INSPECT(max_seeds);
+  for (u32 current_seed = 0; current_seed < max_seeds; ++current_seed) {
+    LOG_EVERY_N(INFO, 100) << BATT_INSPECT(current_seed) << BATT_INSPECT(max_seeds);
 
-      yield_pre_halt = (yield_pre_halt % 2) ? pick_value1(rng) : pick_value2(rng);
-      yield_post_halt = (yield_post_halt % 2) ? pick_value1(rng) : pick_value2(rng);
+    yield_pre_halt = (yield_pre_halt % 2) ? pick_value1(rng) : pick_value2(rng);
+    yield_post_halt = (yield_post_halt % 2) ? pick_value1(rng) : pick_value2(rng);
 
-      ASSERT_NO_FATAL_FAILURE(
-          this->run_dead_page_recovery_test_variant(current_seed, yield_pre_halt, yield_post_halt));
-    }
-  };
-  // Note that we are enabling thread-safe mode for Death-Test.
+    ASSERT_NO_FATAL_FAILURE(
+        this->run_dead_page_recovery_test_variant(current_seed, yield_pre_halt, yield_post_halt));
+  }
+
+  LOG(INFO) << "Ran DeadPageRefCountVariant test for " << max_seeds << " iterations..."
+            << BATT_INSPECT(llfs::PageRecycler::metrics_export().page_id_deletion_reissue);
+
+  // We need to have at least one iteration hitting the issue.
   //
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  EXPECT_EXIT(main_test_block(), testing::ExitedWithCode(6), "FATAL.*");
-  ::testing::FLAGS_gtest_death_test_style = "fast";
+  ASSERT_GE(llfs::PageRecycler::metrics_export().page_id_deletion_reissue, 0);
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -2330,9 +2326,9 @@ void VolumeSimTest::post_halt_processing(RecoverySimState& state, llfs::StorageS
 void VolumeSimTest::run_dead_page_recovery_test(u32 seed, u32 yield_count)
 {
   RecoverySimState state;
-  state.seed = seed;
+  std::mt19937 rng{seed};
 
-  std::mt19937 rng{state.seed};
+  state.seed = seed;
 
   llfs::StorageSimulation sim{batt::StateMachineEntropySource{
       /*entropy_fn=*/[&rng](usize min_value, usize max_value) -> usize {
@@ -2417,7 +2413,7 @@ void VolumeSimTest::run_dead_page_recovery_test_variant(u32 seed, u32 yield_coun
                                                         u32 yield_count_post_halt)
 {
   RecoverySimState state;
-  std::mt19937 rng{state.seed};
+  std::mt19937 rng{seed};
 
   state.seed = seed;
 

From 315d8baac0295781bbfca223daefcc5705990a5d Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 18 Dec 2024 11:44:20 -0500
Subject: [PATCH 04/29] added a histogram plot for the bug hits

---
 src/llfs/committable_page_cache_job.cpp |  3 +--
 src/llfs/volume.test.cpp                | 27 +++++++++++++++++++++++++
 2 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/src/llfs/committable_page_cache_job.cpp b/src/llfs/committable_page_cache_job.cpp
index 788e9dbb..3cfa6d10 100644
--- a/src/llfs/committable_page_cache_job.cpp
+++ b/src/llfs/committable_page_cache_job.cpp
@@ -182,8 +182,7 @@ Status commit(std::unique_ptr<PageCacheJob> job, const JobCommitParams& params,
 Status commit(CommittablePageCacheJob committable_job, const JobCommitParams& params, u64 callers,
               slot_offset_type prev_caller_slot, batt::Watch<slot_offset_type>* durable_caller_slot)
 {
-  auto stat = committable_job.commit_impl(params, callers, prev_caller_slot, durable_caller_slot);
-  return stat;
+  return committable_job.commit_impl(params, callers, prev_caller_slot, durable_caller_slot);
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
diff --git a/src/llfs/volume.test.cpp b/src/llfs/volume.test.cpp
index aece1356..ab80b5bd 100644
--- a/src/llfs/volume.test.cpp
+++ b/src/llfs/volume.test.cpp
@@ -2137,6 +2137,22 @@ TEST_F(VolumeSimTest, DeadPageRefCountSimulation)
   }
 }
 
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+//
+void display_histogram(u32 max_seeds, const std::vector<usize>& histogram)
+{
+  std::string hist_str;
+  usize current_seed = 0;
+  const usize div = max_seeds / histogram.size();
+
+  for (const auto entry : histogram) {
+    hist_str += std::to_string(current_seed) + ":" + std::to_string(entry) + " ";
+    current_seed += div;
+  }
+
+  LOG(INFO) << "Histogram: " << hist_str;
+}
+
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
@@ -2153,6 +2169,10 @@ TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
   std::uniform_int_distribution<usize> pick_value1{ranges[0].first, ranges[0].second};
   std::uniform_int_distribution<usize> pick_value2{ranges[1].first, ranges[1].second};
   usize yield_pre_halt = 0, yield_post_halt = 0;
+  std::vector<usize> histogram(10);
+  const usize div = max_seeds / histogram.size();
+
+  llfs::PageRecycler::metrics_export().page_id_deletion_reissue.set(0);
 
   for (u32 current_seed = 0; current_seed < max_seeds; ++current_seed) {
     LOG_EVERY_N(INFO, 100) << BATT_INSPECT(current_seed) << BATT_INSPECT(max_seeds);
@@ -2160,13 +2180,20 @@ TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
     yield_pre_halt = (yield_pre_halt % 2) ? pick_value1(rng) : pick_value2(rng);
     yield_post_halt = (yield_post_halt % 2) ? pick_value1(rng) : pick_value2(rng);
 
+    usize last_value = llfs::PageRecycler::metrics_export().page_id_deletion_reissue;
+
     ASSERT_NO_FATAL_FAILURE(
         this->run_dead_page_recovery_test_variant(current_seed, yield_pre_halt, yield_post_halt));
+
+    last_value = llfs::PageRecycler::metrics_export().page_id_deletion_reissue - last_value;
+    histogram[current_seed / div] += last_value;
   }
 
   LOG(INFO) << "Ran DeadPageRefCountVariant test for " << max_seeds << " iterations..."
             << BATT_INSPECT(llfs::PageRecycler::metrics_export().page_id_deletion_reissue);
 
+  display_histogram(max_seeds, histogram);
+
   // We need to have atleast one iteration hitting the issue.
   //
   ASSERT_GE(llfs::PageRecycler::metrics_export().page_id_deletion_reissue, 0);

From 8f9264591aff2d8a57a21aca1774f672a4181c6f Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Tue, 7 Jan 2025 15:04:58 -0500
Subject: [PATCH 05/29] temp fix for the test failure

---
 src/llfs/page_recycler_options.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llfs/page_recycler_options.hpp b/src/llfs/page_recycler_options.hpp
index 32b52d90..72902c9e 100644
--- a/src/llfs/page_recycler_options.hpp
+++ b/src/llfs/page_recycler_options.hpp
@@ -22,7 +22,7 @@ class PageRecyclerOptions
  public:
   static constexpr usize kDefaultInfoRefreshRate = 4;
   static constexpr usize kDefaultMaxRefsPerPage = 1 * kMiB;
-  static constexpr usize kDefaultBatchSize = 24;
+  static constexpr usize kDefaultBatchSize = 20;
   static constexpr usize kDefaultRefreshFactor = 2;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -

From 4b68e888dea8e418546d9a9ed2bde672e5fbd0d7 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 8 Jan 2025 14:19:14 -0500
Subject: [PATCH 06/29] page_tracer test case fix

---
 src/llfs/page_recycler.cpp         | 31 +++++++++++++++++++++++-------
 src/llfs/page_recycler.hpp         |  3 +++
 src/llfs/page_recycler_options.hpp |  2 +-
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 0616f905..232fb588 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -72,15 +72,32 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 //
 /*static*/ u64 PageRecycler::calculate_log_size(const PageRecyclerOptions& options,
                                                 Optional<PageCount> max_buffered_page_count)
+{
+  const usize log_size_raw = calculate_log_size_no_padding(options, max_buffered_page_count);
+  const usize padding_bytes = 1 * kKiB;
+
+  return round_up_to_page_size_multiple(log_size_raw + padding_bytes);
+}
+
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+//
+/*static*/ u64 PageRecycler::calculate_log_size_no_padding(
+    const PageRecyclerOptions& options, Optional<PageCount> max_buffered_page_count)
 {
   static const PackedPageRecyclerInfo info = {};
 
-  return round_up_to_page_size_multiple(
-      options.total_page_grant_size() *
-          (1 + max_buffered_page_count.value_or(
-                   PageRecycler::default_max_buffered_page_count(options))) +
-      options.recycle_task_target() + packed_sizeof_slot(info) * (options.info_refresh_rate() + 1) +
-      1 * kKiB);
+  const usize bytes_per_buffered_page = options.total_page_grant_size();
+
+  const usize minimum_required_buffered_pages =
+      1 + max_buffered_page_count.value_or(PageRecycler::default_max_buffered_page_count(options));
+
+  const usize bytes_for_minimum_required_buffered_pages =
+      bytes_per_buffered_page * minimum_required_buffered_pages;
+
+  const usize fixed_size_overhead_bytes =
+      options.recycle_task_target() + packed_sizeof_slot(info) * (options.info_refresh_rate() + 1);
+
+  return bytes_for_minimum_required_buffered_pages + fixed_size_overhead_bytes;
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -88,7 +105,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 /*static*/ PageCount PageRecycler::calculate_max_buffered_page_count(
     const PageRecyclerOptions& options, u64 log_size)
 {
-  const u64 required_log_size = PageRecycler::calculate_log_size(options, PageCount{0});
+  const u64 required_log_size = PageRecycler::calculate_log_size_no_padding(options, PageCount{0});
   if (log_size <= required_log_size) {
     return PageCount{0};
   }
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 529bbc61..c0c5a5ed 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -67,6 +67,9 @@ class PageRecycler
   static u64 calculate_log_size(const PageRecyclerOptions& options,
                                 Optional<PageCount> max_buffered_page_count = None);
 
+  static u64 calculate_log_size_no_padding(const PageRecyclerOptions& options,
+                                           Optional<PageCount> max_buffered_page_count = None);
+
   static PageCount calculate_max_buffered_page_count(const PageRecyclerOptions& options,
                                                      u64 log_size);
 
diff --git a/src/llfs/page_recycler_options.hpp b/src/llfs/page_recycler_options.hpp
index 72902c9e..32b52d90 100644
--- a/src/llfs/page_recycler_options.hpp
+++ b/src/llfs/page_recycler_options.hpp
@@ -22,7 +22,7 @@ class PageRecyclerOptions
  public:
   static constexpr usize kDefaultInfoRefreshRate = 4;
   static constexpr usize kDefaultMaxRefsPerPage = 1 * kMiB;
-  static constexpr usize kDefaultBatchSize = 20;
+  static constexpr usize kDefaultBatchSize = 24;
   static constexpr usize kDefaultRefreshFactor = 2;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -

From 34e1b716668500298eb4f2e2a7312f25e45eec62 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 15 Jan 2025 16:03:23 -0500
Subject: [PATCH 07/29] fixed llfs crash-recovery test case

---
 src/llfs/page_recycler.cpp      |  4 ++--
 src/llfs/page_recycler.test.cpp | 28 +++++++++++++++++++++++++---
 2 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 232fb588..f2fd396a 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -235,7 +235,7 @@ auto PageRecycler::metrics_export() -> MetricsExported&
   static MetricsExported& metrics_ = [&]() -> MetricsExported& {
     static MetricsExported metrics_;
 
-    LOG(INFO) << "Registering PageRecycler metrics...";
+    LLFS_VLOG(1) << "Registering PageRecycler metrics...";
     const auto metric_name = [](std::string_view property) {
       return batt::to_string("PageRecycler_", property);
     };
@@ -335,7 +335,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
-  // Check to see if we have already seen this or newer request.
+  // Check to see if we have already seen this or any newer request.
   //
   if (this->largest_offset_as_unique_identifier_ >= offset_as_unique_identifier) {
     this->metrics_export().page_id_deletion_reissue.fetch_add(1);
diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index 36eb5b18..0977bffa 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -366,7 +366,9 @@ class FakePageDeleter : public PageDeleter
       // Recursively recycle any newly dead pages.  If we try to recycle the same page multiple
       // times, that is OK, since PageIds are never reused.
       //
-      result = this->test_->recycler_->recycle_pages(as_slice(dead_pages), caller_slot,  //
+      std::lock_guard lock(this->recycle_pages_mutex_);
+      result = this->test_->recycler_->recycle_pages(as_slice(dead_pages),
+                                                     this->get_and_incr_unique_offset(),  //
                                                      &recycle_grant, depth + 1);
       BATT_REQUIRE_OK(result);
 
@@ -412,10 +414,28 @@ class FakePageDeleter : public PageDeleter
     this->recursive_recycle_events_.push(failure).IgnoreError();
   }
 
+  slot_offset_type get_and_incr_unique_offset()
+  {
+    return this->unique_offset_++;
+  }
+
+  void lock_mutex()
+  {
+    recycle_pages_mutex_.lock();
+  }
+
+  void unlock_mutex()
+  {
+    recycle_pages_mutex_.unlock();
+  }
+
   PageRecyclerTest* test_;
   std::unordered_map<boost::uuids::uuid, slot_offset_type, boost::hash<boost::uuids::uuid>>
       current_slot_;
   batt::Queue<StatusOr<slot_offset_type>> recursive_recycle_events_;
+
+  slot_offset_type unique_offset_{1};
+  std::mutex recycle_pages_mutex_;
 };
 
 TEST_F(PageRecyclerTest, CrashRecovery)
@@ -435,7 +455,7 @@ TEST_F(PageRecyclerTest, CrashRecovery)
 
 void PageRecyclerTest::run_crash_recovery_test()
 {
-  const usize fake_page_count = 256;
+  const usize fake_page_count = 190;
   const u32 max_branching_factor = 8;
 
   const auto options = llfs::PageRecyclerOptions{}  //
@@ -500,7 +520,9 @@ void PageRecyclerTest::run_crash_recovery_test()
         const std::array<PageId, 1> to_recycle = {root_id};
 
         BATT_DEBUG_INFO("Test - recycle_pages");
-        StatusOr<slot_offset_type> recycle_status = recycler.recycle_pages(to_recycle, 0);
+        std::lock_guard lock(fake_deleter.recycle_pages_mutex_);
+        StatusOr<slot_offset_type> recycle_status =
+            recycler.recycle_pages(to_recycle, fake_deleter.get_and_incr_unique_offset());
         if (!recycle_status.ok()) {
           failed = true;
           break;

From 550ca3cff0ef9a0809072b89721f619153ff170d Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 15 Jan 2025 22:09:01 -0500
Subject: [PATCH 08/29] updated the test page_count

---
 src/llfs/page_recycler.test.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index 0977bffa..b545511d 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -455,7 +455,7 @@ TEST_F(PageRecyclerTest, CrashRecovery)
 
 void PageRecyclerTest::run_crash_recovery_test()
 {
-  const usize fake_page_count = 190;
+  const usize fake_page_count = 180;
   const u32 max_branching_factor = 8;
 
   const auto options = llfs::PageRecyclerOptions{}  //

From cdf7ea7413fc3e7df8c53fff519064fa32c34034 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Thu, 16 Jan 2025 14:02:40 -0500
Subject: [PATCH 09/29] updated test cases

---
 src/llfs/page_recycler.cpp      |  6 ++-
 src/llfs/page_recycler.hpp      |  4 +-
 src/llfs/page_recycler.test.cpp | 75 ++++++++++++++++++---------------
 3 files changed, 46 insertions(+), 39 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index f2fd396a..84aa0c0c 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -413,10 +413,12 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id, batt::Grant* grant, i32 depth)
+StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id,
+                                                      slot_offset_type unique_offset,
+                                                      batt::Grant* grant, i32 depth)
 {
   std::array<PageId, 1> page_ids{page_id};
-  return this->recycle_pages(batt::as_slice(page_ids), 0, grant, depth);
+  return this->recycle_pages(batt::as_slice(page_ids), unique_offset, grant, depth);
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index c0c5a5ed..b722c6b5 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -118,8 +118,8 @@ class PageRecycler
 
   // Schedule a single page to be recycled.  \see recycle_pages
   //
-  StatusOr<slot_offset_type> recycle_page(PageId page_id, batt::Grant* grant = nullptr,
-                                          i32 depth = 0);
+  StatusOr<slot_offset_type> recycle_page(PageId page_id, slot_offset_type unique_offset,
+                                          batt::Grant* grant = nullptr, i32 depth = 0);
 
   // Waits for the given slot to be flushed to durable storage.
   //
diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index b545511d..81ceacd9 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -228,6 +228,11 @@ class PageRecyclerTest : public ::testing::Test
         });
   }
 
+  slot_offset_type get_and_incr_unique_offset()
+  {
+    return this->unique_offset_++;
+  }
+
   //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 
   // Actual fake page data for crash recovery tetst.
@@ -266,6 +271,9 @@ class PageRecyclerTest : public ::testing::Test
   // The owning pointer for the recycler.
   //
   std::unique_ptr<llfs::PageRecycler> unique_page_recycler_;
+
+  slot_offset_type unique_offset_{1};
+  std::mutex recycle_pages_mutex_;
 };
 
 class FakePageDeleter : public PageDeleter
@@ -366,9 +374,9 @@ class FakePageDeleter : public PageDeleter
       // Recursively recycle any newly dead pages.  If we try to recycle the same page multiple
       // times, that is OK, since PageIds are never reused.
       //
-      std::lock_guard lock(this->recycle_pages_mutex_);
+      std::lock_guard lock(this->test_->recycle_pages_mutex_);
       result = this->test_->recycler_->recycle_pages(as_slice(dead_pages),
-                                                     this->get_and_incr_unique_offset(),  //
+                                                     this->test_->get_and_incr_unique_offset(),  //
                                                      &recycle_grant, depth + 1);
       BATT_REQUIRE_OK(result);
 
@@ -414,28 +422,10 @@ class FakePageDeleter : public PageDeleter
     this->recursive_recycle_events_.push(failure).IgnoreError();
   }
 
-  slot_offset_type get_and_incr_unique_offset()
-  {
-    return this->unique_offset_++;
-  }
-
-  void lock_mutex()
-  {
-    recycle_pages_mutex_.lock();
-  }
-
-  void unlock_mutex()
-  {
-    recycle_pages_mutex_.unlock();
-  }
-
   PageRecyclerTest* test_;
   std::unordered_map<boost::uuids::uuid, slot_offset_type, boost::hash<boost::uuids::uuid>>
       current_slot_;
   batt::Queue<StatusOr<slot_offset_type>> recursive_recycle_events_;
-
-  slot_offset_type unique_offset_{1};
-  std::mutex recycle_pages_mutex_;
 };
 
 TEST_F(PageRecyclerTest, CrashRecovery)
@@ -520,9 +510,9 @@ void PageRecyclerTest::run_crash_recovery_test()
         const std::array<PageId, 1> to_recycle = {root_id};
 
         BATT_DEBUG_INFO("Test - recycle_pages");
-        std::lock_guard lock(fake_deleter.recycle_pages_mutex_);
+        std::lock_guard lock(this->recycle_pages_mutex_);
         StatusOr<slot_offset_type> recycle_status =
-            recycler.recycle_pages(to_recycle, fake_deleter.get_and_incr_unique_offset());
+            recycler.recycle_pages(to_recycle, this->get_and_incr_unique_offset());
         if (!recycle_status.ok()) {
           failed = true;
           break;
@@ -676,16 +666,24 @@ TEST_F(PageRecyclerTest, DuplicatePageDeletion)
                                << BATT_INSPECT(this->p_mem_log_->driver().get_trim_pos());
 
                   BATT_CHECK_EQ(recycle_depth + 1, 1);
-                  BATT_CHECK_OK(this->recycler_->recycle_page(this->fake_page_id_.back(),
-                                                              &recycle_grant, recycle_depth + 1));
+                  {
+                    std::lock_guard lock(this->recycle_pages_mutex_);
+                    BATT_CHECK_OK(this->recycler_->recycle_page(this->fake_page_id_.back(),
+                                                                this->get_and_incr_unique_offset(),
+                                                                &recycle_grant, recycle_depth + 1));
+                  }
                   delete_count.fetch_add(1);
                   BATT_CHECK_OK(continue_count.await_equal(1));
                   return batt::OkStatus();
                 }));
 
         for (usize i = 0; i < this->fake_page_id_.size() - 2; ++i) {
-          batt::StatusOr<llfs::slot_offset_type> result =
-              this->recycler_->recycle_page(this->fake_page_id_[i]);
+          {
+            std::lock_guard lock(this->recycle_pages_mutex_);
+            batt::StatusOr<llfs::slot_offset_type> result = this->recycler_->recycle_page(
+                this->fake_page_id_[i], this->get_and_incr_unique_offset());
+            ASSERT_TRUE(result.ok()) << BATT_INSPECT(result);
+          }
 
           if (i == 0) {
             LLFS_VLOG(1) << "waiting for " << BATT_INSPECT(this->fake_page_id_[0]);
@@ -730,7 +728,7 @@ TEST_F(PageRecyclerTest, DuplicatePageDeletion)
                     }));
           }
 
-          ASSERT_TRUE(result.ok()) << BATT_INSPECT(result);
+          //ASSERT_TRUE(result.ok()) << BATT_INSPECT(result);
         }
 
         continue_count.fetch_add(1);  // 0 -> 1
@@ -784,8 +782,10 @@ TEST_F(PageRecyclerTest, DuplicatePageDeletion)
         // everything else the recycler has queued.
         //
         {
+          std::lock_guard lock(this->recycle_pages_mutex_);
           batt::StatusOr<llfs::slot_offset_type> result =
-              this->recycler_->recycle_page(this->fake_page_id_[this->fake_page_id_.size() - 2]);
+              this->recycler_->recycle_page(this->fake_page_id_[this->fake_page_id_.size() - 2],
+                                            this->get_and_incr_unique_offset());
         }
 
         // Wait for all pages to be deleted.
@@ -847,8 +847,9 @@ TEST_F(PageRecyclerTest, NoRefreshBatchedPage)
         // Give some PageIds to delete.
         //
         {
-          batt::StatusOr<llfs::slot_offset_type> result =
-              this->recycler_->recycle_page(this->fake_page_id_[0]);
+          std::lock_guard lock(this->recycle_pages_mutex_);
+          batt::StatusOr<llfs::slot_offset_type> result = this->recycler_->recycle_page(
+              this->fake_page_id_[0], this->get_and_incr_unique_offset());
 
           ASSERT_TRUE(result.ok()) << BATT_INSPECT(result);
         }
@@ -861,12 +862,16 @@ TEST_F(PageRecyclerTest, NoRefreshBatchedPage)
         // refresh the page we just recycled.
         //
         for (usize i = 1; i < this->fake_page_id_.size(); ++i) {
-          batt::StatusOr<llfs::slot_offset_type> result =
-              this->recycler_->recycle_page(this->fake_page_id_[i]);
-          ASSERT_TRUE(result.ok()) << BATT_INSPECT(result);
+          {
+            std::lock_guard lock(this->recycle_pages_mutex_);
+            batt::StatusOr<llfs::slot_offset_type> result = this->recycler_->recycle_page(
+                this->fake_page_id_[i], this->get_and_incr_unique_offset());
+
+            ASSERT_TRUE(result.ok()) << BATT_INSPECT(result);
+            batt::Status flush_status = this->recycler_->await_flush(*result);
 
-          batt::Status flush_status = this->recycler_->await_flush(*result);
-          EXPECT_TRUE(flush_status.ok()) << BATT_INSPECT(flush_status);
+            EXPECT_TRUE(flush_status.ok()) << BATT_INSPECT(flush_status);
+          }
         }
 
         this->save_log_snapshot();

From 1f2d280570bf45800e073fca58d33eee0d35ba4c Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Tue, 21 Jan 2025 17:22:40 -0500
Subject: [PATCH 10/29] added support for recycler calling recycle_pages with
 depth>0

---
 src/llfs/page_recycler.cpp      | 17 ++++++++------
 src/llfs/page_recycler.test.cpp | 40 +++++++++++++++++++++------------
 2 files changed, 36 insertions(+), 21 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 84aa0c0c..5323a269 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -335,18 +335,21 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
-  // Check to see if we have already seen this or any newer request.
+  // Check to make sure the request was never seen before. Note that we do the unique-identifier
+  // check only for depth=0, as that is coming from the external client side. For all
+  // recycler-internal calls (with depth > 0) we simply accept the request.
   //
-  if (this->largest_offset_as_unique_identifier_ >= offset_as_unique_identifier) {
+  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier || depth != 0) {
+    // Update the largest unique identifier and continue.
+    //
+    this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
+  } else {
+    // Looks like this is a repost of some old request, so update some status and ignore/return.
+    //
     this->metrics_export().page_id_deletion_reissue.fetch_add(1);
-
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
-  // Update the largest unique identifier.
-  //
-  this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
-
   Optional<slot_offset_type> sync_point = None;
 
   if (depth == 0) {
diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index 81ceacd9..8bf9b630 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -228,9 +228,11 @@ class PageRecyclerTest : public ::testing::Test
         });
   }
 
+  // Returns an ever-increasing unique_offset value for page recycler calls.
+  //
   slot_offset_type get_and_incr_unique_offset()
   {
-    return this->unique_offset_++;
+    return ++this->unique_offset_;
   }
 
   //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -272,7 +274,12 @@ class PageRecyclerTest : public ::testing::Test
   //
   std::unique_ptr<llfs::PageRecycler> unique_page_recycler_;
 
-  slot_offset_type unique_offset_{1};
+  // This is to track unique_offset for PageRecycler tests.
+  //
+  slot_offset_type unique_offset_{0};
+
+  // This mutex is to ensure serialized access to unique_offset counter.
+  //
   std::mutex recycle_pages_mutex_;
 };
 
@@ -445,6 +452,9 @@ TEST_F(PageRecyclerTest, CrashRecovery)
 
 void PageRecyclerTest::run_crash_recovery_test()
 {
+  // Note that increasing the page_count here most likely impacts the test execution as we might run
+  // out of grant space.
+  //
   const usize fake_page_count = 180;
   const u32 max_branching_factor = 8;
 
@@ -510,19 +520,21 @@ void PageRecyclerTest::run_crash_recovery_test()
         const std::array<PageId, 1> to_recycle = {root_id};
 
         BATT_DEBUG_INFO("Test - recycle_pages");
-        std::lock_guard lock(this->recycle_pages_mutex_);
-        StatusOr<slot_offset_type> recycle_status =
-            recycler.recycle_pages(to_recycle, this->get_and_incr_unique_offset());
-        if (!recycle_status.ok()) {
-          failed = true;
-          break;
-        }
+        {
+          std::lock_guard lock(this->recycle_pages_mutex_);
+          StatusOr<slot_offset_type> recycle_status =
+              recycler.recycle_pages(to_recycle, this->get_and_incr_unique_offset());
+          if (!recycle_status.ok()) {
+            failed = true;
+            break;
+          }
 
-        BATT_DEBUG_INFO("Test - await_flush");
-        Status flush_status = recycler.await_flush(*recycle_status);
-        if (!flush_status.ok()) {
-          failed = true;
-          break;
+          BATT_DEBUG_INFO("Test - await_flush");
+          Status flush_status = recycler.await_flush(*recycle_status);
+          if (!flush_status.ok()) {
+            failed = true;
+            break;
+          }
         }
 
         ++progress;

From 6216416853e4e46b884e31af9e1da98d08f3eb7d Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Tue, 21 Jan 2025 17:50:31 -0500
Subject: [PATCH 11/29] minor style changes...

---
 src/llfs/page_allocator_state.cpp           |  4 ++--
 src/llfs/page_recycler.cpp                  | 10 ++++++----
 src/llfs/page_recycler_recovery_visitor.cpp |  2 +-
 src/llfs/page_recycler_recovery_visitor.hpp |  6 ++++--
 4 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/llfs/page_allocator_state.cpp b/src/llfs/page_allocator_state.cpp
index 9ba8ddc3..5b06c089 100644
--- a/src/llfs/page_allocator_state.cpp
+++ b/src/llfs/page_allocator_state.cpp
@@ -568,14 +568,14 @@ void run_ref_count_update_sanity_checks(const PageIdFactory& id_factory,
         << BATT_INSPECT(new_count) << ")" << BATT_INSPECT(delta_generation) << BATT_INSPECT(index);
   };
 
-  LLFS_VLOG(2) << "rrcusc: " << debug_info;
+  LLFS_VLOG(2) << debug_info;
 
   BATT_CHECK_GE(old_count, 0) << "ref counts must be non-negative" << debug_info;
   BATT_CHECK_GE(new_count, 0) << "ref counts must be non-negative" << debug_info;
 
   if (old_count == new_count) {
     BATT_CHECK_EQ(delta.ref_count, 0)
-        << "delta was non-zero but count did not change (bbora)" << debug_info;
+        << "delta was non-zero but count did not change" << debug_info;
     return;
   }
 
diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 5323a269..bc4025bf 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -73,7 +73,8 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 /*static*/ u64 PageRecycler::calculate_log_size(const PageRecyclerOptions& options,
                                                 Optional<PageCount> max_buffered_page_count)
 {
-  const usize log_size_raw = calculate_log_size_no_padding(options, max_buffered_page_count);
+  const usize log_size_raw =
+      PageRecycler::calculate_log_size_no_padding(options, max_buffered_page_count);
   const usize padding_bytes = 1 * kKiB;
 
   return round_up_to_page_size_multiple(log_size_raw + padding_bytes);
@@ -175,7 +176,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 
   return std::unique_ptr<PageRecycler>{
       new PageRecycler(scheduler, std::string{name}, page_deleter, std::move(*recovered_log),
-                       std::move(latest_batch), std::move(state), visitor.largest_offset())};
+                       std::move(latest_batch), std::move(state), visitor.largest_unique_offset())};
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -184,7 +185,7 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
                            PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                            Optional<Batch>&& recovered_batch,
                            std::unique_ptr<PageRecycler::State>&& state,
-                           u64 largest_offset_as_unique_identifier_init) noexcept
+                           llfs::slot_offset_type largest_offset_as_unique_identifier_init) noexcept
     : scheduler_{scheduler}
     , name_{name}
     , page_deleter_{page_deleter}
@@ -329,7 +330,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids) << "["
                << page_ids.size() << "]"
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
-               << this->name_ << BATT_INSPECT(offset_as_unique_identifier);
+               << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
+               << BATT_INSPECT(this->largest_offset_as_unique_identifier_);
 
   if (page_ids.empty()) {
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index 2d687686..4dd3e778 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -114,7 +114,7 @@ Optional<SlotRange> PageRecyclerRecoveryVisitor::latest_info_refresh_slot() cons
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-u64 PageRecyclerRecoveryVisitor::largest_offset() const
+slot_offset_type PageRecyclerRecoveryVisitor::largest_unique_offset() const
 {
   return this->largest_offset_as_unique_identifier_;
 }
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index 5d81d160..2b749428 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -45,7 +45,7 @@ class PageRecyclerRecoveryVisitor
 
   Optional<SlotRange> latest_info_refresh_slot() const;
 
-  u64 largest_offset() const;
+  slot_offset_type largest_unique_offset() const;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -82,7 +82,9 @@ class PageRecyclerRecoveryVisitor
    */
   Optional<SlotRange> latest_info_refresh_slot_;
 
-  u64 largest_offset_as_unique_identifier_;
+  // This is to track largest unique_offset value during recovery.
+  //
+  slot_offset_type largest_offset_as_unique_identifier_;
 };
 
 }  // namespace llfs

From 20cf66ac6c9579618b7ec5d77164266ab625e557 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 22 Jan 2025 13:38:31 -0500
Subject: [PATCH 12/29] submodule update

---
 script | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script b/script
index 6d266c2d..ca547de6 160000
--- a/script
+++ b/script
@@ -1 +1 @@
-Subproject commit 6d266c2de572dde8fcf70085b8fa960084aae663
+Subproject commit ca547de64f9673981e8098925afd04243b1e7af7

From 18020d7386786c2bcdf7ed5fc8b9688ea720d191 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Fri, 31 Jan 2025 12:10:57 -0500
Subject: [PATCH 13/29] added support for partial dead page processing

---
 src/llfs/page_recycler.cpp                  | 81 ++++++++++++++++-----
 src/llfs/page_recycler.hpp                  |  7 +-
 src/llfs/page_recycler_events.hpp           |  9 ++-
 src/llfs/page_recycler_recovery_visitor.cpp | 19 ++++-
 src/llfs/page_recycler_recovery_visitor.hpp |  5 ++
 5 files changed, 98 insertions(+), 23 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index bc4025bf..b078d0ab 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -176,7 +176,8 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 
   return std::unique_ptr<PageRecycler>{
       new PageRecycler(scheduler, std::string{name}, page_deleter, std::move(*recovered_log),
-                       std::move(latest_batch), std::move(state), visitor.largest_unique_offset())};
+                       std::move(latest_batch), std::move(state), visitor.largest_unique_offset(),
+                       visitor.page_index())};
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -185,7 +186,8 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
                            PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                            Optional<Batch>&& recovered_batch,
                            std::unique_ptr<PageRecycler::State>&& state,
-                           llfs::slot_offset_type largest_offset_as_unique_identifier_init) noexcept
+                           llfs::slot_offset_type largest_offset_as_unique_identifier_init,
+                           u16 page_index_init) noexcept
     : scheduler_{scheduler}
     , name_{name}
     , page_deleter_{page_deleter}
@@ -200,6 +202,7 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
     , metrics_{}
     , prepared_batch_{std::move(recovered_batch)}
     , largest_offset_as_unique_identifier_{largest_offset_as_unique_identifier_init}
+    , largest_page_index_{page_index_init}
 {
   const PageRecyclerOptions& options = this->state_.no_lock().options;
 
@@ -319,21 +322,45 @@ void PageRecycler::join()
   }
 }
 
+bool PageRecycler::is_recycle_pages_allowed(const Slice<const PageId>& page_ids,
+                                            llfs::slot_offset_type offset_as_unique_identifier,
+                                            i32 depth)
+{
+  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier) {
+    // Update the largest unique identifier and index.
+    //
+    this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
+    this->largest_page_index_ = 0;
+    return true;
+
+  } else if (depth > 0 &&
+             (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
+              this->largest_page_index_ < page_ids.size())) {
+    return true;
+  } else {
+    // Look like this is a repost of some old request thus update some status and ignore/return.
+    //
+    this->metrics_export().page_id_deletion_reissue.fetch_add(1);
+    return false;
+  }
+}
+
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 StatusOr<slot_offset_type> PageRecycler::recycle_pages(
-    const Slice<const PageId>& page_ids, llfs::slot_offset_type offset_as_unique_identifier,
+    const Slice<const PageId>& page_ids_in, llfs::slot_offset_type offset_as_unique_identifier,
     batt::Grant* grant, i32 depth)
 {
   BATT_CHECK_GE(depth, 0);
 
-  LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids) << "["
-               << page_ids.size() << "]"
+  LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids_in) << "["
+               << page_ids_in.size() << "]"
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
                << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
-               << BATT_INSPECT(this->largest_offset_as_unique_identifier_);
+               << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
+               << BATT_INSPECT(this->largest_page_index_);
 
-  if (page_ids.empty()) {
+  if (page_ids_in.empty()) {
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
@@ -341,16 +368,20 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   // for depth=0 as that's coming from the external client side. For all recycler internal calls
   // (with depth > 0) we will simply accept the request.
   //
-  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier || depth != 0) {
-    // Update the largest unique identifier and continue.
-    //
-    this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
-  } else {
-    // Look like this is a repost of some old request thus update some status and ignore/return.
-    //
-    this->metrics_export().page_id_deletion_reissue.fetch_add(1);
-    return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
+  {
+    auto locked_state = this->state_.lock();
+    if (!is_recycle_pages_allowed(page_ids_in, offset_as_unique_identifier, depth)) {
+      return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
+    }
+  }
+
+  // Sort the pages.
+  std::vector<PageId> page_ids_list(page_ids_in.begin(), page_ids_in.end());
+  if (depth == 0) {
+    std::sort(page_ids_list.begin(), page_ids_list.end());
+    page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
   }
+  auto page_ids = batt::as_slice(page_ids_list);
 
   Optional<slot_offset_type> sync_point = None;
 
@@ -390,9 +421,14 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
         auto locked_state = this->state_.lock();
         // Writing to recycler log.
         StatusOr<slot_offset_type> append_slot = this->insert_to_log(
-            *local_grant, page_id, depth, offset_as_unique_identifier, locked_state);
+            *local_grant, page_id, depth, this->largest_offset_as_unique_identifier_,
+            this->largest_page_index_, locked_state);
         BATT_REQUIRE_OK(append_slot);
 
+        if (depth == 0) {
+          ++this->largest_page_index_;
+        }
+
         clamp_min_slot(&sync_point, *append_slot);
       }
     }
@@ -402,12 +438,18 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
     LLFS_VLOG(1) << "recycle_pages entered the valid grant case";
 
     auto locked_state = this->state_.lock();
+
     for (PageId page_id : page_ids) {
       // Writing to recycler log.
       StatusOr<slot_offset_type> append_slot =
-          this->insert_to_log(*grant, page_id, depth, offset_as_unique_identifier, locked_state);
+          this->insert_to_log(*grant, page_id, depth, this->largest_offset_as_unique_identifier_,
+                              this->largest_page_index_, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
+      if (depth == 0) {
+        ++this->largest_page_index_;
+      }
+
       clamp_min_slot(&sync_point, *append_slot);
     }
   }
@@ -430,7 +472,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id,
 //
 StatusOr<slot_offset_type> PageRecycler::insert_to_log(
     batt::Grant& grant, PageId page_id, i32 depth, slot_offset_type offset_as_unique_identifier,
-    batt::Mutex<std::unique_ptr<State>>::Lock& locked_state)
+    u16 page_index, batt::Mutex<std::unique_ptr<State>>::Lock& locked_state)
 {
   BATT_CHECK(locked_state.is_held());
 
@@ -446,6 +488,7 @@ StatusOr<slot_offset_type> PageRecycler::insert_to_log(
               .batch_slot = None,
               .depth = depth,
               .offset_as_unique_identifier = offset_as_unique_identifier,
+              .page_index = page_index,
           },
           [&](const batt::SmallVecBase<PageToRecycle*>& to_append) -> StatusOr<slot_offset_type> {
             if (to_append.empty()) {
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index b722c6b5..66ea1bee 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -208,18 +208,22 @@ class PageRecycler
                         PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                         Optional<Batch>&& recovered_batch,
                         std::unique_ptr<PageRecycler::State>&& state,
-                        u64 largest_offset_as_unique_identifier_init) noexcept;
+                        u64 largest_offset_as_unique_identifier_init, u16 page_index_init) noexcept;
 
   void start_recycle_task();
 
   void recycle_task_main();
 
+  bool is_recycle_pages_allowed(const Slice<const PageId>& page_ids,
+                                llfs::slot_offset_type offset_as_unique_identifier, i32 depth);
+
   // MUST be called only on the recycle task or the ctor.
   //
   void refresh_grants();
 
   StatusOr<slot_offset_type> insert_to_log(batt::Grant& grant, PageId page_id, i32 depth,
                                            slot_offset_type offset_as_unique_identifier,
+                                           u16 page_index,
                                            batt::Mutex<std::unique_ptr<State>>::Lock& locked_state);
 
   StatusOr<Batch> prepare_batch(std::vector<PageToRecycle>&& to_recycle);
@@ -263,6 +267,7 @@ class PageRecycler
   Optional<slot_offset_type> latest_batch_upper_bound_;
 
   slot_offset_type largest_offset_as_unique_identifier_;
+  u16 largest_page_index_;
 };
 
 inline std::ostream& operator<<(std::ostream& out, const PageRecycler::Batch& t)
diff --git a/src/llfs/page_recycler_events.hpp b/src/llfs/page_recycler_events.hpp
index 2fdfc799..89e3072d 100644
--- a/src/llfs/page_recycler_events.hpp
+++ b/src/llfs/page_recycler_events.hpp
@@ -39,7 +39,10 @@ struct PageToRecycle {
   //
   i32 depth;
 
+  // The offset given by volume trimmer.
+  //
   slot_offset_type offset_as_unique_identifier;
+  u16 page_index;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -51,6 +54,7 @@ struct PageToRecycle {
         .batch_slot = None,
         .depth = 0,
         .offset_as_unique_identifier = 0,
+        .page_index = 0,
     };
   }
 
@@ -137,8 +141,9 @@ struct PackedPageToRecycle {
   little_u64 batch_slot;
   u64 offset_as_unique_identifier;
   little_i32 depth;
+  u16 page_index;
   u8 flags;
-  u8 reserved_[3];
+  u8 reserved_[1];
 };
 
 BATT_STATIC_ASSERT_EQ(32, sizeof(PackedPageToRecycle));
@@ -169,6 +174,7 @@ inline bool pack_object_to(const PageToRecycle& from, PackedPageToRecycle* to, D
     to->batch_slot = 0;
   }
   to->offset_as_unique_identifier = from.offset_as_unique_identifier;
+  to->page_index = from.page_index;
   return true;
 }
 
@@ -185,6 +191,7 @@ inline StatusOr<PageToRecycle> unpack_object(const PackedPageToRecycle& packed,
       }(),
       .depth = packed.depth,
       .offset_as_unique_identifier = packed.offset_as_unique_identifier,
+      .page_index = packed.page_index,
   };
 }
 
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index 4dd3e778..e482b501 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -30,6 +30,7 @@ namespace llfs {
     , recycler_uuid_{boost::uuids::random_generator{}()}
     , latest_info_refresh_slot_{}
     , largest_offset_as_unique_identifier_{0}
+    , largest_page_index_{0}
 {
   initialize_status_codes();
   LLFS_VLOG(1) << "PageRecyclerRecoveryVisitor()";
@@ -119,13 +120,22 @@ slot_offset_type PageRecyclerRecoveryVisitor::largest_unique_offset() const
   return this->largest_offset_as_unique_identifier_;
 }
 
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+//
+u16 PageRecyclerRecoveryVisitor::page_index() const
+{
+  return this->largest_page_index_;
+}
+
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
                                                const PageToRecycle& recovered)
 {
   LLFS_VLOG(1) << "Recovered slot: " << slot.offset << " " << recovered
-               << BATT_INSPECT((bool)this->latest_batch_);
+               << BATT_INSPECT((bool)this->latest_batch_)
+               << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
+               << BATT_INSPECT(this->largest_page_index_);
 
   // Add the new record, or verify that the existing one and the new one are equivalent.
   //
@@ -149,10 +159,15 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
     existing_record.batch_slot = recovered.batch_slot;
   }
 
-  // Save the largest unique offset identifier.
+  // Save the largest unique offset identifier. Note that if offsets are same then we need to only
+  // update the largest page_index.
   //
   if (this->largest_offset_as_unique_identifier_ < recovered.offset_as_unique_identifier) {
     this->largest_offset_as_unique_identifier_ = recovered.offset_as_unique_identifier;
+    this->largest_page_index_ = recovered.page_index;
+  } else if (this->largest_offset_as_unique_identifier_ == recovered.offset_as_unique_identifier &&
+             this->largest_page_index_ < recovered.page_index) {
+    this->largest_page_index_ = recovered.page_index;
   }
 
   PageToRecycle& to_recycle = iter->second;
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index 2b749428..375684fb 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -46,6 +46,7 @@ class PageRecyclerRecoveryVisitor
   Optional<SlotRange> latest_info_refresh_slot() const;
 
   slot_offset_type largest_unique_offset() const;
+  u16 page_index() const;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -85,6 +86,10 @@ class PageRecyclerRecoveryVisitor
   // This is to track largest unique_offset value during recovery.
   //
   slot_offset_type largest_offset_as_unique_identifier_;
+
+  // This tracks the largest page_index seen so far for a given offset.
+  //
+  u16 largest_page_index_;
 };
 
 }  // namespace llfs

From f9a97fcf7a03eff163c68c41df3967e50a0b7e5b Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Fri, 31 Jan 2025 12:21:10 -0500
Subject: [PATCH 14/29] removed attachment-index display code

---
 src/llfs/page_allocator_state.cpp | 11 +++++------
 src/llfs/page_allocator_state.hpp |  2 +-
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/src/llfs/page_allocator_state.cpp b/src/llfs/page_allocator_state.cpp
index 5b06c089..0446597a 100644
--- a/src/llfs/page_allocator_state.cpp
+++ b/src/llfs/page_allocator_state.cpp
@@ -462,7 +462,7 @@ PageAllocatorState::ProposalStatus PageAllocatorState::propose(PackedPageAllocat
   if (status == ProposalStatus::kValid) {
     for (PackedPageRefCount& prc : txn->ref_counts) {
       LLFS_VLOG(1) << "reference count: " << prc;
-      prc.ref_count = this->calculate_new_ref_count(prc, *user_index);
+      prc.ref_count = this->calculate_new_ref_count(prc);
     }
   }
   LLFS_VLOG(1) << "propose (end): txn-ref-count= " << txn->ref_counts.size();
@@ -559,13 +559,13 @@ namespace {
 void run_ref_count_update_sanity_checks(const PageIdFactory& id_factory,
                                         const PackedPageRefCount& delta,
                                         const PageAllocatorRefCount& obj, i32 old_count,
-                                        i32 new_count, const u32 index)
+                                        i32 new_count)
 {
   const auto debug_info = [&](std::ostream& out) {
     const page_generation_int delta_generation = id_factory.get_generation(delta.page_id.unpack());
 
     out << "(" << BATT_INSPECT(delta) << BATT_INSPECT(obj) << BATT_INSPECT(old_count)
-        << BATT_INSPECT(new_count) << ")" << BATT_INSPECT(delta_generation) << BATT_INSPECT(index);
+        << BATT_INSPECT(new_count) << ")" << BATT_INSPECT(delta_generation);
   };
 
   LLFS_VLOG(2) << debug_info;
@@ -642,8 +642,7 @@ void run_ref_count_update_sanity_checks(const PageIdFactory& id_factory,
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-i32 PageAllocatorState::calculate_new_ref_count(const PackedPageRefCount& delta,
-                                                const u32 index) const
+i32 PageAllocatorState::calculate_new_ref_count(const PackedPageRefCount& delta) const
 {
   const PageId page_id = delta.page_id.unpack();
   const page_id_int physical_page = this->page_ids_.get_physical_page(page_id);
@@ -666,7 +665,7 @@ i32 PageAllocatorState::calculate_new_ref_count(const PackedPageRefCount& delta,
     new_count = old_count + delta.ref_count;
   }
 
-  run_ref_count_update_sanity_checks(this->page_ids_, delta, obj, old_count, new_count, index);
+  run_ref_count_update_sanity_checks(this->page_ids_, delta, obj, old_count, new_count);
 
   return new_count;
 }
diff --git a/src/llfs/page_allocator_state.hpp b/src/llfs/page_allocator_state.hpp
index 4c1e867a..f7b790ad 100644
--- a/src/llfs/page_allocator_state.hpp
+++ b/src/llfs/page_allocator_state.hpp
@@ -242,7 +242,7 @@ class PageAllocatorState : public PageAllocatorStateNoLock
 
   // Returns the new ref count that will result from applying the delta to the passed obj.
   //
-  i32 calculate_new_ref_count(const PackedPageRefCount& delta, const u32 index) const;
+  i32 calculate_new_ref_count(const PackedPageRefCount& delta) const;
 
   /** \brief If the given ref count object has a positive ref count but *is* in the free pool, then
    * this function removes it; otherwise if the object has a zero ref count but is *not* in the free

From ad2980a08af9756cdc1c0d89ef83aa98883430c4 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Fri, 31 Jan 2025 13:22:52 -0500
Subject: [PATCH 15/29] fixed depth related check

---
 src/llfs/page_allocator.cpp | 5 +++--
 src/llfs/page_recycler.cpp  | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/llfs/page_allocator.cpp b/src/llfs/page_allocator.cpp
index 73e91474..18f9ac0b 100644
--- a/src/llfs/page_allocator.cpp
+++ b/src/llfs/page_allocator.cpp
@@ -454,8 +454,9 @@ bool PageAllocator::await_ref_count(PageId page_id, i32 ref_count)
     if ((counter & 4095) == 0) {
       LLFS_LOG_INFO() << BATT_INSPECT(prc) << BATT_INSPECT(page_id) << BATT_INSPECT(ref_count)
                       << BATT_INSPECT(counter);
-      BATT_CHECK_LT(counter, 10 * 1000) << "[PageAllocator::await_ref_count] timed out (10s)"
-                                        << BATT_INSPECT(page_id) << BATT_INSPECT(ref_count);
+      BATT_CHECK_LT(counter, 10 * 1000)
+          << "[PageAllocator::await_ref_count] timed out (10s)" << BATT_INSPECT(page_id)
+          << BATT_INSPECT(ref_count) << BATT_INSPECT(prc.ref_count);
     }
   }
   return true;
diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index b078d0ab..88aa6728 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -333,7 +333,7 @@ bool PageRecycler::is_recycle_pages_allowed(const Slice<const PageId>& page_ids,
     this->largest_page_index_ = 0;
     return true;
 
-  } else if (depth > 0 &&
+  } else if (depth > 0 ||
              (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
               this->largest_page_index_ < page_ids.size())) {
     return true;

From 985bd2534094c31daae38b83a8d36410d03ea162 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Fri, 31 Jan 2025 17:05:29 -0500
Subject: [PATCH 16/29] updated page_index logic

---
 src/llfs/page_recycler.cpp        | 38 +++++++++++++++++++------------
 src/llfs/page_recycler.hpp        |  4 ++--
 src/llfs/page_recycler_events.cpp |  4 +++-
 3 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 88aa6728..dc2b63d3 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -322,24 +322,31 @@ void PageRecycler::join()
   }
 }
 
-bool PageRecycler::is_recycle_pages_allowed(const Slice<const PageId>& page_ids,
-                                            llfs::slot_offset_type offset_as_unique_identifier,
-                                            i32 depth)
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+//
+bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids,
+                                             llfs::slot_offset_type offset_as_unique_identifier,
+                                             i32 depth)
 {
-  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier) {
+  // If we got higher offset allow recycle.
+  //
+  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier && depth == 0) {
     // Update the largest unique identifier and index.
     //
     this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
     this->largest_page_index_ = 0;
     return true;
-
-  } else if (depth > 0 ||
-             (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
-              this->largest_page_index_ < page_ids.size())) {
+  }
+  // If we got same offset but not all pages were processed last time then allow recycle.
+  //
+  else if (depth != 0 ||
+           (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
+            this->largest_page_index_ < page_ids.size())) {
     return true;
-  } else {
-    // Look like this is a repost of some old request thus update some status and ignore/return.
-    //
+  }
+  // Look like this is a repost of some old request thus update metric and do not allow recycle.
+  //
+  else {
     this->metrics_export().page_id_deletion_reissue.fetch_add(1);
     return false;
   }
@@ -370,7 +377,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   //
   {
     auto locked_state = this->state_.lock();
-    if (!is_recycle_pages_allowed(page_ids_in, offset_as_unique_identifier, depth)) {
+    if (!is_page_recycling_allowed(page_ids_in, offset_as_unique_identifier, depth)) {
       return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
     }
   }
@@ -383,6 +390,9 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   }
   auto page_ids = batt::as_slice(page_ids_list);
 
+  LLFS_VLOG(1) << "sorted slice: " << BATT_INSPECT(page_ids_list.size())
+               << BATT_INSPECT(this->largest_page_index_);
+
   Optional<slot_offset_type> sync_point = None;
 
   if (depth == 0) {
@@ -422,7 +432,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
         // Writing to recycler log.
         StatusOr<slot_offset_type> append_slot = this->insert_to_log(
             *local_grant, page_id, depth, this->largest_offset_as_unique_identifier_,
-            this->largest_page_index_, locked_state);
+            this->largest_page_index_ + 1, locked_state);
         BATT_REQUIRE_OK(append_slot);
 
         if (depth == 0) {
@@ -443,7 +453,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
       // Writing to recycler log.
       StatusOr<slot_offset_type> append_slot =
           this->insert_to_log(*grant, page_id, depth, this->largest_offset_as_unique_identifier_,
-                              this->largest_page_index_, locked_state);
+                              this->largest_page_index_ + 1, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
       if (depth == 0) {
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 66ea1bee..28f540b8 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -214,8 +214,8 @@ class PageRecycler
 
   void recycle_task_main();
 
-  bool is_recycle_pages_allowed(const Slice<const PageId>& page_ids,
-                                llfs::slot_offset_type offset_as_unique_identifier, i32 depth);
+  bool is_page_recycling_allowed(const Slice<const PageId>& page_ids,
+                                 llfs::slot_offset_type offset_as_unique_identifier, i32 depth);
 
   // MUST be called only on the recycle task or the ctor.
   //
diff --git a/src/llfs/page_recycler_events.cpp b/src/llfs/page_recycler_events.cpp
index b8137432..f218d929 100644
--- a/src/llfs/page_recycler_events.cpp
+++ b/src/llfs/page_recycler_events.cpp
@@ -18,7 +18,9 @@ namespace llfs {
 std::ostream& operator<<(std::ostream& out, const PageToRecycle& t)
 {
   return out << "PageToRecycle{.page_id=" << t.page_id << ", .refresh_slot=" << t.refresh_slot
-             << ", .batch_slot=" << t.batch_slot << ", .depth=" << t.depth << ",}";
+             << ", .batch_slot=" << t.batch_slot << ", .depth=" << t.depth
+             << ", offset_as_unique_identifier=" << t.offset_as_unique_identifier
+             << ", page_index=" << t.page_index << ",}";
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -

From 2785b7fbf6ae0ba6ccf6af5a7580ab518768cc58 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Fri, 31 Jan 2025 19:19:02 -0500
Subject: [PATCH 17/29] updated variable name

---
 src/llfs/page_recycler.cpp | 10 +++++-----
 src/llfs/page_recycler.hpp |  6 +++---
 src/llfs/volume.test.cpp   | 10 +++++-----
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index dc2b63d3..b4785bd6 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -234,10 +234,10 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
  * flows. This is static metric infrastructure so that any user level code could access it.
  *
  */
-auto PageRecycler::metrics_export() -> MetricsExported&
+auto PageRecycler::metrics_global() -> MetricsGlobal&
 {
-  static MetricsExported& metrics_ = [&]() -> MetricsExported& {
-    static MetricsExported metrics_;
+  static MetricsGlobal& metrics_ = [&]() -> MetricsGlobal& {
+    static MetricsGlobal metrics_;
 
     LLFS_VLOG(1) << "Registering PageRecycler metrics...";
     const auto metric_name = [](std::string_view property) {
@@ -246,7 +246,7 @@ auto PageRecycler::metrics_export() -> MetricsExported&
 
 #define ADD_METRIC_(n) global_metric_registry().add(metric_name(#n), metrics_.n)
 
-    ADD_METRIC_(page_id_deletion_reissue);
+    ADD_METRIC_(page_id_deletion_reissue_count);
 
 #undef ADD_METRIC_
 
@@ -347,7 +347,7 @@ bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids
   // Look like this is a repost of some old request thus update metric and do not allow recycle.
   //
   else {
-    this->metrics_export().page_id_deletion_reissue.fetch_add(1);
+    this->metrics_global().page_id_deletion_reissue_count.fetch_add(1);
     return false;
   }
 }
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 28f540b8..21f74866 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -55,10 +55,10 @@ class PageRecycler
     CountMetric<u64> page_drop_error_count{0};
   };
 
-  struct MetricsExported {
-    CountMetric<u32> page_id_deletion_reissue{0};
+  struct MetricsGlobal {
+    CountMetric<u32> page_id_deletion_reissue_count{0};
   };
-  static MetricsExported& metrics_export();
+  static MetricsGlobal& metrics_global();
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
diff --git a/src/llfs/volume.test.cpp b/src/llfs/volume.test.cpp
index ab80b5bd..6317a42a 100644
--- a/src/llfs/volume.test.cpp
+++ b/src/llfs/volume.test.cpp
@@ -2172,7 +2172,7 @@ TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
   std::vector<usize> histogram(10);
   const usize div = max_seeds / histogram.size();
 
-  llfs::PageRecycler::metrics_export().page_id_deletion_reissue.set(0);
+  llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count.set(0);
 
   for (u32 current_seed = 0; current_seed < max_seeds; ++current_seed) {
     LOG_EVERY_N(INFO, 100) << BATT_INSPECT(current_seed) << BATT_INSPECT(max_seeds);
@@ -2180,23 +2180,23 @@ TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
     yield_pre_halt = (yield_pre_halt % 2) ? pick_value1(rng) : pick_value2(rng);
     yield_post_halt = (yield_post_halt % 2) ? pick_value1(rng) : pick_value2(rng);
 
-    usize last_value = llfs::PageRecycler::metrics_export().page_id_deletion_reissue;
+    usize last_value = llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count;
 
     ASSERT_NO_FATAL_FAILURE(
         this->run_dead_page_recovery_test_variant(current_seed, yield_pre_halt, yield_post_halt));
 
-    last_value = llfs::PageRecycler::metrics_export().page_id_deletion_reissue - last_value;
+    last_value = llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count - last_value;
     histogram[current_seed / div] += last_value;
   }
 
   LOG(INFO) << "Ran DeadPageRefCountVariant test for " << max_seeds << " iterations..."
-            << BATT_INSPECT(llfs::PageRecycler::metrics_export().page_id_deletion_reissue);
+            << BATT_INSPECT(llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count);
 
   display_histogram(max_seeds, histogram);
 
   // We need to have atleast one iteration hitting the issue.
   //
-  ASSERT_GE(llfs::PageRecycler::metrics_export().page_id_deletion_reissue, 0);
+  ASSERT_GE(llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count, 0);
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -

From 35c646d06238196f58942f404e40ee0e5dbae342 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Sat, 1 Feb 2025 00:08:35 -0500
Subject: [PATCH 18/29] update to avoid clearing the entire log

---
 src/llfs/page_recycler.cpp | 8 ++++++++
 src/llfs/page_recycler.hpp | 3 +++
 2 files changed, 11 insertions(+)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index b4785bd6..46c654c6 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -383,6 +383,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   }
 
   // Sort the pages.
+  //
   std::vector<PageId> page_ids_list(page_ids_in.begin(), page_ids_in.end());
   if (depth == 0) {
     std::sort(page_ids_list.begin(), page_ids_list.end());
@@ -438,6 +439,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
         if (depth == 0) {
           ++this->largest_page_index_;
         }
+        this->last_page_recycle_offset_ = *append_slot;
 
         clamp_min_slot(&sync_point, *append_slot);
       }
@@ -848,6 +850,12 @@ Status PageRecycler::trim_log()
     }
   }();
 
+  // We want to keep at least one recycle_page slot in the log.
+  //
+  if (trim_point <= this->last_page_recycle_offset_) {
+    return batt::OkStatus();
+  }
+
   // Check to see if we need to refresh the info slot.
   //
   if (options.info_needs_refresh(latest_info_slot, *this->wal_device_) ||
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 21f74866..1a6d0e64 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -267,7 +267,10 @@ class PageRecycler
   Optional<slot_offset_type> latest_batch_upper_bound_;
 
   slot_offset_type largest_offset_as_unique_identifier_;
+
   u16 largest_page_index_;
+
+  slot_offset_type last_page_recycle_offset_;
 };
 
 inline std::ostream& operator<<(std::ostream& out, const PageRecycler::Batch& t)

From 5d350d11a67026f07a2174419dc46c412b8e39ef Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Mon, 3 Feb 2025 12:31:16 -0500
Subject: [PATCH 19/29] changed some test parameters

---
 src/llfs/page_recycler.cpp      | 14 +++++++-------
 src/llfs/page_recycler.test.cpp |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 46c654c6..67d8c159 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -412,13 +412,13 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
       StatusOr<batt::Grant> local_grant = [&] {
         const usize needed_size = options.insert_grant_size();
 
-        BATT_DEBUG_INFO("[PageRecycler::recycle_page] waiting for log space; "
-                        << BATT_INSPECT(needed_size)
-                        << BATT_INSPECT(this->insert_grant_pool_.size()) << BATT_INSPECT(depth)
-                        << BATT_INSPECT(options.total_grant_size_for_depth(depth))
-                        << BATT_INSPECT(this->slot_writer_.in_use_size())
-                        << BATT_INSPECT(this->slot_writer_.log_capacity())
-                        << BATT_INSPECT(options.recycle_task_target()));
+        BATT_DEBUG_INFO(
+            "[PageRecycler::recycle_page] waiting for log space; "
+            << BATT_INSPECT(needed_size) << BATT_INSPECT(this->insert_grant_pool_.size())
+            << BATT_INSPECT(depth) << BATT_INSPECT(options.total_grant_size_for_depth(depth))
+            << BATT_INSPECT(this->slot_writer_.in_use_size())
+            << BATT_INSPECT(this->slot_writer_.log_capacity())
+            << BATT_INSPECT(options.recycle_task_target()) << BATT_INSPECT(page_ids.size()));
 
         return this->insert_grant_pool_.spend(needed_size, batt::WaitForResource::kTrue);
       }();
diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index 8bf9b630..6fc35c2a 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -455,7 +455,7 @@ void PageRecyclerTest::run_crash_recovery_test()
   // Note that increasing the page_count here most likely impact the test execution as we might run
   // out of grant space.
   //
-  const usize fake_page_count = 180;
+  const usize fake_page_count = 155;
   const u32 max_branching_factor = 8;
 
   const auto options = llfs::PageRecyclerOptions{}  //

From be0412d5eed84fb2ba5202d7c21ede6ba51cdbdc Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Mon, 3 Feb 2025 17:11:41 -0500
Subject: [PATCH 20/29] review comment changes

---
 src/llfs/page_recycler.cpp        | 16 ++++++++++------
 src/llfs/page_recycler.hpp        |  6 +++---
 src/llfs/page_recycler.test.cpp   |  2 +-
 src/llfs/page_recycler_events.hpp |  3 +++
 src/llfs/volume.test.cpp          | 10 +++++-----
 5 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 67d8c159..e3a003a6 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -81,7 +81,9 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-//
+/** \brief This function determines the bytes needed for buffered pages in the log.
+ *
+ */
 /*static*/ u64 PageRecycler::calculate_log_size_no_padding(
     const PageRecyclerOptions& options, Optional<PageCount> max_buffered_page_count)
 {
@@ -234,10 +236,10 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
  * flows. This is static metric infrastructure so that any user level code could access it.
  *
  */
-auto PageRecycler::metrics_global() -> MetricsGlobal&
+auto PageRecycler::global_metrics() -> GlobalMetrics&
 {
-  static MetricsGlobal& metrics_ = [&]() -> MetricsGlobal& {
-    static MetricsGlobal metrics_;
+  static GlobalMetrics& metrics_ = [&]() -> GlobalMetrics& {
+    static GlobalMetrics metrics_;
 
     LLFS_VLOG(1) << "Registering PageRecycler metrics...";
     const auto metric_name = [](std::string_view property) {
@@ -347,7 +349,7 @@ bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids
   // Look like this is a repost of some old request thus update metric and do not allow recycle.
   //
   else {
-    this->metrics_global().page_id_deletion_reissue_count.fetch_add(1);
+    this->global_metrics().page_id_deletion_reissue_count.fetch_add(1);
     return false;
   }
 }
@@ -462,6 +464,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
         ++this->largest_page_index_;
       }
 
+      this->last_page_recycle_offset_ = *append_slot;
+
       clamp_min_slot(&sync_point, *append_slot);
     }
   }
@@ -852,7 +856,7 @@ Status PageRecycler::trim_log()
 
   // We want to keep atleast one recycle_page slot in the log.
   //
-  if (trim_point <= this->last_page_recycle_offset_) {
+  if (trim_point >= this->last_page_recycle_offset_) {
     return batt::OkStatus();
   }
 
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 1a6d0e64..00404e98 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -55,10 +55,10 @@ class PageRecycler
     CountMetric<u64> page_drop_error_count{0};
   };
 
-  struct MetricsGlobal {
+  struct GlobalMetrics {
     CountMetric<u32> page_id_deletion_reissue_count{0};
   };
-  static MetricsGlobal& metrics_global();
+  static GlobalMetrics& global_metrics();
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -113,7 +113,7 @@ class PageRecycler
   // necessarily flushed (see `await_flush`).
   //
   StatusOr<slot_offset_type> recycle_pages(const Slice<const PageId>& page_ids,
-                                           llfs::slot_offset_type offset,
+                                           llfs::slot_offset_type unique_offset,
                                            batt::Grant* grant = nullptr, i32 depth = 0);
 
   // Schedule a single page to be recycled.  \see recycle_pages
diff --git a/src/llfs/page_recycler.test.cpp b/src/llfs/page_recycler.test.cpp
index 6fc35c2a..8bf9b630 100644
--- a/src/llfs/page_recycler.test.cpp
+++ b/src/llfs/page_recycler.test.cpp
@@ -455,7 +455,7 @@ void PageRecyclerTest::run_crash_recovery_test()
   // Note that increasing the page_count here most likely impact the test execution as we might run
   // out of grant space.
   //
-  const usize fake_page_count = 155;
+  const usize fake_page_count = 180;
   const u32 max_branching_factor = 8;
 
   const auto options = llfs::PageRecyclerOptions{}  //
diff --git a/src/llfs/page_recycler_events.hpp b/src/llfs/page_recycler_events.hpp
index 89e3072d..2773ace2 100644
--- a/src/llfs/page_recycler_events.hpp
+++ b/src/llfs/page_recycler_events.hpp
@@ -42,6 +42,9 @@ struct PageToRecycle {
   // The offset given by volume trimmer.
   //
   slot_offset_type offset_as_unique_identifier;
+
+  // This tracks the page index within a recycle_pages request.
+  //
   u16 page_index;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
diff --git a/src/llfs/volume.test.cpp b/src/llfs/volume.test.cpp
index 6317a42a..dbf2b6dc 100644
--- a/src/llfs/volume.test.cpp
+++ b/src/llfs/volume.test.cpp
@@ -2172,7 +2172,7 @@ TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
   std::vector<usize> histogram(10);
   const usize div = max_seeds / histogram.size();
 
-  llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count.set(0);
+  llfs::PageRecycler::global_metrics().page_id_deletion_reissue_count.set(0);
 
   for (u32 current_seed = 0; current_seed < max_seeds; ++current_seed) {
     LOG_EVERY_N(INFO, 100) << BATT_INSPECT(current_seed) << BATT_INSPECT(max_seeds);
@@ -2180,23 +2180,23 @@ TEST_F(VolumeSimTest, DeadPageRefCountVariantSimulation)
     yield_pre_halt = (yield_pre_halt % 2) ? pick_value1(rng) : pick_value2(rng);
     yield_post_halt = (yield_post_halt % 2) ? pick_value1(rng) : pick_value2(rng);
 
-    usize last_value = llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count;
+    usize last_value = llfs::PageRecycler::global_metrics().page_id_deletion_reissue_count;
 
     ASSERT_NO_FATAL_FAILURE(
         this->run_dead_page_recovery_test_variant(current_seed, yield_pre_halt, yield_post_halt));
 
-    last_value = llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count - last_value;
+    last_value = llfs::PageRecycler::global_metrics().page_id_deletion_reissue_count - last_value;
     histogram[current_seed / div] += last_value;
   }
 
   LOG(INFO) << "Ran DeadPageRefCountVariant test for " << max_seeds << " iterations..."
-            << BATT_INSPECT(llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count);
+            << BATT_INSPECT(llfs::PageRecycler::global_metrics().page_id_deletion_reissue_count);
 
   display_histogram(max_seeds, histogram);
 
   // We need to have atleast one iteration hitting the issue.
   //
-  ASSERT_GE(llfs::PageRecycler::metrics_global().page_id_deletion_reissue_count, 0);
+  ASSERT_GE(llfs::PageRecycler::global_metrics().page_id_deletion_reissue_count, 0);
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -

From 3548923b3585473af3aeaa9361b0262d8e96e2a5 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Mon, 3 Feb 2025 17:39:20 -0500
Subject: [PATCH 21/29] update some comments and log messages

---
 src/llfs/page_recycler.cpp | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index e3a003a6..ab5d536c 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -325,7 +325,10 @@ void PageRecycler::join()
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-//
+/** \brief This check is to make sure this request was never seen before. Note that we do unique
+ * identifier check only for depth=0 as that's coming from the external client side. For all
+ * recycler internal calls (with depth > 0) we will simply accept the request.
+ */
 bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids,
                                              llfs::slot_offset_type offset_as_unique_identifier,
                                              i32 depth)
@@ -339,7 +342,7 @@ bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids
     this->largest_page_index_ = 0;
     return true;
   }
-  // If we got same offset but not all pages were processed last time then allow recycle.
+  // If we got same offset but not all pages were processed last time or depth>0 then allow recycle.
   //
   else if (depth != 0 ||
            (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
@@ -367,15 +370,13 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
                << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
                << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
-               << BATT_INSPECT(this->largest_page_index_);
+               << BATT_INSPECT(this->last_page_recycle_offset_);
 
   if (page_ids_in.empty()) {
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
-  // Check to make sure request was never seen before. Note that we do unique identifier check only
-  // for depth=0 as that's coming from the external client side. For all recycler internal calls
-  // (with depth > 0) we will simply accept the request.
+  // Check to make sure request was never seen before.
   //
   {
     auto locked_state = this->state_.lock();
@@ -390,12 +391,12 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   if (depth == 0) {
     std::sort(page_ids_list.begin(), page_ids_list.end());
     page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
+
+    LLFS_VLOG(1) << "sorted slice: " << BATT_INSPECT(page_ids_list.size())
+                 << BATT_INSPECT(this->largest_page_index_);
   }
   auto page_ids = batt::as_slice(page_ids_list);
 
-  LLFS_VLOG(1) << "sorted slice: " << BATT_INSPECT(page_ids_list.size())
-               << BATT_INSPECT(this->largest_page_index_);
-
   Optional<slot_offset_type> sync_point = None;
 
   if (depth == 0) {
@@ -409,7 +410,6 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
 
     const PageRecyclerOptions& options = this->state_.no_lock().options;
 
-    LLFS_VLOG(1) << "recycle_pages entered grant==NULL case";
     for (PageId page_id : page_ids) {
       StatusOr<batt::Grant> local_grant = [&] {
         const usize needed_size = options.insert_grant_size();
@@ -420,7 +420,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
             << BATT_INSPECT(depth) << BATT_INSPECT(options.total_grant_size_for_depth(depth))
             << BATT_INSPECT(this->slot_writer_.in_use_size())
             << BATT_INSPECT(this->slot_writer_.log_capacity())
-            << BATT_INSPECT(options.recycle_task_target()) << BATT_INSPECT(page_ids.size()));
+            << BATT_INSPECT(options.recycle_task_target()) << BATT_INSPECT(page_ids.size())
+            << BATT_INSPECT(this->last_page_recycle_offset_));
 
         return this->insert_grant_pool_.spend(needed_size, batt::WaitForResource::kTrue);
       }();
@@ -449,8 +450,6 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   } else {
     BATT_CHECK_LT(depth, (i32)kMaxPageRefDepth) << BATT_INSPECT_RANGE(page_ids);
 
-    LLFS_VLOG(1) << "recycle_pages entered the valid grant case";
-
     auto locked_state = this->state_.lock();
 
     for (PageId page_id : page_ids) {

From e560bc6e22022a0836f21417c4166f2299426793 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Mon, 3 Feb 2025 17:57:52 -0500
Subject: [PATCH 22/29] updated some log messages

---
 src/llfs/page_recycler.cpp | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index ab5d536c..b91c9a5c 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -392,18 +392,16 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
     std::sort(page_ids_list.begin(), page_ids_list.end());
     page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
 
-    LLFS_VLOG(1) << "sorted slice: " << BATT_INSPECT(page_ids_list.size())
+    LLFS_VLOG(1) << "Sorted slice: " << BATT_INSPECT(page_ids_list.size())
                  << BATT_INSPECT(this->largest_page_index_);
-  }
-  auto page_ids = batt::as_slice(page_ids_list);
-
-  Optional<slot_offset_type> sync_point = None;
 
-  if (depth == 0) {
     BATT_CHECK_EQ(grant, nullptr) << "External callers to `PageRecycler::recycle_pages` should "
                                      "specify depth == 0 and grant == nullptr; other values are "
                                      "for PageRecycler internal use only.";
   }
+  const Slice<const PageId> page_ids = batt::as_slice(page_ids_list);
+
+  Optional<slot_offset_type> sync_point = None;
 
   if (grant == nullptr) {
     BATT_CHECK_EQ(depth, 0u);
@@ -439,9 +437,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
             this->largest_page_index_ + 1, locked_state);
         BATT_REQUIRE_OK(append_slot);
 
-        if (depth == 0) {
-          ++this->largest_page_index_;
-        }
+        ++this->largest_page_index_;
+
         this->last_page_recycle_offset_ = *append_slot;
 
         clamp_min_slot(&sync_point, *append_slot);
@@ -459,10 +456,6 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
                               this->largest_page_index_ + 1, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
-      if (depth == 0) {
-        ++this->largest_page_index_;
-      }
-
       this->last_page_recycle_offset_ = *append_slot;
 
       clamp_min_slot(&sync_point, *append_slot);

From f8815b192818d581959259b74eba30164828b10c Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Mon, 3 Feb 2025 18:13:54 -0500
Subject: [PATCH 23/29] simplified check function

---
 src/llfs/page_recycler.cpp | 39 +++++++++++++++++++-------------------
 src/llfs/page_recycler.hpp |  2 +-
 2 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index b91c9a5c..f9bdc9a0 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -326,27 +326,25 @@ void PageRecycler::join()
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 /** \brief This check is to make sure this request was never seen before. Note that we do unique
- * identifier check only for depth=0 as that's coming from the external client side. For all
- * recycler internal calls (with depth > 0) we will simply accept the request.
+ * identifier check only for external calls.
  */
 bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids,
-                                             llfs::slot_offset_type offset_as_unique_identifier,
-                                             i32 depth)
+                                             llfs::slot_offset_type offset_as_unique_identifier)
+
 {
   // If we got higher offset allow recycle.
   //
-  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier && depth == 0) {
+  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier) {
     // Update the largest unique identifier and index.
     //
     this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
     this->largest_page_index_ = 0;
     return true;
   }
-  // If we got same offset but not all pages were processed last time or depth>0 then allow recycle.
+  // If we got same offset but not all pages were processed last time then allow recycle.
   //
-  else if (depth != 0 ||
-           (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
-            this->largest_page_index_ < page_ids.size())) {
+  else if (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
+           this->largest_page_index_ < page_ids.size()) {
     return true;
   }
   // Look like this is a repost of some old request thus update metric and do not allow recycle.
@@ -376,19 +374,21 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
     return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
   }
 
-  // Check to make sure request was never seen before.
-  //
-  {
-    auto locked_state = this->state_.lock();
-    if (!is_page_recycling_allowed(page_ids_in, offset_as_unique_identifier, depth)) {
-      return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
-    }
-  }
-
-  // Sort the pages.
+  // Sort the pages if called by external callers.
   //
   std::vector<PageId> page_ids_list(page_ids_in.begin(), page_ids_in.end());
   if (depth == 0) {
+    // Check to make sure request was never seen before.
+    //
+    {
+      auto locked_state = this->state_.lock();
+      if (!is_page_recycling_allowed(page_ids_in, offset_as_unique_identifier)) {
+        return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
+      }
+    }
+
+    // We need to include the pages which are not processed yet.
+    //
     std::sort(page_ids_list.begin(), page_ids_list.end());
     page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
 
@@ -400,7 +400,6 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
                                      "for PageRecycler internal use only.";
   }
   const Slice<const PageId> page_ids = batt::as_slice(page_ids_list);
-
   Optional<slot_offset_type> sync_point = None;
 
   if (grant == nullptr) {
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 00404e98..154f9185 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -215,7 +215,7 @@ class PageRecycler
   void recycle_task_main();
 
   bool is_page_recycling_allowed(const Slice<const PageId>& page_ids,
-                                 llfs::slot_offset_type offset_as_unique_identifier, i32 depth);
+                                 llfs::slot_offset_type offset_as_unique_identifier);
 
   // MUST be called only on the recycle task or the ctor.
   //

From dae089d99ad6fa10ba51a9426af3dd5ee6f09eb6 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Tue, 4 Feb 2025 11:47:18 -0500
Subject: [PATCH 24/29] made recycle_pages more modular

---
 src/llfs/page_recycler.cpp | 190 ++++++++++++++++++++-----------------
 src/llfs/page_recycler.hpp |  12 +++
 2 files changed, 117 insertions(+), 85 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index f9bdc9a0..03cbbad8 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -179,7 +179,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
   return std::unique_ptr<PageRecycler>{
       new PageRecycler(scheduler, std::string{name}, page_deleter, std::move(*recovered_log),
                        std::move(latest_batch), std::move(state), visitor.largest_unique_offset(),
-                       visitor.page_index())};
+                       visitor.page_index() + 1)};
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -356,105 +356,67 @@ bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-//
-StatusOr<slot_offset_type> PageRecycler::recycle_pages(
-    const Slice<const PageId>& page_ids_in, llfs::slot_offset_type offset_as_unique_identifier,
-    batt::Grant* grant, i32 depth)
+/** \brief This function handles page recycling for external callers. This is done by first checking
+ * whether this request can even be accepted (as it could be a retry from the volume-trimmer side).
+ * If accepted, the pages are processed starting from the last pending page (which could well be the
+ * first page if it's the first time).
+ */
+StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
+    const Slice<const PageId>& page_ids_in, llfs::slot_offset_type offset_as_unique_identifier)
 {
-  BATT_CHECK_GE(depth, 0);
-
-  LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids_in) << "["
-               << page_ids_in.size() << "]"
-               << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
-               << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
-               << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
-               << BATT_INSPECT(this->last_page_recycle_offset_);
-
-  if (page_ids_in.empty()) {
-    return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
-  }
-
-  // Sort the pages if called by external callers.
+  constexpr i32 depth = 0;
+  // Check to make sure request was never seen before.
   //
   std::vector<PageId> page_ids_list(page_ids_in.begin(), page_ids_in.end());
-  if (depth == 0) {
-    // Check to make sure request was never seen before.
-    //
-    {
-      auto locked_state = this->state_.lock();
-      if (!is_page_recycling_allowed(page_ids_in, offset_as_unique_identifier)) {
-        return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
-      }
+  {
+    auto locked_state = this->state_.lock();
+    if (!is_page_recycling_allowed(page_ids_in, offset_as_unique_identifier)) {
+      return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
     }
+  }
 
-    // We need to include the pages which are not processed yet.
-    //
-    std::sort(page_ids_list.begin(), page_ids_list.end());
-    page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
+  // We need to include the pages which are not processed yet.
+  //
+  std::sort(page_ids_list.begin(), page_ids_list.end());
+  page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
 
-    LLFS_VLOG(1) << "Sorted slice: " << BATT_INSPECT(page_ids_list.size())
-                 << BATT_INSPECT(this->largest_page_index_);
+  LLFS_VLOG(1) << "Sorted slice: " << BATT_INSPECT(page_ids_list.size())
+               << BATT_INSPECT(this->largest_page_index_);
 
-    BATT_CHECK_EQ(grant, nullptr) << "External callers to `PageRecycler::recycle_pages` should "
-                                     "specify depth == 0 and grant == nullptr; other values are "
-                                     "for PageRecycler internal use only.";
-  }
   const Slice<const PageId> page_ids = batt::as_slice(page_ids_list);
   Optional<slot_offset_type> sync_point = None;
+  const PageRecyclerOptions& options = this->state_.no_lock().options;
 
-  if (grant == nullptr) {
-    BATT_CHECK_EQ(depth, 0u);
-
-    const PageRecyclerOptions& options = this->state_.no_lock().options;
-
-    for (PageId page_id : page_ids) {
-      StatusOr<batt::Grant> local_grant = [&] {
-        const usize needed_size = options.insert_grant_size();
-
-        BATT_DEBUG_INFO(
-            "[PageRecycler::recycle_page] waiting for log space; "
-            << BATT_INSPECT(needed_size) << BATT_INSPECT(this->insert_grant_pool_.size())
-            << BATT_INSPECT(depth) << BATT_INSPECT(options.total_grant_size_for_depth(depth))
-            << BATT_INSPECT(this->slot_writer_.in_use_size())
-            << BATT_INSPECT(this->slot_writer_.log_capacity())
-            << BATT_INSPECT(options.recycle_task_target()) << BATT_INSPECT(page_ids.size())
-            << BATT_INSPECT(this->last_page_recycle_offset_));
-
-        return this->insert_grant_pool_.spend(needed_size, batt::WaitForResource::kTrue);
-      }();
-      if (!local_grant.ok()) {
-        if (!suppress_log_output_for_test() && !this->stop_requested_.load()) {
-          LLFS_LOG_WARNING() << "PageRecycler::recycle_pages failed; not enough log buffer space";
-        }
-      }
-      BATT_REQUIRE_OK(local_grant);
-      {
-        auto locked_state = this->state_.lock();
-        // Writing to recycler log.
-        StatusOr<slot_offset_type> append_slot = this->insert_to_log(
-            *local_grant, page_id, depth, this->largest_offset_as_unique_identifier_,
-            this->largest_page_index_ + 1, locked_state);
-        BATT_REQUIRE_OK(append_slot);
-
-        ++this->largest_page_index_;
-
-        this->last_page_recycle_offset_ = *append_slot;
-
-        clamp_min_slot(&sync_point, *append_slot);
+  for (PageId page_id : page_ids) {
+    StatusOr<batt::Grant> local_grant = [&] {
+      const usize needed_size = options.insert_grant_size();
+
+      BATT_DEBUG_INFO("[PageRecycler::recycle_page_depth_0] waiting for log space; "
+                      << BATT_INSPECT(needed_size) << BATT_INSPECT(this->insert_grant_pool_.size())
+                      << BATT_INSPECT(options.total_grant_size_for_depth(depth))
+                      << BATT_INSPECT(this->slot_writer_.in_use_size())
+                      << BATT_INSPECT(this->slot_writer_.log_capacity())
+                      << BATT_INSPECT(options.recycle_task_target())
+                      << BATT_INSPECT(page_ids.size())
+                      << BATT_INSPECT(this->last_page_recycle_offset_));
+
+      return this->insert_grant_pool_.spend(needed_size, batt::WaitForResource::kTrue);
+    }();
+    if (!local_grant.ok()) {
+      if (!suppress_log_output_for_test() && !this->stop_requested_.load()) {
+        LLFS_LOG_WARNING() << "PageRecycler::recycle_pages failed; not enough log buffer space";
       }
     }
-  } else {
-    BATT_CHECK_LT(depth, (i32)kMaxPageRefDepth) << BATT_INSPECT_RANGE(page_ids);
-
-    auto locked_state = this->state_.lock();
-
-    for (PageId page_id : page_ids) {
+    BATT_REQUIRE_OK(local_grant);
+    {
+      auto locked_state = this->state_.lock();
       // Writing to recycler log.
-      StatusOr<slot_offset_type> append_slot =
-          this->insert_to_log(*grant, page_id, depth, this->largest_offset_as_unique_identifier_,
-                              this->largest_page_index_ + 1, locked_state);
+      StatusOr<slot_offset_type> append_slot = this->insert_to_log(
+          *local_grant, page_id, depth, this->largest_offset_as_unique_identifier_,
+          this->largest_page_index_, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
+      ++this->largest_page_index_;
       this->last_page_recycle_offset_ = *append_slot;
 
       clamp_min_slot(&sync_point, *append_slot);
@@ -465,6 +427,64 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
   return *sync_point;
 }
 
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+/** \brief This function handles page recycling when depth is non-zero (internal calls).
+ */
+StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_n(const Slice<const PageId>& page_ids,
+                                                               batt::Grant* grant, const i32 depth)
+{
+  Optional<slot_offset_type> sync_point = None;
+  BATT_CHECK_LT(depth, (i32)kMaxPageRefDepth) << BATT_INSPECT_RANGE(page_ids);
+
+  auto locked_state = this->state_.lock();
+  for (PageId page_id : page_ids) {
+    // Writing to recycler log.
+    StatusOr<slot_offset_type> append_slot =
+        this->insert_to_log(*grant, page_id, depth, this->largest_offset_as_unique_identifier_,
+                            this->largest_page_index_, locked_state);
+    BATT_REQUIRE_OK(append_slot);
+
+    this->last_page_recycle_offset_ = *append_slot;
+
+    clamp_min_slot(&sync_point, *append_slot);
+  }
+
+  BATT_CHECK(sync_point);
+  return *sync_point;
+}
+
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
+//
+StatusOr<slot_offset_type> PageRecycler::recycle_pages(
+    const Slice<const PageId>& page_ids, llfs::slot_offset_type offset_as_unique_identifier,
+    batt::Grant* grant, i32 depth)
+{
+  BATT_CHECK_GE(depth, 0);
+
+  LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids) << "["
+               << page_ids.size() << "]"
+               << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
+               << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
+               << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
+               << BATT_INSPECT(this->last_page_recycle_offset_);
+
+  if (page_ids.empty()) {
+    return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
+  }
+
+  // Handle calls from external callers first (depth==0).
+  //
+  if (depth == 0) {
+    BATT_CHECK_EQ(grant, nullptr) << "External callers to `PageRecycler::recycle_pages` should "
+                                     "specify depth == 0 and grant == nullptr; other values are "
+                                     "for PageRecycler internal use only.";
+
+    return recycle_pages_depth_0(page_ids, offset_as_unique_identifier);
+  } else {
+    return recycle_pages_depth_n(page_ids, grant, depth);
+  }
+}
+
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id,
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 154f9185..daabb6f0 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -121,6 +121,18 @@ class PageRecycler
   StatusOr<slot_offset_type> recycle_page(PageId page_id, slot_offset_type unique_offset,
                                           batt::Grant* grant = nullptr, i32 depth = 0);
 
+  // Schedule a list of pages to be recycled when depth is ZERO. This is basically called by
+  // recycle_pages.
+  //
+  StatusOr<slot_offset_type> recycle_pages_depth_0(
+      const Slice<const PageId>& page_ids_in, llfs::slot_offset_type offset_as_unique_identifier);
+
+  // Schedule a list of pages to be recycled when depth is non-ZERO. This is basically called by
+  // recycle_pages.
+  //
+  StatusOr<slot_offset_type> recycle_pages_depth_n(const Slice<const PageId>& page_ids_in,
+                                                   batt::Grant* grant, const i32 depth);
+
   // Waits for the given slot to be flushed to durable storage.
   //
   Status await_flush(Optional<slot_offset_type> min_upper_bound);

From 56187deaae0c9bfad5e96cde481b57a17c9e89e0 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Tue, 4 Feb 2025 12:12:10 -0500
Subject: [PATCH 25/29] removed a typo

---
 src/llfs/page_allocator_state.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llfs/page_allocator_state.hpp b/src/llfs/page_allocator_state.hpp
index f7b790ad..c1398919 100644
--- a/src/llfs/page_allocator_state.hpp
+++ b/src/llfs/page_allocator_state.hpp
@@ -242,7 +242,7 @@ class PageAllocatorState : public PageAllocatorStateNoLock
 
   // Returns the new ref count that will result from applying the delta to the passed obj.
   //
-  i32 calculate_new_ref_count(const PackedPageRefCount& deltaa) const;
+  i32 calculate_new_ref_count(const PackedPageRefCount& delta) const;
 
   /** \brief If the given ref count object has a positive ref count but *is* in the free pool, then
    * this function removes it; otherwise if the object has a zero ref count but is *not* in the free

From 6d706767ab2dde570f90faf66c737b7c2ffbd2bd Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Tue, 4 Feb 2025 13:18:00 -0500
Subject: [PATCH 26/29] updated variable name

---
 src/llfs/page_recycler.cpp                  | 23 ++++++++++-----------
 src/llfs/page_recycler.hpp                  |  2 +-
 src/llfs/page_recycler_recovery_visitor.cpp | 15 +++++++-------
 src/llfs/page_recycler_recovery_visitor.hpp |  4 ++--
 4 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 03cbbad8..93c88927 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -178,7 +178,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 
   return std::unique_ptr<PageRecycler>{
       new PageRecycler(scheduler, std::string{name}, page_deleter, std::move(*recovered_log),
-                       std::move(latest_batch), std::move(state), visitor.largest_unique_offset(),
+                       std::move(latest_batch), std::move(state), visitor.volume_trim_offset(),
                        visitor.page_index() + 1)};
 }
 
@@ -203,7 +203,7 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
     , recycle_task_{}
     , metrics_{}
     , prepared_batch_{std::move(recovered_batch)}
-    , largest_offset_as_unique_identifier_{largest_offset_as_unique_identifier_init}
+    , volume_trim_slot_{largest_offset_as_unique_identifier_init}
     , largest_page_index_{page_index_init}
 {
   const PageRecyclerOptions& options = this->state_.no_lock().options;
@@ -334,16 +334,16 @@ bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids
 {
   // If we got higher offset allow recycle.
   //
-  if (this->largest_offset_as_unique_identifier_ < offset_as_unique_identifier) {
+  if (this->volume_trim_slot_ < offset_as_unique_identifier) {
     // Update the largest unique identifier and index.
     //
-    this->largest_offset_as_unique_identifier_ = offset_as_unique_identifier;
+    this->volume_trim_slot_ = offset_as_unique_identifier;
     this->largest_page_index_ = 0;
     return true;
   }
   // If we got same offset but not all pages were processed last time then allow recycle.
   //
-  else if (this->largest_offset_as_unique_identifier_ == offset_as_unique_identifier &&
+  else if (this->volume_trim_slot_ == offset_as_unique_identifier &&
            this->largest_page_index_ < page_ids.size()) {
     return true;
   }
@@ -411,9 +411,9 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
     {
       auto locked_state = this->state_.lock();
       // Writing to recycler log.
-      StatusOr<slot_offset_type> append_slot = this->insert_to_log(
-          *local_grant, page_id, depth, this->largest_offset_as_unique_identifier_,
-          this->largest_page_index_, locked_state);
+      StatusOr<slot_offset_type> append_slot =
+          this->insert_to_log(*local_grant, page_id, depth, this->volume_trim_slot_,
+                              this->largest_page_index_, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
       ++this->largest_page_index_;
@@ -439,9 +439,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_n(const Slice<const
   auto locked_state = this->state_.lock();
   for (PageId page_id : page_ids) {
     // Writing to recycler log.
-    StatusOr<slot_offset_type> append_slot =
-        this->insert_to_log(*grant, page_id, depth, this->largest_offset_as_unique_identifier_,
-                            this->largest_page_index_, locked_state);
+    StatusOr<slot_offset_type> append_slot = this->insert_to_log(
+        *grant, page_id, depth, this->volume_trim_slot_, this->largest_page_index_, locked_state);
     BATT_REQUIRE_OK(append_slot);
 
     this->last_page_recycle_offset_ = *append_slot;
@@ -465,7 +464,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
                << page_ids.size() << "]"
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
                << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
-               << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
+               << BATT_INSPECT(this->volume_trim_slot_)
                << BATT_INSPECT(this->last_page_recycle_offset_);
 
   if (page_ids.empty()) {
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index daabb6f0..015848df 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -278,7 +278,7 @@ class PageRecycler
 
   Optional<slot_offset_type> latest_batch_upper_bound_;
 
-  slot_offset_type largest_offset_as_unique_identifier_;
+  slot_offset_type volume_trim_slot_;
 
   u16 largest_page_index_;
 
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index e482b501..af23376d 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -29,7 +29,7 @@ namespace llfs {
     , latest_batch_{}
     , recycler_uuid_{boost::uuids::random_generator{}()}
     , latest_info_refresh_slot_{}
-    , largest_offset_as_unique_identifier_{0}
+    , volume_trim_slot_{0}
     , largest_page_index_{0}
 {
   initialize_status_codes();
@@ -115,9 +115,9 @@ Optional<SlotRange> PageRecyclerRecoveryVisitor::latest_info_refresh_slot() cons
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-slot_offset_type PageRecyclerRecoveryVisitor::largest_unique_offset() const
+slot_offset_type PageRecyclerRecoveryVisitor::volume_trim_offset() const
 {
-  return this->largest_offset_as_unique_identifier_;
+  return this->volume_trim_slot_;
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -133,8 +133,7 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
                                                const PageToRecycle& recovered)
 {
   LLFS_VLOG(1) << "Recovered slot: " << slot.offset << " " << recovered
-               << BATT_INSPECT((bool)this->latest_batch_)
-               << BATT_INSPECT(this->largest_offset_as_unique_identifier_)
+               << BATT_INSPECT((bool)this->latest_batch_) << BATT_INSPECT(this->volume_trim_slot_)
                << BATT_INSPECT(this->largest_page_index_);
 
   // Add the new record, or verify that the existing one and the new one are equivalent.
@@ -162,10 +161,10 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
   // Save the largest unique offset identifier. Note that if offsets are same then we need to only
   // update the largest page_index.
   //
-  if (this->largest_offset_as_unique_identifier_ < recovered.offset_as_unique_identifier) {
-    this->largest_offset_as_unique_identifier_ = recovered.offset_as_unique_identifier;
+  if (this->volume_trim_slot_ < recovered.offset_as_unique_identifier) {
+    this->volume_trim_slot_ = recovered.offset_as_unique_identifier;
     this->largest_page_index_ = recovered.page_index;
-  } else if (this->largest_offset_as_unique_identifier_ == recovered.offset_as_unique_identifier &&
+  } else if (this->volume_trim_slot_ == recovered.offset_as_unique_identifier &&
              this->largest_page_index_ < recovered.page_index) {
     this->largest_page_index_ = recovered.page_index;
   }
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index 375684fb..60c408f3 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -45,7 +45,7 @@ class PageRecyclerRecoveryVisitor
 
   Optional<SlotRange> latest_info_refresh_slot() const;
 
-  slot_offset_type largest_unique_offset() const;
+  slot_offset_type volume_trim_offset() const;
   u16 page_index() const;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -85,7 +85,7 @@ class PageRecyclerRecoveryVisitor
 
   // This is to track largest unique_offset value during recovery.
   //
-  slot_offset_type largest_offset_as_unique_identifier_;
+  slot_offset_type volume_trim_slot_;
 
   // This tracks the largest page_index seen so far for a given offset.
   //

From 51642851ebcb233d0b5aefb2f5576061eacb38c9 Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 5 Feb 2025 10:40:58 -0500
Subject: [PATCH 27/29] part of review comment changes

---
 src/llfs/page_recycler.cpp                  | 48 ++++++++++-----------
 src/llfs/page_recycler.hpp                  | 30 ++++++-------
 src/llfs/page_recycler_events.cpp           |  4 +-
 src/llfs/page_recycler_events.hpp           | 19 ++++----
 src/llfs/page_recycler_recovery_visitor.cpp |  8 ++--
 src/llfs/page_recycler_recovery_visitor.hpp |  4 +-
 6 files changed, 53 insertions(+), 60 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index 93c88927..d62e1e70 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -188,8 +188,8 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
                            PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                            Optional<Batch>&& recovered_batch,
                            std::unique_ptr<PageRecycler::State>&& state,
-                           llfs::slot_offset_type largest_offset_as_unique_identifier_init,
-                           u16 page_index_init) noexcept
+                           llfs::slot_offset_type volume_trim_slot_init,
+                           u32 page_index_init) noexcept
     : scheduler_{scheduler}
     , name_{name}
     , page_deleter_{page_deleter}
@@ -203,7 +203,7 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
     , recycle_task_{}
     , metrics_{}
     , prepared_batch_{std::move(recovered_batch)}
-    , volume_trim_slot_{largest_offset_as_unique_identifier_init}
+    , volume_trim_slot_{volume_trim_slot_init}
     , largest_page_index_{page_index_init}
 {
   const PageRecyclerOptions& options = this->state_.no_lock().options;
@@ -329,21 +329,21 @@ void PageRecycler::join()
  * identifier check only for external calls.
  */
 bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids,
-                                             llfs::slot_offset_type offset_as_unique_identifier)
+                                             llfs::slot_offset_type volume_trim_slot) noexcept
 
 {
   // If we got higher offset allow recycle.
   //
-  if (this->volume_trim_slot_ < offset_as_unique_identifier) {
+  if (this->volume_trim_slot_ < volume_trim_slot) {
     // Update the largest unique identifier and index.
     //
-    this->volume_trim_slot_ = offset_as_unique_identifier;
+    this->volume_trim_slot_ = volume_trim_slot;
     this->largest_page_index_ = 0;
     return true;
   }
   // If we got same offset but not all pages were processed last time then allow recycle.
   //
-  else if (this->volume_trim_slot_ == offset_as_unique_identifier &&
+  else if (this->volume_trim_slot_ == volume_trim_slot &&
            this->largest_page_index_ < page_ids.size()) {
     return true;
   }
@@ -356,13 +356,9 @@ bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-/** \brief This function handles page recycling for external callers. This is done by first checking
- * if this request could be even be accepted (as it could be a retry from the volume-trimmer side).
- * If accepted the pages are processed starting the last pending page (which very well could be the
- * first page if its the first time).
- */
+//
 StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
-    const Slice<const PageId>& page_ids_in, llfs::slot_offset_type offset_as_unique_identifier)
+    const Slice<const PageId>& page_ids_in, llfs::slot_offset_type volume_trim_slot) noexcept
 {
   constexpr i32 depth = 0;
   // Check to make sure request was never seen before.
@@ -370,7 +366,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
   std::vector<PageId> page_ids_list(page_ids_in.begin(), page_ids_in.end());
   {
     auto locked_state = this->state_.lock();
-    if (!is_page_recycling_allowed(page_ids_in, offset_as_unique_identifier)) {
+    if (!is_page_recycling_allowed(page_ids_in, volume_trim_slot)) {
       return this->wal_device_->slot_range(LogReadMode::kDurable).upper_bound;
     }
   }
@@ -428,10 +424,10 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-/** \brief This function handles page recycling when depth is non-zero (internal calls).
- */
+//
 StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_n(const Slice<const PageId>& page_ids,
-                                                               batt::Grant* grant, const i32 depth)
+                                                               batt::Grant* grant,
+                                                               const i32 depth) noexcept
 {
   Optional<slot_offset_type> sync_point = None;
   BATT_CHECK_LT(depth, (i32)kMaxPageRefDepth) << BATT_INSPECT_RANGE(page_ids);
@@ -454,16 +450,16 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_n(const Slice<const
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-StatusOr<slot_offset_type> PageRecycler::recycle_pages(
-    const Slice<const PageId>& page_ids, llfs::slot_offset_type offset_as_unique_identifier,
-    batt::Grant* grant, i32 depth)
+StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>& page_ids,
+                                                       llfs::slot_offset_type volume_trim_slot,
+                                                       batt::Grant* grant, i32 depth) noexcept
 {
   BATT_CHECK_GE(depth, 0);
 
   LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids) << "["
                << page_ids.size() << "]"
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
-               << this->name_ << BATT_INSPECT(offset_as_unique_identifier)
+               << this->name_ << BATT_INSPECT(volume_trim_slot)
                << BATT_INSPECT(this->volume_trim_slot_)
                << BATT_INSPECT(this->last_page_recycle_offset_);
 
@@ -478,7 +474,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
                                      "specify depth == 0 and grant == nullptr; other values are "
                                      "for PageRecycler internal use only.";
 
-    return recycle_pages_depth_0(page_ids, offset_as_unique_identifier);
+    return recycle_pages_depth_0(page_ids, volume_trim_slot);
   } else {
     return recycle_pages_depth_n(page_ids, grant, depth);
   }
@@ -488,7 +484,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(
 //
 StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id,
                                                       slot_offset_type unique_offset,
-                                                      batt::Grant* grant, i32 depth)
+                                                      batt::Grant* grant, i32 depth) noexcept
 {
   std::array<PageId, 1> page_ids{page_id};
   return this->recycle_pages(batt::as_slice(page_ids), unique_offset, grant, depth);
@@ -497,8 +493,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id,
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 StatusOr<slot_offset_type> PageRecycler::insert_to_log(
-    batt::Grant& grant, PageId page_id, i32 depth, slot_offset_type offset_as_unique_identifier,
-    u16 page_index, batt::Mutex<std::unique_ptr<State>>::Lock& locked_state)
+    batt::Grant& grant, PageId page_id, i32 depth, slot_offset_type volume_trim_slot,
+    u32 page_index, batt::Mutex<std::unique_ptr<State>>::Lock& locked_state)
 {
   BATT_CHECK(locked_state.is_held());
 
@@ -513,7 +509,7 @@ StatusOr<slot_offset_type> PageRecycler::insert_to_log(
               .refresh_slot = None,
               .batch_slot = None,
               .depth = depth,
-              .offset_as_unique_identifier = offset_as_unique_identifier,
+              .volume_trim_slot = volume_trim_slot,
               .page_index = page_index,
           },
           [&](const batt::SmallVecBase<PageToRecycle*>& to_append) -> StatusOr<slot_offset_type> {
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 015848df..a5bb5c4d 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -114,24 +114,23 @@ class PageRecycler
   //
   StatusOr<slot_offset_type> recycle_pages(const Slice<const PageId>& page_ids,
                                            llfs::slot_offset_type unique_offset,
-                                           batt::Grant* grant = nullptr, i32 depth = 0);
+                                           batt::Grant* grant = nullptr, i32 depth = 0) noexcept;
 
   // Schedule a single page to be recycled.  \see recycle_pages
   //
   StatusOr<slot_offset_type> recycle_page(PageId page_id, slot_offset_type unique_offset,
-                                          batt::Grant* grant = nullptr, i32 depth = 0);
+                                          batt::Grant* grant = nullptr, i32 depth = 0) noexcept;
 
-  // Schedule a list of pages to be recycled when depth is ZERO. This is basically called by
-  // recycle_pages.
-  //
+  /** \brief Schedule a list of pages to be recycled when depth is ZERO (when called by external
+   * callers).
+   */
   StatusOr<slot_offset_type> recycle_pages_depth_0(
-      const Slice<const PageId>& page_ids_in, llfs::slot_offset_type offset_as_unique_identifier);
+      const Slice<const PageId>& page_ids_in, llfs::slot_offset_type volume_trim_slot) noexcept;
 
-  // Schedule a list of pages to be recycled when depth is non-ZERO. This is basically called by
-  // recycle_pages.
-  //
+  /** \brief Schedule a list of pages to be recycled when depth is non-ZERO.
+   */
   StatusOr<slot_offset_type> recycle_pages_depth_n(const Slice<const PageId>& page_ids_in,
-                                                   batt::Grant* grant, const i32 depth);
+                                                   batt::Grant* grant, const i32 depth) noexcept;
 
   // Waits for the given slot to be flushed to durable storage.
   //
@@ -219,23 +218,22 @@ class PageRecycler
   explicit PageRecycler(batt::TaskScheduler& scheduler, const std::string& name,
                         PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                         Optional<Batch>&& recovered_batch,
-                        std::unique_ptr<PageRecycler::State>&& state,
-                        u64 largest_offset_as_unique_identifier_init, u16 page_index_init) noexcept;
+                        std::unique_ptr<PageRecycler::State>&& state, u64 volume_trim_slot_init,
+                        u32 page_index_init) noexcept;
 
   void start_recycle_task();
 
   void recycle_task_main();
 
   bool is_page_recycling_allowed(const Slice<const PageId>& page_ids,
-                                 llfs::slot_offset_type offset_as_unique_identifier);
+                                 llfs::slot_offset_type volume_trim_slot) noexcept;
 
   // MUST be called only on the recycle task or the ctor.
   //
   void refresh_grants();
 
   StatusOr<slot_offset_type> insert_to_log(batt::Grant& grant, PageId page_id, i32 depth,
-                                           slot_offset_type offset_as_unique_identifier,
-                                           u16 page_index,
+                                           slot_offset_type volume_trim_slot, u32 page_index,
                                            batt::Mutex<std::unique_ptr<State>>::Lock& locked_state);
 
   StatusOr<Batch> prepare_batch(std::vector<PageToRecycle>&& to_recycle);
@@ -280,7 +278,7 @@ class PageRecycler
 
   slot_offset_type volume_trim_slot_;
 
-  u16 largest_page_index_;
+  u32 largest_page_index_;
 
   slot_offset_type last_page_recycle_offset_;
 };
diff --git a/src/llfs/page_recycler_events.cpp b/src/llfs/page_recycler_events.cpp
index f218d929..814212e0 100644
--- a/src/llfs/page_recycler_events.cpp
+++ b/src/llfs/page_recycler_events.cpp
@@ -19,8 +19,8 @@ std::ostream& operator<<(std::ostream& out, const PageToRecycle& t)
 {
   return out << "PageToRecycle{.page_id=" << t.page_id << ", .refresh_slot=" << t.refresh_slot
              << ", .batch_slot=" << t.batch_slot << ", .depth=" << t.depth
-             << ", offset_as_unique_identifier=" << t.offset_as_unique_identifier
-             << ", page_index=" << t.page_index << ",}";
+             << ", volume_trim_slot=" << t.volume_trim_slot << ", page_index=" << t.page_index
+             << ",}";
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
diff --git a/src/llfs/page_recycler_events.hpp b/src/llfs/page_recycler_events.hpp
index 2773ace2..32913b9c 100644
--- a/src/llfs/page_recycler_events.hpp
+++ b/src/llfs/page_recycler_events.hpp
@@ -41,11 +41,11 @@ struct PageToRecycle {
 
   // The offset given by volume trimmer.
   //
-  slot_offset_type offset_as_unique_identifier;
+  slot_offset_type volume_trim_slot;
 
   // This tracks the page index within recycle_pages request.
   //
-  u16 page_index;
+  u32 page_index;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -56,7 +56,7 @@ struct PageToRecycle {
         .refresh_slot = None,
         .batch_slot = None,
         .depth = 0,
-        .offset_as_unique_identifier = 0,
+        .volume_trim_slot = 0,
         .page_index = 0,
     };
   }
@@ -142,11 +142,10 @@ struct PackedPageToRecycle {
 
   little_page_id_int page_id;
   little_u64 batch_slot;
-  u64 offset_as_unique_identifier;
-  little_i32 depth;
-  u16 page_index;
+  little_u64 volume_trim_slot;
+  little_u32 page_index;
+  little_i24 depth;
   u8 flags;
-  u8 reserved_[1];
 };
 
 BATT_STATIC_ASSERT_EQ(32, sizeof(PackedPageToRecycle));
@@ -169,14 +168,14 @@ inline bool pack_object_to(const PageToRecycle& from, PackedPageToRecycle* to, D
   to->page_id = from.page_id.int_value();
   to->depth = from.depth;
   to->flags = 0;
-  std::memset(&to->reserved_, 0, sizeof(PackedPageToRecycle::reserved_));
+  //std::memset(&to->reserved_, 0, sizeof(PackedPageToRecycle::reserved_));
   if (from.batch_slot) {
     to->flags |= PackedPageToRecycle::kHasBatchSlot;
     to->batch_slot = *from.batch_slot;
   } else {
     to->batch_slot = 0;
   }
-  to->offset_as_unique_identifier = from.offset_as_unique_identifier;
+  to->volume_trim_slot = from.volume_trim_slot;
   to->page_index = from.page_index;
   return true;
 }
@@ -193,7 +192,7 @@ inline StatusOr<PageToRecycle> unpack_object(const PackedPageToRecycle& packed,
         return None;
       }(),
       .depth = packed.depth,
-      .offset_as_unique_identifier = packed.offset_as_unique_identifier,
+      .volume_trim_slot = packed.volume_trim_slot,
       .page_index = packed.page_index,
   };
 }
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index af23376d..23c3e9c2 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -122,7 +122,7 @@ slot_offset_type PageRecyclerRecoveryVisitor::volume_trim_offset() const
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-u16 PageRecyclerRecoveryVisitor::page_index() const
+u32 PageRecyclerRecoveryVisitor::page_index() const
 {
   return this->largest_page_index_;
 }
@@ -161,10 +161,10 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
   // Save the largest unique offset identifier. Note that if offsets are same then we need to only
   // update the largest page_index.
   //
-  if (this->volume_trim_slot_ < recovered.offset_as_unique_identifier) {
-    this->volume_trim_slot_ = recovered.offset_as_unique_identifier;
+  if (this->volume_trim_slot_ < recovered.volume_trim_slot) {
+    this->volume_trim_slot_ = recovered.volume_trim_slot;
     this->largest_page_index_ = recovered.page_index;
-  } else if (this->volume_trim_slot_ == recovered.offset_as_unique_identifier &&
+  } else if (this->volume_trim_slot_ == recovered.volume_trim_slot &&
              this->largest_page_index_ < recovered.page_index) {
     this->largest_page_index_ = recovered.page_index;
   }
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index 60c408f3..f8f5e84c 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -46,7 +46,7 @@ class PageRecyclerRecoveryVisitor
   Optional<SlotRange> latest_info_refresh_slot() const;
 
   slot_offset_type volume_trim_offset() const;
-  u16 page_index() const;
+  u32 page_index() const;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -89,7 +89,7 @@ class PageRecyclerRecoveryVisitor
 
   // This tracks the largest page_index seen so far for a given offset.
   //
-  u16 largest_page_index_;
+  u32 largest_page_index_;
 };
 
 }  // namespace llfs

From f9b088b409b090aa6f2e6eed767ba4a6908068cc Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Wed, 5 Feb 2025 16:21:14 -0500
Subject: [PATCH 28/29] part-2 of review comments

---
 src/llfs/page_recycler.cpp                  | 84 ++++++++++-----------
 src/llfs/page_recycler.hpp                  | 25 +++---
 src/llfs/page_recycler_events.cpp           |  4 +-
 src/llfs/page_recycler_events.hpp           | 42 +++++++----
 src/llfs/page_recycler_recovery_visitor.cpp | 27 ++-----
 src/llfs/page_recycler_recovery_visitor.hpp | 11 +--
 6 files changed, 96 insertions(+), 97 deletions(-)

diff --git a/src/llfs/page_recycler.cpp b/src/llfs/page_recycler.cpp
index d62e1e70..bd8683ea 100644
--- a/src/llfs/page_recycler.cpp
+++ b/src/llfs/page_recycler.cpp
@@ -81,9 +81,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-/** \brief This function determins the bytes needed for bufferd pages in the log.
- *
- */
+//
 /*static*/ u64 PageRecycler::calculate_log_size_no_padding(
     const PageRecyclerOptions& options, Optional<PageCount> max_buffered_page_count)
 {
@@ -178,8 +176,7 @@ StatusOr<SlotRange> refresh_recycler_info_slot(TypedSlotWriter<PageRecycleEvent>
 
   return std::unique_ptr<PageRecycler>{
       new PageRecycler(scheduler, std::string{name}, page_deleter, std::move(*recovered_log),
-                       std::move(latest_batch), std::move(state), visitor.volume_trim_offset(),
-                       visitor.page_index() + 1)};
+                       std::move(latest_batch), std::move(state), visitor.volume_trim_slot_info())};
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -188,8 +185,7 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
                            PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                            Optional<Batch>&& recovered_batch,
                            std::unique_ptr<PageRecycler::State>&& state,
-                           llfs::slot_offset_type volume_trim_slot_init,
-                           u32 page_index_init) noexcept
+                           VolumeTrimSlotInfo volume_trim_slot_info) noexcept
     : scheduler_{scheduler}
     , name_{name}
     , page_deleter_{page_deleter}
@@ -203,8 +199,8 @@ PageRecycler::PageRecycler(batt::TaskScheduler& scheduler, const std::string& na
     , recycle_task_{}
     , metrics_{}
     , prepared_batch_{std::move(recovered_batch)}
-    , volume_trim_slot_{volume_trim_slot_init}
-    , largest_page_index_{page_index_init}
+    , volume_trim_slot_info_{volume_trim_slot_info}
+    , last_page_recycle_offset_{}
 {
   const PageRecyclerOptions& options = this->state_.no_lock().options;
 
@@ -325,29 +321,32 @@ void PageRecycler::join()
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-/** \brief This check is to make sure this request was never seen before. Note that we do unique
- * identifier check only for external calls.
- */
+//
 bool PageRecycler::is_page_recycling_allowed(const Slice<const PageId>& page_ids,
-                                             llfs::slot_offset_type volume_trim_slot) noexcept
+                                             llfs::slot_offset_type volume_trim_slot)
 
 {
-  // If we got higher offset allow recycle.
+  VolumeTrimSlotInfo incoming_volume_trim_slot_info{volume_trim_slot,
+                                                    static_cast<u32>(page_ids.size())};
+
+  // If we got a higher trim_slot, allow recycle.
   //
-  if (this->volume_trim_slot_ < volume_trim_slot) {
-    // Update the largest unique identifier and index.
+  if (this->volume_trim_slot_info_ < incoming_volume_trim_slot_info) {
+    // If the trim_slot matches, then it's a partially executed request. Keep processing from the last
+    // pending page.
     //
-    this->volume_trim_slot_ = volume_trim_slot;
-    this->largest_page_index_ = 0;
-    return true;
-  }
-  // If we got same offset but not all pages were processed last time then allow recycle.
-  //
-  else if (this->volume_trim_slot_ == volume_trim_slot &&
-           this->largest_page_index_ < page_ids.size()) {
-    return true;
+    if (this->volume_trim_slot_info_.volume_trim_slot ==
+        incoming_volume_trim_slot_info.volume_trim_slot) {
+      return true;
+    }
+    // We got a new trim_slot altogether. Update internal trim_slot and reset page_index.
+    //
+    else {
+      this->volume_trim_slot_info_ = VolumeTrimSlotInfo{volume_trim_slot, 0};
+      return true;
+    }
   }
-  // Look like this is a repost of some old request thus update metric and do not allow recycle.
+  // We got a repost of the same recycle_pages request. Don't allow.
   //
   else {
     this->global_metrics().page_id_deletion_reissue_count.fetch_add(1);
@@ -374,11 +373,11 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
   // We need to include the pages which are not processed yet.
   //
   std::sort(page_ids_list.begin(), page_ids_list.end());
-  page_ids_list.erase(page_ids_list.begin(), page_ids_list.begin() + this->largest_page_index_);
+  page_ids_list.erase(page_ids_list.begin(),
+                      page_ids_list.begin() + this->volume_trim_slot_info_.page_index);
 
   LLFS_VLOG(1) << "Sorted slice: " << BATT_INSPECT(page_ids_list.size())
-               << BATT_INSPECT(this->largest_page_index_);
-
+               << BATT_INSPECT(this->volume_trim_slot_info_.page_index);
   const Slice<const PageId> page_ids = batt::as_slice(page_ids_list);
   Optional<slot_offset_type> sync_point = None;
   const PageRecyclerOptions& options = this->state_.no_lock().options;
@@ -407,12 +406,11 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
     {
       auto locked_state = this->state_.lock();
       // Writing to recycler log.
-      StatusOr<slot_offset_type> append_slot =
-          this->insert_to_log(*local_grant, page_id, depth, this->volume_trim_slot_,
-                              this->largest_page_index_, locked_state);
+      StatusOr<slot_offset_type> append_slot = this->insert_to_log(
+          *local_grant, page_id, depth, this->volume_trim_slot_info_, locked_state);
       BATT_REQUIRE_OK(append_slot);
 
-      ++this->largest_page_index_;
+      ++this->volume_trim_slot_info_.page_index;
       this->last_page_recycle_offset_ = *append_slot;
 
       clamp_min_slot(&sync_point, *append_slot);
@@ -426,7 +424,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_0(
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_n(const Slice<const PageId>& page_ids,
-                                                               batt::Grant* grant,
+                                                               batt::Grant& grant,
                                                                const i32 depth) noexcept
 {
   Optional<slot_offset_type> sync_point = None;
@@ -435,8 +433,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages_depth_n(const Slice<const
   auto locked_state = this->state_.lock();
   for (PageId page_id : page_ids) {
     // Writing to recycler log.
-    StatusOr<slot_offset_type> append_slot = this->insert_to_log(
-        *grant, page_id, depth, this->volume_trim_slot_, this->largest_page_index_, locked_state);
+    StatusOr<slot_offset_type> append_slot =
+        this->insert_to_log(grant, page_id, depth, this->volume_trim_slot_info_, locked_state);
     BATT_REQUIRE_OK(append_slot);
 
     this->last_page_recycle_offset_ = *append_slot;
@@ -459,8 +457,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>
   LLFS_VLOG(1) << "PageRecycler::recycle_pages(page_ids=" << batt::dump_range(page_ids) << "["
                << page_ids.size() << "]"
                << ", grant=[" << (grant ? grant->size() : usize{0}) << "], depth=" << depth << ") "
-               << this->name_ << BATT_INSPECT(volume_trim_slot)
-               << BATT_INSPECT(this->volume_trim_slot_)
+               << this->name_ << BATT_INSPECT(this->volume_trim_slot_info_.volume_trim_slot)
+               << BATT_INSPECT(this->volume_trim_slot_info_.page_index)
                << BATT_INSPECT(this->last_page_recycle_offset_);
 
   if (page_ids.empty()) {
@@ -476,7 +474,7 @@ StatusOr<slot_offset_type> PageRecycler::recycle_pages(const Slice<const PageId>
 
     return recycle_pages_depth_0(page_ids, volume_trim_slot);
   } else {
-    return recycle_pages_depth_n(page_ids, grant, depth);
+    return recycle_pages_depth_n(page_ids, *grant, depth);
   }
 }
 
@@ -493,8 +491,8 @@ StatusOr<slot_offset_type> PageRecycler::recycle_page(PageId page_id,
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
 StatusOr<slot_offset_type> PageRecycler::insert_to_log(
-    batt::Grant& grant, PageId page_id, i32 depth, slot_offset_type volume_trim_slot,
-    u32 page_index, batt::Mutex<std::unique_ptr<State>>::Lock& locked_state)
+    batt::Grant& grant, PageId page_id, i32 depth, const VolumeTrimSlotInfo& volume_trim_slot_info,
+    batt::Mutex<std::unique_ptr<State>>::Lock& locked_state) noexcept
 {
   BATT_CHECK(locked_state.is_held());
 
@@ -509,8 +507,8 @@ StatusOr<slot_offset_type> PageRecycler::insert_to_log(
               .refresh_slot = None,
               .batch_slot = None,
               .depth = depth,
-              .volume_trim_slot = volume_trim_slot,
-              .page_index = page_index,
+              .volume_trim_slot_info{volume_trim_slot_info.volume_trim_slot,
+                                     volume_trim_slot_info.page_index + 1},
           },
           [&](const batt::SmallVecBase<PageToRecycle*>& to_append) -> StatusOr<slot_offset_type> {
             if (to_append.empty()) {
diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index a5bb5c4d..83876333 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -67,6 +67,9 @@ class PageRecycler
   static u64 calculate_log_size(const PageRecyclerOptions& options,
                                 Optional<PageCount> max_buffered_page_count = None);
 
+  /** \brief This function determins the bytes needed for bufferd pages in the log. Part of the
+   * calculation is based on number of buffered-pages and total-page-grant size.
+   */
   static u64 calculate_log_size_no_padding(const PageRecyclerOptions& options,
                                            Optional<PageCount> max_buffered_page_count = None);
 
@@ -130,7 +133,7 @@ class PageRecycler
   /** \brief Schedule a list of pages to be recycled when depth is non-ZERO.
    */
   StatusOr<slot_offset_type> recycle_pages_depth_n(const Slice<const PageId>& page_ids_in,
-                                                   batt::Grant* grant, const i32 depth) noexcept;
+                                                   batt::Grant& grant, const i32 depth) noexcept;
 
   // Waits for the given slot to be flushed to durable storage.
   //
@@ -218,23 +221,27 @@ class PageRecycler
   explicit PageRecycler(batt::TaskScheduler& scheduler, const std::string& name,
                         PageDeleter& page_deleter, std::unique_ptr<LogDevice>&& wal_device,
                         Optional<Batch>&& recovered_batch,
-                        std::unique_ptr<PageRecycler::State>&& state, u64 volume_trim_slot_init,
-                        u32 page_index_init) noexcept;
+                        std::unique_ptr<PageRecycler::State>&& state,
+                        VolumeTrimSlotInfo volume_trim_slot_info) noexcept;
 
   void start_recycle_task();
 
   void recycle_task_main();
 
+  /** \brief This check is to make sure this request was never seen before. Note that we do this
+   * check only for external calls.
+   */
   bool is_page_recycling_allowed(const Slice<const PageId>& page_ids,
-                                 llfs::slot_offset_type volume_trim_slot) noexcept;
+                                 llfs::slot_offset_type volume_trim_slot);
 
   // MUST be called only on the recycle task or the ctor.
   //
   void refresh_grants();
 
-  StatusOr<slot_offset_type> insert_to_log(batt::Grant& grant, PageId page_id, i32 depth,
-                                           slot_offset_type volume_trim_slot, u32 page_index,
-                                           batt::Mutex<std::unique_ptr<State>>::Lock& locked_state);
+  StatusOr<slot_offset_type> insert_to_log(
+      batt::Grant& grant, PageId page_id, i32 depth,
+      const VolumeTrimSlotInfo& volume_trim_slot_info,
+      batt::Mutex<std::unique_ptr<State>>::Lock& locked_state) noexcept;
 
   StatusOr<Batch> prepare_batch(std::vector<PageToRecycle>&& to_recycle);
 
@@ -276,9 +283,7 @@ class PageRecycler
 
   Optional<slot_offset_type> latest_batch_upper_bound_;
 
-  slot_offset_type volume_trim_slot_;
-
-  u32 largest_page_index_;
+  VolumeTrimSlotInfo volume_trim_slot_info_;
 
   slot_offset_type last_page_recycle_offset_;
 };
diff --git a/src/llfs/page_recycler_events.cpp b/src/llfs/page_recycler_events.cpp
index 814212e0..7140b1b8 100644
--- a/src/llfs/page_recycler_events.cpp
+++ b/src/llfs/page_recycler_events.cpp
@@ -19,8 +19,8 @@ std::ostream& operator<<(std::ostream& out, const PageToRecycle& t)
 {
   return out << "PageToRecycle{.page_id=" << t.page_id << ", .refresh_slot=" << t.refresh_slot
              << ", .batch_slot=" << t.batch_slot << ", .depth=" << t.depth
-             << ", volume_trim_slot=" << t.volume_trim_slot << ", page_index=" << t.page_index
-             << ",}";
+             << ", volume_trim_slot=" << t.volume_trim_slot_info.volume_trim_slot
+             << ", page_index=" << t.volume_trim_slot_info.page_index << ",}";
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
diff --git a/src/llfs/page_recycler_events.hpp b/src/llfs/page_recycler_events.hpp
index 32913b9c..e4936f0d 100644
--- a/src/llfs/page_recycler_events.hpp
+++ b/src/llfs/page_recycler_events.hpp
@@ -22,6 +22,29 @@
 
 namespace llfs {
 
+/** \brief This is to track volume_trim_slot and page_index values for PageRecycler so that it could
+ * detect a re-issue of recycle_pages request by an external caller (like volume-trimmer side).
+ * If a duplicate request was deetcted, recycler skips adding it to its internal work queue.
+ * 'volume_trim_slot' is an ever increasing value. 'page_index' is used to allow resume of a
+ * partially executed request.
+ */
+struct VolumeTrimSlotInfo {
+  // The offset given by volume trimmer.
+  //
+  slot_offset_type volume_trim_slot;
+
+  // This tracks the page index within recycle_pages request.
+  //
+  u32 page_index;
+
+  bool operator<(const VolumeTrimSlotInfo& other) const
+  {
+    return slot_less_than(this->volume_trim_slot, other.volume_trim_slot) ||
+           (this->volume_trim_slot == other.volume_trim_slot &&
+            this->page_index < other.page_index);
+  }
+};
+
 struct PageToRecycle {
   // Which page to recycle.
   //
@@ -39,13 +62,7 @@ struct PageToRecycle {
   //
   i32 depth;
 
-  // The offset given by volume trimmer.
-  //
-  slot_offset_type volume_trim_slot;
-
-  // This tracks the page index within recycle_pages request.
-  //
-  u32 page_index;
+  VolumeTrimSlotInfo volume_trim_slot_info;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -56,8 +73,7 @@ struct PageToRecycle {
         .refresh_slot = None,
         .batch_slot = None,
         .depth = 0,
-        .volume_trim_slot = 0,
-        .page_index = 0,
+        .volume_trim_slot_info{0, 0},
     };
   }
 
@@ -168,15 +184,14 @@ inline bool pack_object_to(const PageToRecycle& from, PackedPageToRecycle* to, D
   to->page_id = from.page_id.int_value();
   to->depth = from.depth;
   to->flags = 0;
-  //std::memset(&to->reserved_, 0, sizeof(PackedPageToRecycle::reserved_));
   if (from.batch_slot) {
     to->flags |= PackedPageToRecycle::kHasBatchSlot;
     to->batch_slot = *from.batch_slot;
   } else {
     to->batch_slot = 0;
   }
-  to->volume_trim_slot = from.volume_trim_slot;
-  to->page_index = from.page_index;
+  to->volume_trim_slot = from.volume_trim_slot_info.volume_trim_slot;
+  to->page_index = from.volume_trim_slot_info.page_index;
   return true;
 }
 
@@ -192,8 +207,7 @@ inline StatusOr<PageToRecycle> unpack_object(const PackedPageToRecycle& packed,
         return None;
       }(),
       .depth = packed.depth,
-      .volume_trim_slot = packed.volume_trim_slot,
-      .page_index = packed.page_index,
+      .volume_trim_slot_info = VolumeTrimSlotInfo{packed.volume_trim_slot, packed.page_index},
   };
 }
 
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index 23c3e9c2..4efa1224 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -29,8 +29,7 @@ namespace llfs {
     , latest_batch_{}
     , recycler_uuid_{boost::uuids::random_generator{}()}
     , latest_info_refresh_slot_{}
-    , volume_trim_slot_{0}
-    , largest_page_index_{0}
+    , volume_trim_slot_info_{}
 {
   initialize_status_codes();
   LLFS_VLOG(1) << "PageRecyclerRecoveryVisitor()";
@@ -115,16 +114,9 @@ Optional<SlotRange> PageRecyclerRecoveryVisitor::latest_info_refresh_slot() cons
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
 //
-slot_offset_type PageRecyclerRecoveryVisitor::volume_trim_offset() const
+VolumeTrimSlotInfo PageRecyclerRecoveryVisitor::volume_trim_slot_info() const
 {
-  return this->volume_trim_slot_;
-}
-
-//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
-//
-u32 PageRecyclerRecoveryVisitor::page_index() const
-{
-  return this->largest_page_index_;
+  return this->volume_trim_slot_info_;
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -133,8 +125,9 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
                                                const PageToRecycle& recovered)
 {
   LLFS_VLOG(1) << "Recovered slot: " << slot.offset << " " << recovered
-               << BATT_INSPECT((bool)this->latest_batch_) << BATT_INSPECT(this->volume_trim_slot_)
-               << BATT_INSPECT(this->largest_page_index_);
+               << BATT_INSPECT((bool)this->latest_batch_)
+               << BATT_INSPECT(this->volume_trim_slot_info_.volume_trim_slot)
+               << BATT_INSPECT(this->volume_trim_slot_info_.page_index);
 
   // Add the new record, or verify that the existing one and the new one are equivalent.
   //
@@ -161,12 +154,8 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
   // Save the largest unique offset identifier. Note that if offsets are same then we need to only
   // update the largest page_index.
   //
-  if (this->volume_trim_slot_ < recovered.volume_trim_slot) {
-    this->volume_trim_slot_ = recovered.volume_trim_slot;
-    this->largest_page_index_ = recovered.page_index;
-  } else if (this->volume_trim_slot_ == recovered.volume_trim_slot &&
-             this->largest_page_index_ < recovered.page_index) {
-    this->largest_page_index_ = recovered.page_index;
+  if (this->volume_trim_slot_info_ < recovered.volume_trim_slot_info) {
+    this->volume_trim_slot_info_ = recovered.volume_trim_slot_info;
   }
 
   PageToRecycle& to_recycle = iter->second;
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index f8f5e84c..78c168c8 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -45,8 +45,7 @@ class PageRecyclerRecoveryVisitor
 
   Optional<SlotRange> latest_info_refresh_slot() const;
 
-  slot_offset_type volume_trim_offset() const;
-  u32 page_index() const;
+  VolumeTrimSlotInfo volume_trim_slot_info() const;
 
   //+++++++++++-+-+--+----- --- -- -  -  -   -
 
@@ -83,13 +82,7 @@ class PageRecyclerRecoveryVisitor
    */
   Optional<SlotRange> latest_info_refresh_slot_;
 
-  // This is to track largest unique_offset value during recovery.
-  //
-  slot_offset_type volume_trim_slot_;
-
-  // This tracks the largest page_index seen so far for a given offset.
-  //
-  u32 largest_page_index_;
+  VolumeTrimSlotInfo volume_trim_slot_info_;
 };
 
 }  // namespace llfs

From 5cc6b10cc80173a24b35aadb10e81854f7bbc2be Mon Sep 17 00:00:00 2001
From: Bhaskar Bora <bbora@ah-bbora-l.dhcp.mathworks.com>
Date: Thu, 13 Feb 2025 19:02:26 -0500
Subject: [PATCH 29/29] updated some comments, made recovery variable Optional

---
 src/llfs/page_recycler.hpp                  | 15 ++++++++-------
 src/llfs/page_recycler_events.hpp           |  8 ++++----
 src/llfs/page_recycler_recovery_visitor.cpp | 18 +++++++++++-------
 src/llfs/page_recycler_recovery_visitor.hpp |  4 +++-
 4 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/src/llfs/page_recycler.hpp b/src/llfs/page_recycler.hpp
index 83876333..3d28ba87 100644
--- a/src/llfs/page_recycler.hpp
+++ b/src/llfs/page_recycler.hpp
@@ -56,7 +56,7 @@ class PageRecycler
   };
 
   struct GlobalMetrics {
-    CountMetric<u32> page_id_deletion_reissue_count{0};
+    CountMetric<u64> page_id_deletion_reissue_count{0};
   };
   static GlobalMetrics& global_metrics();
 
@@ -67,12 +67,6 @@ class PageRecycler
   static u64 calculate_log_size(const PageRecyclerOptions& options,
                                 Optional<PageCount> max_buffered_page_count = None);
 
-  /** \brief This function determins the bytes needed for bufferd pages in the log. Part of the
-   * calculation is based on number of buffered-pages and total-page-grant size.
-   */
-  static u64 calculate_log_size_no_padding(const PageRecyclerOptions& options,
-                                           Optional<PageCount> max_buffered_page_count = None);
-
   static PageCount calculate_max_buffered_page_count(const PageRecyclerOptions& options,
                                                      u64 log_size);
 
@@ -234,6 +228,13 @@ class PageRecycler
   bool is_page_recycling_allowed(const Slice<const PageId>& page_ids,
                                  llfs::slot_offset_type volume_trim_slot);
 
+  /** \brief This function determines the minimum log size based on the passed-in options. The
+   * calculation is based on the passed-in number of pages that the log needs to contain and a
+   * fixed space needed for the recycler to handle its tasks.
+   */
+  static u64 calculate_log_size_no_padding(const PageRecyclerOptions& options,
+                                           Optional<PageCount> max_buffered_page_count);
+
   // MUST be called only on the recycle task or the ctor.
   //
   void refresh_grants();
diff --git a/src/llfs/page_recycler_events.hpp b/src/llfs/page_recycler_events.hpp
index e4936f0d..89d718f6 100644
--- a/src/llfs/page_recycler_events.hpp
+++ b/src/llfs/page_recycler_events.hpp
@@ -23,10 +23,10 @@
 namespace llfs {
 
 /** \brief This is to track volume_trim_slot and page_index values for PageRecycler so that it could
- * detect a re-issue of recycle_pages request by an external caller (like volume-trimmer side).
- * If a duplicate request was deetcted, recycler skips adding it to its internal work queue.
- * 'volume_trim_slot' is an ever increasing value. 'page_index' is used to allow resume of a
- * partially executed request.
+ * detect re-issue of recycle_pages request by an external caller (like volume-trimmer).
+ * If a duplicate request is detected, the recycler skips adding it to its internal work queue.
+ * 'volume_trim_slot' is an ever increasing value. 'page_index' is used to resume a partially
+ * executed request.
  */
 struct VolumeTrimSlotInfo {
   // The offset given by volume trimmer.
diff --git a/src/llfs/page_recycler_recovery_visitor.cpp b/src/llfs/page_recycler_recovery_visitor.cpp
index 4efa1224..fb5a7f73 100644
--- a/src/llfs/page_recycler_recovery_visitor.cpp
+++ b/src/llfs/page_recycler_recovery_visitor.cpp
@@ -29,7 +29,6 @@ namespace llfs {
     , latest_batch_{}
     , recycler_uuid_{boost::uuids::random_generator{}()}
     , latest_info_refresh_slot_{}
-    , volume_trim_slot_info_{}
 {
   initialize_status_codes();
   LLFS_VLOG(1) << "PageRecyclerRecoveryVisitor()";
@@ -116,7 +115,10 @@ Optional<SlotRange> PageRecyclerRecoveryVisitor::latest_info_refresh_slot() cons
 //
 VolumeTrimSlotInfo PageRecyclerRecoveryVisitor::volume_trim_slot_info() const
 {
-  return this->volume_trim_slot_info_;
+  if (this->volume_trim_slot_info_) {
+    return *this->volume_trim_slot_info_;
+  }
+  return VolumeTrimSlotInfo{};
 }
 
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- -  -  -   -
@@ -126,8 +128,8 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
 {
   LLFS_VLOG(1) << "Recovered slot: " << slot.offset << " " << recovered
                << BATT_INSPECT((bool)this->latest_batch_)
-               << BATT_INSPECT(this->volume_trim_slot_info_.volume_trim_slot)
-               << BATT_INSPECT(this->volume_trim_slot_info_.page_index);
+               << BATT_INSPECT(this->volume_trim_slot_info_->volume_trim_slot)
+               << BATT_INSPECT(this->volume_trim_slot_info_->page_index);
 
   // Add the new record, or verify that the existing one and the new one are equivalent.
   //
@@ -151,10 +153,12 @@ Status PageRecyclerRecoveryVisitor::operator()(const SlotParse& slot,
     existing_record.batch_slot = recovered.batch_slot;
   }
 
-  // Save the largest unique offset identifier. Note that if offsets are same then we need to only
-  // update the largest page_index.
+  // Save the largest unique offset identifier. If it's the first offset we read from log then just
+  // initialize 'volume_trim_slot_info_'.
   //
-  if (this->volume_trim_slot_info_ < recovered.volume_trim_slot_info) {
+  if (!this->volume_trim_slot_info_) {
+    this->volume_trim_slot_info_ = recovered.volume_trim_slot_info;
+  } else if (*this->volume_trim_slot_info_ < recovered.volume_trim_slot_info) {
     this->volume_trim_slot_info_ = recovered.volume_trim_slot_info;
   }
 
diff --git a/src/llfs/page_recycler_recovery_visitor.hpp b/src/llfs/page_recycler_recovery_visitor.hpp
index 78c168c8..83096e95 100644
--- a/src/llfs/page_recycler_recovery_visitor.hpp
+++ b/src/llfs/page_recycler_recovery_visitor.hpp
@@ -82,7 +82,9 @@ class PageRecyclerRecoveryVisitor
    */
   Optional<SlotRange> latest_info_refresh_slot_;
 
-  VolumeTrimSlotInfo volume_trim_slot_info_;
+  /** \brief Tracks the highest volume-trim-slot during recovery.
+   */
+  Optional<VolumeTrimSlotInfo> volume_trim_slot_info_;
 };
 
 }  // namespace llfs