@@ -51,6 +51,7 @@ AllocationClass::AllocationClass(ClassId classId,
      allocationSize_(allocSize),
      slabAlloc_(s),
      freedAllocations_{slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()} {
+  curAllocatedSlabs_ = allocatedSlabs_.size();
  checkState();
}

@@ -87,6 +88,12 @@ void AllocationClass::checkState() const {
        "Current allocation slab {} is not in allocated slabs list",
        currSlab_));
  }
+
+  if (curAllocatedSlabs_ != allocatedSlabs_.size()) {
+    throw std::invalid_argument(folly::sformat(
+        "Mismatch in the number of allocated slabs"
+    ));
+  }
}

// TODO(stuclar): Add poolId to the metadata to be serialized when cache shuts
@@ -116,10 +123,12 @@ AllocationClass::AllocationClass(
    freeSlabs_.push_back(slabAlloc_.getSlabForIdx(freeSlabIdx));
  }

+  curAllocatedSlabs_ = allocatedSlabs_.size();
  checkState();
}

void AllocationClass::addSlabLocked(Slab* slab) {
+  curAllocatedSlabs_.fetch_add(1, std::memory_order_relaxed);
  canAllocate_ = true;
  auto header = slabAlloc_.getSlabHeader(slab);
  header->classId = classId_;
@@ -168,6 +177,7 @@ void* AllocationClass::allocateLocked() {
  }

  XDCHECK(canAllocate_);
+  curAllocatedSize_.fetch_add(getAllocSize(), std::memory_order_relaxed);

  // grab from the free list if possible.
  if (!freedAllocations_.empty()) {
@@ -270,6 +280,7 @@ SlabReleaseContext AllocationClass::startSlabRelease(
        slab, getId()));
  }
  *allocIt = allocatedSlabs_.back();
+  curAllocatedSlabs_.fetch_sub(1, std::memory_order_relaxed);
  allocatedSlabs_.pop_back();

  // if slab is being carved currently, then update slabReleaseAllocMap
@@ -510,6 +521,7 @@ void AllocationClass::abortSlabRelease(const SlabReleaseContext& context) {
  }
  slabReleaseAllocMap_.erase(slabPtrVal);
  allocatedSlabs_.push_back(const_cast<Slab*>(slab));
+  curAllocatedSlabs_.fetch_add(1, std::memory_order_relaxed);
  // restore the classId and allocSize
  header->classId = classId_;
  header->allocSize = allocationSize_;
@@ -660,6 +672,8 @@ void AllocationClass::free(void* memory) {
    freedAllocations_.insert(*reinterpret_cast<FreeAlloc*>(memory));
    canAllocate_ = true;
  });
+
+  curAllocatedSize_.fetch_sub(getAllocSize(), std::memory_order_relaxed);
}

serialization::AllocationClassObject AllocationClass::saveState() const {
@@ -722,3 +736,12 @@ std::vector<bool>& AllocationClass::getSlabReleaseAllocMapLocked(
  const auto slabPtrVal = getSlabPtrValue(slab);
  return slabReleaseAllocMap_.at(slabPtrVal);
}
+
+double AllocationClass::approxFreePercentage() const {
+  if (getNumSlabs() == 0) {
+    return 100.0;
+  }
+
+  return 100.0 - 100.0 * static_cast<double>(curAllocatedSize_.load(std::memory_order_relaxed)) /
+                     static_cast<double>(getNumSlabs() * Slab::kSize);
+}
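For reference, below is a minimal standalone sketch of the bookkeeping pattern this diff introduces: relaxed atomic counters bumped on slab add/release and on allocate/free, plus a derived free-percentage stat shaped like `approxFreePercentage()` above. The class name `AllocStats`, its methods, and the 4 MB slab size are assumptions for illustration only; this is not the CacheLib implementation.

```cpp
// Sketch only (hypothetical names, assumed 4 MB slab size), showing
// approximate "free %" bookkeeping with relaxed atomic counters.
#include <atomic>
#include <cstdint>
#include <cstdio>

class AllocStats {
 public:
  explicit AllocStats(uint32_t allocSize) : allocSize_(allocSize) {}

  void onSlabAdded() { numSlabs_.fetch_add(1, std::memory_order_relaxed); }
  void onSlabReleased() { numSlabs_.fetch_sub(1, std::memory_order_relaxed); }
  void onAlloc() { allocatedBytes_.fetch_add(allocSize_, std::memory_order_relaxed); }
  void onFree() { allocatedBytes_.fetch_sub(allocSize_, std::memory_order_relaxed); }

  // Mirrors the shape of the diff's approxFreePercentage(): 100% when no
  // slabs are owned, otherwise the share of owned slab bytes not handed out.
  // The value is approximate because the two loads are not taken together
  // under a lock.
  double approxFreePercentage() const {
    const uint64_t slabs = numSlabs_.load(std::memory_order_relaxed);
    if (slabs == 0) {
      return 100.0;
    }
    const double allocated =
        static_cast<double>(allocatedBytes_.load(std::memory_order_relaxed));
    return 100.0 - 100.0 * allocated / static_cast<double>(slabs * kSlabSize);
  }

 private:
  static constexpr uint64_t kSlabSize = 4 * 1024 * 1024;  // assumed slab size
  const uint32_t allocSize_;
  std::atomic<uint64_t> numSlabs_{0};
  std::atomic<uint64_t> allocatedBytes_{0};
};

int main() {
  AllocStats stats{64};  // hypothetical 64-byte allocation class
  stats.onSlabAdded();
  for (int i = 0; i < 1000; ++i) {
    stats.onAlloc();
  }
  std::printf("approx free: %.4f%%\n", stats.approxFreePercentage());
  return 0;
}
```

The relaxed memory order keeps the counters cheap on the allocation fast path; since the result is only a statistic, a reader observing slightly stale counts is acceptable.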