/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/CPortability.h>
#include <folly/Likely.h>
#include <folly/Random.h>
#include <folly/ScopeGuard.h>
#include <folly/logging/xlog.h>
#include <folly/synchronization/SanitizeThread.h>
#include <folly/hash/Hash.h>
#include <folly/container/F14Map.h>
#include <gtest/gtest.h>
#include <chrono>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <stdexcept>
#include <utility>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <folly/Format.h>
#include <folly/Range.h>
#pragma GCC diagnostic pop
#include "cachelib/allocator/BackgroundMover.h"
#include "cachelib/allocator/CCacheManager.h"
#include "cachelib/allocator/Cache.h"
#include "cachelib/allocator/CacheAllocatorConfig.h"
#include "cachelib/allocator/CacheChainedItemIterator.h"
#include "cachelib/allocator/CacheItem.h"
#include "cachelib/allocator/CacheStats.h"
#include "cachelib/allocator/CacheStatsInternal.h"
#include "cachelib/allocator/CacheTraits.h"
#include "cachelib/allocator/CacheVersion.h"
#include "cachelib/allocator/ChainedAllocs.h"
#include "cachelib/allocator/ICompactCache.h"
#include "cachelib/allocator/KAllocation.h"
#include "cachelib/allocator/MemoryMonitor.h"
#include "cachelib/allocator/NvmAdmissionPolicy.h"
#include "cachelib/allocator/NvmCacheState.h"
#include "cachelib/allocator/PoolOptimizeStrategy.h"
#include "cachelib/allocator/PoolOptimizer.h"
#include "cachelib/allocator/PoolRebalancer.h"
#include "cachelib/allocator/PoolResizer.h"
#include "cachelib/allocator/PrivateMemoryManager.h"
#include "cachelib/allocator/ReadOnlySharedCacheView.h"
#include "cachelib/allocator/Reaper.h"
#include "cachelib/allocator/RebalanceStrategy.h"
#include "cachelib/allocator/Refcount.h"
#include "cachelib/allocator/TempShmMapping.h"
#include "cachelib/allocator/TlsActiveItemRing.h"
#include "cachelib/allocator/TypedHandle.h"
#include "cachelib/allocator/Util.h"
#include "cachelib/allocator/memory/MemoryAllocator.h"
#include "cachelib/allocator/memory/MemoryAllocatorStats.h"
#include "cachelib/allocator/memory/serialize/gen-cpp2/objects_types.h"
#include "cachelib/allocator/nvmcache/NvmCache.h"
#include "cachelib/allocator/serialize/gen-cpp2/objects_types.h"
#include "cachelib/common/Exceptions.h"
#include "cachelib/common/Hash.h"
#include "cachelib/common/Mutex.h"
#include "cachelib/common/PeriodicWorker.h"
#include "cachelib/common/Serialization.h"
#include "cachelib/common/Throttler.h"
#include "cachelib/common/Time.h"
#include "cachelib/common/Utils.h"
#include "cachelib/shm/ShmManager.h"
namespace facebook {
namespace cachelib {
template <typename AllocatorT>
class FbInternalRuntimeUpdateWrapper;
template <typename K, typename V, typename C>
class ReadOnlyMap;
namespace objcache2 {
template <typename AllocatorT>
class ObjectCache;
template <typename AllocatorT>
class ObjectCacheBase;
} // namespace objcache2
namespace cachebench {
template <typename Allocator>
class Cache;
namespace tests {
class CacheTest;
}
} // namespace cachebench
namespace tests {
template <typename AllocatorT>
class BaseAllocatorTest;
template <typename AllocatorT>
class AllocatorHitStatsTest;
template <typename AllocatorT>
class AllocatorResizeTest;
template <typename AllocatorT>
class FixedSizeArrayTest;
template <typename AllocatorT>
class MapTest;
class NvmCacheTest;
template <typename AllocatorT>
class PoolOptimizeStrategyTest;
class NvmAdmissionPolicyTest;
class CacheAllocatorTestWrapper;
class PersistenceCache;
} // namespace tests
namespace objcache {
template <typename CacheDescriptor, typename AllocatorRes>
class ObjectCache;
namespace test {
#define GET_CLASS_NAME(test_case_name, test_name) \
test_case_name##_##test_name##_Test
#define GET_DECORATED_CLASS_NAME(namespace, test_case_name, test_name) \
namespace ::GET_CLASS_NAME(test_case_name, test_name)
class GET_CLASS_NAME(ObjectCache, ObjectHandleInvalid);
} // namespace test
} // namespace objcache
// CacheAllocator provides an interface to make keyed allocations (Item) and
// takes two templated types that control how the allocation is
// maintained (MMType aka MemoryManagementType) and accessed (AccessType). The
// cache allocator internally has an allocator that it interacts with to make
// allocations. All active allocations are put into the AccessContainer and
// the MMContainer for maintenance. When the cache is full, allocations are
// garbage collected by the implementation of MMType.
//
// The MMType is used for keeping track of allocations that are currently
// under the control of cache allocator. The MMType is required to provide a
// data structure MMContainer with a well defined interface. For example,
// check MMLru.h's Container (TODO use boost concepts to enforce the
// interface if possible and have it defined in a .h file). The MMType must
// provide a Hook type that will be used to instrument the resultant Item to
// be compatible for use with the MMContainer similar to a boost intrusive
// member hook. MMType::Hook must be sufficient for MMType::Container<T> to
// operate. The MMContainer is expected to implement interfaces to
// add/remove/evict/recordAccess a T& object into the container. This allows
// us to change/abstract away the memory management implementation of the
// cache from the other parts of the cache.
//
// Similar to the MMType, the AccessType is an intrusive data type that
// provides a container to access the keyed allocations. AccessType must
// provide an AccessType::Hook and AccessType::Container with
// find/insert/remove interface similar to a hash table.
//
template <typename CacheTrait>
class CacheAllocator : public CacheBase {
public:
using CacheT = CacheAllocator<CacheTrait>;
using MMType = typename CacheTrait::MMType;
using AccessType = typename CacheTrait::AccessType;
using Config = CacheAllocatorConfig<CacheT>;
// configs for the MMtype and AccessType.
using MMConfig = typename MMType::Config;
using AccessConfig = typename AccessType::Config;
using Item = CacheItem<CacheTrait>;
using ChainedItem = typename Item::ChainedItem;
// the holder for the item when we hand it to the caller. This ensures
// that the reference count is maintained when the caller is done with the
// item. The ReadHandle/WriteHandle provides a getMemory() and getKey()
// interface. The caller is free to use the result of these two as long as the
// handle is active/alive. Using the result of the above interfaces after
// destroying the ReadHandle/WriteHandle is UB. The ReadHandle/WriteHandle
// safely wraps a pointer to the "const Item"/"Item".
using ReadHandle = typename Item::ReadHandle;
using WriteHandle = typename Item::WriteHandle;
// Following is deprecated as of allocator version 17 and this line will be
// removed at a future date
// using ItemHandle = WriteHandle;
template <typename UserType,
typename Converter =
detail::DefaultUserTypeConverter<Item, UserType>>
using TypedHandle = TypedHandleImpl<Item, UserType, Converter>;
// TODO (sathya) some types take CacheT and some take CacheTrait. need to
// clean this up and come up with a consistent policy that is intuitive.
using ChainedItemIter = CacheChainedItemIterator<CacheT, const Item>;
using WritableChainedItemIter = CacheChainedItemIterator<CacheT, Item>;
using ChainedAllocs = CacheChainedAllocs<CacheT, ReadHandle, ChainedItemIter>;
using WritableChainedAllocs =
CacheChainedAllocs<CacheT, WriteHandle, WritableChainedItemIter>;
using Key = typename Item::Key;
using PoolIds = std::set<PoolId>;
using EventTracker = EventInterface<Key>;
// SampleItem is a wrapper for the CacheItem which is provided as the sample
// for uploading to Scuba (see ItemStatsExporter). It is guaranteed that the
// CacheItem is accessible as long as the SampleItem is around since the
// internal resource (e.g., ref counts, buffer) will be managed by the iobuf
class SampleItem {
public:
explicit SampleItem(bool fromNvm) : fromNvm_{fromNvm} {}
SampleItem(folly::IOBuf&& iobuf, const AllocInfo& allocInfo, bool fromNvm)
: iobuf_{std::move(iobuf)}, allocInfo_{allocInfo}, fromNvm_{fromNvm} {}
SampleItem(folly::IOBuf&& iobuf,
PoolId poolId,
ClassId classId,
size_t allocSize,
bool fromNvm)
: SampleItem(std::move(iobuf),
AllocInfo{poolId, classId, allocSize},
fromNvm) {}
const Item* operator->() const noexcept { return get(); }
const Item& operator*() const noexcept { return *get(); }
[[nodiscard]] const Item* get() const noexcept {
return reinterpret_cast<const Item*>(iobuf_.data());
}
[[nodiscard]] bool isValid() const { return !iobuf_.empty(); }
[[nodiscard]] bool isNvmItem() const { return fromNvm_; }
[[nodiscard]] const AllocInfo& getAllocInfo() const { return allocInfo_; }
private:
folly::IOBuf iobuf_;
AllocInfo allocInfo_{};
bool fromNvm_ = false;
};
// holds information about removal, used in RemoveCb
struct RemoveCbData {
// remove or eviction
RemoveContext context;
// item about to be freed back to allocator
Item& item;
// Iterator range pointing to chained allocs associated with @item
folly::Range<ChainedItemIter> chainedAllocs;
};
struct DestructorData {
DestructorData(DestructorContext ctx,
Item& it,
folly::Range<ChainedItemIter> iter,
PoolId id)
: context(ctx), item(it), chainedAllocs(iter), pool(id) {}
// helps to convert RemoveContext to DestructorContext,
// the context for RemoveCB is re-used to create DestructorData,
// this can be removed if RemoveCB is dropped.
DestructorData(RemoveContext ctx,
Item& it,
folly::Range<ChainedItemIter> iter,
PoolId id)
: item(it), chainedAllocs(iter), pool(id) {
if (ctx == RemoveContext::kEviction) {
context = DestructorContext::kEvictedFromRAM;
} else {
context = DestructorContext::kRemovedFromRAM;
}
}
// remove or eviction
DestructorContext context;
// item about to be freed back to allocator
// when the item is evicted/removed from NVM, the item is created on the
// heap, so functions that assume the item is located in a cache slab
// (e.g. CacheAllocator::getAllocInfo) do not work in that case.
// chained items must be iterated through @chainedAllocs.
// Other APIs used to access chained items are not compatible and should not
// be used.
Item& item;
// Iterator range pointing to chained allocs associated with @item
// when chained items are evicted/removed from NVM, the items are created on
// the heap, so functions that assume the items are located in a cache slab
// (e.g. CacheAllocator::getAllocInfo) do not work in that case.
folly::Range<ChainedItemIter> chainedAllocs;
// the pool that this item is/was in
PoolId pool;
};
// call back to execute when moving an item, this could be a simple memcpy
// or something more complex.
// An optional parentItem pointer is provided if the item being moved is a
// chained item.
using MoveCb =
std::function<void(Item& oldItem, Item& newItem, Item* parentItem)>;
// call back type that is executed when the cache item is removed
// (evicted / freed) from RAM, only items inserted into cache (not nascent)
// successfully are tracked
using RemoveCb = std::function<void(const RemoveCbData& data)>;
// the destructor being executed when the item is removed from cache (both RAM
// and NVM), only items inserted into cache (not nascent) successfully are
// tracked.
using ItemDestructor = std::function<void(const DestructorData& data)>;
using NvmCacheT = NvmCache<CacheT>;
using NvmCacheConfig = typename NvmCacheT::Config;
using DeleteTombStoneGuard = typename NvmCacheT::DeleteTombStoneGuard;
// Interface for the sync object provided by the user if movingSync is turned
// on.
// SyncObj is for CacheLib to obtain exclusive access to an item when
// it is moving it during slab release. Once held, the user should guarantee
// the item will not be accessed from another thread.
struct SyncObj {
virtual ~SyncObj() = default;
// Override this function to indicate success/failure of the sync obj,
// if user-supplied SyncObj can fail. e.g. if a lock can timeout.
virtual bool isValid() const { return true; }
};
using ChainedItemMovingSync = std::function<std::unique_ptr<SyncObj>(Key)>;
using AccessContainer = typename Item::AccessContainer;
using MMContainer = typename Item::MMContainer;
// serialization types
using MMSerializationType = typename MMType::SerializationType;
using MMSerializationConfigType = typename MMType::SerializationConfigType;
using MMSerializationTypeContainer =
typename MMType::SerializationTypeContainer;
using AccessSerializationType = typename AccessType::SerializationType;
using ShmManager = facebook::cachelib::ShmManager;
// The shared memory segments that can be persisted and re-attached to
enum SharedMemNewT { SharedMemNew };
// Attach to a persisted shared memory segment
enum SharedMemAttachT { SharedMemAttach };
// instantiates a cache allocator on heap memory
//
// @param config the configuration for the whole cache allocator
explicit CacheAllocator(Config config);
// instantiates a cache allocator on shared memory
//
// @param config the configuration for the whole cache allocator
CacheAllocator(SharedMemNewT, Config config);
// restore a cache allocator from shared memory
//
// @param config the configuration for the whole cache allocator
//
// @throw std::invalid_argument if the cache cannot be restored successfully
CacheAllocator(SharedMemAttachT, Config config);
// Shared segments will be detached upon destruction
~CacheAllocator() override;
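// Usage sketch (not part of the API; illustrative only): assumes a concrete
// CacheTrait such as LruCacheTrait and, for the shared-memory variants, a
// config that also enables cache persistence with a cache directory.
//   Config config;
//   config.setCacheSize(1 * 1024 * 1024 * 1024).setCacheName("example");
//   CacheAllocator<CacheTrait> heapCache(config);                 // on heap
//   CacheAllocator<CacheTrait> newShmCache(SharedMemNew, config); // new shm
//   // after a clean shutDown(), a later run may re-attach:
//   //   CacheAllocator<CacheTrait> restored(SharedMemAttach, config);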
// create a new cache allocation. The allocation can be initialized
// appropriately and made accessible through insert or insertOrReplace.
// If the handle returned from this api is not passed on to
// insert/insertOrReplace, the allocation gets destroyed when the handle
// goes out of scope.
//
// @param id the pool id for the allocation that was previously
// created through addPool
// @param key the key for the allocation. This will be made a
// part of the Item and be available through getKey().
// @param size the size of the allocation, exclusive of the key
// size.
// @param ttlSecs Time To Live (seconds) for the item;
// the default of 0 means no expiration time.
//
// @return the handle for the item or an invalid handle(nullptr) if the
// allocation failed. Allocation can fail if we are out of memory
// and can not find an eviction.
// @throw std::invalid_argument if the poolId is invalid or the size
// requested is invalid or if the key is invalid(key.size() == 0 or
// key.size() > 255)
WriteHandle allocate(PoolId id,
Key key,
uint32_t size,
uint32_t ttlSecs = 0,
uint32_t creationTime = 0);
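// Usage sketch (illustrative only): `cache` is a constructed CacheAllocator,
// `pid` is a pool id returned by addPool(), and `payload` is user data of at
// least 100 bytes. The allocation only becomes visible after insertion.
//   auto handle = cache.allocate(pid, "my key", 100);
//   if (handle) {
//     std::memcpy(handle->getMemory(), payload, 100);
//     cache.insertOrReplace(handle);
//   }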
// Allocate a chained item
//
// The resulting chained item does not have a parent item and
// will be freed once the handle is dropped
//
// The parent handle parameter here is mainly used to find the
// correct pool to allocate memory for this chained item
//
// @param parent handle to the cache item
// @param size the size for the chained allocation
//
// @return handle to the chained allocation
// @throw std::invalid_argument if the size requested is invalid or
// if the item is invalid
WriteHandle allocateChainedItem(const ReadHandle& parent, uint32_t size);
// Link a chained item to a parent item and mark this parent handle as having
// chained allocations.
// The parent handle is not reset (to become a null handle) so that the caller
// can continue using it as before calling this api.
//
// @param parent handle to the parent item
// @param child chained item that will be linked to the parent
//
// @throw std::invalid_argument if parent is nullptr
void addChainedItem(WriteHandle& parent, WriteHandle child);
// Pop the first chained item associated with this parent and unmark this
// parent handle as having chained allocations.
// The parent handle is not reset (to become a null handle) so that the caller
// can continue using it as before calling this api.
//
// @param parent handle to the parent item
//
// @return ChainedItem head if there exists one
// nullptr otherwise
WriteHandle popChainedItem(WriteHandle& parent);
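// Usage sketch (illustrative only): builds a parent with one chained
// allocation; `cache` and `pid` are assumed as in the allocate() example.
//   auto parent = cache.allocate(pid, "parent key", 100);
//   if (parent) {
//     auto chained = cache.allocateChainedItem(parent, 200);
//     if (chained) {
//       cache.addChainedItem(parent, std::move(chained));
//     }
//     cache.insertOrReplace(parent);
//   }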
// Return the key to the parent item.
//
// This API is racy with transferChainAndReplace and also with moving during
// a slab release. To use this safely, the user needs to synchronize calls to
// this API using their user level lock (in exclusive mode). The same user level
// lock should've been provided via movingSync to CacheLib if moving is
// enabled for slab rebalancing.
//
// @throw std::invalid_argument if chainedItem is not actually a chained item.
Key getParentKey(const Item& chainedItem);
// replace a chained item in the existing chain. old and new item must be
// chained items that have been allocated with the same parent that is
// passed in. oldItem must be in the chain and newItem must not be.
//
// Upon success a handle to the oldItem is returned for the caller
//
// @param oldItem the item we are replacing in the chain
// @param newItem the item we are replacing it with
// @param parent the parent for the chain
//
// @return handle to the oldItem on return.
//
// @throw std::invalid_argument if any of the pre-conditions fails
WriteHandle replaceChainedItem(Item& oldItem,
WriteHandle newItem,
Item& parent);
// Transfers the ownership of the chain from the current parent to the new
// parent and inserts the new parent into the cache. Parent will be unmarked
// as having chained allocations and its nvmCache will be invalidated. Parent
// will not be null after calling this API.
//
// Caller must synchronize with any modifications to the parent's chain and
// any calls to find() for the same key to ensure there are no concurrent
// parent handles while doing this. While calling this method, the cache does
// not guarantee a consistent view for the key and the caller must not rely on
// this. The new parent and old parent must be allocations for the same key.
// New parent must also be an allocation that is not added to the cache.
//
//
// @param parent the current parent of the chain we want to transfer
// @param newParent the new parent for the chain
//
// @throw std::invalid_argument if the parent does not have chained item or
// incorrect state of chained item or if any of the pre-conditions
// are not met
void transferChainAndReplace(WriteHandle& parent, WriteHandle& newParent);
// Inserts the allocated handle into the AccessContainer, making it
// accessible for everyone. This needs to be the handle that the caller
// allocated through _allocate_. If this call fails, the allocation will be
// freed back when the handle gets out of scope in the caller.
//
// @param handle the handle for the allocation.
//
// @return true if the handle was successfully inserted into the hashtable
// and is now accessible to everyone. False if there was an error.
//
// @throw std::invalid_argument if the handle is already accessible.
bool insert(const WriteHandle& handle);
// Replaces the allocated handle into the AccessContainer, making it
// accessible for everyone. If an existing handle is already in the
// container, remove that handle. This needs to be the handle that the caller
// allocated through _allocate_. If this call fails, the allocation will be
// freed back when the handle gets out of scope in the caller.
//
// @param handle the handle for the allocation.
//
// @throw std::invalid_argument if the handle is already accessible.
// @throw cachelib::exception::RefcountOverflow if the item we are replacing
// is already out of refcounts.
// @return handle to the old item that had been replaced
WriteHandle insertOrReplace(const WriteHandle& handle);
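// Usage sketch (illustrative only), contrasting insert() and
// insertOrReplace(); `cache` and `pid` assumed as above.
//   auto handle = cache.allocate(pid, "key", 64);
//   if (handle) {
//     // insert() fails if "key" is already present; insertOrReplace()
//     // replaces the old item and returns a handle to it (or nullptr).
//     auto old = cache.insertOrReplace(handle);
//   }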
// look up an item by its key across the nvm cache as well if enabled.
//
// @param key the key for lookup
//
// @return the read handle for the item or a handle to nullptr if the
// key does not exist.
ReadHandle find(Key key);
// Warning: this API is synchronous today with HybridCache. This means as
// opposed to find(), we will block on an item being read from
// flash until it is loaded into DRAM-cache. In find(), if an item
// is missing in dram, we will return a "not-ready" handle and
// user can choose to block or convert to folly::SemiFuture and
// process the item only when it becomes ready (loaded into DRAM).
// If blocking behavior is NOT what you want, a workaround is:
// auto readHandle = cache->find("my key");
// if (!readHandle.isReady()) {
//   auto sf = std::move(readHandle)
//                 .toSemiFuture()
//                 .defer([](auto readHandle) {
//                   return std::move(readHandle).toWriteHandle();
//                 });
// }
//
// look up an item by its key across the nvm cache as well if enabled. Users
// should call this API only when they are going to mutate the item data.
//
// @param key the key for lookup
// @param doNvmInvalidation whether to do nvm invalidation;
// defaults to true
//
// @return the write handle for the item or a handle to nullptr if the
// key does not exist.
WriteHandle findToWrite(Key key, bool doNvmInvalidation = true);
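// Usage sketch (illustrative only), contrasting the read and write paths;
// `cache` assumed as above.
//   auto ro = cache.find("key");        // ReadHandle: const access
//   if (ro) {
//     const void* data = ro->getMemory();
//   }
//   auto rw = cache.findToWrite("key"); // WriteHandle: mutable access
//   if (rw) {
//     std::memset(rw->getMemory(), 0, rw->getSize());
//   }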
// look up an item by its key. This ignores the nvm cache and only does RAM
// lookup.
//
// @param key the key for lookup
//
// @return the read handle for the item or a handle to nullptr if the key
// does not exist.
FOLLY_ALWAYS_INLINE ReadHandle findFast(Key key);
// look up an item by its key. This ignores the nvm cache and only does RAM
// lookup. Users should call this API only when they are going to mutate the
// item data.
//
// @param key the key for lookup
// @param doNvmInvalidation whether to do nvm invalidation;
// defaults to true
//
// @return the write handle for the item or a handle to nullptr if the
// key does not exist.
FOLLY_ALWAYS_INLINE WriteHandle
findFastToWrite(Key key, bool doNvmInvalidation = true);
// look up an item by its key. This ignores the nvm cache and only does RAM
// lookup. This API does not update the stats related to cache gets and misses
// nor mark the item as useful (see markUseful below).
//
// @param key the key for lookup
// @return the handle for the item or a handle to nullptr if the key does
// not exist.
FOLLY_ALWAYS_INLINE ReadHandle peek(Key key);
// Returns true if a key is potentially in cache. There is a non-zero chance
// the key does not exist in cache (e.g. hash collision in NvmCache). This
// check is meant to be synchronous and fast as we only check DRAM cache and
// in-memory index for NvmCache. Similar to peek, this does not indicate to
// cachelib you have looked up an item (i.e. no stats bump, no eviction queue
// promotion, etc.)
//
// @param key the key for lookup
// @return true if the key could exist, false otherwise
bool couldExistFast(Key key);
// Mark an item that was fetched through peek as useful. This is useful when
// users want to look into the cache and only mark items as useful when they
// inspect the contents of it.
//
// @param handle the item handle
// @param mode the mode of access for the lookup. defaults to
// AccessMode::kRead
void markUseful(const ReadHandle& handle, AccessMode mode);
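// Usage sketch (illustrative only): inspect an item without bumping stats or
// promoting it, then mark it useful only if the caller decides it matters;
// isInteresting() is a hypothetical application-side check.
//   auto h = cache.peek("key");
//   if (h && isInteresting(*h)) {
//     cache.markUseful(h, AccessMode::kRead);
//   }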
using AccessIterator = typename AccessContainer::Iterator;
// Iterator interface for the cache. It guarantees that all keys that were
// present when the iteration started will be accessible unless they are
// removed. Keys that are removed/inserted during the lifetime of an
// iterator are not guaranteed to be either visited or not-visited.
// Adding/Removing from the hash table while the iterator is alive will not
// invalidate any iterator or the element that the iterator points at
// currently. The iterator internally holds a Handle to the item and hence
// the keys that the iterator holds reference to, will not be evictable
// until the iterator is destroyed.
AccessIterator begin() { return accessContainer_->begin(); }
// return an iterator with a throttler for throttled iteration
AccessIterator begin(util::Throttler::Config config) {
return accessContainer_->begin(config);
}
AccessIterator end() { return accessContainer_->end(); }
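// Usage sketch (illustrative only): walk all accessible items; a throttled
// variant is available via begin(util::Throttler::Config).
//   for (auto it = cache.begin(); it != cache.end(); ++it) {
//     // it->getKey() and it->getMemory() are safe while `it` is alive
//   }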
enum class RemoveRes : uint8_t {
kSuccess,
kNotFoundInRam,
};
// removes the allocation corresponding to the key, if present in the hash
// table. The key will not be accessible through find() after this returns
// success. The allocation for the key will be recycled once all active
// Item handles are released.
//
// @param key the key for the allocation.
// @return kSuccess if the key exists and was successfully removed.
// kNotFoundInRam if the key was not present in memory (doesn't
// check nvm)
RemoveRes remove(Key key);
// remove the key that the iterator is pointing to. The element will
// not be accessible upon success. However, the element will not actually be
// recycled until the iterator destroys the internal handle.
//
// @param it the iterator to the key to be destroyed.
// @return kSuccess if the element was still in the hashtable and it was
// successfully removed.
// kNotFoundInRam if the element the iterator was pointing to was
// deleted already.
RemoveRes remove(AccessIterator& it);
// removes the allocation corresponding to the handle. The allocation will
// be freed when all the existing handles are released.
//
// @param it item read handle
//
// @return kSuccess if the item exists and was successfully removed.
// kNotFoundInRam otherwise
//
// @throw std::invalid_argument if item handle is null
RemoveRes remove(const ReadHandle& it);
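// Usage sketch (illustrative only):
//   if (cache.remove("key") == RemoveRes::kSuccess) {
//     // "key" is no longer reachable via find(); its memory is recycled
//     // once all outstanding handles are released.
//   }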
// view a read-only parent item as a chain of allocations if it has chained
// alloc. The returned chained-alloc is good to iterate upon, but will block
// any concurrent addChainedItem or popChainedItem for the same key until the
// ChainedAllocs object is released. This is ideal for use cases which do
// very brief operations on the chain of allocations.
//
// The ordering of the iteration for the chain is LIFO. Check
// CacheChainedAllocs.h for the API and usage.
//
// @param parent the parent allocation of the chain from a ReadHandle.
// @return read-only chained alloc view of the parent
//
// @throw std::invalid_argument if the parent does not have chained allocs
ChainedAllocs viewAsChainedAllocs(const ReadHandle& parent) {
return viewAsChainedAllocsT<ReadHandle, ChainedItemIter>(parent);
}
// view a writable parent item as a chain of allocations if it has chained
// alloc. The returned chained-alloc is good to iterate upon, but will block
// any concurrent addChainedItem or popChainedItem for the same key until the
// ChainedAllocs object is released. This is ideal for use cases which do
// very brief operations on the chain of allocations.
//
// The ordering of the iteration for the chain is LIFO. Check
// CacheChainedAllocs.h for the API and usage.
//
// @param parent the parent allocation of the chain from a WriteHandle.
// @return writable chained alloc view of the parent
//
// @throw std::invalid_argument if the parent does not have chained allocs
WritableChainedAllocs viewAsWritableChainedAllocs(const WriteHandle& parent) {
return viewAsChainedAllocsT<WriteHandle, WritableChainedItemIter>(parent);
}
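// Usage sketch (illustrative only): iterate a parent's chain in LIFO order;
// assumes the getChain() accessor described in CacheChainedAllocs.h.
//   auto parent = cache.find("parent key");
//   if (parent && parent->hasChainedItem()) {
//     auto chain = cache.viewAsChainedAllocs(parent);
//     for (const auto& c : chain.getChain()) {
//       // c.getMemory() / c.getSize() for each chained allocation
//     }
//   }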
// Returns the full usable size for this item
// This can be bigger than item.getSize()
//
// @param item reference to an item
//
// @return the full usable size for this item
uint32_t getUsableSize(const Item& item) const;
// gets the allocation class assigned to BG worker
auto getAssignedMemoryToBgWorker(size_t evictorId, size_t numWorkers, TierId tid);
bool shouldWakeupBgEvictor(TierId tid, PoolId pid, ClassId cid);
size_t backgroundWorkerId(TierId tid, PoolId pid, ClassId cid, size_t numWorkers);
// Get a random item from memory
// This is useful for profiling and sampling cachelib managed memory
//
// @return Valid SampleItem if a valid item is found
// Invalid SampleItem if the randomly chosen memory does not
// belong to a valid item
// Should be checked with SampleItem.isValid() before use
SampleItem getSampleItem();
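// Usage sketch (illustrative only):
//   auto sample = cache.getSampleItem();
//   if (sample.isValid()) {
//     const auto& info = sample.getAllocInfo(); // pool, class, alloc size
//   }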
// Convert a Read Handle to an IOBuf. The returned IOBuf gives a
// read-only view to the user. The item's ownership is retained by
// the IOBuf until its destruction.
//
// When the read handle has one or more chained items attached to it,
// user will also get a series of IOBufs (first of which is the Parent).
//
// **WARNING**: folly::IOBuf allows mutation to a cachelib item even when the
// item is read-only. User is responsible to ensure no mutation occurs (i.e.
// only const functions are called). If mutation is required, please use
// `convertToIOBufForWrite`.
//
// @param handle read handle that will transfer its ownership to an IOBuf
//
// @return an IOBuf that contains the value of the item.
// This IOBuf acts as a Read Handle, on destruction, it will
// properly decrement the refcount (to release the item).
// @throw std::invalid_argument if ReadHandle is nullptr
folly::IOBuf convertToIOBuf(ReadHandle handle) {
return convertToIOBufT<ReadHandle>(handle);
}
// Convert a Write Handle to an IOBuf. The returned IOBuf gives a
// writable view to the user. The item's ownership is retained by
// the IOBuf until its destruction.
//
// When the write handle has one or more chained items attached to it,
// user will also get a series of IOBufs (first of which is the Parent).
//
// @param handle write handle that will transfer its ownership to an IOBuf
//
// @return an IOBuf that contains the value of the item.
// This IOBuf acts as a Write Handle, on destruction, it will
// properly decrement the refcount (to release the item).
// @throw std::invalid_argument if WriteHandle is nullptr
folly::IOBuf convertToIOBufForWrite(WriteHandle handle) {
return convertToIOBufT<WriteHandle>(handle);
}
// TODO: When Read/Write Handles are ready, change this to allow
// const-only access to data managed by iobuf and offer a
// wrapAsWritableIOBuf() API.
//
// wrap an IOBuf over the data for an item. This IOBuf does not own the item
// and the caller is responsible for ensuring that the IOBuf is valid with
// the item lifetime. If the item has chained allocations, the chains are
// also wrapped into the iobuf as chained iobufs
//
// @param item the item to wrap around
//
// @return an IOBuf that contains the value of the item.
folly::IOBuf wrapAsIOBuf(const Item& item);
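// Usage sketch (illustrative only): hand a zero-copy, refcount-holding view
// of an item's value to IO code.
//   auto handle = cache.find("key");
//   if (handle) {
//     folly::IOBuf buf = cache.convertToIOBuf(std::move(handle));
//     // buf keeps the item alive; buf.data()/buf.length() view the value
//   }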
// creates a pool for the cache allocator with the corresponding name.
//
// @param name name of the pool
// @param size size of the pool
// @param allocSizes allocation class sizes, if empty, a default
// one from the memory allocator will be used
// @param config MMConfig for the MMContainer,
// default constructed if user doesn't supply one
// @param rebalanceStrategy rebalance strategy for the pool. If not set,
// the default one will be used.
// @param resizeStrategy resize strategy for the pool. If not set,
// the default one will be used.
// @param ensureProvisionable ensures that the size of the pool is enough
// to give one slab to each allocation class,
// false by default.
//
// @return a valid PoolId that the caller can use.
// @throw std::invalid_argument if the size is invalid or there is not
// enough space for creating the pool.
// std::logic_error if we have run out of pools.
PoolId addPool(folly::StringPiece name,
size_t size,
const std::set<uint32_t>& allocSizes = {},
MMConfig config = {},
std::shared_ptr<RebalanceStrategy> rebalanceStrategy = nullptr,
std::shared_ptr<RebalanceStrategy> resizeStrategy = nullptr,
bool ensureProvisionable = false);
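// Usage sketch (illustrative only): a single 512MB pool; the pool name and
// size are arbitrary and must fit within the configured cache size.
//   const PoolId pid = cache.addPool("default", 512 * 1024 * 1024);
//   auto handle = cache.allocate(pid, "key", 100);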
// update an existing pool's config
//
// @param pid pool id for the pool to be updated
// @param config new config for the pool
//
// @throw std::invalid_argument if the poolId is invalid
void overridePoolConfig(TierId tid, PoolId pid, const MMConfig& config);
// update an existing pool's rebalance strategy
//
// @param pid pool id for the pool to be updated
// @param rebalanceStrategy new rebalance strategy for the pool
//
// @throw std::invalid_argument if the poolId is invalid
void overridePoolRebalanceStrategy(
PoolId pid, std::shared_ptr<RebalanceStrategy> rebalanceStrategy);
// update an existing pool's resize strategy
//
// @param pid pool id for the pool to be updated
// @param resizeStrategy new resize strategy for the pool
//
// @throw std::invalid_argument if the poolId is invalid
void overridePoolResizeStrategy(
PoolId pid, std::shared_ptr<RebalanceStrategy> resizeStrategy);
// update pool size optimization strategy for this cache
// @param optimizeStrategy new resize strategy
void overridePoolOptimizeStrategy(
std::shared_ptr<PoolOptimizeStrategy> optimizeStrategy);
/**
* PoolResizing can be done online while the cache allocator is being used
* to do allocations. Pools can be grown or shrunk using the following api.
* The actual resizing happens asynchronously and is controlled by the
* config parameters poolResizeIntervalSecs and poolResizeSlabsPerIter. The
* pool resizer releases slabs from pools that are over limit when the
* memory allocator is out of memory. If there is enough free memory
* available, the pool resizer does not do any resizing until the memory is
* exhausted and there is some pool that is over the limit
*/
// shrink the existing pool by _bytes_.
// @param bytes the number of bytes to be taken away from the pool
// @return true if the operation succeeded. false if the size of the pool is
// smaller than _bytes_
// @throw std::invalid_argument if the poolId is invalid.
// TODO: should call shrinkPool for specific tier?
bool shrinkPool(PoolId pid, size_t bytes) {
return allocator_[currentTier()]->shrinkPool(pid, bytes);
}
// grow an existing pool by _bytes_. This will fail if there is no
// available memory across all the pools to provide for this pool
// @param bytes the number of bytes to be added to the pool.
// @return true if the pool was grown. false if the necessary number of
// bytes were not available.
// @throw std::invalid_argument if the poolId is invalid.
// TODO: should call growPool for specific tier?
bool growPool(PoolId pid, size_t bytes) {
return allocator_[currentTier()]->growPool(pid, bytes);
}
// move bytes from one pool to another. The source pool should be at least
// _bytes_ in size.
//
// @param src the pool to be sized down and giving the memory.
// @param dest the pool receiving the memory.
// @param bytes the number of bytes to move from src to dest.
// @return true if the resize succeeded. false if src does not have
// correct size to do the transfer.
// @throw std::invalid_argument if src or dest is invalid pool
bool resizePools(PoolId src, PoolId dest, size_t bytes) override {
return allocator_[currentTier()]->resizePools(src, dest, bytes);
}
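// Usage sketch (illustrative only): move 64MB of capacity between two pools
// online; `poolA` and `poolB` are existing pool ids.
//   if (!cache.resizePools(poolA, poolB, 64 * 1024 * 1024)) {
//     // poolA did not have 64MB available to give up
//   }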
// Add a new compact cache with given name and size
//
// @param name name of the compact cache pool
// @param size size of the compact cache pool
// @param args All of the arguments in CompactCache after allocator
// So the signature of addCompactCache is:
// addCompactCache(folly::StringPiece name,
// size_t size,
// RemoveCb removeCb,
// ReplaceCb replaceCb,
// ValidCb validCb,
// bool allowPromotions = true);
// addCompactCache(folly::StringPiece name,
// size_t size,
// bool allowPromotions = true);
//
// @return pointer to CompactCache instance of the template type
//
// @throw std::logic_error if compact cache is not enabled
// @throw std::invalid_argument There is a memory pool that has the same
// name as the compact cache we are adding or
// if there is no sufficient space to create
// a compact cache.
template <typename CCacheT, typename... Args>
CCacheT* addCompactCache(folly::StringPiece name,
size_t size,
Args&&... args);
// Attach a compact cache to the given pool after warm roll
//
// @param name name of the compact cache pool
// @param args All of the arguments in CompactCache after allocator
// So the signature of attachCompactCache is:
// attachCompactCache(folly::StringPiece name,
// RemoveCb removeCb,
// ReplaceCb replaceCb,
// ValidCb validCb,
// bool allowPromotions = true);
// attachCompactCache(folly::StringPiece name,
// bool allowPromotions = true);
//
// @return pointer to CompactCache instance of the template type.
//
// @throw std::out_of_range if the pool does not exist
// @throw std::invalid_argument if the compact key/value size does not match
// from warm roll
template <typename CCacheT, typename... Args>
CCacheT* attachCompactCache(folly::StringPiece name, Args&&... args);
// Return the base interface of an attached compact cache to pull out its
// stats. For a non-active compact cache, this throws
// std::invalid_argument.
const ICompactCache& getCompactCache(PoolId pid) const override;
// The enum value that indicates the CacheAllocator's shutdown status.
enum class ShutDownStatus {
kSuccess = 0, // Successfully persisted the DRAM cache, and the NvmCache if
// enabled.
kSavedOnlyDRAM, // Successfully persisted the DRAM cache only; NvmCache is
// enabled but failed to persist it.
kSavedOnlyNvmCache, // Successfully persisted the enabled NvmCache only;
// Failed to persist DRAM cache.
kFailed // Failed to persist both the DRAM cache and the enabled NvmCache.
};
// Persists the state of the cache allocator. On a successful shutdown,
// this cache allocator can be restored on restart.
//
// precondition: serialization must happen without any reader or writer
// present. Any modification of this object afterwards will result in an
// invalid, inconsistent state for the serialized data. There must not be
// any outstanding active handles
//
// @throw std::invalid_argument if the cache allocator isn't using shared
// memory
// @throw std::logic_error if any component is not restorable.
// @return A ShutDownStatus value indicating the result of the shutDown
// operation.
// kSuccess - successfully shut down and can be re-attached
// kFailed - failure due to outstanding active handle or error with
// cache dir
// kSavedOnlyDRAM and kSavedOnlyNvmCache - partial content saved
ShutDownStatus shutDown();
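// Usage sketch (illustrative only): persist a shared-memory cache on clean
// exit so a later run can re-attach.
//   auto status = cache.shutDown();
//   if (status == ShutDownStatus::kSuccess) {
//     // safe to restore later via the SharedMemAttach constructor
//   }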
// No-op for workers that are already running. Typically the user calls this in
// conjunction with `config.delayWorkerStart()` to avoid initialization
// ordering issues with user callbacks for cachelib's workers.
void startCacheWorkers();
// Functions that stop existing ones (if any) and create new workers
// start pool rebalancer
// @param interval the period this worker fires.
// @param strategy rebalancing strategy
// @param freeAllocThreshold threshold for free-alloc-slab for picking victim
// allocation class. free-alloc-slab is calculated as the number of free
// allocations divided by the number of allocations in one slab. Only
// allocation classes with a higher free-alloc-slab than the threshold will
// be picked as a victim.
//
//
bool startNewPoolRebalancer(std::chrono::milliseconds interval,
std::shared_ptr<RebalanceStrategy> strategy,
unsigned int freeAllocThreshold);
// start pool resizer
// @param interval the period this worker fires.
// @param poolResizeSlabsPerIter maximum number of slabs each pool may remove
// in resizing.
// @param strategy resizing strategy
bool startNewPoolResizer(std::chrono::milliseconds interval,
unsigned int poolResizeSlabsPerIter,
std::shared_ptr<RebalanceStrategy> strategy);