// Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms.

#ifndef INCLUDE_CPPUTILS_ALLOCATOR_H_
#define INCLUDE_CPPUTILS_ALLOCATOR_H_

#include <cassert>
#include <cstdlib>

#include <memory>
#include <utility>
#include <vector>

namespace cpputils {

/**
 * std::allocator-compatible allocator whose storage is aligned to a
 * runtime-chosen boundary (mAlign).
 *
 * NOTE(review): mAlign is assumed to be a power of two that is a valid
 * alignment for std::aligned_alloc on the target platform — TODO confirm
 * callers only pass such values.
 */
template <typename T>
class AlignedAllocator : public std::allocator<T> {
  using Base = std::allocator<T>;

 protected:
  size_t mAlign;  // alignment in bytes; at least 4

 public:
  explicit AlignedAllocator(size_t alignment) noexcept : Base(), mAlign(alignment) {
    // BUG FIX: `mAlign & 3 == 0` parsed as `mAlign & (3 == 0)` == `mAlign & 0`,
    // which is always 0, so the assertion fired for EVERY alignment.
    // `==` binds tighter than `&`; parenthesize the mask.
    assert((mAlign & 3) == 0 && "lowest alignment must be at least 4 bytes");
  }

  /**
   * Allocate storage for n objects of T, aligned to mAlign bytes.
   *
   * BUG FIX: the original passed the element count `n` straight to
   * aligned_alloc, allocating n BYTES instead of n * sizeof(T) bytes, and did
   * not round the size up to a multiple of the alignment, which
   * std::aligned_alloc requires (it may return nullptr otherwise).
   */
  T* allocate(size_t n) noexcept {
    const size_t bytes = ((n * sizeof(T) + mAlign - 1) / mAlign) * mAlign;
    return static_cast<T*>(std::aligned_alloc(mAlign, bytes));
  }

  // Release storage previously obtained from allocate(); the element count is
  // ignored because free() does not need it.
  // NOTE(review): std::allocator's members are not virtual, so the original
  // `override` overloads could never compile; the const/non-const pairs have
  // been collapsed into the single overloads the Allocator requirements need.
  void deallocate(T* ptr, size_t /*n*/) noexcept { std::free(ptr); }

  size_t alignment(void) const noexcept { return mAlign; }
};


/**
 * generic resource pool that holds some number of objects of type T
 * Objects are created via a factory object that exposes create and
 * destroy methods that work on one object of type T.
 */
template <typename T, typename FactoryT, size_t BATCH_SIZE_T>
struct ResourcePool {
  constexpr static const size_t BATCH_SIZE = BATCH_SIZE_T;
  using value_type = T;

 private:
  using Pool = std::vector<T>;

  Pool mResPool;      // objects currently available for allocation
  FactoryT mFactory;  // creates/destroys individual T objects

  // Create one batch of objects and add them to the free pool.
  // NOTE(review): declared noexcept although emplace_back may allocate —
  // an allocation failure terminates instead of propagating (kept as in the
  // original design).
  void replenish() noexcept {
    assert(mResPool.empty());

    for (size_t i = 0; i < BATCH_SIZE; ++i) {
      mResPool.emplace_back(mFactory.create());
    }
  }

  void init() noexcept {
    // TODO: add policy on whether to replenish at creation/init or not
    replenish();
  }

  // Destroy every pooled object via the factory and empty the pool.
  void destroy() noexcept {
    for (const auto& r : mResPool) {
      mFactory.destroy(r);
    }
    mResPool.clear();
  }

 public:
  explicit ResourcePool(FactoryT&& factory = FactoryT()) noexcept
      : mResPool(), mFactory(std::forward<FactoryT>(factory)) {
    init();
  }

  ~ResourcePool() noexcept { destroy(); }

  // Hand out one object, growing the pool by a batch when it runs dry.
  T allocate() noexcept {
    if (mResPool.empty()) {
      replenish();
    }

    assert(!mResPool.empty());

    auto obj = mResPool.back();
    mResPool.pop_back();
    return obj;
  }

  // Return an object to the pool for later reuse.
  void deallocate(const T& obj) { mResPool.push_back(obj); }
};

namespace impl {
// Adapts a factory type with STATIC create/destroy into the object-factory
// interface ResourcePool expects.
template <typename T, typename StaticFactoryT>
struct StaticFactoryWrapper {
  T create() const noexcept { return StaticFactoryT::create(); }

  void destroy(const T& obj) const noexcept { StaticFactoryT::destroy(obj); }
};
}  // namespace impl

// ResourcePool specialization for factories exposing static create/destroy.
template <typename T, typename StaticFactoryT, size_t BATCH_SIZE_T>
using ResourcePoolStaticFactory =
    ResourcePool<T, impl::StaticFactoryWrapper<T, StaticFactoryT>, BATCH_SIZE_T>;


}  // end namespace cpputils

#endif  // INCLUDE_CPPUTILS_ALLOCATOR_H_

// ==== cppUtils/include/cpputils/Heap.h ====
// Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms.
#ifndef INCLUDE_CPPUTILS_HEAP_H_
#define INCLUDE_CPPUTILS_HEAP_H_

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

#include <vector>

namespace cpputils {

/**
 * Non-owning half-open range [mBeg, mEnd) of raw memory handed out by the
 * heaps below.
 */
template <typename PtrT = char*>
struct MemBlockImpl {

  using pointer = PtrT;
  PtrT mBeg = nullptr;
  PtrT mEnd = nullptr;

  MemBlockImpl(const PtrT& b, const PtrT& e) : mBeg(b), mEnd(e) {}

  const PtrT& begin(void) const noexcept { return mBeg; }
  const PtrT& end(void) const noexcept { return mEnd; }

  size_t size(void) const noexcept {
    assert(mEnd >= mBeg);
    return static_cast<size_t>(mEnd - mBeg);
  }

  ptrdiff_t distance(void) const noexcept { return (mEnd - mBeg); }

  bool empty(void) const noexcept { return mBeg == mEnd; }
};

using MemBlock = MemBlockImpl<>;

/**
 * FixedSizeHeap allocates blocks of a fixed size (mAllocSz). It requests a larger block
 * from the BackingHeapT and chops it up into smaller blocks of mAllocSz
 */
template <typename BackingHeapT>
class FixedSizeHeapImpl {
  // FIXME(amber): Fixed size heap currently requests huge pages from the
  // mBackingHeap but never frees them back until it's destroyed. Freed chunks go in the mFreeBlocks below.
  // If we track what chunks belong to which page of BackingHeapT, we can free a
  // huge page when all its chunks have been freed by FixedSizeHeap

  using BlockList = std::vector<MemBlock>;
  using pointer = char*;

  size_t mAllocSz;             // size in bytes of each small block handed out
  BackingHeapT& mBackingHeap;  // upstream heap supplying the big pages
  BlockList mFreeBlocks;       // small blocks ready to hand out
  BlockList mBigBlocks;        // pages from mBackingHeap, returned in the dtor

  // Grab one big page from the backing heap and carve it into mAllocSz chunks.
  void replenish(void) {
    assert(mFreeBlocks.empty());

    MemBlock bigBlk = mBackingHeap.allocate();

    // BUG FIX: the failure check must come BEFORE the size assertion — the
    // original asserted size() >= mAllocSz on a possibly-empty block first,
    // tripping the assert instead of reaching the intended error path.
    if (bigBlk.empty()) {
      std::fprintf(stderr, "FixedSizeHeapImpl::replenish() : mBackingHeap.allocate() failed\n");
      std::abort();
    }
    assert(bigBlk.size() >= mAllocSz && "must allocate at least 1 block of mAllocSz");

    mBigBlocks.emplace_back(bigBlk);

    auto* beg = bigBlk.begin();
    auto* end = bigBlk.end();
    // BUG FIX: the loop initialized `i` from the undeclared name `begin`
    // instead of the local `beg`, which does not compile. Any trailing
    // remainder of the page smaller than mAllocSz is deliberately unused.
    for (auto* i = beg; (i + mAllocSz) <= end; i += mAllocSz) {
      mFreeBlocks.emplace_back(i, i + mAllocSz);
    }
  }

 public:
  FixedSizeHeapImpl(BackingHeapT& backHeap, const size_t allocSz)
      : mAllocSz(allocSz), mBackingHeap(backHeap) {
    // TODO(amber) FIXME other fields are default constructed?
    // TODO(amber) FIXME: change assert because ALLOCATION_SIZE is undefined?
    // assert(BackingHeapT::ALLOCATION_SIZE % mAllocSz == 0 &&
    //     "allocation size must be a factor of CoreHeap::ALLOCATION_SIZE");
  }

  // Return every big page to the backing heap; outstanding small blocks
  // handed out by allocate() become invalid at this point.
  ~FixedSizeHeapImpl(void) noexcept {
    for (const auto& bigBlk : mBigBlocks) {
      mBackingHeap.deallocate(bigBlk);
    }
    mBigBlocks.clear();
    mFreeBlocks.clear();
  }

  size_t allocationSize(void) const noexcept { return mAllocSz; }

  /**
   * allocate without alignment constraints
   */
  MemBlock allocate() noexcept {
    if (mFreeBlocks.empty()) {
      replenish();
    }

    if (mFreeBlocks.empty()) {
      assert(false && "allocation failed due to out of memory");
      return MemBlock{nullptr, nullptr};
    }

    MemBlock ret = mFreeBlocks.back();
    mFreeBlocks.pop_back();
    assert(ret.size() == mAllocSz && "allocation size mismatch. Bad blk");
    return ret;
  }

  // Return a block obtained from allocate() to the free list.
  void deallocate(const MemBlock& blk) {
    assert(blk.size() == mAllocSz && "attempting to free a bad blk");
    mFreeBlocks.push_back(blk);
  }
};


}  // end namespace cpputils

#endif  // INCLUDE_CPPUTILS_HEAP_H_

// ==== cppUtils/include/cpputils/Stat.h ====
#ifndef INCLUDE_CPPUTILS_STAT_H_
#define INCLUDE_CPPUTILS_STAT_H_

#include <cassert>
#include <cmath>

#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>

namespace cpputils {

/**
 * Accumulates a series of samples of type T and answers simple summary
 * statistics over them (min/max/sum/mean/standard deviation).
 */
template <typename T, typename Cont_tp = std::vector<T>>
struct StatSeries {

  using container = Cont_tp;

  Cont_tp mSeries;  // all samples pushed so far, in insertion order

  void push_back(const T& val) { mSeries.emplace_back(val); }

  void clear() noexcept { mSeries.clear(); }

  size_t size(void) const { return mSeries.size(); }

  bool empty(void) const { return mSeries.empty(); }

  T min(void) const {
    assert(!mSeries.empty());
    return *(std::min_element(mSeries.cbegin(), mSeries.cend()));
  }

  T max(void) const {
    assert(!mSeries.empty());
    return *(std::max_element(mSeries.cbegin(), mSeries.cend()));
  }

  // (min, max) in a single pass.
  std::pair<T, T> range(void) const {
    assert(!mSeries.empty());
    auto p = std::minmax_element(mSeries.cbegin(), mSeries.cend());
    return std::make_pair(*p.first, *p.second);
  }

  T sum(void) const { return std::accumulate(mSeries.cbegin(), mSeries.cend(), T()); }

  // Arithmetic mean computed in type U.
  // ROBUSTNESS: asserts non-empty — dividing by size()==0 is UB for
  // integral U.
  template <typename U = double>
  U avg(void) const {
    assert(!mSeries.empty());
    return static_cast<U>(sum()) / static_cast<U>(size());
  }

  // Population standard deviation computed in type U.
  // BUG FIX: the original returned sqrt(sumSqDev) — the root of the SUM of
  // squared deviations, not the standard deviation. Divide by N before the
  // square root.
  template <typename U = double>
  U stdDev(void) const {
    assert(!mSeries.empty());
    U a = avg<U>();
    U sumSqDev = U();
    for (const auto& i : mSeries) {
      sumSqDev += (i - a) * (i - a);
    }
    return std::sqrt(sumSqDev / static_cast<U>(size()));
  }

};


}  // end namespace cpputils

#endif  // INCLUDE_CPPUTILS_STAT_H_