diff --git a/src/containers/hashset.d b/src/containers/hashset.d
index 1bfb6f2..6a7c99c 100644
--- a/src/containers/hashset.d
+++ b/src/containers/hashset.d
@@ -207,7 +207,6 @@ private:
 	import containers.internal.node : shouldAddGCRange, FatNodeInfo;
 	import containers.internal.storage_type : ContainerStorageType;
 	import std.traits : isPointer;
-	import core.bitop : bsf;
 
 	alias LengthType = ubyte;
 	alias N = FatNodeInfo!(ItemNode.sizeof, 1, 64, LengthType.sizeof);
diff --git a/src/containers/internal/hash.d b/src/containers/internal/hash.d
index f28fd81..09fc4fc 100644
--- a/src/containers/internal/hash.d
+++ b/src/containers/internal/hash.d
@@ -44,17 +44,28 @@ else
  */
 size_t hashToIndex(const size_t hash, const size_t len) pure nothrow @nogc @safe
 {
-	import core.bitop : bsr;
-
 	// This magic number taken from
 	// https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
 	//
 	// It's amazing how much faster this makes the hash data structures
 	// when faced with low quality hash functions.
 	static if (size_t.sizeof == 8)
-		return (hash * 11_400_714_819_323_198_485UL) >>> (64 - bsr(len));
+		enum ulong magic = 11_400_714_819_323_198_485UL;
 	else
-		return (hash * 2_654_435_769U) >>> (32 - bsr(len));
+		enum uint magic = 2_654_435_769U;
+
+	if (len <= 1)
+		return 0;
+	version(LDC)
+	{
+		import ldc.intrinsics : llvm_cttz;
+		return (hash * magic) >>> ((size_t.sizeof * 8) - llvm_cttz(len, true));
+	}
+	else
+	{
+		import core.bitop : bsf;
+		return (hash * magic) >>> ((size_t.sizeof * 8) - bsf(len));
+	}
 }
 
 enum size_t DEFAULT_BUCKET_COUNT = 8;
diff --git a/src/containers/unrolledlist.d b/src/containers/unrolledlist.d
index 28ae3a5..ae13909 100644
--- a/src/containers/unrolledlist.d
+++ b/src/containers/unrolledlist.d
@@ -252,8 +252,16 @@ struct UnrolledList(T, Allocator = Mallocator,
 	}
 	body
 	{
-		import containers.internal.backwards : bsf;
-		size_t index = bsf(_front.registry);
+		version (LDC)
+		{
+			import ldc.intrinsics : llvm_cttz;
+			size_t index = llvm_cttz(_front.registry, true);
+		}
+		else
+		{
+			import containers.internal.backwards : bsf;
+			size_t index = bsf(_front.registry);
+		}
 		T r = _front.items[index];
 		_front.markUnused(index);
 		_length--;
@@ -302,9 +310,16 @@ struct UnrolledList(T, Allocator = Mallocator,
 	}
 	body
 	{
-		import containers.internal.backwards : bsf;
-
-		immutable size_t index = bsf(_front.registry);
+		version (LDC)
+		{
+			import ldc.intrinsics : llvm_cttz;
+			immutable index = llvm_cttz(_front.registry, true);
+		}
+		else
+		{
+			import containers.internal.backwards : bsf;
+			immutable index = bsf(_front.registry);
+		}
 		return *(cast(typeof(return)*) &_front.items[index]);
 	}
 
@@ -377,13 +392,22 @@ struct UnrolledList(T, Allocator = Mallocator,
 
 		this(inout(Node)* current)
 		{
-			import containers.internal.backwards : bsf;
 			import std.format:format;
 
 			this.current = current;
 			if (current !is null)
 			{
-				index = bsf(current.registry);
+				version (LDC)
+				{
+					import ldc.intrinsics : llvm_cttz;
+					index = llvm_cttz(current.registry, true);
+				}
+				else
+				{
+					import containers.internal.backwards : bsf;
+					index = bsf(current.registry);
+				}
+
 				assert (index < nodeCapacity);
 			}
 			else
@@ -490,12 +514,22 @@ private:
 
 	static bool shouldMerge(const Node* first, const Node* second)
 	{
-		import containers.internal.backwards : popcnt;
-
 		if (first is null || second is null)
 			return false;
-		immutable f = popcnt(first.registry);
-		immutable s = popcnt(second.registry);
+		version (LDC)
+		{
+			import ldc.intrinsics : llvm_ctpop;
+
+			immutable f = llvm_ctpop(first.registry);
+			immutable s = llvm_ctpop(second.registry);
+		}
+		else
+		{
+			import containers.internal.backwards : popcnt;
+
+			immutable f = popcnt(first.registry);
+			immutable s = popcnt(second.registry);
+		}
 		return f + s <= nodeCapacity;
 	}
 
@@ -508,7 +542,6 @@ private:
 	}
 	body
 	{
-		import containers.internal.backwards : bsf;
 		size_t i;
 		ContainerStorageType!T[nodeCapacity] temp;
 		foreach (j; 0 .. nodeCapacity)
@@ -529,11 +562,20 @@ private:
 	{
 		size_t nextAvailableIndex() const nothrow pure @safe @nogc
 		{
-			import containers.internal.backwards : bsf;
 			static if (BookkeepingType.sizeof < uint.sizeof)
-				return bsf(~(cast(uint) registry));
+				immutable uint notReg = ~(cast(uint) registry);
+			else
+				immutable uint notReg = ~registry;
+			version (LDC)
+			{
+				import ldc.intrinsics : llvm_cttz;
+				return llvm_cttz(notReg, true);
+			}
 			else
-				return bsf(~registry);
+			{
+				import containers.internal.backwards : bsf;
+				return bsf(notReg);
+			}
 		}
 
 		void markUsed(size_t index) nothrow pure @safe @nogc
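A note on the hashToIndex hunk above: replacing bsr(len) with a count-trailing-zeros primitive (llvm_cttz / bsf) gives the same shift amount only when len is a power of two, where both equal log2(len). Below is a minimal standalone sketch of the resulting Fibonacci-hashing computation under that power-of-two assumption; it uses only core.bitop so it builds with any D compiler, and the name fibHashToIndex is illustrative rather than part of the library.

import core.bitop : bsf;

// Maps a (possibly low-quality) hash to a bucket index in [0, len).
// Assumes len is a power of two, so bsf(len) == log2(len).
size_t fibHashToIndex(size_t hash, size_t len) pure nothrow @nogc @safe
{
	static if (size_t.sizeof == 8)
		enum size_t magic = 11_400_714_819_323_198_485UL; // 2^64 divided by the golden ratio
	else
		enum size_t magic = 2_654_435_769U;               // 2^32 divided by the golden ratio

	if (len <= 1)
		return 0;
	// The multiplication scrambles the input bits; the top log2(len) bits
	// of the product become the bucket index.
	return (hash * magic) >>> ((size_t.sizeof * 8) - bsf(len));
}

unittest
{
	foreach (size_t h; 0 .. 1_000)
		assert(fibHashToIndex(h, 8) < 8);
}

If len were not a power of two the shift would no longer map onto the full bucket range, so callers are expected to keep bucket counts at powers of two (as with DEFAULT_BUCKET_COUNT = 8).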
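The unrolledlist.d hunks all operate on a per-node registry occupancy bitmask, where bit i set means items[i] is in use. A small self-contained sketch of the three bit tricks involved follows; nodeCapacity, BookkeepingType, and the helper names are assumptions chosen for the example, not the library's actual definitions. bsf of the registry finds the first occupied slot, popcnt counts occupants for the shouldMerge test, and bsf of the complement finds the first free slot, as in nextAvailableIndex.

import core.bitop : bsf, popcnt;

enum nodeCapacity = 16;         // illustrative; the library computes this per node
alias BookkeepingType = ushort; // one bit per slot

// First occupied slot; the caller must ensure the node is not empty.
size_t firstUsedIndex(BookkeepingType registry) pure nothrow @nogc @safe
{
	assert(registry != 0);
	return bsf(registry);
}

// First free slot: the lowest clear bit of the registry. Widening to uint
// before complementing mirrors nextAvailableIndex; when every slot is used
// the result is nodeCapacity, signalling "full".
size_t firstFreeIndex(BookkeepingType registry) pure nothrow @nogc @safe
{
	return bsf(~cast(uint) registry);
}

// Two nodes can be merged when their combined occupants fit in one node,
// mirroring the shouldMerge test in the patch.
bool canMerge(BookkeepingType a, BookkeepingType b) pure nothrow @nogc @safe
{
	return popcnt(a) + popcnt(b) <= nodeCapacity;
}

unittest
{
	BookkeepingType reg = 0b0110_1001; // slots 0, 3, 5 and 6 in use
	assert(firstUsedIndex(reg) == 0);
	assert(firstFreeIndex(reg) == 1);
	assert(canMerge(reg, reg));        // 4 + 4 <= 16
}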