@@ -275,7 +275,7 @@ impl BuildHasher for EntityHash {
 /// try [`AHasher`] for a slower hash computation but fewer lookup conflicts.
 #[derive(Debug, Default)]
 pub struct EntityHasher {
-    hash: u64,
+    index: u32,
 }
 
 impl Hasher for EntityHasher {
@@ -285,6 +285,23 @@ impl Hasher for EntityHasher {
 
     #[inline]
     fn write_u64(&mut self, i: u64) {
+        // We ignore the generation entirely. It's always functionally correct
+        // to omit things when hashing, so long as it's consistent, just a perf
+        // trade-off. This hasher is designed for "normal" cases, where nearly
+        // everything in the table is a live entity, meaning there are few
+        // generation conflicts. And thus it's overall faster to just ignore
+        // the generation during hashing, leaving it to `Entity::eq` to
+        // confirm the generation matches -- just like `Entity::eq` checks that
+        // the index is actually the right one, since there's always the chance
+        // of a conflict in the index despite a good hash function.
+        //
+        // This truncation actually ends up with negative cost after optimization,
+        // as the optimizer will just skip loading the generation at all.
+        self.index = i as u32;
+    }
+
+    #[inline]
+    fn finish(&self) -> u64 {
         // SwissTable (and thus `hashbrown`) cares about two things from the hash:
         // - H1: low bits (masked by `2ⁿ-1`) to pick the slot in which to store the item
         // - H2: high 7 bits are used to SIMD optimize hash collision probing
@@ -302,16 +319,10 @@ impl Hasher for EntityHasher {
         // <https://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/>
         // It loses no information because it has a modular inverse.
         // (Specifically, `0x144c_bc89_u32 * 0x9e37_79b9_u32 == 1`.)
+        //
         // The low 32 bits are just 1, to leave the entity id there unchanged.
         const UPPER_PHI: u64 = 0x9e37_79b9_0000_0001;
-        // This bit-masking is free, as the optimizer can just not load the generation.
-        let id = i & 0xFFFF_FFFF;
-        self.hash = id.wrapping_mul(UPPER_PHI);
-    }
-
-    #[inline]
-    fn finish(&self) -> u64 {
-        self.hash
+        u64::from(self.index).wrapping_mul(UPPER_PHI)
     }
 }
 
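
To see how the patched hasher behaves end to end, here is a minimal, self-contained sketch that replicates it against `std`'s `HashMap`. The `EntityHash`/`EntityHasher` replicas and the `key` helper are written for this example (the real types live in the crate alongside `Entity`), and plain `u64` keys stand in for packed entity bits, generation in the high half, index in the low half.

use std::collections::HashMap;
use std::hash::{BuildHasher, Hasher};

// Stand-alone replicas of the patched types, for illustration only.
#[derive(Clone, Copy, Default)]
struct EntityHash;

#[derive(Default)]
struct EntityHasher {
    index: u32,
}

impl BuildHasher for EntityHash {
    type Hasher = EntityHasher;
    fn build_hasher(&self) -> EntityHasher {
        EntityHasher::default()
    }
}

impl Hasher for EntityHasher {
    fn write(&mut self, _bytes: &[u8]) {
        // The real hasher only ever sees a single `write_u64` of the packed
        // entity bits; any other call is a misuse.
        unimplemented!("EntityHasher only supports write_u64");
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        // Truncate to the low 32 bits: keep the index, drop the generation.
        self.index = i as u32;
    }

    #[inline]
    fn finish(&self) -> u64 {
        const UPPER_PHI: u64 = 0x9e37_79b9_0000_0001;
        u64::from(self.index).wrapping_mul(UPPER_PHI)
    }
}

fn main() {
    // Pack a hypothetical entity: generation in the high 32 bits, index low.
    let key = |generation: u64, index: u64| (generation << 32) | index;

    let mut map: HashMap<u64, &str, EntityHash> = HashMap::with_hasher(EntityHash);
    map.insert(key(1, 42), "live entity");

    // Same index, different generation: same hash, but full 64-bit equality
    // (standing in for `Entity::eq`) rejects the stale key.
    assert_eq!(map.get(&key(1, 42)), Some(&"live entity"));
    assert_eq!(map.get(&key(2, 42)), None);
}

Because only the low 32 bits feed the hash, a stale generation lands in the same bucket as its live counterpart, and it is the equality check that rejects it, exactly the trade-off the comment describes.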
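
The "loses no information" claim is also easy to check mechanically: `UPPER_PHI` is odd, so it has a multiplicative inverse mod 2^64 and the multiply is a bijection on `u64`. The sketch below verifies the 32-bit inverse pair quoted in the comment, derives the 64-bit inverse with Newton's method (`inverse_mod_2_64` is a helper invented for this sketch, not part of the crate), and shows that the index round-trips through the hash.

// Compute the inverse of an odd u64 mod 2^64. Starting from `a` (correct
// mod 2^3 for odd `a`), each Newton step doubles the number of correct
// low bits, so five steps cover all 64 bits.
fn inverse_mod_2_64(a: u64) -> u64 {
    assert!(a % 2 == 1, "only odd numbers are invertible mod 2^64");
    let mut x = a;
    for _ in 0..5 {
        x = x.wrapping_mul(2u64.wrapping_sub(a.wrapping_mul(x)));
    }
    x
}

fn main() {
    // The 32-bit inverse pair quoted in the comment.
    assert_eq!(0x144c_bc89_u32.wrapping_mul(0x9e37_79b9_u32), 1);

    // The full constant: golden-ratio constant in the high half, 1 in the low.
    const UPPER_PHI: u64 = 0x9e37_79b9_0000_0001;
    let inv = inverse_mod_2_64(UPPER_PHI);
    assert_eq!(UPPER_PHI.wrapping_mul(inv), 1);

    // Round trip: any index can be recovered from its hash, so distinct
    // indices never collide on the full 64-bit hash value.
    let index = 42u64;
    let hash = index.wrapping_mul(UPPER_PHI);
    assert_eq!(hash.wrapping_mul(inv), index);

    // SwissTable's H2 lives in the high 7 bits; even consecutive indices
    // get spread-out values there thanks to the golden-ratio multiply.
    for i in 0..4u64 {
        println!("index {i}: H2 = {:#04x}", i.wrapping_mul(UPPER_PHI) >> 57);
    }
}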