@@ -7,10 +7,11 @@ use std::ptr;
#[cfg(test)]
mod tests;

+const ELEM_SIZE: usize = mem::size_of::<u64>();
const BUFFER_SIZE_ELEMS: usize = 8;
-const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * mem::size_of::<u64>();
+const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * ELEM_SIZE;
const BUFFER_SIZE_ELEMS_SPILL: usize = BUFFER_SIZE_ELEMS + 1;
-const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * mem::size_of::<u64>();
+const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * ELEM_SIZE;
const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1;

#[derive(Debug, Clone)]
@@ -54,15 +55,16 @@ macro_rules! compress {
    }};
}

-// Copies up to 8 bytes from source to destination. This may be faster than
-// calling `ptr::copy_nonoverlapping` with an arbitrary count, since all of
-// the copies have fixed sizes and thus avoid calling memcpy.
+// Copies up to 8 bytes from source to destination. This performs better than
+// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
+// workloads since all of the copies have fixed sizes and avoid calling memcpy.
#[inline]
unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
-    debug_assert!(count <= 8);
+    const COUNT_MAX: usize = 8;
+    debug_assert!(count <= COUNT_MAX);

-    if count == 8 {
-        ptr::copy_nonoverlapping(src, dst, 8);
+    if count == COUNT_MAX {
+        ptr::copy_nonoverlapping(src, dst, COUNT_MAX);
        return;
    }

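The body of `copy_nonoverlapping_small` continues in the next hunk; its middle is elided here. As a standalone sketch of the technique the comment above describes (illustrative only, assumed name, not the committed body): a copy of at most 8 bytes can be split into fixed-size 4-, 2-, and 1-byte copies selected by the bits of `count`, so every `copy_nonoverlapping` call has a statically known length and no memcpy call is emitted.

// Illustrative sketch, not part of this diff: fixed-size chunked copy of
// `count <= 8` bytes. Each `copy_nonoverlapping` call has a constant length.
unsafe fn copy_up_to_8(src: *const u8, dst: *mut u8, count: usize) {
    debug_assert!(count <= 8);

    if count == 8 {
        std::ptr::copy_nonoverlapping(src, dst, 8);
        return;
    }

    let mut i = 0;
    if count & 4 != 0 {
        std::ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
        i += 4;
    }
    if count & 2 != 0 {
        std::ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
        i += 2;
    }
    if count & 1 != 0 {
        *dst.add(i) = *src.add(i);
        i += 1;
    }

    debug_assert_eq!(i, count);
}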
@@ -85,7 +87,7 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
    debug_assert_eq!(i, count);
}

-// Implementation
+// # Implementation
//
// This implementation uses buffering to reduce the hashing cost for inputs
// consisting of many small integers. Buffering simplifies the integration of
@@ -99,10 +101,11 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
//
// When a write fills the buffer, a buffer processing function is invoked to
// hash all of the buffered input. The buffer processing functions are marked
-// #[inline(never)] so that they aren't inlined into the append functions, which
-// ensures the more frequently called append functions remain inlineable and
-// don't include register pushing/popping that would only be made necessary by
-// inclusion of the complex buffer processing path which uses those registers.
+// `#[inline(never)]` so that they aren't inlined into the append functions,
+// which ensures the more frequently called append functions remain inlineable
+// and don't include register pushing/popping that would only be made necessary
+// by inclusion of the complex buffer processing path which uses those
+// registers.
//
// The buffer includes a "spill"--an extra element at the end--which simplifies
// the integer write buffer processing path. The value that fills the buffer can
@@ -118,7 +121,7 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
// efficiently implemented with an uninitialized buffer. On the other hand, an
// uninitialized buffer may become more important should a larger one be used.
//
-// Platform Dependence
+// # Platform Dependence
//
// The SipHash algorithm operates on byte sequences. It parses the input stream
// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
@@ -131,14 +134,14 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
// native size), or independent (by converting to a common size), supposing the
// values can be represented in 32 bits.
//
-// In order to make SipHasher128 consistent with SipHasher in libstd, we choose
-// to do the integer to byte sequence conversion in the platform-dependent way.
-// Clients can achieve (nearly) platform-independent hashing by widening `isize`
-// and `usize` integers to 64 bits on 32-bit systems and byte-swapping integers
-// on big-endian systems before passing them to the writing functions. This
-// causes the input byte sequence to look identical on big- and little- endian
-// systems (supposing `isize` and `usize` values can be represented in 32 bits),
-// which ensures platform-independent results.
+// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
+// choose to do the integer to byte sequence conversion in the platform-
+// dependent way. Clients can achieve (nearly) platform-independent hashing by
+// widening `isize` and `usize` integers to 64 bits on 32-bit systems and
+// byte-swapping integers on big-endian systems before passing them to the
+// writing functions. This causes the input byte sequence to look identical on
+// big- and little-endian systems (supposing `isize` and `usize` values can be
+// represented in 32 bits), which ensures platform-independent results.
impl SipHasher128 {
    #[inline]
    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
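As the Platform Dependence comment above suggests, a client wanting (nearly) platform-independent results can normalize integers before writing them. A minimal sketch under that reading, with a hypothetical helper name; this is not part of the diff or of the crate's API:

use std::hash::Hasher;

// Hypothetical client-side helper: widen to 64 bits and convert to a
// little-endian representation before writing. `to_le` is a no-op on
// little-endian targets and a byte swap on big-endian ones, so the byte
// sequence seen by the hasher is the same everywhere (assuming the value
// fits in 32 bits for the cross-width case).
fn write_usize_portable<H: Hasher>(hasher: &mut H, value: usize) {
    hasher.write_u64((value as u64).to_le());
}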
@@ -156,7 +159,7 @@ impl SipHasher128 {
        };

        unsafe {
-            // Initialize spill because we read from it in short_write_process_buffer.
+            // Initialize spill because we read from it in `short_write_process_buffer`.
            *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
        }

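An aside on the buffering scheme described in the module comments above: the hot append path stays small and inlineable while the cold buffer-processing path is forced out of line. A schematic sketch with assumed names (`TinyHasher`, `flush`), not the real SipHasher128 internals:

// Schematic sketch of the append/process split, with assumed names.
struct TinyHasher {
    buf: [u8; 64],
    nbuf: usize,
    // ... hashing state would live here ...
}

impl TinyHasher {
    #[inline]
    fn write_u8(&mut self, x: u8) {
        if self.nbuf + 1 < self.buf.len() {
            // Common case: just append to the buffer.
            self.buf[self.nbuf] = x;
            self.nbuf += 1;
            return;
        }
        // Rare case: this write fills the buffer; hash the buffered input.
        self.flush(x);
    }

    // Kept out of line so the frequently called append path above does not
    // inherit this path's register pressure.
    #[inline(never)]
    fn flush(&mut self, x: u8) {
        self.buf[self.nbuf] = x;
        // ... compression rounds over the full buffer would run here ...
        self.nbuf = 0;
    }
}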
@@ -190,9 +193,9 @@ impl SipHasher128 {
    // A specialized write function for values with size <= 8 that should only
    // be called when the write would cause the buffer to fill.
    //
-    // SAFETY: the write of x into self.buf starting at byte offset self.nbuf
-    // must cause self.buf to become fully initialized (and not overflow) if it
-    // wasn't already.
+    // SAFETY: the write of `x` into `self.buf` starting at byte offset
+    // `self.nbuf` must cause `self.buf` to become fully initialized (and not
+    // overflow) if it wasn't already.
    #[inline(never)]
    unsafe fn short_write_process_buffer<T>(&mut self, x: T) {
        let size = mem::size_of::<T>();
@@ -223,7 +226,7 @@ impl SipHasher128 {
        ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1);

        // This function should only be called when the write fills the buffer.
-        // Therefore, when size == 1, the new self.nbuf must be zero. The size
+        // Therefore, when size == 1, the new `self.nbuf` must be zero. The size
        // is statically known, so the branch is optimized away.
        self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE_BYTES };
        self.processed += BUFFER_SIZE_BYTES;
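The `size == 1` remark above can be sanity-checked exhaustively. A standalone check, with the buffer-size constant duplicated for illustration rather than taken from the module: whenever a write of `size` bytes fills the buffer, the leftover `nbuf + size - BUFFER_SIZE_BYTES` is less than `size`, and for `size == 1` it is always zero, which is what the explicit branch encodes.

// Standalone check of the `self.nbuf` update discussed above; the constant is
// an assumed duplicate, not the module's own.
const BUFFER_SIZE_BYTES: usize = 8 * 8;

fn main() {
    for size in 1..=8 {
        for nbuf in 0..BUFFER_SIZE_BYTES {
            // The buffer-filling case: nbuf + size reaches the buffer end.
            if nbuf + size >= BUFFER_SIZE_BYTES {
                let new_nbuf = nbuf + size - BUFFER_SIZE_BYTES;
                assert!(new_nbuf < size);
                if size == 1 {
                    assert_eq!(new_nbuf, 0);
                }
            }
        }
    }
    println!("nbuf update arithmetic checks out");
}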
@@ -240,7 +243,7 @@ impl SipHasher128 {
            unsafe {
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);

-                if length < 8 {
+                if length <= 8 {
                    copy_nonoverlapping_small(msg.as_ptr(), dst, length);
                } else {
                    // This memcpy is *not* optimized away.
@@ -259,9 +262,9 @@ impl SipHasher128 {
    // A write function for byte slices that should only be called when the
    // write would cause the buffer to fill.
    //
-    // SAFETY: self.buf must be initialized up to the byte offset self.nbuf, and
-    // msg must contain enough bytes to initialize the rest of the element
-    // containing the byte offset self.nbuf.
+    // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
+    // and `msg` must contain enough bytes to initialize the rest of the element
+    // containing the byte offset `self.nbuf`.
    #[inline(never)]
    unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
        let length = msg.len();
@@ -272,19 +275,20 @@ impl SipHasher128 {
        // Always copy first part of input into current element of buffer.
        // This function should only be called when the write fills the buffer,
        // so we know that there is enough input to fill the current element.
-        let valid_in_elem = nbuf & 0x7;
-        let needed_in_elem = 8 - valid_in_elem;
+        let valid_in_elem = nbuf % ELEM_SIZE;
+        let needed_in_elem = ELEM_SIZE - valid_in_elem;

        let src = msg.as_ptr();
        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
        copy_nonoverlapping_small(src, dst, needed_in_elem);

        // Process buffer.

-        // Using nbuf / 8 + 1 rather than (nbuf + needed_in_elem) / 8 to show
-        // the compiler that this loop's upper bound is > 0. We know that is
-        // true, because last step ensured we have a full element in the buffer.
-        let last = nbuf / 8 + 1;
+        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+        // We know that is true, because last step ensured we have a full
+        // element in the buffer.
+        let last = nbuf / ELEM_SIZE + 1;

        for i in 0..last {
            let elem = self.buf.get_unchecked(i).assume_init().to_le();
@@ -293,26 +297,26 @@ impl SipHasher128 {
            self.state.v0 ^= elem;
        }

-        // Process the remaining u64-sized chunks of input.
+        // Process the remaining element-sized chunks of input.
        let mut processed = needed_in_elem;
        let input_left = length - processed;
-        let u64s_left = input_left / 8;
-        let u8s_left = input_left & 0x7;
+        let elems_left = input_left / ELEM_SIZE;
+        let extra_bytes_left = input_left % ELEM_SIZE;

-        for _ in 0..u64s_left {
+        for _ in 0..elems_left {
            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
-            processed += 8;
+            processed += ELEM_SIZE;
        }

        // Copy remaining input into start of buffer.
        let src = msg.as_ptr().add(processed);
        let dst = self.buf.as_mut_ptr() as *mut u8;
-        copy_nonoverlapping_small(src, dst, u8s_left);
+        copy_nonoverlapping_small(src, dst, extra_bytes_left);

-        self.nbuf = u8s_left;
+        self.nbuf = extra_bytes_left;
        self.processed += nbuf + processed;
    }

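The loop-bound rewrite above (`nbuf / ELEM_SIZE + 1` in place of `(nbuf + needed_in_elem) / ELEM_SIZE`) can also be checked exhaustively for every valid `nbuf`. A standalone check with the element size duplicated for illustration, not taken from the module:

// Standalone check: both loop-bound expressions agree for every nbuf that can
// occur (nbuf < BUFFER_SIZE_BYTES), and the rewritten form is obviously >= 1.
const ELEM_SIZE: usize = 8;

fn main() {
    for nbuf in 0..(8 * ELEM_SIZE) {
        let valid_in_elem = nbuf % ELEM_SIZE;
        let needed_in_elem = ELEM_SIZE - valid_in_elem;
        assert_eq!(nbuf / ELEM_SIZE + 1, (nbuf + needed_in_elem) / ELEM_SIZE);
    }
    println!("loop bound forms agree for all valid nbuf");
}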
@@ -321,7 +325,7 @@ impl SipHasher128 {
        debug_assert!(self.nbuf < BUFFER_SIZE_BYTES);

        // Process full elements in buffer.
-        let last = self.nbuf / 8;
+        let last = self.nbuf / ELEM_SIZE;

        // Since we're consuming self, avoid updating members for a potential
        // performance gain.
@@ -335,14 +339,14 @@ impl SipHasher128 {
        }

        // Get remaining partial element.
-        let elem = if self.nbuf % 8 != 0 {
+        let elem = if self.nbuf % ELEM_SIZE != 0 {
            unsafe {
                // Ensure element is initialized by writing zero bytes. At most
-                // seven are required given the above check. It's safe to write
-                // this many because we have the spill element and we maintain
-                // self.nbuf such that this write will start before the spill.
+                // `ELEM_SIZE - 1` are required given the above check. It's safe
+                // to write this many because we have the spill and we maintain
+                // `self.nbuf` such that this write will start before the spill.
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
-                ptr::write_bytes(dst, 0, 7);
+                ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
                self.buf.get_unchecked(last).assume_init().to_le()
            }
        } else {