diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index d7347cc..97e49dc 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -18,11 +18,11 @@ jobs:
     - name: Generate code coverage
       run: cargo llvm-cov --all-features --html
     - name: Upload artifact
-      uses: actions/upload-pages-artifact@v3
+      uses: actions/upload-pages-artifact@v1
       with:
         path: "./target/llvm-cov/html"
     - name: Deploy to Netlify
-      uses: nwtgck/actions-netlify@v3.0
+      uses: nwtgck/actions-netlify@v2.0
       with:
         publish-dir: './target/llvm-cov/html'
         production-branch: main
diff --git a/Cargo.toml b/Cargo.toml
index 28e92a1..dfe9460 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,11 +2,11 @@
 name = "ringbuffer"
 version = "0.15.0"
 authors = [
-    "Vivian Roest ",
+    "Victor Roest ",
     "Jonathan Dönszelmann ",
 ]
 edition = "2021"
-rust-version = "1.79"
+rust-version = "1.59"
 description = "A fixed-size circular buffer"
 repository = "https://github.com/NULLx76/ringbuffer/"
 keywords = ["ring", "cyclic", "circular", "buffer", "no-std"]
@@ -14,14 +14,16 @@
 categories = ["data-structures"]
 license = "MIT"

 [dev-dependencies]
-criterion = { version = "0.4.0", features = ["html_reports"] }
+criterion = "0.4.0"
 compiletest_rs = "0.10.0"

 [features]
-default = ["alloc"]
+default = ["alloc", "batched_extend"]
 # disable the alloc based ringbuffer, to make RingBuffers work in no_alloc environments
 alloc = []
+batched_extend = []
+
 [[bench]]
 name = "bench"
 harness = false
diff --git a/README.md b/README.md
index e1d7a4c..e3cd9ae 100644
--- a/README.md
+++ b/README.md
@@ -20,29 +20,32 @@ All of these ringbuffers also implement the [RingBuffer][4] trait for their shar
 [3]: https://docs.rs/ringbuffer/latest/ringbuffer/struct.ConstGenericRingBuffer.html
 [4]: https://docs.rs/ringbuffer/latest/ringbuffer/trait.RingBuffer.html

-MSRV: Rust 1.79
+MSRV: Rust 1.59

 # Usage

 ```rust
 use ringbuffer::{AllocRingBuffer, RingBuffer};

-let mut buffer = AllocRingBuffer::with_capacity(2);
+fn main() {
+    let mut buffer = AllocRingBuffer::with_capacity(2);

-// First entry of the buffer is now 5.
-buffer.push(5);
+    // First entry of the buffer is now 5.
+    buffer.push(5);

-// The last item we pushed is 5
-assert_eq!(buffer.back(), Some(&5));
+    // The last item we pushed is 5
+    assert_eq!(buffer.back(), Some(&5));

-// Second entry is now 42.
-buffer.push(42);
-assert_eq!(buffer.peek(), Some(&5));
-assert!(buffer.is_full());
+    // Second entry is now 42.
+    buffer.push(42);
+    assert_eq!(buffer.peek(), Some(&5));
+    assert!(buffer.is_full());
+
+    // Because the capacity is reached, the next push overwrites the oldest item in the buffer.
+    buffer.push(1);
+    assert_eq!(buffer.to_vec(), vec![42, 1]);
+}

-// Because capacity is reached the next push will be the first item of the buffer.
-buffer.push(1); -assert_eq!(buffer.to_vec(), vec![42, 1]); ``` # Features diff --git a/benches/bench.rs b/benches/bench.rs index 67f50d8..3289d0e 100644 --- a/benches/bench.rs +++ b/benches/bench.rs @@ -1,14 +1,14 @@ -#![feature(coverage_attribute)] -#![coverage(off)] -use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; -use ringbuffer::{AllocRingBuffer, ConstGenericRingBuffer, RingBuffer, SetLen}; +#![cfg(not(tarpaulin_include))] + +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Bencher, Criterion}; +use ringbuffer::{AllocRingBuffer, ConstGenericRingBuffer, RingBuffer}; fn benchmark_push, F: Fn() -> T>(b: &mut Bencher, new: F) { b.iter(|| { let mut rb = new(); for i in 0..1_000_000 { - rb.enqueue(i); + rb.push(i); black_box(()); } @@ -21,25 +21,25 @@ fn benchmark_push_dequeue, F: Fn() -> T>(b: &mut Bencher, new let mut rb = new(); for _i in 0..100_000 { - let _ = rb.enqueue(1); + rb.push(1); black_box(()); - let _ = rb.enqueue(2); + rb.push(2); black_box(()); assert_eq!(black_box(rb.dequeue()), Some(1)); assert_eq!(black_box(rb.dequeue()), Some(2)); - let _ = rb.enqueue(1); + rb.push(1); black_box(()); - let _ = rb.enqueue(2); + rb.push(2); black_box(()); assert_eq!(black_box(rb.dequeue()), Some(1)); assert_eq!(black_box(rb.dequeue()), Some(2)); - let _ = rb.enqueue(1); + rb.push(1); black_box(()); - let _ = rb.enqueue(2); + rb.push(2); black_box(()); assert_eq!(black_box(rb.get_signed(-1)), Some(&2)); @@ -55,7 +55,7 @@ fn benchmark_various, F: Fn() -> T>(b: &mut Bencher, new: F) let mut rb = new(); for i in 0..100_000 { - rb.enqueue(i); + rb.push(i); black_box(()); black_box(rb.back()); } @@ -64,99 +64,6 @@ fn benchmark_various, F: Fn() -> T>(b: &mut Bencher, new: F) }) } -fn benchmark_skip, F: Fn() -> T>(b: &mut Bencher, new: F) { - let mut rb = new(); - rb.fill(9); - b.iter(|| { - for i in 0..rb.len() { - assert_eq!(rb.iter().skip(i).next(), Some(&9)); - } - }) -} - -fn benchmark_copy_to_slice_vs_extend, F: Fn() -> T>( - rb_size: usize, - rb_type: &str, - fn_name: &str, - c: &mut Criterion, - new: F, -) { - let mut group = c.benchmark_group(format!("{fn_name}({rb_type}, {rb_size})")); - let mut output = vec![0; rb_size]; - group.bench_function(format!("CopyTo({rb_type}; {rb_size})"), |b| { - let mut rb = new(); - rb.fill(9); - // making sure the read/write pointers wrap around - for _ in 0..rb_size / 2 { - let _ = rb.dequeue(); - let _ = rb.enqueue(9); - } - b.iter(|| { - rb.copy_to_slice(0, &mut output); - assert_eq!(output[output.len() / 2], 9); - assert_eq!(output.len(), rb_size); - }) - }); - let mut output: Vec = Vec::with_capacity(rb_size); - group.bench_function(format!("ExtendVec({rb_type}; {rb_size})"), |b| { - let mut rb = new(); - rb.fill(9); - // making sure the read/write pointers wrap around - for _ in 0..rb_size / 2 { - let _ = rb.dequeue(); - let _ = rb.enqueue(9); - } - b.iter(|| { - unsafe { output.set_len(0) }; - output.extend(rb.iter()); - assert_eq!(output[output.len() / 2], 9); - assert_eq!(output.len(), rb_size); - }) - }); - group.finish(); -} - -fn benchmark_copy_from_slice_vs_extend + SetLen, F: Fn() -> T>( - rb_size: usize, - rb_type: &str, - fn_name: &str, - c: &mut Criterion, - new: F, -) { - let mut group = c.benchmark_group(format!("{fn_name}({rb_type}, {rb_size})")); - let input = vec![9; rb_size]; - group.bench_function(format!("CopyFrom({rb_type}; {rb_size})"), |b| { - let mut rb = new(); - rb.fill(0); - // making sure the read/write pointers wrap around - for _ in 0..rb_size / 2 { - let _ = 
rb.dequeue(); - let _ = rb.enqueue(0); - } - for _ in 0..rb_size / 2 {} - b.iter(|| { - rb.copy_from_slice(0, &input); - assert_eq!(rb[rb.len() / 2], 9); - assert_eq!(rb.len(), rb_size); - }) - }); - group.bench_function(format!("ExtendRb({rb_type}; {rb_size})"), |b| { - let mut rb = new(); - // making sure the read/write pointers wrap around - for _ in 0..rb_size / 2 { - let _ = rb.dequeue(); - let _ = rb.enqueue(0); - } - b.iter(|| { - unsafe { rb.set_len(0) }; - rb.extend(input.iter().copied()); - assert_eq!(rb[rb.len() / 2], 9); - assert_eq!(rb.len(), rb_size); - }) - }); - group.finish(); -} - macro_rules! generate_benches { (called, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => { $( @@ -180,22 +87,6 @@ macro_rules! generate_benches { })); )* }; - - (compare, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => { - $( - $bmfunc($i, stringify!($rb), stringify!($bmfunc), $c, || { - $rb::<$ty>::$fn($i) - }); - )* - }; - - (compare_typed, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => { - $( - $bmfunc($i, stringify!($rb), stringify!($bmfunc), $c, || { - $rb::<$ty, $i>::$fn() - }); - )* - }; } fn criterion_benchmark(c: &mut Criterion) { @@ -290,145 +181,68 @@ fn criterion_benchmark(c: &mut Criterion) { 8192, 8195 ]; - generate_benches![ - typed, - c, - ConstGenericRingBuffer, - i32, - new, - benchmark_skip, - 16, - 1024, - 4096, - 8192 - ]; - generate_benches![ - called, - c, - AllocRingBuffer, - i32, - new, - benchmark_skip, - 16, - 17, - 1024, - 4096, - 8192, - 8195 - ]; - generate_benches![ - compare, - c, - AllocRingBuffer, - i32, - new, - benchmark_copy_to_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare_typed, - c, - ConstGenericRingBuffer, - i32, - new, - benchmark_copy_to_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare, - c, - AllocRingBuffer, - i32, - new, - benchmark_copy_from_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare_typed, - c, - ConstGenericRingBuffer, - i32, - new, - benchmark_copy_from_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare, - c, - AllocRingBuffer, - i32, - new, - benchmark_copy_to_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare_typed, - c, - ConstGenericRingBuffer, - i32, - new, - benchmark_copy_to_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare, - c, - AllocRingBuffer, - i32, - new, - benchmark_copy_from_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; - generate_benches![ - compare_typed, - c, - ConstGenericRingBuffer, - i32, - new, - benchmark_copy_from_slice_vs_extend, - 16, - 1024, - 4096, - 8192, - 1_000_000, - 1_048_576 - ]; + c.bench_function("extend too many", extend_too_many); + c.bench_function("extend many too many", extend_many_too_many); + c.bench_function("extend exact cap", extend_exact_cap); + c.bench_function("extend too few", extend_too_few); + c.bench_function("extend after one", extend_after_one); +} + +fn extend_many_too_many(b: &mut Bencher) { + let rb = ConstGenericRingBuffer::new::<8192>(); + let input = (0..16384).collect::>(); + + b.iter_batched( + &|| rb.clone(), + |mut r| black_box(r.extend(black_box(input.as_slice()))), + BatchSize::SmallInput, + ); +} + +fn extend_too_many(b: &mut Bencher) { + let rb = 
ConstGenericRingBuffer::new::<8192>();
+    let input = (0..10000).collect::<Vec<_>>();
+
+    b.iter_batched(
+        &|| rb.clone(),
+        |mut r| black_box(r.extend(black_box(input.as_slice()))),
+        BatchSize::SmallInput,
+    );
+}
+
+fn extend_exact_cap(b: &mut Bencher) {
+    let rb = ConstGenericRingBuffer::new::<8192>();
+    let input = (0..8192).collect::<Vec<_>>();
+
+    b.iter_batched(
+        &|| rb.clone(),
+        |mut r| black_box(r.extend(black_box(input.as_slice()))),
+        BatchSize::SmallInput,
+    );
+}
+
+fn extend_too_few(b: &mut Bencher) {
+    let rb = ConstGenericRingBuffer::new::<8192>();
+    let input = (0..4096).collect::<Vec<_>>();
+
+    b.iter_batched(
+        &|| rb.clone(),
+        |mut r| black_box(r.extend(black_box(input.as_slice()))),
+        BatchSize::LargeInput,
+    );
+}
+
+fn extend_after_one(b: &mut Bencher) {
+    let mut rb = ConstGenericRingBuffer::new::<8192>();
+    rb.push(&0);
+    let input = (0..4096).collect::<Vec<_>>();
+
+    b.iter_batched(
+        &|| rb.clone(),
+        |mut r| black_box(r.extend(black_box(input.as_slice()))),
+        BatchSize::LargeInput,
+    );
 }

 criterion_group!(benches, criterion_benchmark);
diff --git a/src/lib.rs b/src/lib.rs
index 23cafe7..fa3dd17 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,9 +21,6 @@
 pub(crate) mod ringbuffer_trait;
 pub use ringbuffer_trait::RingBuffer;

-mod set_len_trait;
-pub use set_len_trait::SetLen;
-
 #[cfg(feature = "alloc")]
 mod with_alloc;
 #[cfg(feature = "alloc")]
@@ -67,7 +64,7 @@ mod tests {
     const capacity: usize = 8;
     fn test_neg_index(mut b: impl RingBuffer<usize>) {
         for i in 0..capacity + 2 {
-            let _ = b.enqueue(i);
+            b.push(i);
             assert_eq!(b.get_signed(-1), Some(&i));
         }
     }
@@ -117,10 +114,10 @@ fn run_test_len() {
         fn test_len(mut b: impl RingBuffer<i32>) {
             assert_eq!(0, b.len());
-            let _ = b.enqueue(1);
+            b.push(1);
             assert_eq!(1, b.len());
-            let _ = b.enqueue(2);
-            assert_eq!(2, b.len());
+            b.push(2);
+            assert_eq!(2, b.len())
         }

         test_len(AllocRingBuffer::new(8));
@@ -132,14 +129,14 @@ fn run_test_len_wrap() {
         fn test_len_wrap(mut b: impl RingBuffer<i32>) {
             assert_eq!(0, b.len());
-            let _ = b.enqueue(1);
+            b.push(1);
             assert_eq!(1, b.len());
-            let _ = b.enqueue(2);
+            b.push(2);
             assert_eq!(2, b.len());
             // Now we are wrapping
-            let _ = b.enqueue(3);
+            b.push(3);
             assert_eq!(2, b.len());
-            let _ = b.enqueue(4);
+            b.push(4);
             assert_eq!(2, b.len());
         }
@@ -149,20 +146,20 @@
         // the growable ringbuffer actually should grow instead of wrap
         let mut grb = GrowableAllocRingBuffer::with_capacity(2);
         assert_eq!(0, grb.len());
-        let _ = grb.enqueue(0);
+        grb.push(0);
         assert_eq!(1, grb.len());
-        let _ = grb.enqueue(1);
+        grb.push(1);
         assert_eq!(2, grb.len());
-        let _ = grb.enqueue(2);
+        grb.push(2);
         assert_eq!(3, grb.len());
     }

     #[test]
     fn run_test_clear() {
         fn test_clear(mut b: impl RingBuffer<i32>) {
-            let _ = b.enqueue(1);
-            let _ = b.enqueue(2);
-            let _ = b.enqueue(3);
+            b.push(1);
+            b.push(2);
+            b.push(3);

             b.clear();
             assert!(b.is_empty());
@@ -178,9 +175,9 @@ fn run_test_empty() {
         fn test_empty(mut b: impl RingBuffer<i32>) {
             assert!(b.is_empty());
-            let _ = b.enqueue(1);
-            let _ = b.enqueue(2);
-            let _ = b.enqueue(3);
+            b.push(1);
+            b.push(2);
+            b.push(3);
             assert!(!b.is_empty());

             b.clear();
@@ -196,13 +193,13 @@
     #[test]
     fn run_test_iter() {
         fn test_iter(mut b: impl RingBuffer<i32>) {
-            let _ = b.enqueue(1);
-            let _ = b.enqueue(2);
-            let _ = b.enqueue(3);
-            let _ = b.enqueue(4);
-            let _ = b.enqueue(5);
-            let _ = b.enqueue(6);
-            let _ = b.enqueue(7);
+            b.push(1);
+            b.push(2);
+            b.push(3);
+            b.push(4);
+            b.push(5);
+            b.push(6);
+            b.push(7);

             let mut iter = b.iter();
             assert_eq!(&1, 
iter.next().unwrap()); @@ -223,13 +220,13 @@ mod tests { #[test] fn run_test_forward_iter_non_power_of_two() { fn test_iter(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); - let _ = b.enqueue(4); - let _ = b.enqueue(5); - let _ = b.enqueue(6); - let _ = b.enqueue(7); + b.push(1); + b.push(2); + b.push(3); + b.push(4); + b.push(5); + b.push(6); + b.push(7); let mut iter = b.iter(); assert_eq!(&1, iter.next().unwrap()); @@ -250,13 +247,13 @@ mod tests { #[test] fn run_test_iter_non_power_of_two() { fn test_iter(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); - let _ = b.enqueue(4); - let _ = b.enqueue(5); - let _ = b.enqueue(6); - let _ = b.enqueue(7); + b.push(1); + b.push(2); + b.push(3); + b.push(4); + b.push(5); + b.push(6); + b.push(7); let mut iter = b.iter(); assert_eq!(&1, iter.next().unwrap()); @@ -281,13 +278,13 @@ mod tests { B: RingBuffer, for<'a> &'a B: IntoIterator>, { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); - let _ = b.enqueue(4); - let _ = b.enqueue(5); - let _ = b.enqueue(6); - let _ = b.enqueue(7); + b.push(1); + b.push(2); + b.push(3); + b.push(4); + b.push(5); + b.push(6); + b.push(7); let mut iter = (&b).into_iter(); assert_eq!(&1, iter.next().unwrap()); @@ -308,13 +305,13 @@ mod tests { #[test] fn run_test_into_iter() { fn test_iter(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); - let _ = b.enqueue(4); - let _ = b.enqueue(5); - let _ = b.enqueue(6); - let _ = b.enqueue(7); + b.push(1); + b.push(2); + b.push(3); + b.push(4); + b.push(5); + b.push(6); + b.push(7); let mut iter = b.into_iter(); assert_eq!(1, iter.next().unwrap()); @@ -336,9 +333,9 @@ mod tests { #[test] fn run_test_iter_with_lifetimes() { fn test_iter<'a>(string: &'a str, mut b: impl RingBuffer<&'a str>) { - let _ = b.enqueue(&string[0..1]); - let _ = b.enqueue(&string[1..2]); - let _ = b.enqueue(&string[2..3]); + b.push(&string[0..1]); + b.push(&string[1..2]); + b.push(&string[2..3]); let mut iter = b.iter(); assert_eq!(&&string[0..1], iter.next().unwrap()); @@ -358,9 +355,9 @@ mod tests { #[test] fn run_test_double_iter() { fn test_double_iter(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); let mut iter1 = b.iter(); let mut iter2 = b.iter(); @@ -381,10 +378,10 @@ mod tests { #[test] fn run_test_iter_wrap() { fn test_iter_wrap(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); // Wrap - let _ = b.enqueue(3); + b.push(3); let mut iter = b.iter(); assert_eq!(&2, iter.next().unwrap()); @@ -397,10 +394,10 @@ mod tests { // the growable ringbuffer shouldn't actually stop growing let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); // No wrap - let _ = b.enqueue(3); + b.push(3); let mut iter = b.iter(); assert_eq!(&1, iter.next().unwrap()); @@ -412,15 +409,15 @@ mod tests { #[test] fn run_test_iter_mut() { fn test_iter_mut(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); for el in b.iter_mut() { *el += 1; } - assert_eq!(vec![2, 3, 4], b.to_vec()); + assert_eq!(vec![2, 3, 4], b.to_vec()) } test_iter_mut(AllocRingBuffer::new(8)); @@ -436,15 +433,15 @@ mod tests { for<'a> &'a mut B: IntoIterator>, { - let _ = b.enqueue(1); - let _ = 
b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); for el in &mut b { *el += 1; } - assert_eq!(vec![2, 3, 4], b.to_vec()); + assert_eq!(vec![2, 3, 4], b.to_vec()) } test_iter_mut(AllocRingBuffer::new(8)); @@ -455,15 +452,15 @@ mod tests { #[test] fn test_iter_mut_wrap() { fn run_test_iter_mut_wrap(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); for i in b.iter_mut() { *i += 1; } - assert_eq!(vec![3, 4], b.to_vec()); + assert_eq!(vec![3, 4], b.to_vec()) } run_test_iter_mut_wrap(AllocRingBuffer::new(2)); @@ -472,23 +469,23 @@ mod tests { // The growable ringbuffer actually shouldn't wrap let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); for i in b.iter_mut() { *i += 1; } - assert_eq!(vec![2, 3, 4], b.to_vec()); + assert_eq!(vec![2, 3, 4], b.to_vec()) } #[test] fn test_iter_mut_miri_fail() { fn run_test_iter_mut_wrap(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); let buf = b.iter_mut().collect::>(); @@ -496,7 +493,7 @@ mod tests { *i += 1; } - assert_eq!(vec![3, 4], b.to_vec()); + assert_eq!(vec![3, 4], b.to_vec()) } run_test_iter_mut_wrap(AllocRingBuffer::new(2)); @@ -504,9 +501,9 @@ mod tests { // the growable ringbuffer actually shouldn't wrap let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); let buf = b.iter_mut().collect::>(); @@ -514,17 +511,17 @@ mod tests { *i += 1; } - assert_eq!(vec![2, 3, 4], b.to_vec()); + assert_eq!(vec![2, 3, 4], b.to_vec()) } #[test] fn run_test_to_vec() { fn test_to_vec(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); - assert_eq!(vec![1, 2, 3], b.to_vec()); + assert_eq!(vec![1, 2, 3], b.to_vec()) } test_to_vec(AllocRingBuffer::new(8)); @@ -535,12 +532,12 @@ mod tests { #[test] fn run_test_to_vec_wrap() { fn test_to_vec_wrap(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); // Wrap - let _ = b.enqueue(3); + b.push(3); - assert_eq!(vec![2, 3], b.to_vec()); + assert_eq!(vec![2, 3], b.to_vec()) } test_to_vec_wrap(AllocRingBuffer::new(2)); @@ -549,18 +546,18 @@ mod tests { // The growable ringbuffer should actually remember all items let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); + b.push(1); + b.push(2); + b.push(3); - assert_eq!(vec![1, 2, 3], b.to_vec()); + assert_eq!(vec![1, 2, 3], b.to_vec()) } #[test] fn run_test_index() { fn test_index(mut b: impl RingBuffer) { - let _ = b.enqueue(2); - assert_eq!(b[0], 2); + b.push(2); + assert_eq!(b[0], 2) } test_index(AllocRingBuffer::new(8)); @@ -571,14 +568,14 @@ mod tests { #[test] fn run_test_get() { fn test_index(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); - let _ = b.enqueue(3); - let _ = b.enqueue(4); - let _ = b.enqueue(5); - let _ = b.enqueue(6); - let _ = b.enqueue(7); + b.push(0); + b.push(1); + b.push(2); + b.push(3); + b.push(4); + b.push(5); + b.push(6); + b.push(7); assert_eq!(b.get(0), Some(&0)); assert_eq!(b.get(1), Some(&1)); @@ -598,7 +595,7 @@ mod tests { #[test] fn run_test_index_mut() { fn 
test_index_mut(mut b: impl RingBuffer) { - let _ = b.enqueue(2); + b.push(2); assert_eq!(b[0], 2); @@ -615,8 +612,8 @@ mod tests { #[test] fn run_test_peek_some() { fn test_peek_some(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.peek(), Some(&1)); } @@ -640,8 +637,8 @@ mod tests { #[test] fn run_test_get_relative() { fn test_get_relative(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); + b.push(0); + b.push(1); // get[(index + 1) % len] = 1 assert_eq!(b.get(0).unwrap(), &0); @@ -660,9 +657,9 @@ mod tests { #[test] fn run_test_wrapping_get_relative() { fn test_wrapping_get_relative(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); // [0, ...] // ^ @@ -681,9 +678,9 @@ mod tests { // the growable ringbuffer actually shouldn't wrap let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.get(0).unwrap(), &0); assert_eq!(b.get(1).unwrap(), &1); @@ -704,8 +701,8 @@ mod tests { #[test] fn run_test_get_relative_mut() { fn test_get_relative_mut(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); + b.push(0); + b.push(1); // [0, ...] // ^ @@ -728,9 +725,9 @@ mod tests { #[test] fn run_test_wrapping_get_relative_mut() { fn test_wrapping_get_relative_mut(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); *b.get_mut(0).unwrap() = 3; @@ -752,9 +749,9 @@ mod tests { // the growable ringbuffer actually shouldn't wrap let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); *b.get_mut(0).unwrap() = 3; @@ -779,7 +776,7 @@ mod tests { fn test_from_iterator + FromIterator>() { let b: T = std::iter::repeat(1).take(1024).collect(); assert_eq!(b.len(), 1024); - assert_eq!(b.to_vec(), vec![1; 1024]); + assert_eq!(b.to_vec(), vec![1; 1024]) } test_from_iterator::>(); @@ -791,7 +788,7 @@ mod tests { fn test_from_iterator_wrap + FromIterator>() { let b: T = std::iter::repeat(1).take(8000).collect(); assert_eq!(b.len(), b.capacity()); - assert_eq!(b.to_vec(), vec![1; b.capacity()]); + assert_eq!(b.to_vec(), vec![1; b.capacity()]) } test_from_iterator_wrap::>(); @@ -801,8 +798,8 @@ mod tests { #[test] fn run_test_get_relative_negative() { fn test_get_relative_negative(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); + b.push(0); + b.push(1); // [0, ...] 
// ^ @@ -825,8 +822,8 @@ mod tests { #[test] fn run_test_contains() { fn test_contains(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert!(b.contains(&1)); assert!(b.contains(&2)); @@ -841,9 +838,9 @@ mod tests { fn run_test_is_full() { fn test_is_full(mut b: impl RingBuffer) { assert!(!b.is_full()); - let _ = b.enqueue(1); + b.push(1); assert!(!b.is_full()); - let _ = b.enqueue(2); + b.push(2); assert!(b.is_full()); } @@ -855,8 +852,8 @@ mod tests { #[test] fn run_test_front_some() { fn test_front_some(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.front(), Some(&1)); } @@ -880,8 +877,8 @@ mod tests { #[test] fn run_test_back_some() { fn test_back_some(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.back(), Some(&2)); } @@ -905,8 +902,8 @@ mod tests { #[test] fn run_test_front_some_mut() { fn test_front_some_mut(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.front_mut(), Some(&mut 1)); } @@ -930,8 +927,8 @@ mod tests { #[test] fn run_test_back_some_mut() { fn test_back_some_mut(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.back_mut(), Some(&mut 2)); } @@ -955,8 +952,8 @@ mod tests { #[test] fn run_test_dequeue() { fn run_test_dequeue(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); + b.push(0); + b.push(1); assert_eq!(b.len(), 2); @@ -975,17 +972,16 @@ mod tests { #[test] fn run_test_skip() { - #[allow(deprecated)] fn test_skip(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); + b.push(0); + b.push(1); assert_eq!(b.len(), 2); b.skip(); b.skip(); - assert_eq!(b.len(), 0); + assert_eq!(b.len(), 0) } test_skip(AllocRingBuffer::new(8)); @@ -995,12 +991,11 @@ mod tests { #[test] fn run_test_skip_2() { - #[allow(deprecated)] fn test_skip2(mut rb: impl RingBuffer) { rb.skip(); rb.skip(); rb.skip(); - let _ = rb.enqueue(1); + rb.push(1); assert_eq!(rb.dequeue(), Some(1)); assert_eq!(rb.dequeue(), None); rb.skip(); @@ -1013,9 +1008,8 @@ mod tests { } #[test] - #[allow(deprecated)] - fn run_test_push_pop() { - fn test_push_pop(mut b: impl RingBuffer) { + fn run_test_push_dequeue_push() { + fn test_push_dequeue_push(mut b: impl RingBuffer) { b.push(0); b.push(1); @@ -1031,39 +1025,39 @@ mod tests { assert_eq!(b.dequeue(), None); } - test_push_pop(AllocRingBuffer::new(8)); - test_push_pop(GrowableAllocRingBuffer::with_capacity(8)); - test_push_pop(ConstGenericRingBuffer::::new()); + test_push_dequeue_push(AllocRingBuffer::new(8)); + test_push_dequeue_push(GrowableAllocRingBuffer::with_capacity(8)); + test_push_dequeue_push(ConstGenericRingBuffer::::new()); } #[test] - fn run_test_enqueue_dequeue_enqueue() { - fn test_enqueue_dequeue_enqueue(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); + fn run_test_enqueue_dequeue_push() { + fn test_enqueue_dequeue_push(mut b: impl RingBuffer) { + b.enqueue(0); + b.enqueue(1); assert_eq!(b.dequeue(), Some(0)); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); + b.enqueue(0); + b.enqueue(1); assert_eq!(b.dequeue(), Some(0)); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), None); } - test_enqueue_dequeue_enqueue(AllocRingBuffer::new(8)); - 
test_enqueue_dequeue_enqueue(GrowableAllocRingBuffer::with_capacity(8)); - test_enqueue_dequeue_enqueue(ConstGenericRingBuffer::::new()); + test_enqueue_dequeue_push(AllocRingBuffer::new(8)); + test_enqueue_dequeue_push(GrowableAllocRingBuffer::with_capacity(8)); + test_enqueue_dequeue_push(ConstGenericRingBuffer::::new()); } #[test] fn large_negative_index() { fn test_large_negative_index(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.get_signed(1), Some(&2)); assert_eq!(b.get_signed(0), Some(&1)); assert_eq!(b.get_signed(-1), Some(&2)); @@ -1079,8 +1073,8 @@ mod tests { #[test] fn large_negative_index_mut() { fn test_large_negative_index(mut b: impl RingBuffer) { - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(1); + b.push(2); assert_eq!(b.get_mut_signed(1), Some(&mut 2)); assert_eq!(b.get_mut_signed(0), Some(&mut 1)); assert_eq!(b.get_mut_signed(-1), Some(&mut 2)); @@ -1094,42 +1088,42 @@ mod tests { } #[test] - fn run_test_enqueue_dequeue_enqueue_full() { - fn test_enqueue_dequeue_enqueue_full(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + fn run_test_push_dequeue_push_full() { + fn test_push_dequeue_push_full(mut b: impl RingBuffer) { + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); } - test_enqueue_dequeue_enqueue_full(AllocRingBuffer::new(2)); - test_enqueue_dequeue_enqueue_full(ConstGenericRingBuffer::::new()); + test_push_dequeue_push_full(AllocRingBuffer::new(2)); + test_push_dequeue_push_full(ConstGenericRingBuffer::::new()); // the growable ringbuffer should actually keep growing and dequeue all items let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(0)); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(0)); assert_eq!(b.dequeue(), Some(1)); @@ -1138,112 +1132,112 @@ mod tests { } #[test] - fn run_test_enqueue_dequeue_enqueue_full_get() { - fn test_enqueue_dequeue_enqueue_full_get(mut b: impl RingBuffer) { - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + fn run_test_push_dequeue_push_full_get() { + fn test_push_dequeue_push_full_get(mut b: impl RingBuffer) { + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.get_signed(-1), Some(&2)); assert_eq!(b.get_signed(-2), Some(&1)); assert_eq!(b.get_signed(-3), Some(&2)); } - test_enqueue_dequeue_enqueue_full_get(AllocRingBuffer::new(2)); - test_enqueue_dequeue_enqueue_full_get(ConstGenericRingBuffer::::new()); + 
test_push_dequeue_push_full_get(AllocRingBuffer::new(2)); + test_push_dequeue_push_full_get(ConstGenericRingBuffer::::new()); // the growable ringbuffer should actually keep growing and dequeue all items let mut b = GrowableAllocRingBuffer::with_capacity(2); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(0)); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.dequeue(), Some(0)); assert_eq!(b.dequeue(), Some(1)); assert_eq!(b.dequeue(), Some(2)); assert_eq!(b.dequeue(), None); - let _ = b.enqueue(0); - let _ = b.enqueue(1); - let _ = b.enqueue(2); + b.push(0); + b.push(1); + b.push(2); assert_eq!(b.get_signed(-1), Some(&2)); assert_eq!(b.get_signed(-2), Some(&1)); - assert_eq!(b.get_signed(-3), Some(&0)); + assert_eq!(b.get_signed(-3), Some(&0)) } #[test] #[cfg_attr(miri, ignore)] // this test takes far too long with Miri enabled - fn run_test_enqueue_dequeue_enqueue_full_get_rep() { - fn test_enqueue_dequeue_enqueue_full_get_rep(mut rb: impl RingBuffer) { + fn run_test_push_dequeue_push_full_get_rep() { + fn test_push_dequeue_push_full_get_rep(mut rb: impl RingBuffer) { for _ in 0..100_000 { - let _ = rb.enqueue(1); - let _ = rb.enqueue(2); + rb.push(1); + rb.push(2); assert_eq!(rb.dequeue(), Some(1)); assert_eq!(rb.dequeue(), Some(2)); - let _ = rb.enqueue(1); - let _ = rb.enqueue(2); + rb.push(1); + rb.push(2); assert_eq!(rb.dequeue(), Some(1)); assert_eq!(rb.dequeue(), Some(2)); - let _ = rb.enqueue(1); - let _ = rb.enqueue(2); + rb.push(1); + rb.push(2); assert_eq!(rb.get_signed(-1), Some(&2)); assert_eq!(rb.get_signed(-2), Some(&1)); } } - test_enqueue_dequeue_enqueue_full_get_rep(AllocRingBuffer::new(8)); - test_enqueue_dequeue_enqueue_full_get_rep(GrowableAllocRingBuffer::with_capacity(8)); - test_enqueue_dequeue_enqueue_full_get_rep(ConstGenericRingBuffer::::new()); + test_push_dequeue_push_full_get_rep(AllocRingBuffer::new(8)); + test_push_dequeue_push_full_get_rep(GrowableAllocRingBuffer::with_capacity(8)); + test_push_dequeue_push_full_get_rep(ConstGenericRingBuffer::::new()); } #[test] fn run_test_clone() { fn test_clone(mut rb: impl RingBuffer + Clone + Eq + Debug) { - let _ = rb.enqueue(42); - let _ = rb.enqueue(32); - let _ = rb.enqueue(22); + rb.push(42); + rb.push(32); + rb.push(22); let mut other = rb.clone(); assert_eq!(rb, other); - let _ = rb.enqueue(11); - let _ = rb.enqueue(12); - let _ = other.enqueue(11); - let _ = other.enqueue(12); + rb.push(11); + rb.push(12); + other.push(11); + other.push(12); assert_eq!(rb, other); } @@ -1258,7 +1252,7 @@ mod tests { fn test_default_fill(mut rb: impl RingBuffer) { for i in 0..rb.capacity() { for _ in 0..i { - let _ = rb.enqueue(1); + rb.push(1); } assert_eq!(rb.len(), i); @@ -1284,12 +1278,12 @@ mod tests { let mut alloc_b = ConstGenericRingBuffer::::new(); assert!(alloc_a.eq(&alloc_b)); - let _ = alloc_a.enqueue(1); + alloc_a.push(1); assert!(!alloc_b.eq(&alloc_a)); - let _ = alloc_b.enqueue(1); + alloc_b.push(1); assert!(alloc_a.eq(&alloc_b)); - let _ = alloc_a.enqueue(4); - let _ = alloc_b.enqueue(2); + alloc_a.push(4); + alloc_b.push(2); assert!(!alloc_b.eq(&alloc_a)); } @@ -1297,7 +1291,7 @@ mod tests { fn run_next_back_test() { fn next_back_test(mut rb: impl RingBuffer) { for i in 1..=4 { - let _ = rb.enqueue(i); + rb.push(i); } let mut it = rb.iter(); @@ 
-1317,7 +1311,7 @@ mod tests { fn run_next_back_test_mut() { fn next_back_test_mut(mut rb: impl RingBuffer) { for i in 1..=4 { - let _ = rb.enqueue(i); + rb.push(i); } let mut it = rb.iter_mut(); @@ -1346,7 +1340,7 @@ mod tests { fn test_fill(mut rb: impl RingBuffer) { for i in 0..rb.capacity() { for _ in 0..i { - let _ = rb.enqueue(1); + rb.push(1); } assert_eq!(rb.len(), i); @@ -1398,8 +1392,8 @@ mod tests { parent: Some(unsafe { dt.as_ref() }.unwrap().borrow_mut()), }; let mut rb = { $constructor }; - let _ = rb.enqueue(d); - let _ = rb.enqueue(Dropee { parent: None }); + rb.push(d); + rb.push(Dropee { parent: None }); } { // Safety: @@ -1435,13 +1429,13 @@ mod tests { macro_rules! test_clone { ($e: expr) => { let mut e1 = $e; - let _ = e1.enqueue(1); - let _ = e1.enqueue(2); + e1.push(1); + e1.push(2); let mut e2 = e1.clone(); - let _ = e2.enqueue(11); - let _ = e2.enqueue(12); + e2.push(11); + e2.push(12); assert_eq!(e1.to_vec(), vec![1, 2]); assert_eq!(e2.to_vec(), vec![1, 2, 11, 12]); @@ -1452,611 +1446,4 @@ mod tests { test_clone!(GrowableAllocRingBuffer::<_>::new()); test_clone!(AllocRingBuffer::<_>::new(4)); } - - #[test] - fn iter_nth_override() { - macro_rules! test_concrete { - ($rb_init: expr) => { - let rb = $rb_init([1, 2, 3, 4]); - assert_eq!(rb.iter().nth(0), Some(&1)); - assert_eq!(rb.iter().nth(1), Some(&2)); - assert_eq!(rb.iter().nth(2), Some(&3)); - assert_eq!(rb.iter().nth(3), Some(&4)); - assert_eq!(rb.iter().nth(4), None); - - let mut rb = $rb_init([1, 2, 3, 4]); - assert_eq!(rb.iter_mut().nth(0), Some(&mut 1)); - assert_eq!(rb.iter_mut().nth(1), Some(&mut 2)); - assert_eq!(rb.iter_mut().nth(2), Some(&mut 3)); - assert_eq!(rb.iter_mut().nth(3), Some(&mut 4)); - assert_eq!(rb.iter_mut().nth(4), None); - - let rb = $rb_init([1, 2, 3, 4]); - assert_eq!(rb.clone().into_iter().nth(0), Some(1)); - assert_eq!(rb.clone().into_iter().nth(1), Some(2)); - assert_eq!(rb.clone().into_iter().nth(2), Some(3)); - assert_eq!(rb.clone().into_iter().nth(3), Some(4)); - assert_eq!(rb.clone().into_iter().nth(4), None); - }; - } - - test_concrete!(|values: [i32; 4]| ConstGenericRingBuffer::<_, 4>::from(values)); - test_concrete!(|values: [i32; 4]| GrowableAllocRingBuffer::<_>::from(values)); - test_concrete!(|values: [i32; 4]| AllocRingBuffer::<_>::from(values)); - } - - #[test] - fn iter_nth_back_override() { - macro_rules! test_concrete { - ($rb_init: expr) => { - let rb = $rb_init([1, 2, 3, 4]); - assert_eq!(rb.iter().nth_back(0), Some(&4)); - assert_eq!(rb.iter().nth_back(1), Some(&3)); - assert_eq!(rb.iter().nth_back(2), Some(&2)); - assert_eq!(rb.iter().nth_back(3), Some(&1)); - assert_eq!(rb.iter().nth_back(4), None); - - let mut rb = $rb_init([1, 2, 3, 4]); - assert_eq!(rb.iter_mut().nth_back(0), Some(&mut 4)); - assert_eq!(rb.iter_mut().nth_back(1), Some(&mut 3)); - assert_eq!(rb.iter_mut().nth_back(2), Some(&mut 2)); - assert_eq!(rb.iter_mut().nth_back(3), Some(&mut 1)); - assert_eq!(rb.iter_mut().nth_back(4), None); - }; - } - - test_concrete!(|values: [i32; 4]| ConstGenericRingBuffer::<_, 4>::from(values)); - test_concrete!(|values: [i32; 4]| GrowableAllocRingBuffer::<_>::from(values)); - test_concrete!(|values: [i32; 4]| AllocRingBuffer::<_>::from(values)); - } - - #[test] - fn test_copy_from_slice_power_of_two() { - macro_rules! 
test_concrete { - ($rb_init: expr) => { - // same-sized slice - let mut rb = $rb_init([1, 2, 3, 4]); - rb.copy_from_slice(0, &[5, 6, 7, 8]); - assert_eq!(rb.to_vec(), alloc::vec![5, 6, 7, 8]); - - // same-sized slice after a push - let mut rb = $rb_init([1, 2, 3, 4]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - rb.copy_from_slice(0, &[5, 6, 7, 8]); - assert_eq!(rb.to_vec(), alloc::vec![5, 6, 7, 8]); - - // same-sized slice after a roundtrip - let mut rb = $rb_init([1, 2, 3, 4]); - let initial_len = rb.len(); - for _ in 0..rb.len() { - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - rb.copy_from_slice(0, &[5, 6, 7, 8]); - assert_eq!(rb.to_vec(), alloc::vec![5, 6, 7, 8]); - - // from offset - let mut rb = $rb_init([1, 2, 3, 4]); - rb.copy_from_slice(2, &[5, 6]); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 5, 6]); - - // from offset after a push - let mut rb = $rb_init([1, 2, 3, 4]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - rb.copy_from_slice(2, &[5, 6]); - assert_eq!(rb.to_vec(), alloc::vec![2, 3, 5, 6]); - - // from offset after a roundtrip - let mut rb = $rb_init([1, 2, 3, 4]); - let initial_len = rb.len(); - for _ in 0..rb.len() { - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - rb.copy_from_slice(2, &[5, 6]); - assert_eq!(rb.to_vec(), alloc::vec![0, 0, 5, 6]); - }; - } - - test_concrete!(|values: [i32; 4]| ConstGenericRingBuffer::<_, 4>::from(values)); - test_concrete!(|values: [i32; 4]| GrowableAllocRingBuffer::<_>::from(values)); - test_concrete!(|values: [i32; 4]| AllocRingBuffer::<_>::from(values)); - } - - #[test] - fn test_copy_from_slice_capacity_smaller_than_size() { - macro_rules! 
test_concrete { - ($rb_init: expr) => { - // same-sized slice - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - rb.copy_from_slice(0, &[8, 9, 10, 11, 12, 13, 14]); - assert_eq!(rb.to_vec(), alloc::vec![8, 9, 10, 11, 12, 13, 14]); - - // same-sized slice after a push - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - rb.copy_from_slice(0, &[8, 9, 10, 11, 12, 13, 14]); - assert_eq!(rb.to_vec(), alloc::vec![8, 9, 10, 11, 12, 13, 14]); - - // same-sized slice after a roundtrip - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - for _ in 0..rb.len() { - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - rb.copy_from_slice(0, &[8, 9, 10, 11, 12, 13, 14]); - assert_eq!(rb.to_vec(), alloc::vec![8, 9, 10, 11, 12, 13, 14]); - - // from offset - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - rb.copy_from_slice(2, &[8, 9, 10, 11, 12]); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 8, 9, 10, 11, 12]); - - // from offset after a push - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - rb.copy_from_slice(2, &[8, 9, 10, 11, 12]); - assert_eq!(rb.to_vec(), alloc::vec![2, 3, 8, 9, 10, 11, 12]); - - // from offset after a roundtrip - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - for _ in 0..rb.len() { - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - rb.copy_from_slice(2, &[8, 9, 10, 11, 12]); - assert_eq!(rb.to_vec(), alloc::vec![0, 0, 8, 9, 10, 11, 12]); - }; - } - - test_concrete!(|values: [i32; 7]| ConstGenericRingBuffer::<_, 7>::from(values)); - test_concrete!(|values: [i32; 7]| GrowableAllocRingBuffer::<_>::from(values)); - test_concrete!(|values: [i32; 7]| AllocRingBuffer::<_>::from(values)); - } - - #[test] - fn test_copy_from_slice_non_full_rb() { - macro_rules! test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(&[3, 2, 1]); - assert_eq!(rb.capacity(), 7); - // we have some space left - assert!(rb.len() < rb.capacity()); - - // copy preserves length - rb.copy_from_slice(0, &[1, 2, 3]); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3]); - - let _ = rb.enqueue(4); - let _ = rb.enqueue(5); - let _ = rb.enqueue(6); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6]); - - // still preserving length - rb.copy_from_slice(0, &[6, 5, 4, 3, 2, 1]); - assert_eq!(rb.to_vec(), alloc::vec![6, 5, 4, 3, 2, 1]); - - // making sure the read/write ptrs have traversed the ring - for i in 0..6 { - let _ = rb.enqueue(i + 1); - let _ = rb.dequeue(); - } - - // sanity check - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6]); - // copy from offset - rb.copy_from_slice(3, &[3, 2, 1]); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 3, 2, 1]); - // copy again - rb.copy_from_slice(0, &[6, 5, 4, 1, 2, 3]); - assert_eq!(rb.to_vec(), alloc::vec![6, 5, 4, 1, 2, 3]); - }; - } - - test_concrete!(|values: &[i32]| { - let mut rb = ConstGenericRingBuffer::<_, 7>::new(); - rb.extend(values.iter().copied()); - rb - }); - test_concrete!(|values: &[i32]| { - let mut rb = GrowableAllocRingBuffer::<_>::with_capacity(7); - rb.extend(values.iter().copied()); - rb - }); - test_concrete!(|values: &[i32]| { - let mut rb = AllocRingBuffer::<_>::new(7); - rb.extend(values.iter().copied()); - rb - }); - } - - #[test] - fn test_copy_from_slice_empty() { - macro_rules! 
test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(); - rb.copy_from_slice(0, &[0; 0]); - assert_eq!(rb.to_vec(), alloc::vec![]); - }; - } - - test_concrete!(ConstGenericRingBuffer::::new); - test_concrete!(|| GrowableAllocRingBuffer::::with_capacity(1)); - test_concrete!(|| AllocRingBuffer::::new(1)); - } - - #[test] - fn test_copy_to_slice_power_of_two() { - macro_rules! test_concrete { - ($rb_init: expr) => { - // same-sized slice - let rb = $rb_init([1, 2, 3, 4]); - let mut slice = [0; 4]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3, 4]); - - // same-sized slice after a push - let mut rb = $rb_init([1, 2, 3, 4]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - let mut slice = [0; 4]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[2, 3, 4, 0]); - - // same-sized slice after a roundtrip - let mut rb = $rb_init([4, 3, 2, 1]); - let initial_len = rb.len(); - for i in 0..rb.len() { - let _ = rb.enqueue((i + 1).try_into().unwrap()); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - let mut slice = [0; 4]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3, 4]); - - // from offset - let rb = $rb_init([1, 2, 3, 4]); - let mut slice = [0; 2]; - rb.copy_to_slice(2, &mut slice); - assert_eq!(slice.as_slice(), &[3, 4]); - - // from offset after a push - let mut rb = $rb_init([1, 2, 3, 4]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - let mut slice = [0; 2]; - rb.copy_to_slice(2, &mut slice); - assert_eq!(slice.as_slice(), &[4, 0]); - - // from offset after a roundtrip - let mut rb = $rb_init([4, 3, 2, 1]); - let initial_len = rb.len(); - for i in 0..rb.len() { - let _ = rb.enqueue((i + 1).try_into().unwrap()); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - let mut slice = [0; 2]; - rb.copy_to_slice(2, &mut slice); - assert_eq!(slice.as_slice(), &[3, 4]); - }; - } - - test_concrete!(|values: [i32; 4]| ConstGenericRingBuffer::<_, 4>::from(values)); - test_concrete!(|values: [i32; 4]| GrowableAllocRingBuffer::<_>::from(values)); - test_concrete!(|values: [i32; 4]| AllocRingBuffer::<_>::from(values)); - } - - #[test] - fn test_copy_to_slice_capacity_smaller_than_size() { - macro_rules! 
test_concrete { - ($rb_init: expr) => { - // same-sized slice - let rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let mut slice = [0; 7]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6, 7]); - - // same-sized slice after a push - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - let mut slice = [0; 7]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[2, 3, 4, 5, 6, 7, 0]); - - // same-sized slice after a roundtrip - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - for i in 0..rb.len() { - let _ = rb.enqueue((i + 1).try_into().unwrap()); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - let mut slice = [0; 7]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6, 7]); - - // from offset - let rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let mut slice = [0; 5]; - rb.copy_to_slice(2, &mut slice); - assert_eq!(slice.as_slice(), &[3, 4, 5, 6, 7]); - - // from offset after a push - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - let _ = rb.enqueue(0); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - let mut slice = [0; 5]; - rb.copy_to_slice(2, &mut slice); - assert_eq!(slice.as_slice(), &[4, 5, 6, 7, 0]); - - // from offset after a roundtrip - let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]); - let initial_len = rb.len(); - for i in 0..rb.len() { - let _ = rb.enqueue((i + 1).try_into().unwrap()); - if rb.len() > initial_len { - let _ = rb.dequeue(); - } - } - let mut slice = [0; 5]; - rb.copy_to_slice(2, &mut slice); - assert_eq!(slice.as_slice(), &[3, 4, 5, 6, 7]); - }; - } - - test_concrete!(|values: [i32; 7]| ConstGenericRingBuffer::<_, 7>::from(values)); - test_concrete!(|values: [i32; 7]| GrowableAllocRingBuffer::<_>::from(values)); - test_concrete!(|values: [i32; 7]| AllocRingBuffer::<_>::from(values)); - } - - #[test] - fn test_copy_to_slice_non_full_rb() { - macro_rules! test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(&[1, 2, 3]); - assert_eq!(rb.capacity(), 7); - // we have some space left - assert!(rb.len() < rb.capacity()); - - // copy based on length - let mut slice = [0; 3]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3]); - - let _ = rb.enqueue(4); - let _ = rb.enqueue(5); - let _ = rb.enqueue(6); - // still based on length - let mut slice = [0; 6]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6]); - - // making sure the read/write ptrs have traversed the ring - for i in 0..6 { - let _ = rb.enqueue(i + 1); - let _ = rb.dequeue(); - } - - // sanity check - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6]); - // copy again - let mut slice = [0; 6]; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6]); - }; - } - - test_concrete!(|values: &[i32]| { - let mut rb = ConstGenericRingBuffer::<_, 7>::new(); - rb.extend(values.iter().copied()); - rb - }); - test_concrete!(|values: &[i32]| { - let mut rb = GrowableAllocRingBuffer::<_>::with_capacity(7); - rb.extend(values.iter().copied()); - rb - }); - test_concrete!(|values: &[i32]| { - let mut rb = AllocRingBuffer::<_>::new(7); - rb.extend(values.iter().copied()); - rb - }); - } - - #[test] - fn test_copy_to_slice_empty() { - macro_rules! 
test_concrete { - ($rb_init: expr) => { - let rb = $rb_init(); - let mut slice = []; - rb.copy_to_slice(0, &mut slice); - assert_eq!(slice.as_slice(), &[0; 0]); - }; - } - - test_concrete!(ConstGenericRingBuffer::::new); - test_concrete!(|| GrowableAllocRingBuffer::::with_capacity(1)); - test_concrete!(|| AllocRingBuffer::::new(1)); - } - - #[test] - fn test_set_len_primitive() { - use crate::SetLen; - - let values = [1, 2, 3, 4, 5, 6, 7, 8]; - - macro_rules! test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(); - let initial_capacity = rb.capacity(); - unsafe { rb.set_len(4) }; - assert_eq!(rb.capacity(), initial_capacity); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4]); - unsafe { rb.set_len(8) }; - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6, 7, 8]); - }; - } - - test_concrete!(|| ConstGenericRingBuffer::::from(values)); - test_concrete!(|| AllocRingBuffer::::from(values)); - } - - #[test] - fn test_set_len_leak() { - use crate::SetLen; - - #[derive(Default, Clone)] - struct Droppable { - dropped: bool, - } - impl Drop for Droppable { - fn drop(&mut self) { - self.dropped = true; - } - } - - let values = (0..8).map(|_| Droppable::default()).collect::>(); - - macro_rules! test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(); - let initial_capacity = rb.capacity(); - unsafe { rb.set_len(4) }; - assert_eq!(rb.capacity(), initial_capacity); - assert!(rb.to_vec().iter().all(|item| !item.dropped)); - unsafe { rb.set_len(8) }; - assert!(rb.to_vec().iter().all(|item| !item.dropped)); - rb.clear(); - assert!(rb.to_vec().iter().all(|item| item.dropped)); - }; - } - - test_concrete!(|| ConstGenericRingBuffer::::from(values.clone())); - test_concrete!(|| AllocRingBuffer::::from(values)); - } - - #[test] - fn test_set_len_uninit_primitive() { - use crate::SetLen; - - macro_rules! test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(); - assert_eq!(rb.len(), 0); - unsafe { rb.set_len(4) }; - assert_eq!(rb.len(), 4); - assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4]); - }; - } - - test_concrete!(|| { - let mut rb = ConstGenericRingBuffer::::new(); - let _ = rb.buf[0].write(1); - let _ = rb.buf[1].write(2); - let _ = rb.buf[2].write(3); - let _ = rb.buf[3].write(4); - rb - }); - test_concrete!(|| { - let rb = AllocRingBuffer::::with_capacity_power_of_2(3); - unsafe { - *rb.buf = 1; - *rb.buf.add(1) = 2; - *rb.buf.add(2) = 3; - *rb.buf.add(3) = 4; - } - rb - }); - } - - #[test] - fn test_set_len_uninit_droppable() { - use crate::SetLen; - - #[derive(Default, Clone)] - struct Droppable { - dropped: bool, - } - impl Drop for Droppable { - fn drop(&mut self) { - self.dropped = true; - } - } - - macro_rules! 
test_concrete { - ($rb_init: expr) => { - let mut rb = $rb_init(); - assert_eq!(rb.len(), 0); - assert!(rb.to_vec().iter().all(|item| !item.dropped)); - unsafe { rb.set_len(4) }; - assert_eq!(rb.len(), 4); - assert!(rb.to_vec().iter().all(|item| !item.dropped)); - rb.clear(); - assert!(rb.to_vec().iter().all(|item| item.dropped)); - }; - } - - test_concrete!(|| { - let mut rb = ConstGenericRingBuffer::::new(); - let _ = rb.buf[0].write(Droppable::default()); - let _ = rb.buf[1].write(Droppable::default()); - let _ = rb.buf[2].write(Droppable::default()); - let _ = rb.buf[3].write(Droppable::default()); - rb - }); - test_concrete!(|| { - let rb = AllocRingBuffer::::with_capacity_power_of_2(3); - unsafe { - *rb.buf = Droppable::default(); - *rb.buf.add(1) = Droppable::default(); - *rb.buf.add(2) = Droppable::default(); - *rb.buf.add(3) = Droppable::default(); - } - rb - }); - } } diff --git a/src/ringbuffer_trait.rs b/src/ringbuffer_trait.rs index 046b845..854be52 100644 --- a/src/ringbuffer_trait.rs +++ b/src/ringbuffer_trait.rs @@ -2,6 +2,7 @@ use core::ops::{Index, IndexMut}; #[cfg(feature = "alloc")] extern crate alloc; + #[cfg(feature = "alloc")] use alloc::vec::Vec; @@ -75,19 +76,48 @@ pub unsafe trait RingBuffer: #[doc(hidden)] unsafe fn ptr_buffer_size(rb: *const Self) -> usize; - /// Alias for [`enqueue`] - #[deprecated = "use enqueue instead"] + /// Pushes a value onto the buffer. Cycles around if capacity is reached. + fn push(&mut self, value: T); + + /// alias for [`push`](RingBuffer::push), forming a more natural counterpart to [`dequeue`](RingBuffer::dequeue) + fn enqueue(&mut self, value: T) { + self.push(value); + } + + /// alias for [`extend`](RingBuffer::extend). #[inline] - fn push(&mut self, value: T) { - let _ = self.enqueue(value); + fn enqueue_many>(&mut self, items: I) { + self.extend(items); } - /// Adds a value onto the buffer. + /// Clones and appends all elements in a slice to the `Vec`. + /// + /// Iterates over the slice `other`, clones each element, and then appends + /// it to this `RingBuffer`. The `other` slice is traversed in-order. + /// + /// Depending on the `RingBuffer` implementation, may be faster than inserting items in a loop. + /// `ConstGenericRingBuffer` is especially optimised in this regard. + /// See also: [`ConstGenericRingBuffer::custom_extend_batched`](crate::with_const_generics::ConstGenericRingBuffer::custom_extend_batched) + /// + /// # Examples + /// + /// ``` + /// use ringbuffer::{ConstGenericRingBuffer, RingBuffer}; + /// + /// let mut rb = ConstGenericRingBuffer::<_, 6>::new(); + /// rb.push(1); + /// + /// rb.extend_from_slice(&[2, 3, 4]); + /// assert_eq!(rb.to_vec(), vec![1, 2, 3, 4]); + /// ``` /// - /// Cycles around if capacity is reached. - /// Forms a more natural counterpart to [`dequeue`](RingBuffer::dequeue). - /// An alias is provided with [`push`](RingBuffer::push). - fn enqueue(&mut self, value: T) -> Option; + /// [`extend`]: RingBuffer::extend + fn extend_from_slice(&mut self, other: &[T]) + where + T: Clone, + { + self.extend(other.iter().cloned()); + } /// dequeues the top item off the ringbuffer, and moves this item out. fn dequeue(&mut self) -> Option; @@ -95,7 +125,6 @@ pub unsafe trait RingBuffer: /// dequeues the top item off the queue, but does not return it. Instead it is dropped. /// If the ringbuffer is empty, this function is a nop. 
     #[inline]
-    #[deprecated = "use dequeue instead"]
     fn skip(&mut self) {
         let _ = self.dequeue();
     }
@@ -122,10 +151,28 @@
     /// assert_eq!(rb.len(), 0);
     ///
     /// ```
-    fn drain(&mut self) -> RingBufferDrainingIterator<'_, T, Self> {
+    fn drain(&mut self) -> RingBufferDrainingIterator<T, Self> {
         RingBufferDrainingIterator::new(self)
     }

+    /// Moves all the elements of `other` into `self`, leaving `other` empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use ringbuffer::{ConstGenericRingBuffer, RingBuffer};
+    ///
+    /// let mut rb = ConstGenericRingBuffer::<_, 6>::from(vec![1, 2, 3]);
+    /// let mut rb2 = ConstGenericRingBuffer::<_, 6>::from(vec![4, 5, 6]);
+    ///
+    /// rb.append(&mut rb2);
+    /// assert_eq!(rb.to_vec(), &[1, 2, 3, 4, 5, 6]);
+    /// assert_eq!(rb2.to_vec(), &[]);
+    /// ```
+    fn append(&mut self, other: &mut Self) {
+        self.extend(other.drain());
+    }
+
     /// Sets every element in the ringbuffer to the value returned by f.
     fn fill_with<F: FnMut() -> T>(&mut self, f: F);
@@ -226,14 +273,14 @@
     /// Creates a mutable iterator over the buffer starting from the item pushed the longest ago,
     /// and ending at the element most recently pushed.
     #[inline]
-    fn iter_mut(&mut self) -> RingBufferMutIterator<'_, T, Self> {
+    fn iter_mut(&mut self) -> RingBufferMutIterator<T, Self> {
         RingBufferMutIterator::new(self)
     }

     /// Creates an iterator over the buffer starting from the item pushed the longest ago,
     /// and ending at the element most recently pushed.
     #[inline]
-    fn iter(&self) -> RingBufferIterator<'_, T, Self> {
+    fn iter(&self) -> RingBufferIterator<T, Self> {
         RingBufferIterator::new(self)
     }

@@ -253,49 +300,6 @@
     {
         self.iter().any(|i| i == elem)
     }
-
-    /// Efficiently copy items from the ringbuffer to a target slice.
-    ///
-    /// # Panics
-    /// Panics if the buffer length minus the offset is NOT equal to `target.len()`.
-    ///
-    /// # Safety
-    /// ONLY SAFE WHEN self is a *const to to an implementor of `RingBuffer`
-    unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
-    where
-        T: Copy;
-
-    /// Efficiently copy items from the ringbuffer to a target slice.
-    ///
-    /// # Panics
-    /// Panics if the buffer length minus the offset is NOT equal to `target.len()`.
-    fn copy_to_slice(&self, offset: usize, dst: &mut [T])
-    where
-        T: Copy,
-    {
-        unsafe { Self::ptr_copy_to_slice(self, offset, dst) }
-    }
-
-    /// Efficiently copy items from a slice to the ringbuffer.
-    /// # Panics
-    /// Panics if the buffer length minus the offset is NOT equal to `source.len()`.
-    ///
-    /// # Safety
-    /// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
-    unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
-    where
-        T: Copy;
-
-    /// Efficiently copy items from a slice to the ringbuffer.
-    ///
-    /// # Panics
-    /// Panics if the buffer length minus the offset is NOT equal to `source.len()`. 
- fn copy_from_slice(&mut self, offset: usize, src: &[T]) - where - T: Copy, - { - unsafe { Self::ptr_copy_from_slice(self, offset, src) } - } } mod iter { @@ -342,11 +346,6 @@ mod iter { fn size_hint(&self) -> (usize, Option) { (self.len, Some(self.len)) } - - fn nth(&mut self, n: usize) -> Option { - self.index = (self.index + n).min(self.len); - self.next() - } } impl<'rb, T: 'rb, RB: RingBuffer> FusedIterator for RingBufferIterator<'rb, T, RB> {} @@ -364,11 +363,6 @@ mod iter { None } } - - fn nth_back(&mut self, n: usize) -> Option { - self.len = self.len - n.min(self.len); - self.next_back() - } } /// `RingBufferMutIterator` holds a reference to a `RingBuffer` and iterates over it. `index` is the @@ -411,11 +405,6 @@ mod iter { None } } - - fn nth_back(&mut self, n: usize) -> Option { - self.len = self.len - n.min(self.len); - self.next_back() - } } impl<'rb, T, RB: RingBuffer + 'rb> Iterator for RingBufferMutIterator<'rb, T, RB> { @@ -435,11 +424,6 @@ mod iter { fn size_hint(&self) -> (usize, Option) { (self.len, Some(self.len)) } - - fn nth(&mut self, n: usize) -> Option { - self.index = (self.index + n).min(self.len); - self.next() - } } /// `RingBufferMutIterator` holds a reference to a `RingBuffer` and iterates over it. @@ -458,7 +442,7 @@ mod iter { } } - impl> Iterator for RingBufferDrainingIterator<'_, T, RB> { + impl<'rb, T, RB: RingBuffer> Iterator for RingBufferDrainingIterator<'rb, T, RB> { type Item = T; fn next(&mut self) -> Option { @@ -518,7 +502,7 @@ macro_rules! impl_ringbuffer { /// Implement various functions on implementors of [`RingBuffer`]. /// This is to avoid duplicate code. macro_rules! impl_ringbuffer_ext { - ($get_base_ptr: ident, $get_base_mut_ptr: ident, $get_unchecked: ident, $get_unchecked_mut: ident, $readptr: ident, $writeptr: ident, $mask: expr) => { + ($get_unchecked: ident, $get_unchecked_mut: ident, $readptr: ident, $writeptr: ident, $mask: expr) => { #[inline] fn get_signed(&self, index: isize) -> Option<&T> { use core::ops::Not; @@ -600,94 +584,5 @@ macro_rules! 
impl_ringbuffer_ext { self.$readptr = 0; self.$writeptr = 0; } - - unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T]) - where - T: Copy, - { - let len = Self::ptr_len(rb); - let dst_len = dst.len(); - assert!( - (offset == 0 && len == 0) || offset < len, - "offset ({offset}) is out of bounds for the current buffer length ({len})" - ); - assert!(len - offset == dst_len, "destination slice length ({dst_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})"); - - if dst_len == 0 { - return; - } - - let base: *const T = $get_base_ptr(rb); - let size = Self::ptr_buffer_size(rb); - let offset_readptr = (*rb).$readptr + offset; - - let from_idx = $mask(size, offset_readptr); - let to_idx = $mask(size, offset_readptr + dst_len); - - if from_idx < to_idx { - dst.copy_from_slice(unsafe { - // SAFETY: index has been modulo-ed to be within range - // to be within bounds - core::slice::from_raw_parts(base.add(from_idx), dst_len) - }); - } else { - dst[..size - from_idx].copy_from_slice(unsafe { - // SAFETY: index has been modulo-ed to be within range - // to be within bounds - core::slice::from_raw_parts(base.add(from_idx), size - from_idx) - }); - dst[size - from_idx..].copy_from_slice(unsafe { - // SAFETY: index has been modulo-ed to be within range - // to be within bounds - core::slice::from_raw_parts(base, to_idx) - }); - } - } - - unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T]) - where - T: Copy, - { - let len = Self::ptr_len(rb); - let src_len = src.len(); - assert!( - (offset == 0 && len == 0) || offset < len, - "offset ({offset}) is out of bounds for the current buffer length ({len})" - ); - assert!(len - offset == src_len, "source slice length ({src_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})"); - - if src_len == 0 { - return; - } - - let base: *mut T = $get_base_mut_ptr(rb); - let size = Self::ptr_buffer_size(rb); - let offset_readptr = (*rb).$readptr + offset; - - let from_idx = $mask(size, offset_readptr); - let to_idx = $mask(size, offset_readptr + src_len); - - if from_idx < to_idx { - unsafe { - // SAFETY: index has been modulo-ed to be within range - // to be within bounds - core::slice::from_raw_parts_mut(base.add(from_idx), src_len) - } - .copy_from_slice(src); - } else { - unsafe { - // SAFETY: index has been modulo-ed to be within range - // to be within bounds - core::slice::from_raw_parts_mut(base.add(from_idx), size - from_idx) - } - .copy_from_slice(&src[..size - from_idx]); - unsafe { - // SAFETY: index has been modulo-ed to be within range - // to be within bounds - core::slice::from_raw_parts_mut(base, to_idx) - } - .copy_from_slice(&src[size - from_idx..]); - } - } }; } diff --git a/src/set_len_trait.rs b/src/set_len_trait.rs deleted file mode 100644 index 91c00fc..0000000 --- a/src/set_len_trait.rs +++ /dev/null @@ -1,29 +0,0 @@ -/// `SetLen` is a trait defining the unsafe `set_len` method -/// on ringbuffers that support the operation. -pub trait SetLen { - /// Force the length of the ringbuffer to `new_len`. - /// - /// Note that downsizing will not call Drop on elements at `new_len..old_len`, - /// potentially causing a memory leak. - /// - /// # Panics - /// Panics if `new_len` is greater than the ringbuffer capacity. - /// - /// # Safety - /// - Safe when `new_len <= old_len`. - /// - Safe when `new_len > old_len` and all the elements at `old_len..new_len` are already initialized. 
- unsafe fn set_len(&mut self, new_len: usize); -} - -/// Implement `set_len` given a `readptr` and a `writeptr`. -#[macro_export] -macro_rules! impl_ring_buffer_set_len { - ($readptr: ident, $writeptr: ident) => { - #[inline] - unsafe fn set_len(&mut self, new_len: usize) { - let cap = self.capacity(); - assert!(new_len <= cap, "Cannot set the a length of {new_len} on a ringbuffer with capacity for {cap} items"); - self.$writeptr = self.$readptr + new_len; - } - }; -} diff --git a/src/with_alloc/alloc_ringbuffer.rs b/src/with_alloc/alloc_ringbuffer.rs index c64ecbd..730b67d 100644 --- a/src/with_alloc/alloc_ringbuffer.rs +++ b/src/with_alloc/alloc_ringbuffer.rs @@ -7,7 +7,7 @@ use crate::ringbuffer_trait::{ extern crate alloc; // We need boxes, so depend on alloc -use crate::{impl_ring_buffer_set_len, mask_and, GrowableAllocRingBuffer, SetLen}; +use crate::{mask_and, GrowableAllocRingBuffer}; use core::ptr; /// The `AllocRingBuffer` is a `RingBuffer` which is based on a Vec. This means it allocates at runtime @@ -21,24 +21,24 @@ use core::ptr; /// let mut buffer = AllocRingBuffer::new(2); /// /// // First entry of the buffer is now 5. -/// buffer.enqueue(5); +/// buffer.push(5); /// -/// // The last item we enqueued is 5 +/// // The last item we pushed is 5 /// assert_eq!(buffer.back(), Some(&5)); /// /// // Second entry is now 42. -/// buffer.enqueue(42); +/// buffer.push(42); /// /// assert_eq!(buffer.peek(), Some(&5)); /// assert!(buffer.is_full()); /// -/// // Because capacity is reached the next enqueue will be the first item of the buffer. -/// buffer.enqueue(1); +/// // Because capacity is reached the next push will be the first item of the buffer. +/// buffer.push(1); /// assert_eq!(buffer.to_vec(), vec![42, 1]); /// ``` #[derive(Debug)] pub struct AllocRingBuffer { - pub(crate) buf: *mut T, + buf: *mut T, // the size of the allocation. 
Next power of 2 up from the capacity size: usize, @@ -153,7 +153,7 @@ impl Drop for AllocRingBuffer { let layout = alloc::alloc::Layout::array::(self.size).unwrap(); unsafe { - alloc::alloc::dealloc(self.buf.cast(), layout); + alloc::alloc::dealloc(self.buf as *mut u8, layout); } } } @@ -163,7 +163,7 @@ impl Clone for AllocRingBuffer { debug_assert_ne!(self.capacity, 0); let mut new = Self::new(self.capacity); - new.extend(self.iter().cloned()); + self.iter().cloned().for_each(|i| new.push(i)); new } } @@ -187,8 +187,6 @@ impl IntoIterator for AllocRingBuffer { } } -#[allow(clippy::into_iter_without_iter)] -// iter() is implemented on the trait impl<'a, T> IntoIterator for &'a AllocRingBuffer { type Item = &'a T; type IntoIter = RingBufferIterator<'a, T, AllocRingBuffer>; @@ -198,8 +196,6 @@ impl<'a, T> IntoIterator for &'a AllocRingBuffer { } } -#[allow(clippy::into_iter_without_iter)] -// iter_mut() is implemented on the trait impl<'a, T> IntoIterator for &'a mut AllocRingBuffer { type Item = &'a mut T; type IntoIter = RingBufferMutIterator<'a, T, AllocRingBuffer>; @@ -214,7 +210,7 @@ impl Extend for AllocRingBuffer { let iter = iter.into_iter(); for i in iter { - let _ = self.enqueue(i); + self.push(i); } } } @@ -233,15 +229,20 @@ unsafe impl RingBuffer for AllocRingBuffer { impl_ringbuffer!(readptr, writeptr); #[inline] - fn enqueue(&mut self, value: T) -> Option { - let mut ret = None; - + fn push(&mut self, value: T) { if self.is_full() { // mask with and is allowed here because size is always a power of two let previous_value = unsafe { ptr::read(get_unchecked_mut(self, mask_and(self.size, self.readptr))) }; - ret = Some(previous_value); + // make sure we drop whatever is being overwritten + // SAFETY: the buffer is full, so this must be initialized + // : also, index has been masked + // make sure we drop because it won't happen automatically + unsafe { + drop(previous_value); + } + self.readptr += 1; } @@ -253,8 +254,6 @@ unsafe impl RingBuffer for AllocRingBuffer { } self.writeptr += 1; - - ret } fn dequeue(&mut self) -> Option { @@ -274,8 +273,6 @@ unsafe impl RingBuffer for AllocRingBuffer { } impl_ringbuffer_ext!( - get_base_ptr, - get_base_mut_ptr, get_unchecked, get_unchecked_mut, readptr, @@ -323,7 +320,7 @@ impl AllocRingBuffer { assert_ne!(capacity, 0, "Capacity must be greater than 0"); let size = capacity.next_power_of_two(); let layout = alloc::alloc::Layout::array::(size).unwrap(); - let buf = unsafe { alloc::alloc::alloc(layout).cast() }; + let buf = unsafe { alloc::alloc::alloc(layout) as *mut T }; Self { buf, size, @@ -334,16 +331,6 @@ impl AllocRingBuffer { } } -/// Get a const pointer to the buffer -unsafe fn get_base_ptr(rb: *const AllocRingBuffer) -> *const T { - (*rb).buf.cast() -} - -/// Get a mut pointer to the buffer -unsafe fn get_base_mut_ptr(rb: *mut AllocRingBuffer) -> *mut T { - (*rb).buf -} - /// Get a reference from the buffer without checking it is initialized. /// /// Caller must be sure the index is in bounds, or this will panic. 
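The indexing in this hunk relies on `size` always being `capacity.next_power_of_two()`: `mask_and(size, ptr)` can then wrap the monotonically growing read/write pointers with a single bitwise AND instead of a modulo. A minimal standalone sketch of the idea (the `mask_and` below is a local stand-in mirroring the crate's internal helper, not part of its public API):

```rust
/// Wrap a monotonically increasing pointer into the range `0..size`.
/// Only valid when `size` is a power of two, so that `size - 1` is an all-ones bitmask.
fn mask_and(size: usize, ptr: usize) -> usize {
    debug_assert!(size.is_power_of_two());
    ptr & (size - 1)
}

fn main() {
    // AllocRingBuffer rounds the requested capacity up to a power of two,
    // which is exactly what makes this equivalence hold.
    let size = 8;
    for ptr in [0, 7, 8, 9, 25, 1024] {
        assert_eq!(mask_and(size, ptr), ptr % size);
    }
}
```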
@@ -381,10 +368,6 @@ impl IndexMut for AllocRingBuffer { } } -impl SetLen for AllocRingBuffer { - impl_ring_buffer_set_len!(readptr, writeptr); -} - #[cfg(test)] mod tests { use crate::{AllocRingBuffer, RingBuffer}; @@ -409,15 +392,15 @@ mod tests { // messes up for _ in 0..100 { for i in 0..NUM_VALS { - let _ = rb.enqueue(i); + rb.enqueue(i); } assert!(rb.is_full()); for i in 0..10 { - assert_eq!(Some(i + NUM_VALS - rb.capacity()), rb.dequeue()); + assert_eq!(Some(i + NUM_VALS - rb.capacity()), rb.dequeue()) } - assert!(rb.is_empty()); + assert!(rb.is_empty()) } } @@ -437,9 +420,7 @@ mod tests { #[test] fn test_extend() { let mut buf = AllocRingBuffer::::new(4); - (0..4).for_each(|_| { - let _ = buf.enqueue(0); - }); + (0..4).for_each(|_| buf.push(0)); let new_data = [0, 1, 2]; buf.extend(new_data); @@ -456,9 +437,7 @@ mod tests { #[test] fn test_extend_with_overflow() { let mut buf = AllocRingBuffer::::new(8); - (0..8).for_each(|_| { - let _ = buf.enqueue(0); - }); + (0..8).for_each(|_| buf.push(0)); let new_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; buf.extend(new_data); diff --git a/src/with_alloc/vecdeque.rs b/src/with_alloc/vecdeque.rs index c195150..23adeb1 100644 --- a/src/with_alloc/vecdeque.rs +++ b/src/with_alloc/vecdeque.rs @@ -186,9 +186,8 @@ unsafe impl RingBuffer for GrowableAllocRingBuffer { self.pop_front() } - fn enqueue(&mut self, value: T) -> Option { + fn push(&mut self, value: T) { self.push_back(value); - None } fn fill_with T>(&mut self, mut f: F) { @@ -217,10 +216,9 @@ unsafe impl RingBuffer for GrowableAllocRingBuffer { if self.is_empty() { None } else if index >= 0 { - self.0 - .get(crate::mask_modulo(self.0.len(), index.unsigned_abs())) + self.0.get(crate::mask_modulo(self.0.len(), index as usize)) } else { - let positive_index = index.unsigned_abs() - 1; + let positive_index = -index as usize - 1; let masked = crate::mask_modulo(self.0.len(), positive_index); let index = self.0.len() - 1 - masked; @@ -233,11 +231,11 @@ unsafe impl RingBuffer for GrowableAllocRingBuffer { if RingBuffer::ptr_len(rb) == 0 { None } else if index >= 0 { - (*rb).0.get_mut(index.unsigned_abs()) + (*rb).0.get_mut(index as usize) } else { let len = Self::ptr_len(rb); - let positive_index = index.unsigned_abs() + 1; + let positive_index = -index as usize + 1; let masked = crate::mask_modulo(len, positive_index); let index = len - 1 - masked; @@ -255,68 +253,6 @@ unsafe impl RingBuffer for GrowableAllocRingBuffer { } .map(|i| i as *mut T) } - - unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T]) - where - T: Copy, - { - let len = Self::ptr_len(rb); - let dst_len = dst.len(); - assert!( - (offset == 0 && len == 0) || offset < len, - "offset ({offset}) is out of bounds for the current buffer length ({len})" - ); - assert!(len - offset == dst_len, "destination slice length ({dst_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})"); - - if dst_len == 0 { - return; - } - - let (front, back) = (*rb).0.as_slices(); - let first_len = front.len(); - - if offset < first_len { - let n_in_first = first_len - offset; - dst[..n_in_first].copy_from_slice(&front[offset..]); - - if n_in_first < dst_len { - dst[n_in_first..].copy_from_slice(&back[..dst_len - n_in_first]); - } - } else { - dst.copy_from_slice(&back[offset - first_len..]); - } - } - - unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T]) - where - T: Copy, - { - let len = Self::ptr_len(rb); - let src_len = src.len(); - assert!( - (offset == 0 && len == 0) || offset < 
len, - "offset ({offset}) is out of bounds for the current buffer length ({len})" - ); - assert!(len - offset == src_len, "source slice length ({src_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})"); - - if src_len == 0 { - return; - } - - let (front, back) = (*rb).0.as_mut_slices(); - let first_len = front.len(); - - if offset < first_len { - let n_in_first = first_len - offset; - front[offset..].copy_from_slice(&src[..n_in_first]); - - if n_in_first < src_len { - back[..src_len - n_in_first].copy_from_slice(&src[n_in_first..]); - } - } else { - back[offset - first_len..].copy_from_slice(src); - } - } } impl Extend for GrowableAllocRingBuffer { diff --git a/src/with_const_generics.rs b/src/with_const_generics.rs index 1ab5240..b65860a 100644 --- a/src/with_const_generics.rs +++ b/src/with_const_generics.rs @@ -1,8 +1,8 @@ use crate::ringbuffer_trait::{RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator}; -use crate::{impl_ring_buffer_set_len, RingBuffer, SetLen}; +use crate::RingBuffer; use core::iter::FromIterator; +use core::mem; use core::mem::MaybeUninit; -use core::mem::{self, ManuallyDrop}; use core::ops::{Index, IndexMut}; /// The `ConstGenericRingBuffer` struct is a `RingBuffer` implementation which does not require `alloc` but @@ -18,38 +18,35 @@ use core::ops::{Index, IndexMut}; /// let mut buffer = ConstGenericRingBuffer::<_, 2>::new(); /// /// // First entry of the buffer is now 5. -/// buffer.enqueue(5); +/// buffer.push(5); /// -/// // The last item we enqueued is 5 +/// // The last item we pushed is 5 /// assert_eq!(buffer.back(), Some(&5)); /// /// // Second entry is now 42. -/// buffer.enqueue(42); +/// buffer.push(42); /// /// assert_eq!(buffer.peek(), Some(&5)); /// assert!(buffer.is_full()); /// -/// // Because capacity is reached the next enqueue will be the first item of the buffer. -/// buffer.enqueue(1); +/// // Because capacity is reached the next push will be the first item of the buffer. 
+/// buffer.push(1); /// assert_eq!(buffer.to_vec(), vec![42, 1]); /// ``` #[derive(Debug)] pub struct ConstGenericRingBuffer { - pub(crate) buf: [MaybeUninit; CAP], + buf: [MaybeUninit; CAP], readptr: usize, writeptr: usize, } impl From<[T; CAP]> for ConstGenericRingBuffer { fn from(value: [T; CAP]) -> Self { - let v = ManuallyDrop::new(value); Self { // Safety: // T has the same layout as MaybeUninit // [T; N] has the same layout as [MaybeUninit; N] - // Without ManuallyDrop this would be unsound as - // transmute_copy doesn't take ownership - buf: unsafe { mem::transmute_copy(&v) }, + buf: unsafe { mem::transmute_copy(&value) }, readptr: 0, writeptr: CAP, } @@ -141,7 +138,7 @@ impl Drop for ConstGenericRingBuffer { impl Clone for ConstGenericRingBuffer { fn clone(&self) -> Self { let mut new = ConstGenericRingBuffer::::new(); - new.extend(self.iter().cloned()); + self.iter().cloned().for_each(|i| new.push(i)); new } } @@ -179,26 +176,20 @@ impl ConstGenericRingBuffer { ConstGenericRingBuffer: From>, { #[allow(clippy::let_unit_value)] - let () = Self::ERROR_CAPACITY_IS_NOT_ALLOWED_TO_BE_ZERO; + let _ = Self::ERROR_CAPACITY_IS_NOT_ALLOWED_TO_BE_ZERO; + // allow here since we are constructing an array of MaybeUninit + // which explicitly *is* defined behavior + // https://rust-lang.github.io/rust-clippy/master/index.html#uninit_assumed_init + #[allow(clippy::uninit_assumed_init)] Self { - buf: [const { MaybeUninit::::uninit() }; CAP], + buf: unsafe { MaybeUninit::uninit().assume_init() }, writeptr: 0, readptr: 0, } } } -/// Get a const pointer to the buffer -unsafe fn get_base_ptr(rb: *const ConstGenericRingBuffer) -> *const T { - (*rb).buf.as_ptr().cast() -} - -/// Get a mut pointer to the buffer -unsafe fn get_base_mut_ptr(rb: *mut ConstGenericRingBuffer) -> *mut T { - (*rb).buf.as_mut_ptr().cast() -} - /// Get a reference from the buffer without checking it is initialized /// Caller MUST be sure this index is initialized, or undefined behavior will happen unsafe fn get_unchecked<'a, T, const N: usize>( @@ -232,8 +223,6 @@ impl IntoIterator for ConstGenericRingBuffer { } } -#[allow(clippy::into_iter_without_iter)] -// iter() is implemented on the trait impl<'a, T, const CAP: usize> IntoIterator for &'a ConstGenericRingBuffer { type Item = &'a T; type IntoIter = RingBufferIterator<'a, T, ConstGenericRingBuffer>; @@ -243,8 +232,6 @@ impl<'a, T, const CAP: usize> IntoIterator for &'a ConstGenericRingBuffer IntoIterator for &'a mut ConstGenericRingBuffer { type Item = &'a mut T; type IntoIter = RingBufferMutIterator<'a, T, ConstGenericRingBuffer>; @@ -254,13 +241,280 @@ impl<'a, T, const CAP: usize> IntoIterator for &'a mut ConstGenericRingBuffer Extend for ConstGenericRingBuffer { - fn extend>(&mut self, iter: A) { - let iter = iter.into_iter(); +impl ConstGenericRingBuffer { + /// splits the ringbuffer into two slices. One from the old pointer to the end of the buffer, + /// and one from the start of the buffer to the new pointer + /// + /// # Safety + /// Only safe when old != new + #[inline] + #[cfg(feature = "batched_extend")] + unsafe fn split_pointer_move( + &mut self, + old: usize, + new: usize, + ) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { + let old_mod = crate::mask_modulo(CAP, old); + let new_mod = crate::mask_modulo(CAP, new); + + if old_mod < new_mod { + // if there's no wrapping, nice! 
+            // we can just return one slice
+            (&mut self.buf[old_mod..new_mod], &mut [])
+        } else {
+            // the first part is from old_mod to CAP
+            let (start, p1) = self.buf.split_at_mut(old_mod);
-        for i in iter {
-            let _ = self.enqueue(i);
+            // and the second part from 0 to new_mod
+            let (p2, _) = start.split_at_mut(new_mod);
+
+            (p1, p2)
+        }
+    }
+
+    /// # Safety
+    /// Only safe when `CAP` >= `BATCH_SIZE`
+    #[inline]
+    #[cfg(feature = "batched_extend")]
+    unsafe fn extend_from_arr_batch<const BATCH_SIZE: usize>(&mut self, data: [T; BATCH_SIZE]) {
+        debug_assert!(CAP >= BATCH_SIZE);
+
+        // algorithm to push 1 item:
+        //
+        // if self.is_full() {
+        //     let previous_value = mem::replace(
+        //         &mut self.buf[crate::mask_modulo(CAP, self.readptr)],
+        //         MaybeUninit::uninit(),
+        //     );
+        //     // make sure we drop whatever is being overwritten
+        //     // SAFETY: the buffer is full, so this must be initialized
+        //     //       : also, index has been masked
+        //     // make sure we drop because it won't happen automatically
+        //     unsafe {
+        //         drop(previous_value.assume_init());
+        //     }
+        //     self.readptr += 1;
+        // }
+        // let index = crate::mask_modulo(CAP, self.writeptr);
+        // self.buf[index] = MaybeUninit::new(value);
+        // self.writeptr += 1;
+
+        let old_len = self.len();
+
+        let old_writeptr = self.writeptr;
+        let old_readptr = self.readptr;
+
+        // so essentially, we need to update the write pointer by BATCH_SIZE
+        self.writeptr += BATCH_SIZE;
+
+        // but maybe we also need to update the readptr:
+        // first we calculate whether we will be full. if not, no need to update the readptr
+        let num_items_until_full = self.capacity() - old_len;
+        if num_items_until_full < BATCH_SIZE {
+            // the difference is how much the read ptr needs to move
+            self.readptr += BATCH_SIZE - num_items_until_full;
+
+            debug_assert_ne!(old_readptr, self.readptr);
+
+            // if readptr moves, we also need to free some items.
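+            // (worked example: with CAP = 4, len = 3 and BATCH_SIZE = 2 there is one
+            // free slot, so the readptr advances by 2 - 1 = 1 and exactly one old
+            // element must be dropped before its slot is overwritten below)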
+            // Safety: same safety guarantees as this function, and old != new by the assertion above
+            let (p1, p2) = unsafe { self.split_pointer_move(old_readptr, self.readptr) };
+            // assertion: we can never be in a situation where we have to drop more than a batch size of items
+            debug_assert!(p1.len() + p2.len() <= BATCH_SIZE);
+
+            for i in p1 {
+                i.assume_init_drop();
+            }
+            for i in p2 {
+                i.assume_init_drop();
+            }
+        }
+
+        debug_assert_ne!(old_writeptr, self.writeptr);
+        // now we need to write some items between old_writeptr and self.writeptr
+        // Safety: same safety guarantees as this function, and old != new by the assertion above
+        let (p1, p2) = unsafe { self.split_pointer_move(old_writeptr, self.writeptr) };
+        // assertion: we can never be in a situation where we have to write more than a batch size of items
+        debug_assert!(
+            p1.len() + p2.len() <= BATCH_SIZE,
+            "p1: {}; p2: {}; batch: {}",
+            p1.len(),
+            p2.len(),
+            BATCH_SIZE
+        );
+
+        // if we are lucky, we're not on the boundary, so either p1 or p2 has a length of BATCH_SIZE
+        if p1.len() == BATCH_SIZE {
+            for (index, i) in data.into_iter().enumerate() {
+                p1[index] = MaybeUninit::new(i);
+            }
+        } else if p2.len() == BATCH_SIZE {
+            for (index, i) in data.into_iter().enumerate() {
+                p2[index] = MaybeUninit::new(i);
+            }
+        } else {
+            // oof, unfortunately we're on a boundary
+
+            // iterate over the data
+            let mut data_iter = data.into_iter();
+
+            // put p1.len() items in p1
+            for i in p1 {
+                let next_item = data_iter.next();
+                // Safety: p1.len() + p2.len() <= BATCH_SIZE, so the two loops here
+                // together cannot run for more than BATCH_SIZE iterations
+                *i = MaybeUninit::new(unsafe { next_item.unwrap_unchecked() });
+            }
+
+            // put p2.len() items in p2
+            for i in p2 {
+                let next_item = data_iter.next();
+                // Safety: p1.len() + p2.len() <= BATCH_SIZE, so the two loops here
+                // together cannot run for more than BATCH_SIZE iterations
+                *i = MaybeUninit::new(unsafe { next_item.unwrap_unchecked() });
+            }
+        }
+    }
+
+    #[inline]
+    #[cfg(feature = "batched_extend")]
+    fn fill_batch<const BATCH_SIZE: usize>(
+        batch: &mut [MaybeUninit<T>; BATCH_SIZE],
+        iter: &mut impl Iterator<Item = T>,
+    ) -> usize {
+        for (index, b) in batch.iter_mut().enumerate() {
+            if let Some(i) = iter.next() {
+                *b = MaybeUninit::new(i);
+            } else {
+                return index;
+            }
+        }
+
+        BATCH_SIZE
+    }
+
+    #[inline]
+    #[cfg(feature = "batched_extend")]
+    fn extend_batched_internal<const BATCH_SIZE: usize>(
+        &mut self,
+        mut other: impl Iterator<Item = T>,
+    ) {
+        // SAFETY: if CAP < BATCH_SIZE we can't run extend_from_arr_batch, so we catch that case here
+        if CAP < BATCH_SIZE {
+            for i in other {
+                self.push(i);
+            }
+        } else {
+            // Safety: an uninitialized `[MaybeUninit<T>; BATCH_SIZE]` is itself a valid value
+            let mut batch: [MaybeUninit<T>; BATCH_SIZE] =
+                unsafe { MaybeUninit::uninit().assume_init() };
+
+            // repeat until we find an incomplete batch
+            loop {
+                // fill up a batch
+                let how_full = Self::fill_batch(&mut batch, &mut other);
+
+                // if the batch isn't complete, individually add the items from that batch
+                if how_full < BATCH_SIZE {
+                    for b in batch.iter().take(how_full) {
+                        // Safety: fill_batch initialized at least `how_full` items, so reading
+                        // only up to there is safe
+                        self.push(unsafe { b.assume_init_read() });
+                    }
+
+                    // then we're done!
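+                    // (items from earlier, full batches were already written in place by
+                    // extend_from_arr_batch; only this final partial batch takes the
+                    // one-at-a-time push path)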
+                    return;
+                }
+
+                // else the batch is full, and we can transmute it to an initialized array
+                let batch = unsafe {
+                    mem::transmute_copy::<[MaybeUninit<T>; BATCH_SIZE], [T; BATCH_SIZE]>(&batch)
+                };
+
+                // SAFETY: if CAP < BATCH_SIZE we wouldn't be here
+                unsafe { self.extend_from_arr_batch(batch) }
+            }
+        }
+    }
+
+    /// # Safety
+    /// ONLY USE WHEN WORKING ON A CLEARED RINGBUFFER
+    #[cfg(feature = "batched_extend")]
+    #[inline]
+    unsafe fn finish_iter<const BATCH_SIZE: usize>(&mut self, mut iter: impl Iterator<Item = T>) {
+        let mut index = 0;
+        for i in iter.by_ref() {
+            self.buf[index] = MaybeUninit::new(i);
+            index += 1;
+
+            if index > CAP - 1 {
+                break;
+            }
+        }
+
+        if index < CAP {
+            // we set the write pointer to however many elements we managed to write (up to CAP - 1)
+            // WARNING: ONLY WORKS WHEN WORKING ON A CLEARED RINGBUFFER
+            self.writeptr = index;
+        } else {
+            self.writeptr = CAP;
+            self.extend_batched_internal::<BATCH_SIZE>(iter);
+        }
+    }
+
+    /// Like [`Extend::extend`](ConstGenericRingBuffer::extend), but takes a custom batch size.
+    ///
+    /// We found that `30` works well for us, which is the batch size we use in `extend`,
+    /// but on different architectures this may not be true.
+    pub fn custom_extend_batched<const BATCH_SIZE: usize>(
+        &mut self,
+        iter: impl IntoIterator<Item = T>,
+    ) {
+        #[cfg(not(feature = "batched_extend"))]
+        {
+            for i in iter {
+                self.push(i);
+            }
+        }
+
+        #[cfg(feature = "batched_extend")]
+        {
+            let iter = iter.into_iter();
+
+            let (lower, _) = iter.size_hint();
+
+            if lower >= CAP {
+                // if there are more elements in our iterator than we have room for in the ringbuffer,
+                // drain the ringbuffer
+                self.clear();
+
+                // we need exactly CAP elements,
+                // so we drop items until the number of elements left in the iterator is exactly CAP
+                let num_we_can_drop = lower - CAP;
+
+                let iter = iter.skip(num_we_can_drop);
+
+                // Safety: clear above
+                unsafe { self.finish_iter::<BATCH_SIZE>(iter) };
+            } else if self.is_empty() {
+                self.clear();
+
+                // Safety: clear above
+                unsafe { self.finish_iter::<BATCH_SIZE>(iter) };
+            } else {
+                self.extend_batched_internal::<BATCH_SIZE>(iter);
+            }
+        }
+    }
+}
+
+impl<T, const CAP: usize> Extend<T> for ConstGenericRingBuffer<T, CAP> {
+    /// NOTE: correctness (but not soundness) of extend depends on `size_hint` on `iter` being correct.
+    #[inline]
+    fn extend<A: IntoIterator<Item = T>>(&mut self, iter: A) {
+        /// good number, found through benchmarking.
+ /// gives ~30% performance boost over not batching + const BATCH_SIZE: usize = 30; + self.custom_extend_batched::(iter); } } @@ -278,9 +532,7 @@ unsafe impl RingBuffer for ConstGenericRingBuffer Option { - let mut ret = None; - + fn push(&mut self, value: T) { if self.is_full() { let previous_value = mem::replace( &mut self.buf[crate::mask_modulo(CAP, self.readptr)], @@ -289,14 +541,15 @@ unsafe impl RingBuffer for ConstGenericRingBuffer Option { @@ -315,8 +568,6 @@ unsafe impl RingBuffer for ConstGenericRingBuffer FromIterator for ConstGenericRingBuffer fn from_iter>(iter: T) -> Self { let mut res = Self::default(); for i in iter { - let _ = res.enqueue(i); + res.push(i); } res @@ -368,16 +619,11 @@ impl IndexMut for ConstGenericRingBuffer { } } -impl SetLen for ConstGenericRingBuffer { - impl_ring_buffer_set_len!(readptr, writeptr); -} - #[cfg(test)] mod tests { - use crate::{AllocRingBuffer, ConstGenericRingBuffer, GrowableAllocRingBuffer, RingBuffer}; - use alloc::collections::{LinkedList, VecDeque}; - use alloc::string::ToString; - use alloc::vec; + use super::*; + use core::hint::black_box; + use core::ops::Range; #[test] fn test_not_power_of_two() { @@ -388,15 +634,15 @@ mod tests { // messes up for _ in 0..100 { for i in 0..NUM_VALS { - let _ = rb.enqueue(i); + rb.enqueue(i); } assert!(rb.is_full()); for i in 0..10 { - assert_eq!(Some(i + NUM_VALS - rb.capacity()), rb.dequeue()); + assert_eq!(Some(i + NUM_VALS - rb.capacity()), rb.dequeue()) } - assert!(rb.is_empty()); + assert!(rb.is_empty()) } } @@ -410,9 +656,7 @@ mod tests { #[test] fn test_extend() { let mut buf = ConstGenericRingBuffer::::new(); - (0..4).for_each(|_| { - let _ = buf.enqueue(0); - }); + (0..4).for_each(|_| buf.push(0)); let new_data = [0, 1, 2]; buf.extend(new_data); @@ -429,9 +673,7 @@ mod tests { #[test] fn test_extend_with_overflow() { let mut buf = ConstGenericRingBuffer::::new(); - (0..8).for_each(|_| { - let _ = buf.enqueue(0); - }); + (0..8).for_each(|_| buf.push(0)); let new_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; buf.extend(new_data); @@ -445,71 +687,198 @@ mod tests { } } + struct Weirderator(::IntoIter, SizeHint); + + impl Iterator for Weirderator { + type Item = ::Item; + + fn next(&mut self) -> Option { + self.0.next() + } + + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.0.size_hint(); + + match self.1 { + SizeHint::TooHigh => (lower + 10, upper), + SizeHint::TooLow => (lower - 10, upper), + SizeHint::Good => (lower, upper), + } + } + } + + #[derive(Debug, Copy, Clone)] + pub enum SizeHint { + TooHigh, + TooLow, + Good, + } + + struct IntoWeirderator(pub T, SizeHint); + + impl IntoIterator for IntoWeirderator + where + ::IntoIter: Sized, + { + type Item = ::Item; + type IntoIter = Weirderator; + + fn into_iter(self) -> Self::IntoIter { + Weirderator(self.0.into_iter(), self.1) + } + } + #[test] - fn from() { - assert_eq!( - ConstGenericRingBuffer::::from([1, 2, 3]).to_vec(), - vec![1, 2, 3] - ); + // tests whether we correctly drop items when the batch crosses the boundary + fn boundary_drop_extend() { + for n in 50..300 { + let mut a = ConstGenericRingBuffer::<_, 128>::new(); - let v: &[i32; 3] = &[1, 2, 3]; - assert_eq!( - ConstGenericRingBuffer::::from(v).to_vec(), - vec![1, 2, 3] - ); + for i in 0..n { + a.push(i); + } - let v: &[i32] = &[1, 2, 3]; - assert_eq!( - ConstGenericRingBuffer::::from(v).to_vec(), - vec![1, 2, 3] - ); + a.extend(0..n); - let v: &mut [i32; 3] = &mut [1, 2, 3]; - assert_eq!( - ConstGenericRingBuffer::::from(v).to_vec(), - vec![1, 2, 3] - ); + 
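+ // drain the buffer afterwards so every surviving element is read back out; running this under Miri (for example) would flag any slot the batched extend left uninitialized or dropped twice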
for _ in 0..128 { + let _ = black_box(a.dequeue()); + } + } + } - let v: &mut [i32] = &mut [1, 2, 3]; - assert_eq!( - ConstGenericRingBuffer::::from(v).to_vec(), - vec![1, 2, 3] - ); + #[test] + fn test_verify_extend() { + extern crate std; + + macro_rules! for_cap { + ($cap: expr) => {{ + const CAP: usize = $cap; + + for start in 0..5 { + for size in [SizeHint::TooLow, SizeHint::Good, SizeHint::TooHigh] { + std::println!("{start} {size:?}"); + + let mut rb = ConstGenericRingBuffer::::new(); + for i in 0..start { + rb.push(i); + } + + rb.extend(Weirderator::>(0..CAP, size)); + rb.push(17); + rb.push(18); + rb.push(19); + + for _ in 0..CAP { + let _ = rb.dequeue(); + } + + let mut rb = ConstGenericRingBuffer::::new(); + for i in 0..start { + rb.push(i); + } + + rb.extend(Weirderator::>(0..(CAP + 1), size)); + rb.push(18); + rb.push(19); + + for _ in 0..CAP { + let _ = rb.dequeue(); + } + + let mut rb = ConstGenericRingBuffer::::new(); + for i in 0..start { + rb.push(i); + } + + rb.extend(Weirderator::>(0..(CAP + 2), size)); + rb.push(19); + + for _ in 0..CAP { + let _ = rb.dequeue(); + } + } + } + };}; + } - assert_eq!( - ConstGenericRingBuffer::::from(vec![1, 2, 3]).to_vec(), - vec![1, 2, 3] - ); - assert_eq!( - ConstGenericRingBuffer::::from( - vec![1, 2, 3].into_iter().collect::>() - ) - .to_vec(), - vec![1, 2, 3] - ); - assert_eq!( - ConstGenericRingBuffer::::from( - vec![1, 2, 3].into_iter().collect::>() - ) - .to_vec(), - vec![1, 2, 3] - ); - assert_eq!( - ConstGenericRingBuffer::<_, 3>::from("abc".to_string()).to_vec(), - vec!['a', 'b', 'c'] - ); - assert_eq!( - ConstGenericRingBuffer::<_, 3>::from("abc").to_vec(), - vec!['a', 'b', 'c'] - ); - assert_eq!( - ConstGenericRingBuffer::<_, 3>::from(GrowableAllocRingBuffer::from(vec![1, 2, 3])) + for_cap!(17); + for_cap!(70); + for_cap!(128); + } + + #[cfg(test)] + mod tests { + use crate::{AllocRingBuffer, ConstGenericRingBuffer, GrowableAllocRingBuffer, RingBuffer}; + use alloc::collections::{LinkedList, VecDeque}; + use alloc::string::ToString; + use alloc::vec; + + #[test] + fn from() { + assert_eq!( + ConstGenericRingBuffer::::from([1, 2, 3]).to_vec(), + vec![1, 2, 3] + ); + + let v: &[i32; 3] = &[1, 2, 3]; + assert_eq!( + ConstGenericRingBuffer::::from(v).to_vec(), + vec![1, 2, 3] + ); + + let v: &[i32] = &[1, 2, 3]; + assert_eq!( + ConstGenericRingBuffer::::from(v).to_vec(), + vec![1, 2, 3] + ); + + let v: &mut [i32; 3] = &mut [1, 2, 3]; + assert_eq!( + ConstGenericRingBuffer::::from(v).to_vec(), + vec![1, 2, 3] + ); + + let v: &mut [i32] = &mut [1, 2, 3]; + assert_eq!( + ConstGenericRingBuffer::::from(v).to_vec(), + vec![1, 2, 3] + ); + + assert_eq!( + ConstGenericRingBuffer::::from(vec![1, 2, 3]).to_vec(), + vec![1, 2, 3] + ); + assert_eq!( + ConstGenericRingBuffer::::from( + vec![1, 2, 3].into_iter().collect::>() + ) .to_vec(), - vec![1, 2, 3] - ); - assert_eq!( - ConstGenericRingBuffer::<_, 3>::from(AllocRingBuffer::from(vec![1, 2, 3])).to_vec(), - vec![1, 2, 3] - ); + vec![1, 2, 3] + ); + assert_eq!( + ConstGenericRingBuffer::::from( + vec![1, 2, 3].into_iter().collect::>() + ) + .to_vec(), + vec![1, 2, 3] + ); + assert_eq!( + ConstGenericRingBuffer::<_, 3>::from("abc".to_string()).to_vec(), + vec!['a', 'b', 'c'] + ); + assert_eq!( + ConstGenericRingBuffer::<_, 3>::from("abc").to_vec(), + vec!['a', 'b', 'c'] + ); + assert_eq!( + ConstGenericRingBuffer::<_, 3>::from(GrowableAllocRingBuffer::from(vec![1, 2, 3])) + .to_vec(), + vec![1, 2, 3] + ); + assert_eq!( + ConstGenericRingBuffer::<_, 3>::from(AllocRingBuffer::from(vec![1, 2, 
3])).to_vec(), + vec![1, 2, 3] + ); + } } } diff --git a/tests/compile-fail/test_const_generic_array_zero_length_new.rs b/tests/compile-fail/test_const_generic_array_zero_length_new.rs index bdaddbf..b080de1 100644 --- a/tests/compile-fail/test_const_generic_array_zero_length_new.rs +++ b/tests/compile-fail/test_const_generic_array_zero_length_new.rs @@ -6,5 +6,5 @@ fn main() { let mut buf = ConstGenericRingBuffer::new::<0>(); //~^ note: the above error was encountered while instantiating `fn ringbuffer::ConstGenericRingBuffer::::new::<0>` // ringbuffer can't be zero length -let _ = buf.enqueue(5); + buf.push(5); } diff --git a/tests/conversions.rs b/tests/conversions.rs index 33a1750..e3c13a2 100644 --- a/tests/conversions.rs +++ b/tests/conversions.rs @@ -15,7 +15,7 @@ macro_rules! convert_test { let mut b: $to = a.into(); assert_eq!(b.to_vec(), vec!['1', '2']); - b.enqueue('3'); + b.push('3'); assert_eq!(b, b); } }; @@ -43,8 +43,8 @@ convert_tests!( alloc_from_const_slice: {let a: &[char; 2] = &['1', '2']; a}, alloc_from_arr: {let a: [char; 2] = ['1', '2']; a}, - alloc_from_cgrb: { ConstGenericRingBuffer::from(['1', '2'])}, - alloc_from_garb: { GrowableAllocRingBuffer::from(['1', '2'])}, + alloc_from_cgrb: {let a = ConstGenericRingBuffer::from(['1', '2']); a}, + alloc_from_garb: {let a = GrowableAllocRingBuffer::from(['1', '2']); a}, ] => AllocRingBuffer::<_> ); @@ -59,8 +59,8 @@ convert_tests!( growable_alloc_from_const_slice: {let a: &[char; 2] = &['1', '2']; a}, growable_alloc_from_arr: {let a: [char; 2] = ['1', '2']; a}, - growable_alloc_from_cgrb: { ConstGenericRingBuffer::from(['1', '2'])}, - growable_alloc_from_arb: { AllocRingBuffer::from(['1', '2'])}, + growable_alloc_from_cgrb: {let a = ConstGenericRingBuffer::from(['1', '2']); a}, + growable_alloc_from_arb: {let a = AllocRingBuffer::from(['1', '2']); a}, ] => GrowableAllocRingBuffer::<_> ); @@ -75,8 +75,8 @@ convert_tests!( const_from_const_slice: {let a: &[char; 2] = &['1', '2']; a}, const_from_arr: {let a: [char; 2] = ['1', '2']; a}, - const_from_garb: { GrowableAllocRingBuffer::from(['1', '2'])}, - const_from_arb: { AllocRingBuffer::from(['1', '2'])}, + const_from_garb: {let a = GrowableAllocRingBuffer::from(['1', '2']); a}, + const_from_arb: {let a = AllocRingBuffer::from(['1', '2']); a}, ] => ConstGenericRingBuffer::<_, 2> ); @@ -123,13 +123,13 @@ fn test_extra_conversions_const() { fn test_const_generic_new_parameter() { // Can we specify size only on the method? let mut a = ConstGenericRingBuffer::new::<2>(); - let _ = a.enqueue(5); + a.push(5); // Can we specify size in both positions? let mut a = ConstGenericRingBuffer::::new::<50>(); - let _ = a.enqueue(5); + a.push(5); // Can we specify size only on the struct? let mut a = ConstGenericRingBuffer::::new(); - let _ = a.enqueue(5); + a.push(5); }
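For readers following along, the user-visible contract of the batched `extend` path above is unchanged from plain pushing: the buffer keeps the most recent `CAP` elements. A short usage sketch against the public API as it stands after this diff (`push`, `extend`, `custom_extend_batched`, `to_vec`; the batch size `8` below is an arbitrary illustration, not a recommendation):

```rust
use ringbuffer::{ConstGenericRingBuffer, RingBuffer};

fn main() {
    let mut rb = ConstGenericRingBuffer::<i32, 4>::new();
    rb.push(0);

    // Extending past capacity overwrites the oldest elements,
    // exactly as a sequence of individual pushes would.
    rb.extend(1..=6);
    assert_eq!(rb.to_vec(), vec![3, 4, 5, 6]);

    // The custom-batch entry point has the same semantics;
    // only the internal batching granularity differs.
    let mut rb2 = ConstGenericRingBuffer::<i32, 4>::new();
    rb2.custom_extend_batched::<8>(1..=6);
    assert_eq!(rb2.to_vec(), vec![3, 4, 5, 6]);
}
```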