diff --git a/.lock b/.lock
new file mode 100644
index 000000000..e69de29bb
diff --git a/bytes/all.html b/bytes/all.html
new file mode 100644
index 000000000..13ff5495f
--- /dev/null
+++ b/bytes/all.html
@@ -0,0 +1 @@
+List of all items in this crate

List of all items

Structs

Traits

\ No newline at end of file
diff --git a/bytes/buf/buf_impl/trait.Buf.html b/bytes/buf/buf_impl/trait.Buf.html
new file mode 100644
index 000000000..af9509861
--- /dev/null
+++ b/bytes/buf/buf_impl/trait.Buf.html
@@ -0,0 +1,11 @@
+Redirection

Redirecting to ../../../bytes/buf/trait.Buf.html...

\ No newline at end of file
diff --git a/bytes/buf/buf_mut/trait.BufMut.html b/bytes/buf/buf_mut/trait.BufMut.html
new file mode 100644
index 000000000..77242b643
--- /dev/null
+++ b/bytes/buf/buf_mut/trait.BufMut.html
@@ -0,0 +1,11 @@
+Redirection

Redirecting to ../../../bytes/buf/trait.BufMut.html...

\ No newline at end of file
diff --git a/bytes/buf/chain/struct.Chain.html b/bytes/buf/chain/struct.Chain.html
new file mode 100644
index 000000000..f8ecbf20f
--- /dev/null
+++ b/bytes/buf/chain/struct.Chain.html
@@ -0,0 +1,11 @@
+Redirection

Redirecting to ../../../bytes/buf/struct.Chain.html...

\ No newline at end of file
diff --git a/bytes/buf/index.html b/bytes/buf/index.html
new file mode 100644
index 000000000..c422e32ac
--- /dev/null
+++ b/bytes/buf/index.html
@@ -0,0 +1,11 @@
+bytes::buf - Rust

Module bytes::buf

source ·
Expand description

Utilities for working with buffers.

+

A buffer is any structure that contains a sequence of bytes. The bytes may or may not be stored in contiguous memory. This module contains traits used to abstract over buffers as well as utilities for working with buffer types.

+

§Buf, BufMut

+

These are the two foundational traits for abstractly working with buffers. They can be thought of as iterators over byte structures. They offer better performance than Iterator by providing an API optimized for byte slices.

+

See Buf and BufMut for more details.

+
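A brief, hedged sketch of the two traits in action (not taken from the generated page; it relies only on the crate's documented Buf impl for &[u8] and BufMut impl for Vec<u8>):

use bytes::{Buf, BufMut};

// Reading: &[u8] implements Buf, so the slice itself acts as the cursor.
let mut src: &[u8] = &[0x00, 0x01, 0x02, 0x03];
assert_eq!(src.get_u16(), 0x0001); // multi-byte reads are big-endian by default
assert_eq!(src.remaining(), 2);

// Writing: Vec<u8> implements BufMut and grows as bytes are put into it.
let mut dst: Vec<u8> = Vec::new();
dst.put_u16(0x0001);
dst.put_slice(&[0x02, 0x03]);
assert_eq!(&dst[..], &[0x00, 0x01, 0x02, 0x03][..]);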

Structs§

  • A Chain sequences two buffers.
  • Iterator over the bytes contained by the buffer.
  • A BufMut adapter which limits the number of bytes that can be written to an underlying buffer.
  • A Buf adapter which implements io::Read for the inner value.
  • A Buf adapter which limits the bytes read from an underlying buffer.
  • Uninitialized byte slice.
  • A BufMut adapter which implements io::Write for the inner value.

Traits§

  • Read bytes from a buffer.
  • A trait for values that provide sequential write access to bytes.
\ No newline at end of file
diff --git a/bytes/buf/iter/struct.IntoIter.html b/bytes/buf/iter/struct.IntoIter.html
new file mode 100644
index 000000000..459f7fe1d
--- /dev/null
+++ b/bytes/buf/iter/struct.IntoIter.html
@@ -0,0 +1,11 @@
+Redirection

Redirecting to ../../../bytes/buf/struct.IntoIter.html...

\ No newline at end of file
diff --git a/bytes/buf/limit/struct.Limit.html b/bytes/buf/limit/struct.Limit.html
new file mode 100644
index 000000000..7e57ce537
--- /dev/null
+++ b/bytes/buf/limit/struct.Limit.html
@@ -0,0 +1,11 @@
+Redirection

Redirecting to ../../../bytes/buf/struct.Limit.html...

\ No newline at end of file
diff --git a/bytes/buf/reader/struct.Reader.html b/bytes/buf/reader/struct.Reader.html
new file mode 100644
index 000000000..8b81eb74c
--- /dev/null
+++ b/bytes/buf/reader/struct.Reader.html
@@ -0,0 +1,11 @@
+Redirection

Redirecting to ../../../bytes/buf/struct.Reader.html...

\ No newline at end of file
diff --git a/bytes/buf/sidebar-items.js b/bytes/buf/sidebar-items.js
new file mode 100644
index 000000000..86ebaaedc
--- /dev/null
+++ b/bytes/buf/sidebar-items.js
@@ -0,0 +1 @@
+window.SIDEBAR_ITEMS = {"struct":["Chain","IntoIter","Limit","Reader","Take","UninitSlice","Writer"],"trait":["Buf","BufMut"]};
\ No newline at end of file
diff --git a/bytes/buf/struct.Chain.html b/bytes/buf/struct.Chain.html
new file mode 100644
index 000000000..ff4e7136e
--- /dev/null
+++ b/bytes/buf/struct.Chain.html
@@ -0,0 +1,120 @@
+Chain in bytes::buf - Rust

Struct bytes::buf::Chain

source ·
pub struct Chain<T, U> { /* private fields */ }
Expand description

A Chain sequences two buffers.

+

Chain is an adapter that links two underlying buffers and provides a continuous view across both buffers. It is able to sequence either immutable buffers (Buf values) or mutable buffers (BufMut values).

+

This struct is generally created by calling Buf::chain. Please see that function’s documentation for more detail.

+

§Examples

+
use bytes::{Bytes, Buf};
+
+let mut buf = (&b"hello "[..])
+    .chain(&b"world"[..]);
+
+let full: Bytes = buf.copy_to_bytes(11);
+assert_eq!(full[..], b"hello world"[..]);
+

Implementations§

source§

impl<T, U> Chain<T, U>

source

pub fn first_ref(&self) -> &T

Gets a reference to the first underlying Buf.

+
§Examples
+
use bytes::Buf;
+
+let buf = (&b"hello"[..])
+    .chain(&b"world"[..]);
+
+assert_eq!(buf.first_ref()[..], b"hello"[..]);
+
source

pub fn first_mut(&mut self) -> &mut T

Gets a mutable reference to the first underlying Buf.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = (&b"hello"[..])
+    .chain(&b"world"[..]);
+
+buf.first_mut().advance(1);
+
+let full = buf.copy_to_bytes(9);
+assert_eq!(full, b"elloworld"[..]);
+
source

pub fn last_ref(&self) -> &U

Gets a reference to the last underlying Buf.

+
§Examples
+
use bytes::Buf;
+
+let buf = (&b"hello"[..])
+    .chain(&b"world"[..]);
+
+assert_eq!(buf.last_ref()[..], b"world"[..]);
+
source

pub fn last_mut(&mut self) -> &mut U

Gets a mutable reference to the last underlying Buf.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = (&b"hello "[..])
+    .chain(&b"world"[..]);
+
+buf.last_mut().advance(1);
+
+let full = buf.copy_to_bytes(10);
+assert_eq!(full, b"hello orld"[..]);
+
source

pub fn into_inner(self) -> (T, U)

Consumes this Chain, returning the underlying values.

+
§Examples
+
use bytes::Buf;
+
+let chain = (&b"hello"[..])
+    .chain(&b"world"[..]);
+
+let (first, last) = chain.into_inner();
+assert_eq!(first[..], b"hello"[..]);
+assert_eq!(last[..], b"world"[..]);
+

Trait Implementations§

source§

impl<T, U> Buf for Chain<T, U>
where + T: Buf, + U: Buf,

source§

fn remaining(&self) -> usize

Returns the number of bytes between the current position and the end of +the buffer. Read more
source§

fn chunk(&self) -> &[u8]

Returns a slice starting at the current position and of length between 0 +and Buf::remaining(). Note that this can return shorter slice (this allows +non-continuous internal representation). Read more
source§

fn advance(&mut self, cnt: usize)

Advance the internal cursor of the Buf Read more
source§

fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize

Available on crate feature std only.
Fills dst with potentially multiple slices starting at self’s +current position. Read more
source§

fn copy_to_bytes(&mut self, len: usize) -> Bytes

Consumes len bytes inside self and returns new instance of Bytes +with this data. Read more
source§

fn has_remaining(&self) -> bool

Returns true if there are any more bytes to consume Read more
source§

fn copy_to_slice(&mut self, dst: &mut [u8])

Copies bytes from self into dst. Read more
source§

fn get_u8(&mut self) -> u8

Gets an unsigned 8 bit integer from self. Read more
source§

fn get_i8(&mut self) -> i8

Gets a signed 8 bit integer from self. Read more
source§

fn get_u16(&mut self) -> u16

Gets an unsigned 16 bit integer from self in big-endian byte order. Read more
source§

fn get_u16_le(&mut self) -> u16

Gets an unsigned 16 bit integer from self in little-endian byte order. Read more
source§

fn get_u16_ne(&mut self) -> u16

Gets an unsigned 16 bit integer from self in native-endian byte order. Read more
source§

fn get_i16(&mut self) -> i16

Gets a signed 16 bit integer from self in big-endian byte order. Read more
source§

fn get_i16_le(&mut self) -> i16

Gets a signed 16 bit integer from self in little-endian byte order. Read more
source§

fn get_i16_ne(&mut self) -> i16

Gets a signed 16 bit integer from self in native-endian byte order. Read more
source§

fn get_u32(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the big-endian byte order. Read more
source§

fn get_u32_le(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the little-endian byte order. Read more
source§

fn get_u32_ne(&mut self) -> u32

Gets an unsigned 32 bit integer from self in native-endian byte order. Read more
source§

fn get_i32(&mut self) -> i32

Gets a signed 32 bit integer from self in big-endian byte order. Read more
source§

fn get_i32_le(&mut self) -> i32

Gets a signed 32 bit integer from self in little-endian byte order. Read more
source§

fn get_i32_ne(&mut self) -> i32

Gets a signed 32 bit integer from self in native-endian byte order. Read more
source§

fn get_u64(&mut self) -> u64

Gets an unsigned 64 bit integer from self in big-endian byte order. Read more
source§

fn get_u64_le(&mut self) -> u64

Gets an unsigned 64 bit integer from self in little-endian byte order. Read more
source§

fn get_u64_ne(&mut self) -> u64

Gets an unsigned 64 bit integer from self in native-endian byte order. Read more
source§

fn get_i64(&mut self) -> i64

Gets a signed 64 bit integer from self in big-endian byte order. Read more
source§

fn get_i64_le(&mut self) -> i64

Gets a signed 64 bit integer from self in little-endian byte order. Read more
source§

fn get_i64_ne(&mut self) -> i64

Gets a signed 64 bit integer from self in native-endian byte order. Read more
source§

fn get_u128(&mut self) -> u128

Gets an unsigned 128 bit integer from self in big-endian byte order. Read more
source§

fn get_u128_le(&mut self) -> u128

Gets an unsigned 128 bit integer from self in little-endian byte order. Read more
source§

fn get_u128_ne(&mut self) -> u128

Gets an unsigned 128 bit integer from self in native-endian byte order. Read more
source§

fn get_i128(&mut self) -> i128

Gets a signed 128 bit integer from self in big-endian byte order. Read more
source§

fn get_i128_le(&mut self) -> i128

Gets a signed 128 bit integer from self in little-endian byte order. Read more
source§

fn get_i128_ne(&mut self) -> i128

Gets a signed 128 bit integer from self in native-endian byte order. Read more
source§

fn get_uint(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in big-endian byte order. Read more
source§

fn get_uint_le(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in little-endian byte order. Read more
source§

fn get_uint_ne(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in native-endian byte order. Read more
source§

fn get_int(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in big-endian byte order. Read more
source§

fn get_int_le(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in little-endian byte order. Read more
source§

fn get_int_ne(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in native-endian byte order. Read more
source§

fn get_f32(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f32_le(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f32_ne(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn get_f64(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f64_le(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f64_ne(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn take(self, limit: usize) -> Take<Self>
where + Self: Sized,

Creates an adaptor which will read at most limit bytes from self. Read more
source§

fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adaptor which will chain this buffer with another. Read more
source§

fn reader(self) -> Reader<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Read trait for self. Read more
source§

impl<T, U> BufMut for Chain<T, U>
where + T: BufMut, + U: BufMut,

source§

fn remaining_mut(&self) -> usize

Returns the number of bytes that can be written from the current +position until the end of the buffer is reached. Read more
source§

fn chunk_mut(&mut self) -> &mut UninitSlice

Returns a mutable slice starting at the current BufMut position and of +length between 0 and BufMut::remaining_mut(). Note that this can be shorter than the +whole remainder of the buffer (this allows non-continuous implementation). Read more
source§

unsafe fn advance_mut(&mut self, cnt: usize)

Advance the internal cursor of the BufMut Read more
source§

fn has_remaining_mut(&self) -> bool

Returns true if there is space in self for more bytes. Read more
source§

fn put<T: Buf>(&mut self, src: T)
where + Self: Sized,

Transfer bytes into self from src and advance the cursor by the +number of bytes written. Read more
source§

fn put_slice(&mut self, src: &[u8])

Transfer bytes into self from src and advance the cursor by the +number of bytes written. Read more
source§

fn put_bytes(&mut self, val: u8, cnt: usize)

Put cnt bytes val into self. Read more
source§

fn put_u8(&mut self, n: u8)

Writes an unsigned 8 bit integer to self. Read more
source§

fn put_i8(&mut self, n: i8)

Writes a signed 8 bit integer to self. Read more
source§

fn put_u16(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in big-endian byte order. Read more
source§

fn put_u16_le(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in little-endian byte order. Read more
source§

fn put_u16_ne(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in native-endian byte order. Read more
source§

fn put_i16(&mut self, n: i16)

Writes a signed 16 bit integer to self in big-endian byte order. Read more
source§

fn put_i16_le(&mut self, n: i16)

Writes a signed 16 bit integer to self in little-endian byte order. Read more
source§

fn put_i16_ne(&mut self, n: i16)

Writes a signed 16 bit integer to self in native-endian byte order. Read more
source§

fn put_u32(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in big-endian byte order. Read more
source§

fn put_u32_le(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in little-endian byte order. Read more
source§

fn put_u32_ne(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in native-endian byte order. Read more
source§

fn put_i32(&mut self, n: i32)

Writes a signed 32 bit integer to self in big-endian byte order. Read more
source§

fn put_i32_le(&mut self, n: i32)

Writes a signed 32 bit integer to self in little-endian byte order. Read more
source§

fn put_i32_ne(&mut self, n: i32)

Writes a signed 32 bit integer to self in native-endian byte order. Read more
source§

fn put_u64(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in the big-endian byte order. Read more
source§

fn put_u64_le(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in little-endian byte order. Read more
source§

fn put_u64_ne(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in native-endian byte order. Read more
source§

fn put_i64(&mut self, n: i64)

Writes a signed 64 bit integer to self in the big-endian byte order. Read more
source§

fn put_i64_le(&mut self, n: i64)

Writes a signed 64 bit integer to self in little-endian byte order. Read more
source§

fn put_i64_ne(&mut self, n: i64)

Writes a signed 64 bit integer to self in native-endian byte order. Read more
source§

fn put_u128(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in the big-endian byte order. Read more
source§

fn put_u128_le(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in little-endian byte order. Read more
source§

fn put_u128_ne(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in native-endian byte order. Read more
source§

fn put_i128(&mut self, n: i128)

Writes a signed 128 bit integer to self in the big-endian byte order. Read more
source§

fn put_i128_le(&mut self, n: i128)

Writes a signed 128 bit integer to self in little-endian byte order. Read more
source§

fn put_i128_ne(&mut self, n: i128)

Writes a signed 128 bit integer to self in native-endian byte order. Read more
source§

fn put_uint(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in big-endian byte order. Read more
source§

fn put_uint_le(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in the little-endian byte order. Read more
source§

fn put_uint_ne(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in the native-endian byte order. Read more
source§

fn put_int(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in big-endian byte order. Read more
source§

fn put_int_le(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in little-endian byte order. Read more
source§

fn put_int_ne(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in native-endian byte order. Read more
source§

fn put_f32(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in big-endian byte order. Read more
source§

fn put_f32_le(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in little-endian byte order. Read more
source§

fn put_f32_ne(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in native-endian byte order. Read more
source§

fn put_f64(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in big-endian byte order. Read more
source§

fn put_f64_le(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in little-endian byte order. Read more
source§

fn put_f64_ne(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in native-endian byte order. Read more
source§

fn limit(self, limit: usize) -> Limit<Self>
where + Self: Sized,

Creates an adaptor which can write at most limit bytes to self. Read more
source§

fn writer(self) -> Writer<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Write trait for self. Read more
source§

fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adapter which will chain this buffer with another. Read more
source§

impl<T: Debug, U: Debug> Debug for Chain<T, U>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl<T, U> IntoIterator for Chain<T, U>
where + T: Buf, + U: Buf,

source§

type Item = u8

The type of the elements being iterated over.
source§

type IntoIter = IntoIter<Chain<T, U>>

Which kind of iterator are we turning this into?
source§

fn into_iter(self) -> Self::IntoIter

Creates an iterator from a value. Read more

Auto Trait Implementations§

§

impl<T, U> Freeze for Chain<T, U>
where + T: Freeze, + U: Freeze,

§

impl<T, U> RefUnwindSafe for Chain<T, U>
where + T: RefUnwindSafe, + U: RefUnwindSafe,

§

impl<T, U> Send for Chain<T, U>
where + T: Send, + U: Send,

§

impl<T, U> Sync for Chain<T, U>
where + T: Sync, + U: Sync,

§

impl<T, U> Unpin for Chain<T, U>
where + T: Unpin, + U: Unpin,

§

impl<T, U> UnwindSafe for Chain<T, U>
where + T: UnwindSafe, + U: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
\ No newline at end of file
diff --git a/bytes/buf/struct.IntoIter.html b/bytes/buf/struct.IntoIter.html
new file mode 100644
index 000000000..6faf518d7
--- /dev/null
+++ b/bytes/buf/struct.IntoIter.html
@@ -0,0 +1,252 @@
+IntoIter in bytes::buf - Rust

Struct bytes::buf::IntoIter

source ·
pub struct IntoIter<T> { /* private fields */ }
Expand description

Iterator over the bytes contained by the buffer.

+

§Examples

+

Basic usage:

+ +
use bytes::Bytes;
+
+let buf = Bytes::from(&b"abc"[..]);
+let mut iter = buf.into_iter();
+
+assert_eq!(iter.next(), Some(b'a'));
+assert_eq!(iter.next(), Some(b'b'));
+assert_eq!(iter.next(), Some(b'c'));
+assert_eq!(iter.next(), None);
+

Implementations§

source§

impl<T> IntoIter<T>

source

pub fn new(inner: T) -> IntoIter<T>

Creates an iterator over the bytes contained by the buffer.

+
§Examples
+
use bytes::Bytes;
+
+let buf = Bytes::from_static(b"abc");
+let mut iter = buf.into_iter();
+
+assert_eq!(iter.next(), Some(b'a'));
+assert_eq!(iter.next(), Some(b'b'));
+assert_eq!(iter.next(), Some(b'c'));
+assert_eq!(iter.next(), None);
+
source

pub fn into_inner(self) -> T

Consumes this IntoIter, returning the underlying value.

+
§Examples
+
use bytes::{Buf, Bytes};
+
+let buf = Bytes::from(&b"abc"[..]);
+let mut iter = buf.into_iter();
+
+assert_eq!(iter.next(), Some(b'a'));
+
+let buf = iter.into_inner();
+assert_eq!(2, buf.remaining());
+
source

pub fn get_ref(&self) -> &T

Gets a reference to the underlying Buf.

+

It is inadvisable to directly read from the underlying Buf.

+
§Examples
+
use bytes::{Buf, Bytes};
+
+let buf = Bytes::from(&b"abc"[..]);
+let mut iter = buf.into_iter();
+
+assert_eq!(iter.next(), Some(b'a'));
+
+assert_eq!(2, iter.get_ref().remaining());
+
source

pub fn get_mut(&mut self) -> &mut T

Gets a mutable reference to the underlying Buf.

+

It is inadvisable to directly read from the underlying Buf.

+
§Examples
+
use bytes::{Buf, BytesMut};
+
+let buf = BytesMut::from(&b"abc"[..]);
+let mut iter = buf.into_iter();
+
+assert_eq!(iter.next(), Some(b'a'));
+
+iter.get_mut().advance(1);
+
+assert_eq!(iter.next(), Some(b'c'));
+

Trait Implementations§

source§

impl<T: Debug> Debug for IntoIter<T>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl<T: Buf> ExactSizeIterator for IntoIter<T>

1.0.0 · source§

fn len(&self) -> usize

Returns the exact remaining length of the iterator. Read more
source§

fn is_empty(&self) -> bool

🔬This is a nightly-only experimental API. (exact_size_is_empty)
Returns true if the iterator is empty. Read more
source§

impl<T: Buf> Iterator for IntoIter<T>

source§

type Item = u8

The type of the elements being iterated over.
source§

fn next(&mut self) -> Option<u8>

Advances the iterator and returns the next value. Read more
source§

fn size_hint(&self) -> (usize, Option<usize>)

Returns the bounds on the remaining length of the iterator. Read more
source§

fn next_chunk<const N: usize>( + &mut self, +) -> Result<[Self::Item; N], IntoIter<Self::Item, N>>
where + Self: Sized,

🔬This is a nightly-only experimental API. (iter_next_chunk)
Advances the iterator and returns an array containing the next N values. Read more
1.0.0 · source§

fn count(self) -> usize
where + Self: Sized,

Consumes the iterator, counting the number of iterations and returning it. Read more
1.0.0 · source§

fn last(self) -> Option<Self::Item>
where + Self: Sized,

Consumes the iterator, returning the last element. Read more
source§

fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>>

🔬This is a nightly-only experimental API. (iter_advance_by)
Advances the iterator by n elements. Read more
1.0.0 · source§

fn nth(&mut self, n: usize) -> Option<Self::Item>

Returns the nth element of the iterator. Read more
1.28.0 · source§

fn step_by(self, step: usize) -> StepBy<Self>
where + Self: Sized,

Creates an iterator starting at the same point, but stepping by +the given amount at each iteration. Read more
1.0.0 · source§

fn chain<U>(self, other: U) -> Chain<Self, <U as IntoIterator>::IntoIter>
where + Self: Sized, + U: IntoIterator<Item = Self::Item>,

Takes two iterators and creates a new iterator over both in sequence. Read more
1.0.0 · source§

fn zip<U>(self, other: U) -> Zip<Self, <U as IntoIterator>::IntoIter>
where + Self: Sized, + U: IntoIterator,

‘Zips up’ two iterators into a single iterator of pairs. Read more
source§

fn intersperse_with<G>(self, separator: G) -> IntersperseWith<Self, G>
where + Self: Sized, + G: FnMut() -> Self::Item,

🔬This is a nightly-only experimental API. (iter_intersperse)
Creates a new iterator which places an item generated by separator +between adjacent items of the original iterator. Read more
1.0.0 · source§

fn map<B, F>(self, f: F) -> Map<Self, F>
where + Self: Sized, + F: FnMut(Self::Item) -> B,

Takes a closure and creates an iterator which calls that closure on each +element. Read more
1.21.0 · source§

fn for_each<F>(self, f: F)
where + Self: Sized, + F: FnMut(Self::Item),

Calls a closure on each element of an iterator. Read more
1.0.0 · source§

fn filter<P>(self, predicate: P) -> Filter<Self, P>
where + Self: Sized, + P: FnMut(&Self::Item) -> bool,

Creates an iterator which uses a closure to determine if an element +should be yielded. Read more
1.0.0 · source§

fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F>
where + Self: Sized, + F: FnMut(Self::Item) -> Option<B>,

Creates an iterator that both filters and maps. Read more
1.0.0 · source§

fn enumerate(self) -> Enumerate<Self>
where + Self: Sized,

Creates an iterator which gives the current iteration count as well as +the next value. Read more
1.0.0 · source§

fn peekable(self) -> Peekable<Self>
where + Self: Sized,

Creates an iterator which can use the peek and peek_mut methods +to look at the next element of the iterator without consuming it. See +their documentation for more information. Read more
1.0.0 · source§

fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P>
where + Self: Sized, + P: FnMut(&Self::Item) -> bool,

Creates an iterator that skips elements based on a predicate. Read more
1.0.0 · source§

fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P>
where + Self: Sized, + P: FnMut(&Self::Item) -> bool,

Creates an iterator that yields elements based on a predicate. Read more
1.57.0 · source§

fn map_while<B, P>(self, predicate: P) -> MapWhile<Self, P>
where + Self: Sized, + P: FnMut(Self::Item) -> Option<B>,

Creates an iterator that both yields elements based on a predicate and maps. Read more
1.0.0 · source§

fn skip(self, n: usize) -> Skip<Self>
where + Self: Sized,

Creates an iterator that skips the first n elements. Read more
1.0.0 · source§

fn take(self, n: usize) -> Take<Self>
where + Self: Sized,

Creates an iterator that yields the first n elements, or fewer +if the underlying iterator ends sooner. Read more
1.0.0 · source§

fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F>
where + Self: Sized, + F: FnMut(&mut St, Self::Item) -> Option<B>,

An iterator adapter which, like fold, holds internal state, but +unlike fold, produces a new iterator. Read more
1.0.0 · source§

fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
where + Self: Sized, + U: IntoIterator, + F: FnMut(Self::Item) -> U,

Creates an iterator that works like map, but flattens nested structure. Read more
source§

fn map_windows<F, R, const N: usize>(self, f: F) -> MapWindows<Self, F, N>
where + Self: Sized, + F: FnMut(&[Self::Item; N]) -> R,

🔬This is a nightly-only experimental API. (iter_map_windows)
Calls the given function f for each contiguous window of size N over +self and returns an iterator over the outputs of f. Like slice::windows(), +the windows during mapping overlap as well. Read more
1.0.0 · source§

fn fuse(self) -> Fuse<Self>
where + Self: Sized,

Creates an iterator which ends after the first None. Read more
1.0.0 · source§

fn inspect<F>(self, f: F) -> Inspect<Self, F>
where + Self: Sized, + F: FnMut(&Self::Item),

Does something with each element of an iterator, passing the value on. Read more
1.0.0 · source§

fn by_ref(&mut self) -> &mut Self
where + Self: Sized,

Borrows an iterator, rather than consuming it. Read more
1.0.0 · source§

fn collect<B>(self) -> B
where + B: FromIterator<Self::Item>, + Self: Sized,

Transforms an iterator into a collection. Read more
source§

fn collect_into<E>(self, collection: &mut E) -> &mut E
where + E: Extend<Self::Item>, + Self: Sized,

🔬This is a nightly-only experimental API. (iter_collect_into)
Collects all the items from an iterator into a collection. Read more
1.0.0 · source§

fn partition<B, F>(self, f: F) -> (B, B)
where + Self: Sized, + B: Default + Extend<Self::Item>, + F: FnMut(&Self::Item) -> bool,

Consumes an iterator, creating two collections from it. Read more
source§

fn is_partitioned<P>(self, predicate: P) -> bool
where + Self: Sized, + P: FnMut(Self::Item) -> bool,

🔬This is a nightly-only experimental API. (iter_is_partitioned)
Checks if the elements of this iterator are partitioned according to the given predicate, +such that all those that return true precede all those that return false. Read more
1.27.0 · source§

fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try<Output = B>,

An iterator method that applies a function as long as it returns +successfully, producing a single, final value. Read more
1.27.0 · source§

fn try_for_each<F, R>(&mut self, f: F) -> R
where + Self: Sized, + F: FnMut(Self::Item) -> R, + R: Try<Output = ()>,

An iterator method that applies a fallible function to each item in the +iterator, stopping at the first error and returning that error. Read more
1.0.0 · source§

fn fold<B, F>(self, init: B, f: F) -> B
where + Self: Sized, + F: FnMut(B, Self::Item) -> B,

Folds every element into an accumulator by applying an operation, +returning the final result. Read more
1.51.0 · source§

fn reduce<F>(self, f: F) -> Option<Self::Item>
where + Self: Sized, + F: FnMut(Self::Item, Self::Item) -> Self::Item,

Reduces the elements to a single one, by repeatedly applying a reducing +operation. Read more
source§

fn try_reduce<R>( + &mut self, + f: impl FnMut(Self::Item, Self::Item) -> R, +) -> <<R as Try>::Residual as Residual<Option<<R as Try>::Output>>>::TryType
where + Self: Sized, + R: Try<Output = Self::Item>, + <R as Try>::Residual: Residual<Option<Self::Item>>,

🔬This is a nightly-only experimental API. (iterator_try_reduce)
Reduces the elements to a single one by repeatedly applying a reducing operation. If the +closure returns a failure, the failure is propagated back to the caller immediately. Read more
1.0.0 · source§

fn all<F>(&mut self, f: F) -> bool
where + Self: Sized, + F: FnMut(Self::Item) -> bool,

Tests if every element of the iterator matches a predicate. Read more
1.0.0 · source§

fn any<F>(&mut self, f: F) -> bool
where + Self: Sized, + F: FnMut(Self::Item) -> bool,

Tests if any element of the iterator matches a predicate. Read more
1.0.0 · source§

fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where + Self: Sized, + P: FnMut(&Self::Item) -> bool,

Searches for an element of an iterator that satisfies a predicate. Read more
1.30.0 · source§

fn find_map<B, F>(&mut self, f: F) -> Option<B>
where + Self: Sized, + F: FnMut(Self::Item) -> Option<B>,

Applies function to the elements of iterator and returns +the first non-none result. Read more
source§

fn try_find<R>( + &mut self, + f: impl FnMut(&Self::Item) -> R, +) -> <<R as Try>::Residual as Residual<Option<Self::Item>>>::TryType
where + Self: Sized, + R: Try<Output = bool>, + <R as Try>::Residual: Residual<Option<Self::Item>>,

🔬This is a nightly-only experimental API. (try_find)
Applies function to the elements of iterator and returns +the first true result or the first error. Read more
1.0.0 · source§

fn position<P>(&mut self, predicate: P) -> Option<usize>
where + Self: Sized, + P: FnMut(Self::Item) -> bool,

Searches for an element in an iterator, returning its index. Read more
1.6.0 · source§

fn max_by_key<B, F>(self, f: F) -> Option<Self::Item>
where + B: Ord, + Self: Sized, + F: FnMut(&Self::Item) -> B,

Returns the element that gives the maximum value from the +specified function. Read more
1.15.0 · source§

fn max_by<F>(self, compare: F) -> Option<Self::Item>
where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering,

Returns the element that gives the maximum value with respect to the +specified comparison function. Read more
1.6.0 · source§

fn min_by_key<B, F>(self, f: F) -> Option<Self::Item>
where + B: Ord, + Self: Sized, + F: FnMut(&Self::Item) -> B,

Returns the element that gives the minimum value from the +specified function. Read more
1.15.0 · source§

fn min_by<F>(self, compare: F) -> Option<Self::Item>
where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering,

Returns the element that gives the minimum value with respect to the +specified comparison function. Read more
1.0.0 · source§

fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
where + FromA: Default + Extend<A>, + FromB: Default + Extend<B>, + Self: Sized + Iterator<Item = (A, B)>,

Converts an iterator of pairs into a pair of containers. Read more
1.36.0 · source§

fn copied<'a, T>(self) -> Copied<Self>
where + T: 'a + Copy, + Self: Sized + Iterator<Item = &'a T>,

Creates an iterator which copies all of its elements. Read more
1.0.0 · source§

fn cloned<'a, T>(self) -> Cloned<Self>
where + T: 'a + Clone, + Self: Sized + Iterator<Item = &'a T>,

Creates an iterator which clones all of its elements. Read more
source§

fn array_chunks<const N: usize>(self) -> ArrayChunks<Self, N>
where + Self: Sized,

🔬This is a nightly-only experimental API. (iter_array_chunks)
Returns an iterator over N elements of the iterator at a time. Read more
1.11.0 · source§

fn sum<S>(self) -> S
where + Self: Sized, + S: Sum<Self::Item>,

Sums the elements of an iterator. Read more
1.11.0 · source§

fn product<P>(self) -> P
where + Self: Sized, + P: Product<Self::Item>,

Iterates over the entire iterator, multiplying all the elements Read more
source§

fn cmp_by<I, F>(self, other: I, cmp: F) -> Ordering
where + Self: Sized, + I: IntoIterator, + F: FnMut(Self::Item, <I as IntoIterator>::Item) -> Ordering,

🔬This is a nightly-only experimental API. (iter_order_by)
Lexicographically compares the elements of this Iterator with those +of another with respect to the specified comparison function. Read more
1.5.0 · source§

fn partial_cmp<I>(self, other: I) -> Option<Ordering>
where + I: IntoIterator, + Self::Item: PartialOrd<<I as IntoIterator>::Item>, + Self: Sized,

Lexicographically compares the PartialOrd elements of +this Iterator with those of another. The comparison works like short-circuit +evaluation, returning a result without comparing the remaining elements. +As soon as an order can be determined, the evaluation stops and a result is returned. Read more
source§

fn partial_cmp_by<I, F>(self, other: I, partial_cmp: F) -> Option<Ordering>
where + Self: Sized, + I: IntoIterator, + F: FnMut(Self::Item, <I as IntoIterator>::Item) -> Option<Ordering>,

🔬This is a nightly-only experimental API. (iter_order_by)
Lexicographically compares the elements of this Iterator with those +of another with respect to the specified comparison function. Read more
1.5.0 · source§

fn eq<I>(self, other: I) -> bool
where + I: IntoIterator, + Self::Item: PartialEq<<I as IntoIterator>::Item>, + Self: Sized,

Determines if the elements of this Iterator are equal to those of +another. Read more
source§

fn eq_by<I, F>(self, other: I, eq: F) -> bool
where + Self: Sized, + I: IntoIterator, + F: FnMut(Self::Item, <I as IntoIterator>::Item) -> bool,

🔬This is a nightly-only experimental API. (iter_order_by)
Determines if the elements of this Iterator are equal to those of +another with respect to the specified equality function. Read more
1.5.0 · source§

fn ne<I>(self, other: I) -> bool
where + I: IntoIterator, + Self::Item: PartialEq<<I as IntoIterator>::Item>, + Self: Sized,

Determines if the elements of this Iterator are not equal to those of +another. Read more
1.5.0 · source§

fn lt<I>(self, other: I) -> bool
where + I: IntoIterator, + Self::Item: PartialOrd<<I as IntoIterator>::Item>, + Self: Sized,

Determines if the elements of this Iterator are lexicographically +less than those of another. Read more
1.5.0 · source§

fn le<I>(self, other: I) -> bool
where + I: IntoIterator, + Self::Item: PartialOrd<<I as IntoIterator>::Item>, + Self: Sized,

Determines if the elements of this Iterator are lexicographically +less or equal to those of another. Read more
1.5.0 · source§

fn gt<I>(self, other: I) -> bool
where + I: IntoIterator, + Self::Item: PartialOrd<<I as IntoIterator>::Item>, + Self: Sized,

Determines if the elements of this Iterator are lexicographically +greater than those of another. Read more
1.5.0 · source§

fn ge<I>(self, other: I) -> bool
where + I: IntoIterator, + Self::Item: PartialOrd<<I as IntoIterator>::Item>, + Self: Sized,

Determines if the elements of this Iterator are lexicographically +greater than or equal to those of another. Read more
1.82.0 · source§

fn is_sorted_by<F>(self, compare: F) -> bool
where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> bool,

Checks if the elements of this iterator are sorted using the given comparator function. Read more
1.82.0 · source§

fn is_sorted_by_key<F, K>(self, f: F) -> bool
where + Self: Sized, + F: FnMut(Self::Item) -> K, + K: PartialOrd,

Checks if the elements of this iterator are sorted using the given key extraction +function. Read more

Auto Trait Implementations§

§

impl<T> Freeze for IntoIter<T>
where + T: Freeze,

§

impl<T> RefUnwindSafe for IntoIter<T>
where + T: RefUnwindSafe,

§

impl<T> Send for IntoIter<T>
where + T: Send,

§

impl<T> Sync for IntoIter<T>
where + T: Sync,

§

impl<T> Unpin for IntoIter<T>
where + T: Unpin,

§

impl<T> UnwindSafe for IntoIter<T>
where + T: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<I> IntoIterator for I
where + I: Iterator,

source§

type Item = <I as Iterator>::Item

The type of the elements being iterated over.
source§

type IntoIter = I

Which kind of iterator are we turning this into?
source§

fn into_iter(self) -> I

Creates an iterator from a value. Read more
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
\ No newline at end of file
diff --git a/bytes/buf/struct.Limit.html b/bytes/buf/struct.Limit.html
new file mode 100644
index 000000000..e5b62e625
--- /dev/null
+++ b/bytes/buf/struct.Limit.html
@@ -0,0 +1,47 @@
+Limit in bytes::buf - Rust

Struct bytes::buf::Limit

source ·
pub struct Limit<T> { /* private fields */ }
Expand description

A BufMut adapter which limits the number of bytes that can be written to an underlying buffer.

+
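The generated page carries no example for Limit, so here is a minimal sketch (an editor's illustration, assuming only BufMut::limit and the BufMut methods listed below):

use bytes::BufMut;

let mut dst: Vec<u8> = Vec::new();
// Cap writes at 5 bytes, even though the Vec itself could keep growing.
let mut limited = (&mut dst).limit(5);

limited.put_slice(b"hello");
assert!(!limited.has_remaining_mut()); // the limit is now exhausted
assert_eq!(&dst[..], &b"hello"[..]);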

Implementations§

source§

impl<T> Limit<T>

source

pub fn into_inner(self) -> T

Consumes this Limit, returning the underlying value.

+
source

pub fn get_ref(&self) -> &T

Gets a reference to the underlying BufMut.

+

It is inadvisable to directly write to the underlying BufMut.

+
source

pub fn get_mut(&mut self) -> &mut T

Gets a mutable reference to the underlying BufMut.

+

It is inadvisable to directly write to the underlying BufMut.

+
source

pub fn limit(&self) -> usize

Returns the maximum number of bytes that can be written.

+
§Note
+

If the inner BufMut has fewer bytes than indicated by this method, then that is the actual number of available bytes.

+
source

pub fn set_limit(&mut self, lim: usize)

Sets the maximum number of bytes that can be written.

+
§Note
+

If the inner BufMut has fewer bytes than lim, then that is the actual number of available bytes.

+
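A short sketch of set_limit granting additional capacity in steps (an assumption: it mirrors the Take::set_limit example shown later in this section, and is not text from the generated page):

use bytes::BufMut;

let mut dst: Vec<u8> = Vec::new();
let mut limited = (&mut dst).limit(3);

limited.put_slice(b"abc");             // uses up the initial limit of 3
assert!(!limited.has_remaining_mut());

limited.set_limit(2);                  // allow two more bytes
limited.put_slice(b"de");
assert_eq!(&dst[..], &b"abcde"[..]);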

Trait Implementations§

source§

impl<T: BufMut> BufMut for Limit<T>

source§

fn remaining_mut(&self) -> usize

Returns the number of bytes that can be written from the current +position until the end of the buffer is reached. Read more
source§

fn chunk_mut(&mut self) -> &mut UninitSlice

Returns a mutable slice starting at the current BufMut position and of +length between 0 and BufMut::remaining_mut(). Note that this can be shorter than the +whole remainder of the buffer (this allows non-continuous implementation). Read more
source§

unsafe fn advance_mut(&mut self, cnt: usize)

Advance the internal cursor of the BufMut Read more
source§

fn has_remaining_mut(&self) -> bool

Returns true if there is space in self for more bytes. Read more
source§

fn put<T: Buf>(&mut self, src: T)
where + Self: Sized,

Transfer bytes into self from src and advance the cursor by the +number of bytes written. Read more
source§

fn put_slice(&mut self, src: &[u8])

Transfer bytes into self from src and advance the cursor by the +number of bytes written. Read more
source§

fn put_bytes(&mut self, val: u8, cnt: usize)

Put cnt bytes val into self. Read more
source§

fn put_u8(&mut self, n: u8)

Writes an unsigned 8 bit integer to self. Read more
source§

fn put_i8(&mut self, n: i8)

Writes a signed 8 bit integer to self. Read more
source§

fn put_u16(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in big-endian byte order. Read more
source§

fn put_u16_le(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in little-endian byte order. Read more
source§

fn put_u16_ne(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in native-endian byte order. Read more
source§

fn put_i16(&mut self, n: i16)

Writes a signed 16 bit integer to self in big-endian byte order. Read more
source§

fn put_i16_le(&mut self, n: i16)

Writes a signed 16 bit integer to self in little-endian byte order. Read more
source§

fn put_i16_ne(&mut self, n: i16)

Writes a signed 16 bit integer to self in native-endian byte order. Read more
source§

fn put_u32(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in big-endian byte order. Read more
source§

fn put_u32_le(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in little-endian byte order. Read more
source§

fn put_u32_ne(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in native-endian byte order. Read more
source§

fn put_i32(&mut self, n: i32)

Writes a signed 32 bit integer to self in big-endian byte order. Read more
source§

fn put_i32_le(&mut self, n: i32)

Writes a signed 32 bit integer to self in little-endian byte order. Read more
source§

fn put_i32_ne(&mut self, n: i32)

Writes a signed 32 bit integer to self in native-endian byte order. Read more
source§

fn put_u64(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in the big-endian byte order. Read more
source§

fn put_u64_le(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in little-endian byte order. Read more
source§

fn put_u64_ne(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in native-endian byte order. Read more
source§

fn put_i64(&mut self, n: i64)

Writes a signed 64 bit integer to self in the big-endian byte order. Read more
source§

fn put_i64_le(&mut self, n: i64)

Writes a signed 64 bit integer to self in little-endian byte order. Read more
source§

fn put_i64_ne(&mut self, n: i64)

Writes a signed 64 bit integer to self in native-endian byte order. Read more
source§

fn put_u128(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in the big-endian byte order. Read more
source§

fn put_u128_le(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in little-endian byte order. Read more
source§

fn put_u128_ne(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in native-endian byte order. Read more
source§

fn put_i128(&mut self, n: i128)

Writes a signed 128 bit integer to self in the big-endian byte order. Read more
source§

fn put_i128_le(&mut self, n: i128)

Writes a signed 128 bit integer to self in little-endian byte order. Read more
source§

fn put_i128_ne(&mut self, n: i128)

Writes a signed 128 bit integer to self in native-endian byte order. Read more
source§

fn put_uint(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in big-endian byte order. Read more
source§

fn put_uint_le(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in the little-endian byte order. Read more
source§

fn put_uint_ne(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in the native-endian byte order. Read more
source§

fn put_int(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in big-endian byte order. Read more
source§

fn put_int_le(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in little-endian byte order. Read more
source§

fn put_int_ne(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in native-endian byte order. Read more
source§

fn put_f32(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in big-endian byte order. Read more
source§

fn put_f32_le(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in little-endian byte order. Read more
source§

fn put_f32_ne(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in native-endian byte order. Read more
source§

fn put_f64(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in big-endian byte order. Read more
source§

fn put_f64_le(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in little-endian byte order. Read more
source§

fn put_f64_ne(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in native-endian byte order. Read more
source§

fn limit(self, limit: usize) -> Limit<Self>
where + Self: Sized,

Creates an adaptor which can write at most limit bytes to self. Read more
source§

fn writer(self) -> Writer<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Write trait for self. Read more
source§

fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adapter which will chain this buffer with another. Read more
source§

impl<T: Debug> Debug for Limit<T>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

§

impl<T> Freeze for Limit<T>
where + T: Freeze,

§

impl<T> RefUnwindSafe for Limit<T>
where + T: RefUnwindSafe,

§

impl<T> Send for Limit<T>
where + T: Send,

§

impl<T> Sync for Limit<T>
where + T: Sync,

§

impl<T> Unpin for Limit<T>
where + T: Unpin,

§

impl<T> UnwindSafe for Limit<T>
where + T: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
\ No newline at end of file
diff --git a/bytes/buf/struct.Reader.html b/bytes/buf/struct.Reader.html
new file mode 100644
index 000000000..778fce326
--- /dev/null
+++ b/bytes/buf/struct.Reader.html
@@ -0,0 +1,56 @@
+Reader in bytes::buf - Rust

Struct bytes::buf::Reader

source ·
pub struct Reader<B> { /* private fields */ }
Expand description

A Buf adapter which implements io::Read for the inner value.

+

This struct is generally created by calling reader() on Buf. See the documentation of reader() for more details.

+
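For orientation, a minimal sketch of the adapter in use (assuming only Buf::reader and the std::io::Read methods listed further down this page):

use bytes::Buf;
use std::io::Read;

let mut reader = (&b"hello world"[..]).reader();
let mut dst = [0u8; 5];

// Reader forwards std::io::Read calls to the underlying Buf.
reader.read_exact(&mut dst).unwrap();
assert_eq!(&dst, b"hello");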

Implementations§

source§

impl<B: Buf> Reader<B>

source

pub fn get_ref(&self) -> &B

Gets a reference to the underlying Buf.

+

It is inadvisable to directly read from the underlying Buf.

+
§Examples
+
use bytes::Buf;
+
+let buf = b"hello world".reader();
+
+assert_eq!(b"hello world", buf.get_ref());
+
source

pub fn get_mut(&mut self) -> &mut B

Gets a mutable reference to the underlying Buf.

+

It is inadvisable to directly read from the underlying Buf.

+
source

pub fn into_inner(self) -> B

Consumes this Reader, returning the underlying value.

+
§Examples
+
use bytes::Buf;
+use std::io;
+
+let mut buf = b"hello world".reader();
+let mut dst = vec![];
+
+io::copy(&mut buf, &mut dst).unwrap();
+
+let buf = buf.into_inner();
+assert_eq!(0, buf.remaining());
+

Trait Implementations§

source§

impl<B: Buf + Sized> BufRead for Reader<B>

source§

fn fill_buf(&mut self) -> Result<&[u8]>

Returns the contents of the internal buffer, filling it with more data +from the inner reader if it is empty. Read more
source§

fn consume(&mut self, amt: usize)

Tells this buffer that amt bytes have been consumed from the buffer, +so they should no longer be returned in calls to read. Read more
source§

fn has_data_left(&mut self) -> Result<bool, Error>

🔬This is a nightly-only experimental API. (buf_read_has_data_left)
Checks if the underlying Read has any data left to be read. Read more
1.0.0 · source§

fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<usize, Error>

Reads all bytes into buf until the delimiter byte or EOF is reached. Read more
source§

fn skip_until(&mut self, byte: u8) -> Result<usize, Error>

🔬This is a nightly-only experimental API. (bufread_skip_until)
Skips all bytes until the delimiter byte or EOF is reached. Read more
1.0.0 · source§

fn read_line(&mut self, buf: &mut String) -> Result<usize, Error>

Reads all bytes until a newline (the 0xA byte) is reached, and append +them to the provided String buffer. Read more
1.0.0 · source§

fn split(self, byte: u8) -> Split<Self>
where + Self: Sized,

Returns an iterator over the contents of this reader split on the byte +byte. Read more
1.0.0 · source§

fn lines(self) -> Lines<Self>
where + Self: Sized,

Returns an iterator over the lines of this reader. Read more
source§

impl<B: Debug> Debug for Reader<B>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl<B: Buf + Sized> Read for Reader<B>

source§

fn read(&mut self, dst: &mut [u8]) -> Result<usize>

Pull some bytes from this source into the specified buffer, returning +how many bytes were read. Read more
1.36.0 · source§

fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> Result<usize, Error>

Like read, except that it reads into a slice of buffers. Read more
source§

fn is_read_vectored(&self) -> bool

🔬This is a nightly-only experimental API. (can_vector)
Determines if this Reader has an efficient read_vectored +implementation. Read more
1.0.0 · source§

fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize, Error>

Reads all bytes until EOF in this source, placing them into buf. Read more
1.0.0 · source§

fn read_to_string(&mut self, buf: &mut String) -> Result<usize, Error>

Reads all bytes until EOF in this source, appending them to buf. Read more
1.6.0 · source§

fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), Error>

Reads the exact number of bytes required to fill buf. Read more
source§

fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> Result<(), Error>

🔬This is a nightly-only experimental API. (read_buf)
Pull some bytes from this source into the specified buffer. Read more
source§

fn read_buf_exact(&mut self, cursor: BorrowedCursor<'_>) -> Result<(), Error>

🔬This is a nightly-only experimental API. (read_buf)
Reads the exact number of bytes required to fill cursor. Read more
1.0.0 · source§

fn by_ref(&mut self) -> &mut Self
where + Self: Sized,

Creates a “by reference” adaptor for this instance of Read. Read more
1.0.0 · source§

fn bytes(self) -> Bytes<Self>
where + Self: Sized,

Transforms this Read instance to an Iterator over its bytes. Read more
1.0.0 · source§

fn chain<R>(self, next: R) -> Chain<Self, R>
where + R: Read, + Self: Sized,

Creates an adapter which will chain this stream with another. Read more
1.0.0 · source§

fn take(self, limit: u64) -> Take<Self>
where + Self: Sized,

Creates an adapter which will read at most limit bytes from it. Read more

Auto Trait Implementations§

§

impl<B> Freeze for Reader<B>
where + B: Freeze,

§

impl<B> RefUnwindSafe for Reader<B>
where + B: RefUnwindSafe,

§

impl<B> Send for Reader<B>
where + B: Send,

§

impl<B> Sync for Reader<B>
where + B: Sync,

§

impl<B> Unpin for Reader<B>
where + B: Unpin,

§

impl<B> UnwindSafe for Reader<B>
where + B: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
\ No newline at end of file diff --git a/bytes/buf/struct.Take.html b/bytes/buf/struct.Take.html new file mode 100644 index 000000000..1e2bc2b12 --- /dev/null +++ b/bytes/buf/struct.Take.html @@ -0,0 +1,99 @@ +Take in bytes::buf - Rust

Struct bytes::buf::Take

source ·
pub struct Take<T> { /* private fields */ }
Expand description

A Buf adapter which limits the bytes read from an underlying buffer.

+

This struct is generally created by calling take() on Buf. See +documentation of take() for more details.

+
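A minimal illustrative sketch (not from the crate's own docs): the adapter caps remaining() at the configured limit.
use bytes::Buf;
+
+let mut buf = b"hello world".take(5);
+
+// Only the first five bytes are visible through the adapter.
+assert_eq!(5, buf.remaining());
+buf.advance(5);
+assert_eq!(0, buf.remaining());
+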

Implementations§

source§

impl<T> Take<T>

source

pub fn into_inner(self) -> T

Consumes this Take, returning the underlying value.

+
§Examples
+
use bytes::{Buf, BufMut};
+
+let mut buf = b"hello world".take(2);
+let mut dst = vec![];
+
+dst.put(&mut buf);
+assert_eq!(*dst, b"he"[..]);
+
+let mut buf = buf.into_inner();
+
+dst.clear();
+dst.put(&mut buf);
+assert_eq!(*dst, b"llo world"[..]);
+
source

pub fn get_ref(&self) -> &T

Gets a reference to the underlying Buf.

+

It is inadvisable to directly read from the underlying Buf.

+
§Examples
+
use bytes::Buf;
+
+let buf = b"hello world".take(2);
+
+assert_eq!(11, buf.get_ref().remaining());
+
source

pub fn get_mut(&mut self) -> &mut T

Gets a mutable reference to the underlying Buf.

+

It is inadvisable to directly read from the underlying Buf.

+
§Examples
+
use bytes::{Buf, BufMut};
+
+let mut buf = b"hello world".take(2);
+let mut dst = vec![];
+
+buf.get_mut().advance(2);
+
+dst.put(&mut buf);
+assert_eq!(*dst, b"ll"[..]);
+
source

pub fn limit(&self) -> usize

Returns the maximum number of bytes that can be read.

+
§Note
+

If the inner Buf has fewer bytes than indicated by this method then +that is the actual number of available bytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = b"hello world".take(2);
+
+assert_eq!(2, buf.limit());
+assert_eq!(b'h', buf.get_u8());
+assert_eq!(1, buf.limit());
+
source

pub fn set_limit(&mut self, lim: usize)

Sets the maximum number of bytes that can be read.

+
§Note
+

If the inner Buf has fewer bytes than lim then that is the actual +number of available bytes.

+
§Examples
+
use bytes::{Buf, BufMut};
+
+let mut buf = b"hello world".take(2);
+let mut dst = vec![];
+
+dst.put(&mut buf);
+assert_eq!(*dst, b"he"[..]);
+
+dst.clear();
+
+buf.set_limit(3);
+dst.put(&mut buf);
+assert_eq!(*dst, b"llo"[..]);
+

Trait Implementations§

source§

impl<T: Buf> Buf for Take<T>

source§

fn remaining(&self) -> usize

Returns the number of bytes between the current position and the end of +the buffer. Read more
source§

fn chunk(&self) -> &[u8]

Returns a slice starting at the current position and of length between 0 +and Buf::remaining(). Note that this can return a shorter slice (this allows +a non-contiguous internal representation). Read more
source§

fn advance(&mut self, cnt: usize)

Advance the internal cursor of the Buf Read more
source§

fn copy_to_bytes(&mut self, len: usize) -> Bytes

Consumes len bytes inside self and returns a new instance of Bytes +with this data. Read more
source§

fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize

Available on crate feature std only.
Fills dst with potentially multiple slices starting at self’s +current position. Read more
source§

fn has_remaining(&self) -> bool

Returns true if there are any more bytes to consume Read more
source§

fn copy_to_slice(&mut self, dst: &mut [u8])

Copies bytes from self into dst. Read more
source§

fn get_u8(&mut self) -> u8

Gets an unsigned 8 bit integer from self. Read more
source§

fn get_i8(&mut self) -> i8

Gets a signed 8 bit integer from self. Read more
source§

fn get_u16(&mut self) -> u16

Gets an unsigned 16 bit integer from self in big-endian byte order. Read more
source§

fn get_u16_le(&mut self) -> u16

Gets an unsigned 16 bit integer from self in little-endian byte order. Read more
source§

fn get_u16_ne(&mut self) -> u16

Gets an unsigned 16 bit integer from self in native-endian byte order. Read more
source§

fn get_i16(&mut self) -> i16

Gets a signed 16 bit integer from self in big-endian byte order. Read more
source§

fn get_i16_le(&mut self) -> i16

Gets a signed 16 bit integer from self in little-endian byte order. Read more
source§

fn get_i16_ne(&mut self) -> i16

Gets a signed 16 bit integer from self in native-endian byte order. Read more
source§

fn get_u32(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the big-endian byte order. Read more
source§

fn get_u32_le(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the little-endian byte order. Read more
source§

fn get_u32_ne(&mut self) -> u32

Gets an unsigned 32 bit integer from self in native-endian byte order. Read more
source§

fn get_i32(&mut self) -> i32

Gets a signed 32 bit integer from self in big-endian byte order. Read more
source§

fn get_i32_le(&mut self) -> i32

Gets a signed 32 bit integer from self in little-endian byte order. Read more
source§

fn get_i32_ne(&mut self) -> i32

Gets a signed 32 bit integer from self in native-endian byte order. Read more
source§

fn get_u64(&mut self) -> u64

Gets an unsigned 64 bit integer from self in big-endian byte order. Read more
source§

fn get_u64_le(&mut self) -> u64

Gets an unsigned 64 bit integer from self in little-endian byte order. Read more
source§

fn get_u64_ne(&mut self) -> u64

Gets an unsigned 64 bit integer from self in native-endian byte order. Read more
source§

fn get_i64(&mut self) -> i64

Gets a signed 64 bit integer from self in big-endian byte order. Read more
source§

fn get_i64_le(&mut self) -> i64

Gets a signed 64 bit integer from self in little-endian byte order. Read more
source§

fn get_i64_ne(&mut self) -> i64

Gets a signed 64 bit integer from self in native-endian byte order. Read more
source§

fn get_u128(&mut self) -> u128

Gets an unsigned 128 bit integer from self in big-endian byte order. Read more
source§

fn get_u128_le(&mut self) -> u128

Gets an unsigned 128 bit integer from self in little-endian byte order. Read more
source§

fn get_u128_ne(&mut self) -> u128

Gets an unsigned 128 bit integer from self in native-endian byte order. Read more
source§

fn get_i128(&mut self) -> i128

Gets a signed 128 bit integer from self in big-endian byte order. Read more
source§

fn get_i128_le(&mut self) -> i128

Gets a signed 128 bit integer from self in little-endian byte order. Read more
source§

fn get_i128_ne(&mut self) -> i128

Gets a signed 128 bit integer from self in native-endian byte order. Read more
source§

fn get_uint(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in big-endian byte order. Read more
source§

fn get_uint_le(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in little-endian byte order. Read more
source§

fn get_uint_ne(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in native-endian byte order. Read more
source§

fn get_int(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in big-endian byte order. Read more
source§

fn get_int_le(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in little-endian byte order. Read more
source§

fn get_int_ne(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in native-endian byte order. Read more
source§

fn get_f32(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f32_le(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f32_ne(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn get_f64(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f64_le(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f64_ne(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn take(self, limit: usize) -> Take<Self>
where + Self: Sized,

Creates an adaptor which will read at most limit bytes from self. Read more
source§

fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adaptor which will chain this buffer with another. Read more
source§

fn reader(self) -> Reader<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Read trait for self. Read more
source§

impl<T: Debug> Debug for Take<T>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

§

impl<T> Freeze for Take<T>
where + T: Freeze,

§

impl<T> RefUnwindSafe for Take<T>
where + T: RefUnwindSafe,

§

impl<T> Send for Take<T>
where + T: Send,

§

impl<T> Sync for Take<T>
where + T: Sync,

§

impl<T> Unpin for Take<T>
where + T: Unpin,

§

impl<T> UnwindSafe for Take<T>
where + T: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
\ No newline at end of file diff --git a/bytes/buf/struct.UninitSlice.html b/bytes/buf/struct.UninitSlice.html new file mode 100644 index 000000000..cff4c6d01 --- /dev/null +++ b/bytes/buf/struct.UninitSlice.html @@ -0,0 +1,106 @@ +UninitSlice in bytes::buf - Rust

Struct bytes::buf::UninitSlice

source ·
pub struct UninitSlice(/* private fields */);
Expand description

Uninitialized byte slice.

+

Returned by BufMut::chunk_mut(), the referenced byte slice may be +uninitialized. The wrapper provides safe access without introducing +undefined behavior.

+

The safety invariants of this wrapper are:

+
  1. Reading from an UninitSlice is undefined behavior.
  2. Writing uninitialized bytes to an UninitSlice is undefined behavior.
+

The difference between &mut UninitSlice and &mut [MaybeUninit<u8>] is +that it is possible in safe code to write uninitialized bytes to an +&mut [MaybeUninit<u8>], which this type prohibits.

+
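The following is a small illustrative sketch (not taken from the crate) of how those invariants look in safe code: the wrapper only ever accepts initialized bytes and never hands them back out.
use bytes::buf::UninitSlice;
+
+let mut storage = [0u8; 8];
+let slice = UninitSlice::new(&mut storage[..]);
+
+// Writes of initialized bytes are safe; there is no safe way to read
+// back through the UninitSlice itself.
+slice[..5].copy_from_slice(b"bytes");
+slice.write_byte(5, b'!');
+
+assert_eq!(&storage[..6], b"bytes!");
+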

Implementations§

source§

impl UninitSlice

source

pub fn new(slice: &mut [u8]) -> &mut UninitSlice

Creates a &mut UninitSlice wrapping a slice of initialised memory.

+
§Examples
+
use bytes::buf::UninitSlice;
+
+let mut buffer = [0u8; 64];
+let slice = UninitSlice::new(&mut buffer[..]);
+
source

pub fn uninit(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice

Creates a &mut UninitSlice wrapping a slice of uninitialised memory.

+
§Examples
+
use bytes::buf::UninitSlice;
+use core::mem::MaybeUninit;
+
+let mut buffer = [MaybeUninit::uninit(); 64];
+let slice = UninitSlice::uninit(&mut buffer[..]);
+
+let mut vec = Vec::with_capacity(1024);
+let spare: &mut UninitSlice = vec.spare_capacity_mut().into();
+
source

pub unsafe fn from_raw_parts_mut<'a>( + ptr: *mut u8, + len: usize, +) -> &'a mut UninitSlice

Create a &mut UninitSlice from a pointer and a length.

+
§Safety
+

The caller must ensure that ptr references a valid memory region owned +by the caller representing a byte slice for the duration of 'a.

+
§Examples
+
use bytes::buf::UninitSlice;
+
+let bytes = b"hello world".to_vec();
+let ptr = bytes.as_ptr() as *mut _;
+let len = bytes.len();
+
+let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) };
+
source

pub fn write_byte(&mut self, index: usize, byte: u8)

Write a single byte at the specified offset.

+
§Panics
+

The function panics if index is out of bounds.

+
§Examples
+
use bytes::buf::UninitSlice;
+
+let mut data = [b'f', b'o', b'o'];
+let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };
+
+slice.write_byte(0, b'b');
+
+assert_eq!(b"boo", &data[..]);
+
source

pub fn copy_from_slice(&mut self, src: &[u8])

Copies bytes from src into self.

+

The length of src must be the same as self.

+
§Panics
+

The function panics if src has a different length than self.

+
§Examples
+
use bytes::buf::UninitSlice;
+
+let mut data = [b'f', b'o', b'o'];
+let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };
+
+slice.copy_from_slice(b"bar");
+
+assert_eq!(b"bar", &data[..]);
+
source

pub fn as_mut_ptr(&mut self) -> *mut u8

Return a raw pointer to the slice’s buffer.

+
§Safety
+

The caller must not read from the referenced memory and must not +write uninitialized bytes to the slice either.

+
§Examples
+
use bytes::BufMut;
+
+let mut data = [0, 1, 2];
+let mut slice = &mut data[..];
+let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr();
+
source

pub unsafe fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<u8>]

Return a &mut [MaybeUninit<u8>] to this slice’s buffer.

+
§Safety
+

The caller must not read from the referenced memory and must not write +uninitialized bytes to the slice either. This is because the BufMut implementation +that created the UninitSlice knows which parts are initialized. Writing uninitialized +bytes to the slice may cause the BufMut to read those bytes and trigger undefined +behavior.

+
§Examples
+
use bytes::BufMut;
+
+let mut data = [0, 1, 2];
+let mut slice = &mut data[..];
+unsafe {
+    let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
+};
+
source

pub fn len(&self) -> usize

Returns the number of bytes in the slice.

+
§Examples
+
use bytes::BufMut;
+
+let mut data = [0, 1, 2];
+let mut slice = &mut data[..];
+let len = BufMut::chunk_mut(&mut slice).len();
+
+assert_eq!(len, 3);
+

Trait Implementations§

source§

impl Debug for UninitSlice

source§

fn fmt(&self, fmt: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl<'a> From<&'a mut [MaybeUninit<u8>]> for &'a mut UninitSlice

source§

fn from(slice: &'a mut [MaybeUninit<u8>]) -> Self

Converts to this type from the input type.
source§

impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice

source§

fn from(slice: &'a mut [u8]) -> Self

Converts to this type from the input type.
source§

impl Index<Range<usize>> for UninitSlice

source§

type Output = UninitSlice

The returned type after indexing.
source§

fn index(&self, index: Range<usize>) -> &UninitSlice

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeFrom<usize>> for UninitSlice

source§

type Output = UninitSlice

The returned type after indexing.
source§

fn index(&self, index: RangeFrom<usize>) -> &UninitSlice

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeFull> for UninitSlice

source§

type Output = UninitSlice

The returned type after indexing.
source§

fn index(&self, index: RangeFull) -> &UninitSlice

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeInclusive<usize>> for UninitSlice

source§

type Output = UninitSlice

The returned type after indexing.
source§

fn index(&self, index: RangeInclusive<usize>) -> &UninitSlice

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeTo<usize>> for UninitSlice

source§

type Output = UninitSlice

The returned type after indexing.
source§

fn index(&self, index: RangeTo<usize>) -> &UninitSlice

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeToInclusive<usize>> for UninitSlice

source§

type Output = UninitSlice

The returned type after indexing.
source§

fn index(&self, index: RangeToInclusive<usize>) -> &UninitSlice

Performs the indexing (container[index]) operation. Read more
source§

impl IndexMut<Range<usize>> for UninitSlice

source§

fn index_mut(&mut self, index: Range<usize>) -> &mut UninitSlice

Performs the mutable indexing (container[index]) operation. Read more
source§

impl IndexMut<RangeFrom<usize>> for UninitSlice

source§

fn index_mut(&mut self, index: RangeFrom<usize>) -> &mut UninitSlice

Performs the mutable indexing (container[index]) operation. Read more
source§

impl IndexMut<RangeFull> for UninitSlice

source§

fn index_mut(&mut self, index: RangeFull) -> &mut UninitSlice

Performs the mutable indexing (container[index]) operation. Read more
source§

impl IndexMut<RangeInclusive<usize>> for UninitSlice

source§

fn index_mut(&mut self, index: RangeInclusive<usize>) -> &mut UninitSlice

Performs the mutable indexing (container[index]) operation. Read more
source§

impl IndexMut<RangeTo<usize>> for UninitSlice

source§

fn index_mut(&mut self, index: RangeTo<usize>) -> &mut UninitSlice

Performs the mutable indexing (container[index]) operation. Read more
source§

impl IndexMut<RangeToInclusive<usize>> for UninitSlice

source§

fn index_mut(&mut self, index: RangeToInclusive<usize>) -> &mut UninitSlice

Performs the mutable indexing (container[index]) operation. Read more

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
\ No newline at end of file diff --git a/bytes/buf/struct.Writer.html b/bytes/buf/struct.Writer.html new file mode 100644 index 000000000..714ee58dc --- /dev/null +++ b/bytes/buf/struct.Writer.html @@ -0,0 +1,55 @@ +Writer in bytes::buf - Rust

Struct bytes::buf::Writer

source ·
pub struct Writer<B> { /* private fields */ }
Expand description

A BufMut adapter which implements io::Write for the inner value.

+

This struct is generally created by calling writer() on BufMut. See +documentation of writer() for more +details.

+

Implementations§

source§

impl<B: BufMut> Writer<B>

source

pub fn get_ref(&self) -> &B

Gets a reference to the underlying BufMut.

+

It is inadvisable to directly write to the underlying BufMut.

+
§Examples
+
use bytes::BufMut;
+
+let buf = Vec::with_capacity(1024).writer();
+
+assert_eq!(1024, buf.get_ref().capacity());
+
source

pub fn get_mut(&mut self) -> &mut B

Gets a mutable reference to the underlying BufMut.

+

It is inadvisable to directly write to the underlying BufMut.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![].writer();
+
+buf.get_mut().reserve(1024);
+
+assert_eq!(1024, buf.get_ref().capacity());
+
source

pub fn into_inner(self) -> B

Consumes this Writer, returning the underlying value.

+
§Examples
+
use bytes::BufMut;
+use std::io;
+
+let mut buf = vec![].writer();
+let mut src = &b"hello world"[..];
+
+io::copy(&mut src, &mut buf).unwrap();
+
+let buf = buf.into_inner();
+assert_eq!(*buf, b"hello world"[..]);
+

Trait Implementations§

source§

impl<B: Debug> Debug for Writer<B>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl<B: BufMut + Sized> Write for Writer<B>

source§

fn write(&mut self, src: &[u8]) -> Result<usize>

Writes a buffer into this writer, returning how many bytes were written. Read more
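As an illustrative sketch (not part of the generated docs), a Writer backed by a growable Vec<u8> accepts the whole slice, so the returned count equals the input length.
use bytes::BufMut;
+use std::io::Write;
+
+let mut writer = vec![].writer();
+
+let n = writer.write(b"hello world").unwrap();
+
+assert_eq!(11, n);
+assert_eq!(writer.into_inner(), b"hello world");
+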
source§

fn flush(&mut self) -> Result<()>

Flushes this output stream, ensuring that all intermediately buffered +contents reach their destination. Read more
1.36.0 · source§

fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> Result<usize, Error>

Like write, except that it writes from a slice of buffers. Read more
source§

fn is_write_vectored(&self) -> bool

🔬This is a nightly-only experimental API. (can_vector)
Determines if this Writer has an efficient write_vectored +implementation. Read more
1.0.0 · source§

fn write_all(&mut self, buf: &[u8]) -> Result<(), Error>

Attempts to write an entire buffer into this writer. Read more
source§

fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> Result<(), Error>

🔬This is a nightly-only experimental API. (write_all_vectored)
Attempts to write multiple buffers into this writer. Read more
1.0.0 · source§

fn write_fmt(&mut self, fmt: Arguments<'_>) -> Result<(), Error>

Writes a formatted string into this writer, returning any error +encountered. Read more
1.0.0 · source§

fn by_ref(&mut self) -> &mut Self
where + Self: Sized,

Creates a “by reference” adapter for this instance of Write. Read more

Auto Trait Implementations§

§

impl<B> Freeze for Writer<B>
where + B: Freeze,

§

impl<B> RefUnwindSafe for Writer<B>
where + B: RefUnwindSafe,

§

impl<B> Send for Writer<B>
where + B: Send,

§

impl<B> Sync for Writer<B>
where + B: Sync,

§

impl<B> Unpin for Writer<B>
where + B: Unpin,

§

impl<B> UnwindSafe for Writer<B>
where + B: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
\ No newline at end of file diff --git a/bytes/buf/take/struct.Take.html b/bytes/buf/take/struct.Take.html new file mode 100644 index 000000000..4272e0b7d --- /dev/null +++ b/bytes/buf/take/struct.Take.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../../bytes/buf/struct.Take.html...

+ + + \ No newline at end of file diff --git a/bytes/buf/trait.Buf.html b/bytes/buf/trait.Buf.html new file mode 100644 index 000000000..8c91fbfd9 --- /dev/null +++ b/bytes/buf/trait.Buf.html @@ -0,0 +1,628 @@ +Buf in bytes::buf - Rust

Trait bytes::buf::Buf

source ·
pub trait Buf {
+
    // Required methods
    fn remaining(&self) -> usize;
    fn chunk(&self) -> &[u8];
    fn advance(&mut self, cnt: usize);

    // Provided methods
    fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { ... }
    fn has_remaining(&self) -> bool { ... }
    fn copy_to_slice(&mut self, dst: &mut [u8]) { ... }
    fn get_u8(&mut self) -> u8 { ... }
    fn get_i8(&mut self) -> i8 { ... }
    fn get_u16(&mut self) -> u16 { ... }
    fn get_u16_le(&mut self) -> u16 { ... }
    fn get_u16_ne(&mut self) -> u16 { ... }
    fn get_i16(&mut self) -> i16 { ... }
    fn get_i16_le(&mut self) -> i16 { ... }
    fn get_i16_ne(&mut self) -> i16 { ... }
    fn get_u32(&mut self) -> u32 { ... }
    fn get_u32_le(&mut self) -> u32 { ... }
    fn get_u32_ne(&mut self) -> u32 { ... }
    fn get_i32(&mut self) -> i32 { ... }
    fn get_i32_le(&mut self) -> i32 { ... }
    fn get_i32_ne(&mut self) -> i32 { ... }
    fn get_u64(&mut self) -> u64 { ... }
    fn get_u64_le(&mut self) -> u64 { ... }
    fn get_u64_ne(&mut self) -> u64 { ... }
    fn get_i64(&mut self) -> i64 { ... }
    fn get_i64_le(&mut self) -> i64 { ... }
    fn get_i64_ne(&mut self) -> i64 { ... }
    fn get_u128(&mut self) -> u128 { ... }
    fn get_u128_le(&mut self) -> u128 { ... }
    fn get_u128_ne(&mut self) -> u128 { ... }
    fn get_i128(&mut self) -> i128 { ... }
    fn get_i128_le(&mut self) -> i128 { ... }
    fn get_i128_ne(&mut self) -> i128 { ... }
    fn get_uint(&mut self, nbytes: usize) -> u64 { ... }
    fn get_uint_le(&mut self, nbytes: usize) -> u64 { ... }
    fn get_uint_ne(&mut self, nbytes: usize) -> u64 { ... }
    fn get_int(&mut self, nbytes: usize) -> i64 { ... }
    fn get_int_le(&mut self, nbytes: usize) -> i64 { ... }
    fn get_int_ne(&mut self, nbytes: usize) -> i64 { ... }
    fn get_f32(&mut self) -> f32 { ... }
    fn get_f32_le(&mut self) -> f32 { ... }
    fn get_f32_ne(&mut self) -> f32 { ... }
    fn get_f64(&mut self) -> f64 { ... }
    fn get_f64_le(&mut self) -> f64 { ... }
    fn get_f64_ne(&mut self) -> f64 { ... }
    fn copy_to_bytes(&mut self, len: usize) -> Bytes { ... }
    fn take(self, limit: usize) -> Take<Self>
       where Self: Sized { ... }
    fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
       where Self: Sized { ... }
    fn reader(self) -> Reader<Self>
       where Self: Sized { ... }
}
Expand description

Read bytes from a buffer.

+

A buffer stores bytes in memory such that read operations are infallible. +The underlying storage may or may not be in contiguous memory. A Buf value +is a cursor into the buffer. Reading from Buf advances the cursor +position. It can be thought of as an efficient Iterator for collections of +bytes.

+

The simplest Buf is a &[u8].

+ +
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(b'h', buf.get_u8());
+assert_eq!(b'e', buf.get_u8());
+assert_eq!(b'l', buf.get_u8());
+
+let mut rest = [0; 8];
+buf.copy_to_slice(&mut rest);
+
+assert_eq!(&rest[..], &b"lo world"[..]);
+

Required Methods§

source

fn remaining(&self) -> usize

Returns the number of bytes between the current position and the end of +the buffer.

+

This value is greater than or equal to the length of the slice returned +by chunk().

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(buf.remaining(), 11);
+
+buf.get_u8();
+
+assert_eq!(buf.remaining(), 10);
+
§Implementer notes
+

Implementations of remaining should ensure that the return value does +not change unless a call is made to advance or any other function that +is documented to change the Buf’s current position.

+
source

fn chunk(&self) -> &[u8]

Returns a slice starting at the current position and of length between 0 +and Buf::remaining(). Note that this can return a shorter slice (this allows +a non-contiguous internal representation).

+

This is a lower level function. Most operations are done with other +functions.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(buf.chunk(), &b"hello world"[..]);
+
+buf.advance(6);
+
+assert_eq!(buf.chunk(), &b"world"[..]);
+
§Implementer notes
+

This function should never panic. chunk() should return an empty +slice if and only if remaining() returns 0. In other words, +chunk() returning an empty slice implies that remaining() will +return 0 and remaining() returning 0 implies that chunk() will +return an empty slice.

+
source

fn advance(&mut self, cnt: usize)

Advance the internal cursor of the Buf

+

The next call to chunk() will return a slice starting cnt bytes +further into the underlying buffer.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(buf.chunk(), &b"hello world"[..]);
+
+buf.advance(6);
+
+assert_eq!(buf.chunk(), &b"world"[..]);
+
§Panics
+

This function may panic if cnt > self.remaining().

+
§Implementer notes
+

It is recommended for implementations of advance to panic if cnt > self.remaining(). If the implementation does not panic, the call must +behave as if cnt == self.remaining().

+

A call with cnt == 0 should never panic and be a no-op.

+

Provided Methods§

source

fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize

Available on crate feature std only.

Fills dst with potentially multiple slices starting at self’s +current position.

+

If the Buf is backed by disjoint slices of bytes, chunks_vectored enables +fetching more than one slice at once. dst is a slice of IoSlice +references, enabling the slice to be directly used with writev +without any further conversion. The sum of the lengths of all the +buffers in dst will be less than or equal to Buf::remaining().

+

The entries in dst will be overwritten, but the data contained by +the slices will not be modified. If chunks_vectored does not fill every +entry in dst, then dst is guaranteed to contain all remaining slices +in self.

+

This is a lower level function. Most operations are done with other +functions.

+
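A rough sketch of the behavior for a contiguous buffer (illustrative, not from the crate's docs): a plain byte slice fills at most one entry.
use bytes::Buf;
+use std::io::IoSlice;
+
+let buf = &b"hello world"[..];
+let mut dst = [IoSlice::new(&[]); 4];
+
+// A contiguous Buf occupies exactly one IoSlice entry.
+assert_eq!(1, buf.chunks_vectored(&mut dst));
+assert_eq!(&*dst[0], b"hello world");
+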
§Implementer notes
+

This function should never panic. Once the end of the buffer is reached, +i.e., Buf::remaining returns 0, calls to chunks_vectored must return 0 +without mutating dst.

+

Implementations should also take care to properly handle being called +with dst being a zero length slice.

+
source

fn has_remaining(&self) -> bool

Returns true if there are any more bytes to consume

+

This is equivalent to self.remaining() != 0.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"a"[..];
+
+assert!(buf.has_remaining());
+
+buf.get_u8();
+
+assert!(!buf.has_remaining());
+
source

fn copy_to_slice(&mut self, dst: &mut [u8])

Copies bytes from self into dst.

+

The cursor is advanced by the number of bytes copied. self must have +enough remaining bytes to fill dst.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+let mut dst = [0; 5];
+
+buf.copy_to_slice(&mut dst);
+assert_eq!(&b"hello"[..], &dst);
+assert_eq!(6, buf.remaining());
+
§Panics
+

This function panics if self.remaining() < dst.len().

+
source

fn get_u8(&mut self) -> u8

Gets an unsigned 8 bit integer from self.

+

The current position is advanced by 1.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08 hello"[..];
+assert_eq!(8, buf.get_u8());
+
§Panics
+

This function panics if there is no more remaining data in self.

+
source

fn get_i8(&mut self) -> i8

Gets a signed 8 bit integer from self.

+

The current position is advanced by 1.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08 hello"[..];
+assert_eq!(8, buf.get_i8());
+
§Panics
+

This function panics if there is no more remaining data in self.

+
source

fn get_u16(&mut self) -> u16

Gets an unsigned 16 bit integer from self in big-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08\x09 hello"[..];
+assert_eq!(0x0809, buf.get_u16());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u16_le(&mut self) -> u16

Gets an unsigned 16 bit integer from self in little-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x09\x08 hello"[..];
+assert_eq!(0x0809, buf.get_u16_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u16_ne(&mut self) -> u16

Gets an unsigned 16 bit integer from self in native-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x08\x09 hello",
+    false => b"\x09\x08 hello",
+};
+assert_eq!(0x0809, buf.get_u16_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i16(&mut self) -> i16

Gets a signed 16 bit integer from self in big-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08\x09 hello"[..];
+assert_eq!(0x0809, buf.get_i16());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i16_le(&mut self) -> i16

Gets a signed 16 bit integer from self in little-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x09\x08 hello"[..];
+assert_eq!(0x0809, buf.get_i16_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i16_ne(&mut self) -> i16

Gets a signed 16 bit integer from self in native-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x08\x09 hello",
+    false => b"\x09\x08 hello",
+};
+assert_eq!(0x0809, buf.get_i16_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u32(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the big-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_u32());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u32_le(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the little-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_u32_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u32_ne(&mut self) -> u32

Gets an unsigned 32 bit integer from self in native-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x08\x09\xA0\xA1 hello",
+    false => b"\xA1\xA0\x09\x08 hello",
+};
+assert_eq!(0x0809A0A1, buf.get_u32_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i32(&mut self) -> i32

Gets a signed 32 bit integer from self in big-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_i32());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i32_le(&mut self) -> i32

Gets a signed 32 bit integer from self in little-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_i32_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i32_ne(&mut self) -> i32

Gets a signed 32 bit integer from self in native-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x08\x09\xA0\xA1 hello",
+    false => b"\xA1\xA0\x09\x08 hello",
+};
+assert_eq!(0x0809A0A1, buf.get_i32_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u64(&mut self) -> u64

Gets an unsigned 64 bit integer from self in big-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_u64());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u64_le(&mut self) -> u64

Gets an unsigned 64 bit integer from self in little-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_u64_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u64_ne(&mut self) -> u64

Gets an unsigned 64 bit integer from self in native-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+    false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x0102030405060708, buf.get_u64_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i64(&mut self) -> i64

Gets a signed 64 bit integer from self in big-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_i64());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i64_le(&mut self) -> i64

Gets a signed 64 bit integer from self in little-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_i64_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i64_ne(&mut self) -> i64

Gets a signed 64 bit integer from self in native-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+    false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x0102030405060708, buf.get_i64_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u128(&mut self) -> u128

Gets an unsigned 128 bit integer from self in big-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_u128());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u128_le(&mut self) -> u128

Gets an unsigned 128 bit integer from self in little-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_u128_ne(&mut self) -> u128

Gets an unsigned 128 bit integer from self in native-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+    false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i128(&mut self) -> i128

Gets a signed 128 bit integer from self in big-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_i128());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i128_le(&mut self) -> i128

Gets a signed 128 bit integer from self in little-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_i128_ne(&mut self) -> i128

Gets a signed 128 bit integer from self in native-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+    false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_uint(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in big-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03 hello"[..];
+assert_eq!(0x010203, buf.get_uint(3));
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_uint_le(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in little-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x03\x02\x01 hello"[..];
+assert_eq!(0x010203, buf.get_uint_le(3));
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_uint_ne(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in native-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x01\x02\x03 hello",
+    false => b"\x03\x02\x01 hello",
+};
+assert_eq!(0x010203, buf.get_uint_ne(3));
+
§Panics
+

This function panics if there is not enough remaining data in self, or +if nbytes is greater than 8.

+
source

fn get_int(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in big-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03 hello"[..];
+assert_eq!(0x010203, buf.get_int(3));
+
§Panics
+

This function panics if there is not enough remaining data in self, or +if nbytes is greater than 8.

+
source

fn get_int_le(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in little-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x03\x02\x01 hello"[..];
+assert_eq!(0x010203, buf.get_int_le(3));
+
§Panics
+

This function panics if there is not enough remaining data in self, or +if nbytes is greater than 8.

+
source

fn get_int_ne(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in native-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x01\x02\x03 hello",
+    false => b"\x03\x02\x01 hello",
+};
+assert_eq!(0x010203, buf.get_int_ne(3));
+
§Panics
+

This function panics if there is not enough remaining data in self, or +if nbytes is greater than 8.

+
source

fn get_f32(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in big-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x3F\x99\x99\x9A hello"[..];
+assert_eq!(1.2f32, buf.get_f32());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_f32_le(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in little-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x9A\x99\x99\x3F hello"[..];
+assert_eq!(1.2f32, buf.get_f32_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_f32_ne(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in native-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x3F\x99\x99\x9A hello",
+    false => b"\x9A\x99\x99\x3F hello",
+};
+assert_eq!(1.2f32, buf.get_f32_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_f64(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in big-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..];
+assert_eq!(1.2f64, buf.get_f64());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_f64_le(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in little-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..];
+assert_eq!(1.2f64, buf.get_f64_le());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn get_f64_ne(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in native-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
+    false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
+};
+assert_eq!(1.2f64, buf.get_f64_ne());
+
§Panics
+

This function panics if there is not enough remaining data in self.

+
source

fn copy_to_bytes(&mut self, len: usize) -> Bytes

Consumes len bytes inside self and returns a new instance of Bytes +with this data.

+

This function may be optimized by the underlying type to avoid actual +copies. For example, the Bytes implementation will do a shallow copy +(ref-count increment).

+
§Examples
+
use bytes::Buf;
+
+let bytes = (&b"hello world"[..]).copy_to_bytes(5);
+assert_eq!(&bytes[..], &b"hello"[..]);
+
§Panics
+

This function panics if len > self.remaining().

+
source

fn take(self, limit: usize) -> Take<Self>
where + Self: Sized,

Creates an adaptor which will read at most limit bytes from self.

+

This function returns a new instance of Buf which will read at most +limit bytes.

+
§Examples
+
use bytes::{Buf, BufMut};
+
+let mut buf = b"hello world"[..].take(5);
+let mut dst = vec![];
+
+dst.put(&mut buf);
+assert_eq!(dst, b"hello");
+
+let mut buf = buf.into_inner();
+dst.clear();
+dst.put(&mut buf);
+assert_eq!(dst, b" world");
+
source

fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adaptor which will chain this buffer with another.

+

The returned Buf instance will first consume all bytes from self. +Afterwards the output is equivalent to the output of next.

+
§Examples
+
use bytes::Buf;
+
+let mut chain = b"hello "[..].chain(&b"world"[..]);
+
+let full = chain.copy_to_bytes(11);
+assert_eq!(full.chunk(), b"hello world");
+
source

fn reader(self) -> Reader<Self>
where + Self: Sized,

Available on crate feature std only.

Creates an adaptor which implements the Read trait for self.

+

This function returns a new value which implements Read by adapting +the Read trait functions to the Buf trait functions. Given that +Buf operations are infallible, none of the Read functions will +return with Err.

+
§Examples
+
use bytes::{Bytes, Buf};
+use std::io::Read;
+
+let buf = Bytes::from("hello world");
+
+let mut reader = buf.reader();
+let mut dst = [0; 1024];
+
+let num = reader.read(&mut dst).unwrap();
+
+assert_eq!(11, num);
+assert_eq!(&dst[..11], &b"hello world"[..]);
+

Implementations on Foreign Types§

source§

impl Buf for &[u8]

source§

fn remaining(&self) -> usize

source§

fn chunk(&self) -> &[u8]

source§

fn advance(&mut self, cnt: usize)

source§

fn copy_to_slice(&mut self, dst: &mut [u8])

source§

impl Buf for VecDeque<u8>

source§

fn remaining(&self) -> usize

source§

fn chunk(&self) -> &[u8]

source§

fn advance(&mut self, cnt: usize)

source§

impl<T: AsRef<[u8]>> Buf for Cursor<T>

source§

fn remaining(&self) -> usize

source§

fn chunk(&self) -> &[u8]

source§

fn advance(&mut self, cnt: usize)

source§

impl<T: Buf + ?Sized> Buf for &mut T

source§

fn remaining(&self) -> usize

source§

fn chunk(&self) -> &[u8]

source§

fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize

Available on crate feature std only.
source§

fn advance(&mut self, cnt: usize)

source§

fn has_remaining(&self) -> bool

source§

fn copy_to_slice(&mut self, dst: &mut [u8])

source§

fn get_u8(&mut self) -> u8

source§

fn get_i8(&mut self) -> i8

source§

fn get_u16(&mut self) -> u16

source§

fn get_u16_le(&mut self) -> u16

source§

fn get_u16_ne(&mut self) -> u16

source§

fn get_i16(&mut self) -> i16

source§

fn get_i16_le(&mut self) -> i16

source§

fn get_i16_ne(&mut self) -> i16

source§

fn get_u32(&mut self) -> u32

source§

fn get_u32_le(&mut self) -> u32

source§

fn get_u32_ne(&mut self) -> u32

source§

fn get_i32(&mut self) -> i32

source§

fn get_i32_le(&mut self) -> i32

source§

fn get_i32_ne(&mut self) -> i32

source§

fn get_u64(&mut self) -> u64

source§

fn get_u64_le(&mut self) -> u64

source§

fn get_u64_ne(&mut self) -> u64

source§

fn get_i64(&mut self) -> i64

source§

fn get_i64_le(&mut self) -> i64

source§

fn get_i64_ne(&mut self) -> i64

source§

fn get_uint(&mut self, nbytes: usize) -> u64

source§

fn get_uint_le(&mut self, nbytes: usize) -> u64

source§

fn get_uint_ne(&mut self, nbytes: usize) -> u64

source§

fn get_int(&mut self, nbytes: usize) -> i64

source§

fn get_int_le(&mut self, nbytes: usize) -> i64

source§

fn get_int_ne(&mut self, nbytes: usize) -> i64

source§

fn copy_to_bytes(&mut self, len: usize) -> Bytes

source§

impl<T: Buf + ?Sized> Buf for Box<T>

source§

fn remaining(&self) -> usize

source§

fn chunk(&self) -> &[u8]

source§

fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize

Available on crate feature std only.
source§

fn advance(&mut self, cnt: usize)

source§

fn has_remaining(&self) -> bool

source§

fn copy_to_slice(&mut self, dst: &mut [u8])

source§

fn get_u8(&mut self) -> u8

source§

fn get_i8(&mut self) -> i8

source§

fn get_u16(&mut self) -> u16

source§

fn get_u16_le(&mut self) -> u16

source§

fn get_u16_ne(&mut self) -> u16

source§

fn get_i16(&mut self) -> i16

source§

fn get_i16_le(&mut self) -> i16

source§

fn get_i16_ne(&mut self) -> i16

source§

fn get_u32(&mut self) -> u32

source§

fn get_u32_le(&mut self) -> u32

source§

fn get_u32_ne(&mut self) -> u32

source§

fn get_i32(&mut self) -> i32

source§

fn get_i32_le(&mut self) -> i32

source§

fn get_i32_ne(&mut self) -> i32

source§

fn get_u64(&mut self) -> u64

source§

fn get_u64_le(&mut self) -> u64

source§

fn get_u64_ne(&mut self) -> u64

source§

fn get_i64(&mut self) -> i64

source§

fn get_i64_le(&mut self) -> i64

source§

fn get_i64_ne(&mut self) -> i64

source§

fn get_uint(&mut self, nbytes: usize) -> u64

source§

fn get_uint_le(&mut self, nbytes: usize) -> u64

source§

fn get_uint_ne(&mut self, nbytes: usize) -> u64

source§

fn get_int(&mut self, nbytes: usize) -> i64

source§

fn get_int_le(&mut self, nbytes: usize) -> i64

source§

fn get_int_ne(&mut self, nbytes: usize) -> i64

source§

fn copy_to_bytes(&mut self, len: usize) -> Bytes

Implementors§

source§

impl Buf for Bytes

source§

impl Buf for BytesMut

source§

impl<T, U> Buf for Chain<T, U>
where + T: Buf, + U: Buf,

source§

impl<T: Buf> Buf for Take<T>

\ No newline at end of file diff --git a/bytes/buf/trait.BufMut.html b/bytes/buf/trait.BufMut.html new file mode 100644 index 000000000..c6dfb061d --- /dev/null +++ b/bytes/buf/trait.BufMut.html @@ -0,0 +1,741 @@ +BufMut in bytes::buf - Rust

Trait bytes::buf::BufMut

source ·
pub unsafe trait BufMut {
+
    // Required methods
    fn remaining_mut(&self) -> usize;
    unsafe fn advance_mut(&mut self, cnt: usize);
    fn chunk_mut(&mut self) -> &mut UninitSlice;

    // Provided methods
    fn has_remaining_mut(&self) -> bool { ... }
    fn put<T: Buf>(&mut self, src: T)
       where Self: Sized { ... }
    fn put_slice(&mut self, src: &[u8]) { ... }
    fn put_bytes(&mut self, val: u8, cnt: usize) { ... }
    fn put_u8(&mut self, n: u8) { ... }
    fn put_i8(&mut self, n: i8) { ... }
    fn put_u16(&mut self, n: u16) { ... }
    fn put_u16_le(&mut self, n: u16) { ... }
    fn put_u16_ne(&mut self, n: u16) { ... }
    fn put_i16(&mut self, n: i16) { ... }
    fn put_i16_le(&mut self, n: i16) { ... }
    fn put_i16_ne(&mut self, n: i16) { ... }
    fn put_u32(&mut self, n: u32) { ... }
    fn put_u32_le(&mut self, n: u32) { ... }
    fn put_u32_ne(&mut self, n: u32) { ... }
    fn put_i32(&mut self, n: i32) { ... }
    fn put_i32_le(&mut self, n: i32) { ... }
    fn put_i32_ne(&mut self, n: i32) { ... }
    fn put_u64(&mut self, n: u64) { ... }
    fn put_u64_le(&mut self, n: u64) { ... }
    fn put_u64_ne(&mut self, n: u64) { ... }
    fn put_i64(&mut self, n: i64) { ... }
    fn put_i64_le(&mut self, n: i64) { ... }
    fn put_i64_ne(&mut self, n: i64) { ... }
    fn put_u128(&mut self, n: u128) { ... }
    fn put_u128_le(&mut self, n: u128) { ... }
    fn put_u128_ne(&mut self, n: u128) { ... }
    fn put_i128(&mut self, n: i128) { ... }
    fn put_i128_le(&mut self, n: i128) { ... }
    fn put_i128_ne(&mut self, n: i128) { ... }
    fn put_uint(&mut self, n: u64, nbytes: usize) { ... }
    fn put_uint_le(&mut self, n: u64, nbytes: usize) { ... }
    fn put_uint_ne(&mut self, n: u64, nbytes: usize) { ... }
    fn put_int(&mut self, n: i64, nbytes: usize) { ... }
    fn put_int_le(&mut self, n: i64, nbytes: usize) { ... }
    fn put_int_ne(&mut self, n: i64, nbytes: usize) { ... }
    fn put_f32(&mut self, n: f32) { ... }
    fn put_f32_le(&mut self, n: f32) { ... }
    fn put_f32_ne(&mut self, n: f32) { ... }
    fn put_f64(&mut self, n: f64) { ... }
    fn put_f64_le(&mut self, n: f64) { ... }
    fn put_f64_ne(&mut self, n: f64) { ... }
    fn limit(self, limit: usize) -> Limit<Self>
       where Self: Sized { ... }
    fn writer(self) -> Writer<Self>
       where Self: Sized { ... }
    fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
       where Self: Sized { ... }
}
Expand description

A trait for values that provide sequential write access to bytes.

+

Write bytes to a buffer

+

A buffer stores bytes in memory such that write operations are infallible. +The underlying storage may or may not be in contiguous memory. A BufMut +value is a cursor into the buffer. Writing to BufMut advances the cursor +position.

+

The simplest BufMut is a Vec<u8>.

+ +
use bytes::BufMut;
+
+let mut buf = vec![];
+
+buf.put(&b"hello world"[..]);
+
+assert_eq!(buf, b"hello world");
+

Required Methods§

source

fn remaining_mut(&self) -> usize

Returns the number of bytes that can be written from the current +position until the end of the buffer is reached.

+

This value is greater than or equal to the length of the slice returned +by chunk_mut().

+

Writing to a BufMut may involve allocating more memory on the fly. +Implementations may fail before reaching the number of bytes indicated +by this method if they encounter an allocation failure.

+
§Examples
+
use bytes::BufMut;
+
+let mut dst = [0; 10];
+let mut buf = &mut dst[..];
+
+let original_remaining = buf.remaining_mut();
+buf.put(&b"hello"[..]);
+
+assert_eq!(original_remaining - 5, buf.remaining_mut());
+
§Implementer notes
+

Implementations of remaining_mut should ensure that the return value +does not change unless a call is made to advance_mut or any other +function that is documented to change the BufMut’s current position.

+
§Note
+

remaining_mut may return a value smaller than the actual available space.

+
source

unsafe fn advance_mut(&mut self, cnt: usize)

Advance the internal cursor of the BufMut

+

The next call to chunk_mut will return a slice starting cnt bytes +further into the underlying buffer.

+
§Safety
+

The caller must ensure that the next cnt bytes of chunk are +initialized.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = Vec::with_capacity(16);
+
+// Write some data
+buf.chunk_mut()[0..2].copy_from_slice(b"he");
+unsafe { buf.advance_mut(2) };
+
+// write more bytes
+buf.chunk_mut()[0..3].copy_from_slice(b"llo");
+
+unsafe { buf.advance_mut(3); }
+
+assert_eq!(5, buf.len());
+assert_eq!(buf, b"hello");
+
§Panics
+

This function may panic if cnt > self.remaining_mut().

+
§Implementer notes
+

It is recommended for implementations of advance_mut to panic if +cnt > self.remaining_mut(). If the implementation does not panic, +the call must behave as if cnt == self.remaining_mut().

+

A call with cnt == 0 should never panic and be a no-op.

+
source

fn chunk_mut(&mut self) -> &mut UninitSlice

Returns a mutable slice starting at the current BufMut position and of length between 0 and BufMut::remaining_mut(). Note that this can be shorter than the whole remainder of the buffer (this allows non-contiguous implementations).

+

This is a lower level function. Most operations are done with other +functions.

+

The returned byte slice may represent uninitialized memory.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = Vec::with_capacity(16);
+
+unsafe {
+    // MaybeUninit::as_mut_ptr
+    buf.chunk_mut()[0..].as_mut_ptr().write(b'h');
+    buf.chunk_mut()[1..].as_mut_ptr().write(b'e');
+
+    buf.advance_mut(2);
+
+    buf.chunk_mut()[0..].as_mut_ptr().write(b'l');
+    buf.chunk_mut()[1..].as_mut_ptr().write(b'l');
+    buf.chunk_mut()[2..].as_mut_ptr().write(b'o');
+
+    buf.advance_mut(3);
+}
+
+assert_eq!(5, buf.len());
+assert_eq!(buf, b"hello");
+
§Implementer notes
+

This function should never panic. chunk_mut() should return an empty +slice if and only if remaining_mut() returns 0. In other words, +chunk_mut() returning an empty slice implies that remaining_mut() will +return 0 and remaining_mut() returning 0 implies that chunk_mut() will +return an empty slice.

+
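A brief illustration of this if-and-only-if relationship, using a plain mutable byte slice as the BufMut (an implementation where the invariant is easy to observe):

use bytes::BufMut;

let mut storage = [0u8; 3];
let mut buf = &mut storage[..];

// Exhaust the buffer.
buf.put_slice(b"abc");

// Once nothing remains, the writable chunk is empty, and vice versa.
assert_eq!(buf.remaining_mut(), 0);
assert_eq!(buf.chunk_mut().len(), 0);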

This function may trigger an out-of-memory abort if it tries to allocate +memory and fails to do so.

+

Provided Methods§

source

fn has_remaining_mut(&self) -> bool

Returns true if there is space in self for more bytes.

+

This is equivalent to self.remaining_mut() != 0.

+
§Examples
+
use bytes::BufMut;
+
+let mut dst = [0; 5];
+let mut buf = &mut dst[..];
+
+assert!(buf.has_remaining_mut());
+
+buf.put(&b"hello"[..]);
+
+assert!(!buf.has_remaining_mut());
+
source

fn put<T: Buf>(&mut self, src: T)
where + Self: Sized,

Transfer bytes into self from src and advance the cursor by the +number of bytes written.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+
+buf.put_u8(b'h');
+buf.put(&b"ello"[..]);
+buf.put(&b" world"[..]);
+
+assert_eq!(buf, b"hello world");
+
§Panics
+

Panics if self does not have enough capacity to contain src.

+
source

fn put_slice(&mut self, src: &[u8])

Transfer bytes into self from src and advance the cursor by the +number of bytes written.

+

self must have enough remaining capacity to contain all of src.

+ +
use bytes::BufMut;
+
+let mut dst = [0; 6];
+
+{
+    let mut buf = &mut dst[..];
+    buf.put_slice(b"hello");
+
+    assert_eq!(1, buf.remaining_mut());
+}
+
+assert_eq!(b"hello\0", &dst);
+
source

fn put_bytes(&mut self, val: u8, cnt: usize)

Put cnt bytes val into self.

+

Logically equivalent to calling self.put_u8(val) cnt times, but may work faster.

+

self must have at least cnt remaining capacity.

+ +
use bytes::BufMut;
+
+let mut dst = [0; 6];
+
+{
+    let mut buf = &mut dst[..];
+    buf.put_bytes(b'a', 4);
+
+    assert_eq!(2, buf.remaining_mut());
+}
+
+assert_eq!(b"aaaa\0\0", &dst);
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u8(&mut self, n: u8)

Writes an unsigned 8 bit integer to self.

+

The current position is advanced by 1.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u8(0x01);
+assert_eq!(buf, b"\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i8(&mut self, n: i8)

Writes a signed 8 bit integer to self.

+

The current position is advanced by 1.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i8(0x01);
+assert_eq!(buf, b"\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u16(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in big-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u16(0x0809);
+assert_eq!(buf, b"\x08\x09");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u16_le(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in little-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u16_le(0x0809);
+assert_eq!(buf, b"\x09\x08");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u16_ne(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in native-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u16_ne(0x0809);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x08\x09");
+} else {
+    assert_eq!(buf, b"\x09\x08");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i16(&mut self, n: i16)

Writes a signed 16 bit integer to self in big-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i16(0x0809);
+assert_eq!(buf, b"\x08\x09");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i16_le(&mut self, n: i16)

Writes a signed 16 bit integer to self in little-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i16_le(0x0809);
+assert_eq!(buf, b"\x09\x08");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i16_ne(&mut self, n: i16)

Writes a signed 16 bit integer to self in native-endian byte order.

+

The current position is advanced by 2.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i16_ne(0x0809);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x08\x09");
+} else {
+    assert_eq!(buf, b"\x09\x08");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u32(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in big-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u32(0x0809A0A1);
+assert_eq!(buf, b"\x08\x09\xA0\xA1");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u32_le(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in little-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u32_le(0x0809A0A1);
+assert_eq!(buf, b"\xA1\xA0\x09\x08");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u32_ne(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in native-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u32_ne(0x0809A0A1);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x08\x09\xA0\xA1");
+} else {
+    assert_eq!(buf, b"\xA1\xA0\x09\x08");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i32(&mut self, n: i32)

Writes a signed 32 bit integer to self in big-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i32(0x0809A0A1);
+assert_eq!(buf, b"\x08\x09\xA0\xA1");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i32_le(&mut self, n: i32)

Writes a signed 32 bit integer to self in little-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i32_le(0x0809A0A1);
+assert_eq!(buf, b"\xA1\xA0\x09\x08");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i32_ne(&mut self, n: i32)

Writes a signed 32 bit integer to self in native-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i32_ne(0x0809A0A1);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x08\x09\xA0\xA1");
+} else {
+    assert_eq!(buf, b"\xA1\xA0\x09\x08");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u64(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in big-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u64(0x0102030405060708);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u64_le(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in little-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u64_le(0x0102030405060708);
+assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u64_ne(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in native-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u64_ne(0x0102030405060708);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+} else {
+    assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i64(&mut self, n: i64)

Writes a signed 64 bit integer to self in big-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i64(0x0102030405060708);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i64_le(&mut self, n: i64)

Writes a signed 64 bit integer to self in little-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i64_le(0x0102030405060708);
+assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i64_ne(&mut self, n: i64)

Writes a signed 64 bit integer to self in native-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i64_ne(0x0102030405060708);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+} else {
+    assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u128(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in big-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u128(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u128_le(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in little-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u128_le(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_u128_ne(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in native-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u128_ne(0x01020304050607080910111213141516);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+} else {
+    assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i128(&mut self, n: i128)

Writes a signed 128 bit integer to self in big-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i128(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i128_le(&mut self, n: i128)

Writes a signed 128 bit integer to self in little-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i128_le(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_i128_ne(&mut self, n: i128)

Writes a signed 128 bit integer to self in native-endian byte order.

+

The current position is advanced by 16.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i128_ne(0x01020304050607080910111213141516);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+} else {
+    assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_uint(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in big-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint(0x010203, 3);
+assert_eq!(buf, b"\x01\x02\x03");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self or if nbytes is greater than 8.

+
source

fn put_uint_le(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in little-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint_le(0x010203, 3);
+assert_eq!(buf, b"\x03\x02\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self or if nbytes is greater than 8.

+
source

fn put_uint_ne(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in native-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint_ne(0x010203, 3);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x01\x02\x03");
+} else {
+    assert_eq!(buf, b"\x03\x02\x01");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self or if nbytes is greater than 8.

+
source

fn put_int(&mut self, n: i64, nbytes: usize)

Writes the low nbytes of a signed integer to self in big-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int(0x0504010203, 3);
+assert_eq!(buf, b"\x01\x02\x03");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self or if nbytes is greater than 8.

+
source

fn put_int_le(&mut self, n: i64, nbytes: usize)

Writes the low nbytes of a signed integer to self in little-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int_le(0x0504010203, 3);
+assert_eq!(buf, b"\x03\x02\x01");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self or if nbytes is greater than 8.

+
source

fn put_int_ne(&mut self, n: i64, nbytes: usize)

Writes the low nbytes of a signed integer to self in native-endian byte order.

+

The current position is advanced by nbytes.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int_ne(0x010203, 3);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x01\x02\x03");
+} else {
+    assert_eq!(buf, b"\x03\x02\x01");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self or if nbytes is greater than 8.

+
source

fn put_f32(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in big-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32(1.2f32);
+assert_eq!(buf, b"\x3F\x99\x99\x9A");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_f32_le(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in little-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32_le(1.2f32);
+assert_eq!(buf, b"\x9A\x99\x99\x3F");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_f32_ne(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in native-endian byte order.

+

The current position is advanced by 4.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32_ne(1.2f32);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x3F\x99\x99\x9A");
+} else {
+    assert_eq!(buf, b"\x9A\x99\x99\x3F");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_f64(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in big-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64(1.2f64);
+assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_f64_le(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in little-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64_le(1.2f64);
+assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn put_f64_ne(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in native-endian byte order.

+

The current position is advanced by 8.

+
§Examples
+
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64_ne(1.2f64);
+if cfg!(target_endian = "big") {
+    assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+} else {
+    assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+}
+
§Panics
+

This function panics if there is not enough remaining capacity in +self.

+
source

fn limit(self, limit: usize) -> Limit<Self>
where + Self: Sized,

Creates an adaptor which can write at most limit bytes to self.

+
§Examples
+
use bytes::BufMut;
+
+let arr = &mut [0u8; 128][..];
+assert_eq!(arr.remaining_mut(), 128);
+
+let dst = arr.limit(10);
+assert_eq!(dst.remaining_mut(), 10);
+
source

fn writer(self) -> Writer<Self>
where + Self: Sized,

Available on crate feature std only.

Creates an adaptor which implements the Write trait for self.

+

This function returns a new value which implements Write by adapting +the Write trait functions to the BufMut trait functions. Given that +BufMut operations are infallible, none of the Write functions will +return with Err.

+
§Examples
+
use bytes::BufMut;
+use std::io::Write;
+
+let mut buf = vec![].writer();
+
+let num = buf.write(&b"hello world"[..]).unwrap();
+assert_eq!(11, num);
+
+let buf = buf.into_inner();
+
+assert_eq!(*buf, b"hello world"[..]);
+
source

fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adapter which will chain this buffer with another.

+

The returned BufMut instance will first write to all bytes from +self. Afterwards, it will write to next.

+
§Examples
+
use bytes::BufMut;
+
+let mut a = [0u8; 5];
+let mut b = [0u8; 6];
+
+let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
+
+chain.put_slice(b"hello world");
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b" world");
+

Implementations on Foreign Types§

source§

impl BufMut for &mut [u8]

source§

fn remaining_mut(&self) -> usize

source§

fn chunk_mut(&mut self) -> &mut UninitSlice

source§

unsafe fn advance_mut(&mut self, cnt: usize)

source§

fn put_slice(&mut self, src: &[u8])

source§

fn put_bytes(&mut self, val: u8, cnt: usize)

source§

impl BufMut for &mut [MaybeUninit<u8>]

source§

fn remaining_mut(&self) -> usize

source§

fn chunk_mut(&mut self) -> &mut UninitSlice

source§

unsafe fn advance_mut(&mut self, cnt: usize)

source§

fn put_slice(&mut self, src: &[u8])

source§

fn put_bytes(&mut self, val: u8, cnt: usize)

source§

impl BufMut for Vec<u8>

source§

fn remaining_mut(&self) -> usize

source§

unsafe fn advance_mut(&mut self, cnt: usize)

source§

fn chunk_mut(&mut self) -> &mut UninitSlice

source§

fn put<T: Buf>(&mut self, src: T)
where + Self: Sized,

source§

fn put_slice(&mut self, src: &[u8])

source§

fn put_bytes(&mut self, val: u8, cnt: usize)

source§

impl<T: BufMut + ?Sized> BufMut for &mut T

source§

fn remaining_mut(&self) -> usize

source§

fn chunk_mut(&mut self) -> &mut UninitSlice

source§

unsafe fn advance_mut(&mut self, cnt: usize)

source§

fn put_slice(&mut self, src: &[u8])

source§

fn put_u8(&mut self, n: u8)

source§

fn put_i8(&mut self, n: i8)

source§

fn put_u16(&mut self, n: u16)

source§

fn put_u16_le(&mut self, n: u16)

source§

fn put_u16_ne(&mut self, n: u16)

source§

fn put_i16(&mut self, n: i16)

source§

fn put_i16_le(&mut self, n: i16)

source§

fn put_i16_ne(&mut self, n: i16)

source§

fn put_u32(&mut self, n: u32)

source§

fn put_u32_le(&mut self, n: u32)

source§

fn put_u32_ne(&mut self, n: u32)

source§

fn put_i32(&mut self, n: i32)

source§

fn put_i32_le(&mut self, n: i32)

source§

fn put_i32_ne(&mut self, n: i32)

source§

fn put_u64(&mut self, n: u64)

source§

fn put_u64_le(&mut self, n: u64)

source§

fn put_u64_ne(&mut self, n: u64)

source§

fn put_i64(&mut self, n: i64)

source§

fn put_i64_le(&mut self, n: i64)

source§

fn put_i64_ne(&mut self, n: i64)

source§

impl<T: BufMut + ?Sized> BufMut for Box<T>

source§

fn remaining_mut(&self) -> usize

source§

fn chunk_mut(&mut self) -> &mut UninitSlice

source§

unsafe fn advance_mut(&mut self, cnt: usize)

source§

fn put_slice(&mut self, src: &[u8])

source§

fn put_u8(&mut self, n: u8)

source§

fn put_i8(&mut self, n: i8)

source§

fn put_u16(&mut self, n: u16)

source§

fn put_u16_le(&mut self, n: u16)

source§

fn put_u16_ne(&mut self, n: u16)

source§

fn put_i16(&mut self, n: i16)

source§

fn put_i16_le(&mut self, n: i16)

source§

fn put_i16_ne(&mut self, n: i16)

source§

fn put_u32(&mut self, n: u32)

source§

fn put_u32_le(&mut self, n: u32)

source§

fn put_u32_ne(&mut self, n: u32)

source§

fn put_i32(&mut self, n: i32)

source§

fn put_i32_le(&mut self, n: i32)

source§

fn put_i32_ne(&mut self, n: i32)

source§

fn put_u64(&mut self, n: u64)

source§

fn put_u64_le(&mut self, n: u64)

source§

fn put_u64_ne(&mut self, n: u64)

source§

fn put_i64(&mut self, n: i64)

source§

fn put_i64_le(&mut self, n: i64)

source§

fn put_i64_ne(&mut self, n: i64)

Implementors§

source§

impl BufMut for BytesMut

source§

impl<T, U> BufMut for Chain<T, U>
where + T: BufMut, + U: BufMut,

source§

impl<T: BufMut> BufMut for Limit<T>

\ No newline at end of file diff --git a/bytes/buf/uninit_slice/struct.UninitSlice.html b/bytes/buf/uninit_slice/struct.UninitSlice.html new file mode 100644 index 000000000..576f5ff1b --- /dev/null +++ b/bytes/buf/uninit_slice/struct.UninitSlice.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../../bytes/buf/struct.UninitSlice.html...

+ + + \ No newline at end of file diff --git a/bytes/buf/writer/struct.Writer.html b/bytes/buf/writer/struct.Writer.html new file mode 100644 index 000000000..3a3ba29ac --- /dev/null +++ b/bytes/buf/writer/struct.Writer.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../../bytes/buf/struct.Writer.html...

+ + + \ No newline at end of file diff --git a/bytes/bytes/struct.Bytes.html b/bytes/bytes/struct.Bytes.html new file mode 100644 index 000000000..4b3bc3a80 --- /dev/null +++ b/bytes/bytes/struct.Bytes.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../bytes/struct.Bytes.html...

+ + + \ No newline at end of file diff --git a/bytes/bytes_mut/struct.BytesMut.html b/bytes/bytes_mut/struct.BytesMut.html new file mode 100644 index 000000000..45ba8f32e --- /dev/null +++ b/bytes/bytes_mut/struct.BytesMut.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../bytes/struct.BytesMut.html...

+ + + \ No newline at end of file diff --git a/bytes/index.html b/bytes/index.html new file mode 100644 index 000000000..bde590db1 --- /dev/null +++ b/bytes/index.html @@ -0,0 +1,50 @@ +bytes - Rust

Crate bytes

source ·
Expand description

Provides abstractions for working with bytes.

+

The bytes crate provides an efficient byte buffer structure +(Bytes) and traits for working with buffer +implementations (Buf, BufMut).

+

§Bytes

+

Bytes is an efficient container for storing and operating on contiguous +slices of memory. It is intended for use primarily in networking code, but +could have applications elsewhere as well.

+

Bytes values facilitate zero-copy network programming by allowing multiple +Bytes objects to point to the same underlying memory. This is managed by +using a reference count to track when the memory is no longer needed and can +be freed.

+

A Bytes handle can be created directly from an existing byte store (such as &[u8] +or Vec<u8>), but usually a BytesMut is used first and written to. For +example:

+ +
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(1024);
+buf.put(&b"hello world"[..]);
+buf.put_u16(1234);
+
+let a = buf.split();
+assert_eq!(a, b"hello world\x04\xD2"[..]);
+
+buf.put(&b"goodbye world"[..]);
+
+let b = buf.split();
+assert_eq!(b, b"goodbye world"[..]);
+
+assert_eq!(buf.capacity(), 998);
+

In the above example, only a single buffer of 1024 is allocated. The handles +a and b will share the underlying buffer and maintain indices tracking +the view into the buffer represented by the handle.

+

See the struct docs for more details.

+

§Buf, BufMut

+

These two traits provide read and write access to buffers. The underlying +storage may or may not be in contiguous memory. For example, Bytes is a +buffer that guarantees contiguous memory, but a rope stores the bytes in +disjoint chunks. Buf and BufMut maintain cursors tracking the current +position in the underlying byte storage. When bytes are read or written, the +cursor is advanced.

+
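A minimal sketch of this cursor behaviour: reading through Buf advances the read position, and writing through BufMut advances the write position.

use bytes::{Buf, BufMut};

// Reading: the cursor of a `&[u8]` moves forward as bytes are consumed.
let mut src: &[u8] = b"abc";
assert_eq!(src.get_u8(), b'a');
assert_eq!(src.remaining(), 2);

// Writing: the cursor of a `Vec<u8>` moves forward as bytes are appended.
let mut dst = vec![];
dst.put_u8(b'x');
dst.put_slice(b"yz");
assert_eq!(dst, b"xyz");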

§Relation with Read and Write

+

At first glance, it may seem that Buf and BufMut overlap in +functionality with std::io::Read and std::io::Write. However, they +serve different purposes. A buffer is the value that is provided as an +argument to Read::read and Write::write. Read and Write may then +perform a syscall, which has the potential of failing. Operations on Buf +and BufMut are infallible.

+
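A small sketch of the bridge between the two worlds, assuming the crate's default std feature: BufMut::writer() adapts an infallible buffer into a std::io::Write, so the io::Result is always Ok when the target is a growable Vec<u8>.

use bytes::BufMut;
use std::io::Write;

// Adapt an infallible BufMut into a fallible io::Write.
let mut dst = Vec::<u8>::new().writer();

// Writing to a growable Vec cannot fail, so unwrap never panics here.
dst.write_all(b"ping").unwrap();

assert_eq!(dst.into_inner(), b"ping");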

Re-exports§

  • pub use crate::buf::Buf;
  • pub use crate::buf::BufMut;

Modules§

  • Utilities for working with buffers.

Structs§

  • A cheaply cloneable and sliceable chunk of contiguous memory.
  • A unique reference to a contiguous slice of memory.
\ No newline at end of file diff --git a/bytes/sidebar-items.js b/bytes/sidebar-items.js new file mode 100644 index 000000000..1ca6a74c5 --- /dev/null +++ b/bytes/sidebar-items.js @@ -0,0 +1 @@ +window.SIDEBAR_ITEMS = {"mod":["buf"],"struct":["Bytes","BytesMut"]}; \ No newline at end of file diff --git a/bytes/struct.Bytes.html b/bytes/struct.Bytes.html new file mode 100644 index 000000000..f2fda980a --- /dev/null +++ b/bytes/struct.Bytes.html @@ -0,0 +1,1433 @@ +Bytes in bytes - Rust

Struct bytes::Bytes

source ·
pub struct Bytes { /* private fields */ }
Expand description

A cheaply cloneable and sliceable chunk of contiguous memory.

+

Bytes is an efficient container for storing and operating on contiguous +slices of memory. It is intended for use primarily in networking code, but +could have applications elsewhere as well.

+

Bytes values facilitate zero-copy network programming by allowing multiple +Bytes objects to point to the same underlying memory.

+

Bytes does not have a single implementation. It is an interface, whose +exact behavior is implemented through dynamic dispatch in several underlying +implementations of Bytes.

+

All Bytes implementations must fulfill the following requirements:

+
    +
  • They are cheaply cloneable and thereby shareable between an unlimited number of components, for example by modifying a reference count.
  • +
  • Instances can be sliced to refer to a subset of the original buffer.
  • +
+ +
use bytes::Bytes;
+
+let mut mem = Bytes::from("Hello world");
+let a = mem.slice(0..5);
+
+assert_eq!(a, "Hello");
+
+let b = mem.split_to(6);
+
+assert_eq!(mem, "world");
+assert_eq!(b, "Hello ");
+

§Memory layout

+

The Bytes struct itself is fairly small, limited to 4 usize fields used +to track information about which segment of the underlying memory the +Bytes handle has access to.

+

Bytes keeps both a pointer to the shared state containing the full memory +slice and a pointer to the start of the region visible by the handle. +Bytes also tracks the length of its view into the memory.

+
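As a rough, hedged check of the “4 usize fields” description (an implementation detail rather than an API guarantee), the handle itself is four machine words wide:

use bytes::Bytes;

// Size of the handle, not of the data it points to.
assert_eq!(
    std::mem::size_of::<Bytes>(),
    4 * std::mem::size_of::<usize>(),
);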

§Sharing

+

Bytes contains a vtable, which allows implementations of Bytes to define +how sharing/cloning is implemented in detail. +When Bytes::clone() is called, Bytes will call the vtable function for +cloning the backing storage in order to share it behind multiple Bytes +instances.

+

For Bytes implementations which refer to constant memory (e.g. created +via Bytes::from_static()) the cloning implementation will be a no-op.

+

For Bytes implementations which point to a reference counted shared storage +(e.g. an Arc<[u8]>), sharing will be implemented by increasing the +reference count.

+

Due to this mechanism, multiple Bytes instances may point to the same +shared memory region. +Each Bytes instance can point to different sections within that +memory region, and Bytes instances may or may not have overlapping views +into the memory.

+

The following diagram visualizes a scenario where 2 Bytes instances make +use of an Arc-based backing storage, and provide access to different views:

+

+   Arc ptrs                   ┌─────────┐
+   ________________________ / │ Bytes 2 │
+  /                           └─────────┘
+ /          ┌───────────┐     |         |
+|_________/ │  Bytes 1  │     |         |
+|           └───────────┘     |         |
+|           |           | ___/ data     | tail
+|      data |      tail |/              |
+v           v           v               v
+┌─────┬─────┬───────────┬───────────────┬─────┐
+│ Arc │     │           │               │     │
+└─────┴─────┴───────────┴───────────────┴─────┘
+
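A small sketch of this sharing, assuming a reference-counted backing storage such as one created from a Vec<u8>: cloning only adjusts the reference count, and both handles view the same underlying memory.

use bytes::Bytes;

let a = Bytes::from(vec![1u8, 2, 3, 4]);
let b = a.clone();

// Same contents, and (for this reference-counted representation)
// the same starting address within the shared allocation.
assert_eq!(a, b);
assert_eq!(a.as_ptr(), b.as_ptr());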

Implementations§

source§

impl Bytes

source

pub const fn new() -> Self

Creates a new empty Bytes.

+

This will not allocate and the returned Bytes handle will be empty.

+
§Examples
+
use bytes::Bytes;
+
+let b = Bytes::new();
+assert_eq!(&b[..], b"");
+
source

pub const fn from_static(bytes: &'static [u8]) -> Self

Creates a new Bytes from a static slice.

+

The returned Bytes will point directly to the static slice. There is +no allocating or copying.

+
§Examples
+
use bytes::Bytes;
+
+let b = Bytes::from_static(b"hello");
+assert_eq!(&b[..], b"hello");
+
source

pub const fn len(&self) -> usize

Returns the number of bytes contained in this Bytes.

+
§Examples
+
use bytes::Bytes;
+
+let b = Bytes::from(&b"hello"[..]);
+assert_eq!(b.len(), 5);
+
source

pub const fn is_empty(&self) -> bool

Returns true if the Bytes has a length of 0.

+
§Examples
+
use bytes::Bytes;
+
+let b = Bytes::new();
+assert!(b.is_empty());
+
source

pub fn is_unique(&self) -> bool

Returns true if this is the only reference to the data.

+

Always returns false if the data is backed by a static slice.

+

The result of this method may be invalidated immediately if another thread clones this value while this is being called. Ensure you have unique access to this value (&mut Bytes) first if you need to be certain the result is valid (i.e. for safety reasons).

+
§Examples
+
use bytes::Bytes;
+
+let a = Bytes::from(vec![1, 2, 3]);
+assert!(a.is_unique());
+let b = a.clone();
+assert!(!a.is_unique());
+
source

pub fn copy_from_slice(data: &[u8]) -> Self

Creates a new Bytes instance from a slice, by copying it.

+
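A short usage sketch: the bytes are copied into a new, independently owned Bytes, so the source slice is not borrowed afterwards.

use bytes::Bytes;

let b = Bytes::copy_from_slice(b"hello");
assert_eq!(&b[..], b"hello");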
source

pub fn slice(&self, range: impl RangeBounds<usize>) -> Self

Returns a slice of self for the provided range.

+

This will increment the reference count for the underlying memory and +return a new Bytes handle set to the slice.

+

This operation is O(1).

+
§Examples
+
use bytes::Bytes;
+
+let a = Bytes::from(&b"hello world"[..]);
+let b = a.slice(2..5);
+
+assert_eq!(&b[..], b"llo");
+
§Panics
+

Requires that begin <= end and end <= self.len(), otherwise slicing +will panic.

+
source

pub fn slice_ref(&self, subset: &[u8]) -> Self

Returns a slice of self that is equivalent to the given subset.

+

When processing a Bytes buffer with other tools, one often gets a +&[u8] which is in fact a slice of the Bytes, i.e. a subset of it. +This function turns that &[u8] into another Bytes, as if one had +called self.slice() with the offsets that correspond to subset.

+

This operation is O(1).

+
§Examples
+
use bytes::Bytes;
+
+let bytes = Bytes::from(&b"012345678"[..]);
+let as_slice = bytes.as_ref();
+let subset = &as_slice[2..6];
+let subslice = bytes.slice_ref(&subset);
+assert_eq!(&subslice[..], b"2345");
+
§Panics
+

Requires that the given sub slice is in fact contained within the +Bytes buffer; otherwise this function will panic.

+
source

pub fn split_off(&mut self, at: usize) -> Self

Splits the bytes into two at the given index.

+

Afterwards self contains elements [0, at), and the returned Bytes +contains elements [at, len).

+

This is an O(1) operation that just increases the reference count and +sets a few indices.

+
§Examples
+
use bytes::Bytes;
+
+let mut a = Bytes::from(&b"hello world"[..]);
+let b = a.split_off(5);
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b" world");
+
§Panics
+

Panics if at > len.

+
source

pub fn split_to(&mut self, at: usize) -> Self

Splits the bytes into two at the given index.

+

Afterwards self contains elements [at, len), and the returned +Bytes contains elements [0, at).

+

This is an O(1) operation that just increases the reference count and +sets a few indices.

+
§Examples
+
use bytes::Bytes;
+
+let mut a = Bytes::from(&b"hello world"[..]);
+let b = a.split_to(5);
+
+assert_eq!(&a[..], b" world");
+assert_eq!(&b[..], b"hello");
+
§Panics
+

Panics if at > len.

+
source

pub fn truncate(&mut self, len: usize)

Shortens the buffer, keeping the first len bytes and dropping the +rest.

+

If len is greater than the buffer’s current length, this has no +effect.

+

The split_off method can emulate truncate, but this causes the +excess bytes to be returned instead of dropped.

+
§Examples
+
use bytes::Bytes;
+
+let mut buf = Bytes::from(&b"hello world"[..]);
+buf.truncate(5);
+assert_eq!(buf, b"hello"[..]);
+
source

pub fn clear(&mut self)

Clears the buffer, removing all data.

+
§Examples
+
use bytes::Bytes;
+
+let mut buf = Bytes::from(&b"hello world"[..]);
+buf.clear();
+assert!(buf.is_empty());
+
source

pub fn try_into_mut(self) -> Result<BytesMut, Bytes>

Try to convert self into BytesMut.

+

If self is unique for the entire original buffer, this will succeed +and return a BytesMut with the contents of self without copying. +If self is not unique for the entire original buffer, this will fail +and return self.

+
§Examples
+
use bytes::{Bytes, BytesMut};
+
+let bytes = Bytes::from(b"hello".to_vec());
+assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
+

Methods from Deref<Target = [u8]>§

source

pub fn as_str(&self) -> &str

🔬This is a nightly-only experimental API. (ascii_char)

Views this slice of ASCII characters as a UTF-8 str.

+
source

pub fn as_bytes(&self) -> &[u8]

🔬This is a nightly-only experimental API. (ascii_char)

Views this slice of ASCII characters as a slice of u8 bytes.

+
1.23.0 · source

pub fn is_ascii(&self) -> bool

Checks if all bytes in this slice are within the ASCII range.

+
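A short illustration: the check passes only when every byte falls within the ASCII range.

let ascii = b"hello!\n";
let non_ascii = "Grüße!".as_bytes();

assert!(ascii.is_ascii());
assert!(!non_ascii.is_ascii());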
source

pub fn as_ascii(&self) -> Option<&[AsciiChar]>

🔬This is a nightly-only experimental API. (ascii_char)

If this slice is_ascii, returns it as a slice of +ASCII characters, otherwise returns None.

+
source

pub unsafe fn as_ascii_unchecked(&self) -> &[AsciiChar]

🔬This is a nightly-only experimental API. (ascii_char)

Converts this slice of bytes into a slice of ASCII characters, +without checking whether they’re valid.

+
§Safety
+

Every byte in the slice must be in 0..=127, or else this is UB.

+
1.23.0 · source

pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool

Checks that two slices are an ASCII case-insensitive match.

+

Same as to_ascii_lowercase(a) == to_ascii_lowercase(b), +but without allocating and copying temporaries.

+
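A short illustration of the case-insensitive comparison:

assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
assert!(!b"Ferris".eq_ignore_ascii_case(b"ferrum"));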
1.60.0 · source

pub fn escape_ascii(&self) -> EscapeAscii<'_>

Returns an iterator that produces an escaped version of this slice, +treating it as an ASCII string.

+
§Examples
+

+let s = b"0\t\r\n'\"\\\x9d";
+let escaped = s.escape_ascii().to_string();
+assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
+
1.80.0 · source

pub fn trim_ascii_start(&self) -> &[u8]

Returns a byte slice with leading ASCII whitespace bytes removed.

+

‘Whitespace’ refers to the definition used by +u8::is_ascii_whitespace.

+
§Examples
+
assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
+assert_eq!(b"  ".trim_ascii_start(), b"");
+assert_eq!(b"".trim_ascii_start(), b"");
+
1.80.0 · source

pub fn trim_ascii_end(&self) -> &[u8]

Returns a byte slice with trailing ASCII whitespace bytes removed.

+

‘Whitespace’ refers to the definition used by +u8::is_ascii_whitespace.

+
§Examples
+
assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
+assert_eq!(b"  ".trim_ascii_end(), b"");
+assert_eq!(b"".trim_ascii_end(), b"");
+
1.80.0 · source

pub fn trim_ascii(&self) -> &[u8]

Returns a byte slice with leading and trailing ASCII whitespace bytes +removed.

+

‘Whitespace’ refers to the definition used by +u8::is_ascii_whitespace.

+
§Examples
+
assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
+assert_eq!(b"  ".trim_ascii(), b"");
+assert_eq!(b"".trim_ascii(), b"");
+
1.0.0 · source

pub fn len(&self) -> usize

Returns the number of elements in the slice.

+
§Examples
+
let a = [1, 2, 3];
+assert_eq!(a.len(), 3);
+
1.0.0 · source

pub fn is_empty(&self) -> bool

Returns true if the slice has a length of 0.

+
§Examples
+
let a = [1, 2, 3];
+assert!(!a.is_empty());
+
+let b: &[i32] = &[];
+assert!(b.is_empty());
+
1.0.0 · source

pub fn first(&self) -> Option<&T>

Returns the first element of the slice, or None if it is empty.

+
§Examples
+
let v = [10, 40, 30];
+assert_eq!(Some(&10), v.first());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.first());
+
1.5.0 · source

pub fn split_first(&self) -> Option<(&T, &[T])>

Returns the first and all the rest of the elements of the slice, or None if it is empty.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((first, elements)) = x.split_first() {
+    assert_eq!(first, &0);
+    assert_eq!(elements, &[1, 2]);
+}
+
1.5.0 · source

pub fn split_last(&self) -> Option<(&T, &[T])>

Returns the last and all the rest of the elements of the slice, or None if it is empty.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((last, elements)) = x.split_last() {
+    assert_eq!(last, &2);
+    assert_eq!(elements, &[0, 1]);
+}
+
1.0.0 · source

pub fn last(&self) -> Option<&T>

Returns the last element of the slice, or None if it is empty.

+
§Examples
+
let v = [10, 40, 30];
+assert_eq!(Some(&30), v.last());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.last());
+
1.77.0 · source

pub fn first_chunk<const N: usize>(&self) -> Option<&[T; N]>

Returns an array reference to the first N items in the slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let u = [10, 40, 30];
+assert_eq!(Some(&[10, 40]), u.first_chunk::<2>());
+
+let v: &[i32] = &[10];
+assert_eq!(None, v.first_chunk::<2>());
+
+let w: &[i32] = &[];
+assert_eq!(Some(&[]), w.first_chunk::<0>());
+
1.77.0 · source

pub fn split_first_chunk<const N: usize>(&self) -> Option<(&[T; N], &[T])>

Returns an array reference to the first N items in the slice and the remaining slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((first, elements)) = x.split_first_chunk::<2>() {
+    assert_eq!(first, &[0, 1]);
+    assert_eq!(elements, &[2]);
+}
+
+assert_eq!(None, x.split_first_chunk::<4>());
+
1.77.0 · source

pub fn split_last_chunk<const N: usize>(&self) -> Option<(&[T], &[T; N])>

Returns an array reference to the last N items in the slice and the remaining slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((elements, last)) = x.split_last_chunk::<2>() {
+    assert_eq!(elements, &[0]);
+    assert_eq!(last, &[1, 2]);
+}
+
+assert_eq!(None, x.split_last_chunk::<4>());
+
1.77.0 · source

pub fn last_chunk<const N: usize>(&self) -> Option<&[T; N]>

Returns an array reference to the last N items in the slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let u = [10, 40, 30];
+assert_eq!(Some(&[40, 30]), u.last_chunk::<2>());
+
+let v: &[i32] = &[10];
+assert_eq!(None, v.last_chunk::<2>());
+
+let w: &[i32] = &[];
+assert_eq!(Some(&[]), w.last_chunk::<0>());
+
1.0.0 · source

pub fn get<I>(&self, index: I) -> Option<&<I as SliceIndex<[T]>>::Output>
where + I: SliceIndex<[T]>,

Returns a reference to an element or subslice depending on the type of +index.

+
    +
  • If given a position, returns a reference to the element at that +position or None if out of bounds.
  • +
  • If given a range, returns the subslice corresponding to that range, +or None if out of bounds.
  • +
+
§Examples
+
let v = [10, 40, 30];
+assert_eq!(Some(&40), v.get(1));
+assert_eq!(Some(&[10, 40][..]), v.get(0..2));
+assert_eq!(None, v.get(3));
+assert_eq!(None, v.get(0..4));
+
1.0.0 · source

pub unsafe fn get_unchecked<I>( + &self, + index: I, +) -> &<I as SliceIndex<[T]>>::Output
where + I: SliceIndex<[T]>,

Returns a reference to an element or subslice, without doing bounds +checking.

+

For a safe alternative see get.

+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used.

+

You can think of this like .get(index).unwrap_unchecked(). It’s UB +to call .get_unchecked(len), even if you immediately convert to a +pointer. And it’s UB to call .get_unchecked(..len + 1), +.get_unchecked(..=len), or similar.

+
§Examples
+
let x = &[1, 2, 4];
+
+unsafe {
+    assert_eq!(x.get_unchecked(1), &2);
+}
+
1.0.0 · source

pub fn as_ptr(&self) -> *const T

Returns a raw pointer to the slice’s buffer.

+

The caller must ensure that the slice outlives the pointer this +function returns, or else it will end up dangling.

+

The caller must also ensure that the memory the pointer (non-transitively) points to +is never written to (except inside an UnsafeCell) using this pointer or any pointer +derived from it. If you need to mutate the contents of the slice, use as_mut_ptr.

+

Modifying the container referenced by this slice may cause its buffer +to be reallocated, which would also make any pointers to it invalid.

+
§Examples
+
let x = &[1, 2, 4];
+let x_ptr = x.as_ptr();
+
+unsafe {
+    for i in 0..x.len() {
+        assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
+    }
+}
+
1.48.0 · source

pub fn as_ptr_range(&self) -> Range<*const T>

Returns the two raw pointers spanning the slice.

+

The returned range is half-open, which means that the end pointer +points one past the last element of the slice. This way, an empty +slice is represented by two equal pointers, and the difference between +the two pointers represents the size of the slice.

+

See as_ptr for warnings on using these pointers. The end pointer +requires extra caution, as it does not point to a valid element in the +slice.

+

This function is useful for interacting with foreign interfaces which +use two pointers to refer to a range of elements in memory, as is +common in C++.

+

It can also be useful to check if a pointer to an element refers to an +element of this slice:

+ +
let a = [1, 2, 3];
+let x = &a[1] as *const _;
+let y = &5 as *const _;
+
+assert!(a.as_ptr_range().contains(&x));
+assert!(!a.as_ptr_range().contains(&y));
+
1.0.0 · source

pub fn iter(&self) -> Iter<'_, T>

Returns an iterator over the slice.

+

The iterator yields all items from start to end.

+
§Examples
+
let x = &[1, 2, 4];
+let mut iterator = x.iter();
+
+assert_eq!(iterator.next(), Some(&1));
+assert_eq!(iterator.next(), Some(&2));
+assert_eq!(iterator.next(), Some(&4));
+assert_eq!(iterator.next(), None);
+
1.0.0 · source

pub fn windows(&self, size: usize) -> Windows<'_, T>

Returns an iterator over all contiguous windows of length +size. The windows overlap. If the slice is shorter than +size, the iterator returns no values.

+
§Panics
+

Panics if size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.windows(3);
+assert_eq!(iter.next().unwrap(), &['l', 'o', 'r']);
+assert_eq!(iter.next().unwrap(), &['o', 'r', 'e']);
+assert_eq!(iter.next().unwrap(), &['r', 'e', 'm']);
+assert!(iter.next().is_none());
+

If the slice is shorter than size:

+ +
let slice = ['f', 'o', 'o'];
+let mut iter = slice.windows(4);
+assert!(iter.next().is_none());
+

There’s no windows_mut, as that existing would let safe code violate the +“only one &mut at a time to the same thing” rule. However, you can sometimes +use Cell::as_slice_of_cells in +conjunction with windows to accomplish something similar:

+ +
use std::cell::Cell;
+
+let mut array = ['R', 'u', 's', 't', ' ', '2', '0', '1', '5'];
+let slice = &mut array[..];
+let slice_of_cells: &[Cell<char>] = Cell::from_mut(slice).as_slice_of_cells();
+for w in slice_of_cells.windows(3) {
+    Cell::swap(&w[0], &w[2]);
+}
+assert_eq!(array, ['s', 't', ' ', '2', '0', '1', '5', 'u', 'R']);
+
1.0.0 · source

pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last chunk will not have length chunk_size.

+

See chunks_exact for a variant of this iterator that returns chunks of always exactly +chunk_size elements, and rchunks for the same iterator but starting at the end of the +slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert_eq!(iter.next().unwrap(), &['m']);
+assert!(iter.next().is_none());
+
1.31.0 · source

pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved +from the remainder function of the iterator.

+

Due to each chunk having exactly chunk_size elements, the compiler can often optimize the +resulting code better than in the case of chunks.

+

See chunks for a variant of this iterator that also returns the remainder as a smaller +chunk, and rchunks_exact for the same iterator but starting at the end of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
+
source

pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]]

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +assuming that there’s no remainder.

+
§Safety
+

This may only be called when

+
    +
  • The slice splits exactly into N-element chunks (aka self.len() % N == 0).
  • +
  • N != 0.
  • +
+
§Examples
+
#![feature(slice_as_chunks)]
+let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
+let chunks: &[[char; 1]] =
+    // SAFETY: 1-element chunks never have remainder
+    unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+let chunks: &[[char; 3]] =
+    // SAFETY: The slice length (6) is a multiple of 3
+    unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
+
+// These would be unsound:
+// let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
+// let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
+
source

pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T])

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +starting at the beginning of the slice, +and a remainder slice with length strictly less than N.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (chunks, remainder) = slice.as_chunks();
+assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
+assert_eq!(remainder, &['m']);
+

If you expect the slice to be an exact multiple, you can combine +let-else with an empty slice pattern:

+ +
#![feature(slice_as_chunks)]
+let slice = ['R', 'u', 's', 't'];
+let (chunks, []) = slice.as_chunks::<2>() else {
+    panic!("slice didn't have even length")
+};
+assert_eq!(chunks, &[['R', 'u'], ['s', 't']]);
+
source

pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]])

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +starting at the end of the slice, +and a remainder slice with length strictly less than N.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (remainder, chunks) = slice.as_rchunks();
+assert_eq!(remainder, &['l']);
+assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
+
source

pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N>

🔬This is a nightly-only experimental API. (array_chunks)

Returns an iterator over N elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are array references and do not overlap. If N does not divide the +length of the slice, then the last up to N-1 elements will be omitted and can be +retrieved from the remainder function of the iterator.

+

This method is the const generic equivalent of chunks_exact.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(array_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.array_chunks();
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
+
source

pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N>

🔬This is a nightly-only experimental API. (array_windows)

Returns an iterator over overlapping windows of N elements of a slice, +starting at the beginning of the slice.

+

This is the const generic equivalent of windows.

+

If N is greater than the size of the slice, it will return no windows.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(array_windows)]
+let slice = [0, 1, 2, 3];
+let mut iter = slice.array_windows();
+assert_eq!(iter.next().unwrap(), &[0, 1]);
+assert_eq!(iter.next().unwrap(), &[1, 2]);
+assert_eq!(iter.next().unwrap(), &[2, 3]);
+assert!(iter.next().is_none());
+
1.31.0 · source

pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the end +of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last chunk will not have length chunk_size.

+

See rchunks_exact for a variant of this iterator that returns chunks of always exactly +chunk_size elements, and chunks for the same iterator but starting at the beginning +of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.rchunks(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert_eq!(iter.next().unwrap(), &['l']);
+assert!(iter.next().is_none());
+
1.31.0 · source

pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +end of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved +from the remainder function of the iterator.

+

Due to each chunk having exactly chunk_size elements, the compiler can often optimize the +resulting code better than in the case of rchunks.

+

See rchunks for a variant of this iterator that also returns the remainder as a smaller +chunk, and chunks_exact for the same iterator but starting at the beginning of the +slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.rchunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['l']);
+
1.77.0 · source

pub fn chunk_by<F>(&self, pred: F) -> ChunkBy<'_, T, F>
where + F: FnMut(&T, &T) -> bool,

Returns an iterator over the slice producing non-overlapping runs +of elements using the predicate to separate them.

+

The predicate is called for every pair of consecutive elements, +meaning that it is called on slice[0] and slice[1], +followed by slice[1] and slice[2], and so on.

+
§Examples
+
let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
+
+let mut iter = slice.chunk_by(|a, b| a == b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+assert_eq!(iter.next(), Some(&[3, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+assert_eq!(iter.next(), None);
+

This method can be used to extract the sorted subslices:

+ +
let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
+
+let mut iter = slice.chunk_by(|a, b| a <= b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
+assert_eq!(iter.next(), None);
+
1.0.0 · source

pub fn split_at(&self, mid: usize) -> (&[T], &[T])

Divides one slice into two at an index.

+

The first will contain all indices from [0, mid) (excluding +the index mid itself) and the second will contain all +indices from [mid, len) (excluding the index len itself).

+
§Panics
+

Panics if mid > len. For a non-panicking alternative see +split_at_checked.

+
§Examples
+
let v = [1, 2, 3, 4, 5, 6];
+
+{
+   let (left, right) = v.split_at(0);
+   assert_eq!(left, []);
+   assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+{
+    let (left, right) = v.split_at(2);
+    assert_eq!(left, [1, 2]);
+    assert_eq!(right, [3, 4, 5, 6]);
+}
+
+{
+    let (left, right) = v.split_at(6);
+    assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+    assert_eq!(right, []);
+}
+
1.79.0 · source

pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T])

Divides one slice into two at an index, without doing bounds checking.

+

The first will contain all indices from [0, mid) (excluding +the index mid itself) and the second will contain all +indices from [mid, len) (excluding the index len itself).

+

For a safe alternative see split_at.

+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used. The caller has to ensure that +0 <= mid <= self.len().

+
§Examples
+
let v = [1, 2, 3, 4, 5, 6];
+
+unsafe {
+   let (left, right) = v.split_at_unchecked(0);
+   assert_eq!(left, []);
+   assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+unsafe {
+    let (left, right) = v.split_at_unchecked(2);
+    assert_eq!(left, [1, 2]);
+    assert_eq!(right, [3, 4, 5, 6]);
+}
+
+unsafe {
+    let (left, right) = v.split_at_unchecked(6);
+    assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+    assert_eq!(right, []);
+}
+
1.80.0 · source

pub fn split_at_checked(&self, mid: usize) -> Option<(&[T], &[T])>

Divides one slice into two at an index, returning None if the slice is +too short.

+

If mid ≤ len returns a pair of slices where the first will contain all +indices from [0, mid) (excluding the index mid itself) and the +second will contain all indices from [mid, len) (excluding the index +len itself).

+

Otherwise, if mid > len, returns None.

+
§Examples
+
let v = [1, -2, 3, -4, 5, -6];
+
+{
+   let (left, right) = v.split_at_checked(0).unwrap();
+   assert_eq!(left, []);
+   assert_eq!(right, [1, -2, 3, -4, 5, -6]);
+}
+
+{
+    let (left, right) = v.split_at_checked(2).unwrap();
+    assert_eq!(left, [1, -2]);
+    assert_eq!(right, [3, -4, 5, -6]);
+}
+
+{
+    let (left, right) = v.split_at_checked(6).unwrap();
+    assert_eq!(left, [1, -2, 3, -4, 5, -6]);
+    assert_eq!(right, []);
+}
+
+assert_eq!(None, v.split_at_checked(7));
+
1.0.0 · source

pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred. The matched element is not contained in the subslices.

+
§Examples
+
let slice = [10, 40, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
+

If the first element is matched, an empty slice will be the first item +returned by the iterator. Similarly, if the last element in the slice +is matched, an empty slice will be the last item returned by the +iterator:

+ +
let slice = [10, 40, 33];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert!(iter.next().is_none());
+

If two matched elements are directly adjacent, an empty slice will be +present between them:

+ +
let slice = [10, 6, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
+
1.51.0 · source

pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred. The matched element is contained in the end of the previous +subslice as a terminator.

+
§Examples
+
let slice = [10, 40, 33, 20];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
+

If the last element of the slice is matched, +that element will be considered the terminator of the preceding slice. +That slice will be the last item returned by the iterator.

+ +
let slice = [3, 10, 40, 33];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[3]);
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert!(iter.next().is_none());
+
1.27.0 · source

pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred, starting at the end of the slice and working backwards. +The matched element is not contained in the subslices.

+
§Examples
+
let slice = [11, 22, 33, 0, 44, 55];
+let mut iter = slice.rsplit(|num| *num == 0);
+
+assert_eq!(iter.next().unwrap(), &[44, 55]);
+assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
+assert_eq!(iter.next(), None);
+

As with split(), if the first or last element is matched, an empty +slice will be the first (or last) item returned by the iterator.

+ +
let v = &[0, 1, 1, 2, 3, 5, 8];
+let mut it = v.rsplit(|n| *n % 2 == 0);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next().unwrap(), &[3, 5]);
+assert_eq!(it.next().unwrap(), &[1, 1]);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next(), None);
+
1.0.0 · source

pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred, limited to returning at most n items. The matched element is +not contained in the subslices.

+

The last element returned, if any, will contain the remainder of the +slice.

+
§Examples
+

Print the slice split once by numbers divisible by 3 (i.e., [10, 40], +[20, 60, 50]):

+ +
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.splitn(2, |num| *num % 3 == 0) {
+    println!("{group:?}");
+}
+
1.0.0 · source

pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred limited to returning at most n items. This starts at the end of +the slice and works backwards. The matched element is not contained in +the subslices.

+

The last element returned, if any, will contain the remainder of the +slice.

+
§Examples
+

Print the slice split once, starting from the end, by numbers divisible +by 3 (i.e., [50], [10, 40, 30, 20]):

+ +
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.rsplitn(2, |num| *num % 3 == 0) {
+    println!("{group:?}");
+}
+
source

pub fn split_once<F>(&self, pred: F) -> Option<(&[T], &[T])>
where + F: FnMut(&T) -> bool,

🔬This is a nightly-only experimental API. (slice_split_once)

Splits the slice on the first element that matches the specified +predicate.

+

If any matching elements are present in the slice, returns the prefix +before the match and suffix after. The matching element itself is not +included. If no elements match, returns None.

+
§Examples
+
#![feature(slice_split_once)]
+let s = [1, 2, 3, 2, 4];
+assert_eq!(s.split_once(|&x| x == 2), Some((
+    &[1][..],
+    &[3, 2, 4][..]
+)));
+assert_eq!(s.split_once(|&x| x == 0), None);
+
source

pub fn rsplit_once<F>(&self, pred: F) -> Option<(&[T], &[T])>
where + F: FnMut(&T) -> bool,

🔬This is a nightly-only experimental API. (slice_split_once)

Splits the slice on the last element that matches the specified +predicate.

+

If any matching elements are present in the slice, returns the prefix +before the match and suffix after. The matching element itself is not +included. If no elements match, returns None.

+
§Examples
+
#![feature(slice_split_once)]
+let s = [1, 2, 3, 2, 4];
+assert_eq!(s.rsplit_once(|&x| x == 2), Some((
+    &[1, 2, 3][..],
+    &[4][..]
+)));
+assert_eq!(s.rsplit_once(|&x| x == 0), None);
+
1.0.0 · source

pub fn contains(&self, x: &T) -> bool
where + T: PartialEq,

Returns true if the slice contains an element with the given value.

+

This operation is O(n).

+

Note that if you have a sorted slice, binary_search may be faster.

+
§Examples
+
let v = [10, 40, 30];
+assert!(v.contains(&30));
+assert!(!v.contains(&50));
+

If you do not have a &T, but some other value that you can compare +with one (for example, String implements PartialEq<str>), you can +use iter().any:

+ +
let v = [String::from("hello"), String::from("world")]; // slice of `String`
+assert!(v.iter().any(|e| e == "hello")); // search with `&str`
+assert!(!v.iter().any(|e| e == "hi"));
+
1.0.0 · source

pub fn starts_with(&self, needle: &[T]) -> bool
where + T: PartialEq,

Returns true if needle is a prefix of the slice or equal to the slice.

+
§Examples
+
let v = [10, 40, 30];
+assert!(v.starts_with(&[10]));
+assert!(v.starts_with(&[10, 40]));
+assert!(v.starts_with(&v));
+assert!(!v.starts_with(&[50]));
+assert!(!v.starts_with(&[10, 50]));
+

Always returns true if needle is an empty slice:

+ +
let v = &[10, 40, 30];
+assert!(v.starts_with(&[]));
+let v: &[u8] = &[];
+assert!(v.starts_with(&[]));
+
1.0.0 · source

pub fn ends_with(&self, needle: &[T]) -> bool
where + T: PartialEq,

Returns true if needle is a suffix of the slice or equal to the slice.

+
§Examples
+
let v = [10, 40, 30];
+assert!(v.ends_with(&[30]));
+assert!(v.ends_with(&[40, 30]));
+assert!(v.ends_with(&v));
+assert!(!v.ends_with(&[50]));
+assert!(!v.ends_with(&[50, 30]));
+

Always returns true if needle is an empty slice:

+ +
let v = &[10, 40, 30];
+assert!(v.ends_with(&[]));
+let v: &[u8] = &[];
+assert!(v.ends_with(&[]));
+
1.51.0 · source

pub fn strip_prefix<P>(&self, prefix: &P) -> Option<&[T]>
where + P: SlicePattern<Item = T> + ?Sized, + T: PartialEq,

Returns a subslice with the prefix removed.

+

If the slice starts with prefix, returns the subslice after the prefix, wrapped in Some. +If prefix is empty, simply returns the original slice. If prefix is equal to the +original slice, returns an empty slice.

+

If the slice does not start with prefix, returns None.

+
§Examples
+
let v = &[10, 40, 30];
+assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+assert_eq!(v.strip_prefix(&[10, 40, 30]), Some(&[][..]));
+assert_eq!(v.strip_prefix(&[50]), None);
+assert_eq!(v.strip_prefix(&[10, 50]), None);
+
+let prefix : &str = "he";
+assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
+           Some(b"llo".as_ref()));
+
1.51.0 · source

pub fn strip_suffix<P>(&self, suffix: &P) -> Option<&[T]>
where + P: SlicePattern<Item = T> + ?Sized, + T: PartialEq,

Returns a subslice with the suffix removed.

+

If the slice ends with suffix, returns the subslice before the suffix, wrapped in Some. +If suffix is empty, simply returns the original slice. If suffix is equal to the +original slice, returns an empty slice.

+

If the slice does not end with suffix, returns None.

+
§Examples
+
let v = &[10, 40, 30];
+assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+assert_eq!(v.strip_suffix(&[10, 40, 30]), Some(&[][..]));
+assert_eq!(v.strip_suffix(&[50]), None);
+assert_eq!(v.strip_suffix(&[50, 30]), None);
+

1.0.0 · source

pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where + T: Ord,

Binary searches this slice for a given element. +If the slice is not sorted, the returned result is unspecified and +meaningless.

+

If the value is found then Result::Ok is returned, containing the +index of the matching element. If there are multiple matches, then any +one of the matches could be returned. The index is chosen +deterministically, but is subject to change in future versions of Rust. +If the value is not found then Result::Err is returned, containing +the index where a matching element could be inserted while maintaining +sorted order.

+

See also binary_search_by, binary_search_by_key, and partition_point.

+
§Examples
+

Looks up a series of four elements. The first is found, with a +uniquely determined position; the second and third are not +found; the fourth could match any position in [1, 4].

+ +
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+assert_eq!(s.binary_search(&13),  Ok(9));
+assert_eq!(s.binary_search(&4),   Err(7));
+assert_eq!(s.binary_search(&100), Err(13));
+let r = s.binary_search(&1);
+assert!(match r { Ok(1..=4) => true, _ => false, });
+

If you want to find that whole range of matching items, rather than +an arbitrary matching one, that can be done using partition_point:

+ +
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let low = s.partition_point(|x| x < &1);
+assert_eq!(low, 1);
+let high = s.partition_point(|x| x <= &1);
+assert_eq!(high, 5);
+let r = s.binary_search(&1);
+assert!((low..high).contains(&r.unwrap()));
+
+assert!(s[..low].iter().all(|&x| x < 1));
+assert!(s[low..high].iter().all(|&x| x == 1));
+assert!(s[high..].iter().all(|&x| x > 1));
+
+// For something not found, the "range" of equal items is empty
+assert_eq!(s.partition_point(|x| x < &11), 9);
+assert_eq!(s.partition_point(|x| x <= &11), 9);
+assert_eq!(s.binary_search(&11), Err(9));
+

If you want to insert an item to a sorted vector, while maintaining +sort order, consider using partition_point:

+ +
let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x <= num);
+// If `num` is unique, `s.partition_point(|&x| x < num)` (with `<`) is equivalent to
+// `s.binary_search(&num).unwrap_or_else(|x| x)`, but using `<=` will allow `insert`
+// to shift fewer elements.
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+
1.0.0 · source

pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result<usize, usize>
where + F: FnMut(&'a T) -> Ordering,

Binary searches this slice with a comparator function.

+

The comparator function should return an order code that indicates +whether its argument is Less, Equal or Greater than the desired +target. +If the slice is not sorted or if the comparator function does not +implement an order consistent with the sort order of the underlying +slice, the returned result is unspecified and meaningless.

+

If the value is found then Result::Ok is returned, containing the +index of the matching element. If there are multiple matches, then any +one of the matches could be returned. The index is chosen +deterministically, but is subject to change in future versions of Rust. +If the value is not found then Result::Err is returned, containing +the index where a matching element could be inserted while maintaining +sorted order.

+

See also binary_search, binary_search_by_key, and partition_point.

+
§Examples
+

Looks up a series of four elements. The first is found, with a +uniquely determined position; the second and third are not +found; the fourth could match any position in [1, 4].

+ +
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let seek = 13;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
+let seek = 4;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
+let seek = 100;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
+let seek = 1;
+let r = s.binary_search_by(|probe| probe.cmp(&seek));
+assert!(match r { Ok(1..=4) => true, _ => false, });
+
1.10.0 · source

pub fn binary_search_by_key<'a, B, F>( + &'a self, + b: &B, + f: F, +) -> Result<usize, usize>
where + F: FnMut(&'a T) -> B, + B: Ord,

Binary searches this slice with a key extraction function.

+

Assumes that the slice is sorted by the key, for instance with +sort_by_key using the same key extraction function. +If the slice is not sorted by the key, the returned result is +unspecified and meaningless.

+

If the value is found then Result::Ok is returned, containing the +index of the matching element. If there are multiple matches, then any +one of the matches could be returned. The index is chosen +deterministically, but is subject to change in future versions of Rust. +If the value is not found then Result::Err is returned, containing +the index where a matching element could be inserted while maintaining +sorted order.

+

See also binary_search, binary_search_by, and partition_point.

+
§Examples
+

Looks up a series of four elements in a slice of pairs sorted by +their second elements. The first is found, with a uniquely +determined position; the second and third are not found; the +fourth could match any position in [1, 4].

+ +
let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
+         (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+         (1, 21), (2, 34), (4, 55)];
+
+assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b),  Ok(9));
+assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b),   Err(7));
+assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+let r = s.binary_search_by_key(&1, |&(a, b)| b);
+assert!(match r { Ok(1..=4) => true, _ => false, });
+
1.30.0 · source

pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T])

Transmutes the slice to a slice of another type, ensuring alignment of the types is +maintained.

+

This method splits the slice into three distinct slices: prefix, correctly aligned middle +slice of a new type, and the suffix slice. The middle part will be as big as possible under +the given alignment constraint and element size.

+

This method has no purpose when either input element T or output element U are +zero-sized and will return the original slice without splitting anything.

+
§Safety
+

This method is essentially a transmute with respect to the elements in the returned +middle slice, so all the usual caveats pertaining to transmute::<T, U> also apply here.

+
§Examples
+

Basic usage:

+ +
unsafe {
+    let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+    let (prefix, shorts, suffix) = bytes.align_to::<u16>();
+    // less_efficient_algorithm_for_bytes(prefix);
+    // more_efficient_algorithm_for_aligned_shorts(shorts);
+    // less_efficient_algorithm_for_bytes(suffix);
+}
+
source

pub fn as_simd<const LANES: usize>(&self) -> (&[T], &[Simd<T, LANES>], &[T])
where + Simd<T, LANES>: AsRef<[T; LANES]>, + T: SimdElement, + LaneCount<LANES>: SupportedLaneCount,

🔬This is a nightly-only experimental API. (portable_simd)

Splits a slice into a prefix, a middle of aligned SIMD types, and a suffix.

+

This is a safe wrapper around slice::align_to, so inherits the same +guarantees as that method.

+
§Panics
+

This will panic if the size of the SIMD type is different from +LANES times that of the scalar.

+

At the time of writing, the trait restrictions on Simd<T, LANES> keep +that from ever happening, as only power-of-two numbers of lanes are +supported. It’s possible that, in the future, those restrictions might +be lifted in a way that would make it possible to see panics from this +method for something like LANES == 3.

+
§Examples
+
#![feature(portable_simd)]
+use core::simd::prelude::*;
+
+let short = &[1, 2, 3];
+let (prefix, middle, suffix) = short.as_simd::<4>();
+assert_eq!(middle, []); // Not enough elements for anything in the middle
+
+// They might be split in any possible way between prefix and suffix
+let it = prefix.iter().chain(suffix).copied();
+assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
+
+fn basic_simd_sum(x: &[f32]) -> f32 {
+    use std::ops::Add;
+    let (prefix, middle, suffix) = x.as_simd();
+    let sums = f32x4::from_array([
+        prefix.iter().copied().sum(),
+        0.0,
+        0.0,
+        suffix.iter().copied().sum(),
+    ]);
+    let sums = middle.iter().copied().fold(sums, f32x4::add);
+    sums.reduce_sum()
+}
+
+let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
+assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
+
1.82.0 · source

pub fn is_sorted(&self) -> bool
where + T: PartialOrd,

Checks if the elements of this slice are sorted.

+

That is, for each element a and its following element b, a <= b must hold. If the +slice yields exactly zero or one element, true is returned.

+

Note that if Self::Item is only PartialOrd, but not Ord, the above definition +implies that this function returns false if any two consecutive items are not +comparable.

+
§Examples
+
let empty: [i32; 0] = [];
+
+assert!([1, 2, 2, 9].is_sorted());
+assert!(![1, 3, 2, 4].is_sorted());
+assert!([0].is_sorted());
+assert!(empty.is_sorted());
+assert!(![0.0, 1.0, f32::NAN].is_sorted());
+
1.82.0 · source

pub fn is_sorted_by<'a, F>(&'a self, compare: F) -> bool
where + F: FnMut(&'a T, &'a T) -> bool,

Checks if the elements of this slice are sorted using the given comparator function.

+

Instead of using PartialOrd::partial_cmp, this function uses the given compare +function to determine whether two elements are to be considered in sorted order.

+
§Examples
+
assert!([1, 2, 2, 9].is_sorted_by(|a, b| a <= b));
+assert!(![1, 2, 2, 9].is_sorted_by(|a, b| a < b));
+
+assert!([0].is_sorted_by(|a, b| true));
+assert!([0].is_sorted_by(|a, b| false));
+
+let empty: [i32; 0] = [];
+assert!(empty.is_sorted_by(|a, b| false));
+assert!(empty.is_sorted_by(|a, b| true));
+
1.82.0 · source

pub fn is_sorted_by_key<'a, F, K>(&'a self, f: F) -> bool
where + F: FnMut(&'a T) -> K, + K: PartialOrd,

Checks if the elements of this slice are sorted using the given key extraction function.

+

Instead of comparing the slice’s elements directly, this function compares the keys of the +elements, as determined by f. Apart from that, it’s equivalent to is_sorted; see its +documentation for more information.

+
§Examples
+
assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
+
1.52.0 · source

pub fn partition_point<P>(&self, pred: P) -> usize
where + P: FnMut(&T) -> bool,

Returns the index of the partition point according to the given predicate +(the index of the first element of the second partition).

+

The slice is assumed to be partitioned according to the given predicate. +This means that all elements for which the predicate returns true are at the start of the slice +and all elements for which the predicate returns false are at the end. +For example, [7, 15, 3, 5, 4, 12, 6] is partitioned under the predicate x % 2 != 0 +(all odd numbers are at the start, all even at the end).

+

If this slice is not partitioned, the returned result is unspecified and meaningless, +as this method performs a kind of binary search.

+

See also binary_search, binary_search_by, and binary_search_by_key.

+
§Examples
+
let v = [1, 2, 3, 3, 5, 6, 7];
+let i = v.partition_point(|&x| x < 5);
+
+assert_eq!(i, 4);
+assert!(v[..i].iter().all(|&x| x < 5));
+assert!(v[i..].iter().all(|&x| !(x < 5)));
+

If all elements of the slice match the predicate, including if the slice +is empty, then the length of the slice will be returned:

+ +
let a = [2, 4, 8];
+assert_eq!(a.partition_point(|x| x < &100), a.len());
+let a: [i32; 0] = [];
+assert_eq!(a.partition_point(|x| x < &100), 0);
+

If you want to insert an item to a sorted vector, while maintaining +sort order:

+ +
let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x <= num);
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+
source

pub fn elem_offset(&self, element: &T) -> Option<usize>

🔬This is a nightly-only experimental API. (substr_range)

Returns the index that an element reference points to.

+

Returns None if element does not point within the slice or if it points between elements.

+

This method is useful for extending slice iterators like slice::split.

+

Note that this uses pointer arithmetic and does not compare elements. +To find the index of an element via comparison, use +.iter().position() instead.

+
§Panics
+

Panics if T is zero-sized.

+
§Examples
+

Basic usage:

+ +
#![feature(substr_range)]
+
+let nums: &[u32] = &[1, 7, 1, 1];
+let num = &nums[2];
+
+assert_eq!(num, &1);
+assert_eq!(nums.elem_offset(num), Some(2));
+

Returning None with an in-between element:

+ +
#![feature(substr_range)]
+
+let arr: &[[u32; 2]] = &[[0, 1], [2, 3]];
+let flat_arr: &[u32] = arr.as_flattened();
+
+let ok_elm: &[u32; 2] = flat_arr[0..2].try_into().unwrap();
+let weird_elm: &[u32; 2] = flat_arr[1..3].try_into().unwrap();
+
+assert_eq!(ok_elm, &[0, 1]);
+assert_eq!(weird_elm, &[1, 2]);
+
+assert_eq!(arr.elem_offset(ok_elm), Some(0)); // Points to element 0
+assert_eq!(arr.elem_offset(weird_elm), None); // Points between element 0 and 1
+
source

pub fn subslice_range(&self, subslice: &[T]) -> Option<Range<usize>>

🔬This is a nightly-only experimental API. (substr_range)

Returns the range of indices that a subslice points to.

+

Returns None if subslice does not point within the slice or if it points between elements.

+

This method does not compare elements. Instead, this method finds the location in the slice that +subslice was obtained from. To find the index of a subslice via comparison, instead use +.windows().position().

+

This method is useful for extending slice iterators like slice::split.

+

Note that this may return a false positive (either Some(0..0) or Some(self.len()..self.len())) +if subslice has a length of zero and points to the beginning or end of another, separate, slice.

+
§Panics
+

Panics if T is zero-sized.

+
§Examples
+

Basic usage:

+ +
#![feature(substr_range)]
+
+let nums = &[0, 5, 10, 0, 0, 5];
+
+let mut iter = nums
+    .split(|t| *t == 0)
+    .map(|n| nums.subslice_range(n).unwrap());
+
+assert_eq!(iter.next(), Some(0..0));
+assert_eq!(iter.next(), Some(1..3));
+assert_eq!(iter.next(), Some(4..4));
+assert_eq!(iter.next(), Some(5..6));
+
1.80.0 · source

pub fn as_flattened(&self) -> &[T]

Takes a &[[T; N]], and flattens it to a &[T].

+
§Panics
+

This panics if the length of the resulting slice would overflow a usize.

+

This is only possible when flattening a slice of arrays of zero-sized +types, and thus tends to be irrelevant in practice. If +size_of::<T>() > 0, this will never panic.

+
§Examples
+
assert_eq!([[1, 2, 3], [4, 5, 6]].as_flattened(), &[1, 2, 3, 4, 5, 6]);
+
+assert_eq!(
+    [[1, 2, 3], [4, 5, 6]].as_flattened(),
+    [[1, 2], [3, 4], [5, 6]].as_flattened(),
+);
+
+let slice_of_empty_arrays: &[[i32; 0]] = &[[], [], [], [], []];
+assert!(slice_of_empty_arrays.as_flattened().is_empty());
+
+let empty_slice_of_arrays: &[[u32; 10]] = &[];
+assert!(empty_slice_of_arrays.as_flattened().is_empty());
+
1.79.0 · source

pub fn utf8_chunks(&self) -> Utf8Chunks<'_>

Creates an iterator over the contiguous valid UTF-8 ranges of this +slice, and the non-UTF-8 fragments in between.

+
§Examples
+

This function formats arbitrary but mostly-UTF-8 bytes into Rust source +code in the form of a C-string literal (c"...").

+ +
use std::fmt::Write as _;
+
+pub fn cstr_literal(bytes: &[u8]) -> String {
+    let mut repr = String::new();
+    repr.push_str("c\"");
+    for chunk in bytes.utf8_chunks() {
+        for ch in chunk.valid().chars() {
+            // Escapes \0, \t, \r, \n, \\, \', \", and uses \u{...} for non-printable characters.
+            write!(repr, "{}", ch.escape_debug()).unwrap();
+        }
+        for byte in chunk.invalid() {
+            write!(repr, "\\x{:02X}", byte).unwrap();
+        }
+    }
+    repr.push('"');
+    repr
+}
+
+fn main() {
+    let lit = cstr_literal(b"\xferris the \xf0\x9f\xa6\x80\x07");
+    let expected = stringify!(c"\xFErris the 🦀\u{7}");
+    assert_eq!(lit, expected);
+}
+
1.0.0 · source

pub fn to_vec(&self) -> Vec<T>
where + T: Clone,

Copies self into a new Vec.

+
§Examples
+
let s = [10, 40, 30];
+let x = s.to_vec();
+// Here, `s` and `x` can be modified independently.
+
source

pub fn to_vec_in<A>(&self, alloc: A) -> Vec<T, A>
where + A: Allocator, + T: Clone,

🔬This is a nightly-only experimental API. (allocator_api)

Copies self into a new Vec with an allocator.

+
§Examples
+
#![feature(allocator_api)]
+
+use std::alloc::System;
+
+let s = [10, 40, 30];
+let x = s.to_vec_in(System);
+// Here, `s` and `x` can be modified independently.
+
1.40.0 · source

pub fn repeat(&self, n: usize) -> Vec<T>
where + T: Copy,

Creates a vector by copying a slice n times.

+
§Panics
+

This function will panic if the capacity would overflow.

+
§Examples
+

Basic usage:

+ +
assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
+

A panic upon overflow:

+ +
// this will panic at runtime
+b"0123456789abcdef".repeat(usize::MAX);
+
1.0.0 · source

pub fn concat<Item>(&self) -> <[T] as Concat<Item>>::Output
where + [T]: Concat<Item>, + Item: ?Sized,

Flattens a slice of T into a single value Self::Output.

+
§Examples
+
assert_eq!(["hello", "world"].concat(), "helloworld");
+assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
+
1.3.0 · source

pub fn join<Separator>( + &self, + sep: Separator, +) -> <[T] as Join<Separator>>::Output
where + [T]: Join<Separator>,

Flattens a slice of T into a single value Self::Output, placing a +given separator between each.

+
§Examples
+
assert_eq!(["hello", "world"].join(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
+
1.0.0 · source

pub fn connect<Separator>( + &self, + sep: Separator, +) -> <[T] as Join<Separator>>::Output
where + [T]: Join<Separator>,

👎Deprecated since 1.3.0: renamed to join

Flattens a slice of T into a single value Self::Output, placing a +given separator between each.

+
§Examples
+
assert_eq!(["hello", "world"].connect(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
+
1.23.0 · source

pub fn to_ascii_uppercase(&self) -> Vec<u8>

Returns a vector containing a copy of this slice where each byte +is mapped to its ASCII upper case equivalent.

+

ASCII letters ‘a’ to ‘z’ are mapped to ‘A’ to ‘Z’, +but non-ASCII letters are unchanged.

+

To uppercase the value in-place, use make_ascii_uppercase.

+
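For example (a short sketch, calling the method on a Bytes value through its slice deref):

+ +
use bytes::Bytes;
+
+let b = Bytes::from_static(b"hello, world!");
+assert_eq!(b.to_ascii_uppercase(), b"HELLO, WORLD!".to_vec());
+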
1.23.0 · source

pub fn to_ascii_lowercase(&self) -> Vec<u8>

Returns a vector containing a copy of this slice where each byte +is mapped to its ASCII lower case equivalent.

+

ASCII letters ‘A’ to ‘Z’ are mapped to ‘a’ to ‘z’, +but non-ASCII letters are unchanged.

+

To lowercase the value in-place, use make_ascii_lowercase.

+
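Similarly, a short sketch for the lowercase variant:

+ +
use bytes::Bytes;
+
+let b = Bytes::from_static(b"HeLLo, World!");
+assert_eq!(b.to_ascii_lowercase(), b"hello, world!".to_vec());
+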

Trait Implementations§

source§

impl AsRef<[u8]> for Bytes

source§

fn as_ref(&self) -> &[u8]

Converts this type into a shared reference of the (usually inferred) input type.
source§

impl Borrow<[u8]> for Bytes

source§

fn borrow(&self) -> &[u8]

Immutably borrows from an owned value. Read more
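
These impls let a Bytes be handed to generic APIs that accept byte slices. A minimal sketch (checksum is a hypothetical helper, shown only for illustration):

+ +
use bytes::Bytes;
+
+// Hypothetical helper: any `impl AsRef<[u8]>` parameter accepts a `Bytes`.
+fn checksum(data: impl AsRef<[u8]>) -> u32 {
+    data.as_ref().iter().map(|&b| b as u32).sum()
+}
+
+let b = Bytes::from_static(b"abc");
+assert_eq!(checksum(b), 97 + 98 + 99);
+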
source§

impl Buf for Bytes

source§

fn remaining(&self) -> usize

Returns the number of bytes between the current position and the end of +the buffer. Read more
source§

fn chunk(&self) -> &[u8]

Returns a slice starting at the current position and of length between 0 +and Buf::remaining(). Note that this can return a shorter slice (this allows +a non-contiguous internal representation). Read more
source§

fn advance(&mut self, cnt: usize)

Advance the internal cursor of the Buf Read more
source§

fn copy_to_bytes(&mut self, len: usize) -> Self

Consumes len bytes inside self and returns a new instance of Bytes +containing this data. Read more
source§

fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize

Available on crate feature std only.
Fills dst with potentially multiple slices starting at self’s +current position. Read more
source§

fn has_remaining(&self) -> bool

Returns true if there are any more bytes to consume Read more
source§

fn copy_to_slice(&mut self, dst: &mut [u8])

Copies bytes from self into dst. Read more
source§

fn get_u8(&mut self) -> u8

Gets an unsigned 8 bit integer from self. Read more
source§

fn get_i8(&mut self) -> i8

Gets a signed 8 bit integer from self. Read more
source§

fn get_u16(&mut self) -> u16

Gets an unsigned 16 bit integer from self in big-endian byte order. Read more
source§

fn get_u16_le(&mut self) -> u16

Gets an unsigned 16 bit integer from self in little-endian byte order. Read more
source§

fn get_u16_ne(&mut self) -> u16

Gets an unsigned 16 bit integer from self in native-endian byte order. Read more
source§

fn get_i16(&mut self) -> i16

Gets a signed 16 bit integer from self in big-endian byte order. Read more
source§

fn get_i16_le(&mut self) -> i16

Gets a signed 16 bit integer from self in little-endian byte order. Read more
source§

fn get_i16_ne(&mut self) -> i16

Gets a signed 16 bit integer from self in native-endian byte order. Read more
source§

fn get_u32(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the big-endian byte order. Read more
source§

fn get_u32_le(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the little-endian byte order. Read more
source§

fn get_u32_ne(&mut self) -> u32

Gets an unsigned 32 bit integer from self in native-endian byte order. Read more
source§

fn get_i32(&mut self) -> i32

Gets a signed 32 bit integer from self in big-endian byte order. Read more
source§

fn get_i32_le(&mut self) -> i32

Gets a signed 32 bit integer from self in little-endian byte order. Read more
source§

fn get_i32_ne(&mut self) -> i32

Gets a signed 32 bit integer from self in native-endian byte order. Read more
source§

fn get_u64(&mut self) -> u64

Gets an unsigned 64 bit integer from self in big-endian byte order. Read more
source§

fn get_u64_le(&mut self) -> u64

Gets an unsigned 64 bit integer from self in little-endian byte order. Read more
source§

fn get_u64_ne(&mut self) -> u64

Gets an unsigned 64 bit integer from self in native-endian byte order. Read more
source§

fn get_i64(&mut self) -> i64

Gets a signed 64 bit integer from self in big-endian byte order. Read more
source§

fn get_i64_le(&mut self) -> i64

Gets a signed 64 bit integer from self in little-endian byte order. Read more
source§

fn get_i64_ne(&mut self) -> i64

Gets a signed 64 bit integer from self in native-endian byte order. Read more
source§

fn get_u128(&mut self) -> u128

Gets an unsigned 128 bit integer from self in big-endian byte order. Read more
source§

fn get_u128_le(&mut self) -> u128

Gets an unsigned 128 bit integer from self in little-endian byte order. Read more
source§

fn get_u128_ne(&mut self) -> u128

Gets an unsigned 128 bit integer from self in native-endian byte order. Read more
source§

fn get_i128(&mut self) -> i128

Gets a signed 128 bit integer from self in big-endian byte order. Read more
source§

fn get_i128_le(&mut self) -> i128

Gets a signed 128 bit integer from self in little-endian byte order. Read more
source§

fn get_i128_ne(&mut self) -> i128

Gets a signed 128 bit integer from self in native-endian byte order. Read more
source§

fn get_uint(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in big-endian byte order. Read more
source§

fn get_uint_le(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in little-endian byte order. Read more
source§

fn get_uint_ne(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in native-endian byte order. Read more
source§

fn get_int(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in big-endian byte order. Read more
source§

fn get_int_le(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in little-endian byte order. Read more
source§

fn get_int_ne(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in native-endian byte order. Read more
source§

fn get_f32(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f32_le(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f32_ne(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn get_f64(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f64_le(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f64_ne(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn take(self, limit: usize) -> Take<Self>
where + Self: Sized,

Creates an adaptor which will read at most limit bytes from self. Read more
source§

fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adaptor which will chain this buffer with another. Read more
source§

fn reader(self) -> Reader<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Read trait for self. Read more
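
As a rough sketch of how these Buf methods compose on a Bytes value, here is a made-up framing (one tag byte, a big-endian u16 length, then the body); the layout is purely illustrative:

+ +
use bytes::{Buf, Bytes};
+
+let mut buf = Bytes::from_static(&[0x01, 0x00, 0x02, b'h', b'i']);
+
+assert_eq!(buf.get_u8(), 0x01);    // tag byte
+assert_eq!(buf.get_u16(), 2);      // big-endian length
+let body = buf.copy_to_bytes(2);   // the next `len` bytes as a `Bytes`
+assert_eq!(&body[..], b"hi");
+assert!(!buf.has_remaining());
+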
source§

impl Clone for Bytes

source§

fn clone(&self) -> Bytes

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
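
Cloning a Bytes is cheap: the underlying storage is shared rather than copied. A quick sketch:

+ +
use bytes::Bytes;
+
+let a = Bytes::from_static(b"shared");
+let b = a.clone();             // no byte data is copied
+
+assert_eq!(a, b);
+drop(a);
+assert_eq!(&b[..], b"shared"); // `b` still views the same memory
+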
source§

impl Debug for Bytes

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl Default for Bytes

source§

fn default() -> Bytes

Returns the “default value” for a type. Read more
source§

impl Deref for Bytes

source§

type Target = [u8]

The resulting type after dereferencing.
source§

fn deref(&self) -> &[u8]

Dereferences the value.
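
Because Bytes derefs to [u8], all of the slice methods documented above can be called directly on a Bytes value, for example:

+ +
use bytes::Bytes;
+
+let b = Bytes::from_static(b"hello world");
+
+assert!(b.starts_with(b"hello"));
+assert_eq!(b.split(|&c| c == b' ').count(), 2);
+assert_eq!(b.len(), 11);
+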
source§

impl<'de> Deserialize<'de> for Bytes

source§

fn deserialize<D>(deserializer: D) -> Result<Bytes, D::Error>
where + D: Deserializer<'de>,

Deserialize this value from the given Serde deserializer. Read more
source§

impl Drop for Bytes

source§

fn drop(&mut self)

Executes the destructor for this type. Read more
source§

impl Extend<Bytes> for BytesMut

source§

fn extend<T>(&mut self, iter: T)
where + T: IntoIterator<Item = Bytes>,

Extends a collection with the contents of an iterator. Read more
source§

fn extend_one(&mut self, item: A)

🔬This is a nightly-only experimental API. (extend_one)
Extends a collection with exactly one element.
source§

fn extend_reserve(&mut self, additional: usize)

🔬This is a nightly-only experimental API. (extend_one)
Reserves capacity in a collection for the given number of additional elements. Read more
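
A brief sketch of extending a BytesMut from an iterator of Bytes values:

+ +
use bytes::{Bytes, BytesMut};
+
+let mut buf = BytesMut::new();
+buf.extend([Bytes::from_static(b"foo"), Bytes::from_static(b"bar")]);
+
+assert_eq!(&buf[..], b"foobar");
+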
source§

impl From<&'static [u8]> for Bytes

source§

fn from(slice: &'static [u8]) -> Bytes

Converts to this type from the input type.
source§

impl From<&'static str> for Bytes

source§

fn from(slice: &'static str) -> Bytes

Converts to this type from the input type.
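
Since the input already lives for 'static, these two conversions do not copy the data. For example:

+ +
use bytes::Bytes;
+
+let from_slice = Bytes::from(&b"static bytes"[..]);
+let from_str = Bytes::from("static str");
+
+assert_eq!(from_slice, Bytes::from_static(b"static bytes"));
+assert_eq!(&from_str[..], b"static str");
+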
source§

impl From<Box<[u8]>> for Bytes

source§

fn from(slice: Box<[u8]>) -> Bytes

Converts to this type from the input type.
source§

impl From<Bytes> for BytesMut

source§

fn from(bytes: Bytes) -> Self

Convert self into BytesMut.

+

If bytes holds the only reference to the entire original buffer, this will return a +BytesMut with the contents of bytes without copying. +Otherwise, the subset of the original buffer referenced by bytes is +copied into a new BytesMut.

+
§Examples
+
use bytes::{Bytes, BytesMut};
+
+let bytes = Bytes::from(b"hello".to_vec());
+assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
+
source§

impl From<Bytes> for Vec<u8>

source§

fn from(bytes: Bytes) -> Vec<u8>

Converts to this type from the input type.
source§

impl From<BytesMut> for Bytes

source§

fn from(src: BytesMut) -> Bytes

Converts to this type from the input type.
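
This conversion behaves like BytesMut::freeze: the written bytes become an immutable, shareable Bytes. A minimal sketch:

+ +
use bytes::{BufMut, Bytes, BytesMut};
+
+let mut buf = BytesMut::with_capacity(16);
+buf.put(&b"ping"[..]);
+
+let frozen: Bytes = buf.into();
+assert_eq!(&frozen[..], b"ping");
+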
source§

impl From<String> for Bytes

source§

fn from(s: String) -> Bytes

Converts to this type from the input type.
source§

impl From<Vec<u8>> for Bytes

source§

fn from(vec: Vec<u8>) -> Bytes

Converts to this type from the input type.
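
Like the String conversion above, this takes ownership of the input; a quick sketch of both:

+ +
use bytes::Bytes;
+
+let from_vec = Bytes::from(vec![1u8, 2, 3]);
+let from_string = Bytes::from(String::from("hello"));
+
+assert_eq!(&from_vec[..], &[1u8, 2, 3][..]);
+assert_eq!(&from_string[..], b"hello");
+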
source§

impl FromIterator<u8> for Bytes

source§

fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self

Creates a value from an iterator. Read more
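
For example, collecting an iterator of u8 directly into a Bytes:

+ +
use bytes::Bytes;
+
+let b: Bytes = (b'a'..=b'e').collect();
+assert_eq!(&b[..], b"abcde");
+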
source§

impl Hash for Bytes

source§

fn hash<H>(&self, state: &mut H)
where + H: Hasher,

Feeds this value into the given Hasher. Read more
1.3.0 · source§

fn hash_slice<H>(data: &[Self], state: &mut H)
where + H: Hasher, + Self: Sized,

Feeds a slice of this type into the given Hasher. Read more
source§

impl<'a> IntoIterator for &'a Bytes

source§

type Item = &'a u8

The type of the elements being iterated over.
source§

type IntoIter = Iter<'a, u8>

Which kind of iterator are we turning this into?
source§

fn into_iter(self) -> Self::IntoIter

Creates an iterator from a value. Read more
source§

impl IntoIterator for Bytes

source§

type Item = u8

The type of the elements being iterated over.
source§

type IntoIter = IntoIter<Bytes>

Which kind of iterator are we turning this into?
source§

fn into_iter(self) -> Self::IntoIter

Creates an iterator from a value. Read more
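
A short sketch of both iterator forms (by value over u8, and by reference over &u8):

+ +
use bytes::Bytes;
+
+let b = Bytes::from_static(b"hi");
+
+let owned: Vec<u8> = b.clone().into_iter().collect();
+assert_eq!(owned, vec![b'h', b'i']);
+
+let total: u32 = (&b).into_iter().map(|&byte| byte as u32).sum();
+assert_eq!(total, b'h' as u32 + b'i' as u32);
+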
source§

impl LowerHex for Bytes

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
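
A brief sketch of the hex formatting (each byte is rendered as two hex digits):

+ +
use bytes::Bytes;
+
+let b = Bytes::from_static(&[0x00, 0x1a, 0xff]);
+assert_eq!(format!("{:x}", b), "001aff");
+assert_eq!(format!("{:X}", b), "001AFF");   // `UpperHex` (below) is analogous
+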
source§

impl Ord for Bytes

source§

fn cmp(&self, other: &Bytes) -> Ordering

This method returns an Ordering between self and other. Read more
1.21.0 · source§

fn max(self, other: Self) -> Self
where + Self: Sized,

Compares and returns the maximum of two values. Read more
1.21.0 · source§

fn min(self, other: Self) -> Self
where + Self: Sized,

Compares and returns the minimum of two values. Read more
1.50.0 · source§

fn clamp(self, min: Self, max: Self) -> Self
where + Self: Sized + PartialOrd,

Restrict a value to a certain interval. Read more
source§

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where + Bytes: PartialEq<T>,

source§

fn eq(&self, other: &&'a T) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<[u8]> for Bytes

source§

fn eq(&self, other: &[u8]) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
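
Together, this family of PartialEq impls (including those listed below) allows comparing Bytes directly against the common byte and string types, for example:

+ +
use bytes::Bytes;
+
+let b = Bytes::from_static(b"hello");
+
+assert!(b == &b"hello"[..]);         // against a byte slice
+assert!(b == "hello");               // against a string slice
+assert!(b == "hello".to_string());   // against a `String`
+assert!("hello" == b);               // the reverse direction also works
+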
source§

impl PartialEq<Bytes> for &[u8]

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for &str

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for [u8]

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for BytesMut

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for String

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for Vec<u8>

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for str

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for Bytes

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<String> for Bytes

source§

fn eq(&self, other: &String) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Vec<u8>> for Bytes

source§

fn eq(&self, other: &Vec<u8>) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<str> for Bytes

source§

fn eq(&self, other: &str) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq for Bytes

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where + Bytes: PartialOrd<T>,

source§

fn partial_cmp(&self, other: &&'a T) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<[u8]> for Bytes

source§

fn partial_cmp(&self, other: &[u8]) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Bytes> for &[u8]

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Bytes> for &str

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Bytes> for [u8]

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Bytes> for String

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Bytes> for Vec<u8>

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Bytes> for str

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<String> for Bytes

source§

fn partial_cmp(&self, other: &String) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Vec<u8>> for Bytes

source§

fn partial_cmp(&self, other: &Vec<u8>) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<str> for Bytes

source§

fn partial_cmp(&self, other: &str) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd for Bytes

source§

fn partial_cmp(&self, other: &Bytes) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl Serialize for Bytes

source§

fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where + S: Serializer,

Serialize this value into the given Serde serializer. Read more
source§

impl UpperHex for Bytes

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl Eq for Bytes

source§

impl Send for Bytes

source§

impl Sync for Bytes

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> CloneToUninit for T
where + T: Clone,

source§

unsafe fn clone_to_uninit(&self, dst: *mut T)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
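Concretely (a hedged sketch, not part of the generated page; it assumes the From<Vec<u8>> and From<&'static str> impls documented elsewhere on the Bytes page):

use bytes::Bytes;

// The blanket impl above turns those From impls into Into conversions for free.
let from_vec: Bytes = vec![1u8, 2, 3].into();
let from_str: Bytes = "hello".into();

assert_eq!(&from_vec[..], &[1, 2, 3][..]);
assert_eq!(&from_str[..], b"hello");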
source§

impl<T> ToOwned for T
where + T: Clone,

source§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
source§

impl<T> DeserializeOwned for T
where + T: for<'de> Deserialize<'de>,

\ No newline at end of file diff --git a/bytes/struct.BytesMut.html b/bytes/struct.BytesMut.html new file mode 100644 index 000000000..e8cd44b99 --- /dev/null +++ b/bytes/struct.BytesMut.html @@ -0,0 +1,2931 @@ +BytesMut in bytes - Rust

Struct bytes::BytesMut

source ·
pub struct BytesMut { /* private fields */ }
Expand description

A unique reference to a contiguous slice of memory.

+

BytesMut represents a unique view into a potentially shared memory region. +Given the uniqueness guarantee, owners of BytesMut handles are able to +mutate the memory.

+

BytesMut can be thought of as containing a buf: Arc<Vec<u8>>, an offset +into buf, a slice length, and a guarantee that no other BytesMut for the +same buf overlaps with its slice. That guarantee means that a write lock +is not required.

+

§Growth

+

BytesMut’s BufMut implementation will implicitly grow its buffer as +necessary. However, explicitly reserving the required space up-front before +a series of inserts will be more efficient.

+
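For instance, a minimal sketch (not part of the generated page) of the up-front reservation recommended above:

use bytes::{BytesMut, BufMut};

// Reserving once avoids repeated implicit growth while appending.
let mut buf = BytesMut::new();
buf.reserve(3 * 4);
for n in 0u32..3 {
    buf.put_u32(n); // would also grow implicitly without the reserve
}
assert_eq!(buf.len(), 12);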

§Examples

+
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(64);
+
+buf.put_u8(b'h');
+buf.put_u8(b'e');
+buf.put(&b"llo"[..]);
+
+assert_eq!(&buf[..], b"hello");
+
+// Freeze the buffer so that it can be shared
+let a = buf.freeze();
+
+// This does not allocate, instead `b` points to the same memory.
+let b = a.clone();
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b"hello");
+

Implementations§

source§

impl BytesMut

source

pub fn with_capacity(capacity: usize) -> BytesMut

Creates a new BytesMut with the specified capacity.

+

The returned BytesMut will be able to hold at least capacity bytes +without reallocating.

+

It is important to note that this function does not specify the length +of the returned BytesMut, but only the capacity.

+
§Examples
+
use bytes::{BytesMut, BufMut};
+
+let mut bytes = BytesMut::with_capacity(64);
+
+// `bytes` contains no data, even though there is capacity
+assert_eq!(bytes.len(), 0);
+
+bytes.put(&b"hello world"[..]);
+
+assert_eq!(&bytes[..], b"hello world");
+
source

pub fn new() -> BytesMut

Creates a new BytesMut with default capacity.

+

The resulting object has a length of 0 and an unspecified capacity. +This function does not allocate.

+
§Examples
+
use bytes::{BytesMut, BufMut};
+
+let mut bytes = BytesMut::new();
+
+assert_eq!(0, bytes.len());
+
+bytes.reserve(2);
+bytes.put_slice(b"xy");
+
+assert_eq!(&b"xy"[..], &bytes[..]);
+
source

pub fn len(&self) -> usize

Returns the number of bytes contained in this BytesMut.

+
§Examples
+
use bytes::BytesMut;
+
+let b = BytesMut::from(&b"hello"[..]);
+assert_eq!(b.len(), 5);
+
source

pub fn is_empty(&self) -> bool

Returns true if the BytesMut has a length of 0.

+
§Examples
+
use bytes::BytesMut;
+
+let b = BytesMut::with_capacity(64);
+assert!(b.is_empty());
+
source

pub fn capacity(&self) -> usize

Returns the number of bytes the BytesMut can hold without reallocating.

+
§Examples
+
use bytes::BytesMut;
+
+let b = BytesMut::with_capacity(64);
+assert_eq!(b.capacity(), 64);
+
source

pub fn freeze(self) -> Bytes

Converts self into an immutable Bytes.

+

The conversion is zero cost and is used to indicate that the slice +referenced by the handle will no longer be mutated. Once the conversion +is done, the handle can be cloned and shared across threads.

+
§Examples
+
use bytes::{BytesMut, BufMut};
+use std::thread;
+
+let mut b = BytesMut::with_capacity(64);
+b.put(&b"hello world"[..]);
+let b1 = b.freeze();
+let b2 = b1.clone();
+
+let th = thread::spawn(move || {
+    assert_eq!(&b1[..], b"hello world");
+});
+
+assert_eq!(&b2[..], b"hello world");
+th.join().unwrap();
+
source

pub fn zeroed(len: usize) -> BytesMut

Creates a new BytesMut containing len zeros.

+

The resulting object has a length of len and a capacity greater +than or equal to len. The entire length of the object will be filled +with zeros.

+

On some platforms or allocators this function may be faster than +a manual implementation.

+
§Examples
+
use bytes::BytesMut;
+
+let zeros = BytesMut::zeroed(42);
+
+assert!(zeros.capacity() >= 42);
+assert_eq!(zeros.len(), 42);
+zeros.into_iter().for_each(|x| assert_eq!(x, 0));
+
source

pub fn split_off(&mut self, at: usize) -> BytesMut

Splits the bytes into two at the given index.

+

Afterwards self contains elements [0, at), and the returned +BytesMut contains elements [at, capacity).

+

This is an O(1) operation that just increases the reference count +and sets a few indices.

+
§Examples
+
use bytes::BytesMut;
+
+let mut a = BytesMut::from(&b"hello world"[..]);
+let mut b = a.split_off(5);
+
+a[0] = b'j';
+b[0] = b'!';
+
+assert_eq!(&a[..], b"jello");
+assert_eq!(&b[..], b"!world");
+
§Panics
+

Panics if at > capacity.

+
source

pub fn split(&mut self) -> BytesMut

Removes the bytes from the current view, returning them in a new +BytesMut handle.

+

Afterwards, self will be empty, but will retain any additional +capacity that it had before the operation. This is identical to +self.split_to(self.len()).

+

This is an O(1) operation that just increases the reference count and +sets a few indices.

+
§Examples
+
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(1024);
+buf.put(&b"hello world"[..]);
+
+let other = buf.split();
+
+assert!(buf.is_empty());
+assert_eq!(1013, buf.capacity());
+
+assert_eq!(other, b"hello world"[..]);
+
source

pub fn split_to(&mut self, at: usize) -> BytesMut

Splits the buffer into two at the given index.

+

Afterwards self contains elements [at, len), and the returned BytesMut +contains elements [0, at).

+

This is an O(1) operation that just increases the reference count and +sets a few indices.

+
§Examples
+
use bytes::BytesMut;
+
+let mut a = BytesMut::from(&b"hello world"[..]);
+let mut b = a.split_to(5);
+
+a[0] = b'!';
+b[0] = b'j';
+
+assert_eq!(&a[..], b"!world");
+assert_eq!(&b[..], b"jello");
+
§Panics
+

Panics if at > len.

+
source

pub fn truncate(&mut self, len: usize)

Shortens the buffer, keeping the first len bytes and dropping the +rest.

+

If len is greater than the buffer’s current length, this has no +effect.

+

Existing underlying capacity is preserved.

+

The split_off method can emulate truncate, but this causes the +excess bytes to be returned instead of dropped.

+
§Examples
+
use bytes::BytesMut;
+
+let mut buf = BytesMut::from(&b"hello world"[..]);
+buf.truncate(5);
+assert_eq!(buf, b"hello"[..]);
+
source

pub fn clear(&mut self)

Clears the buffer, removing all data. Existing capacity is preserved.

+
§Examples
+
use bytes::BytesMut;
+
+let mut buf = BytesMut::from(&b"hello world"[..]);
+buf.clear();
+assert!(buf.is_empty());
+
source

pub fn resize(&mut self, new_len: usize, value: u8)

Resizes the buffer so that len is equal to new_len.

+

If new_len is greater than len, the buffer is extended by the +difference with each additional byte set to value. If new_len is +less than len, the buffer is simply truncated.

+
§Examples
+
use bytes::BytesMut;
+
+let mut buf = BytesMut::new();
+
+buf.resize(3, 0x1);
+assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
+
+buf.resize(2, 0x2);
+assert_eq!(&buf[..], &[0x1, 0x1]);
+
+buf.resize(4, 0x3);
+assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
+
source

pub unsafe fn set_len(&mut self, len: usize)

Sets the length of the buffer.

+

This will explicitly set the size of the buffer without actually +modifying the data, so it is up to the caller to ensure that the data +has been initialized.

+
§Examples
+
use bytes::BytesMut;
+
+let mut b = BytesMut::from(&b"hello world"[..]);
+
+unsafe {
+    b.set_len(5);
+}
+
+assert_eq!(&b[..], b"hello");
+
+unsafe {
+    b.set_len(11);
+}
+
+assert_eq!(&b[..], b"hello world");
+
source

pub fn reserve(&mut self, additional: usize)

Reserves capacity for at least additional more bytes to be inserted +into the given BytesMut.

+

More than additional bytes may be reserved in order to avoid frequent +reallocations. A call to reserve may result in an allocation.

+

Before allocating new buffer space, the function will attempt to reclaim +space in the existing buffer. If the current handle references a view +into a larger original buffer, and all other handles referencing part +of the same original buffer have been dropped, then the current view +can be copied/shifted to the front of the buffer and the handle can take +ownership of the full buffer, provided that the full buffer is large +enough to fit the requested additional capacity.

+

This optimization will only happen if shifting the data from the current +view to the front of the buffer is not too expensive in terms of the +(amortized) time required. The precise condition is subject to change; +as of now, the length of the data being shifted needs to be at least as +large as the distance that it’s shifted by. If the current view is empty +and the original buffer is large enough to fit the requested additional +capacity, then reallocations will never happen.

+
§Examples
+

In the following example, a new buffer is allocated.

+ +
use bytes::BytesMut;
+
+let mut buf = BytesMut::from(&b"hello"[..]);
+buf.reserve(64);
+assert!(buf.capacity() >= 69);
+

In the following example, the existing buffer is reclaimed.

+ +
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(128);
+buf.put(&[0; 64][..]);
+
+let ptr = buf.as_ptr();
+let other = buf.split();
+
+assert!(buf.is_empty());
+assert_eq!(buf.capacity(), 64);
+
+drop(other);
+buf.reserve(128);
+
+assert_eq!(buf.capacity(), 128);
+assert_eq!(buf.as_ptr(), ptr);
+
§Panics
+

Panics if the new capacity overflows usize.

+
source

pub fn try_reclaim(&mut self, additional: usize) -> bool

Attempts to cheaply reclaim already allocated capacity for at least additional more +bytes to be inserted into the given BytesMut and returns true if it succeeded.

+

try_reclaim behaves exactly like reserve, except that it never allocates new storage +and returns a bool indicating whether it was successful in doing so:

+

try_reclaim returns false under these conditions:

+
  • The spare capacity left is less than additional bytes AND
  • The existing allocation cannot be reclaimed cheaply or it was less than +additional bytes in size
+

Reclaiming the allocation cheaply is possible if the BytesMut has no outstanding +references through other BytesMuts or Bytes which point to the same underlying +storage.

+
§Examples
+
use bytes::BytesMut;
+
+let mut buf = BytesMut::with_capacity(64);
+assert_eq!(true, buf.try_reclaim(64));
+assert_eq!(64, buf.capacity());
+
+buf.extend_from_slice(b"abcd");
+let mut split = buf.split();
+assert_eq!(60, buf.capacity());
+assert_eq!(4, split.capacity());
+assert_eq!(false, split.try_reclaim(64));
+assert_eq!(false, buf.try_reclaim(64));
+// The split buffer is filled with "abcd"
+assert_eq!(false, split.try_reclaim(4));
+// buf is empty and has capacity for 60 bytes
+assert_eq!(true, buf.try_reclaim(60));
+
+drop(buf);
+assert_eq!(false, split.try_reclaim(64));
+
+split.clear();
+assert_eq!(4, split.capacity());
+assert_eq!(true, split.try_reclaim(64));
+assert_eq!(64, split.capacity());
+
source

pub fn extend_from_slice(&mut self, extend: &[u8])

Appends given bytes to this BytesMut.

+

If this BytesMut object does not have enough capacity, it is resized +first.

+
§Examples
+
use bytes::BytesMut;
+
+let mut buf = BytesMut::with_capacity(0);
+buf.extend_from_slice(b"aaabbb");
+buf.extend_from_slice(b"cccddd");
+
+assert_eq!(b"aaabbbcccddd", &buf[..]);
+
source

pub fn unsplit(&mut self, other: BytesMut)

Absorbs a BytesMut that was previously split off.

+

If the two BytesMut objects were previously contiguous and not mutated +in a way that causes re-allocation, i.e., if other was created by +calling split_off on this BytesMut, then this is an O(1) operation +that just decreases a reference count and sets a few indices. +Otherwise this method degenerates to +self.extend_from_slice(other.as_ref()).

+
§Examples
+
use bytes::BytesMut;
+
+let mut buf = BytesMut::with_capacity(64);
+buf.extend_from_slice(b"aaabbbcccddd");
+
+let split = buf.split_off(6);
+assert_eq!(b"aaabbb", &buf[..]);
+assert_eq!(b"cccddd", &split[..]);
+
+buf.unsplit(split);
+assert_eq!(b"aaabbbcccddd", &buf[..]);
+
source

pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>]

Returns the remaining spare capacity of the buffer as a slice of MaybeUninit<u8>.

+

The returned slice can be used to fill the buffer with data (e.g. by +reading from a file) before marking the data as initialized using the +set_len method.

+
§Examples
+
use bytes::BytesMut;
+
+// Allocate buffer big enough for 10 bytes.
+let mut buf = BytesMut::with_capacity(10);
+
+// Fill in the first 3 elements.
+let uninit = buf.spare_capacity_mut();
+uninit[0].write(0);
+uninit[1].write(1);
+uninit[2].write(2);
+
+// Mark the first 3 bytes of the buffer as being initialized.
+unsafe {
+    buf.set_len(3);
+}
+
+assert_eq!(&buf[..], &[0, 1, 2]);
+

Methods from Deref<Target = [u8]>§

source

pub fn as_str(&self) -> &str

🔬This is a nightly-only experimental API. (ascii_char)

Views this slice of ASCII characters as a UTF-8 str.

+
source

pub fn as_bytes(&self) -> &[u8]

🔬This is a nightly-only experimental API. (ascii_char)

Views this slice of ASCII characters as a slice of u8 bytes.

+
1.23.0 · source

pub fn is_ascii(&self) -> bool

Checks if all bytes in this slice are within the ASCII range.

+
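A short illustration (not part of the generated page):

let ascii = b"hello!\n";
let non_ascii = "Grüße".as_bytes();

assert!(ascii.is_ascii());
assert!(!non_ascii.is_ascii());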
source

pub fn as_ascii(&self) -> Option<&[AsciiChar]>

🔬This is a nightly-only experimental API. (ascii_char)

If this slice is_ascii, returns it as a slice of +ASCII characters, otherwise returns None.

+
source

pub unsafe fn as_ascii_unchecked(&self) -> &[AsciiChar]

🔬This is a nightly-only experimental API. (ascii_char)

Converts this slice of bytes into a slice of ASCII characters, +without checking whether they’re valid.

+
§Safety
+

Every byte in the slice must be in 0..=127, or else this is UB.

+
1.23.0 · source

pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool

Checks that two slices are an ASCII case-insensitive match.

+

Same as to_ascii_lowercase(a) == to_ascii_lowercase(b), +but without allocating and copying temporaries.

+
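A short illustration (not part of the generated page):

assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
assert!(!b"Ferris".eq_ignore_ascii_case(b"Ferrum"));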
1.23.0 · source

pub fn make_ascii_uppercase(&mut self)

Converts this slice to its ASCII upper case equivalent in-place.

+

ASCII letters ‘a’ to ‘z’ are mapped to ‘A’ to ‘Z’, +but non-ASCII letters are unchanged.

+

To return a new uppercased value without modifying the existing one, use +to_ascii_uppercase.

+
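A short illustration (not part of the generated page):

let mut bytes = b"hello, world!".to_vec();
bytes.make_ascii_uppercase();
assert_eq!(bytes, b"HELLO, WORLD!");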
1.23.0 · source

pub fn make_ascii_lowercase(&mut self)

Converts this slice to its ASCII lower case equivalent in-place.

+

ASCII letters ‘A’ to ‘Z’ are mapped to ‘a’ to ‘z’, +but non-ASCII letters are unchanged.

+

To return a new lowercased value without modifying the existing one, use +to_ascii_lowercase.

+
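A short illustration (not part of the generated page) that also shows non-ASCII bytes being left untouched:

let mut bytes = "CAFÉ".as_bytes().to_vec();
bytes.make_ascii_lowercase();
// The ASCII letters are lowercased; the two bytes of the non-ASCII 'É' are unchanged.
assert_eq!(bytes, "cafÉ".as_bytes());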
1.60.0 · source

pub fn escape_ascii(&self) -> EscapeAscii<'_>

Returns an iterator that produces an escaped version of this slice, +treating it as an ASCII string.

+
§Examples
+

+let s = b"0\t\r\n'\"\\\x9d";
+let escaped = s.escape_ascii().to_string();
+assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
+
1.80.0 · source

pub fn trim_ascii_start(&self) -> &[u8]

Returns a byte slice with leading ASCII whitespace bytes removed.

+

‘Whitespace’ refers to the definition used by +u8::is_ascii_whitespace.

+
§Examples
+
assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
+assert_eq!(b"  ".trim_ascii_start(), b"");
+assert_eq!(b"".trim_ascii_start(), b"");
+
1.80.0 · source

pub fn trim_ascii_end(&self) -> &[u8]

Returns a byte slice with trailing ASCII whitespace bytes removed.

+

‘Whitespace’ refers to the definition used by +u8::is_ascii_whitespace.

+
§Examples
+
assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
+assert_eq!(b"  ".trim_ascii_end(), b"");
+assert_eq!(b"".trim_ascii_end(), b"");
+
1.80.0 · source

pub fn trim_ascii(&self) -> &[u8]

Returns a byte slice with leading and trailing ASCII whitespace bytes +removed.

+

‘Whitespace’ refers to the definition used by +u8::is_ascii_whitespace.

+
§Examples
+
assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
+assert_eq!(b"  ".trim_ascii(), b"");
+assert_eq!(b"".trim_ascii(), b"");
+
1.0.0 · source

pub fn len(&self) -> usize

Returns the number of elements in the slice.

+
§Examples
+
let a = [1, 2, 3];
+assert_eq!(a.len(), 3);
+
1.0.0 · source

pub fn is_empty(&self) -> bool

Returns true if the slice has a length of 0.

+
§Examples
+
let a = [1, 2, 3];
+assert!(!a.is_empty());
+
+let b: &[i32] = &[];
+assert!(b.is_empty());
+
1.0.0 · source

pub fn first(&self) -> Option<&T>

Returns the first element of the slice, or None if it is empty.

+
§Examples
+
let v = [10, 40, 30];
+assert_eq!(Some(&10), v.first());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.first());
+
1.0.0 · source

pub fn first_mut(&mut self) -> Option<&mut T>

Returns a mutable reference to the first element of the slice, or None if it is empty.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some(first) = x.first_mut() {
+    *first = 5;
+}
+assert_eq!(x, &[5, 1, 2]);
+
+let y: &mut [i32] = &mut [];
+assert_eq!(None, y.first_mut());
+
1.5.0 · source

pub fn split_first(&self) -> Option<(&T, &[T])>

Returns the first and all the rest of the elements of the slice, or None if it is empty.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((first, elements)) = x.split_first() {
+    assert_eq!(first, &0);
+    assert_eq!(elements, &[1, 2]);
+}
+
1.5.0 · source

pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])>

Returns the first and all the rest of the elements of the slice, or None if it is empty.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some((first, elements)) = x.split_first_mut() {
+    *first = 3;
+    elements[0] = 4;
+    elements[1] = 5;
+}
+assert_eq!(x, &[3, 4, 5]);
+
1.5.0 · source

pub fn split_last(&self) -> Option<(&T, &[T])>

Returns the last and all the rest of the elements of the slice, or None if it is empty.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((last, elements)) = x.split_last() {
+    assert_eq!(last, &2);
+    assert_eq!(elements, &[0, 1]);
+}
+
1.5.0 · source

pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])>

Returns the last and all the rest of the elements of the slice, or None if it is empty.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some((last, elements)) = x.split_last_mut() {
+    *last = 3;
+    elements[0] = 4;
+    elements[1] = 5;
+}
+assert_eq!(x, &[4, 5, 3]);
+
1.0.0 · source

pub fn last(&self) -> Option<&T>

Returns the last element of the slice, or None if it is empty.

+
§Examples
+
let v = [10, 40, 30];
+assert_eq!(Some(&30), v.last());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.last());
+
1.0.0 · source

pub fn last_mut(&mut self) -> Option<&mut T>

Returns a mutable reference to the last item in the slice, or None if it is empty.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some(last) = x.last_mut() {
+    *last = 10;
+}
+assert_eq!(x, &[0, 1, 10]);
+
+let y: &mut [i32] = &mut [];
+assert_eq!(None, y.last_mut());
+
1.77.0 · source

pub fn first_chunk<const N: usize>(&self) -> Option<&[T; N]>

Returns an array reference to the first N items in the slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let u = [10, 40, 30];
+assert_eq!(Some(&[10, 40]), u.first_chunk::<2>());
+
+let v: &[i32] = &[10];
+assert_eq!(None, v.first_chunk::<2>());
+
+let w: &[i32] = &[];
+assert_eq!(Some(&[]), w.first_chunk::<0>());
+
1.77.0 · source

pub fn first_chunk_mut<const N: usize>(&mut self) -> Option<&mut [T; N]>

Returns a mutable array reference to the first N items in the slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some(first) = x.first_chunk_mut::<2>() {
+    first[0] = 5;
+    first[1] = 4;
+}
+assert_eq!(x, &[5, 4, 2]);
+
+assert_eq!(None, x.first_chunk_mut::<4>());
+
1.77.0 · source

pub fn split_first_chunk<const N: usize>(&self) -> Option<(&[T; N], &[T])>

Returns an array reference to the first N items in the slice and the remaining slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((first, elements)) = x.split_first_chunk::<2>() {
+    assert_eq!(first, &[0, 1]);
+    assert_eq!(elements, &[2]);
+}
+
+assert_eq!(None, x.split_first_chunk::<4>());
+
1.77.0 · source

pub fn split_first_chunk_mut<const N: usize>( + &mut self, +) -> Option<(&mut [T; N], &mut [T])>

Returns a mutable array reference to the first N items in the slice and the remaining +slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some((first, elements)) = x.split_first_chunk_mut::<2>() {
+    first[0] = 3;
+    first[1] = 4;
+    elements[0] = 5;
+}
+assert_eq!(x, &[3, 4, 5]);
+
+assert_eq!(None, x.split_first_chunk_mut::<4>());
+
1.77.0 · source

pub fn split_last_chunk<const N: usize>(&self) -> Option<(&[T], &[T; N])>

Returns an array reference to the last N items in the slice and the remaining slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &[0, 1, 2];
+
+if let Some((elements, last)) = x.split_last_chunk::<2>() {
+    assert_eq!(elements, &[0]);
+    assert_eq!(last, &[1, 2]);
+}
+
+assert_eq!(None, x.split_last_chunk::<4>());
+
1.77.0 · source

pub fn split_last_chunk_mut<const N: usize>( + &mut self, +) -> Option<(&mut [T], &mut [T; N])>

Returns a mutable array reference to the last N items in the slice and the remaining +slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some((elements, last)) = x.split_last_chunk_mut::<2>() {
+    last[0] = 3;
+    last[1] = 4;
+    elements[0] = 5;
+}
+assert_eq!(x, &[5, 3, 4]);
+
+assert_eq!(None, x.split_last_chunk_mut::<4>());
+
1.77.0 · source

pub fn last_chunk<const N: usize>(&self) -> Option<&[T; N]>

Returns an array reference to the last N items in the slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let u = [10, 40, 30];
+assert_eq!(Some(&[40, 30]), u.last_chunk::<2>());
+
+let v: &[i32] = &[10];
+assert_eq!(None, v.last_chunk::<2>());
+
+let w: &[i32] = &[];
+assert_eq!(Some(&[]), w.last_chunk::<0>());
+
1.77.0 · source

pub fn last_chunk_mut<const N: usize>(&mut self) -> Option<&mut [T; N]>

Returns a mutable array reference to the last N items in the slice.

+

If the slice is not at least N in length, this will return None.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some(last) = x.last_chunk_mut::<2>() {
+    last[0] = 10;
+    last[1] = 20;
+}
+assert_eq!(x, &[0, 10, 20]);
+
+assert_eq!(None, x.last_chunk_mut::<4>());
+
1.0.0 · source

pub fn get<I>(&self, index: I) -> Option<&<I as SliceIndex<[T]>>::Output>
where + I: SliceIndex<[T]>,

Returns a reference to an element or subslice depending on the type of +index.

+
  • If given a position, returns a reference to the element at that +position or None if out of bounds.
  • If given a range, returns the subslice corresponding to that range, +or None if out of bounds.
+
§Examples
+
let v = [10, 40, 30];
+assert_eq!(Some(&40), v.get(1));
+assert_eq!(Some(&[10, 40][..]), v.get(0..2));
+assert_eq!(None, v.get(3));
+assert_eq!(None, v.get(0..4));
+
1.0.0 · source

pub fn get_mut<I>( + &mut self, + index: I, +) -> Option<&mut <I as SliceIndex<[T]>>::Output>
where + I: SliceIndex<[T]>,

Returns a mutable reference to an element or subslice depending on the +type of index (see get) or None if the index is out of bounds.

+
§Examples
+
let x = &mut [0, 1, 2];
+
+if let Some(elem) = x.get_mut(1) {
+    *elem = 42;
+}
+assert_eq!(x, &[0, 42, 2]);
+
1.0.0 · source

pub unsafe fn get_unchecked<I>( + &self, + index: I, +) -> &<I as SliceIndex<[T]>>::Output
where + I: SliceIndex<[T]>,

Returns a reference to an element or subslice, without doing bounds +checking.

+

For a safe alternative see get.

+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used.

+

You can think of this like .get(index).unwrap_unchecked(). It’s UB +to call .get_unchecked(len), even if you immediately convert to a +pointer. And it’s UB to call .get_unchecked(..len + 1), +.get_unchecked(..=len), or similar.

+
§Examples
+
let x = &[1, 2, 4];
+
+unsafe {
+    assert_eq!(x.get_unchecked(1), &2);
+}
+
1.0.0 · source

pub unsafe fn get_unchecked_mut<I>( + &mut self, + index: I, +) -> &mut <I as SliceIndex<[T]>>::Output
where + I: SliceIndex<[T]>,

Returns a mutable reference to an element or subslice, without doing +bounds checking.

+

For a safe alternative see get_mut.

+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used.

+

You can think of this like .get_mut(index).unwrap_unchecked(). It’s +UB to call .get_unchecked_mut(len), even if you immediately convert +to a pointer. And it’s UB to call .get_unchecked_mut(..len + 1), +.get_unchecked_mut(..=len), or similar.

+
§Examples
+
let x = &mut [1, 2, 4];
+
+unsafe {
+    let elem = x.get_unchecked_mut(1);
+    *elem = 13;
+}
+assert_eq!(x, &[1, 13, 4]);
+
1.0.0 · source

pub fn as_ptr(&self) -> *const T

Returns a raw pointer to the slice’s buffer.

+

The caller must ensure that the slice outlives the pointer this +function returns, or else it will end up dangling.

+

The caller must also ensure that the memory the pointer (non-transitively) points to +is never written to (except inside an UnsafeCell) using this pointer or any pointer +derived from it. If you need to mutate the contents of the slice, use as_mut_ptr.

+

Modifying the container referenced by this slice may cause its buffer +to be reallocated, which would also make any pointers to it invalid.

+
§Examples
+
let x = &[1, 2, 4];
+let x_ptr = x.as_ptr();
+
+unsafe {
+    for i in 0..x.len() {
+        assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
+    }
+}
+
1.0.0 · source

pub fn as_mut_ptr(&mut self) -> *mut T

Returns an unsafe mutable pointer to the slice’s buffer.

+

The caller must ensure that the slice outlives the pointer this +function returns, or else it will end up dangling.

+

Modifying the container referenced by this slice may cause its buffer +to be reallocated, which would also make any pointers to it invalid.

+
§Examples
+
let x = &mut [1, 2, 4];
+let x_ptr = x.as_mut_ptr();
+
+unsafe {
+    for i in 0..x.len() {
+        *x_ptr.add(i) += 2;
+    }
+}
+assert_eq!(x, &[3, 4, 6]);
+
1.48.0 · source

pub fn as_ptr_range(&self) -> Range<*const T>

Returns the two raw pointers spanning the slice.

+

The returned range is half-open, which means that the end pointer +points one past the last element of the slice. This way, an empty +slice is represented by two equal pointers, and the difference between +the two pointers represents the size of the slice.

+

See as_ptr for warnings on using these pointers. The end pointer +requires extra caution, as it does not point to a valid element in the +slice.

+

This function is useful for interacting with foreign interfaces which +use two pointers to refer to a range of elements in memory, as is +common in C++.

+

It can also be useful to check if a pointer to an element refers to an +element of this slice:

+ +
let a = [1, 2, 3];
+let x = &a[1] as *const _;
+let y = &5 as *const _;
+
+assert!(a.as_ptr_range().contains(&x));
+assert!(!a.as_ptr_range().contains(&y));
+
1.48.0 · source

pub fn as_mut_ptr_range(&mut self) -> Range<*mut T>

Returns the two unsafe mutable pointers spanning the slice.

+

The returned range is half-open, which means that the end pointer +points one past the last element of the slice. This way, an empty +slice is represented by two equal pointers, and the difference between +the two pointers represents the size of the slice.

+

See as_mut_ptr for warnings on using these pointers. The end +pointer requires extra caution, as it does not point to a valid element +in the slice.

+

This function is useful for interacting with foreign interfaces which +use two pointers to refer to a range of elements in memory, as is +common in C++.

+
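A short illustration (not part of the generated page) of walking the half-open range:

let mut a = [1, 2, 3];
let range = a.as_mut_ptr_range();

unsafe {
    // Zero each element in place through the raw pointers.
    let mut p = range.start;
    while p != range.end {
        *p = 0;
        p = p.add(1);
    }
}
assert_eq!(a, [0, 0, 0]);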
1.0.0 · source

pub fn swap(&mut self, a: usize, b: usize)

Swaps two elements in the slice.

+

If a equals b, it’s guaranteed that the elements won’t change value.

+
§Arguments
+
  • a - The index of the first element
  • b - The index of the second element
+
§Panics
+

Panics if a or b are out of bounds.

+
§Examples
+
let mut v = ["a", "b", "c", "d", "e"];
+v.swap(2, 4);
+assert!(v == ["a", "b", "e", "d", "c"]);
+
source

pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize)

🔬This is a nightly-only experimental API. (slice_swap_unchecked)

Swaps two elements in the slice, without doing bounds checking.

+

For a safe alternative see swap.

+
§Arguments
+
  • a - The index of the first element
  • b - The index of the second element
+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior. +The caller has to ensure that a < self.len() and b < self.len().

+
§Examples
+
#![feature(slice_swap_unchecked)]
+
+let mut v = ["a", "b", "c", "d"];
+// SAFETY: we know that 1 and 3 are both indices of the slice
+unsafe { v.swap_unchecked(1, 3) };
+assert!(v == ["a", "d", "c", "b"]);
+
1.0.0 · source

pub fn reverse(&mut self)

Reverses the order of elements in the slice, in place.

+
§Examples
+
let mut v = [1, 2, 3];
+v.reverse();
+assert!(v == [3, 2, 1]);
+
1.0.0 · source

pub fn iter(&self) -> Iter<'_, T>

Returns an iterator over the slice.

+

The iterator yields all items from start to end.

+
§Examples
+
let x = &[1, 2, 4];
+let mut iterator = x.iter();
+
+assert_eq!(iterator.next(), Some(&1));
+assert_eq!(iterator.next(), Some(&2));
+assert_eq!(iterator.next(), Some(&4));
+assert_eq!(iterator.next(), None);
+
1.0.0 · source

pub fn iter_mut(&mut self) -> IterMut<'_, T>

Returns an iterator that allows modifying each value.

+

The iterator yields all items from start to end.

+
§Examples
+
let x = &mut [1, 2, 4];
+for elem in x.iter_mut() {
+    *elem += 2;
+}
+assert_eq!(x, &[3, 4, 6]);
+
1.0.0 · source

pub fn windows(&self, size: usize) -> Windows<'_, T>

Returns an iterator over all contiguous windows of length +size. The windows overlap. If the slice is shorter than +size, the iterator returns no values.

+
§Panics
+

Panics if size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.windows(3);
+assert_eq!(iter.next().unwrap(), &['l', 'o', 'r']);
+assert_eq!(iter.next().unwrap(), &['o', 'r', 'e']);
+assert_eq!(iter.next().unwrap(), &['r', 'e', 'm']);
+assert!(iter.next().is_none());
+

If the slice is shorter than size:

+ +
let slice = ['f', 'o', 'o'];
+let mut iter = slice.windows(4);
+assert!(iter.next().is_none());
+

There’s no windows_mut, as its existence would let safe code violate the +“only one &mut at a time to the same thing” rule. However, you can sometimes +use Cell::as_slice_of_cells in +conjunction with windows to accomplish something similar:

+ +
use std::cell::Cell;
+
+let mut array = ['R', 'u', 's', 't', ' ', '2', '0', '1', '5'];
+let slice = &mut array[..];
+let slice_of_cells: &[Cell<char>] = Cell::from_mut(slice).as_slice_of_cells();
+for w in slice_of_cells.windows(3) {
+    Cell::swap(&w[0], &w[2]);
+}
+assert_eq!(array, ['s', 't', ' ', '2', '0', '1', '5', 'u', 'R']);
+
1.0.0 · source

pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last chunk will not have length chunk_size.

+

See chunks_exact for a variant of this iterator that returns chunks of always exactly +chunk_size elements, and rchunks for the same iterator but starting at the end of the +slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert_eq!(iter.next().unwrap(), &['m']);
+assert!(iter.next().is_none());
+
1.0.0 · source

pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are mutable slices, and do not overlap. If chunk_size does not divide the +length of the slice, then the last chunk will not have length chunk_size.

+

See chunks_exact_mut for a variant of this iterator that returns chunks of always +exactly chunk_size elements, and rchunks_mut for the same iterator but starting at +the end of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.chunks_mut(2) {
+    for elem in chunk.iter_mut() {
+        *elem += count;
+    }
+    count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 3]);
+
1.31.0 · source

pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved +from the remainder function of the iterator.

+

Due to each chunk having exactly chunk_size elements, the compiler can often optimize the +resulting code better than in the case of chunks.

+

See chunks for a variant of this iterator that also returns the remainder as a smaller +chunk, and rchunks_exact for the same iterator but starting at the end of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
+
1.31.0 · source

pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are mutable slices, and do not overlap. If chunk_size does not divide the +length of the slice, then the last up to chunk_size-1 elements will be omitted and can be +retrieved from the into_remainder function of the iterator.

+

Due to each chunk having exactly chunk_size elements, the compiler can often optimize the +resulting code better than in the case of chunks_mut.

+

See chunks_mut for a variant of this iterator that also returns the remainder as a +smaller chunk, and rchunks_exact_mut for the same iterator but starting at the end of +the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.chunks_exact_mut(2) {
+    for elem in chunk.iter_mut() {
+        *elem += count;
+    }
+    count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 0]);
+
source

pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]]

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +assuming that there’s no remainder.

+
§Safety
+

This may only be called when

+
  • The slice splits exactly into N-element chunks (aka self.len() % N == 0).
  • N != 0.
+
§Examples
+
#![feature(slice_as_chunks)]
+let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
+let chunks: &[[char; 1]] =
+    // SAFETY: 1-element chunks never have remainder
+    unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+let chunks: &[[char; 3]] =
+    // SAFETY: The slice length (6) is a multiple of 3
+    unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
+
+// These would be unsound:
+// let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
+// let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
+
source

pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T])

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +starting at the beginning of the slice, +and a remainder slice with length strictly less than N.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (chunks, remainder) = slice.as_chunks();
+assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
+assert_eq!(remainder, &['m']);
+

If you expect the slice to be an exact multiple, you can combine +let-else with an empty slice pattern:

+ +
#![feature(slice_as_chunks)]
+let slice = ['R', 'u', 's', 't'];
+let (chunks, []) = slice.as_chunks::<2>() else {
+    panic!("slice didn't have even length")
+};
+assert_eq!(chunks, &[['R', 'u'], ['s', 't']]);
+
source

pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]])

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +starting at the end of the slice, +and a remainder slice with length strictly less than N.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (remainder, chunks) = slice.as_rchunks();
+assert_eq!(remainder, &['l']);
+assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
+
source

pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N>

🔬This is a nightly-only experimental API. (array_chunks)

Returns an iterator over N elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are array references and do not overlap. If N does not divide the +length of the slice, then the last up to N-1 elements will be omitted and can be +retrieved from the remainder function of the iterator.

+

This method is the const generic equivalent of chunks_exact.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(array_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.array_chunks();
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
+
source

pub unsafe fn as_chunks_unchecked_mut<const N: usize>( + &mut self, +) -> &mut [[T; N]]

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +assuming that there’s no remainder.

+
§Safety
+

This may only be called when

+
  • The slice splits exactly into N-element chunks (aka self.len() % N == 0).
  • N != 0.
+
§Examples
+
#![feature(slice_as_chunks)]
+let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
+let chunks: &mut [[char; 1]] =
+    // SAFETY: 1-element chunks never have remainder
+    unsafe { slice.as_chunks_unchecked_mut() };
+chunks[0] = ['L'];
+assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+let chunks: &mut [[char; 3]] =
+    // SAFETY: The slice length (6) is a multiple of 3
+    unsafe { slice.as_chunks_unchecked_mut() };
+chunks[1] = ['a', 'x', '?'];
+assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
+
+// These would be unsound:
+// let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
+// let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
+
source

pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T])

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +starting at the beginning of the slice, +and a remainder slice with length strictly less than N.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(slice_as_chunks)]
+let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+let (chunks, remainder) = v.as_chunks_mut();
+remainder[0] = 9;
+for chunk in chunks {
+    *chunk = [count; 2];
+    count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 9]);
+
source

pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]])

🔬This is a nightly-only experimental API. (slice_as_chunks)

Splits the slice into a slice of N-element arrays, +starting at the end of the slice, +and a remainder slice with length strictly less than N.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(slice_as_chunks)]
+let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+let (remainder, chunks) = v.as_rchunks_mut();
+remainder[0] = 9;
+for chunk in chunks {
+    *chunk = [count; 2];
+    count += 1;
+}
+assert_eq!(v, &[9, 1, 1, 2, 2]);
+
source

pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N>

🔬This is a nightly-only experimental API. (array_chunks)

Returns an iterator over N elements of the slice at a time, starting at the +beginning of the slice.

+

The chunks are mutable array references and do not overlap. If N does not divide +the length of the slice, then the last up to N-1 elements will be omitted and +can be retrieved from the into_remainder function of the iterator.

+

This method is the const generic equivalent of chunks_exact_mut.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(array_chunks)]
+let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.array_chunks_mut() {
+    *chunk = [count; 2];
+    count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 0]);
+
source

pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N>

🔬This is a nightly-only experimental API. (array_windows)

Returns an iterator over overlapping windows of N elements of a slice, +starting at the beginning of the slice.

+

This is the const generic equivalent of windows.

+

If N is greater than the size of the slice, it will return no windows.

+
§Panics
+

Panics if N is 0. This check will most probably get changed to a compile time +error before this method gets stabilized.

+
§Examples
+
#![feature(array_windows)]
+let slice = [0, 1, 2, 3];
+let mut iter = slice.array_windows();
+assert_eq!(iter.next().unwrap(), &[0, 1]);
+assert_eq!(iter.next().unwrap(), &[1, 2]);
+assert_eq!(iter.next().unwrap(), &[2, 3]);
+assert!(iter.next().is_none());
+
1.31.0 · source

pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the end +of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last chunk will not have length chunk_size.

+

See rchunks_exact for a variant of this iterator that returns chunks of always exactly +chunk_size elements, and chunks for the same iterator but starting at the beginning +of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.rchunks(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert_eq!(iter.next().unwrap(), &['l']);
+assert!(iter.next().is_none());
+
1.31.0 · source

pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the end +of the slice.

+

The chunks are mutable slices, and do not overlap. If chunk_size does not divide the +length of the slice, then the last chunk will not have length chunk_size.

+

See rchunks_exact_mut for a variant of this iterator that returns chunks of always +exactly chunk_size elements, and chunks_mut for the same iterator but starting at the +beginning of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.rchunks_mut(2) {
+    for elem in chunk.iter_mut() {
+        *elem += count;
+    }
+    count += 1;
+}
+assert_eq!(v, &[3, 2, 2, 1, 1]);
+
1.31.0 · source

pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the +end of the slice.

+

The chunks are slices and do not overlap. If chunk_size does not divide the length of the +slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved +from the remainder function of the iterator.

+

Due to each chunk having exactly chunk_size elements, the compiler can often optimize the +resulting code better than in the case of rchunks.

+

See rchunks for a variant of this iterator that also returns the remainder as a smaller +chunk, and chunks_exact for the same iterator but starting at the beginning of the +slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.rchunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['l']);
+
1.31.0 · source

pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T>

Returns an iterator over chunk_size elements of the slice at a time, starting at the end +of the slice.

+

The chunks are mutable slices, and do not overlap. If chunk_size does not divide the +length of the slice, then the last up to chunk_size-1 elements will be omitted and can be +retrieved from the into_remainder function of the iterator.

+

Due to each chunk having exactly chunk_size elements, the compiler can often optimize the +resulting code better than in the case of chunks_mut.

+

See rchunks_mut for a variant of this iterator that also returns the remainder as a +smaller chunk, and chunks_exact_mut for the same iterator but starting at the beginning +of the slice.

+
§Panics
+

Panics if chunk_size is 0.

+
§Examples
+
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.rchunks_exact_mut(2) {
+    for elem in chunk.iter_mut() {
+        *elem += count;
+    }
+    count += 1;
+}
+assert_eq!(v, &[0, 2, 2, 1, 1]);
+
1.77.0 · source

pub fn chunk_by<F>(&self, pred: F) -> ChunkBy<'_, T, F>
where + F: FnMut(&T, &T) -> bool,

Returns an iterator over the slice producing non-overlapping runs +of elements using the predicate to separate them.

+

The predicate is called for every pair of consecutive elements, +meaning that it is called on slice[0] and slice[1], +followed by slice[1] and slice[2], and so on.

+
§Examples
+
let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
+
+let mut iter = slice.chunk_by(|a, b| a == b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+assert_eq!(iter.next(), Some(&[3, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+assert_eq!(iter.next(), None);
+

This method can be used to extract the sorted subslices:

+ +
let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
+
+let mut iter = slice.chunk_by(|a, b| a <= b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
+assert_eq!(iter.next(), None);
+
1.77.0 · source

pub fn chunk_by_mut<F>(&mut self, pred: F) -> ChunkByMut<'_, T, F>
where + F: FnMut(&T, &T) -> bool,

Returns an iterator over the slice producing non-overlapping mutable +runs of elements using the predicate to separate them.

+

The predicate is called for every pair of consecutive elements, +meaning that it is called on slice[0] and slice[1], +followed by slice[1] and slice[2], and so on.

+
§Examples
+
let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
+
+let mut iter = slice.chunk_by_mut(|a, b| a == b);
+
+assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
+assert_eq!(iter.next(), Some(&mut [3, 3][..]));
+assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
+assert_eq!(iter.next(), None);
+

This method can be used to extract the sorted subslices:

+ +
let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
+
+let mut iter = slice.chunk_by_mut(|a, b| a <= b);
+
+assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
+assert_eq!(iter.next(), Some(&mut [2, 3][..]));
+assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
+assert_eq!(iter.next(), None);
+
1.0.0 · source

pub fn split_at(&self, mid: usize) -> (&[T], &[T])

Divides one slice into two at an index.

+

The first will contain all indices from [0, mid) (excluding +the index mid itself) and the second will contain all +indices from [mid, len) (excluding the index len itself).

+
§Panics
+

Panics if mid > len. For a non-panicking alternative see +split_at_checked.

+
§Examples
+
let v = [1, 2, 3, 4, 5, 6];
+
+{
+   let (left, right) = v.split_at(0);
+   assert_eq!(left, []);
+   assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+{
+    let (left, right) = v.split_at(2);
+    assert_eq!(left, [1, 2]);
+    assert_eq!(right, [3, 4, 5, 6]);
+}
+
+{
+    let (left, right) = v.split_at(6);
+    assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+    assert_eq!(right, []);
+}
+
1.0.0 · source

pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T])

Divides one mutable slice into two at an index.

+

The first will contain all indices from [0, mid) (excluding +the index mid itself) and the second will contain all +indices from [mid, len) (excluding the index len itself).

+
§Panics
+

Panics if mid > len. For a non-panicking alternative see +split_at_mut_checked.

+
§Examples
+
let mut v = [1, 0, 3, 0, 5, 6];
+let (left, right) = v.split_at_mut(2);
+assert_eq!(left, [1, 0]);
+assert_eq!(right, [3, 0, 5, 6]);
+left[1] = 2;
+right[1] = 4;
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+
1.79.0 · source

pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T])

Divides one slice into two at an index, without doing bounds checking.

+

The first will contain all indices from [0, mid) (excluding +the index mid itself) and the second will contain all +indices from [mid, len) (excluding the index len itself).

+

For a safe alternative see split_at.

+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used. The caller has to ensure that +0 <= mid <= self.len().

+
§Examples
+
let v = [1, 2, 3, 4, 5, 6];
+
+unsafe {
+   let (left, right) = v.split_at_unchecked(0);
+   assert_eq!(left, []);
+   assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+unsafe {
+    let (left, right) = v.split_at_unchecked(2);
+    assert_eq!(left, [1, 2]);
+    assert_eq!(right, [3, 4, 5, 6]);
+}
+
+unsafe {
+    let (left, right) = v.split_at_unchecked(6);
+    assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+    assert_eq!(right, []);
+}
+
1.79.0 · source

pub unsafe fn split_at_mut_unchecked( + &mut self, + mid: usize, +) -> (&mut [T], &mut [T])

Divides one mutable slice into two at an index, without doing bounds checking.

+

The first will contain all indices from [0, mid) (excluding +the index mid itself) and the second will contain all +indices from [mid, len) (excluding the index len itself).

+

For a safe alternative see split_at_mut.

+
§Safety
+

Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used. The caller has to ensure that +0 <= mid <= self.len().

+
§Examples
+
let mut v = [1, 0, 3, 0, 5, 6];
+// scoped to restrict the lifetime of the borrows
+unsafe {
+    let (left, right) = v.split_at_mut_unchecked(2);
+    assert_eq!(left, [1, 0]);
+    assert_eq!(right, [3, 0, 5, 6]);
+    left[1] = 2;
+    right[1] = 4;
+}
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+
1.80.0 · source

pub fn split_at_checked(&self, mid: usize) -> Option<(&[T], &[T])>

Divides one slice into two at an index, returning None if the slice is +too short.

+

If mid ≤ len returns a pair of slices where the first will contain all +indices from [0, mid) (excluding the index mid itself) and the +second will contain all indices from [mid, len) (excluding the index +len itself).

+

Otherwise, if mid > len, returns None.

+
§Examples
+
let v = [1, -2, 3, -4, 5, -6];
+
+{
+   let (left, right) = v.split_at_checked(0).unwrap();
+   assert_eq!(left, []);
+   assert_eq!(right, [1, -2, 3, -4, 5, -6]);
+}
+
+{
+    let (left, right) = v.split_at_checked(2).unwrap();
+    assert_eq!(left, [1, -2]);
+    assert_eq!(right, [3, -4, 5, -6]);
+}
+
+{
+    let (left, right) = v.split_at_checked(6).unwrap();
+    assert_eq!(left, [1, -2, 3, -4, 5, -6]);
+    assert_eq!(right, []);
+}
+
+assert_eq!(None, v.split_at_checked(7));
+
1.80.0 · source

pub fn split_at_mut_checked( + &mut self, + mid: usize, +) -> Option<(&mut [T], &mut [T])>

Divides one mutable slice into two at an index, returning None if the +slice is too short.

+

If mid ≤ len returns a pair of slices where the first will contain all +indices from [0, mid) (excluding the index mid itself) and the +second will contain all indices from [mid, len) (excluding the index +len itself).

+

Otherwise, if mid > len, returns None.

+
§Examples
+
let mut v = [1, 0, 3, 0, 5, 6];
+
+if let Some((left, right)) = v.split_at_mut_checked(2) {
+    assert_eq!(left, [1, 0]);
+    assert_eq!(right, [3, 0, 5, 6]);
+    left[1] = 2;
+    right[1] = 4;
+}
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+
+assert_eq!(None, v.split_at_mut_checked(7));
+
1.0.0 · source

pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred. The matched element is not contained in the subslices.

+
§Examples
+
let slice = [10, 40, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
+

If the first element is matched, an empty slice will be the first item +returned by the iterator. Similarly, if the last element in the slice +is matched, an empty slice will be the last item returned by the +iterator:

+ +
let slice = [10, 40, 33];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert!(iter.next().is_none());
+

If two matched elements are directly adjacent, an empty slice will be +present between them:

+ +
let slice = [10, 6, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
+
1.0.0 · source

pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over mutable subslices separated by elements that +match pred. The matched element is not contained in the subslices.

+
§Examples
+
let mut v = [10, 40, 30, 20, 60, 50];
+
+for group in v.split_mut(|num| *num % 3 == 0) {
+    group[0] = 1;
+}
+assert_eq!(v, [1, 40, 30, 1, 60, 1]);
+
1.51.0 · source

pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred. The matched element is contained in the end of the previous +subslice as a terminator.

+
§Examples
+
let slice = [10, 40, 33, 20];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
+

If the last element of the slice is matched, +that element will be considered the terminator of the preceding slice. +That slice will be the last item returned by the iterator.

+ +
let slice = [3, 10, 40, 33];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[3]);
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert!(iter.next().is_none());
+
1.51.0 · source

pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over mutable subslices separated by elements that +match pred. The matched element is contained in the previous +subslice as a terminator.

+
§Examples
+
let mut v = [10, 40, 30, 20, 60, 50];
+
+for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
+    let terminator_idx = group.len()-1;
+    group[terminator_idx] = 1;
+}
+assert_eq!(v, [10, 40, 1, 20, 1, 1]);
+
1.27.0 · source

pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred, starting at the end of the slice and working backwards. +The matched element is not contained in the subslices.

+
§Examples
+
let slice = [11, 22, 33, 0, 44, 55];
+let mut iter = slice.rsplit(|num| *num == 0);
+
+assert_eq!(iter.next().unwrap(), &[44, 55]);
+assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
+assert_eq!(iter.next(), None);
+

As with split(), if the first or last element is matched, an empty +slice will be the first (or last) item returned by the iterator.

+ +
let v = &[0, 1, 1, 2, 3, 5, 8];
+let mut it = v.rsplit(|n| *n % 2 == 0);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next().unwrap(), &[3, 5]);
+assert_eq!(it.next().unwrap(), &[1, 1]);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next(), None);
+
1.27.0 · source

pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over mutable subslices separated by elements that +match pred, starting at the end of the slice and working +backwards. The matched element is not contained in the subslices.

+
§Examples
+
let mut v = [100, 400, 300, 200, 600, 500];
+
+let mut count = 0;
+for group in v.rsplit_mut(|num| *num % 3 == 0) {
+    count += 1;
+    group[0] = count;
+}
+assert_eq!(v, [3, 400, 300, 2, 600, 1]);
+
1.0.0 · source

pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred, limited to returning at most n items. The matched element is +not contained in the subslices.

+

The last element returned, if any, will contain the remainder of the +slice.

+
§Examples
+

Print the slice split once by numbers divisible by 3 (i.e., [10, 40], +[20, 60, 50]):

+ +
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.splitn(2, |num| *num % 3 == 0) {
+    println!("{group:?}");
+}
+
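As an added sketch (not part of the upstream example), the remainder behaviour described above can be made explicit by collecting the groups and asserting on them:

let v = [10, 40, 30, 20, 60, 50];
+let parts: Vec<&[i32]> = v.splitn(2, |num| *num % 3 == 0).collect();
+// Only the first match splits; the second item holds the entire remainder.
+assert_eq!(parts, [&[10, 40][..], &[20, 60, 50][..]]);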
1.0.0 · source

pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over mutable subslices separated by elements that match +pred, limited to returning at most n items. The matched element is +not contained in the subslices.

+

The last element returned, if any, will contain the remainder of the +slice.

+
§Examples
+
let mut v = [10, 40, 30, 20, 60, 50];
+
+for group in v.splitn_mut(2, |num| *num % 3 == 0) {
+    group[0] = 1;
+}
+assert_eq!(v, [1, 40, 30, 1, 60, 50]);
+
1.0.0 · source

pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred limited to returning at most n items. This starts at the end of +the slice and works backwards. The matched element is not contained in +the subslices.

+

The last element returned, if any, will contain the remainder of the +slice.

+
§Examples
+

Print the slice split once, starting from the end, by numbers divisible +by 3 (i.e., [50], [10, 40, 30, 20]):

+ +
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.rsplitn(2, |num| *num % 3 == 0) {
+    println!("{group:?}");
+}
+
1.0.0 · source

pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where + F: FnMut(&T) -> bool,

Returns an iterator over subslices separated by elements that match +pred limited to returning at most n items. This starts at the end of +the slice and works backwards. The matched element is not contained in +the subslices.

+

The last element returned, if any, will contain the remainder of the +slice.

+
§Examples
+
let mut s = [10, 40, 30, 20, 60, 50];
+
+for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
+    group[0] = 1;
+}
+assert_eq!(s, [1, 40, 30, 20, 60, 1]);
+
source

pub fn split_once<F>(&self, pred: F) -> Option<(&[T], &[T])>
where + F: FnMut(&T) -> bool,

🔬This is a nightly-only experimental API. (slice_split_once)

Splits the slice on the first element that matches the specified +predicate.

+

If any matching elements are present in the slice, returns the prefix +before the match and suffix after. The matching element itself is not +included. If no elements match, returns None.

+
§Examples
+
#![feature(slice_split_once)]
+let s = [1, 2, 3, 2, 4];
+assert_eq!(s.split_once(|&x| x == 2), Some((
+    &[1][..],
+    &[3, 2, 4][..]
+)));
+assert_eq!(s.split_once(|&x| x == 0), None);
+
source

pub fn rsplit_once<F>(&self, pred: F) -> Option<(&[T], &[T])>
where + F: FnMut(&T) -> bool,

🔬This is a nightly-only experimental API. (slice_split_once)

Splits the slice on the last element that matches the specified +predicate.

+

If any matching elements are present in the slice, returns the prefix +before the match and suffix after. The matching element itself is not +included. If no elements match, returns None.

+
§Examples
+
#![feature(slice_split_once)]
+let s = [1, 2, 3, 2, 4];
+assert_eq!(s.rsplit_once(|&x| x == 2), Some((
+    &[1, 2, 3][..],
+    &[4][..]
+)));
+assert_eq!(s.rsplit_once(|&x| x == 0), None);
+
1.0.0 · source

pub fn contains(&self, x: &T) -> bool
where + T: PartialEq,

Returns true if the slice contains an element with the given value.

+

This operation is O(n).

+

Note that if you have a sorted slice, binary_search may be faster.

+
§Examples
+
let v = [10, 40, 30];
+assert!(v.contains(&30));
+assert!(!v.contains(&50));
+

If you do not have a &T, but some other value that you can compare +with one (for example, String implements PartialEq<str>), you can +use iter().any:

+ +
let v = [String::from("hello"), String::from("world")]; // slice of `String`
+assert!(v.iter().any(|e| e == "hello")); // search with `&str`
+assert!(!v.iter().any(|e| e == "hi"));
+
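As a hedged aside illustrating the earlier note that binary_search may be faster on sorted data, a membership test on a sorted slice can be phrased as:

let sorted = [10, 30, 40];
+// O(log n) instead of O(n), but only valid because the slice is sorted.
+assert!(sorted.binary_search(&30).is_ok());
+assert!(sorted.binary_search(&50).is_err());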
1.0.0 · source

pub fn starts_with(&self, needle: &[T]) -> bool
where + T: PartialEq,

Returns true if needle is a prefix of the slice or equal to the slice.

+
§Examples
+
let v = [10, 40, 30];
+assert!(v.starts_with(&[10]));
+assert!(v.starts_with(&[10, 40]));
+assert!(v.starts_with(&v));
+assert!(!v.starts_with(&[50]));
+assert!(!v.starts_with(&[10, 50]));
+

Always returns true if needle is an empty slice:

+ +
let v = &[10, 40, 30];
+assert!(v.starts_with(&[]));
+let v: &[u8] = &[];
+assert!(v.starts_with(&[]));
+
1.0.0 · source

pub fn ends_with(&self, needle: &[T]) -> bool
where + T: PartialEq,

Returns true if needle is a suffix of the slice or equal to the slice.

+
§Examples
+
let v = [10, 40, 30];
+assert!(v.ends_with(&[30]));
+assert!(v.ends_with(&[40, 30]));
+assert!(v.ends_with(&v));
+assert!(!v.ends_with(&[50]));
+assert!(!v.ends_with(&[50, 30]));
+

Always returns true if needle is an empty slice:

+ +
let v = &[10, 40, 30];
+assert!(v.ends_with(&[]));
+let v: &[u8] = &[];
+assert!(v.ends_with(&[]));
+
1.51.0 · source

pub fn strip_prefix<P>(&self, prefix: &P) -> Option<&[T]>
where + P: SlicePattern<Item = T> + ?Sized, + T: PartialEq,

Returns a subslice with the prefix removed.

+

If the slice starts with prefix, returns the subslice after the prefix, wrapped in Some. +If prefix is empty, simply returns the original slice. If prefix is equal to the +original slice, returns an empty slice.

+

If the slice does not start with prefix, returns None.

+
§Examples
+
let v = &[10, 40, 30];
+assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+assert_eq!(v.strip_prefix(&[10, 40, 30]), Some(&[][..]));
+assert_eq!(v.strip_prefix(&[50]), None);
+assert_eq!(v.strip_prefix(&[10, 50]), None);
+
+let prefix : &str = "he";
+assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
+           Some(b"llo".as_ref()));
+
1.51.0 · source

pub fn strip_suffix<P>(&self, suffix: &P) -> Option<&[T]>
where + P: SlicePattern<Item = T> + ?Sized, + T: PartialEq,

Returns a subslice with the suffix removed.

+

If the slice ends with suffix, returns the subslice before the suffix, wrapped in Some. +If suffix is empty, simply returns the original slice. If suffix is equal to the +original slice, returns an empty slice.

+

If the slice does not end with suffix, returns None.

+
§Examples
+
let v = &[10, 40, 30];
+assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+assert_eq!(v.strip_suffix(&[10, 40, 30]), Some(&[][..]));
+assert_eq!(v.strip_suffix(&[50]), None);
+assert_eq!(v.strip_suffix(&[50, 30]), None);
+

1.0.0 · source

pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where + T: Ord,

Binary searches this slice for a given element. +If the slice is not sorted, the returned result is unspecified and +meaningless.

+

If the value is found then Result::Ok is returned, containing the +index of the matching element. If there are multiple matches, then any +one of the matches could be returned. The index is chosen +deterministically, but is subject to change in future versions of Rust. +If the value is not found then Result::Err is returned, containing +the index where a matching element could be inserted while maintaining +sorted order.

+

See also binary_search_by, binary_search_by_key, and partition_point.

+
§Examples
+

Looks up a series of four elements. The first is found, with a +uniquely determined position; the second and third are not +found; the fourth could match any position in [1, 4].

+ +
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+assert_eq!(s.binary_search(&13),  Ok(9));
+assert_eq!(s.binary_search(&4),   Err(7));
+assert_eq!(s.binary_search(&100), Err(13));
+let r = s.binary_search(&1);
+assert!(match r { Ok(1..=4) => true, _ => false, });
+

If you want to find that whole range of matching items, rather than +an arbitrary matching one, that can be done using partition_point:

+ +
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let low = s.partition_point(|x| x < &1);
+assert_eq!(low, 1);
+let high = s.partition_point(|x| x <= &1);
+assert_eq!(high, 5);
+let r = s.binary_search(&1);
+assert!((low..high).contains(&r.unwrap()));
+
+assert!(s[..low].iter().all(|&x| x < 1));
+assert!(s[low..high].iter().all(|&x| x == 1));
+assert!(s[high..].iter().all(|&x| x > 1));
+
+// For something not found, the "range" of equal items is empty
+assert_eq!(s.partition_point(|x| x < &11), 9);
+assert_eq!(s.partition_point(|x| x <= &11), 9);
+assert_eq!(s.binary_search(&11), Err(9));
+

If you want to insert an item to a sorted vector, while maintaining +sort order, consider using partition_point:

+ +
let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x <= num);
+// If `num` is unique, `s.partition_point(|&x| x < num)` (with `<`) is equivalent to
+// `s.binary_search(&num).unwrap_or_else(|x| x)`, but using `<=` will allow `insert`
+// to shift fewer elements.
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+
1.0.0 · source

pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result<usize, usize>
where + F: FnMut(&'a T) -> Ordering,

Binary searches this slice with a comparator function.

+

The comparator function should return an order code indicating +whether its argument is Less than, Equal to, or Greater than the desired +target. +If the slice is not sorted or if the comparator function does not +implement an order consistent with the sort order of the underlying +slice, the returned result is unspecified and meaningless.

+

If the value is found then Result::Ok is returned, containing the +index of the matching element. If there are multiple matches, then any +one of the matches could be returned. The index is chosen +deterministically, but is subject to change in future versions of Rust. +If the value is not found then Result::Err is returned, containing +the index where a matching element could be inserted while maintaining +sorted order.

+

See also binary_search, binary_search_by_key, and partition_point.

+
§Examples
+

Looks up a series of four elements. The first is found, with a +uniquely determined position; the second and third are not +found; the fourth could match any position in [1, 4].

+ +
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let seek = 13;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
+let seek = 4;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
+let seek = 100;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
+let seek = 1;
+let r = s.binary_search_by(|probe| probe.cmp(&seek));
+assert!(match r { Ok(1..=4) => true, _ => false, });
+
1.10.0 · source

pub fn binary_search_by_key<'a, B, F>( + &'a self, + b: &B, + f: F, +) -> Result<usize, usize>
where + F: FnMut(&'a T) -> B, + B: Ord,

Binary searches this slice with a key extraction function.

+

Assumes that the slice is sorted by the key, for instance with +sort_by_key using the same key extraction function. +If the slice is not sorted by the key, the returned result is +unspecified and meaningless.

+

If the value is found then Result::Ok is returned, containing the +index of the matching element. If there are multiple matches, then any +one of the matches could be returned. The index is chosen +deterministically, but is subject to change in future versions of Rust. +If the value is not found then Result::Err is returned, containing +the index where a matching element could be inserted while maintaining +sorted order.

+

See also binary_search, binary_search_by, and partition_point.

+
§Examples
+

Looks up a series of four elements in a slice of pairs sorted by +their second elements. The first is found, with a uniquely +determined position; the second and third are not found; the +fourth could match any position in [1, 4].

+ +
let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
+         (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+         (1, 21), (2, 34), (4, 55)];
+
+assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b),  Ok(9));
+assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b),   Err(7));
+assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+let r = s.binary_search_by_key(&1, |&(a, b)| b);
+assert!(match r { Ok(1..=4) => true, _ => false, });
+
1.20.0 · source

pub fn sort_unstable(&mut self)
where + T: Ord,

Sorts the slice without preserving the initial order of equal elements.

+

This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not +allocate), and O(n * log(n)) worst-case.

+

If the implementation of Ord for T does not implement a total order the resulting +order of elements in the slice is unspecified. All original elements will remain in the +slice and any possible modifications via interior mutability are observed in the input. Same +is true if the implementation of Ord for T panics.

+

Sorting types that only implement PartialOrd such as f32 and f64 require +additional precautions. For example, f32::NAN != f32::NAN, which doesn’t fulfill the +reflexivity requirement of Ord. By using an alternative comparison function with +slice::sort_unstable_by such as f32::total_cmp or f64::total_cmp that defines a +total order users can sort slices containing floating-point values. Alternatively, if all +values in the slice are guaranteed to be in a subset for which PartialOrd::partial_cmp +forms a total order, it’s possible to sort the slice with sort_unstable_by(|a, b| a.partial_cmp(b).unwrap()).

+
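A minimal added sketch (not from the upstream docs) of the total_cmp approach just described: a slice containing NaN still sorts, with the NaN ordered last:

let mut v = [2.6_f32, f32::NAN, -1.0, 0.0];
+// `f32::total_cmp` supplies the total order that `PartialOrd` alone cannot.
+v.sort_unstable_by(|a, b| a.total_cmp(b));
+assert_eq!(&v[..3], &[-1.0, 0.0, 2.6]);
+assert!(v[3].is_nan());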
§Current implementation
+

The current implementation is based on ipnsort by Lukas Bergdoll and Orson Peters, which +combines the fast average case of quicksort with the fast worst case of heapsort, achieving +linear time on fully sorted and reversed inputs. On inputs with k distinct elements, the +expected time to sort the data is O(n * log(k)).

+

It is typically faster than stable sorting, except in a few special cases, e.g., when the +slice is partially sorted.

+
§Panics
+

May panic if the implementation of Ord for T does not implement a total order.

+
§Examples
+
let mut v = [4, -5, 1, -3, 2];
+
+v.sort_unstable();
+assert_eq!(v, [-5, -3, 1, 2, 4]);
+
1.20.0 · source

pub fn sort_unstable_by<F>(&mut self, compare: F)
where + F: FnMut(&T, &T) -> Ordering,

Sorts the slice with a comparison function, without preserving the initial order of +equal elements.

+

This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not +allocate), and O(n * log(n)) worst-case.

+

If the comparison function compare does not implement a total order the resulting order +of elements in the slice is unspecified. All original elements will remain in the slice and +any possible modifications via interior mutability are observed in the input. Same is true +if compare panics.

+

For example, |a, b| (a - b).cmp(a) is a comparison function that is neither transitive nor +reflexive nor total; it yields a < b < c < a with a = 1, b = 2, c = 3. For more information and +examples see the Ord documentation.

+
§Current implementation
+

The current implementation is based on ipnsort by Lukas Bergdoll and Orson Peters, which +combines the fast average case of quicksort with the fast worst case of heapsort, achieving +linear time on fully sorted and reversed inputs. On inputs with k distinct elements, the +expected time to sort the data is O(n * log(k)).

+

It is typically faster than stable sorting, except in a few special cases, e.g., when the +slice is partially sorted.

+
§Panics
+

May panic if compare does not implement a total order.

+
§Examples
+
let mut v = [4, -5, 1, -3, 2];
+v.sort_unstable_by(|a, b| a.cmp(b));
+assert_eq!(v, [-5, -3, 1, 2, 4]);
+
+// reverse sorting
+v.sort_unstable_by(|a, b| b.cmp(a));
+assert_eq!(v, [4, 2, 1, -3, -5]);
+
1.20.0 · source

pub fn sort_unstable_by_key<K, F>(&mut self, f: F)
where + F: FnMut(&T) -> K, + K: Ord,

Sorts the slice with a key extraction function, without preserving the initial order of +equal elements.

+

This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not +allocate), and O(n * log(n)) worst-case.

+

If the implementation of Ord for K does not implement a total order the resulting +order of elements in the slice is unspecified. All original elements will remain in the +slice and any possible modifications via interior mutability are observed in the input. Same +is true if the implementation of Ord for K panics.

+
§Current implementation
+

The current implementation is based on ipnsort by Lukas Bergdoll and Orson Peters, which +combines the fast average case of quicksort with the fast worst case of heapsort, achieving +linear time on fully sorted and reversed inputs. On inputs with k distinct elements, the +expected time to sort the data is O(n * log(k)).

+

It is typically faster than stable sorting, except in a few special cases, e.g., when the +slice is partially sorted.

+
§Panics
+

May panic if the implementation of Ord for K does not implement a total order.

+
§Examples
+
let mut v = [4i32, -5, 1, -3, 2];
+
+v.sort_unstable_by_key(|k| k.abs());
+assert_eq!(v, [1, 2, -3, 4, -5]);
+
1.49.0 · source

pub fn select_nth_unstable( + &mut self, + index: usize, +) -> (&mut [T], &mut T, &mut [T])
where + T: Ord,

Reorders the slice such that the element at index after the reordering is at its final +sorted position.

+

This reordering has the additional property that any value at position i < index will be +less than or equal to any value at a position j > index. Additionally, this reordering is +unstable (i.e. any number of equal elements may end up at position index), in-place (i.e. +does not allocate), and runs in O(n) time. This function is also known as “kth element” +in other libraries.

+

It returns a triplet of the following from the reordered slice: the subslice prior to +index, the element at index, and the subslice after index; accordingly, the values in +those two subslices will respectively all be less-than-or-equal-to and +greater-than-or-equal-to the value of the element at index.

+
§Current implementation
+

The current algorithm is an introselect implementation based on ipnsort by Lukas Bergdoll +and Orson Peters, which is also the basis for sort_unstable. The fallback algorithm is +Median of Medians using Tukey’s Ninther for pivot selection, which guarantees linear runtime +for all inputs.

+
§Panics
+

Panics when index >= len(), meaning it always panics on empty slices.

+

May panic if the implementation of Ord for T does not implement a total order.

+
§Examples
+
let mut v = [-5i32, 4, 2, -3, 1];
+
+// Find the items less than or equal to the median, the median, and greater than or equal to
+// the median.
+let (lesser, median, greater) = v.select_nth_unstable(2);
+
+assert!(lesser == [-3, -5] || lesser == [-5, -3]);
+assert_eq!(median, &mut 1);
+assert!(greater == [4, 2] || greater == [2, 4]);
+
+// We are only guaranteed the slice will be one of the following, based on the way we sort
+// about the specified index.
+assert!(v == [-3, -5, 1, 2, 4] ||
+        v == [-5, -3, 1, 2, 4] ||
+        v == [-3, -5, 1, 4, 2] ||
+        v == [-5, -3, 1, 4, 2]);
+
1.49.0 · source

pub fn select_nth_unstable_by<F>( + &mut self, + index: usize, + compare: F, +) -> (&mut [T], &mut T, &mut [T])
where + F: FnMut(&T, &T) -> Ordering,

Reorders the slice with a comparator function such that the element at index after the +reordering is at its final sorted position.

+

This reordering has the additional property that any value at position i < index will be +less than or equal to any value at a position j > index using the comparator function. +Additionally, this reordering is unstable (i.e. any number of equal elements may end up at +position index), in-place (i.e. does not allocate), and runs in O(n) time. This +function is also known as “kth element” in other libraries.

+

It returns a triplet of the following from the slice reordered according to the provided +comparator function: the subslice prior to index, the element at index, and the subslice +after index; accordingly, the values in those two subslices will respectively all be +less-than-or-equal-to and greater-than-or-equal-to the value of the element at index.

+
§Current implementation
+

The current algorithm is an introselect implementation based on ipnsort by Lukas Bergdoll +and Orson Peters, which is also the basis for sort_unstable. The fallback algorithm is +Median of Medians using Tukey’s Ninther for pivot selection, which guarantees linear runtime +for all inputs.

+
§Panics
+

Panics when index >= len(), meaning it always panics on empty slices.

+

May panic if compare does not implement a total order.

+
§Examples
+
let mut v = [-5i32, 4, 2, -3, 1];
+
+// Find the items less than or equal to the median, the median, and greater than or equal to
+// the median as if the slice were sorted in descending order.
+let (lesser, median, greater) = v.select_nth_unstable_by(2, |a, b| b.cmp(a));
+
+assert!(lesser == [4, 2] || lesser == [2, 4]);
+assert_eq!(median, &mut 1);
+assert!(greater == [-3, -5] || greater == [-5, -3]);
+
+// We are only guaranteed the slice will be one of the following, based on the way we sort
+// about the specified index.
+assert!(v == [2, 4, 1, -5, -3] ||
+        v == [2, 4, 1, -3, -5] ||
+        v == [4, 2, 1, -5, -3] ||
+        v == [4, 2, 1, -3, -5]);
+
1.49.0 · source

pub fn select_nth_unstable_by_key<K, F>( + &mut self, + index: usize, + f: F, +) -> (&mut [T], &mut T, &mut [T])
where + F: FnMut(&T) -> K, + K: Ord,

Reorders the slice with a key extraction function such that the element at index after the +reordering is at its final sorted position.

+

This reordering has the additional property that any value at position i < index will be +less than or equal to any value at a position j > index using the key extraction function. +Additionally, this reordering is unstable (i.e. any number of equal elements may end up at +position index), in-place (i.e. does not allocate), and runs in O(n) time. This +function is also known as “kth element” in other libraries.

+

It returns a triplet of the following from the slice reordered according to the provided key +extraction function: the subslice prior to index, the element at index, and the subslice +after index; accordingly, the values in those two subslices will respectively all be +less-than-or-equal-to and greater-than-or-equal-to the value of the element at index.

+
§Current implementation
+

The current algorithm is an introselect implementation based on ipnsort by Lukas Bergdoll +and Orson Peters, which is also the basis for sort_unstable. The fallback algorithm is +Median of Medians using Tukey’s Ninther for pivot selection, which guarantees linear runtime +for all inputs.

+
§Panics
+

Panics when index >= len(), meaning it always panics on empty slices.

+

May panic if K: Ord does not implement a total order.

+
§Examples
+
let mut v = [-5i32, 4, 1, -3, 2];
+
+// Find the items less than or equal to the median, the median, and greater than or equal to
+// the median as if the slice were sorted according to absolute value.
+let (lesser, median, greater) = v.select_nth_unstable_by_key(2, |a| a.abs());
+
+assert!(lesser == [1, 2] || lesser == [2, 1]);
+assert_eq!(median, &mut -3);
+assert!(greater == [4, -5] || greater == [-5, 4]);
+
+// We are only guaranteed the slice will be one of the following, based on the way we sort
+// about the specified index.
+assert!(v == [1, 2, -3, 4, -5] ||
+        v == [1, 2, -3, -5, 4] ||
+        v == [2, 1, -3, 4, -5] ||
+        v == [2, 1, -3, -5, 4]);
+
source

pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where + T: PartialEq,

🔬This is a nightly-only experimental API. (slice_partition_dedup)

Moves all consecutive repeated elements to the end of the slice according to the +PartialEq trait implementation.

+

Returns two slices. The first contains no consecutive repeated elements. +The second contains all the duplicates in no specified order.

+

If the slice is sorted, the first returned slice contains no duplicates.

+
§Examples
+
#![feature(slice_partition_dedup)]
+
+let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
+
+let (dedup, duplicates) = slice.partition_dedup();
+
+assert_eq!(dedup, [1, 2, 3, 2, 1]);
+assert_eq!(duplicates, [2, 3, 1]);
+
source

pub fn partition_dedup_by<F>(&mut self, same_bucket: F) -> (&mut [T], &mut [T])
where + F: FnMut(&mut T, &mut T) -> bool,

🔬This is a nightly-only experimental API. (slice_partition_dedup)

Moves all but the first of consecutive elements to the end of the slice satisfying +a given equality relation.

+

Returns two slices. The first contains no consecutive repeated elements. +The second contains all the duplicates in no specified order.

+

The same_bucket function is passed references to two elements from the slice and +must determine if the elements compare equal. The elements are passed in opposite order +from their order in the slice, so if same_bucket(a, b) returns true, a is moved +to the end of the slice.

+

If the slice is sorted, the first returned slice contains no duplicates.

+
§Examples
+
#![feature(slice_partition_dedup)]
+
+let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
+
+let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+
+assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
+assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
+
source

pub fn partition_dedup_by_key<K, F>(&mut self, key: F) -> (&mut [T], &mut [T])
where + F: FnMut(&mut T) -> K, + K: PartialEq,

🔬This is a nightly-only experimental API. (slice_partition_dedup)

Moves all but the first of consecutive elements to the end of the slice that resolve +to the same key.

+

Returns two slices. The first contains no consecutive repeated elements. +The second contains all the duplicates in no specified order.

+

If the slice is sorted, the first returned slice contains no duplicates.

+
§Examples
+
#![feature(slice_partition_dedup)]
+
+let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
+
+let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
+
+assert_eq!(dedup, [10, 20, 30, 20, 11]);
+assert_eq!(duplicates, [21, 30, 13]);
+
1.26.0 · source

pub fn rotate_left(&mut self, mid: usize)

Rotates the slice in-place such that the first mid elements of the +slice move to the end while the last self.len() - mid elements move to +the front.

+

After calling rotate_left, the element previously at index mid will +become the first element in the slice.

+
§Panics
+

This function will panic if mid is greater than the length of the +slice. Note that mid == self.len() does not panic and is a no-op +rotation.

+
§Complexity
+

Takes linear (in self.len()) time.

+
§Examples
+
let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a.rotate_left(2);
+assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
+

Rotating a subslice:

+ +
let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a[1..5].rotate_left(1);
+assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
+
1.26.0 · source

pub fn rotate_right(&mut self, k: usize)

Rotates the slice in-place such that the first self.len() - k +elements of the slice move to the end while the last k elements move +to the front.

+

After calling rotate_right, the element previously at index +self.len() - k will become the first element in the slice.

+
§Panics
+

This function will panic if k is greater than the length of the +slice. Note that k == self.len() does not panic and is a no-op +rotation.

+
§Complexity
+

Takes linear (in self.len()) time.

+
§Examples
+
let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a.rotate_right(2);
+assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
+

Rotating a subslice:

+ +
let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a[1..5].rotate_right(1);
+assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
+
1.50.0 · source

pub fn fill(&mut self, value: T)
where + T: Clone,

Fills self with elements by cloning value.

+
§Examples
+
let mut buf = vec![0; 10];
+buf.fill(1);
+assert_eq!(buf, vec![1; 10]);
+
1.51.0 · source

pub fn fill_with<F>(&mut self, f: F)
where + F: FnMut() -> T,

Fills self with elements returned by calling a closure repeatedly.

+

This method uses a closure to create new values. If you’d rather +Clone a given value, use fill. If you want to use the Default +trait to generate values, you can pass Default::default as the +argument.

+
§Examples
+
let mut buf = vec![1; 10];
+buf.fill_with(Default::default);
+assert_eq!(buf, vec![0; 10]);
+
1.7.0 · source

pub fn clone_from_slice(&mut self, src: &[T])
where + T: Clone,

Copies the elements from src into self.

+

The length of src must be the same as self.

+
§Panics
+

This function will panic if the two slices have different lengths.

+
§Examples
+

Cloning two elements from a slice into another:

+ +
let src = [1, 2, 3, 4];
+let mut dst = [0, 0];
+
+// Because the slices have to be the same length,
+// we slice the source slice from four elements
+// to two. It will panic if we don't do this.
+dst.clone_from_slice(&src[2..]);
+
+assert_eq!(src, [1, 2, 3, 4]);
+assert_eq!(dst, [3, 4]);
+

Rust enforces that there can only be one mutable reference with no +immutable references to a particular piece of data in a particular +scope. Because of this, attempting to use clone_from_slice on a +single slice will result in a compile failure:

+ +
let mut slice = [1, 2, 3, 4, 5];
+
+slice[..2].clone_from_slice(&slice[3..]); // compile fail!
+

To work around this, we can use split_at_mut to create two distinct +sub-slices from a slice:

+ +
let mut slice = [1, 2, 3, 4, 5];
+
+{
+    let (left, right) = slice.split_at_mut(2);
+    left.clone_from_slice(&right[1..]);
+}
+
+assert_eq!(slice, [4, 5, 3, 4, 5]);
+
1.9.0 · source

pub fn copy_from_slice(&mut self, src: &[T])
where + T: Copy,

Copies all elements from src into self, using a memcpy.

+

The length of src must be the same as self.

+

If T does not implement Copy, use clone_from_slice.

+
§Panics
+

This function will panic if the two slices have different lengths.

+
§Examples
+

Copying two elements from a slice into another:

+ +
let src = [1, 2, 3, 4];
+let mut dst = [0, 0];
+
+// Because the slices have to be the same length,
+// we slice the source slice from four elements
+// to two. It will panic if we don't do this.
+dst.copy_from_slice(&src[2..]);
+
+assert_eq!(src, [1, 2, 3, 4]);
+assert_eq!(dst, [3, 4]);
+

Rust enforces that there can only be one mutable reference with no +immutable references to a particular piece of data in a particular +scope. Because of this, attempting to use copy_from_slice on a +single slice will result in a compile failure:

+ +
let mut slice = [1, 2, 3, 4, 5];
+
+slice[..2].copy_from_slice(&slice[3..]); // compile fail!
+

To work around this, we can use split_at_mut to create two distinct +sub-slices from a slice:

+ +
let mut slice = [1, 2, 3, 4, 5];
+
+{
+    let (left, right) = slice.split_at_mut(2);
+    left.copy_from_slice(&right[1..]);
+}
+
+assert_eq!(slice, [4, 5, 3, 4, 5]);
+
1.37.0 · source

pub fn copy_within<R>(&mut self, src: R, dest: usize)
where + R: RangeBounds<usize>, + T: Copy,

Copies elements from one part of the slice to another part of itself, +using a memmove.

+

src is the range within self to copy from. dest is the starting +index of the range within self to copy to, which will have the same +length as src. The two ranges may overlap. The ends of the two ranges +must be less than or equal to self.len().

+
§Panics
+

This function will panic if either range exceeds the end of the slice, +or if the end of src is before the start.

+
§Examples
+

Copying four bytes within a slice:

+ +
let mut bytes = *b"Hello, World!";
+
+bytes.copy_within(1..5, 8);
+
+assert_eq!(&bytes, b"Hello, Wello!");
+
1.27.0 · source

pub fn swap_with_slice(&mut self, other: &mut [T])

Swaps all elements in self with those in other.

+

The length of other must be the same as self.

+
§Panics
+

This function will panic if the two slices have different lengths.

+
§Example
+

Swapping two elements across slices:

+ +
let mut slice1 = [0, 0];
+let mut slice2 = [1, 2, 3, 4];
+
+slice1.swap_with_slice(&mut slice2[2..]);
+
+assert_eq!(slice1, [3, 4]);
+assert_eq!(slice2, [1, 2, 0, 0]);
+

Rust enforces that there can only be one mutable reference to a +particular piece of data in a particular scope. Because of this, +attempting to use swap_with_slice on a single slice will result in +a compile failure:

+ +
let mut slice = [1, 2, 3, 4, 5];
+slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
+

To work around this, we can use split_at_mut to create two distinct +mutable sub-slices from a slice:

+ +
let mut slice = [1, 2, 3, 4, 5];
+
+{
+    let (left, right) = slice.split_at_mut(2);
+    left.swap_with_slice(&mut right[1..]);
+}
+
+assert_eq!(slice, [4, 5, 3, 1, 2]);
+
1.30.0 · source

pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T])

Transmutes the slice to a slice of another type, ensuring alignment of the types is +maintained.

+

This method splits the slice into three distinct slices: prefix, correctly aligned middle +slice of a new type, and the suffix slice. The middle part will be as big as possible under +the given alignment constraint and element size.

+

This method has no purpose when either input element T or output element U is +zero-sized and will return the original slice without splitting anything.

+
§Safety
+

This method is essentially a transmute with respect to the elements in the returned +middle slice, so all the usual caveats pertaining to transmute::<T, U> also apply here.

+
§Examples
+

Basic usage:

+ +
unsafe {
+    let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+    let (prefix, shorts, suffix) = bytes.align_to::<u16>();
+    // less_efficient_algorithm_for_bytes(prefix);
+    // more_efficient_algorithm_for_aligned_shorts(shorts);
+    // less_efficient_algorithm_for_bytes(suffix);
+}
+
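An additional hedged sketch beyond the upstream placeholder example: reinterpreting u8 as u16 is sound because every bit pattern is a valid u16, but the resulting values depend on endianness, so only the structural guarantees (full coverage and alignment of the middle part) are asserted here:

let bytes = [0u8; 13];
+let (prefix, shorts, suffix) = unsafe { bytes.align_to::<u16>() };
+// The three parts always partition the original slice...
+assert_eq!(prefix.len() + 2 * shorts.len() + suffix.len(), bytes.len());
+// ...and the middle part is aligned for `u16`.
+assert_eq!(shorts.as_ptr() as usize % std::mem::align_of::<u16>(), 0);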
1.30.0 · source

pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T])

Transmutes the mutable slice to a mutable slice of another type, ensuring alignment of the +types is maintained.

+

This method splits the slice into three distinct slices: prefix, correctly aligned middle +slice of a new type, and the suffix slice. The middle part will be as big as possible under +the given alignment constraint and element size.

+

This method has no purpose when either input element T or output element U is +zero-sized and will return the original slice without splitting anything.

+
§Safety
+

This method is essentially a transmute with respect to the elements in the returned +middle slice, so all the usual caveats pertaining to transmute::<T, U> also apply here.

+
§Examples
+

Basic usage:

+ +
unsafe {
+    let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+    let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
+    // less_efficient_algorithm_for_bytes(prefix);
+    // more_efficient_algorithm_for_aligned_shorts(shorts);
+    // less_efficient_algorithm_for_bytes(suffix);
+}
+
source

pub fn as_simd<const LANES: usize>(&self) -> (&[T], &[Simd<T, LANES>], &[T])
where + Simd<T, LANES>: AsRef<[T; LANES]>, + T: SimdElement, + LaneCount<LANES>: SupportedLaneCount,

🔬This is a nightly-only experimental API. (portable_simd)

Splits a slice into a prefix, a middle of aligned SIMD types, and a suffix.

+

This is a safe wrapper around slice::align_to, so inherits the same +guarantees as that method.

+
§Panics
+

This will panic if the size of the SIMD type is different from +LANES times that of the scalar.

+

At the time of writing, the trait restrictions on Simd<T, LANES> keep +that from ever happening, as only power-of-two numbers of lanes are +supported. It’s possible that, in the future, those restrictions might +be lifted in a way that would make it possible to see panics from this +method for something like LANES == 3.

+
§Examples
+
#![feature(portable_simd)]
+use core::simd::prelude::*;
+
+let short = &[1, 2, 3];
+let (prefix, middle, suffix) = short.as_simd::<4>();
+assert_eq!(middle, []); // Not enough elements for anything in the middle
+
+// They might be split in any possible way between prefix and suffix
+let it = prefix.iter().chain(suffix).copied();
+assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
+
+fn basic_simd_sum(x: &[f32]) -> f32 {
+    use std::ops::Add;
+    let (prefix, middle, suffix) = x.as_simd();
+    let sums = f32x4::from_array([
+        prefix.iter().copied().sum(),
+        0.0,
+        0.0,
+        suffix.iter().copied().sum(),
+    ]);
+    let sums = middle.iter().copied().fold(sums, f32x4::add);
+    sums.reduce_sum()
+}
+
+let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
+assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
+
source

pub fn as_simd_mut<const LANES: usize>( + &mut self, +) -> (&mut [T], &mut [Simd<T, LANES>], &mut [T])
where + Simd<T, LANES>: AsMut<[T; LANES]>, + T: SimdElement, + LaneCount<LANES>: SupportedLaneCount,

🔬This is a nightly-only experimental API. (portable_simd)

Splits a mutable slice into a mutable prefix, a middle of aligned SIMD types, +and a mutable suffix.

+

This is a safe wrapper around slice::align_to_mut, so inherits the same +guarantees as that method.

+

This is the mutable version of slice::as_simd; see that for examples.

+
§Panics
+

This will panic if the size of the SIMD type is different from +LANES times that of the scalar.

+

At the time of writing, the trait restrictions on Simd<T, LANES> keep +that from ever happening, as only power-of-two numbers of lanes are +supported. It’s possible that, in the future, those restrictions might +be lifted in a way that would make it possible to see panics from this +method for something like LANES == 3.

+
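Since the upstream docs defer to slice::as_simd for examples, here is a minimal hedged sketch (nightly-only, portable_simd) of the mutable variant: scale a slice in place, using scalar code for the unaligned edges and SIMD for the aligned middle:

#![feature(portable_simd)]
+use core::simd::prelude::*;
+
+fn double_all(x: &mut [f32]) {
+    let (prefix, middle, suffix) = x.as_simd_mut::<4>();
+    // Scalar fallback for the unaligned prefix and suffix.
+    for v in prefix.iter_mut().chain(suffix.iter_mut()) {
+        *v *= 2.0;
+    }
+    // SIMD path for the aligned middle.
+    for lane in middle {
+        *lane *= f32x4::splat(2.0);
+    }
+}
+
+let mut data = [1.0_f32, 2.0, 3.0, 4.0, 5.0];
+double_all(&mut data);
+assert_eq!(data, [2.0, 4.0, 6.0, 8.0, 10.0]);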
1.82.0 · source

pub fn is_sorted(&self) -> bool
where + T: PartialOrd,

Checks if the elements of this slice are sorted.

+

That is, for each element a and its following element b, a <= b must hold. If the +slice yields exactly zero or one element, true is returned.

+

Note that if Self::Item is only PartialOrd, but not Ord, the above definition +implies that this function returns false if any two consecutive items are not +comparable.

+
§Examples
+
let empty: [i32; 0] = [];
+
+assert!([1, 2, 2, 9].is_sorted());
+assert!(![1, 3, 2, 4].is_sorted());
+assert!([0].is_sorted());
+assert!(empty.is_sorted());
+assert!(![0.0, 1.0, f32::NAN].is_sorted());
+
1.82.0 · source

pub fn is_sorted_by<'a, F>(&'a self, compare: F) -> bool
where + F: FnMut(&'a T, &'a T) -> bool,

Checks if the elements of this slice are sorted using the given comparator function.

+

Instead of using PartialOrd::partial_cmp, this function uses the given compare +function to determine whether two elements are to be considered in sorted order.

+
§Examples
+
assert!([1, 2, 2, 9].is_sorted_by(|a, b| a <= b));
+assert!(![1, 2, 2, 9].is_sorted_by(|a, b| a < b));
+
+assert!([0].is_sorted_by(|a, b| true));
+assert!([0].is_sorted_by(|a, b| false));
+
+let empty: [i32; 0] = [];
+assert!(empty.is_sorted_by(|a, b| false));
+assert!(empty.is_sorted_by(|a, b| true));
+
1.82.0 · source

pub fn is_sorted_by_key<'a, F, K>(&'a self, f: F) -> bool
where + F: FnMut(&'a T) -> K, + K: PartialOrd,

Checks if the elements of this slice are sorted using the given key extraction function.

+

Instead of comparing the slice’s elements directly, this function compares the keys of the +elements, as determined by f. Apart from that, it’s equivalent to is_sorted; see its +documentation for more information.

+
§Examples
+
assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
+
1.52.0 · source

pub fn partition_point<P>(&self, pred: P) -> usize
where + P: FnMut(&T) -> bool,

Returns the index of the partition point according to the given predicate +(the index of the first element of the second partition).

+

The slice is assumed to be partitioned according to the given predicate. +This means that all elements for which the predicate returns true are at the start of the slice +and all elements for which the predicate returns false are at the end. +For example, [7, 15, 3, 5, 4, 12, 6] is partitioned under the predicate x % 2 != 0 +(all odd numbers are at the start, all even at the end).

+

If this slice is not partitioned, the returned result is unspecified and meaningless, +as this method performs a kind of binary search.

+

See also binary_search, binary_search_by, and binary_search_by_key.

+
§Examples
+
let v = [1, 2, 3, 3, 5, 6, 7];
+let i = v.partition_point(|&x| x < 5);
+
+assert_eq!(i, 4);
+assert!(v[..i].iter().all(|&x| x < 5));
+assert!(v[i..].iter().all(|&x| !(x < 5)));
+

If all elements of the slice match the predicate, including if the slice +is empty, then the length of the slice will be returned:

+ +
let a = [2, 4, 8];
+assert_eq!(a.partition_point(|x| x < &100), a.len());
+let a: [i32; 0] = [];
+assert_eq!(a.partition_point(|x| x < &100), 0);
+

If you want to insert an item to a sorted vector, while maintaining +sort order:

+ +
let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x <= num);
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+
source

pub fn take<'a, R>(self: &mut &'a [T], range: R) -> Option<&'a [T]>
where + R: OneSidedRange<usize>,

🔬This is a nightly-only experimental API. (slice_take)

Removes the subslice corresponding to the given range +and returns a reference to it.

+

Returns None and does not modify the slice if the given +range is out of bounds.

+

Note that this method only accepts one-sided ranges such as +2.. or ..6, but not 2..6.

+
§Examples
+

Taking the first three elements of a slice:

+ +
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+let mut first_three = slice.take(..3).unwrap();
+
+assert_eq!(slice, &['d']);
+assert_eq!(first_three, &['a', 'b', 'c']);
+

Taking the last two elements of a slice:

+ +
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+let mut tail = slice.take(2..).unwrap();
+
+assert_eq!(slice, &['a', 'b']);
+assert_eq!(tail, &['c', 'd']);
+

Getting None when range is out of bounds:

+ +
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+
+assert_eq!(None, slice.take(5..));
+assert_eq!(None, slice.take(..5));
+assert_eq!(None, slice.take(..=4));
+let expected: &[char] = &['a', 'b', 'c', 'd'];
+assert_eq!(Some(expected), slice.take(..4));
+
source

pub fn take_mut<'a, R>(self: &mut &'a mut [T], range: R) -> Option<&'a mut [T]>
where + R: OneSidedRange<usize>,

🔬This is a nightly-only experimental API. (slice_take)

Removes the subslice corresponding to the given range +and returns a mutable reference to it.

+

Returns None and does not modify the slice if the given +range is out of bounds.

+

Note that this method only accepts one-sided ranges such as +2.. or ..6, but not 2..6.

+
§Examples
+

Taking the first three elements of a slice:

+ +
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+let mut first_three = slice.take_mut(..3).unwrap();
+
+assert_eq!(slice, &mut ['d']);
+assert_eq!(first_three, &mut ['a', 'b', 'c']);
+

Taking the last two elements of a slice:

+ +
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+let mut tail = slice.take_mut(2..).unwrap();
+
+assert_eq!(slice, &mut ['a', 'b']);
+assert_eq!(tail, &mut ['c', 'd']);
+

Getting None when range is out of bounds:

+ +
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+
+assert_eq!(None, slice.take_mut(5..));
+assert_eq!(None, slice.take_mut(..5));
+assert_eq!(None, slice.take_mut(..=4));
+let expected: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+assert_eq!(Some(expected), slice.take_mut(..4));
+
source

pub fn take_first<'a>(self: &mut &'a [T]) -> Option<&'a T>

🔬This is a nightly-only experimental API. (slice_take)

Removes the first element of the slice and returns a reference +to it.

+

Returns None if the slice is empty.

+
§Examples
+
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c'];
+let first = slice.take_first().unwrap();
+
+assert_eq!(slice, &['b', 'c']);
+assert_eq!(first, &'a');
+
source

pub fn take_first_mut<'a>(self: &mut &'a mut [T]) -> Option<&'a mut T>

🔬This is a nightly-only experimental API. (slice_take)

Removes the first element of the slice and returns a mutable +reference to it.

+

Returns None if the slice is empty.

+
§Examples
+
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c'];
+let first = slice.take_first_mut().unwrap();
+*first = 'd';
+
+assert_eq!(slice, &['b', 'c']);
+assert_eq!(first, &'d');
+
source

pub fn take_last<'a>(self: &mut &'a [T]) -> Option<&'a T>

🔬This is a nightly-only experimental API. (slice_take)

Removes the last element of the slice and returns a reference +to it.

+

Returns None if the slice is empty.

+
§Examples
+
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c'];
+let last = slice.take_last().unwrap();
+
+assert_eq!(slice, &['a', 'b']);
+assert_eq!(last, &'c');
+
source

pub fn take_last_mut<'a>(self: &mut &'a mut [T]) -> Option<&'a mut T>

🔬This is a nightly-only experimental API. (slice_take)

Removes the last element of the slice and returns a mutable +reference to it.

+

Returns None if the slice is empty.

+
§Examples
+
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c'];
+let last = slice.take_last_mut().unwrap();
+*last = 'd';
+
+assert_eq!(slice, &['a', 'b']);
+assert_eq!(last, &'d');
+
source

pub unsafe fn get_many_unchecked_mut<const N: usize>( + &mut self, + indices: [usize; N], +) -> [&mut T; N]

🔬This is a nightly-only experimental API. (get_many_mut)

Returns mutable references to many indices at once, without doing any checks.

+

For a safe alternative see get_many_mut.

+
§Safety
+

Calling this method with overlapping or out-of-bounds indices is undefined behavior +even if the resulting references are not used.

+
§Examples
+
#![feature(get_many_mut)]
+
+let x = &mut [1, 2, 4];
+
+unsafe {
+    let [a, b] = x.get_many_unchecked_mut([0, 2]);
+    *a *= 10;
+    *b *= 100;
+}
+assert_eq!(x, &[10, 2, 400]);
+
source

pub fn get_many_mut<const N: usize>( + &mut self, + indices: [usize; N], +) -> Result<[&mut T; N], GetManyMutError<N>>

🔬This is a nightly-only experimental API. (get_many_mut)

Returns mutable references to many indices at once.

+

Returns an error if any index is out-of-bounds, or if the same index was +passed more than once.

+
§Examples
+
#![feature(get_many_mut)]
+
+let v = &mut [1, 2, 3];
+if let Ok([a, b]) = v.get_many_mut([0, 2]) {
+    *a = 413;
+    *b = 612;
+}
+assert_eq!(v, &[413, 2, 612]);
+
source

pub fn elem_offset(&self, element: &T) -> Option<usize>

🔬This is a nightly-only experimental API. (substr_range)

Returns the index that an element reference points to.

+

Returns None if element does not point within the slice or if it points between elements.

+

This method is useful for extending slice iterators like slice::split.

+

Note that this uses pointer arithmetic and does not compare elements. +To find the index of an element via comparison, use +.iter().position() instead.

+
§Panics
+

Panics if T is zero-sized.

+
§Examples
+

Basic usage:

+ +
#![feature(substr_range)]
+
+let nums: &[u32] = &[1, 7, 1, 1];
+let num = &nums[2];
+
+assert_eq!(num, &1);
+assert_eq!(nums.elem_offset(num), Some(2));
+

Returning None with an in-between element:

+ +
#![feature(substr_range)]
+
+let arr: &[[u32; 2]] = &[[0, 1], [2, 3]];
+let flat_arr: &[u32] = arr.as_flattened();
+
+let ok_elm: &[u32; 2] = flat_arr[0..2].try_into().unwrap();
+let weird_elm: &[u32; 2] = flat_arr[1..3].try_into().unwrap();
+
+assert_eq!(ok_elm, &[0, 1]);
+assert_eq!(weird_elm, &[1, 2]);
+
+assert_eq!(arr.elem_offset(ok_elm), Some(0)); // Points to element 0
+assert_eq!(arr.elem_offset(weird_elm), None); // Points between element 0 and 1
+
source

pub fn subslice_range(&self, subslice: &[T]) -> Option<Range<usize>>

🔬This is a nightly-only experimental API. (substr_range)

Returns the range of indices that a subslice points to.

+

Returns None if subslice does not point within the slice or if it points between elements.

+

This method does not compare elements. Instead, this method finds the location in the slice that +subslice was obtained from. To find the index of a subslice via comparison, instead use +.windows().position().

+

This method is useful for extending slice iterators like slice::split.

+

Note that this may return a false positive (either Some(0..0) or Some(self.len()..self.len())) +if subslice has a length of zero and points to the beginning or end of another, separate, slice.

+
§Panics
+

Panics if T is zero-sized.

+
§Examples
+

Basic usage:

+ +
#![feature(substr_range)]
+
+let nums = &[0, 5, 10, 0, 0, 5];
+
+let mut iter = nums
+    .split(|t| *t == 0)
+    .map(|n| nums.subslice_range(n).unwrap());
+
+assert_eq!(iter.next(), Some(0..0));
+assert_eq!(iter.next(), Some(1..3));
+assert_eq!(iter.next(), Some(4..4));
+assert_eq!(iter.next(), Some(5..6));
+
1.80.0 · source

pub fn as_flattened(&self) -> &[T]

Takes a &[[T; N]], and flattens it to a &[T].

+
§Panics
+

This panics if the length of the resulting slice would overflow a usize.

+

This is only possible when flattening a slice of arrays of zero-sized +types, and thus tends to be irrelevant in practice. If +size_of::<T>() > 0, this will never panic.

+
§Examples
+
assert_eq!([[1, 2, 3], [4, 5, 6]].as_flattened(), &[1, 2, 3, 4, 5, 6]);
+
+assert_eq!(
+    [[1, 2, 3], [4, 5, 6]].as_flattened(),
+    [[1, 2], [3, 4], [5, 6]].as_flattened(),
+);
+
+let slice_of_empty_arrays: &[[i32; 0]] = &[[], [], [], [], []];
+assert!(slice_of_empty_arrays.as_flattened().is_empty());
+
+let empty_slice_of_arrays: &[[u32; 10]] = &[];
+assert!(empty_slice_of_arrays.as_flattened().is_empty());
+
1.80.0 · source

pub fn as_flattened_mut(&mut self) -> &mut [T]

Takes a &mut [[T; N]], and flattens it to a &mut [T].

+
§Panics
+

This panics if the length of the resulting slice would overflow a usize.

+

This is only possible when flattening a slice of arrays of zero-sized +types, and thus tends to be irrelevant in practice. If +size_of::<T>() > 0, this will never panic.

+
§Examples
+
fn add_5_to_all(slice: &mut [i32]) {
+    for i in slice {
+        *i += 5;
+    }
+}
+
+let mut array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]];
+add_5_to_all(array.as_flattened_mut());
+assert_eq!(array, [[6, 7, 8], [9, 10, 11], [12, 13, 14]]);
+
source

pub fn sort_floats(&mut self)

🔬This is a nightly-only experimental API. (sort_floats)

Sorts the slice of floats.

+

This sort is in-place (i.e. does not allocate), O(n * log(n)) worst-case, and uses +the ordering defined by f32::total_cmp.

+
§Current implementation
+

This uses the same sorting algorithm as sort_unstable_by.

+
§Examples
+
#![feature(sort_floats)]
+let mut v = [2.6, -5e-8, f32::NAN, 8.29, f32::INFINITY, -1.0, 0.0, -f32::INFINITY, -0.0];
+
+v.sort_floats();
+let sorted = [-f32::INFINITY, -1.0, -5e-8, -0.0, 0.0, 2.6, 8.29, f32::INFINITY, f32::NAN];
+assert_eq!(&v[..8], &sorted[..8]);
+assert!(v[8].is_nan());
+
source

pub fn sort_floats(&mut self)

🔬This is a nightly-only experimental API. (sort_floats)

Sorts the slice of floats.

+

This sort is in-place (i.e. does not allocate), O(n * log(n)) worst-case, and uses +the ordering defined by f64::total_cmp.

+
§Current implementation
+

This uses the same sorting algorithm as sort_unstable_by.

+
§Examples
+
#![feature(sort_floats)]
+let mut v = [2.6, -5e-8, f64::NAN, 8.29, f64::INFINITY, -1.0, 0.0, -f64::INFINITY, -0.0];
+
+v.sort_floats();
+let sorted = [-f64::INFINITY, -1.0, -5e-8, -0.0, 0.0, 2.6, 8.29, f64::INFINITY, f64::NAN];
+assert_eq!(&v[..8], &sorted[..8]);
+assert!(v[8].is_nan());
+
1.79.0 · source

pub fn utf8_chunks(&self) -> Utf8Chunks<'_>

Creates an iterator over the contiguous valid UTF-8 ranges of this +slice, and the non-UTF-8 fragments in between.

+
§Examples
+

This function formats arbitrary but mostly-UTF-8 bytes into Rust source +code in the form of a C-string literal (c"...").

+ +
use std::fmt::Write as _;
+
+pub fn cstr_literal(bytes: &[u8]) -> String {
+    let mut repr = String::new();
+    repr.push_str("c\"");
+    for chunk in bytes.utf8_chunks() {
+        for ch in chunk.valid().chars() {
+            // Escapes \0, \t, \r, \n, \\, \', \", and uses \u{...} for non-printable characters.
+            write!(repr, "{}", ch.escape_debug()).unwrap();
+        }
+        for byte in chunk.invalid() {
+            write!(repr, "\\x{:02X}", byte).unwrap();
+        }
+    }
+    repr.push('"');
+    repr
+}
+
+fn main() {
+    let lit = cstr_literal(b"\xferris the \xf0\x9f\xa6\x80\x07");
+    let expected = stringify!(c"\xFErris the 🦀\u{7}");
+    assert_eq!(lit, expected);
+}
+
1.0.0 · source

pub fn sort(&mut self)
where + T: Ord,

Sorts the slice, preserving initial order of equal elements.

+

This sort is stable (i.e., does not reorder equal elements) and O(n * log(n)) +worst-case.

+

If the implementation of Ord for T does not implement a total order the resulting +order of elements in the slice is unspecified. All original elements will remain in the +slice and any possible modifications via interior mutability are observed in the input. Same +is true if the implementation of Ord for T panics.

+

When applicable, unstable sorting is preferred because it is generally faster than stable +sorting and it doesn’t allocate auxiliary memory. See +sort_unstable. The exception is partially sorted slices, which +may be better served with slice::sort.

+

Sorting types that only implement PartialOrd such as f32 and f64 require +additional precautions. For example, f32::NAN != f32::NAN, which doesn’t fulfill the +reflexivity requirement of Ord. By using an alternative comparison function with +slice::sort_by such as f32::total_cmp or f64::total_cmp that defines a total +order users can sort slices containing floating-point values. Alternatively, if all values +in the slice are guaranteed to be in a subset for which PartialOrd::partial_cmp forms a +total order, it’s possible to sort the slice with sort_by(|a, b| a.partial_cmp(b).unwrap()).

+
§Current implementation
+

The current implementation is based on driftsort by Orson Peters and Lukas Bergdoll, which +combines the fast average case of quicksort with the fast worst case and partial run +detection of mergesort, achieving linear time on fully sorted and reversed inputs. On inputs +with k distinct elements, the expected time to sort the data is O(n * log(k)).

+

The auxiliary memory allocation behavior depends on the input length. Short slices are +handled without allocation, medium sized slices allocate self.len() and beyond that it +clamps at self.len() / 2.

+
§Panics
+

May panic if the implementation of Ord for T does not implement a total order.

+
§Examples
+
let mut v = [4, -5, 1, -3, 2];
+
+v.sort();
+assert_eq!(v, [-5, -3, 1, 2, 4]);
+
1.0.0 · source

pub fn sort_by<F>(&mut self, compare: F)
where + F: FnMut(&T, &T) -> Ordering,

Sorts the slice with a comparison function, preserving initial order of equal elements.

+

This sort is stable (i.e., does not reorder equal elements) and O(n * log(n)) +worst-case.

+

If the comparison function compare does not implement a total order the resulting order +of elements in the slice is unspecified. All original elements will remain in the slice and +any possible modifications via interior mutability are observed in the input. Same is true +if compare panics.

+

For example, |a, b| (a - b).cmp(a) is a comparison function that is neither transitive nor +reflexive nor total: it yields a < b < c < a for a = 1, b = 2, c = 3. For more information and +examples see the Ord documentation.

+
§Current implementation
+

The current implementation is based on driftsort by Orson Peters and Lukas Bergdoll, which +combines the fast average case of quicksort with the fast worst case and partial run +detection of mergesort, achieving linear time on fully sorted and reversed inputs. On inputs +with k distinct elements, the expected time to sort the data is O(n * log(k)).

+

The auxiliary memory allocation behavior depends on the input length. Short slices are +handled without allocation, medium sized slices allocate self.len() and beyond that it +clamps at self.len() / 2.

+
§Panics
+

May panic if compare does not implement a total order.

+
§Examples
+
let mut v = [4, -5, 1, -3, 2];
+v.sort_by(|a, b| a.cmp(b));
+assert_eq!(v, [-5, -3, 1, 2, 4]);
+
+// reverse sorting
+v.sort_by(|a, b| b.cmp(a));
+assert_eq!(v, [4, 2, 1, -3, -5]);
+
1.7.0 · source

pub fn sort_by_key<K, F>(&mut self, f: F)
where + F: FnMut(&T) -> K, + K: Ord,

Sorts the slice with a key extraction function, preserving initial order of equal elements.

+

This sort is stable (i.e., does not reorder equal elements) and O(m * n * log(n)) +worst-case, where the key function is O(m).

+

If the implementation of Ord for K does not implement a total order the resulting +order of elements in the slice is unspecified. All original elements will remain in the +slice and any possible modifications via interior mutability are observed in the input. Same +is true if the implementation of Ord for K panics.

+
§Current implementation
+

The current implementation is based on driftsort by Orson Peters and Lukas Bergdoll, which +combines the fast average case of quicksort with the fast worst case and partial run +detection of mergesort, achieving linear time on fully sorted and reversed inputs. On inputs +with k distinct elements, the expected time to sort the data is O(n * log(k)).

+

The auxiliary memory allocation behavior depends on the input length. Short slices are +handled without allocation, medium sized slices allocate self.len() and beyond that it +clamps at self.len() / 2.

+
§Panics
+

May panic if the implementation of Ord for K does not implement a total order.

+
§Examples
+
let mut v = [4i32, -5, 1, -3, 2];
+
+v.sort_by_key(|k| k.abs());
+assert_eq!(v, [1, 2, -3, 4, -5]);
+
1.34.0 · source

pub fn sort_by_cached_key<K, F>(&mut self, f: F)
where + F: FnMut(&T) -> K, + K: Ord,

Sorts the slice with a key extraction function, preserving initial order of equal elements.

+

This sort is stable (i.e., does not reorder equal elements) and O(m * n + n * +log(n)) worst-case, where the key function is O(m).

+

During sorting, the key function is called at most once per element, by using temporary +storage to remember the results of key evaluation. The order of calls to the key function is +unspecified and may change in future versions of the standard library.

+

If the implementation of Ord for K does not implement a total order the resulting +order of elements in the slice is unspecified. All original elements will remain in the +slice and any possible modifications via interior mutability are observed in the input. Same +is true if the implementation of Ord for K panics.

+

For simple key functions (e.g., functions that are property accesses or basic operations), +sort_by_key is likely to be faster.

+
§Current implementation
+

The current implementation is based on instruction-parallel-network sort by Lukas +Bergdoll, which combines the fast average case of randomized quicksort with the fast worst +case of heapsort, while achieving linear time on fully sorted and reversed inputs, and +O(k * log(n)) time where k is the number of distinct elements in the input. It leverages the +superscalar out-of-order execution capabilities commonly found in CPUs to efficiently +perform the operation.

+

In the worst case, the algorithm allocates temporary storage in a Vec<(K, usize)> the +length of the slice.

+
§Panics
+

May panic if the implementation of Ord for K does not implement a total order.

+
§Examples
+
let mut v = [4i32, -5, 1, -3, 2, 10];
+
+// Strings are sorted by lexicographical order.
+v.sort_by_cached_key(|k| k.to_string());
+assert_eq!(v, [-3, -5, 1, 10, 2, 4]);
+
1.0.0 · source

pub fn to_vec(&self) -> Vec<T>
where + T: Clone,

Copies self into a new Vec.

+
§Examples
+
let s = [10, 40, 30];
+let x = s.to_vec();
+// Here, `s` and `x` can be modified independently.
+
source

pub fn to_vec_in<A>(&self, alloc: A) -> Vec<T, A>
where + A: Allocator, + T: Clone,

🔬This is a nightly-only experimental API. (allocator_api)

Copies self into a new Vec with an allocator.

+
§Examples
+
#![feature(allocator_api)]
+
+use std::alloc::System;
+
+let s = [10, 40, 30];
+let x = s.to_vec_in(System);
+// Here, `s` and `x` can be modified independently.
+
1.40.0 · source

pub fn repeat(&self, n: usize) -> Vec<T>
where + T: Copy,

Creates a vector by copying a slice n times.

+
§Panics
+

This function will panic if the capacity would overflow.

+
§Examples
+

Basic usage:

+ +
assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
+

A panic upon overflow:

+ +
// this will panic at runtime
+b"0123456789abcdef".repeat(usize::MAX);
+
1.0.0 · source

pub fn concat<Item>(&self) -> <[T] as Concat<Item>>::Output
where + [T]: Concat<Item>, + Item: ?Sized,

Flattens a slice of T into a single value Self::Output.

+
§Examples
+
assert_eq!(["hello", "world"].concat(), "helloworld");
+assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
+
1.3.0 · source

pub fn join<Separator>( + &self, + sep: Separator, +) -> <[T] as Join<Separator>>::Output
where + [T]: Join<Separator>,

Flattens a slice of T into a single value Self::Output, placing a +given separator between each.

+
§Examples
+
assert_eq!(["hello", "world"].join(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
+
1.0.0 · source

pub fn connect<Separator>( + &self, + sep: Separator, +) -> <[T] as Join<Separator>>::Output
where + [T]: Join<Separator>,

👎Deprecated since 1.3.0: renamed to join

Flattens a slice of T into a single value Self::Output, placing a +given separator between each.

+
§Examples
+
assert_eq!(["hello", "world"].connect(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
+
1.23.0 · source

pub fn to_ascii_uppercase(&self) -> Vec<u8>

Returns a vector containing a copy of this slice where each byte +is mapped to its ASCII upper case equivalent.

+

ASCII letters ‘a’ to ‘z’ are mapped to ‘A’ to ‘Z’, +but non-ASCII letters are unchanged.

+

To uppercase the value in-place, use make_ascii_uppercase.

+
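For illustration, a minimal sketch of the copying variant (not taken from the generated docs):

let upper = b"Hello, World!".to_ascii_uppercase();
assert_eq!(upper, b"HELLO, WORLD!".to_vec());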
1.23.0 · source

pub fn to_ascii_lowercase(&self) -> Vec<u8>

Returns a vector containing a copy of this slice where each byte +is mapped to its ASCII lower case equivalent.

+

ASCII letters ‘A’ to ‘Z’ are mapped to ‘a’ to ‘z’, +but non-ASCII letters are unchanged.

+

To lowercase the value in-place, use make_ascii_lowercase.

+
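Likewise, a small sketch of the lowercasing variant, added here for illustration:

let lower = b"Hello, World!".to_ascii_lowercase();
assert_eq!(lower, b"hello, world!".to_vec());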

Trait Implementations§

source§

impl AsMut<[u8]> for BytesMut

source§

fn as_mut(&mut self) -> &mut [u8]

Converts this type into a mutable reference of the (usually inferred) input type.
source§

impl AsRef<[u8]> for BytesMut

source§

fn as_ref(&self) -> &[u8]

Converts this type into a shared reference of the (usually inferred) input type.
source§

impl Borrow<[u8]> for BytesMut

source§

fn borrow(&self) -> &[u8]

Immutably borrows from an owned value. Read more
source§

impl BorrowMut<[u8]> for BytesMut

source§

fn borrow_mut(&mut self) -> &mut [u8]

Mutably borrows from an owned value. Read more
source§

impl Buf for BytesMut

source§

fn remaining(&self) -> usize

Returns the number of bytes between the current position and the end of +the buffer. Read more
source§

fn chunk(&self) -> &[u8]

Returns a slice starting at the current position and of length between 0 +and Buf::remaining(). Note that this can return a shorter slice (this allows a +non-contiguous internal representation). Read more
source§

fn advance(&mut self, cnt: usize)

Advance the internal cursor of the Buf Read more
source§

fn copy_to_bytes(&mut self, len: usize) -> Bytes

Consumes len bytes inside self and returns new instance of Bytes +with this data. Read more
source§

fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize

Available on crate feature std only.
Fills dst with potentially multiple slices starting at self’s +current position. Read more
source§

fn has_remaining(&self) -> bool

Returns true if there are any more bytes to consume Read more
source§

fn copy_to_slice(&mut self, dst: &mut [u8])

Copies bytes from self into dst. Read more
source§

fn get_u8(&mut self) -> u8

Gets an unsigned 8 bit integer from self. Read more
source§

fn get_i8(&mut self) -> i8

Gets a signed 8 bit integer from self. Read more
source§

fn get_u16(&mut self) -> u16

Gets an unsigned 16 bit integer from self in big-endian byte order. Read more
source§

fn get_u16_le(&mut self) -> u16

Gets an unsigned 16 bit integer from self in little-endian byte order. Read more
source§

fn get_u16_ne(&mut self) -> u16

Gets an unsigned 16 bit integer from self in native-endian byte order. Read more
source§

fn get_i16(&mut self) -> i16

Gets a signed 16 bit integer from self in big-endian byte order. Read more
source§

fn get_i16_le(&mut self) -> i16

Gets a signed 16 bit integer from self in little-endian byte order. Read more
source§

fn get_i16_ne(&mut self) -> i16

Gets a signed 16 bit integer from self in native-endian byte order. Read more
source§

fn get_u32(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the big-endian byte order. Read more
source§

fn get_u32_le(&mut self) -> u32

Gets an unsigned 32 bit integer from self in the little-endian byte order. Read more
source§

fn get_u32_ne(&mut self) -> u32

Gets an unsigned 32 bit integer from self in native-endian byte order. Read more
source§

fn get_i32(&mut self) -> i32

Gets a signed 32 bit integer from self in big-endian byte order. Read more
source§

fn get_i32_le(&mut self) -> i32

Gets a signed 32 bit integer from self in little-endian byte order. Read more
source§

fn get_i32_ne(&mut self) -> i32

Gets a signed 32 bit integer from self in native-endian byte order. Read more
source§

fn get_u64(&mut self) -> u64

Gets an unsigned 64 bit integer from self in big-endian byte order. Read more
source§

fn get_u64_le(&mut self) -> u64

Gets an unsigned 64 bit integer from self in little-endian byte order. Read more
source§

fn get_u64_ne(&mut self) -> u64

Gets an unsigned 64 bit integer from self in native-endian byte order. Read more
source§

fn get_i64(&mut self) -> i64

Gets a signed 64 bit integer from self in big-endian byte order. Read more
source§

fn get_i64_le(&mut self) -> i64

Gets a signed 64 bit integer from self in little-endian byte order. Read more
source§

fn get_i64_ne(&mut self) -> i64

Gets a signed 64 bit integer from self in native-endian byte order. Read more
source§

fn get_u128(&mut self) -> u128

Gets an unsigned 128 bit integer from self in big-endian byte order. Read more
source§

fn get_u128_le(&mut self) -> u128

Gets an unsigned 128 bit integer from self in little-endian byte order. Read more
source§

fn get_u128_ne(&mut self) -> u128

Gets an unsigned 128 bit integer from self in native-endian byte order. Read more
source§

fn get_i128(&mut self) -> i128

Gets a signed 128 bit integer from self in big-endian byte order. Read more
source§

fn get_i128_le(&mut self) -> i128

Gets a signed 128 bit integer from self in little-endian byte order. Read more
source§

fn get_i128_ne(&mut self) -> i128

Gets a signed 128 bit integer from self in native-endian byte order. Read more
source§

fn get_uint(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in big-endian byte order. Read more
source§

fn get_uint_le(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in little-endian byte order. Read more
source§

fn get_uint_ne(&mut self, nbytes: usize) -> u64

Gets an unsigned n-byte integer from self in native-endian byte order. Read more
source§

fn get_int(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in big-endian byte order. Read more
source§

fn get_int_le(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in little-endian byte order. Read more
source§

fn get_int_ne(&mut self, nbytes: usize) -> i64

Gets a signed n-byte integer from self in native-endian byte order. Read more
source§

fn get_f32(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f32_le(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f32_ne(&mut self) -> f32

Gets an IEEE754 single-precision (4 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn get_f64(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in big-endian byte order. Read more
source§

fn get_f64_le(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in little-endian byte order. Read more
source§

fn get_f64_ne(&mut self) -> f64

Gets an IEEE754 double-precision (8 bytes) floating point number from +self in native-endian byte order. Read more
source§

fn take(self, limit: usize) -> Take<Self>
where + Self: Sized,

Creates an adaptor which will read at most limit bytes from self. Read more
source§

fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adaptor which will chain this buffer with another. Read more
source§

fn reader(self) -> Reader<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Read trait for self. Read more
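To tie the Buf implementation together, here is a short illustrative sketch (not part of the generated docs) of reading big-endian integers out of a BytesMut; the values are arbitrary:

use bytes::{Buf, BytesMut};

let mut buf = BytesMut::from(&[0x01u8, 0x02, 0x03, 0x04][..]);
assert_eq!(buf.remaining(), 4);
assert_eq!(buf.get_u16(), 0x0102); // big-endian by default
assert_eq!(buf.get_u16(), 0x0304);
assert!(!buf.has_remaining());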
source§

impl BufMut for BytesMut

source§

fn remaining_mut(&self) -> usize

Returns the number of bytes that can be written from the current +position until the end of the buffer is reached. Read more
source§

unsafe fn advance_mut(&mut self, cnt: usize)

Advance the internal cursor of the BufMut Read more
source§

fn chunk_mut(&mut self) -> &mut UninitSlice

Returns a mutable slice starting at the current BufMut position and of +length between 0 and BufMut::remaining_mut(). Note that this can be shorter than the +whole remainder of the buffer (this allows a non-contiguous implementation). Read more
source§

fn put<T: Buf>(&mut self, src: T)
where + Self: Sized,

Transfer bytes into self from src and advance the cursor by the +number of bytes written. Read more
source§

fn put_slice(&mut self, src: &[u8])

Transfer bytes into self from src and advance the cursor by the +number of bytes written. Read more
source§

fn put_bytes(&mut self, val: u8, cnt: usize)

Put cnt bytes val into self. Read more
source§

fn has_remaining_mut(&self) -> bool

Returns true if there is space in self for more bytes. Read more
source§

fn put_u8(&mut self, n: u8)

Writes an unsigned 8 bit integer to self. Read more
source§

fn put_i8(&mut self, n: i8)

Writes a signed 8 bit integer to self. Read more
source§

fn put_u16(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in big-endian byte order. Read more
source§

fn put_u16_le(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in little-endian byte order. Read more
source§

fn put_u16_ne(&mut self, n: u16)

Writes an unsigned 16 bit integer to self in native-endian byte order. Read more
source§

fn put_i16(&mut self, n: i16)

Writes a signed 16 bit integer to self in big-endian byte order. Read more
source§

fn put_i16_le(&mut self, n: i16)

Writes a signed 16 bit integer to self in little-endian byte order. Read more
source§

fn put_i16_ne(&mut self, n: i16)

Writes a signed 16 bit integer to self in native-endian byte order. Read more
source§

fn put_u32(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in big-endian byte order. Read more
source§

fn put_u32_le(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in little-endian byte order. Read more
source§

fn put_u32_ne(&mut self, n: u32)

Writes an unsigned 32 bit integer to self in native-endian byte order. Read more
source§

fn put_i32(&mut self, n: i32)

Writes a signed 32 bit integer to self in big-endian byte order. Read more
source§

fn put_i32_le(&mut self, n: i32)

Writes a signed 32 bit integer to self in little-endian byte order. Read more
source§

fn put_i32_ne(&mut self, n: i32)

Writes a signed 32 bit integer to self in native-endian byte order. Read more
source§

fn put_u64(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in the big-endian byte order. Read more
source§

fn put_u64_le(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in little-endian byte order. Read more
source§

fn put_u64_ne(&mut self, n: u64)

Writes an unsigned 64 bit integer to self in native-endian byte order. Read more
source§

fn put_i64(&mut self, n: i64)

Writes a signed 64 bit integer to self in the big-endian byte order. Read more
source§

fn put_i64_le(&mut self, n: i64)

Writes a signed 64 bit integer to self in little-endian byte order. Read more
source§

fn put_i64_ne(&mut self, n: i64)

Writes a signed 64 bit integer to self in native-endian byte order. Read more
source§

fn put_u128(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in the big-endian byte order. Read more
source§

fn put_u128_le(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in little-endian byte order. Read more
source§

fn put_u128_ne(&mut self, n: u128)

Writes an unsigned 128 bit integer to self in native-endian byte order. Read more
source§

fn put_i128(&mut self, n: i128)

Writes a signed 128 bit integer to self in the big-endian byte order. Read more
source§

fn put_i128_le(&mut self, n: i128)

Writes a signed 128 bit integer to self in little-endian byte order. Read more
source§

fn put_i128_ne(&mut self, n: i128)

Writes a signed 128 bit integer to self in native-endian byte order. Read more
source§

fn put_uint(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in big-endian byte order. Read more
source§

fn put_uint_le(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in the little-endian byte order. Read more
source§

fn put_uint_ne(&mut self, n: u64, nbytes: usize)

Writes an unsigned n-byte integer to self in the native-endian byte order. Read more
source§

fn put_int(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in big-endian byte order. Read more
source§

fn put_int_le(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in little-endian byte order. Read more
source§

fn put_int_ne(&mut self, n: i64, nbytes: usize)

Writes low nbytes of a signed integer to self in native-endian byte order. Read more
source§

fn put_f32(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in big-endian byte order. Read more
source§

fn put_f32_le(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in little-endian byte order. Read more
source§

fn put_f32_ne(&mut self, n: f32)

Writes an IEEE754 single-precision (4 bytes) floating point number to +self in native-endian byte order. Read more
source§

fn put_f64(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in big-endian byte order. Read more
source§

fn put_f64_le(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in little-endian byte order. Read more
source§

fn put_f64_ne(&mut self, n: f64)

Writes an IEEE754 double-precision (8 bytes) floating point number to +self in native-endian byte order. Read more
source§

fn limit(self, limit: usize) -> Limit<Self>
where + Self: Sized,

Creates an adaptor which can write at most limit bytes to self. Read more
source§

fn writer(self) -> Writer<Self>
where + Self: Sized,

Available on crate feature std only.
Creates an adaptor which implements the Write trait for self. Read more
source§

fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where + Self: Sized,

Creates an adapter which will chain this buffer with another. Read more
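A brief illustrative sketch (not from the generated docs) of writing through the BufMut implementation; BytesMut grows as needed, so the writes below cannot overrun:

use bytes::{BufMut, BytesMut};

let mut buf = BytesMut::with_capacity(16);
buf.put_u8(0x01);
buf.put_u16(0x0203); // written in big-endian byte order
buf.put_slice(b"hi");
assert_eq!(&buf[..], &[0x01, 0x02, 0x03, b'h', b'i'][..]);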
source§

impl Clone for BytesMut

source§

fn clone(&self) -> BytesMut

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
source§

impl Debug for BytesMut

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl Default for BytesMut

source§

fn default() -> BytesMut

Returns the “default value” for a type. Read more
source§

impl Deref for BytesMut

source§

type Target = [u8]

The resulting type after dereferencing.
source§

fn deref(&self) -> &[u8]

Dereferences the value.
source§

impl DerefMut for BytesMut

source§

fn deref_mut(&mut self) -> &mut [u8]

Mutably dereferences the value.
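Because of Deref and DerefMut, the [u8] slice methods documented above are available directly on BytesMut; a tiny illustrative sketch:

use bytes::BytesMut;

let mut buf = BytesMut::from(&b"hello"[..]);
buf.make_ascii_uppercase(); // a `[u8]` method, reached through DerefMut
assert_eq!(&buf[..], b"HELLO");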
source§

impl<'de> Deserialize<'de> for BytesMut

source§

fn deserialize<D>(deserializer: D) -> Result<BytesMut, D::Error>
where + D: Deserializer<'de>,

Deserialize this value from the given Serde deserializer. Read more
source§

impl Drop for BytesMut

source§

fn drop(&mut self)

Executes the destructor for this type. Read more
source§

impl<'a> Extend<&'a u8> for BytesMut

source§

fn extend<T>(&mut self, iter: T)
where + T: IntoIterator<Item = &'a u8>,

Extends a collection with the contents of an iterator. Read more
source§

fn extend_one(&mut self, item: A)

🔬This is a nightly-only experimental API. (extend_one)
Extends a collection with exactly one element.
source§

fn extend_reserve(&mut self, additional: usize)

🔬This is a nightly-only experimental API. (extend_one)
Reserves capacity in a collection for the given number of additional elements. Read more
source§

impl Extend<Bytes> for BytesMut

source§

fn extend<T>(&mut self, iter: T)
where + T: IntoIterator<Item = Bytes>,

Extends a collection with the contents of an iterator. Read more
source§

fn extend_one(&mut self, item: A)

🔬This is a nightly-only experimental API. (extend_one)
Extends a collection with exactly one element.
source§

fn extend_reserve(&mut self, additional: usize)

🔬This is a nightly-only experimental API. (extend_one)
Reserves capacity in a collection for the given number of additional elements. Read more
source§

impl Extend<u8> for BytesMut

source§

fn extend<T>(&mut self, iter: T)
where + T: IntoIterator<Item = u8>,

Extends a collection with the contents of an iterator. Read more
source§

fn extend_one(&mut self, item: A)

🔬This is a nightly-only experimental API. (extend_one)
Extends a collection with exactly one element.
source§

fn extend_reserve(&mut self, additional: usize)

🔬This is a nightly-only experimental API. (extend_one)
Reserves capacity in a collection for the given number of additional elements. Read more
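A short illustrative sketch (not from the generated docs) covering the Extend implementations for borrowed and owned bytes:

use bytes::BytesMut;

let mut buf = BytesMut::from(&b"foo"[..]);
buf.extend(b"bar"); // Extend<&u8>: iterator of &u8
buf.extend([b'!']); // Extend<u8>: iterator of u8
assert_eq!(&buf[..], b"foobar!");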
source§

impl<'a> From<&'a [u8]> for BytesMut

source§

fn from(src: &'a [u8]) -> BytesMut

Converts to this type from the input type.
source§

impl<'a> From<&'a str> for BytesMut

source§

fn from(src: &'a str) -> BytesMut

Converts to this type from the input type.
source§

impl From<Bytes> for BytesMut

source§

fn from(bytes: Bytes) -> Self

Converts self into BytesMut.

+

If bytes is unique for the entire original buffer, this will return a +BytesMut with the contents of bytes without copying. +If bytes is not unique for the entire original buffer, this will copy +the subset of the original buffer that bytes references into a new BytesMut.

+
§Examples
+
use bytes::{Bytes, BytesMut};
+
+let bytes = Bytes::from(b"hello".to_vec());
+assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
+
source§

impl From<BytesMut> for Bytes

source§

fn from(src: BytesMut) -> Bytes

Converts to this type from the input type.
source§

impl From<BytesMut> for Vec<u8>

source§

fn from(bytes: BytesMut) -> Self

Converts to this type from the input type.
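For illustration, a minimal sketch of the two owned conversions above (freezing into Bytes, or taking the storage as a Vec<u8>):

use bytes::{Bytes, BytesMut};

let buf = BytesMut::from(&b"abc"[..]);
let frozen: Bytes = buf.clone().into(); // From<BytesMut> for Bytes
let vec: Vec<u8> = buf.into();          // From<BytesMut> for Vec<u8>
assert_eq!(&frozen[..], &vec[..]);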
source§

impl<'a> FromIterator<&'a u8> for BytesMut

source§

fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self

Creates a value from an iterator. Read more
source§

impl FromIterator<u8> for BytesMut

source§

fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self

Creates a value from an iterator. Read more
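A small illustrative sketch of collecting an iterator of bytes into a BytesMut:

use bytes::BytesMut;

let buf: BytesMut = (b'a'..=b'e').collect(); // FromIterator<u8>
assert_eq!(&buf[..], b"abcde");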
source§

impl Hash for BytesMut

source§

fn hash<H>(&self, state: &mut H)
where + H: Hasher,

Feeds this value into the given Hasher. Read more
1.3.0 · source§

fn hash_slice<H>(data: &[Self], state: &mut H)
where + H: Hasher, + Self: Sized,

Feeds a slice of this type into the given Hasher. Read more
source§

impl<'a> IntoIterator for &'a BytesMut

source§

type Item = &'a u8

The type of the elements being iterated over.
source§

type IntoIter = Iter<'a, u8>

Which kind of iterator are we turning this into?
source§

fn into_iter(self) -> Self::IntoIter

Creates an iterator from a value. Read more
source§

impl IntoIterator for BytesMut

source§

type Item = u8

The type of the elements being iterated over.
source§

type IntoIter = IntoIter<BytesMut>

Which kind of iterator are we turning this into?
source§

fn into_iter(self) -> Self::IntoIter

Creates an iterator from a value. Read more
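For illustration, a brief sketch contrasting the borrowing and the consuming IntoIterator implementations:

use bytes::BytesMut;

let buf = BytesMut::from(&b"abc"[..]);
// `&BytesMut` yields `&u8` without consuming the buffer ...
let sum: u32 = (&buf).into_iter().map(|b| *b as u32).sum();
assert_eq!(sum, 294);
// ... while `BytesMut` by value yields owned `u8` values.
let owned: Vec<u8> = buf.into_iter().collect();
assert_eq!(owned, b"abc".to_vec());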
source§

impl LowerHex for BytesMut

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl Ord for BytesMut

source§

fn cmp(&self, other: &BytesMut) -> Ordering

This method returns an Ordering between self and other. Read more
1.21.0 · source§

fn max(self, other: Self) -> Self
where + Self: Sized,

Compares and returns the maximum of two values. Read more
1.21.0 · source§

fn min(self, other: Self) -> Self
where + Self: Sized,

Compares and returns the minimum of two values. Read more
1.50.0 · source§

fn clamp(self, min: Self, max: Self) -> Self
where + Self: Sized + PartialOrd,

Restrict a value to a certain interval. Read more
source§

impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where + BytesMut: PartialEq<T>,

source§

fn eq(&self, other: &&'a T) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<[u8]> for BytesMut

source§

fn eq(&self, other: &[u8]) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Bytes> for BytesMut

source§

fn eq(&self, other: &Bytes) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for &[u8]

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for &str

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for [u8]

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for Bytes

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for String

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for Vec<u8>

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<BytesMut> for str

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<String> for BytesMut

source§

fn eq(&self, other: &String) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<Vec<u8>> for BytesMut

source§

fn eq(&self, other: &Vec<u8>) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq<str> for BytesMut

source§

fn eq(&self, other: &str) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
source§

impl PartialEq for BytesMut

source§

fn eq(&self, other: &BytesMut) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, +and should not be overridden without very good reason.
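The cross-type PartialEq implementations above allow direct comparisons against string and byte types; a minimal illustrative sketch:

use bytes::{Bytes, BytesMut};

let buf = BytesMut::from(&b"hello"[..]);
assert_eq!(buf, "hello");                      // PartialEq<str>
assert_eq!(buf, &b"hello"[..]);                // PartialEq<[u8]>
assert_eq!(buf, Bytes::from_static(b"hello")); // PartialEq<Bytes>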
source§

impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where + BytesMut: PartialOrd<T>,

source§

fn partial_cmp(&self, other: &&'a T) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<[u8]> for BytesMut

source§

fn partial_cmp(&self, other: &[u8]) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<BytesMut> for &[u8]

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<BytesMut> for &str

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<BytesMut> for [u8]

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<BytesMut> for String

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<BytesMut> for Vec<u8>

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<BytesMut> for str

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<String> for BytesMut

source§

fn partial_cmp(&self, other: &String) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<Vec<u8>> for BytesMut

source§

fn partial_cmp(&self, other: &Vec<u8>) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd<str> for BytesMut

source§

fn partial_cmp(&self, other: &str) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl PartialOrd for BytesMut

source§

fn partial_cmp(&self, other: &BytesMut) -> Option<Ordering>

This method returns an ordering between self and other values if one exists. Read more
1.0.0 · source§

fn lt(&self, other: &Rhs) -> bool

Tests less than (for self and other) and is used by the < operator. Read more
1.0.0 · source§

fn le(&self, other: &Rhs) -> bool

Tests less than or equal to (for self and other) and is used by the +<= operator. Read more
1.0.0 · source§

fn gt(&self, other: &Rhs) -> bool

Tests greater than (for self and other) and is used by the > +operator. Read more
1.0.0 · source§

fn ge(&self, other: &Rhs) -> bool

Tests greater than or equal to (for self and other) and is used by +the >= operator. Read more
source§

impl Serialize for BytesMut

source§

fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where + S: Serializer,

Serialize this value into the given Serde serializer. Read more
source§

impl UpperHex for BytesMut

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
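A short sketch of the hex formatting implementations, assuming the crate's usual rendering of each byte as two hex digits:

use bytes::BytesMut;

let buf = BytesMut::from(&[0xDEu8, 0xAD][..]);
assert_eq!(format!("{:x}", buf), "dead"); // LowerHex
assert_eq!(format!("{:X}", buf), "DEAD"); // UpperHex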
source§

impl Write for BytesMut

source§

fn write_str(&mut self, s: &str) -> Result

Writes a string slice into this writer, returning whether the write +succeeded. Read more
source§

fn write_fmt(&mut self, args: Arguments<'_>) -> Result

Glue for usage of the write! macro with implementors of this trait. Read more
1.1.0 · source§

fn write_char(&mut self, c: char) -> Result<(), Error>

Writes a char into this writer, returning whether the write succeeded. Read more
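Because BytesMut implements std::fmt::Write, the write! macro can format directly into it; a minimal illustrative sketch:

use std::fmt::Write as _;
use bytes::BytesMut;

let mut buf = BytesMut::new();
write!(buf, "id={}", 42).unwrap();
assert_eq!(&buf[..], b"id=42");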
source§

impl Eq for BytesMut

source§

impl Send for BytesMut

source§

impl Sync for BytesMut

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for T
where + T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where + T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where + T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> CloneToUninit for T
where + T: Clone,

source§

unsafe fn clone_to_uninit(&self, dst: *mut T)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

+
source§

impl<T, U> Into<U> for T
where + U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

+

That is, this conversion is whatever the implementation of +From<T> for U chooses to do.

+
source§

impl<T> ToOwned for T
where + T: Clone,

source§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T, U> TryFrom<U> for T
where + U: Into<T>,

source§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where + U: TryFrom<T>,

source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
source§

impl<T> DeserializeOwned for T
where + T: for<'de> Deserialize<'de>,

\ No newline at end of file diff --git a/crates.js b/crates.js new file mode 100644 index 000000000..5b2342479 --- /dev/null +++ b/crates.js @@ -0,0 +1,2 @@ +window.ALL_CRATES = ["bytes"]; +//{"start":21,"fragment_lengths":[7]} \ No newline at end of file diff --git a/help.html b/help.html new file mode 100644 index 000000000..c49325766 --- /dev/null +++ b/help.html @@ -0,0 +1 @@ +Help

Rustdoc help

Back
\ No newline at end of file diff --git a/search-index.js b/search-index.js new file mode 100644 index 000000000..72178c261 --- /dev/null +++ b/search-index.js @@ -0,0 +1,4 @@ +var searchIndex = new Map(JSON.parse('[["bytes",{"t":"EEFFNNNNNNNNNNNNNCNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNKKFFFFFFFMNNMNNNNNNNNNNNNNNNNNNNNNNMNNMNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNMNNMNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN","n":["Buf","BufMut","Bytes","BytesMut","advance","","advance_mut","as_mut","as_ref","","borrow","","","","borrow_mut","","","buf","capacity","chunk","","chunk_mut","clear","","clone","","clone_into","","clone_to_uninit","","cmp","","copy_from_slice","copy_to_bytes","","default","","deref","","deref_mut","deserialize","","drop","","eq","","","","","","","","","","","","","","extend","","","extend_from_slice","fmt","","","","","","freeze","from","","","","","","","","","","","from_iter","","","from_static","hash","","into","","into_iter","","","","is_empty","","is_unique","len","","new","","partial_cmp","","","","","","","","","","","","put","put_bytes","put_slice","remaining","","remaining_mut","reserve","resize","serialize","","set_len","slice","slice_ref","spare_capacity_mut","split","split_off","","split_to","","to_owned","","truncate","","try_from","","try_into","","try_into_mut","try_reclaim","type_id","","unsplit","with_capacity","write_fmt","write_str","zeroed","Buf","BufMut","Chain","IntoIter","Limit","Reader","Take","UninitSlice","Writer","advance","","","advance_mut","","","as_mut_ptr","as_uninit_slice_mut","borrow","","","","","","","borrow_mut","","","","","","","chain","","chain_mut","","chunk","","","chunk_mut","","","chunks_vectored","","","consume","copy_from_slice","copy_to_bytes","","","","copy_to_slice","","fill_buf","first_mut","first_ref","flush","fmt","","","","","","","from","","","","","","","","from_raw_parts_mut","get_f32","","get_f32_le","","get_f32_ne","","get_f64","","get_f64_le","","get_f64_ne","","get_i128","","get_i128_le","","get_i128_ne","","get_i16","","get_i16_le","","get_i16_ne","","get_i32","","get_i32_le","","get_i32_ne","","get_i64","","get_i64_le","","get_i64_ne","","get_i8","","get_int","","get_int_le","","get_int_ne","","get_mut","","","","","get_ref","","","","","get_u128","","get_u128_le","","get_u128_ne","","get_u16","","get_u16_le","","get_u16_ne","","get_u32","","get_u32_le","","get_u32_ne","","get_u64","","get_u64_le","","get_u64_ne","","get_u8","","get_uint","","get_uint_le","","get_uint_ne","","has_remaining","","has_remaining_mut","","index","","","","","","index_mut","","","","","","into","","","","","","into_inner","","","","","","into_iter","","last_mut","last_ref","len","limit","","","","new","","next","put","","put_bytes","","put_f32","","put_f32_le","","put_f32_ne","","put_f64","","put_f64_le","","put_f64_ne","","put_i128","","put_i128_le","","put_i128_ne","","put_i16","","put_i16_le","","put_i16_ne","","put_i32","","put_i32_le","","put_i32_ne","","put_i64","","put_i64_le","","put_i64_ne","","put_i8","","put_int","","put_int_le","","put_int_ne","","put_slice","","put_u128","","put_u128_le","","put_u128_ne","","put_u16","","put_u16_le","","put_u16_ne","","put_u32","","put_u32_le","","put_u32_ne","","put_u64","","put_u64_le","","put_u64_ne","","put_u8",
"","put_uint","","put_uint_le","","put_uint_ne","","read","reader","","remaining","","","remaining_mut","","","set_limit","","size_hint","take","","try_from","","","","","","try_into","","","","","","type_id","","","","","","","uninit","write","write_byte","writer",""],"q":[[0,"bytes"],[147,"bytes::buf"],[465,"bytes::bytes"],[466,"bytes::bytes_mut"],[467,"bytes::buf::uninit_slice"],[468,"core::cmp"],[469,"core::result"],[470,"serde::de"],[471,"alloc::vec"],[472,"alloc::string"],[473,"core::marker"],[474,"core::iter::traits::collect"],[475,"core::fmt"],[476,"alloc::boxed"],[477,"core::hash"],[478,"core::option"],[479,"bytes::buf::buf_impl"],[480,"serde::ser"],[481,"core::ops::range"],[482,"core::mem::maybe_uninit"],[483,"core::any"],[484,"bytes::buf::chain"],[485,"bytes::buf::take"],[486,"bytes::buf::buf_mut"],[487,"bytes::buf::limit"],[488,"std::io"],[489,"bytes::buf::reader"],[490,"std::io::error"],[491,"bytes::buf::writer"],[492,"bytes::buf::iter"]],"i":"````dl00101100100`010010101010101101010010101111111000000000001110000111111100001001101011001011010111111000000000100001001100101010101010101000000`````````CbCnD`Db2DdAb004E`2Dh5Dl372416088558765748871388768817703724160337241603888888888888888888888888888888888888888888882416024160888888888888888888888888888888888855333333333333724160724160727735546322555555555555555555555555555555555555555555555555555555555555555555555555555555555518887657446288724160724160372416030355","f":"````{{{f{bd}}h}j}{{{f{bl}}h}j}0{{{f{bl}}}{{f{b{A`{n}}}}}}{{{f{d}}}{{f{{A`{n}}}}}}{{{f{l}}}{{f{{A`{n}}}}}}1{f{{f{c}}}{}}01{{{f{b}}}{{f{bc}}}{}}40`{{{f{l}}}h}43{{{f{bl}}}{{f{bAb}}}}{{{f{bd}}}j}{{{f{bl}}}j}{{{f{d}}}d}{{{f{l}}}l}{{f{f{bc}}}j{}}0{fj}0{{{f{d}}{f{d}}}Ad}{{{f{l}}{f{l}}}Ad}{{{f{{A`{n}}}}}d}{{{f{bd}}h}d}{{{f{bl}}h}d}{{}d}{{}l}{{{f{d}}}{{f{{A`{n}}}}}}{{{f{l}}}{{f{{A`{n}}}}}}{{{f{bl}}}{{f{b{A`{n}}}}}}{c{{Af{d}}}Ah}{c{{Af{l}}}Ah}{{{f{bd}}}j}{{{f{bl}}}j}{{{f{d}}{f{{Aj{n}}}}}Al}{{{f{d}}{f{d}}}Al}{{{f{d}}{f{An}}}Al}{{{f{d}}{f{{A`{n}}}}}Al}{{{f{d}}{f{{f{c}}}}}AlB`}{{{f{d}}{f{l}}}Al}{{{f{d}}{f{Bb}}}Al}{{{f{l}}{f{d}}}Al}{{{f{l}}{f{An}}}Al}{{{f{l}}{f{{Aj{n}}}}}Al}{{{f{l}}{f{{f{c}}}}}AlB`}{{{f{l}}{f{Bb}}}Al}{{{f{l}}{f{l}}}Al}{{{f{l}}{f{{A`{n}}}}}Al}{{{f{bl}}c}j{{Bf{}{{Bd{n}}}}}}{{{f{bl}}c}j{{Bf{}{{Bd{{f{n}}}}}}}}{{{f{bl}}c}j{{Bf{}{{Bd{d}}}}}}{{{f{bl}}{f{{A`{n}}}}}j}{{{f{d}}{f{bBh}}}Bj}00{{{f{l}}{f{bBh}}}Bj}00{ld}{{{f{{A`{n}}}}}d}{cc{}}2{And}{{{Bl{{A`{n}}}}}d}{{{Aj{n}}}d}{{{f{Bb}}}d}{{{f{{A`{n}}}}}l}{{{f{Bb}}}l}6{dl}{cd{{Bf{}{{Bd{n}}}}}}{cl{{Bf{}{{Bd{n}}}}}}{cl{{Bf{}{{Bd{{f{n}}}}}}}};{{{f{d}}{f{bc}}}jBn}{{{f{l}}{f{bc}}}jBn}{{}c{}}0{dc{}}{{{f{d}}}c{}}{{{f{l}}}c{}}{lc{}}{{{f{d}}}Al}{{{f{l}}}Al}1{{{f{d}}}h}{{{f{l}}}h}{{}d}{{}l}{{{f{d}}{f{{Aj{n}}}}}{{C`{Ad}}}}{{{f{d}}{f{Bb}}}{{C`{Ad}}}}{{{f{d}}{f{{f{c}}}}}{{C`{Ad}}}B`}{{{f{d}}{f{{A`{n}}}}}{{C`{Ad}}}}{{{f{d}}{f{d}}}{{C`{Ad}}}}{{{f{d}}{f{An}}}{{C`{Ad}}}}{{{f{l}}{f{An}}}{{C`{Ad}}}}{{{f{l}}{f{Bb}}}{{C`{Ad}}}}{{{f{l}}{f{l}}}{{C`{Ad}}}}{{{f{l}}{f{{f{c}}}}}{{C`{Ad}}}B`}{{{f{l}}{f{{A`{n}}}}}{{C`{Ad}}}}{{{f{l}}{f{{Aj{n}}}}}{{C`{Ad}}}}{{{f{bl}}c}jCb}{{{f{bl}}nh}j}{{{f{bl}}{f{{A`{n}}}}}j}{{{f{d}}}h}{{{f{l}}}h}0{{{f{bl}}h}j}{{{f{bl}}hn}j}{{{f{d}}c}AfCd}{{{f{l}}c}AfCd}3{{{f{d}}c}d{{Cf{h}}}}{{{f{d}}{f{{A`{n}}}}}d}{{{f{bl}}}{{f{b{A`{{Ch{n}}}}}}}}{{{f{bl}}}l}{{{f{bd}}h}d}{{{f{bl}}h}l}10{fc{}}0{{{f{bd}}h}j};{c{{Af{e}}}{}{}}0{{}{{Af{c}}}{}}0{d{{Af{ld}}}}{{{f{bl}}h}Al}{fCj}0{{{f{bl}}l}j}{hl}{{{f{bl}}Cl}Bj}{{{f{bl}}{f{Bb}}}Bj}2`````````{{{f{bCb}}h}j}{{{f{b{Cn{ce}}}}h}jCbCb}{{{f{b{D`{c}}}}h}jCb}{{{f{bDb}}h}j}{{{f{b{Cn{ce}}}}h}jDbDb}{{{f{b{Dd{c}}}
}h}jDb}{{{f{bAb}}}n}{{{f{bAb}}}{{f{b{A`{{Ch{n}}}}}}}}{f{{f{c}}}{}}000000{{{f{b}}}{{f{bc}}}{}}000000{{Cbc}{{Cn{Cbc}}}Cb}0{{Dbc}{{Cn{Dbc}}}Db}0{{{f{Cb}}}{{f{{A`{n}}}}}}{{{f{{Cn{ce}}}}}{{f{{A`{n}}}}}CbCb}{{{f{{D`{c}}}}}{{f{{A`{n}}}}}Cb}{{{f{bDb}}}{{f{bAb}}}}{{{f{b{Cn{ce}}}}}{{f{bAb}}}DbDb}{{{f{b{Dd{c}}}}}{{f{bAb}}}Db}{{{f{Cb}}{f{b{A`{Df}}}}}h}0{{{f{{Cn{ce}}}}{f{b{A`{Df}}}}}hCbCb}{{{f{b{Dh{c}}}}h}j{CbB`}}{{{f{bAb}}{f{{A`{n}}}}}j}{{{f{bCb}}h}d}0{{{f{b{Cn{ce}}}}h}dCbCb}{{{f{b{D`{c}}}}h}dCb}{{{f{bCb}}{f{b{A`{n}}}}}j}0{{{f{b{Dh{c}}}}}{{Dj{{f{{A`{n}}}}}}}{CbB`}}{{{f{b{Cn{ce}}}}}{{f{bc}}}{}{}}{{{f{{Cn{ce}}}}}{{f{c}}}{}{}}{{{f{b{Dl{c}}}}}{{Dj{j}}}{DbB`}}{{{f{Ab}}{f{bBh}}}Bj}{{{f{{Cn{ce}}}}{f{bBh}}}BjDnDn}{{{f{{E`{c}}}}{f{bBh}}}BjDn}{{{f{{Dd{c}}}}{f{bBh}}}BjDn}{{{f{{Dh{c}}}}{f{bBh}}}BjDn}{{{f{{D`{c}}}}{f{bBh}}}BjDn}{{{f{{Dl{c}}}}{f{bBh}}}BjDn}{{{f{b{A`{n}}}}}{{f{bAb}}}}{{{f{b{A`{{Ch{n}}}}}}}{{f{bAb}}}}{cc{}}00000{{nh}{{f{bAb}}}}{{{f{bCb}}}Eb}00000{{{f{bCb}}}Ed}00000{{{f{bCb}}}Ef}00000{{{f{bCb}}}Eh}00000{{{f{bCb}}}Ej}00000{{{f{bCb}}}El}00000{{{f{bCb}}}En}0{{{f{bCb}}h}El}00000{{{f{b{E`{c}}}}}{{f{bc}}}{}}{{{f{b{Dd{c}}}}}{{f{bc}}}{}}{{{f{b{Dh{c}}}}}{{f{bc}}}Cb}{{{f{b{D`{c}}}}}{{f{bc}}}{}}{{{f{b{Dl{c}}}}}{{f{bc}}}Db}{{{f{{E`{c}}}}}{{f{c}}}{}}{{{f{{Dd{c}}}}}{{f{c}}}{}}{{{f{{Dh{c}}}}}{{f{c}}}Cb}{{{f{{D`{c}}}}}{{f{c}}}{}}{{{f{{Dl{c}}}}}{{f{c}}}Db}{{{f{bCb}}}F`}00000{{{f{bCb}}}Fb}00000{{{f{bCb}}}Fd}00000{{{f{bCb}}}Ff}00000{{{f{bCb}}}n}0{{{f{bCb}}h}Ff}00000{{{f{Cb}}}Al}0{{{f{Db}}}Al}0{{{f{Ab}}{Fh{h}}}{{f{Ab}}}}{{{f{Ab}}Fj}{{f{Ab}}}}{{{f{Ab}}{Fl{h}}}{{f{Ab}}}}{{{f{Ab}}{Fn{h}}}{{f{Ab}}}}{{{f{Ab}}{G`{h}}}{{f{Ab}}}}{{{f{Ab}}{Gb{h}}}{{f{Ab}}}}{{{f{bAb}}{Fh{h}}}{{f{bAb}}}}{{{f{bAb}}{G`{h}}}{{f{bAb}}}}{{{f{bAb}}{Fn{h}}}{{f{bAb}}}}{{{f{bAb}}Fj}{{f{bAb}}}}{{{f{bAb}}{Gb{h}}}{{f{bAb}}}}{{{f{bAb}}{Fl{h}}}{{f{bAb}}}}{{}c{}}00000{{{Cn{ce}}}{{Gd{ce}}}{}{}}{{{E`{c}}}c{}}{{{Dd{c}}}c{}}{{{Dh{c}}}cCb}{{{D`{c}}}c{}}{{{Dl{c}}}cDb}{{{Cn{ce}}}gCbCb{}}7{{{f{b{Cn{ce}}}}}{{f{be}}}{}{}}{{{f{{Cn{ce}}}}}{{f{e}}}{}{}}{{{f{Ab}}}h}{{Dbh}{{Dd{Db}}}}0{{{f{{Dd{c}}}}}h{}}{{{f{{D`{c}}}}}h{}}{{{f{b{A`{n}}}}}{{f{bAb}}}}{c{{E`{c}}}{}}{{{f{b{E`{c}}}}}{{C`{n}}}Cb}{{{f{bDb}}c}jCb}0{{{f{bDb}}nh}j}0{{{f{bDb}}Eb}j}00000{{{f{bDb}}Ed}j}00000{{{f{bDb}}Ef}j}00000{{{f{bDb}}Eh}j}00000{{{f{bDb}}Ej}j}00000{{{f{bDb}}El}j}00000{{{f{bDb}}En}j}0{{{f{bDb}}Elh}j}00000{{{f{bDb}}{f{{A`{n}}}}}j}0{{{f{bDb}}F`}j}00000{{{f{bDb}}Fb}j}00000{{{f{bDb}}Fd}j}00000{{{f{bDb}}Ff}j}00000{{{f{bDb}}n}j}0{{{f{bDb}}Ffh}j}00000{{{f{b{Dh{c}}}}{f{b{A`{n}}}}}{{Dj{h}}}{CbB`}}{Cb{{Dh{Cb}}}}0{{{f{Cb}}}h}{{{f{{Cn{ce}}}}}hCbCb}{{{f{{D`{c}}}}}hCb}{{{f{Db}}}h}{{{f{{Cn{ce}}}}}hDbDb}{{{f{{Dd{c}}}}}hDb}{{{f{b{Dd{c}}}}h}j{}}{{{f{b{D`{c}}}}h}j{}}{{{f{{E`{c}}}}}{{Gd{h{C`{h}}}}}Cb}{{Cbh}{{D`{Cb}}}}0{c{{Af{e}}}{}{}}00000{{}{{Af{c}}}{}}00000{fCj}000000{{{f{b{A`{{Ch{n}}}}}}}{{f{bAb}}}}{{{f{b{Dl{c}}}}{f{{A`{n}}}}}{{Dj{h}}}{DbB`}}{{{f{bAb}}hn}j}{Db{{Dl{Db}}}}0","D":"BCb","p":[[0,"mut"],[5,"Bytes",0,465],[1,"reference"],[1,"usize"],[1,"unit"],[5,"BytesMut",0,466],[1,"u8"],[1,"slice"],[5,"UninitSlice",147,467],[6,"Ordering",468],[6,"Result",469],[10,"Deserializer",470],[5,"Vec",471],[1,"bool"],[5,"String",472],[10,"Sized",473],[1,"str"],[17,"Item"],[10,"IntoIterator",474],[5,"Formatter",475],[8,"Result",475],[5,"Box",476],[10,"Hasher",477],[6,"Option",478],[10,"Buf",147,479],[10,"Serializer",480],[10,"RangeBounds",481],[20,"MaybeUninit",482],[5,"TypeId",483],[5,"Arguments",475],[5,"Chain",147,484],[5,"Take",147,485],[10,"BufMut",147,486],[5,"Limit",147,487],[5,"IoSlice",488],[5,"Reader",147,489],[8,"Result",490],[5
,"Writer",147,491],[10,"Debug",475],[5,"IntoIter",147,492],[1,"f32"],[1,"f64"],[1,"i128"],[1,"i16"],[1,"i32"],[1,"i64"],[1,"i8"],[1,"u128"],[1,"u16"],[1,"u32"],[1,"u64"],[5,"RangeToInclusive",481],[5,"RangeFull",481],[5,"RangeTo",481],[5,"RangeFrom",481],[5,"RangeInclusive",481],[5,"Range",481],[1,"tuple"]],"r":[[0,479],[1,486],[2,465],[3,466],[147,479],[148,486],[149,484],[150,492],[151,487],[152,489],[153,485],[154,467],[155,491]],"b":[[44,"impl-PartialEq%3CVec%3Cu8%3E%3E-for-Bytes"],[45,"impl-PartialEq-for-Bytes"],[46,"impl-PartialEq%3CString%3E-for-Bytes"],[47,"impl-PartialEq%3C%5Bu8%5D%3E-for-Bytes"],[48,"impl-PartialEq%3C%26T%3E-for-Bytes"],[49,"impl-PartialEq%3CBytesMut%3E-for-Bytes"],[50,"impl-PartialEq%3Cstr%3E-for-Bytes"],[51,"impl-PartialEq%3CBytes%3E-for-BytesMut"],[52,"impl-PartialEq%3CString%3E-for-BytesMut"],[53,"impl-PartialEq%3CVec%3Cu8%3E%3E-for-BytesMut"],[54,"impl-PartialEq%3C%26T%3E-for-BytesMut"],[55,"impl-PartialEq%3Cstr%3E-for-BytesMut"],[56,"impl-PartialEq-for-BytesMut"],[57,"impl-PartialEq%3C%5Bu8%5D%3E-for-BytesMut"],[58,"impl-Extend%3Cu8%3E-for-BytesMut"],[59,"impl-Extend%3C%26u8%3E-for-BytesMut"],[60,"impl-Extend%3CBytes%3E-for-BytesMut"],[62,"impl-Debug-for-Bytes"],[63,"impl-UpperHex-for-Bytes"],[64,"impl-LowerHex-for-Bytes"],[65,"impl-LowerHex-for-BytesMut"],[66,"impl-UpperHex-for-BytesMut"],[67,"impl-Debug-for-BytesMut"],[69,"impl-From%3C%26%5Bu8%5D%3E-for-Bytes"],[71,"impl-From%3CBytesMut%3E-for-Bytes"],[72,"impl-From%3CString%3E-for-Bytes"],[73,"impl-From%3CBox%3C%5Bu8%5D%3E%3E-for-Bytes"],[74,"impl-From%3CVec%3Cu8%3E%3E-for-Bytes"],[75,"impl-From%3C%26str%3E-for-Bytes"],[76,"impl-From%3C%26%5Bu8%5D%3E-for-BytesMut"],[77,"impl-From%3C%26str%3E-for-BytesMut"],[79,"impl-From%3CBytes%3E-for-BytesMut"],[81,"impl-FromIterator%3Cu8%3E-for-BytesMut"],[82,"impl-FromIterator%3C%26u8%3E-for-BytesMut"],[88,"impl-IntoIterator-for-Bytes"],[89,"impl-IntoIterator-for-%26Bytes"],[90,"impl-IntoIterator-for-%26BytesMut"],[91,"impl-IntoIterator-for-BytesMut"],[99,"impl-PartialOrd%3CVec%3Cu8%3E%3E-for-Bytes"],[100,"impl-PartialOrd%3Cstr%3E-for-Bytes"],[101,"impl-PartialOrd%3C%26T%3E-for-Bytes"],[102,"impl-PartialOrd%3C%5Bu8%5D%3E-for-Bytes"],[103,"impl-PartialOrd-for-Bytes"],[104,"impl-PartialOrd%3CString%3E-for-Bytes"],[105,"impl-PartialOrd%3CString%3E-for-BytesMut"],[106,"impl-PartialOrd%3Cstr%3E-for-BytesMut"],[107,"impl-PartialOrd-for-BytesMut"],[108,"impl-PartialOrd%3C%26T%3E-for-BytesMut"],[109,"impl-PartialOrd%3C%5Bu8%5D%3E-for-BytesMut"],[110,"impl-PartialOrd%3CVec%3Cu8%3E%3E-for-BytesMut"],[210,"impl-From%3C%26mut+%5Bu8%5D%3E-for-%26mut+UninitSlice"],[211,"impl-From%3C%26mut+%5BMaybeUninit%3Cu8%3E%5D%3E-for-%26mut+UninitSlice"],[309,"impl-Index%3CRangeToInclusive%3Cusize%3E%3E-for-UninitSlice"],[310,"impl-Index%3CRangeFull%3E-for-UninitSlice"],[311,"impl-Index%3CRangeTo%3Cusize%3E%3E-for-UninitSlice"],[312,"impl-Index%3CRangeFrom%3Cusize%3E%3E-for-UninitSlice"],[313,"impl-Index%3CRangeInclusive%3Cusize%3E%3E-for-UninitSlice"],[314,"impl-Index%3CRange%3Cusize%3E%3E-for-UninitSlice"],[315,"impl-IndexMut%3CRangeToInclusive%3Cusize%3E%3E-for-UninitSlice"],[316,"impl-IndexMut%3CRangeInclusive%3Cusize%3E%3E-for-UninitSlice"],[317,"impl-IndexMut%3CRangeFrom%3Cusize%3E%3E-for-UninitSlice"],[318,"impl-IndexMut%3CRangeFull%3E-for-UninitSlice"],[319,"impl-IndexMut%3CRange%3Cusize%3E%3E-for-UninitSlice"],[320,"impl-IndexMut%3CRangeTo%3Cusize%3E%3E-for-UninitSlice"]],"c":"OjAAAAAAAAA=","e":"OzAAAAEAALgAIwABAAEABQAMABQAAgAZAAcAIgAbAD8ABQBGAAAASAAGAFEAAgBVAAEAWQADAGQAEQB4AAEAgwABAIc
AAwCNAAEAkQABAJ4AAQChAAEApQANALgAAQC7AAEAvwABAMQAAQDIAAAAywAJADYBCwBOAQEAWQEAAKwBAACwAQEAswEBALcBAAC6ARIAzgEAAA==","a":{"bytes":[182],"bytes_mut":[185]}}]]')); +if (typeof exports !== 'undefined') exports.searchIndex = searchIndex; +else if (window.initSearch) window.initSearch(searchIndex); +//{"start":39,"fragment_lengths":[14159]} \ No newline at end of file diff --git a/search.desc/bytes/bytes-desc-0-.js b/search.desc/bytes/bytes-desc-0-.js new file mode 100644 index 000000000..930bb5fbe --- /dev/null +++ b/search.desc/bytes/bytes-desc-0-.js @@ -0,0 +1 @@ +searchState.loadedDescShard("bytes", 0, "Provides abstractions for working with bytes.\nA cheaply cloneable and sliceable chunk of contiguous …\nA unique reference to a contiguous slice of memory.\nUtilities for working with buffers.\nReturns the number of bytes the BytesMut can hold without …\nClears the buffer, removing all data.\nClears the buffer, removing all data. Existing capacity is …\nCreates Bytes instance from slice, by copying it.\nAppends given bytes to this BytesMut.\nConverts self into an immutable Bytes.\nReturns the argument unchanged.\nReturns the argument unchanged.\nConvert self into BytesMut.\nCreates a new Bytes from a static slice.\nCalls U::from(self).\nCalls U::from(self).\nReturns true if the Bytes has a length of 0.\nReturns true if the BytesMut has a length of 0.\nReturns true if this is the only reference to the data.\nReturns the number of bytes contained in this Bytes.\nReturns the number of bytes contained in this BytesMut.\nCreates a new empty Bytes.\nCreates a new BytesMut with default capacity.\nReserves capacity for at least additional more bytes to be …\nResizes the buffer so that len is equal to new_len.\nSets the length of the buffer.\nReturns a slice of self for the provided range.\nReturns a slice of self that is equivalent to the given …\nReturns the remaining spare capacity of the buffer as a …\nRemoves the bytes from the current view, returning them in …\nSplits the bytes into two at the given index.\nSplits the bytes into two at the given index.\nSplits the bytes into two at the given index.\nSplits the buffer into two at the given index.\nShortens the buffer, keeping the first len bytes and …\nShortens the buffer, keeping the first len bytes and …\nTry to convert self into BytesMut.\nAttempts to cheaply reclaim already allocated capacity for …\nAbsorbs a BytesMut that was previously split off.\nCreates a new BytesMut with the specified capacity.\nCreates a new BytesMut containing len zeros.\nRead bytes from a buffer.\nA trait for values that provide sequential write access to …\nA Chain sequences two buffers.\nIterator over the bytes contained by the buffer.\nA BufMut adapter which limits the amount of bytes that can …\nA Buf adapter which implements io::Read for the inner …\nA Buf adapter which limits the bytes read from an …\nUninitialized byte slice.\nA BufMut adapter which implements io::Write for the inner …\nAdvance the internal cursor of the Buf\nAdvance the internal cursor of the BufMut\nReturn a raw pointer to the slice’s buffer.\nReturn a &mut [MaybeUninit<u8>] to this slice’s buffer.\nCreates an adaptor which will chain this buffer with …\nCreates an adaptor which will chain this buffer with …\nCreates an adapter which will chain this buffer with …\nCreates an adapter which will chain this buffer with …\nReturns a slice starting at the current position and of …\nReturns a mutable slice starting at the current BufMut …\nFills dst with potentially multiple slices starting at 
self…\nFills dst with potentially multiple slices starting at self…\nCopies bytes from src into self.\nConsumes len bytes inside self and returns new instance of …\nConsumes len bytes inside self and returns new instance of …\nCopies bytes from self into dst.\nCopies bytes from self into dst.\nGets a mutable reference to the first underlying Buf.\nGets a reference to the first underlying Buf.\nReturns the argument unchanged.\nReturns the argument unchanged.\nReturns the argument unchanged.\nReturns the argument unchanged.\nReturns the argument unchanged.\nReturns the argument unchanged.\nCreate a &mut UninitSlice from a pointer and a length.\nGets an IEEE754 single-precision (4 bytes) floating point …\nGets an IEEE754 single-precision (4 bytes) floating point …\nGets an IEEE754 single-precision (4 bytes) floating point …\nGets an IEEE754 single-precision (4 bytes) floating point …\nGets an IEEE754 single-precision (4 bytes) floating point …\nGets an IEEE754 single-precision (4 bytes) floating point …\nGets an IEEE754 double-precision (8 bytes) floating point …\nGets an IEEE754 double-precision (8 bytes) floating point …\nGets an IEEE754 double-precision (8 bytes) floating point …\nGets an IEEE754 double-precision (8 bytes) floating point …\nGets an IEEE754 double-precision (8 bytes) floating point …\nGets an IEEE754 double-precision (8 bytes) floating point …\nGets a signed 128 bit integer from self in big-endian byte …\nGets a signed 128 bit integer from self in big-endian byte …\nGets a signed 128 bit integer from self in little-endian …\nGets a signed 128 bit integer from self in little-endian …\nGets a signed 128 bit integer from self in native-endian …\nGets a signed 128 bit integer from self in native-endian …\nGets a signed 16 bit integer from self in big-endian byte …\nGets a signed 16 bit integer from self in big-endian byte …\nGets a signed 16 bit integer from self in little-endian …\nGets a signed 16 bit integer from self in little-endian …\nGets a signed 16 bit integer from self in native-endian …\nGets a signed 16 bit integer from self in native-endian …\nGets a signed 32 bit integer from self in big-endian byte …\nGets a signed 32 bit integer from self in big-endian byte …\nGets a signed 32 bit integer from self in little-endian …\nGets a signed 32 bit integer from self in little-endian …\nGets a signed 32 bit integer from self in native-endian …\nGets a signed 32 bit integer from self in native-endian …\nGets a signed 64 bit integer from self in big-endian byte …\nGets a signed 64 bit integer from self in big-endian byte …\nGets a signed 64 bit integer from self in little-endian …\nGets a signed 64 bit integer from self in little-endian …\nGets a signed 64 bit integer from self in native-endian …\nGets a signed 64 bit integer from self in native-endian …\nGets a signed 8 bit integer from self.\nGets a signed 8 bit integer from self.\nGets a signed n-byte integer from self in big-endian byte …\nGets a signed n-byte integer from self in big-endian byte …\nGets a signed n-byte integer from self in little-endian …\nGets a signed n-byte integer from self in little-endian …\nGets a signed n-byte integer from self in native-endian …\nGets a signed n-byte integer from self in native-endian …\nGets a mutable reference to the underlying Buf.\nGets a mutable reference to the underlying BufMut.\nGets a mutable reference to the underlying Buf.\nGets a mutable reference to the underlying Buf.\nGets a mutable reference to the underlying BufMut.\nGets a reference to the underlying 
Buf.\nGets a reference to the underlying BufMut.\nGets a reference to the underlying Buf.\nGets a reference to the underlying Buf.\nGets a reference to the underlying BufMut.\nGets an unsigned 128 bit integer from self in big-endian …\nGets an unsigned 128 bit integer from self in big-endian …\nGets an unsigned 128 bit integer from self in …\nGets an unsigned 128 bit integer from self in …\nGets an unsigned 128 bit integer from self in …\nGets an unsigned 128 bit integer from self in …\nGets an unsigned 16 bit integer from self in big-endian …\nGets an unsigned 16 bit integer from self in big-endian …\nGets an unsigned 16 bit integer from self in little-endian …\nGets an unsigned 16 bit integer from self in little-endian …\nGets an unsigned 16 bit integer from self in native-endian …\nGets an unsigned 16 bit integer from self in native-endian …\nGets an unsigned 32 bit integer from self in the …\nGets an unsigned 32 bit integer from self in the …\nGets an unsigned 32 bit integer from self in the …\nGets an unsigned 32 bit integer from self in the …\nGets an unsigned 32 bit integer from self in native-endian …\nGets an unsigned 32 bit integer from self in native-endian …\nGets an unsigned 64 bit integer from self in big-endian …\nGets an unsigned 64 bit integer from self in big-endian …\nGets an unsigned 64 bit integer from self in little-endian …\nGets an unsigned 64 bit integer from self in little-endian …\nGets an unsigned 64 bit integer from self in native-endian …\nGets an unsigned 64 bit integer from self in native-endian …\nGets an unsigned 8 bit integer from self.\nGets an unsigned 8 bit integer from self.\nGets an unsigned n-byte integer from self in big-endian …\nGets an unsigned n-byte integer from self in big-endian …\nGets an unsigned n-byte integer from self in little-endian …\nGets an unsigned n-byte integer from self in little-endian …\nGets an unsigned n-byte integer from self in native-endian …\nGets an unsigned n-byte integer from self in native-endian …\nReturns true if there are any more bytes to consume\nReturns true if there are any more bytes to consume\nReturns true if there is space in self for more bytes.\nReturns true if there is space in self for more bytes.\nCalls U::from(self).\nCalls U::from(self).\nCalls U::from(self).\nCalls U::from(self).\nCalls U::from(self).\nCalls U::from(self).\nConsumes this Chain, returning the underlying values.\nConsumes this IntoIter, returning the underlying value.\nConsumes this Limit, returning the underlying value.\nConsumes this Reader, returning the underlying value.\nConsumes this Take, returning the underlying value.\nConsumes this Writer, returning the underlying value.\nGets a mutable reference to the last underlying Buf.\nGets a reference to the last underlying Buf.\nReturns the number of bytes in the slice.\nCreates an adaptor which can write at most limit bytes to …\nCreates an adaptor which can write at most limit bytes to …\nReturns the maximum number of bytes that can be written\nReturns the maximum number of bytes that can be read.\nCreates a &mut UninitSlice wrapping a slice of initialised …\nCreates an iterator over the bytes contained by the buffer.\nTransfer bytes into self from src and advance the cursor …\nTransfer bytes into self from src and advance the cursor …\nPut cnt bytes val into self.\nPut cnt bytes val into self.\nWrites an IEEE754 single-precision (4 bytes) floating …\nWrites an IEEE754 single-precision (4 bytes) floating …\nWrites an IEEE754 single-precision (4 bytes) floating …\nWrites an 
IEEE754 single-precision (4 bytes) floating …\nWrites an IEEE754 single-precision (4 bytes) floating …\nWrites an IEEE754 single-precision (4 bytes) floating …\nWrites an IEEE754 double-precision (8 bytes) floating …\nWrites an IEEE754 double-precision (8 bytes) floating …\nWrites an IEEE754 double-precision (8 bytes) floating …\nWrites an IEEE754 double-precision (8 bytes) floating …\nWrites an IEEE754 double-precision (8 bytes) floating …\nWrites an IEEE754 double-precision (8 bytes) floating …\nWrites a signed 128 bit integer to self in the big-endian …\nWrites a signed 128 bit integer to self in the big-endian …\nWrites a signed 128 bit integer to self in little-endian …\nWrites a signed 128 bit integer to self in little-endian …\nWrites a signed 128 bit integer to self in native-endian …\nWrites a signed 128 bit integer to self in native-endian …\nWrites a signed 16 bit integer to self in big-endian byte …\nWrites a signed 16 bit integer to self in big-endian byte …\nWrites a signed 16 bit integer to self in little-endian …\nWrites a signed 16 bit integer to self in little-endian …\nWrites a signed 16 bit integer to self in native-endian …\nWrites a signed 16 bit integer to self in native-endian …\nWrites a signed 32 bit integer to self in big-endian byte …\nWrites a signed 32 bit integer to self in big-endian byte …\nWrites a signed 32 bit integer to self in little-endian …\nWrites a signed 32 bit integer to self in little-endian …\nWrites a signed 32 bit integer to self in native-endian …\nWrites a signed 32 bit integer to self in native-endian …\nWrites a signed 64 bit integer to self in the big-endian …\nWrites a signed 64 bit integer to self in the big-endian …\nWrites a signed 64 bit integer to self in little-endian …\nWrites a signed 64 bit integer to self in little-endian …\nWrites a signed 64 bit integer to self in native-endian …\nWrites a signed 64 bit integer to self in native-endian …\nWrites a signed 8 bit integer to self.\nWrites a signed 8 bit integer to self.\nWrites low nbytes of a signed integer to self in …\nWrites low nbytes of a signed integer to self in …\nWrites low nbytes of a signed integer to self in …\nWrites low nbytes of a signed integer to self in …\nWrites low nbytes of a signed integer to self in …\nWrites low nbytes of a signed integer to self in …\nTransfer bytes into self from src and advance the cursor …\nTransfer bytes into self from src and advance the cursor …\nWrites an unsigned 128 bit integer to self in the …\nWrites an unsigned 128 bit integer to self in the …\nWrites an unsigned 128 bit integer to self in …\nWrites an unsigned 128 bit integer to self in …\nWrites an unsigned 128 bit integer to self in …\nWrites an unsigned 128 bit integer to self in …\nWrites an unsigned 16 bit integer to self in big-endian …\nWrites an unsigned 16 bit integer to self in big-endian …\nWrites an unsigned 16 bit integer to self in little-endian …\nWrites an unsigned 16 bit integer to self in little-endian …\nWrites an unsigned 16 bit integer to self in native-endian …\nWrites an unsigned 16 bit integer to self in native-endian …\nWrites an unsigned 32 bit integer to self in big-endian …\nWrites an unsigned 32 bit integer to self in big-endian …\nWrites an unsigned 32 bit integer to self in little-endian …\nWrites an unsigned 32 bit integer to self in little-endian …\nWrites an unsigned 32 bit integer to self in native-endian …\nWrites an unsigned 32 bit integer to self in native-endian …\nWrites an unsigned 64 bit integer to self in the …\nWrites an unsigned 
64 bit integer to self in the …\nWrites an unsigned 64 bit integer to self in little-endian …\nWrites an unsigned 64 bit integer to self in little-endian …\nWrites an unsigned 64 bit integer to self in native-endian …\nWrites an unsigned 64 bit integer to self in native-endian …\nWrites an unsigned 8 bit integer to self.\nWrites an unsigned 8 bit integer to self.\nWrites an unsigned n-byte integer to self in big-endian …\nWrites an unsigned n-byte integer to self in big-endian …\nWrites an unsigned n-byte integer to self in the …\nWrites an unsigned n-byte integer to self in the …\nWrites an unsigned n-byte integer to self in the …\nWrites an unsigned n-byte integer to self in the …\nCreates an adaptor which implements the Read trait for self…\nCreates an adaptor which implements the Read trait for self…\nReturns the number of bytes between the current position …\nReturns the number of bytes that can be written from the …\nSets the maximum number of bytes that can be written.\nSets the maximum number of bytes that can be read.\nCreates an adaptor which will read at most limit bytes …\nCreates an adaptor which will read at most limit bytes …\nCreates a &mut UninitSlice wrapping a slice of …\nWrite a single byte at the specified offset.\nCreates an adaptor which implements the Write trait for …\nCreates an adaptor which implements the Write trait for …") \ No newline at end of file diff --git a/settings.html b/settings.html new file mode 100644 index 000000000..e6fe2ef1c --- /dev/null +++ b/settings.html @@ -0,0 +1 @@ +Settings

Rustdoc settings

Back
\ No newline at end of file diff --git a/src-files.js b/src-files.js new file mode 100644 index 000000000..03aa7ae9f --- /dev/null +++ b/src-files.js @@ -0,0 +1,3 @@ +var srcIndex = new Map(JSON.parse('[["bytes",["",[["buf",[],["buf_impl.rs","buf_mut.rs","chain.rs","iter.rs","limit.rs","mod.rs","reader.rs","take.rs","uninit_slice.rs","vec_deque.rs","writer.rs"]],["fmt",[],["debug.rs","hex.rs","mod.rs"]]],["bytes.rs","bytes_mut.rs","lib.rs","loom.rs","serde.rs"]]]]')); +createSrcSidebar(); +//{"start":36,"fragment_lengths":[264]} \ No newline at end of file diff --git a/src/bytes/buf/buf_impl.rs.html b/src/bytes/buf/buf_impl.rs.html new file mode 100644 index 000000000..4f60c7c8b --- /dev/null +++ b/src/bytes/buf/buf_impl.rs.html @@ -0,0 +1,2945 @@ +buf_impl.rs - source
+1 … +1472 (rustdoc source-view line-number gutter)
+#[cfg(feature = "std")]
+use crate::buf::{reader, Reader};
+use crate::buf::{take, Chain, Take};
+#[cfg(feature = "std")]
+use crate::{min_u64_usize, saturating_sub_usize_u64};
+use crate::{panic_advance, panic_does_not_fit};
+
+#[cfg(feature = "std")]
+use std::io::IoSlice;
+
+use alloc::boxed::Box;
+
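+// Shared implementation for the `get_*` methods below: the `$typ::$conv` arm
+// handles exact-width reads (e.g. `u16::from_be_bytes`), while the `le =>` and
+// `be =>` arms handle the variable-length reads behind `get_uint`, `get_uint_le`
+// and friends.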
+macro_rules! buf_get_impl {
+    ($this:ident, $typ:tt::$conv:tt) => {{
+        const SIZE: usize = core::mem::size_of::<$typ>();
+
+        if $this.remaining() < SIZE {
+            panic_advance(SIZE, $this.remaining());
+        }
+
+        // try to convert directly from the bytes
+        // this Option<ret> trick is to avoid keeping a borrow on self
+        // when advance() is called (mut borrow) and to call chunk() only once
+        let ret = $this
+            .chunk()
+            .get(..SIZE)
+            .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) });
+
+        if let Some(ret) = ret {
+            // if the direct conversion was possible, advance and return
+            $this.advance(SIZE);
+            return ret;
+        } else {
+            // if not we copy the bytes in a temp buffer then convert
+            let mut buf = [0; SIZE];
+            $this.copy_to_slice(&mut buf); // (do the advance)
+            return $typ::$conv(buf);
+        }
+    }};
+    (le => $this:ident, $typ:tt, $len_to_read:expr) => {{
+        const SIZE: usize = core::mem::size_of::<$typ>();
+
+        // The same trick as above does not improve the best case speed.
+        // It seems to be linked to the way the method is optimised by the compiler
+        let mut buf = [0; SIZE];
+
+        let subslice = match buf.get_mut(..$len_to_read) {
+            Some(subslice) => subslice,
+            None => panic_does_not_fit(SIZE, $len_to_read),
+        };
+
+        $this.copy_to_slice(subslice);
+        return $typ::from_le_bytes(buf);
+    }};
+    (be => $this:ident, $typ:tt, $len_to_read:expr) => {{
+        const SIZE: usize = core::mem::size_of::<$typ>();
+
+        let slice_at = match SIZE.checked_sub($len_to_read) {
+            Some(slice_at) => slice_at,
+            None => panic_does_not_fit(SIZE, $len_to_read),
+        };
+
+        let mut buf = [0; SIZE];
+        $this.copy_to_slice(&mut buf[slice_at..]);
+        return $typ::from_be_bytes(buf);
+    }};
+}
+
+// https://en.wikipedia.org/wiki/Sign_extension
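+// e.g. sign_extend(0xFF, 1) == -1 and sign_extend(0x7F, 1) == 127: the value is
+// shifted so its sign bit lands in bit 63, then arithmetically shifted back down.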
+fn sign_extend(val: u64, nbytes: usize) -> i64 {
+    let shift = (8 - nbytes) * 8;
+    (val << shift) as i64 >> shift
+}
+
+/// Read bytes from a buffer.
+///
+/// A buffer stores bytes in memory such that read operations are infallible.
+/// The underlying storage may or may not be in contiguous memory. A `Buf` value
+/// is a cursor into the buffer. Reading from `Buf` advances the cursor
+/// position. It can be thought of as an efficient `Iterator` for collections of
+/// bytes.
+///
+/// The simplest `Buf` is a `&[u8]`.
+///
+/// ```
+/// use bytes::Buf;
+///
+/// let mut buf = &b"hello world"[..];
+///
+/// assert_eq!(b'h', buf.get_u8());
+/// assert_eq!(b'e', buf.get_u8());
+/// assert_eq!(b'l', buf.get_u8());
+///
+/// let mut rest = [0; 8];
+/// buf.copy_to_slice(&mut rest);
+///
+/// assert_eq!(&rest[..], &b"lo world"[..]);
+/// ```
+pub trait Buf {
+    /// Returns the number of bytes between the current position and the end of
+    /// the buffer.
+    ///
+    /// This value is greater than or equal to the length of the slice returned
+    /// by `chunk()`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"hello world"[..];
+    ///
+    /// assert_eq!(buf.remaining(), 11);
+    ///
+    /// buf.get_u8();
+    ///
+    /// assert_eq!(buf.remaining(), 10);
+    /// ```
+    ///
+    /// # Implementer notes
+    ///
+    /// Implementations of `remaining` should ensure that the return value does
+    /// not change unless a call is made to `advance` or any other function that
+    /// is documented to change the `Buf`'s current position.
+    fn remaining(&self) -> usize;
+
+    /// Returns a slice starting at the current position and of length between 0
+    /// and `Buf::remaining()`. Note that this *can* return a shorter slice (this
+    /// allows a non-contiguous internal representation).
+    ///
+    /// This is a lower level function. Most operations are done with other
+    /// functions.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"hello world"[..];
+    ///
+    /// assert_eq!(buf.chunk(), &b"hello world"[..]);
+    ///
+    /// buf.advance(6);
+    ///
+    /// assert_eq!(buf.chunk(), &b"world"[..]);
+    /// ```
+    ///
+    /// # Implementer notes
+    ///
+    /// This function should never panic. `chunk()` should return an empty
+    /// slice **if and only if** `remaining()` returns 0. In other words,
+    /// `chunk()` returning an empty slice implies that `remaining()` will
+    /// return 0 and `remaining()` returning 0 implies that `chunk()` will
+    /// return an empty slice.
+    // The `chunk` method was previously called `bytes`. This alias makes the rename
+    // more easily discoverable.
+    #[cfg_attr(docsrs, doc(alias = "bytes"))]
+    fn chunk(&self) -> &[u8];
+
+    /// Fills `dst` with potentially multiple slices starting at `self`'s
+    /// current position.
+    ///
+    /// If the `Buf` is backed by disjoint slices of bytes, `chunks_vectored` enables
+    /// fetching more than one slice at once. `dst` is a slice of `IoSlice`
+    /// references, enabling the slice to be directly used with [`writev`]
+    /// without any further conversion. The sum of the lengths of all the
+    /// buffers in `dst` will be less than or equal to `Buf::remaining()`.
+    ///
+    /// The entries in `dst` will be overwritten, but the data **contained** by
+    /// the slices **will not** be modified. If `chunks_vectored` does not fill every
+    /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices
+    /// in `self`.
+    ///
+    /// This is a lower level function. Most operations are done with other
+    /// functions.
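+    ///
+    /// # Examples
+    ///
+    /// For a contiguous buffer such as `&[u8]` there is only a single chunk, so
+    /// at most one entry is filled:
+    ///
+    /// ```
+    /// use bytes::Buf;
+    /// use std::io::IoSlice;
+    ///
+    /// let buf = &b"hello world"[..];
+    /// let mut dst = [IoSlice::new(&[]); 2];
+    ///
+    /// assert_eq!(buf.chunks_vectored(&mut dst), 1);
+    /// assert_eq!(&dst[0][..], &b"hello world"[..]);
+    /// ```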
+    ///
+    /// # Implementer notes
+    ///
+    /// This function should never panic. Once the end of the buffer is reached,
+    /// i.e., `Buf::remaining` returns 0, calls to `chunks_vectored` must return 0
+    /// without mutating `dst`.
+    ///
+    /// Implementations should also take care to properly handle being called
+    /// with `dst` being a zero length slice.
+    ///
+    /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html
+    #[cfg(feature = "std")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+    fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+        if dst.is_empty() {
+            return 0;
+        }
+
+        if self.has_remaining() {
+            dst[0] = IoSlice::new(self.chunk());
+            1
+        } else {
+            0
+        }
+    }
+
+    /// Advance the internal cursor of the Buf
+    ///
+    /// The next call to `chunk()` will return a slice starting `cnt` bytes
+    /// further into the underlying buffer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"hello world"[..];
+    ///
+    /// assert_eq!(buf.chunk(), &b"hello world"[..]);
+    ///
+    /// buf.advance(6);
+    ///
+    /// assert_eq!(buf.chunk(), &b"world"[..]);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function **may** panic if `cnt > self.remaining()`.
+    ///
+    /// # Implementer notes
+    ///
+    /// It is recommended for implementations of `advance` to panic if `cnt >
+    /// self.remaining()`. If the implementation does not panic, the call must
+    /// behave as if `cnt == self.remaining()`.
+    ///
+    /// A call with `cnt == 0` should never panic and be a no-op.
+    fn advance(&mut self, cnt: usize);
+
+    /// Returns true if there are any more bytes to consume
+    ///
+    /// This is equivalent to `self.remaining() != 0`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"a"[..];
+    ///
+    /// assert!(buf.has_remaining());
+    ///
+    /// buf.get_u8();
+    ///
+    /// assert!(!buf.has_remaining());
+    /// ```
+    fn has_remaining(&self) -> bool {
+        self.remaining() > 0
+    }
+
+    /// Copies bytes from `self` into `dst`.
+    ///
+    /// The cursor is advanced by the number of bytes copied. `self` must have
+    /// enough remaining bytes to fill `dst`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"hello world"[..];
+    /// let mut dst = [0; 5];
+    ///
+    /// buf.copy_to_slice(&mut dst);
+    /// assert_eq!(&b"hello"[..], &dst);
+    /// assert_eq!(6, buf.remaining());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `self.remaining() < dst.len()`.
+    fn copy_to_slice(&mut self, mut dst: &mut [u8]) {
+        if self.remaining() < dst.len() {
+            panic_advance(dst.len(), self.remaining());
+        }
+
+        while !dst.is_empty() {
+            let src = self.chunk();
+            let cnt = usize::min(src.len(), dst.len());
+
+            dst[..cnt].copy_from_slice(&src[..cnt]);
+            dst = &mut dst[cnt..];
+
+            self.advance(cnt);
+        }
+    }
+
+    /// Gets an unsigned 8 bit integer from `self`.
+    ///
+    /// The current position is advanced by 1.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08 hello"[..];
+    /// assert_eq!(8, buf.get_u8());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is no more remaining data in `self`.
+    fn get_u8(&mut self) -> u8 {
+        if self.remaining() < 1 {
+            panic_advance(1, 0);
+        }
+        let ret = self.chunk()[0];
+        self.advance(1);
+        ret
+    }
+
+    /// Gets a signed 8 bit integer from `self`.
+    ///
+    /// The current position is advanced by 1.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08 hello"[..];
+    /// assert_eq!(8, buf.get_i8());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is no more remaining data in `self`.
+    fn get_i8(&mut self) -> i8 {
+        if self.remaining() < 1 {
+            panic_advance(1, 0);
+        }
+        let ret = self.chunk()[0] as i8;
+        self.advance(1);
+        ret
+    }
+
+    /// Gets an unsigned 16 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08\x09 hello"[..];
+    /// assert_eq!(0x0809, buf.get_u16());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u16(&mut self) -> u16 {
+        buf_get_impl!(self, u16::from_be_bytes);
+    }
+
+    /// Gets an unsigned 16 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x09\x08 hello"[..];
+    /// assert_eq!(0x0809, buf.get_u16_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u16_le(&mut self) -> u16 {
+        buf_get_impl!(self, u16::from_le_bytes);
+    }
+
+    /// Gets an unsigned 16 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09 hello",
+    ///     false => b"\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809, buf.get_u16_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u16_ne(&mut self) -> u16 {
+        buf_get_impl!(self, u16::from_ne_bytes);
+    }
+
+    /// Gets a signed 16 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08\x09 hello"[..];
+    /// assert_eq!(0x0809, buf.get_i16());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i16(&mut self) -> i16 {
+        buf_get_impl!(self, i16::from_be_bytes);
+    }
+
+    /// Gets a signed 16 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x09\x08 hello"[..];
+    /// assert_eq!(0x0809, buf.get_i16_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i16_le(&mut self) -> i16 {
+        buf_get_impl!(self, i16::from_le_bytes);
+    }
+
+    /// Gets a signed 16 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09 hello",
+    ///     false => b"\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809, buf.get_i16_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i16_ne(&mut self) -> i16 {
+        buf_get_impl!(self, i16::from_ne_bytes);
+    }
+
+    /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+    /// assert_eq!(0x0809A0A1, buf.get_u32());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u32(&mut self) -> u32 {
+        buf_get_impl!(self, u32::from_be_bytes);
+    }
+
+    /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+    /// assert_eq!(0x0809A0A1, buf.get_u32_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u32_le(&mut self) -> u32 {
+        buf_get_impl!(self, u32::from_le_bytes);
+    }
+
+    /// Gets an unsigned 32 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09\xA0\xA1 hello",
+    ///     false => b"\xA1\xA0\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809A0A1, buf.get_u32_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u32_ne(&mut self) -> u32 {
+        buf_get_impl!(self, u32::from_ne_bytes);
+    }
+
+    /// Gets a signed 32 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+    /// assert_eq!(0x0809A0A1, buf.get_i32());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i32(&mut self) -> i32 {
+        buf_get_impl!(self, i32::from_be_bytes);
+    }
+
+    /// Gets a signed 32 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+    /// assert_eq!(0x0809A0A1, buf.get_i32_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i32_le(&mut self) -> i32 {
+        buf_get_impl!(self, i32::from_le_bytes);
+    }
+
+    /// Gets a signed 32 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09\xA0\xA1 hello",
+    ///     false => b"\xA1\xA0\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809A0A1, buf.get_i32_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i32_ne(&mut self) -> i32 {
+        buf_get_impl!(self, i32::from_ne_bytes);
+    }
+
+    /// Gets an unsigned 64 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+    /// assert_eq!(0x0102030405060708, buf.get_u64());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u64(&mut self) -> u64 {
+        buf_get_impl!(self, u64::from_be_bytes);
+    }
+
+    /// Gets an unsigned 64 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+    /// assert_eq!(0x0102030405060708, buf.get_u64_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u64_le(&mut self) -> u64 {
+        buf_get_impl!(self, u64::from_le_bytes);
+    }
+
+    /// Gets an unsigned 64 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+    ///     false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x0102030405060708, buf.get_u64_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u64_ne(&mut self) -> u64 {
+        buf_get_impl!(self, u64::from_ne_bytes);
+    }
+
+    /// Gets a signed 64 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+    /// assert_eq!(0x0102030405060708, buf.get_i64());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i64(&mut self) -> i64 {
+        buf_get_impl!(self, i64::from_be_bytes);
+    }
+
+    /// Gets a signed 64 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+    /// assert_eq!(0x0102030405060708, buf.get_i64_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i64_le(&mut self) -> i64 {
+        buf_get_impl!(self, i64::from_le_bytes);
+    }
+
+    /// Gets a signed 64 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+    ///     false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x0102030405060708, buf.get_i64_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i64_ne(&mut self) -> i64 {
+        buf_get_impl!(self, i64::from_ne_bytes);
+    }
+
+    /// Gets an unsigned 128 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u128(&mut self) -> u128 {
+        buf_get_impl!(self, u128::from_be_bytes);
+    }
+
+    /// Gets an unsigned 128 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u128_le(&mut self) -> u128 {
+        buf_get_impl!(self, u128::from_le_bytes);
+    }
+
+    /// Gets an unsigned 128 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+    ///     false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u128_ne(&mut self) -> u128 {
+        buf_get_impl!(self, u128::from_ne_bytes);
+    }
+
+    /// Gets a signed 128 bit integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i128(&mut self) -> i128 {
+        buf_get_impl!(self, i128::from_be_bytes);
+    }
+
+    /// Gets a signed 128 bit integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i128_le(&mut self) -> i128 {
+        buf_get_impl!(self, i128::from_le_bytes);
+    }
+
+    /// Gets a signed 128 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+    ///     false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i128_ne(&mut self) -> i128 {
+        buf_get_impl!(self, i128::from_ne_bytes);
+    }
+
+    /// Gets an unsigned n-byte integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x01\x02\x03 hello"[..];
+    /// assert_eq!(0x010203, buf.get_uint(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+    fn get_uint(&mut self, nbytes: usize) -> u64 {
+        buf_get_impl!(be => self, u64, nbytes);
+    }
+
+    /// Gets an unsigned n-byte integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x03\x02\x01 hello"[..];
+    /// assert_eq!(0x010203, buf.get_uint_le(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+    fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+        buf_get_impl!(le => self, u64, nbytes);
+    }
+
+    /// Gets an unsigned n-byte integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03 hello",
+    ///     false => b"\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x010203, buf.get_uint_ne(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+    fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+        if cfg!(target_endian = "big") {
+            self.get_uint(nbytes)
+        } else {
+            self.get_uint_le(nbytes)
+        }
+    }
+
+    /// Gets a signed n-byte integer from `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x01\x02\x03 hello"[..];
+    /// assert_eq!(0x010203, buf.get_int(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+    fn get_int(&mut self, nbytes: usize) -> i64 {
+        sign_extend(self.get_uint(nbytes), nbytes)
+    }
+
+    /// Gets a signed n-byte integer from `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x03\x02\x01 hello"[..];
+    /// assert_eq!(0x010203, buf.get_int_le(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+    fn get_int_le(&mut self, nbytes: usize) -> i64 {
+        sign_extend(self.get_uint_le(nbytes), nbytes)
+    }
+
+    /// Gets a signed n-byte integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03 hello",
+    ///     false => b"\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x010203, buf.get_int_ne(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+    fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+        if cfg!(target_endian = "big") {
+            self.get_int(nbytes)
+        } else {
+            self.get_int_le(nbytes)
+        }
+    }
+
+    /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+    /// `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..];
+    /// assert_eq!(1.2f32, buf.get_f32());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f32(&mut self) -> f32 {
+        f32::from_bits(self.get_u32())
+    }
+
+    /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+    /// `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..];
+    /// assert_eq!(1.2f32, buf.get_f32_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f32_le(&mut self) -> f32 {
+        f32::from_bits(self.get_u32_le())
+    }
+
+    /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x3F\x99\x99\x9A hello",
+    ///     false => b"\x9A\x99\x99\x3F hello",
+    /// };
+    /// assert_eq!(1.2f32, buf.get_f32_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f32_ne(&mut self) -> f32 {
+        f32::from_bits(self.get_u32_ne())
+    }
+
+    /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+    /// `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..];
+    /// assert_eq!(1.2f64, buf.get_f64());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f64(&mut self) -> f64 {
+        f64::from_bits(self.get_u64())
+    }
+
+    /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+    /// `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..];
+    /// assert_eq!(1.2f64, buf.get_f64_le());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f64_le(&mut self) -> f64 {
+        f64::from_bits(self.get_u64_le())
+    }
+
+    /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
+    ///     false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
+    /// };
+    /// assert_eq!(1.2f64, buf.get_f64_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f64_ne(&mut self) -> f64 {
+        f64::from_bits(self.get_u64_ne())
+    }
+
+    /// Consumes `len` bytes from `self` and returns a new instance of `Bytes`
+    /// containing this data.
+    ///
+    /// This function may be optimized by the underlying type to avoid actual
+    /// copies. For example, the `Bytes` implementation will do a shallow copy
+    /// (a reference-count increment).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let bytes = (&b"hello world"[..]).copy_to_bytes(5);
+    /// assert_eq!(&bytes[..], &b"hello"[..]);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `len > self.remaining()`.
+    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
+        use super::BufMut;
+
+        if self.remaining() < len {
+            panic_advance(len, self.remaining());
+        }
+
+        let mut ret = crate::BytesMut::with_capacity(len);
+        ret.put(self.take(len));
+        ret.freeze()
+    }
+
+    /// Creates an adaptor which will read at most `limit` bytes from `self`.
+    ///
+    /// This function returns a new instance of `Buf` which will read at most
+    /// `limit` bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Buf, BufMut};
+    ///
+    /// let mut buf = b"hello world"[..].take(5);
+    /// let mut dst = vec![];
+    ///
+    /// dst.put(&mut buf);
+    /// assert_eq!(dst, b"hello");
+    ///
+    /// let mut buf = buf.into_inner();
+    /// dst.clear();
+    /// dst.put(&mut buf);
+    /// assert_eq!(dst, b" world");
+    /// ```
+    fn take(self, limit: usize) -> Take<Self>
+    where
+        Self: Sized,
+    {
+        take::new(self, limit)
+    }
+
+    /// Creates an adaptor which will chain this buffer with another.
+    ///
+    /// The returned `Buf` instance will first consume all bytes from `self`.
+    /// Afterwards, the output is equivalent to the output of `next`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut chain = b"hello "[..].chain(&b"world"[..]);
+    ///
+    /// let full = chain.copy_to_bytes(11);
+    /// assert_eq!(full.chunk(), b"hello world");
+    /// ```
+    fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
+    where
+        Self: Sized,
+    {
+        Chain::new(self, next)
+    }
+
+    /// Creates an adaptor which implements the `Read` trait for `self`.
+    ///
+    /// This function returns a new value which implements `Read` by adapting
+    /// the `Read` trait functions to the `Buf` trait functions. Given that
+    /// `Buf` operations are infallible, none of the `Read` functions will
+    /// return with `Err`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Bytes, Buf};
+    /// use std::io::Read;
+    ///
+    /// let buf = Bytes::from("hello world");
+    ///
+    /// let mut reader = buf.reader();
+    /// let mut dst = [0; 1024];
+    ///
+    /// let num = reader.read(&mut dst).unwrap();
+    ///
+    /// assert_eq!(11, num);
+    /// assert_eq!(&dst[..11], &b"hello world"[..]);
+    /// ```
+    #[cfg(feature = "std")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+    fn reader(self) -> Reader<Self>
+    where
+        Self: Sized,
+    {
+        reader::new(self)
+    }
+}
+
+macro_rules! deref_forward_buf {
+    () => {
+        #[inline]
+        fn remaining(&self) -> usize {
+            (**self).remaining()
+        }
+
+        #[inline]
+        fn chunk(&self) -> &[u8] {
+            (**self).chunk()
+        }
+
+        #[cfg(feature = "std")]
+        #[inline]
+        fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
+            (**self).chunks_vectored(dst)
+        }
+
+        #[inline]
+        fn advance(&mut self, cnt: usize) {
+            (**self).advance(cnt)
+        }
+
+        #[inline]
+        fn has_remaining(&self) -> bool {
+            (**self).has_remaining()
+        }
+
+        #[inline]
+        fn copy_to_slice(&mut self, dst: &mut [u8]) {
+            (**self).copy_to_slice(dst)
+        }
+
+        #[inline]
+        fn get_u8(&mut self) -> u8 {
+            (**self).get_u8()
+        }
+
+        #[inline]
+        fn get_i8(&mut self) -> i8 {
+            (**self).get_i8()
+        }
+
+        #[inline]
+        fn get_u16(&mut self) -> u16 {
+            (**self).get_u16()
+        }
+
+        #[inline]
+        fn get_u16_le(&mut self) -> u16 {
+            (**self).get_u16_le()
+        }
+
+        #[inline]
+        fn get_u16_ne(&mut self) -> u16 {
+            (**self).get_u16_ne()
+        }
+
+        #[inline]
+        fn get_i16(&mut self) -> i16 {
+            (**self).get_i16()
+        }
+
+        #[inline]
+        fn get_i16_le(&mut self) -> i16 {
+            (**self).get_i16_le()
+        }
+
+        #[inline]
+        fn get_i16_ne(&mut self) -> i16 {
+            (**self).get_i16_ne()
+        }
+
+        #[inline]
+        fn get_u32(&mut self) -> u32 {
+            (**self).get_u32()
+        }
+
+        #[inline]
+        fn get_u32_le(&mut self) -> u32 {
+            (**self).get_u32_le()
+        }
+
+        #[inline]
+        fn get_u32_ne(&mut self) -> u32 {
+            (**self).get_u32_ne()
+        }
+
+        #[inline]
+        fn get_i32(&mut self) -> i32 {
+            (**self).get_i32()
+        }
+
+        #[inline]
+        fn get_i32_le(&mut self) -> i32 {
+            (**self).get_i32_le()
+        }
+
+        #[inline]
+        fn get_i32_ne(&mut self) -> i32 {
+            (**self).get_i32_ne()
+        }
+
+        #[inline]
+        fn get_u64(&mut self) -> u64 {
+            (**self).get_u64()
+        }
+
+        #[inline]
+        fn get_u64_le(&mut self) -> u64 {
+            (**self).get_u64_le()
+        }
+
+        #[inline]
+        fn get_u64_ne(&mut self) -> u64 {
+            (**self).get_u64_ne()
+        }
+
+        #[inline]
+        fn get_i64(&mut self) -> i64 {
+            (**self).get_i64()
+        }
+
+        #[inline]
+        fn get_i64_le(&mut self) -> i64 {
+            (**self).get_i64_le()
+        }
+
+        #[inline]
+        fn get_i64_ne(&mut self) -> i64 {
+            (**self).get_i64_ne()
+        }
+
+        #[inline]
+        fn get_uint(&mut self, nbytes: usize) -> u64 {
+            (**self).get_uint(nbytes)
+        }
+
+        #[inline]
+        fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+            (**self).get_uint_le(nbytes)
+        }
+
+        #[inline]
+        fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+            (**self).get_uint_ne(nbytes)
+        }
+
+        #[inline]
+        fn get_int(&mut self, nbytes: usize) -> i64 {
+            (**self).get_int(nbytes)
+        }
+
+        #[inline]
+        fn get_int_le(&mut self, nbytes: usize) -> i64 {
+            (**self).get_int_le(nbytes)
+        }
+
+        #[inline]
+        fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+            (**self).get_int_ne(nbytes)
+        }
+
+        #[inline]
+        fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
+            (**self).copy_to_bytes(len)
+        }
+    };
+}
+
+impl<T: Buf + ?Sized> Buf for &mut T {
+    deref_forward_buf!();
+}
+
+impl<T: Buf + ?Sized> Buf for Box<T> {
+    deref_forward_buf!();
+}
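+
+// Illustrative sketch (added for this edit, not part of the original crate
+// source): because `Buf` is forwarded through `&mut T` and `Box<T>` above,
+// generic readers can accept borrowed or boxed buffers interchangeably. The
+// helper name `read_u16` is hypothetical.
+#[cfg(test)]
+#[test]
+fn forwarding_buf_through_references_example() {
+    fn read_u16<B: Buf>(mut buf: B) -> u16 {
+        buf.get_u16()
+    }
+
+    let mut slice: &[u8] = b"\x01\x02";
+    assert_eq!(0x0102, read_u16(&mut slice)); // via `impl Buf for &mut T`
+    assert_eq!(0, slice.remaining()); // the original slice was advanced
+
+    let boxed: Box<dyn Buf> = Box::new(&b"\x03\x04"[..]);
+    assert_eq!(0x0304, read_u16(boxed)); // via `impl Buf for Box<T>`
+}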
+
+impl Buf for &[u8] {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.len()
+    }
+
+    #[inline]
+    fn chunk(&self) -> &[u8] {
+        self
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        if self.len() < cnt {
+            panic_advance(cnt, self.len());
+        }
+
+        *self = &self[cnt..];
+    }
+
+    #[inline]
+    fn copy_to_slice(&mut self, dst: &mut [u8]) {
+        if self.len() < dst.len() {
+            panic_advance(dst.len(), self.len());
+        }
+
+        dst.copy_from_slice(&self[..dst.len()]);
+        self.advance(dst.len());
+    }
+}
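+
+// Illustrative sketch (not part of the original crate source): a `&[u8]` is
+// itself a `Buf`; reading advances the slice in place, shrinking `remaining`.
+#[cfg(test)]
+#[test]
+fn slice_as_buf_example() {
+    let mut buf: &[u8] = b"\x00\x2A tail";
+    assert_eq!(7, buf.remaining());
+    assert_eq!(42, buf.get_u16()); // consumes the first two bytes
+    assert_eq!(&b" tail"[..], buf.chunk());
+}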
+
+#[cfg(feature = "std")]
+impl<T: AsRef<[u8]>> Buf for std::io::Cursor<T> {
+    #[inline]
+    fn remaining(&self) -> usize {
+        saturating_sub_usize_u64(self.get_ref().as_ref().len(), self.position())
+    }
+
+    #[inline]
+    fn chunk(&self) -> &[u8] {
+        let slice = self.get_ref().as_ref();
+        let pos = min_u64_usize(self.position(), slice.len());
+        &slice[pos..]
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        let len = self.get_ref().as_ref().len();
+        let pos = self.position();
+
+        // We intentionally allow `cnt == 0` here even if `pos > len`.
+        let max_cnt = saturating_sub_usize_u64(len, pos);
+        if cnt > max_cnt {
+            panic_advance(cnt, max_cnt);
+        }
+
+        // This will not overflow because either `cnt == 0` or the sum is not
+        // greater than `len`.
+        self.set_position(pos + cnt as u64);
+    }
+}
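+
+// Illustrative sketch (not part of the original crate source, assumes the
+// `std` feature): a `Cursor` over any `AsRef<[u8]>` acts as a `Buf`, with
+// `remaining()` derived from its current position.
+#[cfg(all(test, feature = "std"))]
+#[test]
+fn cursor_as_buf_example() {
+    let mut cur = std::io::Cursor::new([1u8, 2, 3, 4]);
+    cur.set_position(1);
+    assert_eq!(3, cur.remaining());
+    assert_eq!(2, cur.get_u8()); // reads at the current position and advances
+    assert_eq!(&[3u8, 4][..], cur.chunk());
+}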
+
+// The existence of this function makes the compiler verify that the `Buf`
+// trait is object-safe.
+fn _assert_trait_object(_b: &dyn Buf) {}
+
\ No newline at end of file diff --git a/src/bytes/buf/buf_mut.rs.html b/src/bytes/buf/buf_mut.rs.html new file mode 100644 index 000000000..5c5c4d11a --- /dev/null +++ b/src/bytes/buf/buf_mut.rs.html @@ -0,0 +1,3283 @@ +buf_mut.rs - source
+use crate::buf::{limit, Chain, Limit, UninitSlice};
+#[cfg(feature = "std")]
+use crate::buf::{writer, Writer};
+use crate::{panic_advance, panic_does_not_fit};
+
+use core::{mem, ptr, usize};
+
+use alloc::{boxed::Box, vec::Vec};
+
+/// A trait for values that provide sequential write access to bytes.
+///
+/// Write bytes to a buffer
+///
+/// A buffer stores bytes in memory such that write operations are infallible.
+/// The underlying storage may or may not be in contiguous memory. A `BufMut`
+/// value is a cursor into the buffer. Writing to `BufMut` advances the cursor
+/// position.
+///
+/// The simplest `BufMut` is a `Vec<u8>`.
+///
+/// ```
+/// use bytes::BufMut;
+///
+/// let mut buf = vec![];
+///
+/// buf.put(&b"hello world"[..]);
+///
+/// assert_eq!(buf, b"hello world");
+/// ```
+pub unsafe trait BufMut {
+    /// Returns the number of bytes that can be written from the current
+    /// position until the end of the buffer is reached.
+    ///
+    /// This value is greater than or equal to the length of the slice returned
+    /// by `chunk_mut()`.
+    ///
+    /// Writing to a `BufMut` may involve allocating more memory on the fly.
+    /// Implementations may fail before reaching the number of bytes indicated
+    /// by this method if they encounter an allocation failure.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut dst = [0; 10];
+    /// let mut buf = &mut dst[..];
+    ///
+    /// let original_remaining = buf.remaining_mut();
+    /// buf.put(&b"hello"[..]);
+    ///
+    /// assert_eq!(original_remaining - 5, buf.remaining_mut());
+    /// ```
+    ///
+    /// # Implementer notes
+    ///
+    /// Implementations of `remaining_mut` should ensure that the return value
+    /// does not change unless a call is made to `advance_mut` or any other
+    /// function that is documented to change the `BufMut`'s current position.
+    ///
+    /// # Note
+    ///
+    /// `remaining_mut` may return a value smaller than the actual available space.
+    fn remaining_mut(&self) -> usize;
+
+    /// Advance the internal cursor of the `BufMut`.
+    ///
+    /// The next call to `chunk_mut` will return a slice starting `cnt` bytes
+    /// further into the underlying buffer.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the next `cnt` bytes of `chunk_mut()` are
+    /// initialized.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = Vec::with_capacity(16);
+    ///
+    /// // Write some data
+    /// buf.chunk_mut()[0..2].copy_from_slice(b"he");
+    /// unsafe { buf.advance_mut(2) };
+    ///
+    /// // write more bytes
+    /// buf.chunk_mut()[0..3].copy_from_slice(b"llo");
+    ///
+    /// unsafe { buf.advance_mut(3); }
+    ///
+    /// assert_eq!(5, buf.len());
+    /// assert_eq!(buf, b"hello");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function **may** panic if `cnt > self.remaining_mut()`.
+    ///
+    /// # Implementer notes
+    ///
+    /// It is recommended for implementations of `advance_mut` to panic if
+    /// `cnt > self.remaining_mut()`. If the implementation does not panic,
+    /// the call must behave as if `cnt == self.remaining_mut()`.
+    ///
+    /// A call with `cnt == 0` should never panic and be a no-op.
+    unsafe fn advance_mut(&mut self, cnt: usize);
+
+    /// Returns true if there is space in `self` for more bytes.
+    ///
+    /// This is equivalent to `self.remaining_mut() != 0`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut dst = [0; 5];
+    /// let mut buf = &mut dst[..];
+    ///
+    /// assert!(buf.has_remaining_mut());
+    ///
+    /// buf.put(&b"hello"[..]);
+    ///
+    /// assert!(!buf.has_remaining_mut());
+    /// ```
+    #[inline]
+    fn has_remaining_mut(&self) -> bool {
+        self.remaining_mut() > 0
+    }
+
+    /// Returns a mutable slice starting at the current BufMut position and of
+    /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the
+    /// whole remainder of the buffer (this allows non-contiguous implementations).
+    ///
+    /// This is a lower level function. Most operations are done with other
+    /// functions.
+    ///
+    /// The returned byte slice may represent uninitialized memory.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = Vec::with_capacity(16);
+    ///
+    /// unsafe {
+    ///     // MaybeUninit::as_mut_ptr
+    ///     buf.chunk_mut()[0..].as_mut_ptr().write(b'h');
+    ///     buf.chunk_mut()[1..].as_mut_ptr().write(b'e');
+    ///
+    ///     buf.advance_mut(2);
+    ///
+    ///     buf.chunk_mut()[0..].as_mut_ptr().write(b'l');
+    ///     buf.chunk_mut()[1..].as_mut_ptr().write(b'l');
+    ///     buf.chunk_mut()[2..].as_mut_ptr().write(b'o');
+    ///
+    ///     buf.advance_mut(3);
+    /// }
+    ///
+    /// assert_eq!(5, buf.len());
+    /// assert_eq!(buf, b"hello");
+    /// ```
+    ///
+    /// # Implementer notes
+    ///
+    /// This function should never panic. `chunk_mut()` should return an empty
+    /// slice **if and only if** `remaining_mut()` returns 0. In other words,
+    /// `chunk_mut()` returning an empty slice implies that `remaining_mut()` will
+    /// return 0 and `remaining_mut()` returning 0 implies that `chunk_mut()` will
+    /// return an empty slice.
+    ///
+    /// This function may trigger an out-of-memory abort if it tries to allocate
+    /// memory and fails to do so.
+    // The `chunk_mut` method was previously called `bytes_mut`. This alias makes the
+    // rename more easily discoverable.
+    #[cfg_attr(docsrs, doc(alias = "bytes_mut"))]
+    fn chunk_mut(&mut self) -> &mut UninitSlice;
+
+    /// Transfer bytes into `self` from `src` and advance the cursor by the
+    /// number of bytes written.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    ///
+    /// buf.put_u8(b'h');
+    /// buf.put(&b"ello"[..]);
+    /// buf.put(&b" world"[..]);
+    ///
+    /// assert_eq!(buf, b"hello world");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `self` does not have enough capacity to contain `src`.
+    #[inline]
+    fn put<T: super::Buf>(&mut self, mut src: T)
+    where
+        Self: Sized,
+    {
+        if self.remaining_mut() < src.remaining() {
+            panic_advance(src.remaining(), self.remaining_mut());
+        }
+
+        while src.has_remaining() {
+            let s = src.chunk();
+            let d = self.chunk_mut();
+            let cnt = usize::min(s.len(), d.len());
+
+            d[..cnt].copy_from_slice(&s[..cnt]);
+
+            // SAFETY: We just initialized `cnt` bytes in `self`.
+            unsafe { self.advance_mut(cnt) };
+            src.advance(cnt);
+        }
+    }
+
+    /// Transfer bytes into `self` from `src` and advance the cursor by the
+    /// number of bytes written.
+    ///
+    /// `self` must have enough remaining capacity to contain all of `src`.
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut dst = [0; 6];
+    ///
+    /// {
+    ///     let mut buf = &mut dst[..];
+    ///     buf.put_slice(b"hello");
+    ///
+    ///     assert_eq!(1, buf.remaining_mut());
+    /// }
+    ///
+    /// assert_eq!(b"hello\0", &dst);
+    /// ```
+    #[inline]
+    fn put_slice(&mut self, mut src: &[u8]) {
+        if self.remaining_mut() < src.len() {
+            panic_advance(src.len(), self.remaining_mut());
+        }
+
+        while !src.is_empty() {
+            let dst = self.chunk_mut();
+            let cnt = usize::min(src.len(), dst.len());
+
+            dst[..cnt].copy_from_slice(&src[..cnt]);
+            src = &src[cnt..];
+
+            // SAFETY: We just initialized `cnt` bytes in `self`.
+            unsafe { self.advance_mut(cnt) };
+        }
+    }
+
+    /// Put `cnt` bytes `val` into `self`.
+    ///
+    /// Logically equivalent to calling `self.put_u8(val)` `cnt` times, but may work faster.
+    ///
+    /// `self` must have at least `cnt` remaining capacity.
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut dst = [0; 6];
+    ///
+    /// {
+    ///     let mut buf = &mut dst[..];
+    ///     buf.put_bytes(b'a', 4);
+    ///
+    ///     assert_eq!(2, buf.remaining_mut());
+    /// }
+    ///
+    /// assert_eq!(b"aaaa\0\0", &dst);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_bytes(&mut self, val: u8, mut cnt: usize) {
+        if self.remaining_mut() < cnt {
+            panic_advance(cnt, self.remaining_mut());
+        }
+
+        while cnt > 0 {
+            let dst = self.chunk_mut();
+            let dst_len = usize::min(dst.len(), cnt);
+            // SAFETY: The pointer is valid for `dst_len <= dst.len()` bytes.
+            unsafe { core::ptr::write_bytes(dst.as_mut_ptr(), val, dst_len) };
+            // SAFETY: We just initialized `dst_len` bytes in `self`.
+            unsafe { self.advance_mut(dst_len) };
+            cnt -= dst_len;
+        }
+    }
+
+    /// Writes an unsigned 8 bit integer to `self`.
+    ///
+    /// The current position is advanced by 1.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u8(0x01);
+    /// assert_eq!(buf, b"\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u8(&mut self, n: u8) {
+        let src = [n];
+        self.put_slice(&src);
+    }
+
+    /// Writes a signed 8 bit integer to `self`.
+    ///
+    /// The current position is advanced by 1.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i8(0x01);
+    /// assert_eq!(buf, b"\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i8(&mut self, n: i8) {
+        let src = [n as u8];
+        self.put_slice(&src)
+    }
+
+    /// Writes an unsigned 16 bit integer to `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u16(0x0809);
+    /// assert_eq!(buf, b"\x08\x09");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u16(&mut self, n: u16) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes an unsigned 16 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u16_le(0x0809);
+    /// assert_eq!(buf, b"\x09\x08");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u16_le(&mut self, n: u16) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes an unsigned 16 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u16_ne(0x0809);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09");
+    /// } else {
+    ///     assert_eq!(buf, b"\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u16_ne(&mut self, n: u16) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes a signed 16 bit integer to `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i16(0x0809);
+    /// assert_eq!(buf, b"\x08\x09");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i16(&mut self, n: i16) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes a signed 16 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i16_le(0x0809);
+    /// assert_eq!(buf, b"\x09\x08");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i16_le(&mut self, n: i16) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes a signed 16 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i16_ne(0x0809);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09");
+    /// } else {
+    ///     assert_eq!(buf, b"\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i16_ne(&mut self, n: i16) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u32(0x0809A0A1);
+    /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u32(&mut self, n: u32) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes an unsigned 32 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u32_le(0x0809A0A1);
+    /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u32_le(&mut self, n: u32) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes an unsigned 32 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u32_ne(0x0809A0A1);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// } else {
+    ///     assert_eq!(buf, b"\xA1\xA0\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u32_ne(&mut self, n: u32) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes a signed 32 bit integer to `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i32(0x0809A0A1);
+    /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i32(&mut self, n: i32) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes a signed 32 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i32_le(0x0809A0A1);
+    /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i32_le(&mut self, n: i32) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes a signed 32 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i32_ne(0x0809A0A1);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// } else {
+    ///     assert_eq!(buf, b"\xA1\xA0\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i32_ne(&mut self, n: i32) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u64(0x0102030405060708);
+    /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u64(&mut self, n: u64) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes an unsigned 64 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u64_le(0x0102030405060708);
+    /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u64_le(&mut self, n: u64) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes an unsigned 64 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u64_ne(0x0102030405060708);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+    /// } else {
+    ///     assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u64_ne(&mut self, n: u64) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes a signed 64 bit integer to `self` in the big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i64(0x0102030405060708);
+    /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i64(&mut self, n: i64) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes a signed 64 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i64_le(0x0102030405060708);
+    /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i64_le(&mut self, n: i64) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes a signed 64 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i64_ne(0x0102030405060708);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+    /// } else {
+    ///     assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i64_ne(&mut self, n: i64) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u128(0x01020304050607080910111213141516);
+    /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u128(&mut self, n: u128) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes an unsigned 128 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u128_le(0x01020304050607080910111213141516);
+    /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u128_le(&mut self, n: u128) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes an unsigned 128 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u128_ne(0x01020304050607080910111213141516);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+    /// } else {
+    ///     assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_u128_ne(&mut self, n: u128) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes a signed 128 bit integer to `self` in the big-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i128(0x01020304050607080910111213141516);
+    /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i128(&mut self, n: i128) {
+        self.put_slice(&n.to_be_bytes())
+    }
+
+    /// Writes a signed 128 bit integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i128_le(0x01020304050607080910111213141516);
+    /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i128_le(&mut self, n: i128) {
+        self.put_slice(&n.to_le_bytes())
+    }
+
+    /// Writes a signed 128 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i128_ne(0x01020304050607080910111213141516);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+    /// } else {
+    ///     assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_i128_ne(&mut self, n: i128) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
+    /// Writes an unsigned n-byte integer to `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_uint(0x010203, 3);
+    /// assert_eq!(buf, b"\x01\x02\x03");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    #[inline]
+    fn put_uint(&mut self, n: u64, nbytes: usize) {
+        let start = match mem::size_of_val(&n).checked_sub(nbytes) {
+            Some(start) => start,
+            None => panic_does_not_fit(nbytes, mem::size_of_val(&n)),
+        };
+
+        self.put_slice(&n.to_be_bytes()[start..]);
+    }
+
+    /// Writes an unsigned n-byte integer to `self` in the little-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_uint_le(0x010203, 3);
+    /// assert_eq!(buf, b"\x03\x02\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    #[inline]
+    fn put_uint_le(&mut self, n: u64, nbytes: usize) {
+        let slice = n.to_le_bytes();
+        let slice = match slice.get(..nbytes) {
+            Some(slice) => slice,
+            None => panic_does_not_fit(nbytes, slice.len()),
+        };
+
+        self.put_slice(slice);
+    }
+
+    /// Writes an unsigned n-byte integer to `self` in the native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_uint_ne(0x010203, 3);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03");
+    /// } else {
+    ///     assert_eq!(buf, b"\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    #[inline]
+    fn put_uint_ne(&mut self, n: u64, nbytes: usize) {
+        if cfg!(target_endian = "big") {
+            self.put_uint(n, nbytes)
+        } else {
+            self.put_uint_le(n, nbytes)
+        }
+    }
+
+    /// Writes low `nbytes` of a signed integer to `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_int(0x0504010203, 3);
+    /// assert_eq!(buf, b"\x01\x02\x03");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    #[inline]
+    fn put_int(&mut self, n: i64, nbytes: usize) {
+        let start = match mem::size_of_val(&n).checked_sub(nbytes) {
+            Some(start) => start,
+            None => panic_does_not_fit(nbytes, mem::size_of_val(&n)),
+        };
+
+        self.put_slice(&n.to_be_bytes()[start..]);
+    }
+
+    /// Writes low `nbytes` of a signed integer to `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_int_le(0x0504010203, 3);
+    /// assert_eq!(buf, b"\x03\x02\x01");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    #[inline]
+    fn put_int_le(&mut self, n: i64, nbytes: usize) {
+        let slice = n.to_le_bytes();
+        let slice = match slice.get(..nbytes) {
+            Some(slice) => slice,
+            None => panic_does_not_fit(nbytes, slice.len()),
+        };
+
+        self.put_slice(slice);
+    }
+
+    /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_int_ne(0x010203, 3);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03");
+    /// } else {
+    ///     assert_eq!(buf, b"\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    #[inline]
+    fn put_int_ne(&mut self, n: i64, nbytes: usize) {
+        if cfg!(target_endian = "big") {
+            self.put_int(n, nbytes)
+        } else {
+            self.put_int_le(n, nbytes)
+        }
+    }
+
+    /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+    /// `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f32(1.2f32);
+    /// assert_eq!(buf, b"\x3F\x99\x99\x9A");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_f32(&mut self, n: f32) {
+        self.put_u32(n.to_bits());
+    }
+
+    /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+    /// `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f32_le(1.2f32);
+    /// assert_eq!(buf, b"\x9A\x99\x99\x3F");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_f32_le(&mut self, n: f32) {
+        self.put_u32_le(n.to_bits());
+    }
+
+    /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f32_ne(1.2f32);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x3F\x99\x99\x9A");
+    /// } else {
+    ///     assert_eq!(buf, b"\x9A\x99\x99\x3F");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_f32_ne(&mut self, n: f32) {
+        self.put_u32_ne(n.to_bits());
+    }
+
+    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+    /// `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f64(1.2f64);
+    /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_f64(&mut self, n: f64) {
+        self.put_u64(n.to_bits());
+    }
+
+    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+    /// `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f64_le(1.2f64);
+    /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_f64_le(&mut self, n: f64) {
+        self.put_u64_le(n.to_bits());
+    }
+
+    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f64_ne(1.2f64);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+    /// } else {
+    ///     assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    #[inline]
+    fn put_f64_ne(&mut self, n: f64) {
+        self.put_u64_ne(n.to_bits());
+    }
+
+    /// Creates an adaptor which can write at most `limit` bytes to `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let arr = &mut [0u8; 128][..];
+    /// assert_eq!(arr.remaining_mut(), 128);
+    ///
+    /// let dst = arr.limit(10);
+    /// assert_eq!(dst.remaining_mut(), 10);
+    /// ```
+    #[inline]
+    fn limit(self, limit: usize) -> Limit<Self>
+    where
+        Self: Sized,
+    {
+        limit::new(self, limit)
+    }
+
+    /// Creates an adaptor which implements the `Write` trait for `self`.
+    ///
+    /// This function returns a new value which implements `Write` by adapting
+    /// the `Write` trait functions to the `BufMut` trait functions. Given that
+    /// `BufMut` operations are infallible, none of the `Write` functions will
+    /// return with `Err`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    /// use std::io::Write;
+    ///
+    /// let mut buf = vec![].writer();
+    ///
+    /// let num = buf.write(&b"hello world"[..]).unwrap();
+    /// assert_eq!(11, num);
+    ///
+    /// let buf = buf.into_inner();
+    ///
+    /// assert_eq!(*buf, b"hello world"[..]);
+    /// ```
+    #[cfg(feature = "std")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+    #[inline]
+    fn writer(self) -> Writer<Self>
+    where
+        Self: Sized,
+    {
+        writer::new(self)
+    }
+
+    /// Creates an adaptor which will chain this buffer with another.
+    ///
+    /// The returned `BufMut` instance will first write to all remaining bytes of
+    /// `self`. Afterwards, it will write to `next`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut a = [0u8; 5];
+    /// let mut b = [0u8; 6];
+    ///
+    /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
+    ///
+    /// chain.put_slice(b"hello world");
+    ///
+    /// assert_eq!(&a[..], b"hello");
+    /// assert_eq!(&b[..], b" world");
+    /// ```
+    #[inline]
+    fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
+    where
+        Self: Sized,
+    {
+        Chain::new(self, next)
+    }
+}
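+
+// Illustrative sketch (not part of the original crate source): the default
+// `put` implementation above copies chunk by chunk, so a non-contiguous
+// source such as two chained slices needs no special handling.
+#[cfg(test)]
+#[test]
+fn put_with_chained_source_example() {
+    use super::Buf;
+
+    let src = (&b"foo"[..]).chain(&b"bar"[..]);
+    let mut dst: Vec<u8> = Vec::new();
+    dst.put(src);
+    assert_eq!(dst, b"foobar");
+}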
+
+macro_rules! deref_forward_bufmut {
+    () => {
+        #[inline]
+        fn remaining_mut(&self) -> usize {
+            (**self).remaining_mut()
+        }
+
+        #[inline]
+        fn chunk_mut(&mut self) -> &mut UninitSlice {
+            (**self).chunk_mut()
+        }
+
+        #[inline]
+        unsafe fn advance_mut(&mut self, cnt: usize) {
+            (**self).advance_mut(cnt)
+        }
+
+        #[inline]
+        fn put_slice(&mut self, src: &[u8]) {
+            (**self).put_slice(src)
+        }
+
+        #[inline]
+        fn put_u8(&mut self, n: u8) {
+            (**self).put_u8(n)
+        }
+
+        #[inline]
+        fn put_i8(&mut self, n: i8) {
+            (**self).put_i8(n)
+        }
+
+        #[inline]
+        fn put_u16(&mut self, n: u16) {
+            (**self).put_u16(n)
+        }
+
+        #[inline]
+        fn put_u16_le(&mut self, n: u16) {
+            (**self).put_u16_le(n)
+        }
+
+        #[inline]
+        fn put_u16_ne(&mut self, n: u16) {
+            (**self).put_u16_ne(n)
+        }
+
+        #[inline]
+        fn put_i16(&mut self, n: i16) {
+            (**self).put_i16(n)
+        }
+
+        #[inline]
+        fn put_i16_le(&mut self, n: i16) {
+            (**self).put_i16_le(n)
+        }
+
+        #[inline]
+        fn put_i16_ne(&mut self, n: i16) {
+            (**self).put_i16_ne(n)
+        }
+
+        #[inline]
+        fn put_u32(&mut self, n: u32) {
+            (**self).put_u32(n)
+        }
+
+        #[inline]
+        fn put_u32_le(&mut self, n: u32) {
+            (**self).put_u32_le(n)
+        }
+
+        #[inline]
+        fn put_u32_ne(&mut self, n: u32) {
+            (**self).put_u32_ne(n)
+        }
+
+        #[inline]
+        fn put_i32(&mut self, n: i32) {
+            (**self).put_i32(n)
+        }
+
+        #[inline]
+        fn put_i32_le(&mut self, n: i32) {
+            (**self).put_i32_le(n)
+        }
+
+        #[inline]
+        fn put_i32_ne(&mut self, n: i32) {
+            (**self).put_i32_ne(n)
+        }
+
+        #[inline]
+        fn put_u64(&mut self, n: u64) {
+            (**self).put_u64(n)
+        }
+
+        #[inline]
+        fn put_u64_le(&mut self, n: u64) {
+            (**self).put_u64_le(n)
+        }
+
+        #[inline]
+        fn put_u64_ne(&mut self, n: u64) {
+            (**self).put_u64_ne(n)
+        }
+
+        #[inline]
+        fn put_i64(&mut self, n: i64) {
+            (**self).put_i64(n)
+        }
+
+        #[inline]
+        fn put_i64_le(&mut self, n: i64) {
+            (**self).put_i64_le(n)
+        }
+
+        #[inline]
+        fn put_i64_ne(&mut self, n: i64) {
+            (**self).put_i64_ne(n)
+        }
+    };
+}
+
+unsafe impl<T: BufMut + ?Sized> BufMut for &mut T {
+    deref_forward_bufmut!();
+}
+
+unsafe impl<T: BufMut + ?Sized> BufMut for Box<T> {
+    deref_forward_bufmut!();
+}
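+// A minimal sketch of how the forwarding impls above are typically used: a
+// generic writer that takes any `BufMut` by value can still be handed a
+// `&mut Vec<u8>` (or a `Box<dyn BufMut>`), because the mutable reference is
+// itself a `BufMut`.
+//
+// ```
+// use bytes::BufMut;
+//
+// fn write_header<B: BufMut>(mut buf: B) {
+//     buf.put_u16(0xBEEF);
+// }
+//
+// let mut out = vec![];
+// // `&mut Vec<u8>` implements `BufMut` via the `&mut T` forwarding impl.
+// write_header(&mut out);
+// assert_eq!(&out[..], b"\xBE\xEF");
+// ```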
+
+unsafe impl BufMut for &mut [u8] {
+    #[inline]
+    fn remaining_mut(&self) -> usize {
+        self.len()
+    }
+
+    #[inline]
+    fn chunk_mut(&mut self) -> &mut UninitSlice {
+        UninitSlice::new(self)
+    }
+
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        if self.len() < cnt {
+            panic_advance(cnt, self.len());
+        }
+
+        // Lifetime dance taken from `impl Write for &mut [u8]`.
+        let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt);
+        *self = b;
+    }
+
+    #[inline]
+    fn put_slice(&mut self, src: &[u8]) {
+        if self.len() < src.len() {
+            panic_advance(src.len(), self.len());
+        }
+
+        self[..src.len()].copy_from_slice(src);
+        // SAFETY: We just initialized `src.len()` bytes.
+        unsafe { self.advance_mut(src.len()) };
+    }
+
+    #[inline]
+    fn put_bytes(&mut self, val: u8, cnt: usize) {
+        if self.len() < cnt {
+            panic_advance(cnt, self.len());
+        }
+
+        // SAFETY: We just checked that the pointer is valid for `cnt` bytes.
+        unsafe {
+            ptr::write_bytes(self.as_mut_ptr(), val, cnt);
+            self.advance_mut(cnt);
+        }
+    }
+}
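+// A minimal sketch of the slice implementation above: writing through a
+// `&mut [u8]` fills the front of the slice and shrinks the view in place, so
+// `remaining_mut` reports the space that is still unwritten.
+//
+// ```
+// use bytes::BufMut;
+//
+// let mut data = [0u8; 8];
+// let mut buf = &mut data[..];
+//
+// buf.put_slice(b"abc");
+// assert_eq!(buf.remaining_mut(), 5);
+// assert_eq!(&data[..3], b"abc");
+// ```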
+
+unsafe impl BufMut for &mut [core::mem::MaybeUninit<u8>] {
+    #[inline]
+    fn remaining_mut(&self) -> usize {
+        self.len()
+    }
+
+    #[inline]
+    fn chunk_mut(&mut self) -> &mut UninitSlice {
+        UninitSlice::uninit(self)
+    }
+
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        if self.len() < cnt {
+            panic_advance(cnt, self.len());
+        }
+
+        // Lifetime dance taken from `impl Write for &mut [u8]`.
+        let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt);
+        *self = b;
+    }
+
+    #[inline]
+    fn put_slice(&mut self, src: &[u8]) {
+        if self.len() < src.len() {
+            panic_advance(src.len(), self.len());
+        }
+
+        // SAFETY: We just checked that the pointer is valid for `src.len()` bytes.
+        unsafe {
+            ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr().cast(), src.len());
+            self.advance_mut(src.len());
+        }
+    }
+
+    #[inline]
+    fn put_bytes(&mut self, val: u8, cnt: usize) {
+        if self.len() < cnt {
+            panic_advance(cnt, self.len());
+        }
+
+        // SAFETY: We just checked that the pointer is valid for `cnt` bytes.
+        unsafe {
+            ptr::write_bytes(self.as_mut_ptr() as *mut u8, val, cnt);
+            self.advance_mut(cnt);
+        }
+    }
+}
+
+unsafe impl BufMut for Vec<u8> {
+    #[inline]
+    fn remaining_mut(&self) -> usize {
+        // A vector can never have more than isize::MAX bytes
+        core::isize::MAX as usize - self.len()
+    }
+
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        let len = self.len();
+        let remaining = self.capacity() - len;
+
+        if remaining < cnt {
+            panic_advance(cnt, remaining);
+        }
+
+        // Addition will not overflow since the sum is at most the capacity.
+        self.set_len(len + cnt);
+    }
+
+    #[inline]
+    fn chunk_mut(&mut self) -> &mut UninitSlice {
+        if self.capacity() == self.len() {
+            self.reserve(64); // Grow the vec
+        }
+
+        let cap = self.capacity();
+        let len = self.len();
+
+        let ptr = self.as_mut_ptr();
+        // SAFETY: Since `ptr` is valid for `cap` bytes, `ptr.add(len)` must be
+        // valid for `cap - len` bytes. The subtraction will not underflow since
+        // `len <= cap`.
+        unsafe { UninitSlice::from_raw_parts_mut(ptr.add(len), cap - len) }
+    }
+
+    // Specialize these methods so they can skip checking `remaining_mut`
+    // and `advance_mut`.
+    #[inline]
+    fn put<T: super::Buf>(&mut self, mut src: T)
+    where
+        Self: Sized,
+    {
+        // In case the src isn't contiguous, reserve upfront.
+        self.reserve(src.remaining());
+
+        while src.has_remaining() {
+            let s = src.chunk();
+            let l = s.len();
+            self.extend_from_slice(s);
+            src.advance(l);
+        }
+    }
+
+    #[inline]
+    fn put_slice(&mut self, src: &[u8]) {
+        self.extend_from_slice(src);
+    }
+
+    #[inline]
+    fn put_bytes(&mut self, val: u8, cnt: usize) {
+        // If the addition overflows, then the `resize` will fail.
+        let new_len = self.len().saturating_add(cnt);
+        self.resize(new_len, val);
+    }
+}
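+// A minimal sketch of the `Vec<u8>` implementation above: the vector grows on
+// demand, so `remaining_mut` is effectively unbounded and the specialized
+// `put_bytes`/`put_slice` paths simply append.
+//
+// ```
+// use bytes::BufMut;
+//
+// let mut buf: Vec<u8> = Vec::new();
+// buf.put_u8(b'!');
+// buf.put_bytes(b'-', 3);
+//
+// assert_eq!(&buf[..], b"!---");
+// assert!(buf.remaining_mut() > buf.len());
+// ```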
+
+// The existence of this function makes the compiler verify that the `BufMut`
+// trait is object-safe (i.e. usable as `dyn BufMut`).
+fn _assert_trait_object(_b: &dyn BufMut) {}
+
\ No newline at end of file diff --git a/src/bytes/buf/chain.rs.html b/src/bytes/buf/chain.rs.html new file mode 100644 index 000000000..cce5b685d --- /dev/null +++ b/src/bytes/buf/chain.rs.html @@ -0,0 +1,481 @@ +chain.rs - source
use crate::buf::{IntoIter, UninitSlice};
+use crate::{Buf, BufMut, Bytes};
+
+#[cfg(feature = "std")]
+use std::io::IoSlice;
+
+/// A `Chain` sequences two buffers.
+///
+/// `Chain` is an adapter that links two underlying buffers and provides a
+/// continuous view across both buffers. It is able to sequence either immutable
+/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
+///
+/// This struct is generally created by calling [`Buf::chain`]. Please see that
+/// function's documentation for more detail.
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{Bytes, Buf};
+///
+/// let mut buf = (&b"hello "[..])
+///     .chain(&b"world"[..]);
+///
+/// let full: Bytes = buf.copy_to_bytes(11);
+/// assert_eq!(full[..], b"hello world"[..]);
+/// ```
+///
+/// [`Buf::chain`]: Buf::chain
+#[derive(Debug)]
+pub struct Chain<T, U> {
+    a: T,
+    b: U,
+}
+
+impl<T, U> Chain<T, U> {
+    /// Creates a new `Chain` sequencing the provided values.
+    pub(crate) fn new(a: T, b: U) -> Chain<T, U> {
+        Chain { a, b }
+    }
+
+    /// Gets a reference to the first underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let buf = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
+    ///
+    /// assert_eq!(buf.first_ref()[..], b"hello"[..]);
+    /// ```
+    pub fn first_ref(&self) -> &T {
+        &self.a
+    }
+
+    /// Gets a mutable reference to the first underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
+    ///
+    /// buf.first_mut().advance(1);
+    ///
+    /// let full = buf.copy_to_bytes(9);
+    /// assert_eq!(full, b"elloworld"[..]);
+    /// ```
+    pub fn first_mut(&mut self) -> &mut T {
+        &mut self.a
+    }
+
+    /// Gets a reference to the last underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let buf = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
+    ///
+    /// assert_eq!(buf.last_ref()[..], b"world"[..]);
+    /// ```
+    pub fn last_ref(&self) -> &U {
+        &self.b
+    }
+
+    /// Gets a mutable reference to the last underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = (&b"hello "[..])
+    ///     .chain(&b"world"[..]);
+    ///
+    /// buf.last_mut().advance(1);
+    ///
+    /// let full = buf.copy_to_bytes(10);
+    /// assert_eq!(full, b"hello orld"[..]);
+    /// ```
+    pub fn last_mut(&mut self) -> &mut U {
+        &mut self.b
+    }
+
+    /// Consumes this `Chain`, returning the underlying values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let chain = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
+    ///
+    /// let (first, last) = chain.into_inner();
+    /// assert_eq!(first[..], b"hello"[..]);
+    /// assert_eq!(last[..], b"world"[..]);
+    /// ```
+    pub fn into_inner(self) -> (T, U) {
+        (self.a, self.b)
+    }
+}
+
+impl<T, U> Buf for Chain<T, U>
+where
+    T: Buf,
+    U: Buf,
+{
+    fn remaining(&self) -> usize {
+        self.a.remaining().saturating_add(self.b.remaining())
+    }
+
+    fn chunk(&self) -> &[u8] {
+        if self.a.has_remaining() {
+            self.a.chunk()
+        } else {
+            self.b.chunk()
+        }
+    }
+
+    fn advance(&mut self, mut cnt: usize) {
+        let a_rem = self.a.remaining();
+
+        if a_rem != 0 {
+            if a_rem >= cnt {
+                self.a.advance(cnt);
+                return;
+            }
+
+            // Consume what is left of a
+            self.a.advance(a_rem);
+
+            cnt -= a_rem;
+        }
+
+        self.b.advance(cnt);
+    }
+
+    #[cfg(feature = "std")]
+    fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+        let mut n = self.a.chunks_vectored(dst);
+        n += self.b.chunks_vectored(&mut dst[n..]);
+        n
+    }
+
+    fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+        let a_rem = self.a.remaining();
+        if a_rem >= len {
+            self.a.copy_to_bytes(len)
+        } else if a_rem == 0 {
+            self.b.copy_to_bytes(len)
+        } else {
+            assert!(
+                len - a_rem <= self.b.remaining(),
+                "`len` greater than remaining"
+            );
+            let mut ret = crate::BytesMut::with_capacity(len);
+            ret.put(&mut self.a);
+            ret.put((&mut self.b).take(len - a_rem));
+            ret.freeze()
+        }
+    }
+}
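+// A minimal sketch of the `Buf` implementation above: reads flow through the
+// first buffer and then continue into the second, and `copy_to_bytes` stitches
+// the two together when a request crosses the boundary.
+//
+// ```
+// use bytes::Buf;
+//
+// let mut chain = (&b"he"[..]).chain(&b"llo"[..]);
+//
+// // 4 bytes span both underlying buffers.
+// let head = chain.copy_to_bytes(4);
+// assert_eq!(&head[..], b"hell");
+// assert_eq!(chain.remaining(), 1);
+// ```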
+
+unsafe impl<T, U> BufMut for Chain<T, U>
+where
+    T: BufMut,
+    U: BufMut,
+{
+    fn remaining_mut(&self) -> usize {
+        self.a
+            .remaining_mut()
+            .saturating_add(self.b.remaining_mut())
+    }
+
+    fn chunk_mut(&mut self) -> &mut UninitSlice {
+        if self.a.has_remaining_mut() {
+            self.a.chunk_mut()
+        } else {
+            self.b.chunk_mut()
+        }
+    }
+
+    unsafe fn advance_mut(&mut self, mut cnt: usize) {
+        let a_rem = self.a.remaining_mut();
+
+        if a_rem != 0 {
+            if a_rem >= cnt {
+                self.a.advance_mut(cnt);
+                return;
+            }
+
+            // Consume what is left of a
+            self.a.advance_mut(a_rem);
+
+            cnt -= a_rem;
+        }
+
+        self.b.advance_mut(cnt);
+    }
+}
+
+impl<T, U> IntoIterator for Chain<T, U>
+where
+    T: Buf,
+    U: Buf,
+{
+    type Item = u8;
+    type IntoIter = IntoIter<Chain<T, U>>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter::new(self)
+    }
+}
+
\ No newline at end of file diff --git a/src/bytes/buf/iter.rs.html b/src/bytes/buf/iter.rs.html new file mode 100644 index 000000000..157889e99 --- /dev/null +++ b/src/bytes/buf/iter.rs.html @@ -0,0 +1,255 @@ +iter.rs - source
use crate::Buf;
+
+/// Iterator over the bytes contained by the buffer.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use bytes::Bytes;
+///
+/// let buf = Bytes::from(&b"abc"[..]);
+/// let mut iter = buf.into_iter();
+///
+/// assert_eq!(iter.next(), Some(b'a'));
+/// assert_eq!(iter.next(), Some(b'b'));
+/// assert_eq!(iter.next(), Some(b'c'));
+/// assert_eq!(iter.next(), None);
+/// ```
+#[derive(Debug)]
+pub struct IntoIter<T> {
+    inner: T,
+}
+
+impl<T> IntoIter<T> {
+    /// Creates an iterator over the bytes contained by the buffer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let buf = Bytes::from_static(b"abc");
+    /// let mut iter = buf.into_iter();
+    ///
+    /// assert_eq!(iter.next(), Some(b'a'));
+    /// assert_eq!(iter.next(), Some(b'b'));
+    /// assert_eq!(iter.next(), Some(b'c'));
+    /// assert_eq!(iter.next(), None);
+    /// ```
+    pub fn new(inner: T) -> IntoIter<T> {
+        IntoIter { inner }
+    }
+
+    /// Consumes this `IntoIter`, returning the underlying value.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::{Buf, Bytes};
+    ///
+    /// let buf = Bytes::from(&b"abc"[..]);
+    /// let mut iter = buf.into_iter();
+    ///
+    /// assert_eq!(iter.next(), Some(b'a'));
+    ///
+    /// let buf = iter.into_inner();
+    /// assert_eq!(2, buf.remaining());
+    /// ```
+    pub fn into_inner(self) -> T {
+        self.inner
+    }
+
+    /// Gets a reference to the underlying `Buf`.
+    ///
+    /// It is inadvisable to directly read from the underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::{Buf, Bytes};
+    ///
+    /// let buf = Bytes::from(&b"abc"[..]);
+    /// let mut iter = buf.into_iter();
+    ///
+    /// assert_eq!(iter.next(), Some(b'a'));
+    ///
+    /// assert_eq!(2, iter.get_ref().remaining());
+    /// ```
+    pub fn get_ref(&self) -> &T {
+        &self.inner
+    }
+
+    /// Gets a mutable reference to the underlying `Buf`.
+    ///
+    /// It is inadvisable to directly read from the underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::{Buf, BytesMut};
+    ///
+    /// let buf = BytesMut::from(&b"abc"[..]);
+    /// let mut iter = buf.into_iter();
+    ///
+    /// assert_eq!(iter.next(), Some(b'a'));
+    ///
+    /// iter.get_mut().advance(1);
+    ///
+    /// assert_eq!(iter.next(), Some(b'c'));
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+}
+
+impl<T: Buf> Iterator for IntoIter<T> {
+    type Item = u8;
+
+    fn next(&mut self) -> Option<u8> {
+        if !self.inner.has_remaining() {
+            return None;
+        }
+
+        let b = self.inner.chunk()[0];
+        self.inner.advance(1);
+
+        Some(b)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let rem = self.inner.remaining();
+        (rem, Some(rem))
+    }
+}
+
+impl<T: Buf> ExactSizeIterator for IntoIter<T> {}
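+// A minimal sketch of the iterator implementations above: `size_hint` is
+// exact, so the iterator also works as an `ExactSizeIterator` and can be
+// collected without reallocation.
+//
+// ```
+// use bytes::Bytes;
+//
+// let iter = Bytes::from_static(b"abc").into_iter();
+// assert_eq!(iter.len(), 3);
+//
+// let v: Vec<u8> = iter.collect();
+// assert_eq!(&v[..], b"abc");
+// ```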
+
\ No newline at end of file diff --git a/src/bytes/buf/limit.rs.html b/src/bytes/buf/limit.rs.html new file mode 100644 index 000000000..5d1c2d1e8 --- /dev/null +++ b/src/bytes/buf/limit.rs.html @@ -0,0 +1,151 @@ +limit.rs - source
use crate::buf::UninitSlice;
+use crate::BufMut;
+
+use core::cmp;
+
+/// A `BufMut` adapter which limits the amount of bytes that can be written
+/// to an underlying buffer.
+#[derive(Debug)]
+pub struct Limit<T> {
+    inner: T,
+    limit: usize,
+}
+
+pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
+    Limit { inner, limit }
+}
+
+impl<T> Limit<T> {
+    /// Consumes this `Limit`, returning the underlying value.
+    pub fn into_inner(self) -> T {
+        self.inner
+    }
+
+    /// Gets a reference to the underlying `BufMut`.
+    ///
+    /// It is inadvisable to directly write to the underlying `BufMut`.
+    pub fn get_ref(&self) -> &T {
+        &self.inner
+    }
+
+    /// Gets a mutable reference to the underlying `BufMut`.
+    ///
+    /// It is inadvisable to directly write to the underlying `BufMut`.
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+
+    /// Returns the maximum number of bytes that can be written.
+    ///
+    /// # Note
+    ///
+    /// If the inner `BufMut` has fewer bytes than indicated by this method then
+    /// that is the actual number of available bytes.
+    pub fn limit(&self) -> usize {
+        self.limit
+    }
+
+    /// Sets the maximum number of bytes that can be written.
+    ///
+    /// # Note
+    ///
+    /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual
+    /// number of available bytes.
+    pub fn set_limit(&mut self, lim: usize) {
+        self.limit = lim
+    }
+}
+
+unsafe impl<T: BufMut> BufMut for Limit<T> {
+    fn remaining_mut(&self) -> usize {
+        cmp::min(self.inner.remaining_mut(), self.limit)
+    }
+
+    fn chunk_mut(&mut self) -> &mut UninitSlice {
+        let bytes = self.inner.chunk_mut();
+        let end = cmp::min(bytes.len(), self.limit);
+        &mut bytes[..end]
+    }
+
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        assert!(cnt <= self.limit);
+        self.inner.advance_mut(cnt);
+        self.limit -= cnt;
+    }
+}
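+// A minimal sketch of the `Limit` implementation above: the cap shrinks as
+// bytes are written, so `remaining_mut` reports how much of the allowance is
+// left rather than how much the inner buffer could hold.
+//
+// ```
+// use bytes::BufMut;
+//
+// let mut dst = vec![].limit(5);
+//
+// dst.put_slice(b"abc");
+// assert_eq!(dst.remaining_mut(), 2);
+// assert_eq!(&dst.get_ref()[..], b"abc");
+// ```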
+
\ No newline at end of file diff --git a/src/bytes/buf/mod.rs.html b/src/bytes/buf/mod.rs.html new file mode 100644 index 000000000..55f2427ca --- /dev/null +++ b/src/bytes/buf/mod.rs.html @@ -0,0 +1,79 @@ +mod.rs - source
//! Utilities for working with buffers.
+//!
+//! A buffer is any structure that contains a sequence of bytes. The bytes may
+//! or may not be stored in contiguous memory. This module contains traits used
+//! to abstract over buffers as well as utilities for working with buffer types.
+//!
+//! # `Buf`, `BufMut`
+//!
+//! These are the two foundational traits for abstractly working with buffers.
+//! They can be thought of as iterators for byte structures. They offer additional
+//! performance over `Iterator` by providing an API optimized for byte slices.
+//!
+//! See [`Buf`] and [`BufMut`] for more details.
+//!
+//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
+
+mod buf_impl;
+mod buf_mut;
+mod chain;
+mod iter;
+mod limit;
+#[cfg(feature = "std")]
+mod reader;
+mod take;
+mod uninit_slice;
+mod vec_deque;
+#[cfg(feature = "std")]
+mod writer;
+
+pub use self::buf_impl::Buf;
+pub use self::buf_mut::BufMut;
+pub use self::chain::Chain;
+pub use self::iter::IntoIter;
+pub use self::limit::Limit;
+pub use self::take::Take;
+pub use self::uninit_slice::UninitSlice;
+
+#[cfg(feature = "std")]
+pub use self::{reader::Reader, writer::Writer};
+
\ No newline at end of file diff --git a/src/bytes/buf/reader.rs.html b/src/bytes/buf/reader.rs.html new file mode 100644 index 000000000..bb3179d26 --- /dev/null +++ b/src/bytes/buf/reader.rs.html @@ -0,0 +1,163 @@ +reader.rs - source
use crate::Buf;
+
+use std::{cmp, io};
+
+/// A `Buf` adapter which implements `io::Read` for the inner value.
+///
+/// This struct is generally created by calling `reader()` on `Buf`. See
+/// documentation of [`reader()`](Buf::reader) for more
+/// details.
+#[derive(Debug)]
+pub struct Reader<B> {
+    buf: B,
+}
+
+pub fn new<B>(buf: B) -> Reader<B> {
+    Reader { buf }
+}
+
+impl<B: Buf> Reader<B> {
+    /// Gets a reference to the underlying `Buf`.
+    ///
+    /// It is inadvisable to directly read from the underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::Buf;
+    ///
+    /// let buf = b"hello world".reader();
+    ///
+    /// assert_eq!(b"hello world", buf.get_ref());
+    /// ```
+    pub fn get_ref(&self) -> &B {
+        &self.buf
+    }
+
+    /// Gets a mutable reference to the underlying `Buf`.
+    ///
+    /// It is inadvisable to directly read from the underlying `Buf`.
+    pub fn get_mut(&mut self) -> &mut B {
+        &mut self.buf
+    }
+
+    /// Consumes this `Reader`, returning the underlying value.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::Buf;
+    /// use std::io;
+    ///
+    /// let mut buf = b"hello world".reader();
+    /// let mut dst = vec![];
+    ///
+    /// io::copy(&mut buf, &mut dst).unwrap();
+    ///
+    /// let buf = buf.into_inner();
+    /// assert_eq!(0, buf.remaining());
+    /// ```
+    pub fn into_inner(self) -> B {
+        self.buf
+    }
+}
+
+impl<B: Buf + Sized> io::Read for Reader<B> {
+    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+        let len = cmp::min(self.buf.remaining(), dst.len());
+
+        Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]);
+        Ok(len)
+    }
+}
+
+impl<B: Buf + Sized> io::BufRead for Reader<B> {
+    fn fill_buf(&mut self) -> io::Result<&[u8]> {
+        Ok(self.buf.chunk())
+    }
+    fn consume(&mut self, amt: usize) {
+        self.buf.advance(amt)
+    }
+}
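+// A minimal sketch of the `Read`/`BufRead` implementations above: `fill_buf`
+// simply exposes the current chunk and `consume` advances the inner `Buf`, so
+// buffered helpers such as `read_line` work directly on any `Buf`.
+//
+// ```
+// use bytes::Buf;
+// use std::io::BufRead;
+//
+// let mut reader = (&b"one\ntwo"[..]).reader();
+//
+// let mut line = String::new();
+// reader.read_line(&mut line).unwrap();
+//
+// assert_eq!(line, "one\n");
+// assert_eq!(reader.get_ref().remaining(), 3);
+// ```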
+
\ No newline at end of file diff --git a/src/bytes/buf/take.rs.html b/src/bytes/buf/take.rs.html new file mode 100644 index 000000000..59f3b019c --- /dev/null +++ b/src/bytes/buf/take.rs.html @@ -0,0 +1,311 @@ +take.rs - source
use crate::{Buf, Bytes};
+
+use core::cmp;
+
+/// A `Buf` adapter which limits the bytes read from an underlying buffer.
+///
+/// This struct is generally created by calling `take()` on `Buf`. See
+/// documentation of [`take()`](Buf::take) for more details.
+#[derive(Debug)]
+pub struct Take<T> {
+    inner: T,
+    limit: usize,
+}
+
+pub fn new<T>(inner: T, limit: usize) -> Take<T> {
+    Take { inner, limit }
+}
+
+impl<T> Take<T> {
+    /// Consumes this `Take`, returning the underlying value.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::{Buf, BufMut};
+    ///
+    /// let mut buf = b"hello world".take(2);
+    /// let mut dst = vec![];
+    ///
+    /// dst.put(&mut buf);
+    /// assert_eq!(*dst, b"he"[..]);
+    ///
+    /// let mut buf = buf.into_inner();
+    ///
+    /// dst.clear();
+    /// dst.put(&mut buf);
+    /// assert_eq!(*dst, b"llo world"[..]);
+    /// ```
+    pub fn into_inner(self) -> T {
+        self.inner
+    }
+
+    /// Gets a reference to the underlying `Buf`.
+    ///
+    /// It is inadvisable to directly read from the underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::Buf;
+    ///
+    /// let buf = b"hello world".take(2);
+    ///
+    /// assert_eq!(11, buf.get_ref().remaining());
+    /// ```
+    pub fn get_ref(&self) -> &T {
+        &self.inner
+    }
+
+    /// Gets a mutable reference to the underlying `Buf`.
+    ///
+    /// It is inadvisable to directly read from the underlying `Buf`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::{Buf, BufMut};
+    ///
+    /// let mut buf = b"hello world".take(2);
+    /// let mut dst = vec![];
+    ///
+    /// buf.get_mut().advance(2);
+    ///
+    /// dst.put(&mut buf);
+    /// assert_eq!(*dst, b"ll"[..]);
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+
+    /// Returns the maximum number of bytes that can be read.
+    ///
+    /// # Note
+    ///
+    /// If the inner `Buf` has fewer bytes than indicated by this method then
+    /// that is the actual number of available bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::Buf;
+    ///
+    /// let mut buf = b"hello world".take(2);
+    ///
+    /// assert_eq!(2, buf.limit());
+    /// assert_eq!(b'h', buf.get_u8());
+    /// assert_eq!(1, buf.limit());
+    /// ```
+    pub fn limit(&self) -> usize {
+        self.limit
+    }
+
+    /// Sets the maximum number of bytes that can be read.
+    ///
+    /// # Note
+    ///
+    /// If the inner `Buf` has fewer bytes than `lim` then that is the actual
+    /// number of available bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::{Buf, BufMut};
+    ///
+    /// let mut buf = b"hello world".take(2);
+    /// let mut dst = vec![];
+    ///
+    /// dst.put(&mut buf);
+    /// assert_eq!(*dst, b"he"[..]);
+    ///
+    /// dst.clear();
+    ///
+    /// buf.set_limit(3);
+    /// dst.put(&mut buf);
+    /// assert_eq!(*dst, b"llo"[..]);
+    /// ```
+    pub fn set_limit(&mut self, lim: usize) {
+        self.limit = lim
+    }
+}
+
+impl<T: Buf> Buf for Take<T> {
+    fn remaining(&self) -> usize {
+        cmp::min(self.inner.remaining(), self.limit)
+    }
+
+    fn chunk(&self) -> &[u8] {
+        let bytes = self.inner.chunk();
+        &bytes[..cmp::min(bytes.len(), self.limit)]
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        assert!(cnt <= self.limit);
+        self.inner.advance(cnt);
+        self.limit -= cnt;
+    }
+
+    fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+        assert!(len <= self.remaining(), "`len` greater than remaining");
+
+        let r = self.inner.copy_to_bytes(len);
+        self.limit -= len;
+        r
+    }
+}
+
\ No newline at end of file diff --git a/src/bytes/buf/uninit_slice.rs.html b/src/bytes/buf/uninit_slice.rs.html new file mode 100644 index 000000000..cd63b282b --- /dev/null +++ b/src/bytes/buf/uninit_slice.rs.html @@ -0,0 +1,515 @@ +uninit_slice.rs - source
use core::fmt;
+use core::mem::MaybeUninit;
+use core::ops::{
+    Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive,
+};
+
+/// Uninitialized byte slice.
+///
+/// Returned by `BufMut::chunk_mut()`, the referenced byte slice may be
+/// uninitialized. The wrapper provides safe access without introducing
+/// undefined behavior.
+///
+/// The safety invariants of this wrapper are:
+///
+///  1. Reading from an `UninitSlice` is undefined behavior.
+///  2. Writing uninitialized bytes to an `UninitSlice` is undefined behavior.
+///
+/// The difference between `&mut UninitSlice` and `&mut [MaybeUninit<u8>]` is
+/// that it is possible in safe code to write uninitialized bytes to an
+/// `&mut [MaybeUninit<u8>]`, which this type prohibits.
+#[repr(transparent)]
+pub struct UninitSlice([MaybeUninit<u8>]);
+
+impl UninitSlice {
+    /// Creates a `&mut UninitSlice` wrapping a slice of initialised memory.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::buf::UninitSlice;
+    ///
+    /// let mut buffer = [0u8; 64];
+    /// let slice = UninitSlice::new(&mut buffer[..]);
+    /// ```
+    #[inline]
+    pub fn new(slice: &mut [u8]) -> &mut UninitSlice {
+        unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+    }
+
+    /// Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::buf::UninitSlice;
+    /// use core::mem::MaybeUninit;
+    ///
+    /// let mut buffer = [MaybeUninit::uninit(); 64];
+    /// let slice = UninitSlice::uninit(&mut buffer[..]);
+    ///
+    /// let mut vec = Vec::with_capacity(1024);
+    /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into();
+    /// ```
+    #[inline]
+    pub fn uninit(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice {
+        unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+    }
+
+    fn uninit_ref(slice: &[MaybeUninit<u8>]) -> &UninitSlice {
+        unsafe { &*(slice as *const [MaybeUninit<u8>] as *const UninitSlice) }
+    }
+
+    /// Create a `&mut UninitSlice` from a pointer and a length.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that `ptr` references a valid memory region owned
+    /// by the caller representing a byte slice for the duration of `'a`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::buf::UninitSlice;
+    ///
+    /// let bytes = b"hello world".to_vec();
+    /// let ptr = bytes.as_ptr() as *mut _;
+    /// let len = bytes.len();
+    ///
+    /// let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) };
+    /// ```
+    #[inline]
+    pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice {
+        let maybe_init: &mut [MaybeUninit<u8>] =
+            core::slice::from_raw_parts_mut(ptr as *mut _, len);
+        Self::uninit(maybe_init)
+    }
+
+    /// Write a single byte at the specified offset.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if `index` is out of bounds.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::buf::UninitSlice;
+    ///
+    /// let mut data = [b'f', b'o', b'o'];
+    /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };
+    ///
+    /// slice.write_byte(0, b'b');
+    ///
+    /// assert_eq!(b"boo", &data[..]);
+    /// ```
+    #[inline]
+    pub fn write_byte(&mut self, index: usize, byte: u8) {
+        assert!(index < self.len());
+
+        unsafe { self[index..].as_mut_ptr().write(byte) }
+    }
+
+    /// Copies bytes from `src` into `self`.
+    ///
+    /// The length of `src` must be the same as `self`.
+    ///
+    /// # Panics
+    ///
+    /// The function panics if `src` has a different length than `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::buf::UninitSlice;
+    ///
+    /// let mut data = [b'f', b'o', b'o'];
+    /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };
+    ///
+    /// slice.copy_from_slice(b"bar");
+    ///
+    /// assert_eq!(b"bar", &data[..]);
+    /// ```
+    #[inline]
+    pub fn copy_from_slice(&mut self, src: &[u8]) {
+        use core::ptr;
+
+        assert_eq!(self.len(), src.len());
+
+        unsafe {
+            ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
+        }
+    }
+
+    /// Return a raw pointer to the slice's buffer.
+    ///
+    /// # Safety
+    ///
+    /// The caller **must not** read from the referenced memory and **must not**
+    /// write **uninitialized** bytes to the slice either.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut data = [0, 1, 2];
+    /// let mut slice = &mut data[..];
+    /// let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr();
+    /// ```
+    #[inline]
+    pub fn as_mut_ptr(&mut self) -> *mut u8 {
+        self.0.as_mut_ptr() as *mut _
+    }
+
+    /// Return a `&mut [MaybeUninit<u8>]` to this slice's buffer.
+    ///
+    /// # Safety
+    ///
+    /// The caller **must not** read from the referenced memory and **must not** write
+    /// **uninitialized** bytes to the slice either. This is because `BufMut` implementation
+    /// that created the `UninitSlice` knows which parts are initialized. Writing uninitialized
+    /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined
+    /// behavior.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut data = [0, 1, 2];
+    /// let mut slice = &mut data[..];
+    /// unsafe {
+    ///     let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
+    /// };
+    /// ```
+    #[inline]
+    pub unsafe fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+        &mut self.0
+    }
+
+    /// Returns the number of bytes in the slice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut data = [0, 1, 2];
+    /// let mut slice = &mut data[..];
+    /// let len = BufMut::chunk_mut(&mut slice).len();
+    ///
+    /// assert_eq!(len, 3);
+    /// ```
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+}
+
+impl fmt::Debug for UninitSlice {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("UninitSlice[...]").finish()
+    }
+}
+
+impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice {
+    fn from(slice: &'a mut [u8]) -> Self {
+        UninitSlice::new(slice)
+    }
+}
+
+impl<'a> From<&'a mut [MaybeUninit<u8>]> for &'a mut UninitSlice {
+    fn from(slice: &'a mut [MaybeUninit<u8>]) -> Self {
+        UninitSlice::uninit(slice)
+    }
+}
+
+macro_rules! impl_index {
+    ($($t:ty),*) => {
+        $(
+            impl Index<$t> for UninitSlice {
+                type Output = UninitSlice;
+
+                #[inline]
+                fn index(&self, index: $t) -> &UninitSlice {
+                    UninitSlice::uninit_ref(&self.0[index])
+                }
+            }
+
+            impl IndexMut<$t> for UninitSlice {
+                #[inline]
+                fn index_mut(&mut self, index: $t) -> &mut UninitSlice {
+                    UninitSlice::uninit(&mut self.0[index])
+                }
+            }
+        )*
+    };
+}
+
+impl_index!(
+    Range<usize>,
+    RangeFrom<usize>,
+    RangeFull,
+    RangeInclusive<usize>,
+    RangeTo<usize>,
+    RangeToInclusive<usize>
+);
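+// A minimal sketch of the range indexing above: sub-ranges of an `UninitSlice`
+// are themselves `UninitSlice`s, so different regions of a buffer can be
+// filled independently without exposing uninitialized memory.
+//
+// ```
+// use bytes::buf::UninitSlice;
+//
+// let mut data = [0u8; 4];
+// let slice = UninitSlice::new(&mut data[..]);
+//
+// slice[..2].copy_from_slice(b"hi");
+// slice[2..].copy_from_slice(b"!!");
+//
+// assert_eq!(&data[..], b"hi!!");
+// ```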
+
\ No newline at end of file diff --git a/src/bytes/buf/vec_deque.rs.html b/src/bytes/buf/vec_deque.rs.html new file mode 100644 index 000000000..120b9dea7 --- /dev/null +++ b/src/bytes/buf/vec_deque.rs.html @@ -0,0 +1,45 @@ +vec_deque.rs - source
use alloc::collections::VecDeque;
+
+use super::Buf;
+
+impl Buf for VecDeque<u8> {
+    fn remaining(&self) -> usize {
+        self.len()
+    }
+
+    fn chunk(&self) -> &[u8] {
+        let (s1, s2) = self.as_slices();
+        if s1.is_empty() {
+            s2
+        } else {
+            s1
+        }
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        self.drain(..cnt);
+    }
+}
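+// A minimal sketch of the `VecDeque<u8>` implementation above: `chunk` returns
+// the front contiguous slice (which may be only part of the queue when the
+// ring buffer has wrapped), and `advance` drains consumed bytes from the front.
+//
+// ```
+// use bytes::Buf;
+// use std::collections::VecDeque;
+//
+// let mut deque: VecDeque<u8> = VecDeque::from(vec![b'a', b'b', b'c']);
+//
+// assert_eq!(deque.remaining(), 3);
+// assert_eq!(deque.get_u8(), b'a');
+// assert_eq!(deque.chunk(), &b"bc"[..]);
+// ```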
+
\ No newline at end of file diff --git a/src/bytes/buf/writer.rs.html b/src/bytes/buf/writer.rs.html new file mode 100644 index 000000000..b17763086 --- /dev/null +++ b/src/bytes/buf/writer.rs.html @@ -0,0 +1,177 @@ +writer.rs - source
use crate::BufMut;
+
+use std::{cmp, io};
+
+/// A `BufMut` adapter which implements `io::Write` for the inner value.
+///
+/// This struct is generally created by calling `writer()` on `BufMut`. See
+/// documentation of [`writer()`](BufMut::writer) for more
+/// details.
+#[derive(Debug)]
+pub struct Writer<B> {
+    buf: B,
+}
+
+pub fn new<B>(buf: B) -> Writer<B> {
+    Writer { buf }
+}
+
+impl<B: BufMut> Writer<B> {
+    /// Gets a reference to the underlying `BufMut`.
+    ///
+    /// It is inadvisable to directly write to the underlying `BufMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::BufMut;
+    ///
+    /// let buf = Vec::with_capacity(1024).writer();
+    ///
+    /// assert_eq!(1024, buf.get_ref().capacity());
+    /// ```
+    pub fn get_ref(&self) -> &B {
+        &self.buf
+    }
+
+    /// Gets a mutable reference to the underlying `BufMut`.
+    ///
+    /// It is inadvisable to directly write to the underlying `BufMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![].writer();
+    ///
+    /// buf.get_mut().reserve(1024);
+    ///
+    /// assert_eq!(1024, buf.get_ref().capacity());
+    /// ```
+    pub fn get_mut(&mut self) -> &mut B {
+        &mut self.buf
+    }
+
+    /// Consumes this `Writer`, returning the underlying value.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::BufMut;
+    /// use std::io;
+    ///
+    /// let mut buf = vec![].writer();
+    /// let mut src = &b"hello world"[..];
+    ///
+    /// io::copy(&mut src, &mut buf).unwrap();
+    ///
+    /// let buf = buf.into_inner();
+    /// assert_eq!(*buf, b"hello world"[..]);
+    /// ```
+    pub fn into_inner(self) -> B {
+        self.buf
+    }
+}
+
+impl<B: BufMut + Sized> io::Write for Writer<B> {
+    fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+        let n = cmp::min(self.buf.remaining_mut(), src.len());
+
+        self.buf.put(&src[0..n]);
+        Ok(n)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
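+// A minimal sketch of the `Write` implementation above: `write` forwards to
+// `put` and never errors; when the inner `BufMut` runs out of space the write
+// is simply truncated and the shorter length is returned.
+//
+// ```
+// use bytes::BufMut;
+// use std::io::Write;
+//
+// let mut arr = [0u8; 3];
+// let mut writer = (&mut arr[..]).writer();
+//
+// let n = writer.write(b"hello").unwrap();
+// assert_eq!(n, 3);
+// assert_eq!(&arr[..], b"hel");
+// ```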
+
\ No newline at end of file diff --git a/src/bytes/bytes.rs.html b/src/bytes/bytes.rs.html new file mode 100644 index 000000000..1dc0e02e3 --- /dev/null +++ b/src/bytes/bytes.rs.html @@ -0,0 +1,2965 @@ +bytes.rs - source
use core::iter::FromIterator;
+use core::mem::{self, ManuallyDrop};
+use core::ops::{Deref, RangeBounds};
+use core::{cmp, fmt, hash, ptr, slice, usize};
+
+use alloc::{
+    alloc::{dealloc, Layout},
+    borrow::Borrow,
+    boxed::Box,
+    string::String,
+    vec::Vec,
+};
+
+use crate::buf::IntoIter;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use crate::{offset_from, Buf, BytesMut};
+
+/// A cheaply cloneable and sliceable chunk of contiguous memory.
+///
+/// `Bytes` is an efficient container for storing and operating on contiguous
+/// slices of memory. It is intended for use primarily in networking code, but
+/// could have applications elsewhere as well.
+///
+/// `Bytes` values facilitate zero-copy network programming by allowing multiple
+/// `Bytes` objects to point to the same underlying memory.
+///
+/// `Bytes` does not have a single implementation. It is an interface, whose
+/// exact behavior is implemented through dynamic dispatch in several underlying
+/// implementations of `Bytes`.
+///
+/// All `Bytes` implementations must fulfill the following requirements:
+/// - They are cheaply cloneable and thereby shareable between an unlimited number
+///   of components, for example by modifying a reference count.
+/// - Instances can be sliced to refer to a subset of the original buffer.
+///
+/// ```
+/// use bytes::Bytes;
+///
+/// let mut mem = Bytes::from("Hello world");
+/// let a = mem.slice(0..5);
+///
+/// assert_eq!(a, "Hello");
+///
+/// let b = mem.split_to(6);
+///
+/// assert_eq!(mem, "world");
+/// assert_eq!(b, "Hello ");
+/// ```
+///
+/// # Memory layout
+///
+/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
+/// to track information about which segment of the underlying memory the
+/// `Bytes` handle has access to.
+///
+/// `Bytes` keeps both a pointer to the shared state containing the full memory
+/// slice and a pointer to the start of the region visible by the handle.
+/// `Bytes` also tracks the length of its view into the memory.
+///
+/// # Sharing
+///
+/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
+/// how sharing/cloning is implemented in detail.
+/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
+/// cloning the backing storage in order to share it behind multiple `Bytes`
+/// instances.
+///
+/// For `Bytes` implementations which refer to constant memory (e.g. created
+/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
+///
+/// For `Bytes` implementations which point to a reference counted shared storage
+/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
+/// reference count.
+///
+/// Due to this mechanism, multiple `Bytes` instances may point to the same
+/// shared memory region.
+/// Each `Bytes` instance can point to different sections within that
+/// memory region, and `Bytes` instances may or may not have overlapping views
+/// into the memory.
+///
+/// The following diagram visualizes a scenario where 2 `Bytes` instances make
+/// use of an `Arc`-based backing storage, and provide access to different views:
+///
+/// ```text
+///
+///    Arc ptrs                   ┌─────────┐
+///    ________________________ / │ Bytes 2 │
+///   /                           └─────────┘
+///  /          ┌───────────┐     |         |
+/// |_________/ │  Bytes 1  │     |         |
+/// |           └───────────┘     |         |
+/// |           |           | ___/ data     | tail
+/// |      data |      tail |/              |
+/// v           v           v               v
+/// ┌─────┬─────┬───────────┬───────────────┬─────┐
+/// │ Arc │     │           │               │     │
+/// └─────┴─────┴───────────┴───────────────┴─────┘
+/// ```
+pub struct Bytes {
+    ptr: *const u8,
+    len: usize,
+    // inlined "trait object"
+    data: AtomicPtr<()>,
+    vtable: &'static Vtable,
+}
+
+pub(crate) struct Vtable {
+    /// fn(data, ptr, len)
+    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
+    /// fn(data, ptr, len)
+    ///
+    /// takes `Bytes` to value
+    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
+    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
+    /// fn(data)
+    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
+    /// fn(data, ptr, len)
+    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
+}
+
+impl Bytes {
+    /// Creates a new empty `Bytes`.
+    ///
+    /// This will not allocate and the returned `Bytes` handle will be empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let b = Bytes::new();
+    /// assert_eq!(&b[..], b"");
+    /// ```
+    #[inline]
+    #[cfg(not(all(loom, test)))]
+    pub const fn new() -> Self {
+        // Make it a named const to work around
+        // "unsizing casts are not allowed in const fn"
+        const EMPTY: &[u8] = &[];
+        Bytes::from_static(EMPTY)
+    }
+
+    /// Creates a new empty `Bytes`.
+    #[cfg(all(loom, test))]
+    pub fn new() -> Self {
+        const EMPTY: &[u8] = &[];
+        Bytes::from_static(EMPTY)
+    }
+
+    /// Creates a new `Bytes` from a static slice.
+    ///
+    /// The returned `Bytes` will point directly to the static slice. There is
+    /// no allocating or copying.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let b = Bytes::from_static(b"hello");
+    /// assert_eq!(&b[..], b"hello");
+    /// ```
+    #[inline]
+    #[cfg(not(all(loom, test)))]
+    pub const fn from_static(bytes: &'static [u8]) -> Self {
+        Bytes {
+            ptr: bytes.as_ptr(),
+            len: bytes.len(),
+            data: AtomicPtr::new(ptr::null_mut()),
+            vtable: &STATIC_VTABLE,
+        }
+    }
+
+    /// Creates a new `Bytes` from a static slice.
+    #[cfg(all(loom, test))]
+    pub fn from_static(bytes: &'static [u8]) -> Self {
+        Bytes {
+            ptr: bytes.as_ptr(),
+            len: bytes.len(),
+            data: AtomicPtr::new(ptr::null_mut()),
+            vtable: &STATIC_VTABLE,
+        }
+    }
+
+    /// Returns the number of bytes contained in this `Bytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let b = Bytes::from(&b"hello"[..]);
+    /// assert_eq!(b.len(), 5);
+    /// ```
+    #[inline]
+    pub const fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns true if the `Bytes` has a length of 0.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let b = Bytes::new();
+    /// assert!(b.is_empty());
+    /// ```
+    #[inline]
+    pub const fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Returns true if this is the only reference to the data.
+    ///
+    /// Always returns false if the data is backed by a static slice.
+    ///
+    /// The result of this method may be invalidated immediately if another
+    /// thread clones this value while this is being called. Ensure you have
+    /// unique access to this value (`&mut Bytes`) first if you need to be
+    /// certain the result is valid (i.e. for safety reasons).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let a = Bytes::from(vec![1, 2, 3]);
+    /// assert!(a.is_unique());
+    /// let b = a.clone();
+    /// assert!(!a.is_unique());
+    /// ```
+    pub fn is_unique(&self) -> bool {
+        unsafe { (self.vtable.is_unique)(&self.data) }
+    }
+
+    /// Creates a new `Bytes` instance from the given slice by copying it.
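+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let b = Bytes::copy_from_slice(b"hello");
+    /// assert_eq!(&b[..], b"hello");
+    /// ```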
+    pub fn copy_from_slice(data: &[u8]) -> Self {
+        data.to_vec().into()
+    }
+
+    /// Returns a slice of self for the provided range.
+    ///
+    /// This will increment the reference count for the underlying memory and
+    /// return a new `Bytes` handle set to the slice.
+    ///
+    /// This operation is `O(1)`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let a = Bytes::from(&b"hello world"[..]);
+    /// let b = a.slice(2..5);
+    ///
+    /// assert_eq!(&b[..], b"llo");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
+    /// will panic.
+    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
+        use core::ops::Bound;
+
+        let len = self.len();
+
+        let begin = match range.start_bound() {
+            Bound::Included(&n) => n,
+            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
+            Bound::Unbounded => 0,
+        };
+
+        let end = match range.end_bound() {
+            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
+            Bound::Excluded(&n) => n,
+            Bound::Unbounded => len,
+        };
+
+        assert!(
+            begin <= end,
+            "range start must not be greater than end: {:?} <= {:?}",
+            begin,
+            end,
+        );
+        assert!(
+            end <= len,
+            "range end out of bounds: {:?} <= {:?}",
+            end,
+            len,
+        );
+
+        if end == begin {
+            return Bytes::new();
+        }
+
+        let mut ret = self.clone();
+
+        ret.len = end - begin;
+        ret.ptr = unsafe { ret.ptr.add(begin) };
+
+        ret
+    }
+
+    /// Returns a slice of self that is equivalent to the given `subset`.
+    ///
+    /// When processing a `Bytes` buffer with other tools, one often gets a
+    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
+    /// This function turns that `&[u8]` into another `Bytes`, as if one had
+    /// called `self.slice()` with the offsets that correspond to `subset`.
+    ///
+    /// This operation is `O(1)`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let bytes = Bytes::from(&b"012345678"[..]);
+    /// let as_slice = bytes.as_ref();
+    /// let subset = &as_slice[2..6];
+    /// let subslice = bytes.slice_ref(&subset);
+    /// assert_eq!(&subslice[..], b"2345");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Requires that the given `subset` slice is in fact contained within the
+    /// `Bytes` buffer; otherwise this function will panic.
+    pub fn slice_ref(&self, subset: &[u8]) -> Self {
+        // Empty slice and empty Bytes may have their pointers reset
+        // so explicitly allow empty slice to be a subslice of any slice.
+        if subset.is_empty() {
+            return Bytes::new();
+        }
+
+        let bytes_p = self.as_ptr() as usize;
+        let bytes_len = self.len();
+
+        let sub_p = subset.as_ptr() as usize;
+        let sub_len = subset.len();
+
+        assert!(
+            sub_p >= bytes_p,
+            "subset pointer ({:p}) is smaller than self pointer ({:p})",
+            subset.as_ptr(),
+            self.as_ptr(),
+        );
+        assert!(
+            sub_p + sub_len <= bytes_p + bytes_len,
+            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
+            self.as_ptr(),
+            bytes_len,
+            subset.as_ptr(),
+            sub_len,
+        );
+
+        let sub_offset = sub_p - bytes_p;
+
+        self.slice(sub_offset..(sub_offset + sub_len))
+    }
+
+    /// Splits the bytes into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
+    /// contains elements `[at, len)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut a = Bytes::from(&b"hello world"[..]);
+    /// let b = a.split_off(5);
+    ///
+    /// assert_eq!(&a[..], b"hello");
+    /// assert_eq!(&b[..], b" world");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`.
+    #[must_use = "consider Bytes::truncate if you don't need the other half"]
+    pub fn split_off(&mut self, at: usize) -> Self {
+        if at == self.len() {
+            return Bytes::new();
+        }
+
+        if at == 0 {
+            return mem::replace(self, Bytes::new());
+        }
+
+        assert!(
+            at <= self.len(),
+            "split_off out of bounds: {:?} <= {:?}",
+            at,
+            self.len(),
+        );
+
+        let mut ret = self.clone();
+
+        self.len = at;
+
+        unsafe { ret.inc_start(at) };
+
+        ret
+    }
+
+    /// Splits the bytes into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[at, len)`, and the returned
+    /// `Bytes` contains elements `[0, at)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut a = Bytes::from(&b"hello world"[..]);
+    /// let b = a.split_to(5);
+    ///
+    /// assert_eq!(&a[..], b" world");
+    /// assert_eq!(&b[..], b"hello");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`.
+    #[must_use = "consider Bytes::advance if you don't need the other half"]
+    pub fn split_to(&mut self, at: usize) -> Self {
+        if at == self.len() {
+            return mem::replace(self, Bytes::new());
+        }
+
+        if at == 0 {
+            return Bytes::new();
+        }
+
+        assert!(
+            at <= self.len(),
+            "split_to out of bounds: {:?} <= {:?}",
+            at,
+            self.len(),
+        );
+
+        let mut ret = self.clone();
+
+        unsafe { self.inc_start(at) };
+
+        ret.len = at;
+        ret
+    }
+
+    /// Shortens the buffer, keeping the first `len` bytes and dropping the
+    /// rest.
+    ///
+    /// If `len` is greater than the buffer's current length, this has no
+    /// effect.
+    ///
+    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
+    /// excess bytes to be returned instead of dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut buf = Bytes::from(&b"hello world"[..]);
+    /// buf.truncate(5);
+    /// assert_eq!(buf, b"hello"[..]);
+    /// ```
+    #[inline]
+    pub fn truncate(&mut self, len: usize) {
+        if len < self.len {
+            // The Vec "promotable" vtables do not store the capacity,
+            // so we cannot truncate while using this repr. We *have* to
+            // promote using `split_off` so the capacity can be stored.
+            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
+                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
+            {
+                drop(self.split_off(len));
+            } else {
+                self.len = len;
+            }
+        }
+    }
+
+    /// Clears the buffer, removing all data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut buf = Bytes::from(&b"hello world"[..]);
+    /// buf.clear();
+    /// assert!(buf.is_empty());
+    /// ```
+    #[inline]
+    pub fn clear(&mut self) {
+        self.truncate(0);
+    }
+
+    /// Try to convert self into `BytesMut`.
+    ///
+    /// If `self` is unique for the entire original buffer, this will succeed
+    /// and return a `BytesMut` with the contents of `self` without copying.
+    /// If `self` is not unique for the entire original buffer, this will fail
+    /// and return self.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Bytes, BytesMut};
+    ///
+    /// let bytes = Bytes::from(b"hello".to_vec());
+    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
+    /// ```
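+    ///
+    /// A live clone keeps the buffer shared, so the conversion fails and hands
+    /// the `Bytes` back unchanged (a minimal sketch):
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let bytes = Bytes::from(b"hello".to_vec());
+    /// let _shared = bytes.clone();
+    /// assert!(bytes.try_into_mut().is_err());
+    /// ```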
+    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
+        if self.is_unique() {
+            Ok(self.into())
+        } else {
+            Err(self)
+        }
+    }
+
+    #[inline]
+    pub(crate) unsafe fn with_vtable(
+        ptr: *const u8,
+        len: usize,
+        data: AtomicPtr<()>,
+        vtable: &'static Vtable,
+    ) -> Bytes {
+        Bytes {
+            ptr,
+            len,
+            data,
+            vtable,
+        }
+    }
+
+    // private
+
+    #[inline]
+    fn as_slice(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self.ptr, self.len) }
+    }
+
+    #[inline]
+    unsafe fn inc_start(&mut self, by: usize) {
+        // should already be asserted, but debug assert for tests
+        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
+        self.len -= by;
+        self.ptr = self.ptr.add(by);
+    }
+}
+
+// Vtable must enforce this behavior
+unsafe impl Send for Bytes {}
+unsafe impl Sync for Bytes {}
+
+impl Drop for Bytes {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
+    }
+}
+
+impl Clone for Bytes {
+    #[inline]
+    fn clone(&self) -> Bytes {
+        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
+    }
+}
+
+impl Buf for Bytes {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.len()
+    }
+
+    #[inline]
+    fn chunk(&self) -> &[u8] {
+        self.as_slice()
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        assert!(
+            cnt <= self.len(),
+            "cannot advance past `remaining`: {:?} <= {:?}",
+            cnt,
+            self.len(),
+        );
+
+        unsafe {
+            self.inc_start(cnt);
+        }
+    }
+
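+    // Overridden so this stays zero-copy: the default `Buf::copy_to_bytes`
+    // would copy `len` bytes into a fresh buffer, while `split_to` here just
+    // bumps the reference count and adjusts pointer and length.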
+    fn copy_to_bytes(&mut self, len: usize) -> Self {
+        self.split_to(len)
+    }
+}
+
+impl Deref for Bytes {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        self.as_slice()
+    }
+}
+
+impl AsRef<[u8]> for Bytes {
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        self.as_slice()
+    }
+}
+
+impl hash::Hash for Bytes {
+    fn hash<H>(&self, state: &mut H)
+    where
+        H: hash::Hasher,
+    {
+        self.as_slice().hash(state);
+    }
+}
+
+impl Borrow<[u8]> for Bytes {
+    fn borrow(&self) -> &[u8] {
+        self.as_slice()
+    }
+}
+
+impl IntoIterator for Bytes {
+    type Item = u8;
+    type IntoIter = IntoIter<Bytes>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter::new(self)
+    }
+}
+
+impl<'a> IntoIterator for &'a Bytes {
+    type Item = &'a u8;
+    type IntoIter = core::slice::Iter<'a, u8>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.as_slice().iter()
+    }
+}
+
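+/// Collecting bytes first builds a `Vec<u8>` and then converts it via the
+/// `From<Vec<u8>>` impl below; a minimal sketch:
+///
+/// ```
+/// use bytes::Bytes;
+///
+/// let b: Bytes = (0u8..4).collect();
+/// assert_eq!(&b[..], &[0u8, 1, 2, 3][..]);
+/// ```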
+impl FromIterator<u8> for Bytes {
+    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+        Vec::from_iter(into_iter).into()
+    }
+}
+
+// impl Eq
+
+impl PartialEq for Bytes {
+    fn eq(&self, other: &Bytes) -> bool {
+        self.as_slice() == other.as_slice()
+    }
+}
+
+impl PartialOrd for Bytes {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other.as_slice())
+    }
+}
+
+impl Ord for Bytes {
+    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
+        self.as_slice().cmp(other.as_slice())
+    }
+}
+
+impl Eq for Bytes {}
+
+impl PartialEq<[u8]> for Bytes {
+    fn eq(&self, other: &[u8]) -> bool {
+        self.as_slice() == other
+    }
+}
+
+impl PartialOrd<[u8]> for Bytes {
+    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other)
+    }
+}
+
+impl PartialEq<Bytes> for [u8] {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for [u8] {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+    }
+}
+
+impl PartialEq<str> for Bytes {
+    fn eq(&self, other: &str) -> bool {
+        self.as_slice() == other.as_bytes()
+    }
+}
+
+impl PartialOrd<str> for Bytes {
+    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other.as_bytes())
+    }
+}
+
+impl PartialEq<Bytes> for str {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for str {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+    }
+}
+
+impl PartialEq<Vec<u8>> for Bytes {
+    fn eq(&self, other: &Vec<u8>) -> bool {
+        *self == other[..]
+    }
+}
+
+impl PartialOrd<Vec<u8>> for Bytes {
+    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(&other[..])
+    }
+}
+
+impl PartialEq<Bytes> for Vec<u8> {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for Vec<u8> {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+    }
+}
+
+impl PartialEq<String> for Bytes {
+    fn eq(&self, other: &String) -> bool {
+        *self == other[..]
+    }
+}
+
+impl PartialOrd<String> for Bytes {
+    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other.as_bytes())
+    }
+}
+
+impl PartialEq<Bytes> for String {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for String {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+    }
+}
+
+impl PartialEq<Bytes> for &[u8] {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for &[u8] {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+    }
+}
+
+impl PartialEq<Bytes> for &str {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for &str {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+    }
+}
+
+impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
+where
+    Bytes: PartialEq<T>,
+{
+    fn eq(&self, other: &&'a T) -> bool {
+        *self == **other
+    }
+}
+
+impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
+where
+    Bytes: PartialOrd<T>,
+{
+    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
+        self.partial_cmp(&**other)
+    }
+}
+
+// impl From
+
+impl Default for Bytes {
+    #[inline]
+    fn default() -> Bytes {
+        Bytes::new()
+    }
+}
+
+impl From<&'static [u8]> for Bytes {
+    fn from(slice: &'static [u8]) -> Bytes {
+        Bytes::from_static(slice)
+    }
+}
+
+impl From<&'static str> for Bytes {
+    fn from(slice: &'static str) -> Bytes {
+        Bytes::from_static(slice.as_bytes())
+    }
+}
+
+impl From<Vec<u8>> for Bytes {
+    fn from(vec: Vec<u8>) -> Bytes {
+        let mut vec = ManuallyDrop::new(vec);
+        let ptr = vec.as_mut_ptr();
+        let len = vec.len();
+        let cap = vec.capacity();
+
+        // Avoid an extra allocation if possible.
+        if len == cap {
+            let vec = ManuallyDrop::into_inner(vec);
+            return Bytes::from(vec.into_boxed_slice());
+        }
+
+        let shared = Box::new(Shared {
+            buf: ptr,
+            cap,
+            ref_cnt: AtomicUsize::new(1),
+        });
+
+        let shared = Box::into_raw(shared);
+        // The pointer should be aligned, so this assert should
+        // always succeed.
+        debug_assert!(
+            0 == (shared as usize & KIND_MASK),
+            "internal: Box<Shared> should have an aligned pointer",
+        );
+        Bytes {
+            ptr,
+            len,
+            data: AtomicPtr::new(shared as _),
+            vtable: &SHARED_VTABLE,
+        }
+    }
+}
+
+impl From<Box<[u8]>> for Bytes {
+    fn from(slice: Box<[u8]>) -> Bytes {
+        // Box<[u8]> doesn't contain a heap allocation for empty slices,
+        // so the pointer isn't aligned enough for the KIND_VEC stashing to
+        // work.
+        if slice.is_empty() {
+            return Bytes::new();
+        }
+
+        let len = slice.len();
+        let ptr = Box::into_raw(slice) as *mut u8;
+
+        if ptr as usize & 0x1 == 0 {
+            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
+            Bytes {
+                ptr,
+                len,
+                data: AtomicPtr::new(data.cast()),
+                vtable: &PROMOTABLE_EVEN_VTABLE,
+            }
+        } else {
+            Bytes {
+                ptr,
+                len,
+                data: AtomicPtr::new(ptr.cast()),
+                vtable: &PROMOTABLE_ODD_VTABLE,
+            }
+        }
+    }
+}
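+
+// Sketch of the even/odd split above: when the box pointer is even, its low
+// bit is free, so the KIND_VEC flag is stored in that bit of `data` and masked
+// off again on the way out; when the pointer is odd there is no spare bit, so
+// it is stored untouched and the non-zero low bit itself serves as the
+// KIND_VEC marker for the "odd" vtable.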
+
+impl From<Bytes> for BytesMut {
+    /// Convert self into `BytesMut`.
+    ///
+    /// If `bytes` is unique for the entire original buffer, this will return a
+    /// `BytesMut` with the contents of `bytes` without copying.
+    /// If `bytes` is not unique for the entire original buffer, this will copy
+    /// the subset of the original buffer covered by `bytes` into a new `BytesMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Bytes, BytesMut};
+    ///
+    /// let bytes = Bytes::from(b"hello".to_vec());
+    /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
+    /// ```
+    fn from(bytes: Bytes) -> Self {
+        let bytes = ManuallyDrop::new(bytes);
+        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
+    }
+}
+
+impl From<String> for Bytes {
+    fn from(s: String) -> Bytes {
+        Bytes::from(s.into_bytes())
+    }
+}
+
+impl From<Bytes> for Vec<u8> {
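+    /// Converts `bytes` into a `Vec<u8>`, reusing the existing allocation
+    /// where possible (for example, when this is the only handle to a heap
+    /// buffer); a minimal sketch:
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let bytes = Bytes::from(vec![1u8, 2, 3]);
+    /// let vec: Vec<u8> = bytes.into();
+    /// assert_eq!(vec, vec![1, 2, 3]);
+    /// ```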
+    fn from(bytes: Bytes) -> Vec<u8> {
+        let bytes = ManuallyDrop::new(bytes);
+        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
+    }
+}
+
+// ===== impl Vtable =====
+
+impl fmt::Debug for Vtable {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Vtable")
+            .field("clone", &(self.clone as *const ()))
+            .field("drop", &(self.drop as *const ()))
+            .finish()
+    }
+}
+
+// ===== impl StaticVtable =====
+
+const STATIC_VTABLE: Vtable = Vtable {
+    clone: static_clone,
+    to_vec: static_to_vec,
+    to_mut: static_to_mut,
+    is_unique: static_is_unique,
+    drop: static_drop,
+};
+
+unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let slice = slice::from_raw_parts(ptr, len);
+    Bytes::from_static(slice)
+}
+
+unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    let slice = slice::from_raw_parts(ptr, len);
+    slice.to_vec()
+}
+
+unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    let slice = slice::from_raw_parts(ptr, len);
+    BytesMut::from(slice)
+}
+
+fn static_is_unique(_: &AtomicPtr<()>) -> bool {
+    false
+}
+
+unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
+    // nothing to drop for &'static [u8]
+}
+
+// ===== impl PromotableVtable =====
+
+static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
+    clone: promotable_even_clone,
+    to_vec: promotable_even_to_vec,
+    to_mut: promotable_even_to_mut,
+    is_unique: promotable_is_unique,
+    drop: promotable_even_drop,
+};
+
+static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
+    clone: promotable_odd_clone,
+    to_vec: promotable_odd_to_vec,
+    to_mut: promotable_odd_to_mut,
+    is_unique: promotable_is_unique,
+    drop: promotable_odd_drop,
+};
+
+unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shallow_clone_arc(shared.cast(), ptr, len)
+    } else {
+        debug_assert_eq!(kind, KIND_VEC);
+        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+        shallow_clone_vec(data, shared, buf, ptr, len)
+    }
+}
+
+unsafe fn promotable_to_vec(
+    data: &AtomicPtr<()>,
+    ptr: *const u8,
+    len: usize,
+    f: fn(*mut ()) -> *mut u8,
+) -> Vec<u8> {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shared_to_vec_impl(shared.cast(), ptr, len)
+    } else {
+        // KIND_VEC: the view may have been advanced past the start of the
+        // original buffer, so recover the full capacity from the offset and
+        // move the remaining bytes back to the front before rebuilding the Vec.
+        debug_assert_eq!(kind, KIND_VEC);
+
+        let buf = f(shared);
+
+        let cap = offset_from(ptr, buf) + len;
+
+        // Copy back buffer
+        ptr::copy(ptr, buf, len);
+
+        Vec::from_raw_parts(buf, len, cap)
+    }
+}
+
+unsafe fn promotable_to_mut(
+    data: &AtomicPtr<()>,
+    ptr: *const u8,
+    len: usize,
+    f: fn(*mut ()) -> *mut u8,
+) -> BytesMut {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shared_to_mut_impl(shared.cast(), ptr, len)
+    } else {
+        // KIND_VEC is a view of an underlying buffer at a certain offset.
+        // The ptr + len always represents the end of that buffer.
+        // Before truncating it, it is first promoted to KIND_ARC.
+        // Thus, we can safely reconstruct a Vec from it without leaking memory.
+        debug_assert_eq!(kind, KIND_VEC);
+
+        let buf = f(shared);
+        let off = offset_from(ptr, buf);
+        let cap = off + len;
+        let v = Vec::from_raw_parts(buf, cap, cap);
+
+        let mut b = BytesMut::from_vec(v);
+        b.advance_unchecked(off);
+        b
+    }
+}
+
+unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    promotable_to_vec(data, ptr, len, |shared| {
+        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+    })
+}
+
+unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    promotable_to_mut(data, ptr, len, |shared| {
+        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+    })
+}
+
+unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
+    data.with_mut(|shared| {
+        let shared = *shared;
+        let kind = shared as usize & KIND_MASK;
+
+        if kind == KIND_ARC {
+            release_shared(shared.cast());
+        } else {
+            debug_assert_eq!(kind, KIND_VEC);
+            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+            free_boxed_slice(buf, ptr, len);
+        }
+    });
+}
+
+unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shallow_clone_arc(shared as _, ptr, len)
+    } else {
+        debug_assert_eq!(kind, KIND_VEC);
+        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
+    }
+}
+
+unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    promotable_to_vec(data, ptr, len, |shared| shared.cast())
+}
+
+unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    promotable_to_mut(data, ptr, len, |shared| shared.cast())
+}
+
+unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
+    data.with_mut(|shared| {
+        let shared = *shared;
+        let kind = shared as usize & KIND_MASK;
+
+        if kind == KIND_ARC {
+            release_shared(shared.cast());
+        } else {
+            debug_assert_eq!(kind, KIND_VEC);
+
+            free_boxed_slice(shared.cast(), ptr, len);
+        }
+    });
+}
+
+unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
+        ref_cnt == 1
+    } else {
+        true
+    }
+}
+
+unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
+    let cap = offset_from(offset, buf) + len;
+    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
+}
+
+// ===== impl SharedVtable =====
+
+struct Shared {
+    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
+    buf: *mut u8,
+    cap: usize,
+    ref_cnt: AtomicUsize,
+}
+
+impl Drop for Shared {
+    fn drop(&mut self) {
+        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
+    }
+}
+
+// Assert that the alignment of `Shared` is divisible by 2.
+// This is a necessary invariant, since we rely on the pointer of a heap
+// allocated `Shared` object to implicitly carry the `KIND_ARC` flag: the
+// flag is set when the LSB is 0, which an alignment of at least 2 guarantees.
+const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
+
+static SHARED_VTABLE: Vtable = Vtable {
+    clone: shared_clone,
+    to_vec: shared_to_vec,
+    to_mut: shared_to_mut,
+    is_unique: shared_is_unique,
+    drop: shared_drop,
+};
+
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
+
+unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let shared = data.load(Ordering::Relaxed);
+    shallow_clone_arc(shared as _, ptr, len)
+}
+
+unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
+    // Check that the ref_cnt is 1 (unique).
+    //
+    // If it is unique, then it is set to 0 with AcqRel fence for the same
+    // reason in release_shared.
+    //
+    // Otherwise, we take the other branch and call release_shared.
+    if (*shared)
+        .ref_cnt
+        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
+        .is_ok()
+    {
+        // Deallocate the `Shared` instance without running its destructor.
+        let shared = *Box::from_raw(shared);
+        let shared = ManuallyDrop::new(shared);
+        let buf = shared.buf;
+        let cap = shared.cap;
+
+        // Copy back buffer
+        ptr::copy(ptr, buf, len);
+
+        Vec::from_raw_parts(buf, len, cap)
+    } else {
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        v
+    }
+}
+
+unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
+unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
+    // The goal is to check if the current handle is the only handle
+    // that currently has access to the buffer. This is done by
+    // checking if the `ref_cnt` is currently 1.
+    //
+    // The `Acquire` ordering synchronizes with the `Release` as
+    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+    // operation guarantees that any mutations done in other threads
+    // are ordered before the `ref_cnt` is decremented. As such,
+    // this `Acquire` will guarantee that those mutations are
+    // visible to the current thread.
+    //
+    // Otherwise, we take the other branch, copy the data and call `release_shared`.
+    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
+        // Deallocate the `Shared` instance without running its destructor.
+        let shared = *Box::from_raw(shared);
+        let shared = ManuallyDrop::new(shared);
+        let buf = shared.buf;
+        let cap = shared.cap;
+
+        // Rebuild Vec
+        let off = offset_from(ptr, buf);
+        let v = Vec::from_raw_parts(buf, len + off, cap);
+
+        let mut b = BytesMut::from_vec(v);
+        b.advance_unchecked(off);
+        b
+    } else {
+        // Copy the data from Shared in a new Vec, then release it
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        BytesMut::from_vec(v)
+    }
+}
+
+unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
+pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
+    let shared = data.load(Ordering::Acquire);
+    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
+    ref_cnt == 1
+}
+
+unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
+    data.with_mut(|shared| {
+        release_shared(shared.cast());
+    });
+}
+
+unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
+    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);
+
+    if old_size > usize::MAX >> 1 {
+        crate::abort();
+    }
+
+    Bytes {
+        ptr,
+        len,
+        data: AtomicPtr::new(shared as _),
+        vtable: &SHARED_VTABLE,
+    }
+}
+
+#[cold]
+unsafe fn shallow_clone_vec(
+    atom: &AtomicPtr<()>,
+    ptr: *const (),
+    buf: *mut u8,
+    offset: *const u8,
+    len: usize,
+) -> Bytes {
+    // If the buffer is still tracked in a `Vec<u8>`, it is time to promote
+    // the vec to an `Arc`. This could potentially be called concurrently, so
+    // some care must be taken.
+
+    // First, allocate a new `Shared` instance containing the
+    // `Vec` fields. It's important to note that `ptr`, `len`,
+    // and `cap` cannot be mutated without having `&mut self`.
+    // This means that these fields will not be concurrently
+    // updated and since the buffer hasn't been promoted to an
+    // `Arc`, those three fields still are the components of the
+    // vector.
+    let shared = Box::new(Shared {
+        buf,
+        cap: offset_from(offset, buf) + len,
+        // Initialize refcount to 2. One for this reference, and one
+        // for the new clone that will be returned from
+        // `shallow_clone`.
+        ref_cnt: AtomicUsize::new(2),
+    });
+
+    let shared = Box::into_raw(shared);
+
+    // The pointer should be aligned, so this assert should
+    // always succeed.
+    debug_assert!(
+        0 == (shared as usize & KIND_MASK),
+        "internal: Box<Shared> should have an aligned pointer",
+    );
+
+    // Try compare & swapping the pointer into the `arc` field.
+    // `Release` is used to synchronize with other threads that
+    // will load the `arc` field.
+    //
+    // If the `compare_exchange` fails, then the thread lost the
+    // race to promote the buffer to shared. The `Acquire`
+    // ordering will synchronize with the `compare_exchange`
+    // that happened in the other thread and the `Shared`
+    // pointed to by `actual` will be visible.
+    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
+        Ok(actual) => {
+            debug_assert!(actual as usize == ptr as usize);
+            // The upgrade was successful, the new handle can be
+            // returned.
+            Bytes {
+                ptr: offset,
+                len,
+                data: AtomicPtr::new(shared as _),
+                vtable: &SHARED_VTABLE,
+            }
+        }
+        Err(actual) => {
+            // The upgrade failed, a concurrent clone happened. Release
+            // the allocation that was made in this thread, it will not
+            // be needed.
+            let shared = Box::from_raw(shared);
+            mem::forget(*shared);
+
+            // Buffer already promoted to shared storage, so increment ref
+            // count.
+            shallow_clone_arc(actual as _, offset, len)
+        }
+    }
+}
+
+unsafe fn release_shared(ptr: *mut Shared) {
+    // `Shared` storage... follow the drop steps from Arc.
+    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
+        return;
+    }
+
+    // This fence is needed to prevent reordering of use of the data and
+    // deletion of the data.  Because it is marked `Release`, the decreasing
+    // of the reference count synchronizes with this `Acquire` fence. This
+    // means that use of the data happens before decreasing the reference
+    // count, which happens before this fence, which happens before the
+    // deletion of the data.
+    //
+    // As explained in the [Boost documentation][1],
+    //
+    // > It is important to enforce any possible access to the object in one
+    // > thread (through an existing reference) to *happen before* deleting
+    // > the object in a different thread. This is achieved by a "release"
+    // > operation after dropping a reference (any access to the object
+    // > through this reference must obviously happened before), and an
+    // > "acquire" operation before deleting the object.
+    //
+    // [1]: www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+    //
+    // Thread sanitizer does not support atomic fences. Use an atomic load
+    // instead.
+    (*ptr).ref_cnt.load(Ordering::Acquire);
+
+    // Drop the data
+    drop(Box::from_raw(ptr));
+}
+
+// Ideally we would always use this version of `ptr_map` since it is strict
+// provenance compatible, but it results in worse codegen. We will however still
+// use it on miri because it gives better diagnostics for people who test bytes
+// code with miri.
+//
+// See https://github.com/tokio-rs/bytes/pull/545 for more info.
+#[cfg(miri)]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+    F: FnOnce(usize) -> usize,
+{
+    let old_addr = ptr as usize;
+    let new_addr = f(old_addr);
+    let diff = new_addr.wrapping_sub(old_addr);
+    ptr.wrapping_add(diff)
+}
+
+#[cfg(not(miri))]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+    F: FnOnce(usize) -> usize,
+{
+    let old_addr = ptr as usize;
+    let new_addr = f(old_addr);
+    new_addr as *mut u8
+}
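+
+// For reference, the closures passed to `ptr_map` above tag or untag the low
+// bit of a suitably aligned pointer, mirroring the call sites in this file:
+//
+//     let tagged = ptr_map(ptr, |addr| addr | KIND_VEC);
+//     let untagged = ptr_map(tagged, |addr| addr & !KIND_MASK);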
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::Bytes;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = Bytes::from("hello world");
+///     b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::Bytes;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = Bytes::from("hello world");
+///     b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+    use loom::sync::Arc;
+    use loom::thread;
+
+    use super::Bytes;
+    #[test]
+    fn bytes_cloning_vec() {
+        loom::model(|| {
+            let a = Bytes::from(b"abcdefgh".to_vec());
+            let addr = a.as_ptr() as usize;
+
+            // test the Bytes::clone is Sync by putting it in an Arc
+            let a1 = Arc::new(a);
+            let a2 = a1.clone();
+
+            let t1 = thread::spawn(move || {
+                let b: Bytes = (*a1).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            let t2 = thread::spawn(move || {
+                let b: Bytes = (*a2).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            t1.join().unwrap();
+            t2.join().unwrap();
+        });
+    }
+}
+
\ No newline at end of file diff --git a/src/bytes/bytes_mut.rs.html b/src/bytes/bytes_mut.rs.html new file mode 100644 index 000000000..82ed6ed58 --- /dev/null +++ b/src/bytes/bytes_mut.rs.html @@ -0,0 +1,3833 @@ +bytes_mut.rs - source
+use core::iter::FromIterator;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+use core::{cmp, fmt, hash, isize, slice, usize};
+
+use alloc::{
+    borrow::{Borrow, BorrowMut},
+    boxed::Box,
+    string::String,
+    vec,
+    vec::Vec,
+};
+
+use crate::buf::{IntoIter, UninitSlice};
+use crate::bytes::Vtable;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use crate::{offset_from, Buf, BufMut, Bytes};
+
+/// A unique reference to a contiguous slice of memory.
+///
+/// `BytesMut` represents a unique view into a potentially shared memory region.
+/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
+/// mutate the memory.
+///
+/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
+/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
+/// same `buf` overlaps with its slice. That guarantee means that a write lock
+/// is not required.
+///
+/// # Growth
+///
+/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
+/// necessary. However, explicitly reserving the required space up-front before
+/// a series of inserts will be more efficient.
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::with_capacity(64);
+///
+/// buf.put_u8(b'h');
+/// buf.put_u8(b'e');
+/// buf.put(&b"llo"[..]);
+///
+/// assert_eq!(&buf[..], b"hello");
+///
+/// // Freeze the buffer so that it can be shared
+/// let a = buf.freeze();
+///
+/// // This does not allocate, instead `b` points to the same memory.
+/// let b = a.clone();
+///
+/// assert_eq!(&a[..], b"hello");
+/// assert_eq!(&b[..], b"hello");
+/// ```
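+///
+/// Reserving the needed space up front, per the `Growth` note above (a small
+/// sketch):
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::new();
+/// buf.reserve(11);               // grow once, before the writes below
+/// buf.put(&b"hello world"[..]);  // fits within the reserved capacity
+/// assert_eq!(&buf[..], b"hello world");
+/// ```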
+pub struct BytesMut {
+    ptr: NonNull<u8>,
+    len: usize,
+    cap: usize,
+    data: *mut Shared,
+}
+
+// Thread-safe reference-counted container for the shared storage. This is
+// mostly the same as `std::sync::Arc` but without the weak counter. The ref
+// counting fns are based on the ones found in `std`.
+//
+// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
+// up making the overall code simpler and easier to reason about. This is due to
+// some of the logic around setting `Inner::arc` and other ways the `arc` field
+// is used. Using `Arc` ended up requiring a number of funky transmutes and
+// other shenanigans to make it work.
+struct Shared {
+    vec: Vec<u8>,
+    original_capacity_repr: usize,
+    ref_count: AtomicUsize,
+}
+
+// Assert that the alignment of `Shared` is divisible by 2.
+// This is a necessary invariant, since we rely on the pointer of a heap
+// allocated `Shared` object to implicitly carry the `KIND_ARC` flag: the
+// flag is set when the LSB is 0, which an alignment of at least 2 guarantees.
+const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
+
+// Buffer storage strategy flags.
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
+
+// The max original capacity value. Any `Bytes` allocated with a greater initial
+// capacity will default to this.
+const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
+// The original capacity algorithm will not take effect unless the originally
+// allocated capacity was at least 1kb in size.
+const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
+// The original capacity is stored in powers of 2 starting at 1kb to a max of
+// 64kb. Representing it as such requires only 3 bits of storage.
+const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
+const ORIGINAL_CAPACITY_OFFSET: usize = 2;
+
+const VEC_POS_OFFSET: usize = 5;
+// When the storage is in the `Vec` representation, the pointer can be advanced
+// by at most this value. This is because the storage available to track the
+// offset is a `usize` minus the bits used for the KIND flag and the
+// ORIGINAL_CAPACITY repr.
+const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
+const NOT_VEC_POS_MASK: usize = 0b11111;
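+
+// Taken together, when the handle is in the `Vec` representation the `data`
+// word packs (reading off the masks above; a sketch, low bits first):
+//
+//     bit  0       KIND flag (KIND_VEC = 1, KIND_ARC = 0)
+//     bits 2..=4   original capacity repr (ORIGINAL_CAPACITY_MASK)
+//     bits 5..     vec position, i.e. how far the view has been advanced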
+
+#[cfg(target_pointer_width = "64")]
+const PTR_WIDTH: usize = 64;
+#[cfg(target_pointer_width = "32")]
+const PTR_WIDTH: usize = 32;
+
+/*
+ *
+ * ===== BytesMut =====
+ *
+ */
+
+impl BytesMut {
+    /// Creates a new `BytesMut` with the specified capacity.
+    ///
+    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
+    /// without reallocating.
+    ///
+    /// It is important to note that this function does not specify the length
+    /// of the returned `BytesMut`, but only the capacity.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut bytes = BytesMut::with_capacity(64);
+    ///
+    /// // `bytes` contains no data, even though there is capacity
+    /// assert_eq!(bytes.len(), 0);
+    ///
+    /// bytes.put(&b"hello world"[..]);
+    ///
+    /// assert_eq!(&bytes[..], b"hello world");
+    /// ```
+    #[inline]
+    pub fn with_capacity(capacity: usize) -> BytesMut {
+        BytesMut::from_vec(Vec::with_capacity(capacity))
+    }
+
+    /// Creates a new `BytesMut` with default capacity.
+    ///
+    /// The resulting object has a length of 0 and an unspecified capacity.
+    /// This function does not allocate.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut bytes = BytesMut::new();
+    ///
+    /// assert_eq!(0, bytes.len());
+    ///
+    /// bytes.reserve(2);
+    /// bytes.put_slice(b"xy");
+    ///
+    /// assert_eq!(&b"xy"[..], &bytes[..]);
+    /// ```
+    #[inline]
+    pub fn new() -> BytesMut {
+        BytesMut::with_capacity(0)
+    }
+
+    /// Returns the number of bytes contained in this `BytesMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let b = BytesMut::from(&b"hello"[..]);
+    /// assert_eq!(b.len(), 5);
+    /// ```
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns true if the `BytesMut` has a length of 0.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let b = BytesMut::with_capacity(64);
+    /// assert!(b.is_empty());
+    /// ```
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let b = BytesMut::with_capacity(64);
+    /// assert_eq!(b.capacity(), 64);
+    /// ```
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        self.cap
+    }
+
+    /// Converts `self` into an immutable `Bytes`.
+    ///
+    /// The conversion is zero cost and is used to indicate that the slice
+    /// referenced by the handle will no longer be mutated. Once the conversion
+    /// is done, the handle can be cloned and shared across threads.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    /// use std::thread;
+    ///
+    /// let mut b = BytesMut::with_capacity(64);
+    /// b.put(&b"hello world"[..]);
+    /// let b1 = b.freeze();
+    /// let b2 = b1.clone();
+    ///
+    /// let th = thread::spawn(move || {
+    ///     assert_eq!(&b1[..], b"hello world");
+    /// });
+    ///
+    /// assert_eq!(&b2[..], b"hello world");
+    /// th.join().unwrap();
+    /// ```
+    #[inline]
+    pub fn freeze(self) -> Bytes {
+        let bytes = ManuallyDrop::new(self);
+        if bytes.kind() == KIND_VEC {
+            // Just re-use `Bytes` internal Vec vtable
+            unsafe {
+                let off = bytes.get_vec_pos();
+                let vec = rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off);
+                let mut b: Bytes = vec.into();
+                b.advance(off);
+                b
+            }
+        } else {
+            debug_assert_eq!(bytes.kind(), KIND_ARC);
+
+            let ptr = bytes.ptr.as_ptr();
+            let len = bytes.len;
+            let data = AtomicPtr::new(bytes.data.cast());
+            unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
+        }
+    }
+
+    /// Creates a new `BytesMut` containing `len` zeros.
+    ///
+    /// The resulting object has a length of `len` and a capacity greater
+    /// than or equal to `len`. The entire length of the object will be filled
+    /// with zeros.
+    ///
+    /// On some platforms or allocators this function may be faster than
+    /// a manual implementation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let zeros = BytesMut::zeroed(42);
+    ///
+    /// assert!(zeros.capacity() >= 42);
+    /// assert_eq!(zeros.len(), 42);
+    /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
+    /// ```
+    pub fn zeroed(len: usize) -> BytesMut {
+        BytesMut::from_vec(vec![0; len])
+    }
+
+    /// Splits the bytes into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[0, at)`, and the returned
+    /// `BytesMut` contains elements `[at, capacity)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count
+    /// and sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut a = BytesMut::from(&b"hello world"[..]);
+    /// let mut b = a.split_off(5);
+    ///
+    /// a[0] = b'j';
+    /// b[0] = b'!';
+    ///
+    /// assert_eq!(&a[..], b"jello");
+    /// assert_eq!(&b[..], b"!world");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > capacity`.
+    #[must_use = "consider BytesMut::truncate if you don't need the other half"]
+    pub fn split_off(&mut self, at: usize) -> BytesMut {
+        assert!(
+            at <= self.capacity(),
+            "split_off out of bounds: {:?} <= {:?}",
+            at,
+            self.capacity(),
+        );
+        unsafe {
+            let mut other = self.shallow_clone();
+            // SAFETY: We've checked that `at` <= `self.capacity()` above.
+            other.advance_unchecked(at);
+            self.cap = at;
+            self.len = cmp::min(self.len, at);
+            other
+        }
+    }
+
+    /// Removes the bytes from the current view, returning them in a new
+    /// `BytesMut` handle.
+    ///
+    /// Afterwards, `self` will be empty, but will retain any additional
+    /// capacity that it had before the operation. This is identical to
+    /// `self.split_to(self.len())`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut buf = BytesMut::with_capacity(1024);
+    /// buf.put(&b"hello world"[..]);
+    ///
+    /// let other = buf.split();
+    ///
+    /// assert!(buf.is_empty());
+    /// assert_eq!(1013, buf.capacity());
+    ///
+    /// assert_eq!(other, b"hello world"[..]);
+    /// ```
+    #[must_use = "consider BytesMut::clear if you don't need the other half"]
+    pub fn split(&mut self) -> BytesMut {
+        let len = self.len();
+        self.split_to(len)
+    }
+
+    /// Splits the buffer into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
+    /// contains elements `[0, at)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut a = BytesMut::from(&b"hello world"[..]);
+    /// let mut b = a.split_to(5);
+    ///
+    /// a[0] = b'!';
+    /// b[0] = b'j';
+    ///
+    /// assert_eq!(&a[..], b"!world");
+    /// assert_eq!(&b[..], b"jello");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`.
+    #[must_use = "consider BytesMut::advance if you don't need the other half"]
+    pub fn split_to(&mut self, at: usize) -> BytesMut {
+        assert!(
+            at <= self.len(),
+            "split_to out of bounds: {:?} <= {:?}",
+            at,
+            self.len(),
+        );
+
+        unsafe {
+            let mut other = self.shallow_clone();
+            // SAFETY: We've checked that `at` <= `self.len()` and we know that `self.len()` <=
+            // `self.capacity()`.
+            self.advance_unchecked(at);
+            other.cap = at;
+            other.len = at;
+            other
+        }
+    }
+
+    /// Shortens the buffer, keeping the first `len` bytes and dropping the
+    /// rest.
+    ///
+    /// If `len` is greater than the buffer's current length, this has no
+    /// effect.
+    ///
+    /// Existing underlying capacity is preserved.
+    ///
+    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
+    /// excess bytes to be returned instead of dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello world"[..]);
+    /// buf.truncate(5);
+    /// assert_eq!(buf, b"hello"[..]);
+    /// ```
+    pub fn truncate(&mut self, len: usize) {
+        if len <= self.len() {
+            // SAFETY: Shrinking the buffer cannot expose uninitialized bytes.
+            unsafe { self.set_len(len) };
+        }
+    }
+
+    /// Clears the buffer, removing all data. Existing capacity is preserved.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello world"[..]);
+    /// buf.clear();
+    /// assert!(buf.is_empty());
+    /// ```
+    pub fn clear(&mut self) {
+        // SAFETY: Setting the length to zero cannot expose uninitialized bytes.
+        unsafe { self.set_len(0) };
+    }
+
+    /// Resizes the buffer so that `len` is equal to `new_len`.
+    ///
+    /// If `new_len` is greater than `len`, the buffer is extended by the
+    /// difference with each additional byte set to `value`. If `new_len` is
+    /// less than `len`, the buffer is simply truncated.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::new();
+    ///
+    /// buf.resize(3, 0x1);
+    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
+    ///
+    /// buf.resize(2, 0x2);
+    /// assert_eq!(&buf[..], &[0x1, 0x1]);
+    ///
+    /// buf.resize(4, 0x3);
+    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
+    /// ```
+    pub fn resize(&mut self, new_len: usize, value: u8) {
+        let additional = if let Some(additional) = new_len.checked_sub(self.len()) {
+            additional
+        } else {
+            self.truncate(new_len);
+            return;
+        };
+
+        if additional == 0 {
+            return;
+        }
+
+        self.reserve(additional);
+        let dst = self.spare_capacity_mut().as_mut_ptr();
+        // SAFETY: `spare_capacity_mut` returns a valid, properly aligned pointer and we've
+        // reserved enough space to write `additional` bytes.
+        unsafe { ptr::write_bytes(dst, value, additional) };
+
+        // SAFETY: There are at least `new_len` initialized bytes in the buffer so no
+        // uninitialized bytes are being exposed.
+        unsafe { self.set_len(new_len) };
+    }
+
+    /// Sets the length of the buffer.
+    ///
+    /// This will explicitly set the size of the buffer without actually
+    /// modifying the data, so it is up to the caller to ensure that the data
+    /// has been initialized.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut b = BytesMut::from(&b"hello world"[..]);
+    ///
+    /// unsafe {
+    ///     b.set_len(5);
+    /// }
+    ///
+    /// assert_eq!(&b[..], b"hello");
+    ///
+    /// unsafe {
+    ///     b.set_len(11);
+    /// }
+    ///
+    /// assert_eq!(&b[..], b"hello world");
+    /// ```
+    #[inline]
+    pub unsafe fn set_len(&mut self, len: usize) {
+        debug_assert!(len <= self.cap, "set_len out of bounds");
+        self.len = len;
+    }
+
+    /// Reserves capacity for at least `additional` more bytes to be inserted
+    /// into the given `BytesMut`.
+    ///
+    /// More than `additional` bytes may be reserved in order to avoid frequent
+    /// reallocations. A call to `reserve` may result in an allocation.
+    ///
+    /// Before allocating new buffer space, the function will attempt to reclaim
+    /// space in the existing buffer. If the current handle references a view
+    /// into a larger original buffer, and all other handles referencing part
+    /// of the same original buffer have been dropped, then the current view
+    /// can be copied/shifted to the front of the buffer and the handle can take
+    /// ownership of the full buffer, provided that the full buffer is large
+    /// enough to fit the requested additional capacity.
+    ///
+    /// This optimization will only happen if shifting the data from the current
+    /// view to the front of the buffer is not too expensive in terms of the
+    /// (amortized) time required. The precise condition is subject to change;
+    /// as of now, the length of the data being shifted needs to be at least as
+    /// large as the distance that it's shifted by. If the current view is empty
+    /// and the original buffer is large enough to fit the requested additional
+    /// capacity, then reallocations will never happen.
+    ///
+    /// # Examples
+    ///
+    /// In the following example, a new buffer is allocated.
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello"[..]);
+    /// buf.reserve(64);
+    /// assert!(buf.capacity() >= 69);
+    /// ```
+    ///
+    /// In the following example, the existing buffer is reclaimed.
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut buf = BytesMut::with_capacity(128);
+    /// buf.put(&[0; 64][..]);
+    ///
+    /// let ptr = buf.as_ptr();
+    /// let other = buf.split();
+    ///
+    /// assert!(buf.is_empty());
+    /// assert_eq!(buf.capacity(), 64);
+    ///
+    /// drop(other);
+    /// buf.reserve(128);
+    ///
+    /// assert_eq!(buf.capacity(), 128);
+    /// assert_eq!(buf.as_ptr(), ptr);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity overflows `usize`.
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        let len = self.len();
+        let rem = self.capacity() - len;
+
+        if additional <= rem {
+            // The handle can already store at least `additional` more bytes, so
+            // there is no further work needed to be done.
+            return;
+        }
+
+        // will always succeed
+        let _ = self.reserve_inner(additional, true);
+    }
+
+    // In separate function to allow the short-circuits in `reserve` and `try_reclaim` to
+    // be inline-able. Significantly helps performance. Returns false if it did not succeed.
+    fn reserve_inner(&mut self, additional: usize, allocate: bool) -> bool {
+        let len = self.len();
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            // If there's enough free space before the start of the buffer, then
+            // just copy the data backwards and reuse the already-allocated
+            // space.
+            //
+            // Otherwise, since backed by a vector, use `Vec::reserve`
+            //
+            // We need to make sure that this optimization does not kill the
+            // amortized runtimes of BytesMut's operations.
+            unsafe {
+                let off = self.get_vec_pos();
+
+                // Only reuse space if we can satisfy the requested additional space.
+                //
+                // Also check if the value of `off` suggests that enough bytes
+                // have been read to account for the overhead of shifting all
+                // the data (in an amortized analysis).
+                // Hence the condition `off >= self.len()`.
+                //
+                // This condition also already implies that the buffer is going
+                // to be (at least) half-empty in the end; so we do not break
+                // the (amortized) runtime with future resizes of the underlying
+                // `Vec`.
+                //
+                // [For more details check issue #524, and PR #525.]
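+                //
+                // A worked example with illustrative numbers: if `cap == 48`,
+                // `len == 8`, `off == 16` and `additional == 40`, then
+                // `48 - 8 + 16 == 56 >= 40` and `16 >= 8`, so the 8 live bytes
+                // are copied back to the start of the allocation and `cap`
+                // grows to `48 + 16 == 64`.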
+                if self.capacity() - self.len() + off >= additional && off >= self.len() {
+                    // There's enough space, and it's not too much overhead:
+                    // reuse the space!
+                    //
+                    // Just move the pointer back to the start after copying
+                    // data back.
+                    let base_ptr = self.ptr.as_ptr().sub(off);
+                    // Since `off >= self.len()`, the two regions don't overlap.
+                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
+                    self.ptr = vptr(base_ptr);
+                    self.set_vec_pos(0);
+
+                    // Length stays constant, but since we moved backwards we
+                    // can gain capacity back.
+                    self.cap += off;
+                } else {
+                    if !allocate {
+                        return false;
+                    }
+                    // Not enough space, or reusing might be too much overhead:
+                    // allocate more space!
+                    let mut v =
+                        ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+                    v.reserve(additional);
+
+                    // Update the info
+                    self.ptr = vptr(v.as_mut_ptr().add(off));
+                    self.cap = v.capacity() - off;
+                    debug_assert_eq!(self.len, v.len() - off);
+                }
+
+                return true;
+            }
+        }
+
+        debug_assert_eq!(kind, KIND_ARC);
+        let shared: *mut Shared = self.data;
+
+        // Reserving involves abandoning the currently shared buffer and
+        // allocating a new vector with the requested capacity.
+        //
+        // Compute the new capacity
+        let mut new_cap = match len.checked_add(additional) {
+            Some(new_cap) => new_cap,
+            None if !allocate => return false,
+            None => panic!("overflow"),
+        };
+
+        unsafe {
+            // First, try to reclaim the buffer. This is possible if the current
+            // handle is the only outstanding handle pointing to the buffer.
+            if (*shared).is_unique() {
+                // This is the only handle to the buffer. It can be reclaimed.
+                // However, before doing the work of copying data, check to make
+                // sure that the vector has enough capacity.
+                let v = &mut (*shared).vec;
+
+                let v_capacity = v.capacity();
+                let ptr = v.as_mut_ptr();
+
+                let offset = offset_from(self.ptr.as_ptr(), ptr);
+
+                // Compare the condition in the `kind == KIND_VEC` case above
+                // for more details.
+                if v_capacity >= new_cap + offset {
+                    self.cap = new_cap;
+                    // no copy is necessary
+                } else if v_capacity >= new_cap && offset >= len {
+                    // The capacity is sufficient, and copying is not too much
+                    // overhead: reclaim the buffer!
+
+                    // `offset >= len` means: no overlap
+                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
+
+                    self.ptr = vptr(ptr);
+                    self.cap = v.capacity();
+                } else {
+                    if !allocate {
+                        return false;
+                    }
+                    // calculate offset
+                    let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
+
+                    // new_cap is calculated in terms of `BytesMut`, not the underlying
+                    // `Vec`, so it does not take the offset into account.
+                    //
+                    // Thus we have to manually add it here.
+                    new_cap = new_cap.checked_add(off).expect("overflow");
+
+                    // The vector capacity is not sufficient. The reserve request is
+                    // asking for more than the initial buffer capacity. Allocate more
+                    // than requested if `new_cap` is not much bigger than the current
+                    // capacity.
+                    //
+                    // In some situations, e.g. when `reserve_exact` has been
+                    // used, the buffer capacity could be below
+                    // `original_capacity`, so do a check.
+                    let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+                    new_cap = cmp::max(double, new_cap);
+
+                    // No space - allocate more
+                    //
+                    // The length field of `Shared::vec` is not used by the `BytesMut`;
+                    // instead we use the `len` field in the `BytesMut` itself. However,
+                    // when calling `reserve`, it doesn't guarantee that data stored in
+                    // the unused capacity of the vector is copied over to the new
+                    // allocation, so we need to ensure that we don't have any data we
+                    // care about in the unused capacity before calling `reserve`.
+                    debug_assert!(off + len <= v.capacity());
+                    v.set_len(off + len);
+                    v.reserve(new_cap - v.len());
+
+                    // Update the info
+                    self.ptr = vptr(v.as_mut_ptr().add(off));
+                    self.cap = v.capacity() - off;
+                }
+
+                return true;
+            }
+        }
+        if !allocate {
+            return false;
+        }
+
+        let original_capacity_repr = unsafe { (*shared).original_capacity_repr };
+        let original_capacity = original_capacity_from_repr(original_capacity_repr);
+
+        new_cap = cmp::max(new_cap, original_capacity);
+
+        // Create a new vector to store the data
+        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
+
+        // Copy the bytes
+        v.extend_from_slice(self.as_ref());
+
+        // Release the shared handle. This must be done *after* the bytes are
+        // copied.
+        unsafe { release_shared(shared) };
+
+        // Update self
+        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+        self.data = invalid_ptr(data);
+        self.ptr = vptr(v.as_mut_ptr());
+        self.cap = v.capacity();
+        debug_assert_eq!(self.len, v.len());
+        return true;
+    }
+
+    /// Attempts to cheaply reclaim already allocated capacity for at least `additional` more
+    /// bytes to be inserted into the given `BytesMut` and returns `true` if it succeeded.
+    ///
+    /// `try_reclaim` behaves exactly like `reserve`, except that it never allocates new storage
+    /// and returns a `bool` indicating whether it was successful in doing so:
+    ///
+    /// `try_reclaim` returns false under these conditions:
+    ///  - The spare capacity left is less than `additional` bytes AND
+    ///  - The existing allocation cannot be reclaimed cheaply or it was less than
+    ///    `additional` bytes in size
+    ///
+    /// Reclaiming the allocation cheaply is possible if the `BytesMut` has no outstanding
+    /// references through other `BytesMut`s or `Bytes` which point to the same underlying
+    /// storage.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(64);
+    /// assert_eq!(true, buf.try_reclaim(64));
+    /// assert_eq!(64, buf.capacity());
+    ///
+    /// buf.extend_from_slice(b"abcd");
+    /// let mut split = buf.split();
+    /// assert_eq!(60, buf.capacity());
+    /// assert_eq!(4, split.capacity());
+    /// assert_eq!(false, split.try_reclaim(64));
+    /// assert_eq!(false, buf.try_reclaim(64));
+    /// // The split buffer is filled with "abcd"
+    /// assert_eq!(false, split.try_reclaim(4));
+    /// // buf is empty and has capacity for 60 bytes
+    /// assert_eq!(true, buf.try_reclaim(60));
+    ///
+    /// drop(buf);
+    /// assert_eq!(false, split.try_reclaim(64));
+    ///
+    /// split.clear();
+    /// assert_eq!(4, split.capacity());
+    /// assert_eq!(true, split.try_reclaim(64));
+    /// assert_eq!(64, split.capacity());
+    /// ```
+    // I tried splitting out try_reclaim_inner after the short circuits, but it was inlined
+    // regardless with Rust 1.78.0 so probably not worth it
+    #[inline]
+    #[must_use = "consider BytesMut::reserve if you need an infallible reservation"]
+    pub fn try_reclaim(&mut self, additional: usize) -> bool {
+        let len = self.len();
+        let rem = self.capacity() - len;
+
+        if additional <= rem {
+            // The handle can already store at least `additional` more bytes, so
+            // there is no further work needed to be done.
+            return true;
+        }
+
+        self.reserve_inner(additional, false)
+    }
+
+    /// Appends given bytes to this `BytesMut`.
+    ///
+    /// If this `BytesMut` object does not have enough capacity, it is resized
+    /// first.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(0);
+    /// buf.extend_from_slice(b"aaabbb");
+    /// buf.extend_from_slice(b"cccddd");
+    ///
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    #[inline]
+    pub fn extend_from_slice(&mut self, extend: &[u8]) {
+        let cnt = extend.len();
+        self.reserve(cnt);
+
+        unsafe {
+            let dst = self.spare_capacity_mut();
+            // Reserved above
+            debug_assert!(dst.len() >= cnt);
+
+            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
+        }
+
+        unsafe {
+            self.advance_mut(cnt);
+        }
+    }
+
+    /// Absorbs a `BytesMut` that was previously split off.
+    ///
+    /// If the two `BytesMut` objects were previously contiguous and not mutated
+    /// in a way that causes re-allocation, i.e. if `other` was created by
+    /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
+    /// that just decreases a reference count and sets a few indices.
+    /// Otherwise this method degenerates to
+    /// `self.extend_from_slice(other.as_ref())`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(64);
+    /// buf.extend_from_slice(b"aaabbbcccddd");
+    ///
+    /// let split = buf.split_off(6);
+    /// assert_eq!(b"aaabbb", &buf[..]);
+    /// assert_eq!(b"cccddd", &split[..]);
+    ///
+    /// buf.unsplit(split);
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    pub fn unsplit(&mut self, other: BytesMut) {
+        if self.is_empty() {
+            *self = other;
+            return;
+        }
+
+        if let Err(other) = self.try_unsplit(other) {
+            self.extend_from_slice(other.as_ref());
+        }
+    }
+
+    // private
+
+    // For now, use a `Vec` to manage the memory for us, but we may want to
+    // change that in the future to some alternate allocator strategy.
+    //
+    // Thus, we don't expose an easy way to construct from a `Vec` since an
+    // internal change could make a simple pattern (`BytesMut::from(vec)`)
+    // suddenly a lot more expensive.
+    #[inline]
+    pub(crate) fn from_vec(vec: Vec<u8>) -> BytesMut {
+        let mut vec = ManuallyDrop::new(vec);
+        let ptr = vptr(vec.as_mut_ptr());
+        let len = vec.len();
+        let cap = vec.capacity();
+
+        let original_capacity_repr = original_capacity_to_repr(cap);
+        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+
+        BytesMut {
+            ptr,
+            len,
+            cap,
+            data: invalid_ptr(data),
+        }
+    }
+
+    #[inline]
+    fn as_slice(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
+    }
+
+    #[inline]
+    fn as_slice_mut(&mut self) -> &mut [u8] {
+        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
+    }
+
+    /// Advance the buffer without bounds checking.
+    ///
+    /// # SAFETY
+    ///
+    /// The caller must ensure that `count` <= `self.cap`.
+    pub(crate) unsafe fn advance_unchecked(&mut self, count: usize) {
+        // Setting the start to 0 is a no-op, so return early if this is the
+        // case.
+        if count == 0 {
+            return;
+        }
+
+        debug_assert!(count <= self.cap, "internal: advance_unchecked out of bounds");
+
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            // Setting the start when in vec representation is a little more
+            // complicated. First, we have to track how far ahead the
+            // "start" of the byte buffer from the beginning of the vec. We
+            // also have to ensure that we don't exceed the maximum shift.
+            let pos = self.get_vec_pos() + count;
+
+            if pos <= MAX_VEC_POS {
+                self.set_vec_pos(pos);
+            } else {
+                // The repr must be upgraded to ARC. This will never happen
+                // on 64 bit systems and will only happen on 32 bit systems
+                // when shifting past 134,217,727 bytes. As such, we don't
+                // worry too much about performance here.
+                self.promote_to_shared(/*ref_count = */ 1);
+            }
+        }
+
+        // Updating the start of the view is setting `ptr` to point to the
+        // new start and updating the `len` field to reflect the new length
+        // of the view.
+        self.ptr = vptr(self.ptr.as_ptr().add(count));
+        self.len = self.len.checked_sub(count).unwrap_or(0);
+        self.cap -= count;
+    }
+
+    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
+        if other.capacity() == 0 {
+            return Ok(());
+        }
+
+        let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
+        if ptr == other.ptr.as_ptr()
+            && self.kind() == KIND_ARC
+            && other.kind() == KIND_ARC
+            && self.data == other.data
+        {
+            // Contiguous blocks, just combine directly
+            self.len += other.len;
+            self.cap += other.cap;
+            Ok(())
+        } else {
+            Err(other)
+        }
+    }
+
+    #[inline]
+    fn kind(&self) -> usize {
+        self.data as usize & KIND_MASK
+    }
+
+    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
+        debug_assert_eq!(self.kind(), KIND_VEC);
+        debug_assert!(ref_cnt == 1 || ref_cnt == 2);
+
+        let original_capacity_repr =
+            (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
+
+        // The vec offset cannot be concurrently mutated, so there
+        // should be no danger reading it.
+        let off = (self.data as usize) >> VEC_POS_OFFSET;
+
+        // First, allocate a new `Shared` instance containing the
+        // `Vec` fields. It's important to note that `ptr`, `len`,
+        // and `cap` cannot be mutated without having `&mut self`.
+        // This means that these fields will not be concurrently
+        // updated and since the buffer hasn't been promoted to an
+        // `Arc`, those three fields still are the components of the
+        // vector.
+        let shared = Box::new(Shared {
+            vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
+            original_capacity_repr,
+            ref_count: AtomicUsize::new(ref_cnt),
+        });
+
+        let shared = Box::into_raw(shared);
+
+        // The pointer should be aligned, so this assert should
+        // always succeed.
+        debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
+
+        self.data = shared;
+    }
+
+    /// Makes an exact shallow clone of `self`.
+    ///
+    /// The kind of `self` doesn't matter, but this is unsafe
+    /// because the clone will have the same offsets. You must
+    /// ensure that the value returned to the user does not allow
+    /// two views into the same range.
+    #[inline]
+    unsafe fn shallow_clone(&mut self) -> BytesMut {
+        if self.kind() == KIND_ARC {
+            increment_shared(self.data);
+            ptr::read(self)
+        } else {
+            self.promote_to_shared(/*ref_count = */ 2);
+            ptr::read(self)
+        }
+    }
+
+    #[inline]
+    unsafe fn get_vec_pos(&self) -> usize {
+        debug_assert_eq!(self.kind(), KIND_VEC);
+
+        self.data as usize >> VEC_POS_OFFSET
+    }
+
+    #[inline]
+    unsafe fn set_vec_pos(&mut self, pos: usize) {
+        debug_assert_eq!(self.kind(), KIND_VEC);
+        debug_assert!(pos <= MAX_VEC_POS);
+
+        self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK));
+    }
+
+    /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
+    ///
+    /// The returned slice can be used to fill the buffer with data (e.g. by
+    /// reading from a file) before marking the data as initialized using the
+    /// [`set_len`] method.
+    ///
+    /// [`set_len`]: BytesMut::set_len
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// // Allocate buffer big enough for 10 bytes.
+    /// let mut buf = BytesMut::with_capacity(10);
+    ///
+    /// // Fill in the first 3 elements.
+    /// let uninit = buf.spare_capacity_mut();
+    /// uninit[0].write(0);
+    /// uninit[1].write(1);
+    /// uninit[2].write(2);
+    ///
+    /// // Mark the first 3 bytes of the buffer as being initialized.
+    /// unsafe {
+    ///     buf.set_len(3);
+    /// }
+    ///
+    /// assert_eq!(&buf[..], &[0, 1, 2]);
+    /// ```
+    #[inline]
+    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+        unsafe {
+            let ptr = self.ptr.as_ptr().add(self.len);
+            let len = self.cap - self.len;
+
+            slice::from_raw_parts_mut(ptr.cast(), len)
+        }
+    }
+}
+
+impl Drop for BytesMut {
+    fn drop(&mut self) {
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            unsafe {
+                let off = self.get_vec_pos();
+
+                // Vector storage, free the vector
+                let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
+            }
+        } else if kind == KIND_ARC {
+            unsafe { release_shared(self.data) };
+        }
+    }
+}
+
+impl Buf for BytesMut {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.len()
+    }
+
+    #[inline]
+    fn chunk(&self) -> &[u8] {
+        self.as_slice()
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        assert!(
+            cnt <= self.remaining(),
+            "cannot advance past `remaining`: {:?} <= {:?}",
+            cnt,
+            self.remaining(),
+        );
+        unsafe {
+            // SAFETY: We've checked that `cnt` <= `self.remaining()` and we know that
+            // `self.remaining()` <= `self.cap`.
+            self.advance_unchecked(cnt);
+        }
+    }
+
+    fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+        self.split_to(len).freeze()
+    }
+}
+
+unsafe impl BufMut for BytesMut {
+    #[inline]
+    fn remaining_mut(&self) -> usize {
+        usize::MAX - self.len()
+    }
+
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        let remaining = self.cap - self.len();
+        if cnt > remaining {
+            super::panic_advance(cnt, remaining);
+        }
+        // Addition won't overflow since it is at most `self.cap`.
+        self.len = self.len() + cnt;
+    }
+
+    #[inline]
+    fn chunk_mut(&mut self) -> &mut UninitSlice {
+        if self.capacity() == self.len() {
+            self.reserve(64);
+        }
+        self.spare_capacity_mut().into()
+    }
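+
+    // A minimal usage sketch of the growth rule above: calling `chunk_mut()`
+    // on an empty `BytesMut` reserves at least 64 bytes of spare capacity.
+    //
+    //     use bytes::{BufMut, BytesMut};
+    //
+    //     let mut buf = BytesMut::new();
+    //     assert!(buf.chunk_mut().len() >= 64);
+    //     assert!(buf.capacity() >= 64);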
+
+    // Specialize these methods so they can skip checking `remaining_mut`
+    // and `advance_mut`.
+
+    fn put<T: Buf>(&mut self, mut src: T)
+    where
+        Self: Sized,
+    {
+        while src.has_remaining() {
+            let s = src.chunk();
+            let l = s.len();
+            self.extend_from_slice(s);
+            src.advance(l);
+        }
+    }
+
+    fn put_slice(&mut self, src: &[u8]) {
+        self.extend_from_slice(src);
+    }
+
+    fn put_bytes(&mut self, val: u8, cnt: usize) {
+        self.reserve(cnt);
+        unsafe {
+            let dst = self.spare_capacity_mut();
+            // Reserved above
+            debug_assert!(dst.len() >= cnt);
+
+            ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
+
+            self.advance_mut(cnt);
+        }
+    }
+}
+
+impl AsRef<[u8]> for BytesMut {
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        self.as_slice()
+    }
+}
+
+impl Deref for BytesMut {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+impl AsMut<[u8]> for BytesMut {
+    #[inline]
+    fn as_mut(&mut self) -> &mut [u8] {
+        self.as_slice_mut()
+    }
+}
+
+impl DerefMut for BytesMut {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [u8] {
+        self.as_mut()
+    }
+}
+
+impl<'a> From<&'a [u8]> for BytesMut {
+    fn from(src: &'a [u8]) -> BytesMut {
+        BytesMut::from_vec(src.to_vec())
+    }
+}
+
+impl<'a> From<&'a str> for BytesMut {
+    fn from(src: &'a str) -> BytesMut {
+        BytesMut::from(src.as_bytes())
+    }
+}
+
+impl From<BytesMut> for Bytes {
+    fn from(src: BytesMut) -> Bytes {
+        src.freeze()
+    }
+}
+
+impl PartialEq for BytesMut {
+    fn eq(&self, other: &BytesMut) -> bool {
+        self.as_slice() == other.as_slice()
+    }
+}
+
+impl PartialOrd for BytesMut {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other.as_slice())
+    }
+}
+
+impl Ord for BytesMut {
+    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
+        self.as_slice().cmp(other.as_slice())
+    }
+}
+
+impl Eq for BytesMut {}
+
+impl Default for BytesMut {
+    #[inline]
+    fn default() -> BytesMut {
+        BytesMut::new()
+    }
+}
+
+impl hash::Hash for BytesMut {
+    fn hash<H>(&self, state: &mut H)
+    where
+        H: hash::Hasher,
+    {
+        let s: &[u8] = self.as_ref();
+        s.hash(state);
+    }
+}
+
+impl Borrow<[u8]> for BytesMut {
+    fn borrow(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+impl BorrowMut<[u8]> for BytesMut {
+    fn borrow_mut(&mut self) -> &mut [u8] {
+        self.as_mut()
+    }
+}
+
+impl fmt::Write for BytesMut {
+    #[inline]
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        if self.remaining_mut() >= s.len() {
+            self.put_slice(s.as_bytes());
+            Ok(())
+        } else {
+            Err(fmt::Error)
+        }
+    }
+
+    #[inline]
+    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
+        fmt::write(self, args)
+    }
+}
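+
+// A usage sketch for the `fmt::Write` impl above: `BytesMut` can serve as a
+// target for `write!`/`writeln!` without going through `std::io`.
+//
+//     use core::fmt::Write;
+//     use bytes::BytesMut;
+//
+//     let mut buf = BytesMut::new();
+//     write!(buf, "x = {}", 42).unwrap();
+//     assert_eq!(&buf[..], b"x = 42");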
+
+impl Clone for BytesMut {
+    fn clone(&self) -> BytesMut {
+        BytesMut::from(&self[..])
+    }
+}
+
+impl IntoIterator for BytesMut {
+    type Item = u8;
+    type IntoIter = IntoIter<BytesMut>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter::new(self)
+    }
+}
+
+impl<'a> IntoIterator for &'a BytesMut {
+    type Item = &'a u8;
+    type IntoIter = core::slice::Iter<'a, u8>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.as_ref().iter()
+    }
+}
+
+impl Extend<u8> for BytesMut {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = u8>,
+    {
+        let iter = iter.into_iter();
+
+        let (lower, _) = iter.size_hint();
+        self.reserve(lower);
+
+        // TODO: optimize
+        // 1. If self.kind() == KIND_VEC, use Vec::extend
+        for b in iter {
+            self.put_u8(b);
+        }
+    }
+}
+
+impl<'a> Extend<&'a u8> for BytesMut {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = &'a u8>,
+    {
+        self.extend(iter.into_iter().copied())
+    }
+}
+
+impl Extend<Bytes> for BytesMut {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = Bytes>,
+    {
+        for bytes in iter {
+            self.extend_from_slice(&bytes)
+        }
+    }
+}
+
+impl FromIterator<u8> for BytesMut {
+    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+        BytesMut::from_vec(Vec::from_iter(into_iter))
+    }
+}
+
+impl<'a> FromIterator<&'a u8> for BytesMut {
+    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
+        BytesMut::from_iter(into_iter.into_iter().copied())
+    }
+}
+
+/*
+ *
+ * ===== Inner =====
+ *
+ */
+
+unsafe fn increment_shared(ptr: *mut Shared) {
+    let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
+
+    if old_size > isize::MAX as usize {
+        crate::abort();
+    }
+}
+
+unsafe fn release_shared(ptr: *mut Shared) {
+    // `Shared` storage... follow the drop steps from Arc.
+    if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
+        return;
+    }
+
+    // This fence is needed to prevent reordering of use of the data and
+    // deletion of the data.  Because it is marked `Release`, the decreasing
+    // of the reference count synchronizes with this `Acquire` fence. This
+    // means that use of the data happens before decreasing the reference
+    // count, which happens before this fence, which happens before the
+    // deletion of the data.
+    //
+    // As explained in the [Boost documentation][1],
+    //
+    // > It is important to enforce any possible access to the object in one
+    // > thread (through an existing reference) to *happen before* deleting
+    // > the object in a different thread. This is achieved by a "release"
+    // > operation after dropping a reference (any access to the object
+    // > through this reference must obviously happened before), and an
+    // > "acquire" operation before deleting the object.
+    //
+    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+    //
+    // Thread sanitizer does not support atomic fences. Use an atomic load
+    // instead.
+    (*ptr).ref_count.load(Ordering::Acquire);
+
+    // Drop the data
+    drop(Box::from_raw(ptr));
+}
+
+impl Shared {
+    fn is_unique(&self) -> bool {
+        // The goal is to check if the current handle is the only handle
+        // that currently has access to the buffer. This is done by
+        // checking if the `ref_count` is currently 1.
+        //
+        // The `Acquire` ordering synchronizes with the `Release` as
+        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+        // operation guarantees that any mutations done in other threads
+        // are ordered before the `ref_count` is decremented. As such,
+        // this `Acquire` will guarantee that those mutations are
+        // visible to the current thread.
+        self.ref_count.load(Ordering::Acquire) == 1
+    }
+}
+
+#[inline]
+fn original_capacity_to_repr(cap: usize) -> usize {
+    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
+    cmp::min(
+        width,
+        MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+    )
+}
+
+fn original_capacity_from_repr(repr: usize) -> usize {
+    if repr == 0 {
+        return 0;
+    }
+
+    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
+}
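+
+// Round-trip sketch (illustrative; assumes `MIN_ORIGINAL_CAPACITY_WIDTH == 10`):
+//
+//     assert_eq!(original_capacity_to_repr(1024), 1);
+//     assert_eq!(original_capacity_from_repr(1), 1024);
+//     // Capacities below `1 << MIN_ORIGINAL_CAPACITY_WIDTH` collapse to repr 0.
+//     assert_eq!(original_capacity_to_repr(512), 0);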
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_original_capacity_to_repr() {
+        assert_eq!(original_capacity_to_repr(0), 0);
+
+        let max_width = 32;
+
+        for width in 1..(max_width + 1) {
+            let cap = 1 << (width - 1);
+
+            let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
+                0
+            } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
+                width - MIN_ORIGINAL_CAPACITY_WIDTH
+            } else {
+                MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
+            };
+
+            assert_eq!(original_capacity_to_repr(cap), expected);
+
+            if width > 1 {
+                assert_eq!(original_capacity_to_repr(cap + 1), expected);
+            }
+
+            //  MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
+            if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
+                assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
+                assert_eq!(original_capacity_to_repr(cap + 76), expected);
+            } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
+                assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
+                assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
+            }
+        }
+    }
+
+    #[test]
+    fn test_original_capacity_from_repr() {
+        assert_eq!(0, original_capacity_from_repr(0));
+
+        let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
+
+        assert_eq!(min_cap, original_capacity_from_repr(1));
+        assert_eq!(min_cap * 2, original_capacity_from_repr(2));
+        assert_eq!(min_cap * 4, original_capacity_from_repr(3));
+        assert_eq!(min_cap * 8, original_capacity_from_repr(4));
+        assert_eq!(min_cap * 16, original_capacity_from_repr(5));
+        assert_eq!(min_cap * 32, original_capacity_from_repr(6));
+        assert_eq!(min_cap * 64, original_capacity_from_repr(7));
+    }
+}
+
+unsafe impl Send for BytesMut {}
+unsafe impl Sync for BytesMut {}
+
+/*
+ *
+ * ===== PartialEq / PartialOrd =====
+ *
+ */
+
+impl PartialEq<[u8]> for BytesMut {
+    fn eq(&self, other: &[u8]) -> bool {
+        &**self == other
+    }
+}
+
+impl PartialOrd<[u8]> for BytesMut {
+    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
+        (**self).partial_cmp(other)
+    }
+}
+
+impl PartialEq<BytesMut> for [u8] {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for [u8] {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+    }
+}
+
+impl PartialEq<str> for BytesMut {
+    fn eq(&self, other: &str) -> bool {
+        &**self == other.as_bytes()
+    }
+}
+
+impl PartialOrd<str> for BytesMut {
+    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+        (**self).partial_cmp(other.as_bytes())
+    }
+}
+
+impl PartialEq<BytesMut> for str {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for str {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+    }
+}
+
+impl PartialEq<Vec<u8>> for BytesMut {
+    fn eq(&self, other: &Vec<u8>) -> bool {
+        *self == other[..]
+    }
+}
+
+impl PartialOrd<Vec<u8>> for BytesMut {
+    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
+        (**self).partial_cmp(&other[..])
+    }
+}
+
+impl PartialEq<BytesMut> for Vec<u8> {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for Vec<u8> {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl PartialEq<String> for BytesMut {
+    fn eq(&self, other: &String) -> bool {
+        *self == other[..]
+    }
+}
+
+impl PartialOrd<String> for BytesMut {
+    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
+        (**self).partial_cmp(other.as_bytes())
+    }
+}
+
+impl PartialEq<BytesMut> for String {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for String {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+    }
+}
+
+impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
+where
+    BytesMut: PartialEq<T>,
+{
+    fn eq(&self, other: &&'a T) -> bool {
+        *self == **other
+    }
+}
+
+impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
+where
+    BytesMut: PartialOrd<T>,
+{
+    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
+        self.partial_cmp(*other)
+    }
+}
+
+impl PartialEq<BytesMut> for &[u8] {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for &[u8] {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+    }
+}
+
+impl PartialEq<BytesMut> for &str {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for &str {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl PartialEq<BytesMut> for Bytes {
+    fn eq(&self, other: &BytesMut) -> bool {
+        other[..] == self[..]
+    }
+}
+
+impl PartialEq<Bytes> for BytesMut {
+    fn eq(&self, other: &Bytes) -> bool {
+        other[..] == self[..]
+    }
+}
+
+impl From<BytesMut> for Vec<u8> {
+    fn from(bytes: BytesMut) -> Self {
+        let kind = bytes.kind();
+        let bytes = ManuallyDrop::new(bytes);
+
+        let mut vec = if kind == KIND_VEC {
+            unsafe {
+                let off = bytes.get_vec_pos();
+                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
+            }
+        } else {
+            let shared = bytes.data as *mut Shared;
+
+            if unsafe { (*shared).is_unique() } {
+                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
+
+                unsafe { release_shared(shared) };
+
+                vec
+            } else {
+                return ManuallyDrop::into_inner(bytes).deref().to_vec();
+            }
+        };
+
+        let len = bytes.len;
+
+        unsafe {
+            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
+            vec.set_len(len);
+        }
+
+        vec
+    }
+}
+
+#[inline]
+fn vptr(ptr: *mut u8) -> NonNull<u8> {
+    if cfg!(debug_assertions) {
+        NonNull::new(ptr).expect("Vec pointer should be non-null")
+    } else {
+        unsafe { NonNull::new_unchecked(ptr) }
+    }
+}
+
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but that expression fails on Miri
+/// when strict provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+    debug_assert_eq!(ptr as usize, addr);
+    ptr.cast::<T>()
+}
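+
+// For example, `from_vec` above stores
+// `(original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC` in the
+// `data` field via `invalid_ptr`, so the "pointer" is really a tagged integer
+// that `kind()` and `get_vec_pos()` later decode with bit masks.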
+
+unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+    let ptr = ptr.sub(off);
+    len += off;
+    cap += off;
+
+    Vec::from_raw_parts(ptr, len, cap)
+}
+
+// ===== impl SharedVtable =====
+
+static SHARED_VTABLE: Vtable = Vtable {
+    clone: shared_v_clone,
+    to_vec: shared_v_to_vec,
+    to_mut: shared_v_to_mut,
+    is_unique: shared_v_is_unique,
+    drop: shared_v_drop,
+};
+
+unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let shared = data.load(Ordering::Relaxed) as *mut Shared;
+    increment_shared(shared);
+
+    let data = AtomicPtr::new(shared as *mut ());
+    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
+}
+
+unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
+
+    if (*shared).is_unique() {
+        let shared = &mut *shared;
+
+        // Drop shared
+        let mut vec = mem::replace(&mut shared.vec, Vec::new());
+        release_shared(shared);
+
+        // Copy back buffer
+        ptr::copy(ptr, vec.as_mut_ptr(), len);
+        vec.set_len(len);
+
+        vec
+    } else {
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        v
+    }
+}
+
+unsafe fn shared_v_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
+    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
+
+    if (*shared).is_unique() {
+        let shared = &mut *shared;
+
+        // The capacity is always the original capacity of the buffer
+        // minus the offset from the start of the buffer
+        let v = &mut shared.vec;
+        let v_capacity = v.capacity();
+        let v_ptr = v.as_mut_ptr();
+        let offset = offset_from(ptr as *mut u8, v_ptr);
+        let cap = v_capacity - offset;
+
+        let ptr = vptr(ptr as *mut u8);
+
+        BytesMut {
+            ptr,
+            len,
+            cap,
+            data: shared,
+        }
+    } else {
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        BytesMut::from_vec(v)
+    }
+}
+
+unsafe fn shared_v_is_unique(data: &AtomicPtr<()>) -> bool {
+    let shared = data.load(Ordering::Acquire);
+    let ref_count = (*shared.cast::<Shared>()).ref_count.load(Ordering::Relaxed);
+    ref_count == 1
+}
+
+unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
+    data.with_mut(|shared| {
+        release_shared(*shared as *mut Shared);
+    });
+}
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = BytesMut::from("hello world");
+///     b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = BytesMut::from("hello world");
+///     b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = BytesMut::from("hello world");
+///     b1.split();
+/// }
+/// ```
+fn _split_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+    use loom::sync::Arc;
+    use loom::thread;
+
+    use super::BytesMut;
+    use crate::Bytes;
+
+    #[test]
+    fn bytes_mut_cloning_frozen() {
+        loom::model(|| {
+            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
+            let addr = a.as_ptr() as usize;
+
+            // test the Bytes::clone is Sync by putting it in an Arc
+            let a1 = Arc::new(a);
+            let a2 = a1.clone();
+
+            let t1 = thread::spawn(move || {
+                let b: Bytes = (*a1).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            let t2 = thread::spawn(move || {
+                let b: Bytes = (*a2).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            t1.join().unwrap();
+            t2.join().unwrap();
+        });
+    }
+}
+
\ No newline at end of file diff --git a/src/bytes/fmt/debug.rs.html b/src/bytes/fmt/debug.rs.html new file mode 100644 index 000000000..a772bdf74 --- /dev/null +++ b/src/bytes/fmt/debug.rs.html @@ -0,0 +1,99 @@ +debug.rs - source
+use core::fmt::{Debug, Formatter, Result};
+
+use super::BytesRef;
+use crate::{Bytes, BytesMut};
+
+/// Alternative implementation of `std::fmt::Debug` for byte slices.
+///
+/// The standard `Debug` implementation for `[u8]` is a comma-separated
+/// list of numbers. Since a large share of byte strings are in fact
+/// ASCII strings or contain a lot of ASCII (e.g. HTTP), it is
+/// convenient to print them as ASCII when possible.
+impl Debug for BytesRef<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+        write!(f, "b\"")?;
+        for &b in self.0 {
+            // https://doc.rust-lang.org/reference/tokens.html#byte-escapes
+            if b == b'\n' {
+                write!(f, "\\n")?;
+            } else if b == b'\r' {
+                write!(f, "\\r")?;
+            } else if b == b'\t' {
+                write!(f, "\\t")?;
+            } else if b == b'\\' || b == b'"' {
+                write!(f, "\\{}", b as char)?;
+            } else if b == b'\0' {
+                write!(f, "\\0")?;
+            // ASCII printable
+            } else if (0x20..0x7f).contains(&b) {
+                write!(f, "{}", b as char)?;
+            } else {
+                write!(f, "\\x{:02x}", b)?;
+            }
+        }
+        write!(f, "\"")?;
+        Ok(())
+    }
+}
+
+impl Debug for Bytes {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+        Debug::fmt(&BytesRef(self.as_ref()), f)
+    }
+}
+
+impl Debug for BytesMut {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+        Debug::fmt(&BytesRef(self.as_ref()), f)
+    }
+}
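+
+// Output sketch (illustrative, following the escaping rules above):
+//
+//     let b = bytes::Bytes::from(&b"a\nb\x00\xff"[..]);
+//     assert_eq!(format!("{:?}", b), "b\"a\\nb\\0\\xff\"");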
+
\ No newline at end of file diff --git a/src/bytes/fmt/hex.rs.html b/src/bytes/fmt/hex.rs.html new file mode 100644 index 000000000..45b20ad4c --- /dev/null +++ b/src/bytes/fmt/hex.rs.html @@ -0,0 +1,75 @@ +hex.rs - source
+use core::fmt::{Formatter, LowerHex, Result, UpperHex};
+
+use super::BytesRef;
+use crate::{Bytes, BytesMut};
+
+impl LowerHex for BytesRef<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+        for &b in self.0 {
+            write!(f, "{:02x}", b)?;
+        }
+        Ok(())
+    }
+}
+
+impl UpperHex for BytesRef<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+        for &b in self.0 {
+            write!(f, "{:02X}", b)?;
+        }
+        Ok(())
+    }
+}
+
+macro_rules! hex_impl {
+    ($tr:ident, $ty:ty) => {
+        impl $tr for $ty {
+            fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+                $tr::fmt(&BytesRef(self.as_ref()), f)
+            }
+        }
+    };
+}
+
+hex_impl!(LowerHex, Bytes);
+hex_impl!(LowerHex, BytesMut);
+hex_impl!(UpperHex, Bytes);
+hex_impl!(UpperHex, BytesMut);
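+
+// Usage sketch: each byte is rendered as two hex digits.
+//
+//     let b = bytes::Bytes::from(&b"\x01\xab"[..]);
+//     assert_eq!(format!("{:x}", b), "01ab");
+//     assert_eq!(format!("{:X}", b), "01AB");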
+
\ No newline at end of file diff --git a/src/bytes/fmt/mod.rs.html b/src/bytes/fmt/mod.rs.html new file mode 100644 index 000000000..fe6155f19 --- /dev/null +++ b/src/bytes/fmt/mod.rs.html @@ -0,0 +1,11 @@ +mod.rs - source
+mod debug;
+mod hex;
+
+/// `BytesRef` is not a part of public API of bytes crate.
+struct BytesRef<'a>(&'a [u8]);
+
\ No newline at end of file diff --git a/src/bytes/lib.rs.html b/src/bytes/lib.rs.html new file mode 100644 index 000000000..f418211e1 --- /dev/null +++ b/src/bytes/lib.rs.html @@ -0,0 +1,331 @@ +lib.rs - source
+#![allow(unknown_lints, unexpected_cfgs)]
+#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![doc(test(
+    no_crate_inject,
+    attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+//! Provides abstractions for working with bytes.
+//!
+//! The `bytes` crate provides an efficient byte buffer structure
+//! ([`Bytes`]) and traits for working with buffer
+//! implementations ([`Buf`], [`BufMut`]).
+//!
+//! # `Bytes`
+//!
+//! `Bytes` is an efficient container for storing and operating on contiguous
+//! slices of memory. It is intended for use primarily in networking code, but
+//! could have applications elsewhere as well.
+//!
+//! `Bytes` values facilitate zero-copy network programming by allowing multiple
+//! `Bytes` objects to point to the same underlying memory. This is managed by
+//! using a reference count to track when the memory is no longer needed and can
+//! be freed.
+//!
+//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]`
+//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For
+//! example:
+//!
+//! ```rust
+//! use bytes::{BytesMut, BufMut};
+//!
+//! let mut buf = BytesMut::with_capacity(1024);
+//! buf.put(&b"hello world"[..]);
+//! buf.put_u16(1234);
+//!
+//! let a = buf.split();
+//! assert_eq!(a, b"hello world\x04\xD2"[..]);
+//!
+//! buf.put(&b"goodbye world"[..]);
+//!
+//! let b = buf.split();
+//! assert_eq!(b, b"goodbye world"[..]);
+//!
+//! assert_eq!(buf.capacity(), 998);
+//! ```
+//!
+//! In the above example, only a single buffer of 1024 is allocated. The handles
+//! `a` and `b` will share the underlying buffer and maintain indices tracking
+//! the view into the buffer represented by the handle.
+//!
+//! See the [struct docs](`Bytes`) for more details.
+//!
+//! # `Buf`, `BufMut`
+//!
+//! These two traits provide read and write access to buffers. The underlying
+//! storage may or may not be in contiguous memory. For example, `Bytes` is a
+//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in
+//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current
+//! position in the underlying byte storage. When bytes are read or written, the
+//! cursor is advanced.
+//!
+//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
+//!
+//! ## Relation with `Read` and `Write`
+//!
+//! At first glance, it may seem that `Buf` and `BufMut` overlap in
+//! functionality with [`std::io::Read`] and [`std::io::Write`]. However, they
+//! serve different purposes. A buffer is the value that is provided as an
+//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then
+//! perform a syscall, which has the potential of failing. Operations on `Buf`
+//! and `BufMut` are infallible.
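+//!
+//! When `io::Read`/`io::Write` interoperability is needed, a `Buf` can be
+//! wrapped with `Buf::reader` and a `BufMut` with `BufMut::writer`. A minimal
+//! sketch (the adapters require the `std` feature, which is enabled by default):
+//!
+//! ```rust
+//! use bytes::Buf;
+//! use std::io::Read;
+//!
+//! let mut reader = (&b"example"[..]).reader();
+//! let mut first = [0u8; 4];
+//! reader.read_exact(&mut first).unwrap();
+//! assert_eq!(&first, b"exam");
+//! ```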
+
+extern crate alloc;
+
+#[cfg(feature = "std")]
+extern crate std;
+
+pub mod buf;
+pub use crate::buf::{Buf, BufMut};
+
+mod bytes;
+mod bytes_mut;
+mod fmt;
+mod loom;
+pub use crate::bytes::Bytes;
+pub use crate::bytes_mut::BytesMut;
+
+// Optional Serde support
+#[cfg(feature = "serde")]
+mod serde;
+
+#[inline(never)]
+#[cold]
+fn abort() -> ! {
+    #[cfg(feature = "std")]
+    {
+        std::process::abort();
+    }
+
+    #[cfg(not(feature = "std"))]
+    {
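+        // Without `std` there is no `process::abort()`. Panicking from `Abort`'s
+        // `Drop` impl while the `panic!("abort")` below is already unwinding
+        // results in a double panic, which the runtime escalates to an abort.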
+        struct Abort;
+        impl Drop for Abort {
+            fn drop(&mut self) {
+                panic!();
+            }
+        }
+        let _a = Abort;
+        panic!("abort");
+    }
+}
+
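+/// Saturating `a - b`; if `b` does not fit in a `usize`, the result is 0.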
+#[inline(always)]
+#[cfg(feature = "std")]
+fn saturating_sub_usize_u64(a: usize, b: u64) -> usize {
+    use core::convert::TryFrom;
+    match usize::try_from(b) {
+        Ok(b) => a.saturating_sub(b),
+        Err(_) => 0,
+    }
+}
+
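+/// `min(a, b)` as a `usize`; if `a` does not fit in a `usize`, `b` is the minimum.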
+#[inline(always)]
+#[cfg(feature = "std")]
+fn min_u64_usize(a: u64, b: usize) -> usize {
+    use core::convert::TryFrom;
+    match usize::try_from(a) {
+        Ok(a) => usize::min(a, b),
+        Err(_) => b,
+    }
+}
+
+/// Panic with a nice error message.
+#[cold]
+fn panic_advance(idx: usize, len: usize) -> ! {
+    panic!(
+        "advance out of bounds: the len is {} but advancing by {}",
+        len, idx
+    );
+}
+
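+/// Panic when `nbytes` exceeds the width in bytes (`size`) of the target integer type.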
+#[cold]
+fn panic_does_not_fit(size: usize, nbytes: usize) -> ! {
+    panic!(
+        "size too large: the integer type can fit {} bytes, but nbytes is {}",
+        size, nbytes
+    );
+}
+
+/// Precondition: dst >= original
+///
+/// The following line is equivalent to:
+///
+/// ```rust,ignore
+/// self.ptr.as_ptr().offset_from(ptr) as usize;
+/// ```
+///
+/// But because the minimum supported Rust version is 1.39 and `offset_from`
+/// was only stabilized in 1.47, we cannot use it.
+#[inline]
+fn offset_from(dst: *const u8, original: *const u8) -> usize {
+    dst as usize - original as usize
+}
+
\ No newline at end of file diff --git a/src/bytes/loom.rs.html b/src/bytes/loom.rs.html new file mode 100644 index 000000000..97d1acb30 --- /dev/null +++ b/src/bytes/loom.rs.html @@ -0,0 +1,61 @@ +loom.rs - source
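+// Outside of loom runs, re-export the real `core::sync::atomic` types and add an
+// `AtomicMut` helper that exposes `AtomicPtr::get_mut` through a closure. Under
+// `cfg(all(test, loom))`, the same paths resolve to loom's model-checked atomics.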
+#[cfg(not(all(test, loom)))]
+pub(crate) mod sync {
+    pub(crate) mod atomic {
+        pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+
+        pub(crate) trait AtomicMut<T> {
+            fn with_mut<F, R>(&mut self, f: F) -> R
+            where
+                F: FnOnce(&mut *mut T) -> R;
+        }
+
+        impl<T> AtomicMut<T> for AtomicPtr<T> {
+            fn with_mut<F, R>(&mut self, f: F) -> R
+            where
+                F: FnOnce(&mut *mut T) -> R,
+            {
+                f(self.get_mut())
+            }
+        }
+    }
+}
+
+#[cfg(all(test, loom))]
+pub(crate) mod sync {
+    pub(crate) mod atomic {
+        pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+
+        pub(crate) trait AtomicMut<T> {}
+    }
+}
+
\ No newline at end of file diff --git a/src/bytes/serde.rs.html b/src/bytes/serde.rs.html new file mode 100644 index 000000000..b3326f381 --- /dev/null +++ b/src/bytes/serde.rs.html @@ -0,0 +1,179 @@ +serde.rs - source
+use super::{Bytes, BytesMut};
+use alloc::string::String;
+use alloc::vec::Vec;
+use core::{cmp, fmt};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+macro_rules! serde_impl {
+    ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => {
+        impl Serialize for $ty {
+            #[inline]
+            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+            where
+                S: Serializer,
+            {
+                serializer.serialize_bytes(&self)
+            }
+        }
+
+        struct $visitor_ty;
+
+        impl<'de> de::Visitor<'de> for $visitor_ty {
+            type Value = $ty;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+                formatter.write_str("byte array")
+            }
+
+            #[inline]
+            fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
+            where
+                V: de::SeqAccess<'de>,
+            {
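+                // The size hint comes from untrusted input, so cap the initial
+                // allocation; the Vec still grows as elements actually arrive.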
+                let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
+                let mut values: Vec<u8> = Vec::with_capacity(len);
+
+                while let Some(value) = seq.next_element()? {
+                    values.push(value);
+                }
+
+                Ok($ty::$from_vec(values))
+            }
+
+            #[inline]
+            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok($ty::$from_slice(v))
+            }
+
+            #[inline]
+            fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok($ty::$from_vec(v))
+            }
+
+            #[inline]
+            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok($ty::$from_slice(v.as_bytes()))
+            }
+
+            #[inline]
+            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok($ty::$from_vec(v.into_bytes()))
+            }
+        }
+
+        impl<'de> Deserialize<'de> for $ty {
+            #[inline]
+            fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
+            where
+                D: Deserializer<'de>,
+            {
+                deserializer.deserialize_byte_buf($visitor_ty)
+            }
+        }
+    };
+}
+
+serde_impl!(Bytes, BytesVisitor, copy_from_slice, from);
+serde_impl!(BytesMut, BytesMutVisitor, from, from_vec);
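+
+// A minimal usage sketch of the impls generated above. It assumes `serde_json`
+// as the data format purely for illustration; it is not a dependency of this
+// module.
+//
+// ```rust,ignore
+// use bytes::Bytes;
+//
+// let b = Bytes::from_static(b"abc");
+// // serde_json maps `serialize_bytes` to a JSON array of numbers.
+// let json = serde_json::to_string(&b).unwrap();
+// // On the way back, the array input reaches `visit_seq` above.
+// let back: Bytes = serde_json::from_str(&json).unwrap();
+// assert_eq!(back, b);
+// ```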
+
\ No newline at end of file diff --git a/static.files/COPYRIGHT-23e9bde6c69aea69.txt b/static.files/COPYRIGHT-23e9bde6c69aea69.txt new file mode 100644 index 000000000..1447df792 --- /dev/null +++ b/static.files/COPYRIGHT-23e9bde6c69aea69.txt @@ -0,0 +1,50 @@ +# REUSE-IgnoreStart + +These documentation pages include resources by third parties. This copyright +file applies only to those resources. The following third party resources are +included, and carry their own copyright notices and license terms: + +* Fira Sans (FiraSans-Regular.woff2, FiraSans-Medium.woff2): + + Copyright (c) 2014, Mozilla Foundation https://mozilla.org/ + with Reserved Font Name Fira Sans. + + Copyright (c) 2014, Telefonica S.A. + + Licensed under the SIL Open Font License, Version 1.1. + See FiraSans-LICENSE.txt. + +* rustdoc.css, main.js, and playpen.js: + + Copyright 2015 The Rust Developers. + Licensed under the Apache License, Version 2.0 (see LICENSE-APACHE.txt) or + the MIT license (LICENSE-MIT.txt) at your option. + +* normalize.css: + + Copyright (c) Nicolas Gallagher and Jonathan Neal. + Licensed under the MIT license (see LICENSE-MIT.txt). + +* Source Code Pro (SourceCodePro-Regular.ttf.woff2, + SourceCodePro-Semibold.ttf.woff2, SourceCodePro-It.ttf.woff2): + + Copyright 2010, 2012 Adobe Systems Incorporated (http://www.adobe.com/), + with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark + of Adobe Systems Incorporated in the United States and/or other countries. + + Licensed under the SIL Open Font License, Version 1.1. + See SourceCodePro-LICENSE.txt. + +* Source Serif 4 (SourceSerif4-Regular.ttf.woff2, SourceSerif4-Bold.ttf.woff2, + SourceSerif4-It.ttf.woff2): + + Copyright 2014-2021 Adobe (http://www.adobe.com/), with Reserved Font Name + 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United + States and/or other countries. + + Licensed under the SIL Open Font License, Version 1.1. + See SourceSerif4-LICENSE.md. + +This copyright file is intended to be distributed with rustdoc output. + +# REUSE-IgnoreEnd diff --git a/static.files/FiraSans-LICENSE-db4b642586e02d97.txt b/static.files/FiraSans-LICENSE-db4b642586e02d97.txt new file mode 100644 index 000000000..d7e9c149b --- /dev/null +++ b/static.files/FiraSans-LICENSE-db4b642586e02d97.txt @@ -0,0 +1,98 @@ +// REUSE-IgnoreStart + +Digitized data copyright (c) 2012-2015, The Mozilla Foundation and Telefonica S.A. +with Reserved Font Name < Fira >, + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. 
The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
+ +// REUSE-IgnoreEnd diff --git a/static.files/FiraSans-Medium-8f9a781e4970d388.woff2 b/static.files/FiraSans-Medium-8f9a781e4970d388.woff2 new file mode 100644 index 000000000..7a1e5fc54 Binary files /dev/null and b/static.files/FiraSans-Medium-8f9a781e4970d388.woff2 differ diff --git a/static.files/FiraSans-Regular-018c141bf0843ffd.woff2 b/static.files/FiraSans-Regular-018c141bf0843ffd.woff2 new file mode 100644 index 000000000..e766e06cc Binary files /dev/null and b/static.files/FiraSans-Regular-018c141bf0843ffd.woff2 differ diff --git a/static.files/LICENSE-APACHE-b91fa81cba47b86a.txt b/static.files/LICENSE-APACHE-b91fa81cba47b86a.txt new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/static.files/LICENSE-APACHE-b91fa81cba47b86a.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/static.files/LICENSE-MIT-65090b722b3f6c56.txt b/static.files/LICENSE-MIT-65090b722b3f6c56.txt new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/static.files/LICENSE-MIT-65090b722b3f6c56.txt @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/static.files/NanumBarunGothic-0f09457c7a19b7c6.ttf.woff2 b/static.files/NanumBarunGothic-0f09457c7a19b7c6.ttf.woff2 new file mode 100644 index 000000000..1866ad4bc Binary files /dev/null and b/static.files/NanumBarunGothic-0f09457c7a19b7c6.ttf.woff2 differ diff --git a/static.files/NanumBarunGothic-LICENSE-18c5adf4b52b4041.txt b/static.files/NanumBarunGothic-LICENSE-18c5adf4b52b4041.txt new file mode 100644 index 000000000..4b3edc29e --- /dev/null +++ b/static.files/NanumBarunGothic-LICENSE-18c5adf4b52b4041.txt @@ -0,0 +1,103 @@ +// REUSE-IgnoreStart + +Copyright (c) 2010, NAVER Corporation (https://www.navercorp.com/), + +with Reserved Font Name Nanum, Naver Nanum, NanumGothic, Naver NanumGothic, +NanumMyeongjo, Naver NanumMyeongjo, NanumBrush, Naver NanumBrush, NanumPen, +Naver NanumPen, Naver NanumGothicEco, NanumGothicEco, Naver NanumMyeongjoEco, +NanumMyeongjoEco, Naver NanumGothicLight, NanumGothicLight, NanumBarunGothic, +Naver NanumBarunGothic, NanumSquareRound, NanumBarunPen, MaruBuri + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. 
The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. + +// REUSE-IgnoreEnd diff --git a/static.files/SourceCodePro-It-1cc31594bf4f1f79.ttf.woff2 b/static.files/SourceCodePro-It-1cc31594bf4f1f79.ttf.woff2 new file mode 100644 index 000000000..462c34efc Binary files /dev/null and b/static.files/SourceCodePro-It-1cc31594bf4f1f79.ttf.woff2 differ diff --git a/static.files/SourceCodePro-LICENSE-d180d465a756484a.txt b/static.files/SourceCodePro-LICENSE-d180d465a756484a.txt new file mode 100644 index 000000000..0d2941e14 --- /dev/null +++ b/static.files/SourceCodePro-LICENSE-d180d465a756484a.txt @@ -0,0 +1,97 @@ +// REUSE-IgnoreStart + +Copyright 2010, 2012 Adobe Systems Incorporated (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe Systems Incorporated in the United States and/or other countries. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. + +This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. 
+ +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
+ +// REUSE-IgnoreEnd diff --git a/static.files/SourceCodePro-Regular-562dcc5011b6de7d.ttf.woff2 b/static.files/SourceCodePro-Regular-562dcc5011b6de7d.ttf.woff2 new file mode 100644 index 000000000..10b558e0b Binary files /dev/null and b/static.files/SourceCodePro-Regular-562dcc5011b6de7d.ttf.woff2 differ diff --git a/static.files/SourceCodePro-Semibold-d899c5a5c4aeb14a.ttf.woff2 b/static.files/SourceCodePro-Semibold-d899c5a5c4aeb14a.ttf.woff2 new file mode 100644 index 000000000..5ec64eef0 Binary files /dev/null and b/static.files/SourceCodePro-Semibold-d899c5a5c4aeb14a.ttf.woff2 differ diff --git a/static.files/SourceSerif4-Bold-a2c9cd1067f8b328.ttf.woff2 b/static.files/SourceSerif4-Bold-a2c9cd1067f8b328.ttf.woff2 new file mode 100644 index 000000000..181a07f63 Binary files /dev/null and b/static.files/SourceSerif4-Bold-a2c9cd1067f8b328.ttf.woff2 differ diff --git a/static.files/SourceSerif4-It-acdfaf1a8af734b1.ttf.woff2 b/static.files/SourceSerif4-It-acdfaf1a8af734b1.ttf.woff2 new file mode 100644 index 000000000..2ae08a7be Binary files /dev/null and b/static.files/SourceSerif4-It-acdfaf1a8af734b1.ttf.woff2 differ diff --git a/static.files/SourceSerif4-LICENSE-3bb119e13b1258b7.md b/static.files/SourceSerif4-LICENSE-3bb119e13b1258b7.md new file mode 100644 index 000000000..175fa4f47 --- /dev/null +++ b/static.files/SourceSerif4-LICENSE-3bb119e13b1258b7.md @@ -0,0 +1,98 @@ + + +Copyright 2014-2021 Adobe (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. +Copyright 2014 - 2023 Adobe (http://www.adobe.com/), with Reserved Font Name ‘Source’. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. + +This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). 
+ +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
+ + diff --git a/static.files/SourceSerif4-Regular-46f98efaafac5295.ttf.woff2 b/static.files/SourceSerif4-Regular-46f98efaafac5295.ttf.woff2 new file mode 100644 index 000000000..0263fc304 Binary files /dev/null and b/static.files/SourceSerif4-Regular-46f98efaafac5295.ttf.woff2 differ diff --git a/static.files/favicon-2c020d218678b618.svg b/static.files/favicon-2c020d218678b618.svg new file mode 100644 index 000000000..8b34b5119 --- /dev/null +++ b/static.files/favicon-2c020d218678b618.svg @@ -0,0 +1,24 @@ + + + + + diff --git a/static.files/favicon-32x32-422f7d1d52889060.png b/static.files/favicon-32x32-422f7d1d52889060.png new file mode 100644 index 000000000..69b8613ce Binary files /dev/null and b/static.files/favicon-32x32-422f7d1d52889060.png differ diff --git a/static.files/main-14659ec02b58af51.js b/static.files/main-14659ec02b58af51.js new file mode 100644 index 000000000..22dc44b8c --- /dev/null +++ b/static.files/main-14659ec02b58af51.js @@ -0,0 +1,11 @@ +"use strict";window.RUSTDOC_TOOLTIP_HOVER_MS=300;window.RUSTDOC_TOOLTIP_HOVER_EXIT_MS=450;function resourcePath(basename,extension){return getVar("root-path")+basename+getVar("resource-suffix")+extension}function hideMain(){addClass(document.getElementById(MAIN_ID),"hidden")}function showMain(){removeClass(document.getElementById(MAIN_ID),"hidden")}function blurHandler(event,parentElem,hideCallback){if(!parentElem.contains(document.activeElement)&&!parentElem.contains(event.relatedTarget)){hideCallback()}}window.rootPath=getVar("root-path");window.currentCrate=getVar("current-crate");function setMobileTopbar(){const mobileTopbar=document.querySelector(".mobile-topbar");const locationTitle=document.querySelector(".sidebar h2.location");if(mobileTopbar){const mobileTitle=document.createElement("h2");mobileTitle.className="location";if(hasClass(document.querySelector(".rustdoc"),"crate")){mobileTitle.innerHTML=`Crate ${window.currentCrate}`}else if(locationTitle){mobileTitle.innerHTML=locationTitle.innerHTML}mobileTopbar.appendChild(mobileTitle)}}function getVirtualKey(ev){if("key"in ev&&typeof ev.key!=="undefined"){return ev.key}const c=ev.charCode||ev.keyCode;if(c===27){return"Escape"}return String.fromCharCode(c)}const MAIN_ID="main-content";const SETTINGS_BUTTON_ID="settings-menu";const ALTERNATIVE_DISPLAY_ID="alternative-display";const NOT_DISPLAYED_ID="not-displayed";const HELP_BUTTON_ID="help-button";function getSettingsButton(){return document.getElementById(SETTINGS_BUTTON_ID)}function getHelpButton(){return document.getElementById(HELP_BUTTON_ID)}function getNakedUrl(){return window.location.href.split("?")[0].split("#")[0]}function insertAfter(newNode,referenceNode){referenceNode.parentNode.insertBefore(newNode,referenceNode.nextSibling)}function getOrCreateSection(id,classes){let el=document.getElementById(id);if(!el){el=document.createElement("section");el.id=id;el.className=classes;insertAfter(el,document.getElementById(MAIN_ID))}return el}function getAlternativeDisplayElem(){return getOrCreateSection(ALTERNATIVE_DISPLAY_ID,"content hidden")}function getNotDisplayedElem(){return getOrCreateSection(NOT_DISPLAYED_ID,"hidden")}function switchDisplayedElement(elemToDisplay){const el=getAlternativeDisplayElem();if(el.children.length>0){getNotDisplayedElem().appendChild(el.firstElementChild)}if(elemToDisplay===null){addClass(el,"hidden");showMain();return}el.appendChild(elemToDisplay);hideMain();removeClass(el,"hidden")}function browserSupportsHistoryApi(){return window.history&&typeof 
window.history.pushState==="function"}function preLoadCss(cssUrl){const link=document.createElement("link");link.href=cssUrl;link.rel="preload";link.as="style";document.getElementsByTagName("head")[0].appendChild(link)}(function(){const isHelpPage=window.location.pathname.endsWith("/help.html");function loadScript(url,errorCallback){const script=document.createElement("script");script.src=url;if(errorCallback!==undefined){script.onerror=errorCallback}document.head.append(script)}getSettingsButton().onclick=event=>{if(event.ctrlKey||event.altKey||event.metaKey){return}window.hideAllModals(false);addClass(getSettingsButton(),"rotate");event.preventDefault();loadScript(getVar("static-root-path")+getVar("settings-js"));setTimeout(()=>{const themes=getVar("themes").split(",");for(const theme of themes){if(theme!==""){preLoadCss(getVar("root-path")+theme+".css")}}},0)};window.searchState={loadingText:"Loading search results...",input:document.getElementsByClassName("search-input")[0],outputElement:()=>{let el=document.getElementById("search");if(!el){el=document.createElement("section");el.id="search";getNotDisplayedElem().appendChild(el)}return el},title:document.title,titleBeforeSearch:document.title,timeout:null,currentTab:0,focusedByTab:[null,null,null],clearInputTimeout:()=>{if(searchState.timeout!==null){clearTimeout(searchState.timeout);searchState.timeout=null}},isDisplayed:()=>searchState.outputElement().parentElement.id===ALTERNATIVE_DISPLAY_ID,focus:()=>{searchState.input.focus()},defocus:()=>{searchState.input.blur()},showResults:search=>{if(search===null||typeof search==="undefined"){search=searchState.outputElement()}switchDisplayedElement(search);searchState.mouseMovedAfterSearch=false;document.title=searchState.title},removeQueryParameters:()=>{document.title=searchState.titleBeforeSearch;if(browserSupportsHistoryApi()){history.replaceState(null,"",getNakedUrl()+window.location.hash)}},hideResults:()=>{switchDisplayedElement(null);searchState.removeQueryParameters()},getQueryStringParams:()=>{const params={};window.location.search.substring(1).split("&").map(s=>{const pair=s.split("=").map(x=>x.replace(/\+/g," "));params[decodeURIComponent(pair[0])]=typeof pair[1]==="undefined"?null:decodeURIComponent(pair[1])});return params},setup:()=>{const search_input=searchState.input;if(!searchState.input){return}let searchLoaded=false;function sendSearchForm(){document.getElementsByClassName("search-form")[0].submit()}function loadSearch(){if(!searchLoaded){searchLoaded=true;loadScript(getVar("static-root-path")+getVar("search-js"),sendSearchForm);loadScript(resourcePath("search-index",".js"),sendSearchForm)}}search_input.addEventListener("focus",()=>{search_input.origPlaceholder=search_input.placeholder;search_input.placeholder="Type your search here.";loadSearch()});if(search_input.value!==""){loadSearch()}const params=searchState.getQueryStringParams();if(params.search!==undefined){searchState.setLoadingSearch();loadSearch()}},setLoadingSearch:()=>{const search=searchState.outputElement();search.innerHTML="

"+searchState.loadingText+"

";searchState.showResults(search)},descShards:new Map(),loadDesc:async function({descShard,descIndex}){if(descShard.promise===null){descShard.promise=new Promise((resolve,reject)=>{descShard.resolve=resolve;const ds=descShard;const fname=`${ds.crate}-desc-${ds.shard}-`;const url=resourcePath(`search.desc/${descShard.crate}/${fname}`,".js",);loadScript(url,reject)})}const list=await descShard.promise;return list[descIndex]},loadedDescShard:function(crate,shard,data){this.descShards.get(crate)[shard].resolve(data.split("\n"))},};const toggleAllDocsId="toggle-all-docs";let savedHash="";function handleHashes(ev){if(ev!==null&&searchState.isDisplayed()&&ev.newURL){switchDisplayedElement(null);const hash=ev.newURL.slice(ev.newURL.indexOf("#")+1);if(browserSupportsHistoryApi()){history.replaceState(null,"",getNakedUrl()+window.location.search+"#"+hash)}const elem=document.getElementById(hash);if(elem){elem.scrollIntoView()}}const pageId=window.location.hash.replace(/^#/,"");if(savedHash!==pageId){savedHash=pageId;if(pageId!==""){expandSection(pageId)}}if(savedHash.startsWith("impl-")){const splitAt=savedHash.indexOf("/");if(splitAt!==-1){const implId=savedHash.slice(0,splitAt);const assocId=savedHash.slice(splitAt+1);const implElems=document.querySelectorAll(`details > summary > section[id^="${implId}"]`,);onEachLazy(implElems,implElem=>{const numbered=/^(.+?)-([0-9]+)$/.exec(implElem.id);if(implElem.id!==implId&&(!numbered||numbered[1]!==implId)){return false}return onEachLazy(implElem.parentElement.parentElement.querySelectorAll(`[id^="${assocId}"]`),item=>{const numbered=/^(.+?)-([0-9]+)$/.exec(item.id);if(item.id===assocId||(numbered&&numbered[1]===assocId)){openParentDetails(item);item.scrollIntoView();setTimeout(()=>{window.location.replace("#"+item.id)},0);return true}},)})}}}function onHashChange(ev){hideSidebar();handleHashes(ev)}function openParentDetails(elem){while(elem){if(elem.tagName==="DETAILS"){elem.open=true}elem=elem.parentNode}}function expandSection(id){openParentDetails(document.getElementById(id))}function handleEscape(ev){searchState.clearInputTimeout();searchState.hideResults();ev.preventDefault();searchState.defocus();window.hideAllModals(true)}function handleShortcut(ev){const disableShortcuts=getSettingValue("disable-shortcuts")==="true";if(ev.ctrlKey||ev.altKey||ev.metaKey||disableShortcuts){return}if(document.activeElement.tagName==="INPUT"&&document.activeElement.type!=="checkbox"&&document.activeElement.type!=="radio"){switch(getVirtualKey(ev)){case"Escape":handleEscape(ev);break}}else{switch(getVirtualKey(ev)){case"Escape":handleEscape(ev);break;case"s":case"S":case"/":ev.preventDefault();searchState.focus();break;case"+":ev.preventDefault();expandAllDocs();break;case"-":ev.preventDefault();collapseAllDocs();break;case"?":showHelp();break;default:break}}}document.addEventListener("keypress",handleShortcut);document.addEventListener("keydown",handleShortcut);function addSidebarItems(){if(!window.SIDEBAR_ITEMS){return}const sidebar=document.getElementById("rustdoc-modnav");function block(shortty,id,longty){const filtered=window.SIDEBAR_ITEMS[shortty];if(!filtered){return}const modpath=hasClass(document.querySelector(".rustdoc"),"mod")?"../":"";const h3=document.createElement("h3");h3.innerHTML=`${longty}`;const ul=document.createElement("ul");ul.className="block "+shortty;for(const name of filtered){let path;if(shortty==="mod"){path=`${modpath}${name}/index.html`}else{path=`${modpath}${shortty}.${name}.html`}let 
current_page=document.location.href.toString();if(current_page.endsWith("/")){current_page+="index.html"}const link=document.createElement("a");link.href=path;link.textContent=name;const li=document.createElement("li");if(link.href===current_page){li.classList.add("current")}li.appendChild(link);ul.appendChild(li)}sidebar.appendChild(h3);sidebar.appendChild(ul)}if(sidebar){block("primitive","primitives","Primitive Types");block("mod","modules","Modules");block("macro","macros","Macros");block("struct","structs","Structs");block("enum","enums","Enums");block("constant","constants","Constants");block("static","static","Statics");block("trait","traits","Traits");block("fn","functions","Functions");block("type","types","Type Aliases");block("union","unions","Unions");block("foreigntype","foreign-types","Foreign Types");block("keyword","keywords","Keywords");block("attr","attributes","Attribute Macros");block("derive","derives","Derive Macros");block("traitalias","trait-aliases","Trait Aliases")}}window.register_implementors=imp=>{const implementors=document.getElementById("implementors-list");const synthetic_implementors=document.getElementById("synthetic-implementors-list");const inlined_types=new Set();const TEXT_IDX=0;const SYNTHETIC_IDX=1;const TYPES_IDX=2;if(synthetic_implementors){onEachLazy(synthetic_implementors.getElementsByClassName("impl"),el=>{const aliases=el.getAttribute("data-aliases");if(!aliases){return}aliases.split(",").forEach(alias=>{inlined_types.add(alias)})})}let currentNbImpls=implementors.getElementsByClassName("impl").length;const traitName=document.querySelector(".main-heading h1 > .trait").textContent;const baseIdName="impl-"+traitName+"-";const libs=Object.getOwnPropertyNames(imp);const script=document.querySelector("script[data-ignore-extern-crates]");const ignoreExternCrates=new Set((script?script.getAttribute("data-ignore-extern-crates"):"").split(","),);for(const lib of libs){if(lib===window.currentCrate||ignoreExternCrates.has(lib)){continue}const structs=imp[lib];struct_loop:for(const struct of structs){const list=struct[SYNTHETIC_IDX]?synthetic_implementors:implementors;if(struct[SYNTHETIC_IDX]){for(const struct_type of struct[TYPES_IDX]){if(inlined_types.has(struct_type)){continue struct_loop}inlined_types.add(struct_type)}}const code=document.createElement("h3");code.innerHTML=struct[TEXT_IDX];addClass(code,"code-header");onEachLazy(code.getElementsByTagName("a"),elem=>{const href=elem.getAttribute("href");if(href&&!href.startsWith("#")&&!/^(?:[a-z+]+:)?\/\//.test(href)){elem.setAttribute("href",window.rootPath+href)}});const currentId=baseIdName+currentNbImpls;const anchor=document.createElement("a");anchor.href="#"+currentId;addClass(anchor,"anchor");const display=document.createElement("div");display.id=currentId;addClass(display,"impl");display.appendChild(anchor);display.appendChild(code);list.appendChild(display);currentNbImpls+=1}}};if(window.pending_implementors){window.register_implementors(window.pending_implementors)}window.register_type_impls=imp=>{if(!imp||!imp[window.currentCrate]){return}window.pending_type_impls=null;const idMap=new Map();let implementations=document.getElementById("implementations-list");let trait_implementations=document.getElementById("trait-implementations-list");let trait_implementations_header=document.getElementById("trait-implementations");const script=document.querySelector("script[data-self-path]");const selfPath=script?script.getAttribute("data-self-path"):null;const 
mainContent=document.querySelector("#main-content");const sidebarSection=document.querySelector(".sidebar section");let methods=document.querySelector(".sidebar .block.method");let associatedTypes=document.querySelector(".sidebar .block.associatedtype");let associatedConstants=document.querySelector(".sidebar .block.associatedconstant");let sidebarTraitList=document.querySelector(".sidebar .block.trait-implementation");for(const impList of imp[window.currentCrate]){const types=impList.slice(2);const text=impList[0];const isTrait=impList[1]!==0;const traitName=impList[1];if(types.indexOf(selfPath)===-1){continue}let outputList=isTrait?trait_implementations:implementations;if(outputList===null){const outputListName=isTrait?"Trait Implementations":"Implementations";const outputListId=isTrait?"trait-implementations-list":"implementations-list";const outputListHeaderId=isTrait?"trait-implementations":"implementations";const outputListHeader=document.createElement("h2");outputListHeader.id=outputListHeaderId;outputListHeader.innerText=outputListName;outputList=document.createElement("div");outputList.id=outputListId;if(isTrait){const link=document.createElement("a");link.href=`#${outputListHeaderId}`;link.innerText="Trait Implementations";const h=document.createElement("h3");h.appendChild(link);trait_implementations=outputList;trait_implementations_header=outputListHeader;sidebarSection.appendChild(h);sidebarTraitList=document.createElement("ul");sidebarTraitList.className="block trait-implementation";sidebarSection.appendChild(sidebarTraitList);mainContent.appendChild(outputListHeader);mainContent.appendChild(outputList)}else{implementations=outputList;if(trait_implementations){mainContent.insertBefore(outputListHeader,trait_implementations_header);mainContent.insertBefore(outputList,trait_implementations_header)}else{const mainContent=document.querySelector("#main-content");mainContent.appendChild(outputListHeader);mainContent.appendChild(outputList)}}}const template=document.createElement("template");template.innerHTML=text;onEachLazy(template.content.querySelectorAll("a"),elem=>{const href=elem.getAttribute("href");if(href&&!href.startsWith("#")&&!/^(?:[a-z+]+:)?\/\//.test(href)){elem.setAttribute("href",window.rootPath+href)}});onEachLazy(template.content.querySelectorAll("[id]"),el=>{let i=0;if(idMap.has(el.id)){i=idMap.get(el.id)}else if(document.getElementById(el.id)){i=1;while(document.getElementById(`${el.id}-${2 * i}`)){i=2*i}while(document.getElementById(`${el.id}-${i}`)){i+=1}}if(i!==0){const oldHref=`#${el.id}`;const newHref=`#${el.id}-${i}`;el.id=`${el.id}-${i}`;onEachLazy(template.content.querySelectorAll("a[href]"),link=>{if(link.getAttribute("href")===oldHref){link.href=newHref}})}idMap.set(el.id,i+1)});const templateAssocItems=template.content.querySelectorAll("section.tymethod, "+"section.method, section.associatedtype, section.associatedconstant");if(isTrait){const li=document.createElement("li");const a=document.createElement("a");a.href=`#${template.content.querySelector(".impl").id}`;a.textContent=traitName;li.appendChild(a);sidebarTraitList.append(li)}else{onEachLazy(templateAssocItems,item=>{let block=hasClass(item,"associatedtype")?associatedTypes:(hasClass(item,"associatedconstant")?associatedConstants:(methods));if(!block){const blockTitle=hasClass(item,"associatedtype")?"Associated Types":(hasClass(item,"associatedconstant")?"Associated Constants":("Methods"));const 
blockClass=hasClass(item,"associatedtype")?"associatedtype":(hasClass(item,"associatedconstant")?"associatedconstant":("method"));const blockHeader=document.createElement("h3");const blockLink=document.createElement("a");blockLink.href="#implementations";blockLink.innerText=blockTitle;blockHeader.appendChild(blockLink);block=document.createElement("ul");block.className=`block ${blockClass}`;const insertionReference=methods||sidebarTraitList;if(insertionReference){const insertionReferenceH=insertionReference.previousElementSibling;sidebarSection.insertBefore(blockHeader,insertionReferenceH);sidebarSection.insertBefore(block,insertionReferenceH)}else{sidebarSection.appendChild(blockHeader);sidebarSection.appendChild(block)}if(hasClass(item,"associatedtype")){associatedTypes=block}else if(hasClass(item,"associatedconstant")){associatedConstants=block}else{methods=block}}const li=document.createElement("li");const a=document.createElement("a");a.innerText=item.id.split("-")[0].split(".")[1];a.href=`#${item.id}`;li.appendChild(a);block.appendChild(li)})}outputList.appendChild(template.content)}for(const list of[methods,associatedTypes,associatedConstants,sidebarTraitList]){if(!list){continue}const newChildren=Array.prototype.slice.call(list.children);newChildren.sort((a,b)=>{const aI=a.innerText;const bI=b.innerText;return aIbI?1:0});list.replaceChildren(...newChildren)}};if(window.pending_type_impls){window.register_type_impls(window.pending_type_impls)}function addSidebarCrates(){if(!window.ALL_CRATES){return}const sidebarElems=document.getElementById("rustdoc-modnav");if(!sidebarElems){return}const h3=document.createElement("h3");h3.innerHTML="Crates";const ul=document.createElement("ul");ul.className="block crate";for(const crate of window.ALL_CRATES){const link=document.createElement("a");link.href=window.rootPath+crate+"/index.html";link.textContent=crate;const li=document.createElement("li");if(window.rootPath!=="./"&&crate===window.currentCrate){li.className="current"}li.appendChild(link);ul.appendChild(li)}sidebarElems.appendChild(h3);sidebarElems.appendChild(ul)}function expandAllDocs(){const innerToggle=document.getElementById(toggleAllDocsId);removeClass(innerToggle,"will-expand");onEachLazy(document.getElementsByClassName("toggle"),e=>{if(!hasClass(e,"type-contents-toggle")&&!hasClass(e,"more-examples-toggle")){e.open=true}});innerToggle.title="collapse all docs";innerToggle.children[0].innerText="\u2212"}function collapseAllDocs(){const innerToggle=document.getElementById(toggleAllDocsId);addClass(innerToggle,"will-expand");onEachLazy(document.getElementsByClassName("toggle"),e=>{if(e.parentNode.id!=="implementations-list"||(!hasClass(e,"implementors-toggle")&&!hasClass(e,"type-contents-toggle"))){e.open=false}});innerToggle.title="expand all docs";innerToggle.children[0].innerText="+"}function toggleAllDocs(){const innerToggle=document.getElementById(toggleAllDocsId);if(!innerToggle){return}if(hasClass(innerToggle,"will-expand")){expandAllDocs()}else{collapseAllDocs()}}(function(){const toggles=document.getElementById(toggleAllDocsId);if(toggles){toggles.onclick=toggleAllDocs}const hideMethodDocs=getSettingValue("auto-hide-method-docs")==="true";const hideImplementations=getSettingValue("auto-hide-trait-implementations")==="true";const hideLargeItemContents=getSettingValue("auto-hide-large-items")!=="false";function setImplementorsTogglesOpen(id,open){const 
list=document.getElementById(id);if(list!==null){onEachLazy(list.getElementsByClassName("implementors-toggle"),e=>{e.open=open})}}if(hideImplementations){setImplementorsTogglesOpen("trait-implementations-list",false);setImplementorsTogglesOpen("blanket-implementations-list",false)}onEachLazy(document.getElementsByClassName("toggle"),e=>{if(!hideLargeItemContents&&hasClass(e,"type-contents-toggle")){e.open=true}if(hideMethodDocs&&hasClass(e,"method-toggle")){e.open=false}})}());window.rustdoc_add_line_numbers_to_examples=()=>{onEachLazy(document.getElementsByClassName("rust-example-rendered"),x=>{const parent=x.parentNode;const line_numbers=parent.querySelectorAll(".example-line-numbers");if(line_numbers.length>0){return}const count=x.textContent.split("\n").length;const elems=[];for(let i=0;i{onEachLazy(document.getElementsByClassName("rust-example-rendered"),x=>{const parent=x.parentNode;const line_numbers=parent.querySelectorAll(".example-line-numbers");for(const node of line_numbers){parent.removeChild(node)}})};if(getSettingValue("line-numbers")==="true"){window.rustdoc_add_line_numbers_to_examples()}function showSidebar(){window.hideAllModals(false);const sidebar=document.getElementsByClassName("sidebar")[0];addClass(sidebar,"shown")}function hideSidebar(){const sidebar=document.getElementsByClassName("sidebar")[0];removeClass(sidebar,"shown")}window.addEventListener("resize",()=>{if(window.CURRENT_TOOLTIP_ELEMENT){const base=window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE;const force_visible=base.TOOLTIP_FORCE_VISIBLE;hideTooltip(false);if(force_visible){showTooltip(base);base.TOOLTIP_FORCE_VISIBLE=true}}});const mainElem=document.getElementById(MAIN_ID);if(mainElem){mainElem.addEventListener("click",hideSidebar)}onEachLazy(document.querySelectorAll("a[href^='#']"),el=>{el.addEventListener("click",()=>{expandSection(el.hash.slice(1));hideSidebar()})});onEachLazy(document.querySelectorAll(".toggle > summary:not(.hideme)"),el=>{el.addEventListener("click",e=>{if(e.target.tagName!=="SUMMARY"&&e.target.tagName!=="A"){e.preventDefault()}})});function showTooltip(e){const notable_ty=e.getAttribute("data-notable-ty");if(!window.NOTABLE_TRAITS&¬able_ty){const data=document.getElementById("notable-traits-data");if(data){window.NOTABLE_TRAITS=JSON.parse(data.innerText)}else{throw new Error("showTooltip() called with notable without any notable traits!")}}if(window.CURRENT_TOOLTIP_ELEMENT&&window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE===e){clearTooltipHoverTimeout(window.CURRENT_TOOLTIP_ELEMENT);return}window.hideAllModals(false);const wrapper=document.createElement("div");if(notable_ty){wrapper.innerHTML="
"+window.NOTABLE_TRAITS[notable_ty]+"
"}else{if(e.getAttribute("title")!==null){e.setAttribute("data-title",e.getAttribute("title"));e.removeAttribute("title")}if(e.getAttribute("data-title")!==null){const titleContent=document.createElement("div");titleContent.className="content";titleContent.appendChild(document.createTextNode(e.getAttribute("data-title")));wrapper.appendChild(titleContent)}}wrapper.className="tooltip popover";const focusCatcher=document.createElement("div");focusCatcher.setAttribute("tabindex","0");focusCatcher.onfocus=hideTooltip;wrapper.appendChild(focusCatcher);const pos=e.getBoundingClientRect();wrapper.style.top=(pos.top+window.scrollY+pos.height)+"px";wrapper.style.left=0;wrapper.style.right="auto";wrapper.style.visibility="hidden";document.body.appendChild(wrapper);const wrapperPos=wrapper.getBoundingClientRect();const finalPos=pos.left+window.scrollX-wrapperPos.width+24;if(finalPos>0){wrapper.style.left=finalPos+"px"}else{wrapper.style.setProperty("--popover-arrow-offset",(wrapperPos.right-pos.right+4)+"px",)}wrapper.style.visibility="";window.CURRENT_TOOLTIP_ELEMENT=wrapper;window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE=e;clearTooltipHoverTimeout(window.CURRENT_TOOLTIP_ELEMENT);wrapper.onpointerenter=ev=>{if(ev.pointerType!=="mouse"){return}clearTooltipHoverTimeout(e)};wrapper.onpointerleave=ev=>{if(ev.pointerType!=="mouse"){return}if(!e.TOOLTIP_FORCE_VISIBLE&&!e.contains(ev.relatedTarget)){setTooltipHoverTimeout(e,false);addClass(wrapper,"fade-out")}}}function setTooltipHoverTimeout(element,show){clearTooltipHoverTimeout(element);if(!show&&!window.CURRENT_TOOLTIP_ELEMENT){return}if(show&&window.CURRENT_TOOLTIP_ELEMENT){return}if(window.CURRENT_TOOLTIP_ELEMENT&&window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE!==element){return}element.TOOLTIP_HOVER_TIMEOUT=setTimeout(()=>{if(show){showTooltip(element)}else if(!element.TOOLTIP_FORCE_VISIBLE){hideTooltip(false)}},show?window.RUSTDOC_TOOLTIP_HOVER_MS:window.RUSTDOC_TOOLTIP_HOVER_EXIT_MS)}function clearTooltipHoverTimeout(element){if(element.TOOLTIP_HOVER_TIMEOUT!==undefined){removeClass(window.CURRENT_TOOLTIP_ELEMENT,"fade-out");clearTimeout(element.TOOLTIP_HOVER_TIMEOUT);delete element.TOOLTIP_HOVER_TIMEOUT}}function tooltipBlurHandler(event){if(window.CURRENT_TOOLTIP_ELEMENT&&!window.CURRENT_TOOLTIP_ELEMENT.contains(document.activeElement)&&!window.CURRENT_TOOLTIP_ELEMENT.contains(event.relatedTarget)&&!window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE.contains(document.activeElement)&&!window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE.contains(event.relatedTarget)){setTimeout(()=>hideTooltip(false),0)}}function hideTooltip(focus){if(window.CURRENT_TOOLTIP_ELEMENT){if(window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE.TOOLTIP_FORCE_VISIBLE){if(focus){window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE.focus()}window.CURRENT_TOOLTIP_ELEMENT.TOOLTIP_BASE.TOOLTIP_FORCE_VISIBLE=false}document.body.removeChild(window.CURRENT_TOOLTIP_ELEMENT);clearTooltipHoverTimeout(window.CURRENT_TOOLTIP_ELEMENT);window.CURRENT_TOOLTIP_ELEMENT=null}}onEachLazy(document.getElementsByClassName("tooltip"),e=>{e.onclick=()=>{e.TOOLTIP_FORCE_VISIBLE=e.TOOLTIP_FORCE_VISIBLE?false:true;if(window.CURRENT_TOOLTIP_ELEMENT&&!e.TOOLTIP_FORCE_VISIBLE){hideTooltip(true)}else{showTooltip(e);window.CURRENT_TOOLTIP_ELEMENT.setAttribute("tabindex","0");window.CURRENT_TOOLTIP_ELEMENT.focus();window.CURRENT_TOOLTIP_ELEMENT.onblur=tooltipBlurHandler}return 
false};e.onpointerenter=ev=>{if(ev.pointerType!=="mouse"){return}setTooltipHoverTimeout(e,true)};e.onpointermove=ev=>{if(ev.pointerType!=="mouse"){return}setTooltipHoverTimeout(e,true)};e.onpointerleave=ev=>{if(ev.pointerType!=="mouse"){return}if(!e.TOOLTIP_FORCE_VISIBLE&&window.CURRENT_TOOLTIP_ELEMENT&&!window.CURRENT_TOOLTIP_ELEMENT.contains(ev.relatedTarget)){setTooltipHoverTimeout(e,false);addClass(window.CURRENT_TOOLTIP_ELEMENT,"fade-out")}}});const sidebar_menu_toggle=document.getElementsByClassName("sidebar-menu-toggle")[0];if(sidebar_menu_toggle){sidebar_menu_toggle.addEventListener("click",()=>{const sidebar=document.getElementsByClassName("sidebar")[0];if(!hasClass(sidebar,"shown")){showSidebar()}else{hideSidebar()}})}function helpBlurHandler(event){blurHandler(event,getHelpButton(),window.hidePopoverMenus)}function buildHelpMenu(){const book_info=document.createElement("span");const channel=getVar("channel");book_info.className="top";book_info.innerHTML=`You can find more information in \ +the rustdoc book.`;const shortcuts=[["?","Show this help dialog"],["S / /","Focus the search field"],["↑","Move up in search results"],["↓","Move down in search results"],["← / →","Switch result tab (when results focused)"],["⏎","Go to active search result"],["+","Expand all sections"],["-","Collapse all sections"],].map(x=>"
"+x[0].split(" ").map((y,index)=>((index&1)===0?""+y+"":" "+y+" ")).join("")+"
"+x[1]+"
").join("");const div_shortcuts=document.createElement("div");addClass(div_shortcuts,"shortcuts");div_shortcuts.innerHTML="

<h2>Keyboard Shortcuts</h2><dl>"+shortcuts+"</dl>
";const infos=[`For a full list of all search features, take a look here.`,"Prefix searches with a type followed by a colon (e.g., fn:) to \ + restrict the search to a given item kind.","Accepted kinds are: fn, mod, struct, \ + enum, trait, type, macro, \ + and const.","Search functions by type signature (e.g., vec -> usize or \ + -> vec or String, enum:Cow -> bool)","You can look for items with an exact name by putting double quotes around \ + your request: \"string\"","Look for functions that accept or return \ + slices and \ + arrays by writing \ + square brackets (e.g., -> [u8] or [] -> Option)","Look for items inside another one by searching for a path: vec::Vec",].map(x=>"

"+x+"

").join("");const div_infos=document.createElement("div");addClass(div_infos,"infos");div_infos.innerHTML="

<h2>Search Tricks</h2>

"+infos;const rustdoc_version=document.createElement("span");rustdoc_version.className="bottom";const rustdoc_version_code=document.createElement("code");rustdoc_version_code.innerText="rustdoc "+getVar("rustdoc-version");rustdoc_version.appendChild(rustdoc_version_code);const container=document.createElement("div");if(!isHelpPage){container.className="popover"}container.id="help";container.style.display="none";const side_by_side=document.createElement("div");side_by_side.className="side-by-side";side_by_side.appendChild(div_shortcuts);side_by_side.appendChild(div_infos);container.appendChild(book_info);container.appendChild(side_by_side);container.appendChild(rustdoc_version);if(isHelpPage){const help_section=document.createElement("section");help_section.appendChild(container);document.getElementById("main-content").appendChild(help_section);container.style.display="block"}else{const help_button=getHelpButton();help_button.appendChild(container);container.onblur=helpBlurHandler;help_button.onblur=helpBlurHandler;help_button.children[0].onblur=helpBlurHandler}return container}window.hideAllModals=switchFocus=>{hideSidebar();window.hidePopoverMenus();hideTooltip(switchFocus)};window.hidePopoverMenus=()=>{onEachLazy(document.querySelectorAll(".search-form .popover"),elem=>{elem.style.display="none"})};function getHelpMenu(buildNeeded){let menu=getHelpButton().querySelector(".popover");if(!menu&&buildNeeded){menu=buildHelpMenu()}return menu}function showHelp(){getHelpButton().querySelector("a").focus();const menu=getHelpMenu(true);if(menu.style.display==="none"){window.hideAllModals();menu.style.display=""}}if(isHelpPage){showHelp();document.querySelector(`#${HELP_BUTTON_ID} > a`).addEventListener("click",event=>{const target=event.target;if(target.tagName!=="A"||target.parentElement.id!==HELP_BUTTON_ID||event.ctrlKey||event.altKey||event.metaKey){return}event.preventDefault()})}else{document.querySelector(`#${HELP_BUTTON_ID} > a`).addEventListener("click",event=>{const target=event.target;if(target.tagName!=="A"||target.parentElement.id!==HELP_BUTTON_ID||event.ctrlKey||event.altKey||event.metaKey){return}event.preventDefault();const menu=getHelpMenu(true);const shouldShowHelp=menu.style.display==="none";if(shouldShowHelp){showHelp()}else{window.hidePopoverMenus()}})}setMobileTopbar();addSidebarItems();addSidebarCrates();onHashChange(null);window.addEventListener("hashchange",onHashChange);searchState.setup()}());(function(){const SIDEBAR_MIN=100;const SIDEBAR_MAX=500;const RUSTDOC_MOBILE_BREAKPOINT=700;const BODY_MIN=400;const SIDEBAR_VANISH_THRESHOLD=SIDEBAR_MIN/2;const sidebarButton=document.getElementById("sidebar-button");if(sidebarButton){sidebarButton.addEventListener("click",e=>{removeClass(document.documentElement,"hide-sidebar");updateLocalStorage("hide-sidebar","false");if(document.querySelector(".rustdoc.src")){window.rustdocToggleSrcSidebar()}e.preventDefault()})}let currentPointerId=null;let desiredSidebarSize=null;let pendingSidebarResizingFrame=false;const resizer=document.querySelector(".sidebar-resizer");const sidebar=document.querySelector(".sidebar");if(!resizer||!sidebar){return}const isSrcPage=hasClass(document.body,"src");function 
hideSidebar(){if(isSrcPage){window.rustdocCloseSourceSidebar();updateLocalStorage("src-sidebar-width",null);document.documentElement.style.removeProperty("--src-sidebar-width");sidebar.style.removeProperty("--src-sidebar-width");resizer.style.removeProperty("--src-sidebar-width")}else{addClass(document.documentElement,"hide-sidebar");updateLocalStorage("hide-sidebar","true");updateLocalStorage("desktop-sidebar-width",null);document.documentElement.style.removeProperty("--desktop-sidebar-width");sidebar.style.removeProperty("--desktop-sidebar-width");resizer.style.removeProperty("--desktop-sidebar-width")}}function showSidebar(){if(isSrcPage){window.rustdocShowSourceSidebar()}else{removeClass(document.documentElement,"hide-sidebar");updateLocalStorage("hide-sidebar","false")}}function changeSidebarSize(size){if(isSrcPage){updateLocalStorage("src-sidebar-width",size);sidebar.style.setProperty("--src-sidebar-width",size+"px");resizer.style.setProperty("--src-sidebar-width",size+"px")}else{updateLocalStorage("desktop-sidebar-width",size);sidebar.style.setProperty("--desktop-sidebar-width",size+"px");resizer.style.setProperty("--desktop-sidebar-width",size+"px")}}function isSidebarHidden(){return isSrcPage?!hasClass(document.documentElement,"src-sidebar-expanded"):hasClass(document.documentElement,"hide-sidebar")}function resize(e){if(currentPointerId===null||currentPointerId!==e.pointerId){return}e.preventDefault();const pos=e.clientX-3;if(pos=SIDEBAR_MIN){if(isSidebarHidden()){showSidebar()}const constrainedPos=Math.min(pos,window.innerWidth-BODY_MIN,SIDEBAR_MAX);changeSidebarSize(constrainedPos);desiredSidebarSize=constrainedPos;if(pendingSidebarResizingFrame!==false){clearTimeout(pendingSidebarResizingFrame)}pendingSidebarResizingFrame=setTimeout(()=>{if(currentPointerId===null||pendingSidebarResizingFrame===false){return}pendingSidebarResizingFrame=false;document.documentElement.style.setProperty("--resizing-sidebar-width",desiredSidebarSize+"px",)},100)}}window.addEventListener("resize",()=>{if(window.innerWidth=(window.innerWidth-BODY_MIN)){changeSidebarSize(window.innerWidth-BODY_MIN)}else if(desiredSidebarSize!==null&&desiredSidebarSize>SIDEBAR_MIN){changeSidebarSize(desiredSidebarSize)}});function stopResize(e){if(currentPointerId===null){return}if(e){e.preventDefault()}desiredSidebarSize=sidebar.getBoundingClientRect().width;removeClass(resizer,"active");window.removeEventListener("pointermove",resize,false);window.removeEventListener("pointerup",stopResize,false);removeClass(document.documentElement,"sidebar-resizing");document.documentElement.style.removeProperty("--resizing-sidebar-width");if(resizer.releasePointerCapture){resizer.releasePointerCapture(currentPointerId);currentPointerId=null}}function initResize(e){if(currentPointerId!==null||e.altKey||e.ctrlKey||e.metaKey||e.button!==0){return}if(resizer.setPointerCapture){resizer.setPointerCapture(e.pointerId);if(!resizer.hasPointerCapture(e.pointerId)){resizer.releasePointerCapture(e.pointerId);return}currentPointerId=e.pointerId}window.hideAllModals(false);e.preventDefault();window.addEventListener("pointermove",resize,false);window.addEventListener("pointercancel",stopResize,false);window.addEventListener("pointerup",stopResize,false);addClass(resizer,"active");addClass(document.documentElement,"sidebar-resizing");const 
pos=e.clientX-sidebar.offsetLeft-3;document.documentElement.style.setProperty("--resizing-sidebar-width",pos+"px");desiredSidebarSize=null}resizer.addEventListener("pointerdown",initResize,false)}());(function(){function copyContentToClipboard(content){const el=document.createElement("textarea");el.value=content;el.setAttribute("readonly","");el.style.position="absolute";el.style.left="-9999px";document.body.appendChild(el);el.select();document.execCommand("copy");document.body.removeChild(el)}function copyButtonAnimation(button){button.classList.add("clicked");if(button.reset_button_timeout!==undefined){window.clearTimeout(button.reset_button_timeout)}button.reset_button_timeout=window.setTimeout(()=>{button.reset_button_timeout=undefined;button.classList.remove("clicked")},1000)}const but=document.getElementById("copy-path");if(!but){return}but.onclick=()=>{const parent=but.parentElement;const path=[];onEach(parent.childNodes,child=>{if(child.tagName==="A"){path.push(child.textContent)}});copyContentToClipboard(path.join("::"));copyButtonAnimation(but)};function copyCode(codeElem){if(!codeElem){return}copyContentToClipboard(codeElem.textContent)}function getExampleWrap(event){let elem=event.target;while(!hasClass(elem,"example-wrap")){if(elem===document.body||elem.tagName==="A"||elem.tagName==="BUTTON"||hasClass(elem,"docblock")){return null}elem=elem.parentElement}return elem}function addCopyButton(event){const elem=getExampleWrap(event);if(elem===null){return}elem.removeEventListener("mouseover",addCopyButton);let parent=elem.querySelector(".button-holder");if(!parent){parent=document.createElement("div");parent.className="button-holder"}const runButton=elem.querySelector(".test-arrow");if(runButton!==null){parent.appendChild(runButton)}elem.appendChild(parent);const copyButton=document.createElement("button");copyButton.className="copy-button";copyButton.title="Copy code to clipboard";copyButton.addEventListener("click",()=>{copyCode(elem.querySelector("pre > code"));copyButtonAnimation(copyButton)});parent.appendChild(copyButton)}function showHideCodeExampleButtons(event){const elem=getExampleWrap(event);if(elem===null){return}let buttons=elem.querySelector(".button-holder");if(buttons===null){addCopyButton(event);buttons=elem.querySelector(".button-holder");if(buttons===null){return}}buttons.classList.toggle("keep-visible")}onEachLazy(document.querySelectorAll(".docblock .example-wrap"),elem=>{elem.addEventListener("mouseover",addCopyButton);elem.addEventListener("click",showHideCodeExampleButtons)})}()) \ No newline at end of file diff --git a/static.files/normalize-76eba96aa4d2e634.css b/static.files/normalize-76eba96aa4d2e634.css new file mode 100644 index 000000000..469959f13 --- /dev/null +++ b/static.files/normalize-76eba96aa4d2e634.css @@ -0,0 +1,2 @@ + /*! 
normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */ +html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:0.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-0.25em}sup{top:-0.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type="button"],[type="reset"],[type="submit"],button{-webkit-appearance:button}[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type="button"]:-moz-focusring,[type="reset"]:-moz-focusring,[type="submit"]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:0.35em 0.75em 0.625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type="checkbox"],[type="radio"]{box-sizing:border-box;padding:0}[type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{height:auto}[type="search"]{-webkit-appearance:textfield;outline-offset:-2px}[type="search"]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none} \ No newline at end of file diff --git a/static.files/noscript-40f72c9259523cb3.css b/static.files/noscript-40f72c9259523cb3.css new file mode 100644 index 000000000..06afc77fd --- /dev/null +++ b/static.files/noscript-40f72c9259523cb3.css @@ -0,0 +1 @@ + #main-content .attributes{margin-left:0 !important;}#copy-path,#sidebar-button,.sidebar-resizer{display:none !important;}nav.sub{display:none;}.src .sidebar{display:none;}.notable-traits{display:none;}:root,:root:not([data-theme]){--main-background-color:white;--main-color:black;--settings-input-color:#2196f3;--settings-input-border-color:#717171;--settings-button-color:#000;--settings-button-border-focus:#717171;--sidebar-background-color:#f5f5f5;--sidebar-background-color-hover:#e0e0e0;--code-block-background-color:#f5f5f5;--scrollbar-track-background-color:#dcdcdc;--scrollbar-thumb-background-color:rgba(36,37,39,0.6);--scrollbar-color:rgba(36,37,39,0.6) 
#d9d9d9;--headings-border-bottom-color:#ddd;--border-color:#e0e0e0;--button-background-color:#fff;--right-side-color:grey;--code-attribute-color:#999;--toggles-color:#999;--toggle-filter:none;--mobile-sidebar-menu-filter:none;--search-input-focused-border-color:#66afe9;--copy-path-button-color:#999;--copy-path-img-filter:invert(50%);--copy-path-img-hover-filter:invert(35%);--code-example-button-color:#7f7f7f;--code-example-button-hover-color:#595959;--codeblock-error-hover-color:rgb(255,0,0);--codeblock-error-color:rgba(255,0,0,.5);--codeblock-ignore-hover-color:rgb(255,142,0);--codeblock-ignore-color:rgba(255,142,0,.6);--warning-border-color:#ff8e00;--type-link-color:#ad378a;--trait-link-color:#6e4fc9;--assoc-item-link-color:#3873ad;--function-link-color:#ad7c37;--macro-link-color:#068000;--keyword-link-color:#3873ad;--mod-link-color:#3873ad;--link-color:#3873ad;--sidebar-link-color:#356da4;--sidebar-current-link-background-color:#fff;--search-result-link-focus-background-color:#ccc;--search-result-border-color:#aaa3;--search-color:#000;--search-error-code-background-color:#d0cccc;--search-results-alias-color:#000;--search-results-grey-color:#999;--search-tab-title-count-color:#888;--search-tab-button-not-selected-border-top-color:#e6e6e6;--search-tab-button-not-selected-background:#e6e6e6;--search-tab-button-selected-border-top-color:#0089ff;--search-tab-button-selected-background:#fff;--settings-menu-filter:none;--stab-background-color:#fff5d6;--stab-code-color:#000;--code-highlight-kw-color:#8959a8;--code-highlight-kw-2-color:#4271ae;--code-highlight-lifetime-color:#b76514;--code-highlight-prelude-color:#4271ae;--code-highlight-prelude-val-color:#c82829;--code-highlight-number-color:#718c00;--code-highlight-string-color:#718c00;--code-highlight-literal-color:#c82829;--code-highlight-attribute-color:#c82829;--code-highlight-self-color:#c82829;--code-highlight-macro-color:#3e999f;--code-highlight-question-mark-color:#ff9011;--code-highlight-comment-color:#8e908c;--code-highlight-doc-comment-color:#4d4d4c;--src-line-numbers-span-color:#c67e2d;--src-line-number-highlighted-background-color:#fdffd3;--target-background-color:#fdffd3;--target-border-color:#ad7c37;--kbd-color:#000;--kbd-background:#fafbfc;--kbd-box-shadow-color:#c6cbd1;--rust-logo-filter:initial;--crate-search-div-filter:invert(100%) sepia(0%) saturate(4223%) hue-rotate(289deg) brightness(114%) contrast(76%);--crate-search-div-hover-filter:invert(44%) sepia(18%) saturate(23%) hue-rotate(317deg) brightness(96%) contrast(93%);--crate-search-hover-border:#717171;--src-sidebar-background-selected:#fff;--src-sidebar-background-hover:#e0e0e0;--table-alt-row-background-color:#f5f5f5;--codeblock-link-background:#eee;--scrape-example-toggle-line-background:#ccc;--scrape-example-toggle-line-hover-background:#999;--scrape-example-code-line-highlight:#fcffd6;--scrape-example-code-line-highlight-focus:#f6fdb0;--scrape-example-help-border-color:#555;--scrape-example-help-color:#333;--scrape-example-help-hover-border-color:#000;--scrape-example-help-hover-color:#000;--scrape-example-code-wrapper-background-start:rgba(255,255,255,1);--scrape-example-code-wrapper-background-end:rgba(255,255,255,0);--sidebar-resizer-hover:hsl(207,90%,66%);--sidebar-resizer-active:hsl(207,90%,54%);}@media 
(prefers-color-scheme:dark){:root,:root:not([data-theme]){--main-background-color:#353535;--main-color:#ddd;--settings-input-color:#2196f3;--settings-input-border-color:#999;--settings-button-color:#000;--settings-button-border-focus:#ffb900;--sidebar-background-color:#505050;--sidebar-background-color-hover:#676767;--code-block-background-color:#2A2A2A;--scrollbar-track-background-color:#717171;--scrollbar-thumb-background-color:rgba(32,34,37,.6);--scrollbar-color:rgba(32,34,37,.6) #5a5a5a;--headings-border-bottom-color:#d2d2d2;--border-color:#e0e0e0;--button-background-color:#f0f0f0;--right-side-color:grey;--code-attribute-color:#999;--toggles-color:#999;--toggle-filter:invert(100%);--mobile-sidebar-menu-filter:invert(100%);--search-input-focused-border-color:#008dfd;--copy-path-button-color:#999;--copy-path-img-filter:invert(50%);--copy-path-img-hover-filter:invert(65%);--code-example-button-color:#7f7f7f;--code-example-button-hover-color:#a5a5a5;--codeblock-error-hover-color:rgb(255,0,0);--codeblock-error-color:rgba(255,0,0,.5);--codeblock-ignore-hover-color:rgb(255,142,0);--codeblock-ignore-color:rgba(255,142,0,.6);--warning-border-color:#ff8e00;--type-link-color:#2dbfb8;--trait-link-color:#b78cf2;--assoc-item-link-color:#d2991d;--function-link-color:#2bab63;--macro-link-color:#09bd00;--keyword-link-color:#d2991d;--mod-link-color:#d2991d;--link-color:#d2991d;--sidebar-link-color:#fdbf35;--sidebar-current-link-background-color:#444;--search-result-link-focus-background-color:#616161;--search-result-border-color:#aaa3;--search-color:#111;--search-error-code-background-color:#484848;--search-results-alias-color:#fff;--search-results-grey-color:#ccc;--search-tab-title-count-color:#888;--search-tab-button-not-selected-border-top-color:#252525;--search-tab-button-not-selected-background:#252525;--search-tab-button-selected-border-top-color:#0089ff;--search-tab-button-selected-background:#353535;--stab-background-color:#314559;--stab-code-color:#e6e1cf;--code-highlight-kw-color:#ab8ac1;--code-highlight-kw-2-color:#769acb;--code-highlight-lifetime-color:#d97f26;--code-highlight-prelude-color:#769acb;--code-highlight-prelude-val-color:#ee6868;--code-highlight-number-color:#83a300;--code-highlight-string-color:#83a300;--code-highlight-literal-color:#ee6868;--code-highlight-attribute-color:#ee6868;--code-highlight-self-color:#ee6868;--code-highlight-macro-color:#3e999f;--code-highlight-question-mark-color:#ff9011;--code-highlight-comment-color:#8d8d8b;--code-highlight-doc-comment-color:#8ca375;--src-line-numbers-span-color:#3b91e2;--src-line-number-highlighted-background-color:#0a042f;--target-background-color:#494a3d;--target-border-color:#bb7410;--kbd-color:#000;--kbd-background:#fafbfc;--kbd-box-shadow-color:#c6cbd1;--rust-logo-filter:drop-shadow(1px 0 0px #fff) drop-shadow(0 1px 0 #fff) drop-shadow(-1px 0 0 #fff) drop-shadow(0 -1px 0 #fff);--crate-search-div-filter:invert(94%) sepia(0%) saturate(721%) hue-rotate(255deg) brightness(90%) contrast(90%);--crate-search-div-hover-filter:invert(69%) sepia(60%) saturate(6613%) hue-rotate(184deg) brightness(100%) 
contrast(91%);--crate-search-hover-border:#2196f3;--src-sidebar-background-selected:#333;--src-sidebar-background-hover:#444;--table-alt-row-background-color:#2a2a2a;--codeblock-link-background:#333;--scrape-example-toggle-line-background:#999;--scrape-example-toggle-line-hover-background:#c5c5c5;--scrape-example-code-line-highlight:#5b3b01;--scrape-example-code-line-highlight-focus:#7c4b0f;--scrape-example-help-border-color:#aaa;--scrape-example-help-color:#eee;--scrape-example-help-hover-border-color:#fff;--scrape-example-help-hover-color:#fff;--scrape-example-code-wrapper-background-start:rgba(53,53,53,1);--scrape-example-code-wrapper-background-end:rgba(53,53,53,0);--sidebar-resizer-hover:hsl(207,30%,54%);--sidebar-resizer-active:hsl(207,90%,54%);}} \ No newline at end of file diff --git a/static.files/rust-logo-151179464ae7ed46.svg b/static.files/rust-logo-151179464ae7ed46.svg new file mode 100644 index 000000000..62424d8ff --- /dev/null +++ b/static.files/rust-logo-151179464ae7ed46.svg @@ -0,0 +1,61 @@ + + + diff --git a/static.files/rustdoc-405f8b29f52305f8.css b/static.files/rustdoc-405f8b29f52305f8.css new file mode 100644 index 000000000..25af92136 --- /dev/null +++ b/static.files/rustdoc-405f8b29f52305f8.css @@ -0,0 +1,47 @@ + :root{--nav-sub-mobile-padding:8px;--search-typename-width:6.75rem;--desktop-sidebar-width:200px;--src-sidebar-width:300px;--desktop-sidebar-z-index:100;--sidebar-elems-left-padding:24px;--clipboard-image:url('data:image/svg+xml,\ +\ +\ +');--copy-path-height:34px;--copy-path-width:33px;--checkmark-image:url('data:image/svg+xml,\ +\ +');--button-left-margin:4px;--button-border-radius:2px;}@font-face {font-family:'Fira Sans';font-style:normal;font-weight:400;src:local('Fira Sans'),url("FiraSans-Regular-018c141bf0843ffd.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Fira Sans';font-style:normal;font-weight:500;src:local('Fira Sans Medium'),url("FiraSans-Medium-8f9a781e4970d388.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Source Serif 4';font-style:normal;font-weight:400;src:local('Source Serif 4'),url("SourceSerif4-Regular-46f98efaafac5295.ttf.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Source Serif 4';font-style:italic;font-weight:400;src:local('Source Serif 4 Italic'),url("SourceSerif4-It-acdfaf1a8af734b1.ttf.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Source Serif 4';font-style:normal;font-weight:700;src:local('Source Serif 4 Bold'),url("SourceSerif4-Bold-a2c9cd1067f8b328.ttf.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Source Code Pro';font-style:normal;font-weight:400;src:url("SourceCodePro-Regular-562dcc5011b6de7d.ttf.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Source Code Pro';font-style:italic;font-weight:400;src:url("SourceCodePro-It-1cc31594bf4f1f79.ttf.woff2") format("woff2");font-display:swap;}@font-face {font-family:'Source Code Pro';font-style:normal;font-weight:600;src:url("SourceCodePro-Semibold-d899c5a5c4aeb14a.ttf.woff2") format("woff2");font-display:swap;}@font-face {font-family:'NanumBarunGothic';src:url("NanumBarunGothic-0f09457c7a19b7c6.ttf.woff2") format("woff2");font-display:swap;unicode-range:U+AC00-D7AF,U+1100-11FF,U+3130-318F,U+A960-A97F,U+D7B0-D7FF;}*{box-sizing:border-box;}body{font:1rem/1.5 "Source Serif 
4",NanumBarunGothic,serif;margin:0;position:relative;overflow-wrap:break-word;overflow-wrap:anywhere;font-feature-settings:"kern","liga";background-color:var(--main-background-color);color:var(--main-color);}h1{font-size:1.5rem;}h2{font-size:1.375rem;}h3{font-size:1.25rem;}h1,h2,h3,h4,h5,h6{font-weight:500;}h1,h2,h3,h4{margin:25px 0 15px 0;padding-bottom:6px;}.docblock h3,.docblock h4,h5,h6{margin:15px 0 5px 0;}.docblock>h2:first-child,.docblock>h3:first-child,.docblock>h4:first-child,.docblock>h5:first-child,.docblock>h6:first-child{margin-top:0;}.main-heading h1{margin:0;padding:0;flex-grow:1;overflow-wrap:break-word;overflow-wrap:anywhere;}.main-heading{display:flex;flex-wrap:wrap;padding-bottom:6px;margin-bottom:15px;}.content h2,.top-doc .docblock>h3,.top-doc .docblock>h4{border-bottom:1px solid var(--headings-border-bottom-color);}h1,h2{line-height:1.25;padding-top:3px;padding-bottom:9px;}h3.code-header{font-size:1.125rem;}h4.code-header{font-size:1rem;}.code-header{font-weight:600;margin:0;padding:0;white-space:pre-wrap;}#crate-search,h1,h2,h3,h4,h5,h6,.sidebar,.mobile-topbar,.search-input,.search-results .result-name,.item-name>a,.out-of-band,span.since,a.src,#help-button>a,summary.hideme,.scraped-example-list,ul.all-items{font-family:"Fira Sans",Arial,NanumBarunGothic,sans-serif;}#toggle-all-docs,a.anchor,.section-header a,#src-sidebar a,.rust a,.sidebar h2 a,.sidebar h3 a,.mobile-topbar h2 a,h1 a,.search-results a,.stab,.result-name i{color:var(--main-color);}span.enum,a.enum,span.struct,a.struct,span.union,a.union,span.primitive,a.primitive,span.type,a.type,span.foreigntype,a.foreigntype{color:var(--type-link-color);}span.trait,a.trait,span.traitalias,a.traitalias{color:var(--trait-link-color);}span.associatedtype,a.associatedtype,span.constant,a.constant,span.static,a.static{color:var(--assoc-item-link-color);}span.fn,a.fn,span.method,a.method,span.tymethod,a.tymethod{color:var(--function-link-color);}span.attr,a.attr,span.derive,a.derive,span.macro,a.macro{color:var(--macro-link-color);}span.mod,a.mod{color:var(--mod-link-color);}span.keyword,a.keyword{color:var(--keyword-link-color);}a{color:var(--link-color);text-decoration:none;}ol,ul{padding-left:24px;}ul ul,ol ul,ul ol,ol ol{margin-bottom:.625em;}p,.docblock>.warning{margin:0 0 .75em 0;}p:last-child,.docblock>.warning:last-child{margin:0;}button{padding:1px 6px;cursor:pointer;}button#toggle-all-docs{padding:0;background:none;border:none;-webkit-appearance:none;opacity:1;}.rustdoc{display:flex;flex-direction:row;flex-wrap:nowrap;}main{position:relative;flex-grow:1;padding:10px 15px 40px 45px;min-width:0;}.src main{padding:15px;}.width-limiter{max-width:960px;margin-right:auto;}details:not(.toggle) summary{margin-bottom:.6em;}code,pre,.code-header{font-family:"Source Code Pro",monospace;}.docblock code,.docblock-short code{border-radius:3px;padding:0 0.125em;}.docblock pre code,.docblock-short pre code{padding:0;}pre{padding:14px;line-height:1.5;}pre.item-decl{overflow-x:auto;}.item-decl .type-contents-toggle{contain:initial;}.src .content pre{padding:20px;}.rustdoc.src .example-wrap .src-line-numbers{padding:20px 0 20px 4px;}img{max-width:100%;}.logo-container{line-height:0;display:block;}.rust-logo{filter:var(--rust-logo-filter);}.sidebar{font-size:0.875rem;flex:0 0 var(--desktop-sidebar-width);width:var(--desktop-sidebar-width);overflow-y:scroll;overscroll-behavior:contain;position:sticky;height:100vh;top:0;left:0;z-index:var(--desktop-sidebar-z-index);}.rustdoc.src .sidebar{flex-basis:50px;width:50px;border-right:1px 
solid;overflow-x:hidden;overflow-y:hidden;}.hide-sidebar .sidebar,.hide-sidebar .sidebar-resizer{display:none;}.sidebar-resizer{touch-action:none;width:9px;cursor:col-resize;z-index:calc(var(--desktop-sidebar-z-index) + 1);position:fixed;height:100%;left:calc(var(--desktop-sidebar-width) + 1px);}.rustdoc.src .sidebar-resizer{left:49px;}.src-sidebar-expanded .src .sidebar-resizer{left:var(--src-sidebar-width);}.sidebar-resizing{-moz-user-select:none;-webkit-user-select:none;-ms-user-select:none;user-select:none;}.sidebar-resizing*{cursor:col-resize !important;}.sidebar-resizing .sidebar{position:fixed;}.sidebar-resizing>body{padding-left:var(--resizing-sidebar-width);}.sidebar-resizer:hover,.sidebar-resizer:active,.sidebar-resizer:focus,.sidebar-resizer.active{width:10px;margin:0;left:var(--desktop-sidebar-width);border-left:solid 1px var(--sidebar-resizer-hover);}.src-sidebar-expanded .rustdoc.src .sidebar-resizer:hover,.src-sidebar-expanded .rustdoc.src .sidebar-resizer:active,.src-sidebar-expanded .rustdoc.src .sidebar-resizer:focus,.src-sidebar-expanded .rustdoc.src .sidebar-resizer.active{left:calc(var(--src-sidebar-width) - 1px);}@media (pointer:coarse){.sidebar-resizer{display:none !important;}}.sidebar-resizer.active{padding:0 140px;width:2px;margin-left:-140px;border-left:none;}.sidebar-resizer.active:before{border-left:solid 2px var(--sidebar-resizer-active);display:block;height:100%;content:"";}.sidebar,.mobile-topbar,.sidebar-menu-toggle,#src-sidebar{background-color:var(--sidebar-background-color);}.src .sidebar>*{visibility:hidden;}.src-sidebar-expanded .src .sidebar{overflow-y:auto;flex-basis:var(--src-sidebar-width);width:var(--src-sidebar-width);}.src-sidebar-expanded .src .sidebar>*{visibility:visible;}#all-types{margin-top:1em;}*{scrollbar-width:initial;scrollbar-color:var(--scrollbar-color);}.sidebar{scrollbar-width:thin;scrollbar-color:var(--scrollbar-color);}::-webkit-scrollbar{width:12px;}.sidebar::-webkit-scrollbar{width:8px;}::-webkit-scrollbar-track{-webkit-box-shadow:inset 0;background-color:var(--scrollbar-track-background-color);}.sidebar::-webkit-scrollbar-track{background-color:var(--scrollbar-track-background-color);}::-webkit-scrollbar-thumb,.sidebar::-webkit-scrollbar-thumb{background-color:var(--scrollbar-thumb-background-color);}.hidden{display:none !important;}.logo-container>img{height:48px;width:48px;}ul.block,.block li,.block ul{padding:0;margin:0;list-style:none;}.block ul a{padding-left:1rem;}.sidebar-elems a,.sidebar>h2 a{display:block;padding:0.25rem;margin-right:0.25rem;border-left:solid var(--sidebar-elems-left-padding) transparent;margin-left:calc(-0.25rem - var(--sidebar-elems-left-padding));background-clip:border-box;}.hide-toc #rustdoc-toc,.hide-toc .in-crate{display:none;}.hide-modnav #rustdoc-modnav{display:none;}.sidebar h2{text-wrap:balance;overflow-wrap:anywhere;padding:0;margin:0.7rem 0;}.sidebar h3{text-wrap:balance;overflow-wrap:anywhere;font-size:1.125rem;padding:0;margin:0;}.sidebar-elems,.sidebar>.version,.sidebar>h2{padding-left:var(--sidebar-elems-left-padding);}.sidebar a{color:var(--sidebar-link-color);}.sidebar .current,.sidebar .current a,.sidebar-crate a.logo-container:hover+h2 a,.sidebar a:hover:not(.logo-container){background-color:var(--sidebar-current-link-background-color);}.sidebar-elems .block{margin-bottom:2em;}.sidebar-elems .block li a{white-space:nowrap;text-overflow:ellipsis;overflow:hidden;}.sidebar-crate{display:flex;align-items:center;justify-content:center;margin:14px 32px 
1rem;row-gap:10px;column-gap:32px;flex-wrap:wrap;}.sidebar-crate h2{flex-grow:1;margin:0 -8px;align-self:start;}.sidebar-crate .logo-container{margin:0 calc(-16px - var(--sidebar-elems-left-padding));padding:0 var(--sidebar-elems-left-padding);text-align:center;}.sidebar-crate .logo-container img{margin-top:-16px;border-top:solid 16px transparent;box-sizing:content-box;position:relative;background-clip:border-box;z-index:1;}.sidebar-crate h2 a{display:block;border-left:solid var(--sidebar-elems-left-padding) transparent;background-clip:border-box;margin:0 calc(-24px + 0.25rem) 0 calc(-0.2rem - var(--sidebar-elems-left-padding));padding:calc((16px - 0.57rem ) / 2 ) 0.25rem;padding-left:0.2rem;}.sidebar-crate h2 .version{display:block;font-weight:normal;font-size:1rem;overflow-wrap:break-word;}.sidebar-crate+.version{margin-top:-1rem;margin-bottom:1rem;}.mobile-topbar{display:none;}.rustdoc .example-wrap{display:flex;position:relative;margin-bottom:10px;}.rustdoc .example-wrap>pre,.rustdoc .scraped-example .src-line-numbers,.rustdoc .scraped-example .src-line-numbers>pre{border-radius:6px;}.rustdoc .example-wrap>.example-line-numbers,.rustdoc .scraped-example .src-line-numbers,.rustdoc .scraped-example .src-line-numbers>pre{border-top-right-radius:0;border-bottom-right-radius:0;}.rustdoc .example-wrap>.example-line-numbers+pre,.rustdoc .scraped-example .rust{border-top-left-radius:0;border-bottom-left-radius:0;}.rustdoc .scraped-example{position:relative;}.rustdoc .example-wrap:last-child{margin-bottom:0px;}.rustdoc .example-wrap pre{margin:0;flex-grow:1;}.scraped-example:not(.expanded) .example-wrap{max-height:calc(1.5em * 5 + 10px);}.rustdoc:not(.src) .scraped-example:not(.expanded) .src-line-numbers,.rustdoc:not(.src) .scraped-example:not(.expanded) .src-line-numbers>pre,.rustdoc:not(.src) .scraped-example:not(.expanded) pre.rust{padding-bottom:0;overflow:auto hidden;}.rustdoc:not(.src) .scraped-example .src-line-numbers{padding-top:0;}.rustdoc:not(.src) .scraped-example.expanded .src-line-numbers{padding-bottom:0;}.rustdoc:not(.src) .example-wrap pre{overflow:auto;}.rustdoc .example-wrap pre.example-line-numbers,.rustdoc .example-wrap .src-line-numbers{min-width:fit-content;flex-grow:0;text-align:right;-webkit-user-select:none;user-select:none;padding:14px 8px;color:var(--src-line-numbers-span-color);}.rustdoc .scraped-example .src-line-numbers{padding:14px 0;}.src-line-numbers a,.src-line-numbers span{color:var(--src-line-numbers-span-color);padding:0 8px;}.src-line-numbers :target{background-color:transparent;border-right:none;padding:0 8px;}.src-line-numbers .line-highlighted{background-color:var(--src-line-number-highlighted-background-color);}.search-loading{text-align:center;}.docblock-short{overflow-wrap:break-word;overflow-wrap:anywhere;}.docblock :not(pre)>code,.docblock-short code{white-space:pre-wrap;}.top-doc .docblock h2{font-size:1.375rem;}.top-doc .docblock h3{font-size:1.25rem;}.top-doc .docblock h4,.top-doc .docblock h5{font-size:1.125rem;}.top-doc .docblock h6{font-size:1rem;}.docblock h5{font-size:1rem;}.docblock h6{font-size:0.875rem;}.docblock{margin-left:24px;position:relative;}.docblock>:not(.more-examples-toggle):not(.example-wrap){max-width:100%;overflow-x:auto;}.out-of-band{flex-grow:0;font-size:1.125rem;}.docblock code,.docblock-short code,pre,.rustdoc.src .example-wrap{background-color:var(--code-block-background-color);}#main-content{position:relative;}.docblock table{margin:.5em 0;border-collapse:collapse;}.docblock table td,.docblock table 
th{padding:.5em;border:1px solid var(--border-color);}.docblock table tbody tr:nth-child(2n){background:var(--table-alt-row-background-color);}.docblock .stab,.docblock-short .stab{display:inline-block;}div.where{white-space:pre-wrap;font-size:0.875rem;}.item-info{display:block;margin-left:24px;}.item-info code{font-size:0.875rem;}#main-content>.item-info{margin-left:0;}nav.sub{flex-grow:1;flex-flow:row nowrap;margin:4px 0 25px 0;display:flex;align-items:center;}.search-form{position:relative;display:flex;height:34px;flex-grow:1;}.src nav.sub{margin:0 0 15px 0;}.section-header{display:block;position:relative;}.section-header:hover>.anchor,.impl:hover>.anchor,.trait-impl:hover>.anchor,.variant:hover>.anchor{display:initial;}.anchor{display:none;position:absolute;left:-0.5em;background:none !important;}.anchor.field{left:-5px;}.section-header>.anchor{left:-15px;padding-right:8px;}h2.section-header>.anchor{padding-right:6px;}a.doc-anchor{color:var(--main-color);display:none;position:absolute;left:-17px;padding-right:10px;padding-left:3px;}*:hover>.doc-anchor{display:block;}.top-doc>.docblock>*:first-child>.doc-anchor{display:none !important;}.main-heading a:hover,.example-wrap .rust a:hover,.all-items a:hover,.docblock a:not(.scrape-help):not(.tooltip):hover:not(.doc-anchor),.docblock-short a:not(.scrape-help):not(.tooltip):hover,.item-info a{text-decoration:underline;}.crate.block li.current a{font-weight:500;}table,.item-table{overflow-wrap:break-word;}.item-table{display:table;padding:0;margin:0;width:100%;}.item-table>li{display:table-row;}.item-table>li>div{display:table-cell;}.item-table>li>.item-name{padding-right:1.25rem;}.search-results-title{margin-top:0;white-space:nowrap;display:flex;align-items:baseline;}#crate-search-div{position:relative;min-width:5em;}#crate-search{min-width:115px;padding:0 23px 0 4px;max-width:100%;text-overflow:ellipsis;border:1px solid var(--border-color);border-radius:4px;outline:none;cursor:pointer;-moz-appearance:none;-webkit-appearance:none;text-indent:0.01px;background-color:var(--main-background-color);color:inherit;line-height:1.5;font-weight:500;}#crate-search:hover,#crate-search:focus{border-color:var(--crate-search-hover-border);}#crate-search-div::after{pointer-events:none;width:100%;height:100%;position:absolute;top:0;left:0;content:"";background-repeat:no-repeat;background-size:20px;background-position:calc(100% - 2px) 56%;background-image:url('data:image/svg+xml, \ + ');filter:var(--crate-search-div-filter);}#crate-search-div:hover::after,#crate-search-div:focus-within::after{filter:var(--crate-search-div-hover-filter);}#crate-search>option{font-size:1rem;}.search-input{-webkit-appearance:none;outline:none;border:1px solid var(--border-color);border-radius:2px;padding:8px;font-size:1rem;flex-grow:1;background-color:var(--button-background-color);color:var(--search-color);}.search-input:focus{border-color:var(--search-input-focused-border-color);}.search-results{display:none;}.search-results.active{display:block;}.search-results>a{display:flex;margin-left:2px;margin-right:2px;border-bottom:1px solid var(--search-result-border-color);gap:1em;}.search-results>a>div.desc{white-space:nowrap;text-overflow:ellipsis;overflow:hidden;flex:2;}.search-results a:hover,.search-results a:focus{background-color:var(--search-result-link-focus-background-color);}.search-results .result-name{display:flex;align-items:center;justify-content:start;flex:3;}.search-results .result-name .alias{color:var(--search-results-alias-color);}.search-results .result-name 
.grey{color:var(--search-results-grey-color);}.search-results .result-name .typename{color:var(--search-results-grey-color);font-size:0.875rem;width:var(--search-typename-width);}.search-results .result-name .path{word-break:break-all;max-width:calc(100% - var(--search-typename-width));display:inline-block;}.search-results .result-name .path>*{display:inline;}.popover{position:absolute;top:100%;right:0;z-index:calc(var(--desktop-sidebar-z-index) + 1);margin-top:7px;border-radius:3px;border:1px solid var(--border-color);background-color:var(--main-background-color);color:var(--main-color);--popover-arrow-offset:11px;}.popover::before{content:'';position:absolute;right:var(--popover-arrow-offset);border:solid var(--border-color);border-width:1px 1px 0 0;background-color:var(--main-background-color);padding:4px;transform:rotate(-45deg);top:-5px;}.setting-line{margin:1.2em 0.6em;}.setting-radio input,.setting-check input{margin-right:0.3em;height:1.2rem;width:1.2rem;border:2px solid var(--settings-input-border-color);outline:none;-webkit-appearance:none;cursor:pointer;}.setting-radio input{border-radius:50%;}.setting-radio span,.setting-check span{padding-bottom:1px;}.setting-radio{margin-top:0.1em;margin-bottom:0.1em;min-width:3.8em;padding:0.3em;display:inline-flex;align-items:center;cursor:pointer;}.setting-radio+.setting-radio{margin-left:0.5em;}.setting-check{margin-right:20px;display:flex;align-items:center;cursor:pointer;}.setting-radio input:checked{box-shadow:inset 0 0 0 3px var(--main-background-color);background-color:var(--settings-input-color);}.setting-check input:checked{background-color:var(--settings-input-color);border-width:1px;content:url('data:image/svg+xml,\ + \ + ');}.setting-radio input:focus,.setting-check input:focus{box-shadow:0 0 1px 1px var(--settings-input-color);}.setting-radio input:checked:focus{box-shadow:inset 0 0 0 3px var(--main-background-color),0 0 2px 2px var(--settings-input-color);}.setting-radio input:hover,.setting-check input:hover{border-color:var(--settings-input-color) !important;}#help.popover{max-width:600px;--popover-arrow-offset:48px;}#help dt{float:left;clear:left;margin-right:0.5rem;}#help span.top,#help span.bottom{text-align:center;display:block;font-size:1.125rem;}#help span.top{margin:10px 0;border-bottom:1px solid var(--border-color);padding-bottom:4px;margin-bottom:6px;}#help span.bottom{clear:both;border-top:1px solid var(--border-color);}.side-by-side>div{width:50%;float:left;padding:0 20px 20px 17px;}.item-info .stab{display:block;padding:3px;margin-bottom:5px;}.item-name .stab{margin-left:0.3125em;}.stab{padding:0 2px;font-size:0.875rem;font-weight:normal;color:var(--main-color);background-color:var(--stab-background-color);width:fit-content;white-space:pre-wrap;border-radius:3px;display:inline;vertical-align:baseline;}.stab.portability>code{background:none;color:var(--stab-code-color);}.stab .emoji,.item-info .stab::before{font-size:1.25rem;}.stab .emoji{margin-right:0.3rem;}.item-info .stab::before{content:"\0";width:0;display:inline-block;color:transparent;}.emoji{text-shadow:1px 0 0 black,-1px 0 0 black,0 1px 0 black,0 -1px 0 black;}.since{font-weight:normal;font-size:initial;}.rightside{padding-left:12px;float:right;}.rightside:not(a),.out-of-band{color:var(--right-side-color);}pre.rust{tab-size:4;-moz-tab-size:4;}pre.rust .kw{color:var(--code-highlight-kw-color);}pre.rust .kw-2{color:var(--code-highlight-kw-2-color);}pre.rust .lifetime{color:var(--code-highlight-lifetime-color);}pre.rust 
.prelude-ty{color:var(--code-highlight-prelude-color);}pre.rust .prelude-val{color:var(--code-highlight-prelude-val-color);}pre.rust .string{color:var(--code-highlight-string-color);}pre.rust .number{color:var(--code-highlight-number-color);}pre.rust .bool-val{color:var(--code-highlight-literal-color);}pre.rust .self{color:var(--code-highlight-self-color);}pre.rust .attr{color:var(--code-highlight-attribute-color);}pre.rust .macro,pre.rust .macro-nonterminal{color:var(--code-highlight-macro-color);}pre.rust .question-mark{font-weight:bold;color:var(--code-highlight-question-mark-color);}pre.rust .comment{color:var(--code-highlight-comment-color);}pre.rust .doccomment{color:var(--code-highlight-doc-comment-color);}.rustdoc.src .example-wrap pre.rust a{background:var(--codeblock-link-background);}.example-wrap.compile_fail,.example-wrap.should_panic{border-left:2px solid var(--codeblock-error-color);}.ignore.example-wrap{border-left:2px solid var(--codeblock-ignore-color);}.example-wrap.compile_fail:hover,.example-wrap.should_panic:hover{border-left:2px solid var(--codeblock-error-hover-color);}.example-wrap.ignore:hover{border-left:2px solid var(--codeblock-ignore-hover-color);}.example-wrap.compile_fail .tooltip,.example-wrap.should_panic .tooltip{color:var(--codeblock-error-color);}.example-wrap.ignore .tooltip{color:var(--codeblock-ignore-color);}.example-wrap.compile_fail:hover .tooltip,.example-wrap.should_panic:hover .tooltip{color:var(--codeblock-error-hover-color);}.example-wrap.ignore:hover .tooltip{color:var(--codeblock-ignore-hover-color);}.example-wrap .tooltip{position:absolute;display:block;left:-25px;top:5px;margin:0;line-height:1;}.example-wrap.compile_fail .tooltip,.example-wrap.should_panic .tooltip,.example-wrap.ignore .tooltip{font-weight:bold;font-size:1.25rem;}.content .docblock .warning{border-left:2px solid var(--warning-border-color);padding:14px;position:relative;overflow-x:visible !important;}.content .docblock .warning::before{color:var(--warning-border-color);content:"ⓘ";position:absolute;left:-25px;top:5px;font-weight:bold;font-size:1.25rem;}.top-doc>.docblock>.warning:first-child::before{top:20px;}.example-wrap>a.test-arrow,.example-wrap .button-holder{visibility:hidden;position:absolute;top:4px;right:4px;z-index:1;}a.test-arrow{height:var(--copy-path-height);padding:6px 4px 0 11px;}a.test-arrow::before{content:url('data:image/svg+xml,');}.example-wrap .button-holder{display:flex;}@media not (pointer:coarse){.example-wrap:hover>a.test-arrow,.example-wrap:hover>.button-holder{visibility:visible;}}.example-wrap .button-holder.keep-visible{visibility:visible;}.example-wrap .button-holder>*{background:var(--main-background-color);cursor:pointer;border-radius:var(--button-border-radius);height:var(--copy-path-height);width:var(--copy-path-width);border:0;color:var(--code-example-button-color);}.example-wrap .button-holder>*:hover{color:var(--code-example-button-hover-color);}.example-wrap .button-holder>*:not(:first-child){margin-left:var(--button-left-margin);}.example-wrap .button-holder .copy-button{padding:2px 0 0 4px;}.example-wrap .button-holder .copy-button::before,.example-wrap .test-arrow::before{filter:var(--copy-path-img-filter);}.example-wrap .button-holder .copy-button::before{content:var(--clipboard-image);}.example-wrap .button-holder .copy-button:hover::before,.example-wrap .test-arrow:hover::before{filter:var(--copy-path-img-hover-filter);}.example-wrap .button-holder 
.copy-button.clicked::before{content:var(--checkmark-image);padding-right:5px;}.code-attribute{font-weight:300;color:var(--code-attribute-color);}.item-spacer{width:100%;height:12px;display:block;}.out-of-band>span.since{font-size:1.25rem;}.sub-variant h4{font-size:1rem;font-weight:400;margin-top:0;margin-bottom:0;}.sub-variant{margin-left:24px;margin-bottom:40px;}.sub-variant>.sub-variant-field{margin-left:24px;}@keyframes targetfadein{from{background-color:var(--main-background-color);}10%{background-color:var(--target-border-color);}to{background-color:var(--target-background-color);}}:target{padding-right:3px;background-color:var(--target-background-color);border-right:3px solid var(--target-border-color);animation:0.65s cubic-bezier(0,0,0.1,1.0) 0.1s targetfadein;}.code-header a.tooltip{color:inherit;margin-right:15px;position:relative;}.code-header a.tooltip:hover{color:var(--link-color);}a.tooltip:hover::after{position:absolute;top:calc(100% - 10px);left:-15px;right:-15px;height:20px;content:"\00a0";}.fade-out{opacity:0;transition:opacity 0.45s cubic-bezier(0,0,0.1,1.0);}.popover.tooltip .content{margin:0.25em 0.5em;}.popover.tooltip .content pre,.popover.tooltip .content code{background:transparent;margin:0;padding:0;font-size:1.25rem;white-space:pre-wrap;}.popover.tooltip .content>h3:first-child{margin:0 0 5px 0;}.search-failed{text-align:center;margin-top:20px;display:none;}.search-failed.active{display:block;}.search-failed>ul{text-align:left;max-width:570px;margin-left:auto;margin-right:auto;}#search-tabs{display:flex;flex-direction:row;gap:1px;margin-bottom:4px;}#search-tabs button{text-align:center;font-size:1.125rem;border:0;border-top:2px solid;flex:1;line-height:1.5;color:inherit;}#search-tabs button:not(.selected){background-color:var(--search-tab-button-not-selected-background);border-top-color:var(--search-tab-button-not-selected-border-top-color);}#search-tabs button:hover,#search-tabs button.selected{background-color:var(--search-tab-button-selected-background);border-top-color:var(--search-tab-button-selected-border-top-color);}#search-tabs .count{font-size:1rem;font-variant-numeric:tabular-nums;color:var(--search-tab-title-count-color);}#search .error code{border-radius:3px;background-color:var(--search-error-code-background-color);}.search-corrections{font-weight:normal;}#src-sidebar{width:100%;overflow:auto;}#src-sidebar div.files>a:hover,details.dir-entry summary:hover,#src-sidebar div.files>a:focus,details.dir-entry summary:focus{background-color:var(--src-sidebar-background-hover);}#src-sidebar div.files>a.selected{background-color:var(--src-sidebar-background-selected);}.src-sidebar-title{position:sticky;top:0;display:flex;padding:8px 8px 0 48px;margin-bottom:7px;background:var(--sidebar-background-color);border-bottom:1px solid var(--border-color);}#settings-menu,#help-button{margin-left:var(--button-left-margin);display:flex;}#sidebar-button{display:none;line-height:0;}.hide-sidebar #sidebar-button,.src #sidebar-button{display:flex;margin-right:4px;position:fixed;left:6px;height:34px;width:34px;background-color:var(--main-background-color);z-index:1;}.src #sidebar-button{left:8px;z-index:calc(var(--desktop-sidebar-z-index) + 1);}.hide-sidebar .src #sidebar-button{position:static;}#settings-menu>a,#help-button>a,#sidebar-button>a{display:flex;align-items:center;justify-content:center;background-color:var(--button-background-color);border:1px solid 
var(--border-color);border-radius:var(--button-border-radius);color:var(--settings-button-color);font-size:20px;width:33px;}#settings-menu>a:hover,#settings-menu>a:focus,#help-button>a:hover,#help-button>a:focus,#sidebar-button>a:hover,#sidebar-button>a:focus{border-color:var(--settings-button-border-focus);}#settings-menu>a{line-height:0;font-size:0;}#settings-menu>a:before{content:url('data:image/svg+xml,\ + ');width:22px;height:22px;filter:var(--settings-menu-filter);}#sidebar-button>a:before{content:url('data:image/svg+xml,\ + \ + \ + ');width:22px;height:22px;}#copy-path{color:var(--copy-path-button-color);background:var(--main-background-color);height:var(--copy-path-height);width:var(--copy-path-width);margin-left:10px;padding:0;padding-left:2px;border:0;font-size:0;}#copy-path::before{filter:var(--copy-path-img-filter);content:var(--clipboard-image);}#copy-path:hover::before{filter:var(--copy-path-img-hover-filter);}#copy-path.clicked::before{content:var(--checkmark-image);}@keyframes rotating{from{transform:rotate(0deg);}to{transform:rotate(360deg);}}#settings-menu.rotate>a img{animation:rotating 2s linear infinite;}kbd{display:inline-block;padding:3px 5px;font:15px monospace;line-height:10px;vertical-align:middle;border:solid 1px var(--border-color);border-radius:3px;color:var(--kbd-color);background-color:var(--kbd-background);box-shadow:inset 0 -1px 0 var(--kbd-box-shadow-color);}ul.all-items>li{list-style:none;}details.dir-entry{padding-left:4px;}details.dir-entry>summary{margin:0 0 0 -4px;padding:0 0 0 4px;cursor:pointer;}details.dir-entry div.folders,details.dir-entry div.files{padding-left:23px;}details.dir-entry a{display:block;}details.toggle{contain:layout;position:relative;}details.big-toggle{contain:inline-size;}details.toggle>summary.hideme{cursor:pointer;font-size:1rem;}details.toggle>summary{list-style:none;outline:none;}details.toggle>summary::-webkit-details-marker,details.toggle>summary::marker{display:none;}details.toggle>summary.hideme>span{margin-left:9px;}details.toggle>summary::before{background:url('data:image/svg+xml,') no-repeat top left;content:"";cursor:pointer;width:16px;height:16px;display:inline-block;vertical-align:middle;opacity:.5;filter:var(--toggle-filter);}details.toggle>summary.hideme>span,.more-examples-toggle summary,.more-examples-toggle .hide-more{color:var(--toggles-color);}details.toggle>summary::after{content:"Expand";overflow:hidden;width:0;height:0;position:absolute;}details.toggle>summary.hideme::after{content:"";}details.toggle>summary:focus::before,details.toggle>summary:hover::before{opacity:1;}details.toggle>summary:focus-visible::before{outline:1px dotted #000;outline-offset:1px;}details.non-exhaustive{margin-bottom:8px;}details.toggle>summary.hideme::before{position:relative;}details.toggle>summary:not(.hideme)::before{position:absolute;left:-24px;top:4px;}.impl-items>details.toggle>summary:not(.hideme)::before{position:absolute;left:-24px;}details.big-toggle>summary:not(.hideme)::before{left:-34px;top:9px;}details.toggle[open] >summary.hideme{position:absolute;}details.toggle[open] >summary.hideme>span{display:none;}details.toggle[open] >summary::before{background:url('data:image/svg+xml,') no-repeat top left;}details.toggle[open] >summary::after{content:"Collapse";}.docblock summary>*{display:inline-block;}.docblock>.example-wrap:first-child .tooltip{margin-top:16px;}.src #sidebar-button>a:before,.sidebar-menu-toggle:before{content:url('data:image/svg+xml,\ + 
');opacity:0.75;}.sidebar-menu-toggle:hover:before,.sidebar-menu-toggle:active:before,.sidebar-menu-toggle:focus:before{opacity:1;}.src #sidebar-button>a:before{content:url('data:image/svg+xml,\ + \ + \ + ');opacity:0.75;}@media (max-width:850px){#search-tabs .count{display:block;}}@media (max-width:700px){*[id]{scroll-margin-top:45px;}.rustdoc{display:block;}main{padding-left:15px;padding-top:0px;}.main-heading{flex-direction:column;}.out-of-band{text-align:left;margin-left:initial;padding:initial;}.out-of-band .since::before{content:"Since ";}.sidebar .logo-container,.sidebar .location,.sidebar-resizer{display:none;}.sidebar{position:fixed;top:45px;left:-1000px;z-index:11;height:calc(100vh - 45px);width:200px;}.src main,.rustdoc.src .sidebar{top:0;padding:0;height:100vh;border:0;}.src .search-form{margin-left:40px;}.hide-sidebar .search-form{margin-left:32px;}.hide-sidebar .src .search-form{margin-left:0;}.sidebar.shown,.src-sidebar-expanded .src .sidebar,.rustdoc:not(.src) .sidebar:focus-within{left:0;}.mobile-topbar h2{padding-bottom:0;margin:auto 0.5em auto auto;overflow:hidden;font-size:24px;white-space:nowrap;text-overflow:ellipsis;}.mobile-topbar .logo-container>img{max-width:35px;max-height:35px;margin:5px 0 5px 20px;}.mobile-topbar{display:flex;flex-direction:row;position:sticky;z-index:10;font-size:2rem;height:45px;width:100%;left:0;top:0;}.hide-sidebar .mobile-topbar{display:none;}.sidebar-menu-toggle{width:45px;border:none;line-height:0;}.hide-sidebar .sidebar-menu-toggle{display:none;}.sidebar-elems{margin-top:1em;}.anchor{display:none !important;}#main-content>details.toggle>summary::before,#main-content>div>details.toggle>summary::before{left:-11px;}#copy-path,#help-button{display:none;}#sidebar-button>a:before{content:url('data:image/svg+xml,\ + \ + \ + ');width:22px;height:22px;}.sidebar-menu-toggle:before{filter:var(--mobile-sidebar-menu-filter);}.sidebar-menu-toggle:hover{background:var(--main-background-color);}.item-table,.item-row,.item-table>li,.item-table>li>div,.search-results>a,.search-results>a>div{display:block;}.search-results>a{padding:5px 0px;}.search-results>a>div.desc,.item-table>li>div.desc{padding-left:2em;}.search-results .result-name{display:block;}.search-results .result-name .typename{width:initial;margin-right:0;}.search-results .result-name .typename,.search-results .result-name .path{display:inline;}.src-sidebar-expanded .src .sidebar{position:fixed;max-width:100vw;width:100vw;}.src .src-sidebar-title{padding-top:0;}details.toggle:not(.top-doc)>summary{margin-left:10px;}.impl-items>details.toggle>summary:not(.hideme)::before,#main-content>details.toggle:not(.top-doc)>summary::before,#main-content>div>details.toggle>summary::before{left:-11px;}.impl-items>.item-info{margin-left:34px;}.src nav.sub{margin:0;padding:var(--nav-sub-mobile-padding);}}@media (min-width:701px){.scraped-example-title{position:absolute;z-index:10;background:var(--main-background-color);bottom:8px;right:5px;padding:2px 4px;box-shadow:0 0 4px var(--main-background-color);}.item-table>li>.item-name{width:33%;}.item-table>li>div{overflow-wrap:anywhere;}}@media print{nav.sidebar,nav.sub,.out-of-band,a.src,#copy-path,details.toggle[open] >summary::before,details.toggle>summary::before,details.toggle.top-doc>summary{display:none;}.docblock{margin-left:0;}main{padding:10px;}}@media (max-width:464px){.docblock{margin-left:12px;}.docblock 
code{overflow-wrap:break-word;overflow-wrap:anywhere;}nav.sub{flex-direction:column;}.search-form{align-self:stretch;}}.variant,.implementors-toggle>summary,.impl,#implementors-list>.docblock,.impl-items>section,.impl-items>.toggle>summary,.methods>section,.methods>.toggle>summary{margin-bottom:0.75em;}.variants>.docblock,.implementors-toggle>.docblock,.impl-items>.toggle[open]:not(:last-child),.methods>.toggle[open]:not(:last-child),.implementors-toggle[open]:not(:last-child){margin-bottom:2em;}#trait-implementations-list .impl-items>.toggle:not(:last-child),#synthetic-implementations-list .impl-items>.toggle:not(:last-child),#blanket-implementations-list .impl-items>.toggle:not(:last-child){margin-bottom:1em;}.scraped-example-list .scrape-help{margin-left:10px;padding:0 4px;font-weight:normal;font-size:12px;position:relative;bottom:1px;border:1px solid var(--scrape-example-help-border-color);border-radius:50px;color:var(--scrape-example-help-color);}.scraped-example-list .scrape-help:hover{border-color:var(--scrape-example-help-hover-border-color);color:var(--scrape-example-help-hover-color);}.scraped-example:not(.expanded) .example-wrap::before,.scraped-example:not(.expanded) .example-wrap::after{content:" ";width:100%;height:5px;position:absolute;z-index:1;}.scraped-example:not(.expanded) .example-wrap::before{top:0;background:linear-gradient(to bottom,var(--scrape-example-code-wrapper-background-start),var(--scrape-example-code-wrapper-background-end));}.scraped-example:not(.expanded) .example-wrap::after{bottom:0;background:linear-gradient(to top,var(--scrape-example-code-wrapper-background-start),var(--scrape-example-code-wrapper-background-end));}.scraped-example:not(.expanded){width:100%;overflow-y:hidden;margin-bottom:0;}.scraped-example:not(.expanded){overflow-x:hidden;}.scraped-example .rust span.highlight{background:var(--scrape-example-code-line-highlight);}.scraped-example .rust span.highlight.focus{background:var(--scrape-example-code-line-highlight-focus);}.more-examples-toggle{max-width:calc(100% + 25px);margin-top:10px;margin-left:-25px;}.more-examples-toggle .hide-more{margin-left:25px;cursor:pointer;}.more-scraped-examples{margin-left:25px;position:relative;}.toggle-line{position:absolute;top:5px;bottom:0;right:calc(100% + 10px);padding:0 4px;cursor:pointer;}.toggle-line-inner{min-width:2px;height:100%;background:var(--scrape-example-toggle-line-background);}.toggle-line:hover .toggle-line-inner{background:var(--scrape-example-toggle-line-hover-background);}.more-scraped-examples .scraped-example,.example-links{margin-top:20px;}.more-scraped-examples .scraped-example:first-child{margin-top:5px;}.example-links ul{margin-bottom:0;}:root[data-theme="light"],:root:not([data-theme]){--main-background-color:white;--main-color:black;--settings-input-color:#2196f3;--settings-input-border-color:#717171;--settings-button-color:#000;--settings-button-border-focus:#717171;--sidebar-background-color:#f5f5f5;--sidebar-background-color-hover:#e0e0e0;--code-block-background-color:#f5f5f5;--scrollbar-track-background-color:#dcdcdc;--scrollbar-thumb-background-color:rgba(36,37,39,0.6);--scrollbar-color:rgba(36,37,39,0.6) 
#d9d9d9;--headings-border-bottom-color:#ddd;--border-color:#e0e0e0;--button-background-color:#fff;--right-side-color:grey;--code-attribute-color:#999;--toggles-color:#999;--toggle-filter:none;--mobile-sidebar-menu-filter:none;--search-input-focused-border-color:#66afe9;--copy-path-button-color:#999;--copy-path-img-filter:invert(50%);--copy-path-img-hover-filter:invert(35%);--code-example-button-color:#7f7f7f;--code-example-button-hover-color:#595959;--codeblock-error-hover-color:rgb(255,0,0);--codeblock-error-color:rgba(255,0,0,.5);--codeblock-ignore-hover-color:rgb(255,142,0);--codeblock-ignore-color:rgba(255,142,0,.6);--warning-border-color:#ff8e00;--type-link-color:#ad378a;--trait-link-color:#6e4fc9;--assoc-item-link-color:#3873ad;--function-link-color:#ad7c37;--macro-link-color:#068000;--keyword-link-color:#3873ad;--mod-link-color:#3873ad;--link-color:#3873ad;--sidebar-link-color:#356da4;--sidebar-current-link-background-color:#fff;--search-result-link-focus-background-color:#ccc;--search-result-border-color:#aaa3;--search-color:#000;--search-error-code-background-color:#d0cccc;--search-results-alias-color:#000;--search-results-grey-color:#999;--search-tab-title-count-color:#888;--search-tab-button-not-selected-border-top-color:#e6e6e6;--search-tab-button-not-selected-background:#e6e6e6;--search-tab-button-selected-border-top-color:#0089ff;--search-tab-button-selected-background:#fff;--settings-menu-filter:none;--stab-background-color:#fff5d6;--stab-code-color:#000;--code-highlight-kw-color:#8959a8;--code-highlight-kw-2-color:#4271ae;--code-highlight-lifetime-color:#b76514;--code-highlight-prelude-color:#4271ae;--code-highlight-prelude-val-color:#c82829;--code-highlight-number-color:#718c00;--code-highlight-string-color:#718c00;--code-highlight-literal-color:#c82829;--code-highlight-attribute-color:#c82829;--code-highlight-self-color:#c82829;--code-highlight-macro-color:#3e999f;--code-highlight-question-mark-color:#ff9011;--code-highlight-comment-color:#8e908c;--code-highlight-doc-comment-color:#4d4d4c;--src-line-numbers-span-color:#c67e2d;--src-line-number-highlighted-background-color:#fdffd3;--target-background-color:#fdffd3;--target-border-color:#ad7c37;--kbd-color:#000;--kbd-background:#fafbfc;--kbd-box-shadow-color:#c6cbd1;--rust-logo-filter:initial;--crate-search-div-filter:invert(100%) sepia(0%) saturate(4223%) hue-rotate(289deg) brightness(114%) contrast(76%);--crate-search-div-hover-filter:invert(44%) sepia(18%) saturate(23%) hue-rotate(317deg) brightness(96%) 
contrast(93%);--crate-search-hover-border:#717171;--src-sidebar-background-selected:#fff;--src-sidebar-background-hover:#e0e0e0;--table-alt-row-background-color:#f5f5f5;--codeblock-link-background:#eee;--scrape-example-toggle-line-background:#ccc;--scrape-example-toggle-line-hover-background:#999;--scrape-example-code-line-highlight:#fcffd6;--scrape-example-code-line-highlight-focus:#f6fdb0;--scrape-example-help-border-color:#555;--scrape-example-help-color:#333;--scrape-example-help-hover-border-color:#000;--scrape-example-help-hover-color:#000;--scrape-example-code-wrapper-background-start:rgba(255,255,255,1);--scrape-example-code-wrapper-background-end:rgba(255,255,255,0);--sidebar-resizer-hover:hsl(207,90%,66%);--sidebar-resizer-active:hsl(207,90%,54%);}:root[data-theme="dark"]{--main-background-color:#353535;--main-color:#ddd;--settings-input-color:#2196f3;--settings-input-border-color:#999;--settings-button-color:#000;--settings-button-border-focus:#ffb900;--sidebar-background-color:#505050;--sidebar-background-color-hover:#676767;--code-block-background-color:#2A2A2A;--scrollbar-track-background-color:#717171;--scrollbar-thumb-background-color:rgba(32,34,37,.6);--scrollbar-color:rgba(32,34,37,.6) #5a5a5a;--headings-border-bottom-color:#d2d2d2;--border-color:#e0e0e0;--button-background-color:#f0f0f0;--right-side-color:grey;--code-attribute-color:#999;--toggles-color:#999;--toggle-filter:invert(100%);--mobile-sidebar-menu-filter:invert(100%);--search-input-focused-border-color:#008dfd;--copy-path-button-color:#999;--copy-path-img-filter:invert(50%);--copy-path-img-hover-filter:invert(65%);--code-example-button-color:#7f7f7f;--code-example-button-hover-color:#a5a5a5;--codeblock-error-hover-color:rgb(255,0,0);--codeblock-error-color:rgba(255,0,0,.5);--codeblock-ignore-hover-color:rgb(255,142,0);--codeblock-ignore-color:rgba(255,142,0,.6);--warning-border-color:#ff8e00;--type-link-color:#2dbfb8;--trait-link-color:#b78cf2;--assoc-item-link-color:#d2991d;--function-link-color:#2bab63;--macro-link-color:#09bd00;--keyword-link-color:#d2991d;--mod-link-color:#d2991d;--link-color:#d2991d;--sidebar-link-color:#fdbf35;--sidebar-current-link-background-color:#444;--search-result-link-focus-background-color:#616161;--search-result-border-color:#aaa3;--search-color:#111;--search-error-code-background-color:#484848;--search-results-alias-color:#fff;--search-results-grey-color:#ccc;--search-tab-title-count-color:#888;--search-tab-button-not-selected-border-top-color:#252525;--search-tab-button-not-selected-background:#252525;--search-tab-button-selected-border-top-color:#0089ff;--search-tab-button-selected-background:#353535;--settings-menu-filter:none;--stab-background-color:#314559;--stab-code-color:#e6e1cf;--code-highlight-kw-color:#ab8ac1;--code-highlight-kw-2-color:#769acb;--code-highlight-lifetime-color:#d97f26;--code-highlight-prelude-color:#769acb;--code-highlight-prelude-val-color:#ee6868;--code-highlight-number-color:#83a300;--code-highlight-string-color:#83a300;--code-highlight-literal-color:#ee6868;--code-highlight-attribute-color:#ee6868;--code-highlight-self-color:#ee6868;--code-highlight-macro-color:#3e999f;--code-highlight-question-mark-color:#ff9011;--code-highlight-comment-color:#8d8d8b;--code-highlight-doc-comment-color:#8ca375;--src-line-numbers-span-color:#3b91e2;--src-line-number-highlighted-background-color:#0a042f;--target-background-color:#494a3d;--target-border-color:#bb7410;--kbd-color:#000;--kbd-background:#fafbfc;--kbd-box-shadow-color:#c6cbd1;--rust-logo-filter:drop-shado
w(1px 0 0px #fff) drop-shadow(0 1px 0 #fff) drop-shadow(-1px 0 0 #fff) drop-shadow(0 -1px 0 #fff);--crate-search-div-filter:invert(94%) sepia(0%) saturate(721%) hue-rotate(255deg) brightness(90%) contrast(90%);--crate-search-div-hover-filter:invert(69%) sepia(60%) saturate(6613%) hue-rotate(184deg) brightness(100%) contrast(91%);--crate-search-hover-border:#2196f3;--src-sidebar-background-selected:#333;--src-sidebar-background-hover:#444;--table-alt-row-background-color:#2a2a2a;--codeblock-link-background:#333;--scrape-example-toggle-line-background:#999;--scrape-example-toggle-line-hover-background:#c5c5c5;--scrape-example-code-line-highlight:#5b3b01;--scrape-example-code-line-highlight-focus:#7c4b0f;--scrape-example-help-border-color:#aaa;--scrape-example-help-color:#eee;--scrape-example-help-hover-border-color:#fff;--scrape-example-help-hover-color:#fff;--scrape-example-code-wrapper-background-start:rgba(53,53,53,1);--scrape-example-code-wrapper-background-end:rgba(53,53,53,0);--sidebar-resizer-hover:hsl(207,30%,54%);--sidebar-resizer-active:hsl(207,90%,54%);}:root[data-theme="ayu"]{--main-background-color:#0f1419;--main-color:#c5c5c5;--settings-input-color:#ffb454;--settings-input-border-color:#999;--settings-button-color:#fff;--settings-button-border-focus:#e0e0e0;--sidebar-background-color:#14191f;--sidebar-background-color-hover:rgba(70,70,70,0.33);--code-block-background-color:#191f26;--scrollbar-track-background-color:transparent;--scrollbar-thumb-background-color:#5c6773;--scrollbar-color:#5c6773 #24292f;--headings-border-bottom-color:#5c6773;--border-color:#5c6773;--button-background-color:#141920;--right-side-color:grey;--code-attribute-color:#999;--toggles-color:#999;--toggle-filter:invert(100%);--mobile-sidebar-menu-filter:invert(100%);--search-input-focused-border-color:#5c6773;--copy-path-button-color:#fff;--copy-path-img-filter:invert(70%);--copy-path-img-hover-filter:invert(100%);--code-example-button-color:#b2b2b2;--code-example-button-hover-color:#fff;--codeblock-error-hover-color:rgb(255,0,0);--codeblock-error-color:rgba(255,0,0,.5);--codeblock-ignore-hover-color:rgb(255,142,0);--codeblock-ignore-color:rgba(255,142,0,.6);--warning-border-color:#ff8e00;--type-link-color:#ffa0a5;--trait-link-color:#39afd7;--assoc-item-link-color:#39afd7;--function-link-color:#fdd687;--macro-link-color:#a37acc;--keyword-link-color:#39afd7;--mod-link-color:#39afd7;--link-color:#39afd7;--sidebar-link-color:#53b1db;--sidebar-current-link-background-color:transparent;--search-result-link-focus-background-color:#3c3c3c;--search-result-border-color:#aaa3;--search-color:#fff;--search-error-code-background-color:#4f4c4c;--search-results-alias-color:#c5c5c5;--search-results-grey-color:#999;--search-tab-title-count-color:#888;--search-tab-button-not-selected-border-top-color:none;--search-tab-button-not-selected-background:transparent !important;--search-tab-button-selected-border-top-color:none;--search-tab-button-selected-background:#141920 
!important;--settings-menu-filter:invert(100%);--stab-background-color:#314559;--stab-code-color:#e6e1cf;--code-highlight-kw-color:#ff7733;--code-highlight-kw-2-color:#ff7733;--code-highlight-lifetime-color:#ff7733;--code-highlight-prelude-color:#69f2df;--code-highlight-prelude-val-color:#ff7733;--code-highlight-number-color:#b8cc52;--code-highlight-string-color:#b8cc52;--code-highlight-literal-color:#ff7733;--code-highlight-attribute-color:#e6e1cf;--code-highlight-self-color:#36a3d9;--code-highlight-macro-color:#a37acc;--code-highlight-question-mark-color:#ff9011;--code-highlight-comment-color:#788797;--code-highlight-doc-comment-color:#a1ac88;--src-line-numbers-span-color:#5c6773;--src-line-number-highlighted-background-color:rgba(255,236,164,0.06);--target-background-color:rgba(255,236,164,0.06);--target-border-color:rgba(255,180,76,0.85);--kbd-color:#c5c5c5;--kbd-background:#314559;--kbd-box-shadow-color:#5c6773;--rust-logo-filter:drop-shadow(1px 0 0px #fff) drop-shadow(0 1px 0 #fff) drop-shadow(-1px 0 0 #fff) drop-shadow(0 -1px 0 #fff);--crate-search-div-filter:invert(41%) sepia(12%) saturate(487%) hue-rotate(171deg) brightness(94%) contrast(94%);--crate-search-div-hover-filter:invert(98%) sepia(12%) saturate(81%) hue-rotate(343deg) brightness(113%) contrast(76%);--crate-search-hover-border:#e0e0e0;--src-sidebar-background-selected:#14191f;--src-sidebar-background-hover:#14191f;--table-alt-row-background-color:#191f26;--codeblock-link-background:#333;--scrape-example-toggle-line-background:#999;--scrape-example-toggle-line-hover-background:#c5c5c5;--scrape-example-code-line-highlight:#5b3b01;--scrape-example-code-line-highlight-focus:#7c4b0f;--scrape-example-help-border-color:#aaa;--scrape-example-help-color:#eee;--scrape-example-help-hover-border-color:#fff;--scrape-example-help-hover-color:#fff;--scrape-example-code-wrapper-background-start:rgba(15,20,25,1);--scrape-example-code-wrapper-background-end:rgba(15,20,25,0);--sidebar-resizer-hover:hsl(34,50%,33%);--sidebar-resizer-active:hsl(34,100%,66%);}:root[data-theme="ayu"] h1,:root[data-theme="ayu"] h2,:root[data-theme="ayu"] h3,:root[data-theme="ayu"] h4,:where(:root[data-theme="ayu"]) h1 a,:root[data-theme="ayu"] .sidebar h2 a,:root[data-theme="ayu"] .sidebar h3 a{color:#fff;}:root[data-theme="ayu"] .docblock code{color:#ffb454;}:root[data-theme="ayu"] .docblock a>code{color:#39AFD7 !important;}:root[data-theme="ayu"] .code-header,:root[data-theme="ayu"] .docblock pre>code,:root[data-theme="ayu"] pre,:root[data-theme="ayu"] pre>code,:root[data-theme="ayu"] .item-info code,:root[data-theme="ayu"] .rustdoc.source .example-wrap{color:#e6e1cf;}:root[data-theme="ayu"] .sidebar .current,:root[data-theme="ayu"] .sidebar .current a,:root[data-theme="ayu"] .sidebar a:hover,:root[data-theme="ayu"] #src-sidebar div.files>a:hover,:root[data-theme="ayu"] details.dir-entry summary:hover,:root[data-theme="ayu"] #src-sidebar div.files>a:focus,:root[data-theme="ayu"] details.dir-entry summary:focus,:root[data-theme="ayu"] #src-sidebar div.files>a.selected{color:#ffb44c;}:root[data-theme="ayu"] .sidebar-elems .location{color:#ff7733;}:root[data-theme="ayu"] .src-line-numbers .line-highlighted{color:#708090;padding-right:7px;border-right:1px solid #ffb44c;}:root[data-theme="ayu"] .search-results a:hover,:root[data-theme="ayu"] .search-results a:focus{color:#fff !important;background-color:#3c3c3c;}:root[data-theme="ayu"] .search-results a{color:#0096cf;}:root[data-theme="ayu"] .search-results a div.desc{color:#c5c5c5;}:root[data-theme="ayu"] 
.result-name .primitive>i,:root[data-theme="ayu"] .result-name .keyword>i{color:#788797;}:root[data-theme="ayu"] #search-tabs>button.selected{border-bottom:1px solid #ffb44c !important;border-top:none;}:root[data-theme="ayu"] #search-tabs>button:not(.selected){border:none;background-color:transparent !important;}:root[data-theme="ayu"] #search-tabs>button:hover{border-bottom:1px solid rgba(242,151,24,0.3);}:root[data-theme="ayu"] #settings-menu>a img,:root[data-theme="ayu"] #sidebar-button>a:before{filter:invert(100);} \ No newline at end of file diff --git a/static.files/scrape-examples-5ae8dca501e56595.js b/static.files/scrape-examples-5ae8dca501e56595.js new file mode 100644 index 000000000..2a547a704 --- /dev/null +++ b/static.files/scrape-examples-5ae8dca501e56595.js @@ -0,0 +1 @@ +"use strict";(function(){const DEFAULT_MAX_LINES=5;const HIDDEN_MAX_LINES=10;function scrollToLoc(elt,loc,isHidden){const lines=elt.querySelector(".src-line-numbers > pre");let scrollOffset;const maxLines=isHidden?HIDDEN_MAX_LINES:DEFAULT_MAX_LINES;if(loc[1]-loc[0]>maxLines){const line=Math.max(0,loc[0]-1);scrollOffset=lines.children[line].offsetTop}else{const halfHeight=elt.offsetHeight/2;const offsetTop=lines.children[loc[0]].offsetTop;const lastLine=lines.children[loc[1]];const offsetBot=lastLine.offsetTop+lastLine.offsetHeight;const offsetMid=(offsetTop+offsetBot)/2;scrollOffset=offsetMid-halfHeight}lines.parentElement.scrollTo(0,scrollOffset);elt.querySelector(".rust").scrollTo(0,scrollOffset)}function updateScrapedExample(example,isHidden){const locs=JSON.parse(example.attributes.getNamedItem("data-locs").textContent);let locIndex=0;const highlights=Array.prototype.slice.call(example.querySelectorAll(".highlight"));const link=example.querySelector(".scraped-example-title a");if(locs.length>1){const onChangeLoc=changeIndex=>{removeClass(highlights[locIndex],"focus");changeIndex();scrollToLoc(example,locs[locIndex][0],isHidden);addClass(highlights[locIndex],"focus");const url=locs[locIndex][1];const title=locs[locIndex][2];link.href=url;link.innerHTML=title};example.querySelector(".prev").addEventListener("click",()=>{onChangeLoc(()=>{locIndex=(locIndex-1+locs.length)%locs.length})});example.querySelector(".next").addEventListener("click",()=>{onChangeLoc(()=>{locIndex=(locIndex+1)%locs.length})})}const expandButton=example.querySelector(".expand");if(expandButton){expandButton.addEventListener("click",()=>{if(hasClass(example,"expanded")){removeClass(example,"expanded");scrollToLoc(example,locs[0][0],isHidden)}else{addClass(example,"expanded")}})}scrollToLoc(example,locs[0][0],isHidden)}const firstExamples=document.querySelectorAll(".scraped-example-list > .scraped-example");onEachLazy(firstExamples,el=>updateScrapedExample(el,false));onEachLazy(document.querySelectorAll(".more-examples-toggle"),toggle=>{onEachLazy(toggle.querySelectorAll(".toggle-line, .hide-more"),button=>{button.addEventListener("click",()=>{toggle.open=false})});const moreExamples=toggle.querySelectorAll(".scraped-example");toggle.querySelector("summary").addEventListener("click",()=>{setTimeout(()=>{onEachLazy(moreExamples,el=>updateScrapedExample(el,true))})},{once:true})})})() \ No newline at end of file diff --git a/static.files/search-0cfde64e8ad3a7fe.js b/static.files/search-0cfde64e8ad3a7fe.js new file mode 100644 index 000000000..5e3c05172 --- /dev/null +++ b/static.files/search-0cfde64e8ad3a7fe.js @@ -0,0 +1,5 @@ +"use strict";if(!Array.prototype.toSpliced){Array.prototype.toSpliced=function(){const 
me=this.slice();Array.prototype.splice.apply(me,arguments);return me}}(function(){const itemTypes=["keyword","primitive","mod","externcrate","import","struct","enum","fn","type","static","trait","impl","tymethod","method","structfield","variant","macro","associatedtype","constant","associatedconstant","union","foreigntype","existential","attr","derive","traitalias","generic",];const TY_GENERIC=itemTypes.indexOf("generic");const TY_IMPORT=itemTypes.indexOf("import");const ROOT_PATH=typeof window!=="undefined"?window.rootPath:"../";const UNBOXING_LIMIT=5;const REGEX_IDENT=/\p{ID_Start}\p{ID_Continue}*|_\p{ID_Continue}+/uy;const REGEX_INVALID_TYPE_FILTER=/[^a-z]/ui;const MAX_RESULTS=200;const NO_TYPE_FILTER=-1;const editDistanceState={current:[],prev:[],prevPrev:[],calculate:function calculate(a,b,limit){if(a.lengthlimit){return limit+1}while(b.length>0&&b[0]===a[0]){a=a.substring(1);b=b.substring(1)}while(b.length>0&&b[b.length-1]===a[a.length-1]){a=a.substring(0,a.length-1);b=b.substring(0,b.length-1)}if(b.length===0){return minDist}const aLength=a.length;const bLength=b.length;for(let i=0;i<=bLength;++i){this.current[i]=0;this.prev[i]=i;this.prevPrev[i]=Number.MAX_VALUE}for(let i=1;i<=aLength;++i){this.current[0]=i;const aIdx=i-1;for(let j=1;j<=bLength;++j){const bIdx=j-1;const substitutionCost=a[aIdx]===b[bIdx]?0:1;this.current[j]=Math.min(this.prev[j]+1,this.current[j-1]+1,this.prev[j-1]+substitutionCost,);if((i>1)&&(j>1)&&(a[aIdx]===b[bIdx-1])&&(a[aIdx-1]===b[bIdx])){this.current[j]=Math.min(this.current[j],this.prevPrev[j-2]+1,)}}const prevPrevTmp=this.prevPrev;this.prevPrev=this.prev;this.prev=this.current;this.current=prevPrevTmp}const distance=this.prev[bLength];return distance<=limit?distance:(limit+1)},};function editDistance(a,b,limit){return editDistanceState.calculate(a,b,limit)}function isEndCharacter(c){return"=,>-])".indexOf(c)!==-1}function isSeparatorCharacter(c){return c===","||c==="="}function isReturnArrow(parserState){return parserState.userQuery.slice(parserState.pos,parserState.pos+2)==="->"}function skipWhitespace(parserState){while(parserState.pos0){const c=parserState.userQuery[pos-1];if(c===lookingFor){return true}else if(c!==" "){break}pos-=1}return false}function isLastElemGeneric(elems,parserState){return(elems.length>0&&elems[elems.length-1].generics.length>0)||prevIs(parserState,">")}function getFilteredNextElem(query,parserState,elems,isInGenerics){const start=parserState.pos;if(parserState.userQuery[parserState.pos]===":"&&!isPathStart(parserState)){throw["Expected type filter before ",":"]}getNextElem(query,parserState,elems,isInGenerics);if(parserState.userQuery[parserState.pos]===":"&&!isPathStart(parserState)){if(parserState.typeFilter!==null){throw["Unexpected ",":"," (expected path after type filter ",parserState.typeFilter+":",")",]}if(elems.length===0){throw["Expected type filter before ",":"]}else if(query.literalSearch){throw["Cannot use quotes on type filter"]}const typeFilterElem=elems.pop();checkExtraTypeFilterCharacters(start,parserState);parserState.typeFilter=typeFilterElem.name;parserState.pos+=1;parserState.totalElems-=1;query.literalSearch=false;getNextElem(query,parserState,elems,isInGenerics)}}function getItemsBefore(query,parserState,elems,endChar){let foundStopChar=true;let foundSeparator=false;const oldTypeFilter=parserState.typeFilter;parserState.typeFilter=null;const oldIsInBinding=parserState.isInBinding;parserState.isInBinding=null;let hofParameters=null;let extra="";if(endChar===">"){extra="<"}else 
if(endChar==="]"){extra="["}else if(endChar===")"){extra="("}else if(endChar===""){extra="->"}else{extra=endChar}while(parserState.pos"," after ","="]}hofParameters=[...elems];elems.length=0;parserState.pos+=2;foundStopChar=true;foundSeparator=false;continue}else if(c===" "){parserState.pos+=1;continue}else if(isSeparatorCharacter(c)){parserState.pos+=1;foundStopChar=true;foundSeparator=true;continue}else if(c===":"&&isPathStart(parserState)){throw["Unexpected ","::",": paths cannot start with ","::"]}else if(isEndCharacter(c)){throw["Unexpected ",c," after ",extra]}if(!foundStopChar){let extra=[];if(isLastElemGeneric(query.elems,parserState)){extra=[" after ",">"]}else if(prevIs(parserState,"\"")){throw["Cannot have more than one element if you use quotes"]}if(endChar!==""){throw["Expected ",",",", ","=",", or ",endChar,...extra,", found ",c,]}throw["Expected ",","," or ","=",...extra,", found ",c,]}const posBefore=parserState.pos;getFilteredNextElem(query,parserState,elems,endChar!=="");if(endChar!==""&&parserState.pos>=parserState.length){throw["Unclosed ",extra]}if(posBefore===parserState.pos){parserState.pos+=1}foundStopChar=false}if(parserState.pos>=parserState.length&&endChar!==""){throw["Unclosed ",extra]}parserState.pos+=1;if(hofParameters){foundSeparator=false;if([...elems,...hofParameters].some(x=>x.bindingName)||parserState.isInBinding){throw["Unexpected ","="," within ","->"]}const hofElem=makePrimitiveElement("->",{generics:hofParameters,bindings:new Map([["output",[...elems]]]),typeFilter:null,});elems.length=0;elems[0]=hofElem}parserState.typeFilter=oldTypeFilter;parserState.isInBinding=oldIsInBinding;return{foundSeparator}}function getNextElem(query,parserState,elems,isInGenerics){const generics=[];skipWhitespace(parserState);let start=parserState.pos;let end;if("[(".indexOf(parserState.userQuery[parserState.pos])!==-1){let endChar=")";let name="()";let friendlyName="tuple";if(parserState.userQuery[parserState.pos]==="["){endChar="]";name="[]";friendlyName="slice"}parserState.pos+=1;const{foundSeparator}=getItemsBefore(query,parserState,generics,endChar);const typeFilter=parserState.typeFilter;const bindingName=parserState.isInBinding;parserState.typeFilter=null;parserState.isInBinding=null;for(const gen of generics){if(gen.bindingName!==null){throw["Type parameter ","=",` cannot be within ${friendlyName} `,name]}}if(name==="()"&&!foundSeparator&&generics.length===1&&typeFilter===null){elems.push(generics[0])}else if(name==="()"&&generics.length===1&&generics[0].name==="->"){generics[0].typeFilter=typeFilter;elems.push(generics[0])}else{if(typeFilter!==null&&typeFilter!=="primitive"){throw["Invalid search type: primitive ",name," and ",typeFilter," both specified",]}parserState.totalElems+=1;if(isInGenerics){parserState.genericsElems+=1}elems.push(makePrimitiveElement(name,{bindingName,generics}))}}else if(parserState.userQuery[parserState.pos]==="&"){if(parserState.typeFilter!==null&&parserState.typeFilter!=="primitive"){throw["Invalid search type: primitive ","&"," and ",parserState.typeFilter," both specified",]}parserState.typeFilter=null;parserState.pos+=1;let c=parserState.userQuery[parserState.pos];while(c===" "&&parserState.pos=end){throw["Found generics without a path"]}parserState.pos+=1;getItemsBefore(query,parserState,generics,">")}else if(parserState.pos=end){throw["Found generics without a path"]}if(parserState.isInBinding){throw["Unexpected ","("," after ","="]}parserState.pos+=1;const 
typeFilter=parserState.typeFilter;parserState.typeFilter=null;getItemsBefore(query,parserState,generics,")");skipWhitespace(parserState);if(isReturnArrow(parserState)){parserState.pos+=2;skipWhitespace(parserState);getFilteredNextElem(query,parserState,generics,isInGenerics);generics[generics.length-1].bindingName=makePrimitiveElement("output")}else{generics.push(makePrimitiveElement(null,{bindingName:makePrimitiveElement("output"),typeFilter:null,}))}parserState.typeFilter=typeFilter}if(isStringElem){skipWhitespace(parserState)}if(start>=end&&generics.length===0){return}if(parserState.userQuery[parserState.pos]==="="){if(parserState.isInBinding){throw["Cannot write ","="," twice in a binding"]}if(!isInGenerics){throw["Type parameter ","="," must be within generics list"]}const name=parserState.userQuery.slice(start,end).trim();if(name==="!"){throw["Type parameter ","="," key cannot be ","!"," never type"]}if(name.includes("!")){throw["Type parameter ","="," key cannot be ","!"," macro"]}if(name.includes("::")){throw["Type parameter ","="," key cannot contain ","::"," path"]}if(name.includes(":")){throw["Type parameter ","="," key cannot contain ",":"," type"]}parserState.isInBinding={name,generics}}else{elems.push(createQueryElement(query,parserState,parserState.userQuery.slice(start,end),generics,isInGenerics,),)}}}function checkExtraTypeFilterCharacters(start,parserState){const query=parserState.userQuery.slice(start,parserState.pos).trim();const match=query.match(REGEX_INVALID_TYPE_FILTER);if(match){throw["Unexpected ",match[0]," in type filter (before ",":",")",]}}function createQueryElement(query,parserState,name,generics,isInGenerics){const path=name.trim();if(path.length===0&&generics.length===0){throw["Unexpected ",parserState.userQuery[parserState.pos]]}if(query.literalSearch&&parserState.totalElems-parserState.genericsElems>0){throw["Cannot have more than one element if you use quotes"]}const typeFilter=parserState.typeFilter;parserState.typeFilter=null;if(name.trim()==="!"){if(typeFilter!==null&&typeFilter!=="primitive"){throw["Invalid search type: primitive never type ","!"," and ",typeFilter," both specified",]}if(generics.length!==0){throw["Never type ","!"," does not accept generic parameters",]}const bindingName=parserState.isInBinding;parserState.isInBinding=null;return makePrimitiveElement("never",{bindingName})}const quadcolon=/::\s*::/.exec(path);if(path.startsWith("::")){throw["Paths cannot start with ","::"]}else if(path.endsWith("::")){throw["Paths cannot end with ","::"]}else if(quadcolon!==null){throw["Unexpected ",quadcolon[0]]}const pathSegments=path.split(/(?:::\s*)|(?:\s+(?:::\s*)?)/);if(pathSegments.length===0||(pathSegments.length===1&&pathSegments[0]==="")){if(generics.length>0||prevIs(parserState,">")){throw["Found generics without a path"]}else{throw["Unexpected ",parserState.userQuery[parserState.pos]]}}for(const[i,pathSegment]of pathSegments.entries()){if(pathSegment==="!"){if(i!==0){throw["Never type ","!"," is not associated item"]}pathSegments[i]="never"}}parserState.totalElems+=1;if(isInGenerics){parserState.genericsElems+=1}const bindingName=parserState.isInBinding;parserState.isInBinding=null;const bindings=new Map();const 
pathLast=pathSegments[pathSegments.length-1];return{name:name.trim(),id:null,fullPath:pathSegments,pathWithoutLast:pathSegments.slice(0,pathSegments.length-1),pathLast,normalizedPathLast:pathLast.replace(/_/g,""),generics:generics.filter(gen=>{if(gen.bindingName!==null){if(gen.name!==null){gen.bindingName.generics.unshift(gen)}bindings.set(gen.bindingName.name,gen.bindingName.generics);return false}return true}),bindings,typeFilter,bindingName,}}function makePrimitiveElement(name,extra){return Object.assign({name,id:null,fullPath:[name],pathWithoutLast:[],pathLast:name,normalizedPathLast:name,generics:[],bindings:new Map(),typeFilter:"primitive",bindingName:null,},extra)}function getStringElem(query,parserState,isInGenerics){if(isInGenerics){throw["Unexpected ","\""," in generics"]}else if(query.literalSearch){throw["Cannot have more than one literal search element"]}else if(parserState.totalElems-parserState.genericsElems>0){throw["Cannot use literal search when there is more than one element"]}parserState.pos+=1;const start=parserState.pos;const end=getIdentEndPosition(parserState);if(parserState.pos>=parserState.length){throw["Unclosed ","\""]}else if(parserState.userQuery[end]!=="\""){throw["Unexpected ",parserState.userQuery[end]," in a string element"]}else if(start===end){throw["Cannot have empty string element"]}parserState.pos+=1;query.literalSearch=true}function getIdentEndPosition(parserState){let afterIdent=consumeIdent(parserState);let end=parserState.pos;let macroExclamation=-1;while(parserState.pos0){throw["Unexpected ",c," after ",parserState.userQuery[parserState.pos-1]," (not a valid identifier)"]}else{throw["Unexpected ",c," (not a valid identifier)"]}parserState.pos+=1;afterIdent=consumeIdent(parserState);end=parserState.pos}if(macroExclamation!==-1){if(parserState.typeFilter===null){parserState.typeFilter="macro"}else if(parserState.typeFilter!=="macro"){throw["Invalid search type: macro ","!"," and ",parserState.typeFilter," both specified",]}end=macroExclamation}return end}function isSpecialStartCharacter(c){return"<\"".indexOf(c)!==-1}function isPathStart(parserState){return parserState.userQuery.slice(parserState.pos,parserState.pos+2)==="::"}function consumeIdent(parserState){REGEX_IDENT.lastIndex=parserState.pos;const match=parserState.userQuery.match(REGEX_IDENT);if(match){parserState.pos+=match[0].length;return true}return false}function isPathSeparator(c){return c===":"||c===" "}class VlqHexDecoder{constructor(string,cons){this.string=string;this.cons=cons;this.offset=0;this.backrefQueue=[]}decodeList(){let c=this.string.charCodeAt(this.offset);const ret=[];while(c!==125){ret.push(this.decode());c=this.string.charCodeAt(this.offset)}this.offset+=1;return ret}decode(){let n=0;let c=this.string.charCodeAt(this.offset);if(c===123){this.offset+=1;return this.decodeList()}while(c<96){n=(n<<4)|(c&0xF);this.offset+=1;c=this.string.charCodeAt(this.offset)}n=(n<<4)|(c&0xF);const[sign,value]=[n&1,n>>1];this.offset+=1;return sign?-value:value}next(){const c=this.string.charCodeAt(this.offset);if(c>=48&&c<64){this.offset+=1;return this.backrefQueue[c-48]}if(c===96){this.offset+=1;return this.cons(0)}const result=this.cons(this.decode());this.backrefQueue.unshift(result);if(this.backrefQueue.length>16){this.backrefQueue.pop()}return result}}class RoaringBitmap{constructor(str){const strdecoded=atob(str);const u8array=new Uint8Array(strdecoded.length);for(let j=0;j=4){offsets=[];for(let j=0;j>3]&(1<<(j&0x7))){const 
runcount=(u8array[i]|(u8array[i+1]<<8));i+=2;this.containers.push(new RoaringBitmapRun(runcount,u8array.slice(i,i+(runcount*4)),));i+=runcount*4}else if(this.cardinalities[j]>=4096){this.containers.push(new RoaringBitmapBits(u8array.slice(i,i+8192)));i+=8192}else{const end=this.cardinalities[j]*2;this.containers.push(new RoaringBitmapArray(this.cardinalities[j],u8array.slice(i,i+end),));i+=end}}}contains(keyvalue){const key=keyvalue>>16;const value=keyvalue&0xFFFF;for(let i=0;i=start&&value<=(start+lenm1)){return true}}return false}}class RoaringBitmapArray{constructor(cardinality,array){this.cardinality=cardinality;this.array=array}contains(value){const l=this.cardinality*2;for(let i=0;i>3]&(1<<(value&7)))}}class DocSearch{constructor(rawSearchIndex,rootPath,searchState){this.searchIndexDeprecated=new Map();this.searchIndexEmptyDesc=new Map();this.functionTypeFingerprint=null;this.typeNameIdMap=new Map();this.ALIASES=new Map();this.rootPath=rootPath;this.searchState=searchState;this.typeNameIdOfArray=this.buildTypeMapIndex("array");this.typeNameIdOfSlice=this.buildTypeMapIndex("slice");this.typeNameIdOfArrayOrSlice=this.buildTypeMapIndex("[]");this.typeNameIdOfTuple=this.buildTypeMapIndex("tuple");this.typeNameIdOfUnit=this.buildTypeMapIndex("unit");this.typeNameIdOfTupleOrUnit=this.buildTypeMapIndex("()");this.typeNameIdOfFn=this.buildTypeMapIndex("fn");this.typeNameIdOfFnMut=this.buildTypeMapIndex("fnmut");this.typeNameIdOfFnOnce=this.buildTypeMapIndex("fnonce");this.typeNameIdOfHof=this.buildTypeMapIndex("->");this.EMPTY_BINDINGS_MAP=new Map();this.EMPTY_GENERICS_ARRAY=[];this.TYPES_POOL=new Map();this.searchIndex=this.buildIndex(rawSearchIndex)}buildTypeMapIndex(name,isAssocType){if(name===""||name===null){return null}if(this.typeNameIdMap.has(name)){const obj=this.typeNameIdMap.get(name);obj.assocOnly=isAssocType&&obj.assocOnly;return obj.id}else{const id=this.typeNameIdMap.size;this.typeNameIdMap.set(name,{id,assocOnly:isAssocType});return id}}buildItemSearchTypeAll(types,lowercasePaths){return types.length>0?types.map(type=>this.buildItemSearchType(type,lowercasePaths)):this.EMPTY_GENERICS_ARRAY}buildItemSearchType(type,lowercasePaths,isAssocType){const PATH_INDEX_DATA=0;const GENERICS_DATA=1;const BINDINGS_DATA=2;let pathIndex,generics,bindings;if(typeof type==="number"){pathIndex=type;generics=this.EMPTY_GENERICS_ARRAY;bindings=this.EMPTY_BINDINGS_MAP}else{pathIndex=type[PATH_INDEX_DATA];generics=this.buildItemSearchTypeAll(type[GENERICS_DATA],lowercasePaths,);if(type.length>BINDINGS_DATA&&type[BINDINGS_DATA].length>0){bindings=new Map(type[BINDINGS_DATA].map(binding=>{const[assocType,constraints]=binding;return[this.buildItemSearchType(assocType,lowercasePaths,true).id,this.buildItemSearchTypeAll(constraints,lowercasePaths),]}))}else{bindings=this.EMPTY_BINDINGS_MAP}}let result;if(pathIndex<0){result={id:pathIndex,ty:TY_GENERIC,path:null,exactPath:null,generics,bindings,}}else if(pathIndex===0){result={id:null,ty:null,path:null,exactPath:null,generics,bindings,}}else{const item=lowercasePaths[pathIndex-1];result={id:this.buildTypeMapIndex(item.name,isAssocType),ty:item.ty,path:item.path,exactPath:item.exactPath,generics,bindings,}}const cr=this.TYPES_POOL.get(result.id);if(cr){if(cr.generics.length===result.generics.length&&cr.generics!==result.generics&&cr.generics.every((x,i)=>result.generics[i]===x)){result.generics=cr.generics}if(cr.bindings.size===result.bindings.size&&cr.bindings!==result.bindings){let ok=true;for(const[k,v]of cr.bindings.entries()){const 
v2=result.bindings.get(v);if(!v2){ok=false;break}if(v!==v2&&v.length===v2.length&&v.every((x,i)=>v2[i]===x)){result.bindings.set(k,v)}else if(v!==v2){ok=false;break}}if(ok){result.bindings=cr.bindings}}if(cr.ty===result.ty&&cr.path===result.path&&cr.bindings===result.bindings&&cr.generics===result.generics&&cr.ty===result.ty){return cr}}this.TYPES_POOL.set(result.id,result);return result}buildFunctionTypeFingerprint(type,output,fps){let input=type.id;if(input===this.typeNameIdOfArray||input===this.typeNameIdOfSlice){input=this.typeNameIdOfArrayOrSlice}if(input===this.typeNameIdOfTuple||input===this.typeNameIdOfUnit){input=this.typeNameIdOfTupleOrUnit}if(input===this.typeNameIdOfFn||input===this.typeNameIdOfFnMut||input===this.typeNameIdOfFnOnce){input=this.typeNameIdOfHof}const hashint1=k=>{k=(~~k+0x7ed55d16)+(k<<12);k=(k ^ 0xc761c23c)^(k>>>19);k=(~~k+0x165667b1)+(k<<5);k=(~~k+0xd3a2646c)^(k<<9);k=(~~k+0xfd7046c5)+(k<<3);return(k ^ 0xb55a4f09)^(k>>>16)};const hashint2=k=>{k=~k+(k<<15);k ^=k>>>12;k+=k<<2;k ^=k>>>4;k=Math.imul(k,2057);return k ^(k>>16)};if(input!==null){const h0a=hashint1(input);const h0b=hashint2(input);const h1a=~~(h0a+Math.imul(h0b,2));const h1b=~~(h0a+Math.imul(h0b,3));const h2a=~~(h0a+Math.imul(h0b,4));const h2b=~~(h0a+Math.imul(h0b,5));output[0]|=(1<<(h0a%32))|(1<<(h1b%32));output[1]|=(1<<(h1a%32))|(1<<(h2b%32));output[2]|=(1<<(h2a%32))|(1<<(h0b%32));fps.add(input)}for(const g of type.generics){this.buildFunctionTypeFingerprint(g,output,fps)}const fb={id:null,ty:0,generics:this.EMPTY_GENERICS_ARRAY,bindings:this.EMPTY_BINDINGS_MAP,};for(const[k,v]of type.bindings.entries()){fb.id=k;fb.generics=v;this.buildFunctionTypeFingerprint(fb,output,fps)}output[3]=fps.size}buildIndex(rawSearchIndex){const buildFunctionSearchTypeCallback=lowercasePaths=>{return functionSearchType=>{if(functionSearchType===0){return null}const INPUTS_DATA=0;const OUTPUT_DATA=1;let inputs,output;if(typeof functionSearchType[INPUTS_DATA]==="number"){inputs=[this.buildItemSearchType(functionSearchType[INPUTS_DATA],lowercasePaths),]}else{inputs=this.buildItemSearchTypeAll(functionSearchType[INPUTS_DATA],lowercasePaths,)}if(functionSearchType.length>1){if(typeof functionSearchType[OUTPUT_DATA]==="number"){output=[this.buildItemSearchType(functionSearchType[OUTPUT_DATA],lowercasePaths,),]}else{output=this.buildItemSearchTypeAll(functionSearchType[OUTPUT_DATA],lowercasePaths,)}}else{output=[]}const where_clause=[];const l=functionSearchType.length;for(let i=2;inoop);let descShard={crate,shard:0,start:0,len:itemDescShardDecoder.next(),promise:null,resolve:null,};const descShardList=[descShard];this.searchIndexDeprecated.set(crate,new RoaringBitmap(crateCorpus.c));this.searchIndexEmptyDesc.set(crate,new RoaringBitmap(crateCorpus.e));let descIndex=0;const crateRow={crate,ty:3,name:crate,path:"",descShard,descIndex,exactPath:"",desc:crateCorpus.doc,parent:undefined,type:null,id,word:crate,normalizedName:crate.indexOf("_")===-1?crate:crate.replace(/_/g,""),bitIndex:0,implDisambiguator:null,};id+=1;searchIndex.push(crateRow);currentIndex+=1;if(!this.searchIndexEmptyDesc.get(crate).contains(0)){descIndex+=1}const itemTypes=crateCorpus.t;const itemNames=crateCorpus.n;const itemPaths=new Map(crateCorpus.q);const itemReexports=new Map(crateCorpus.r);const itemParentIdxDecoder=new VlqHexDecoder(crateCorpus.i,noop=>noop);const implDisambiguator=new Map(crateCorpus.b);const paths=crateCorpus.p;const aliases=crateCorpus.a;const lowercasePaths=[];const itemFunctionDecoder=new 
VlqHexDecoder(crateCorpus.f,buildFunctionSearchTypeCallback(lowercasePaths),);let len=paths.length;let lastPath=itemPaths.get(0);for(let i=0;i2){path=itemPaths.has(elem[2])?itemPaths.get(elem[2]):lastPath;lastPath=path}const exactPath=elem.length>3?itemPaths.get(elem[3]):path;lowercasePaths.push({ty,name:name.toLowerCase(),path,exactPath});paths[i]={ty,name,path,exactPath}}lastPath="";len=itemTypes.length;let lastName="";let lastWord="";for(let i=0;i=descShard.len&&!this.searchIndexEmptyDesc.get(crate).contains(bitIndex)){descShard={crate,shard:descShard.shard+1,start:descShard.start+descShard.len,len:itemDescShardDecoder.next(),promise:null,resolve:null,};descIndex=0;descShardList.push(descShard)}const name=itemNames[i]===""?lastName:itemNames[i];const word=itemNames[i]===""?lastWord:itemNames[i].toLowerCase();const path=itemPaths.has(i)?itemPaths.get(i):lastPath;const type=itemFunctionDecoder.next();if(type!==null){if(type){const fp=this.functionTypeFingerprint.subarray(id*4,(id+1)*4);const fps=new Set();for(const t of type.inputs){this.buildFunctionTypeFingerprint(t,fp,fps)}for(const t of type.output){this.buildFunctionTypeFingerprint(t,fp,fps)}for(const w of type.where_clause){for(const t of w){this.buildFunctionTypeFingerprint(t,fp,fps)}}}}const itemParentIdx=itemParentIdxDecoder.next();const row={crate,ty:itemTypes.charCodeAt(i)-65,name,path,descShard,descIndex,exactPath:itemReexports.has(i)?itemPaths.get(itemReexports.get(i)):path,parent:itemParentIdx>0?paths[itemParentIdx-1]:undefined,type,id,word,normalizedName:word.indexOf("_")===-1?word:word.replace(/_/g,""),bitIndex,implDisambiguator:implDisambiguator.has(i)?implDisambiguator.get(i):null,};id+=1;searchIndex.push(row);lastPath=row.path;if(!this.searchIndexEmptyDesc.get(crate).contains(bitIndex)){descIndex+=1}lastName=name;lastWord=word}if(aliases){const currentCrateAliases=new Map();this.ALIASES.set(crate,currentCrateAliases);for(const alias_name in aliases){if(!Object.prototype.hasOwnProperty.call(aliases,alias_name)){continue}let currentNameAliases;if(currentCrateAliases.has(alias_name)){currentNameAliases=currentCrateAliases.get(alias_name)}else{currentNameAliases=[];currentCrateAliases.set(alias_name,currentNameAliases)}for(const local_alias of aliases[alias_name]){currentNameAliases.push(local_alias+currentIndex)}}}currentIndex+=itemTypes.length;this.searchState.descShards.set(crate,descShardList)}this.TYPES_POOL=new Map();return searchIndex}static parseQuery(userQuery){function itemTypeFromName(typename){const index=itemTypes.findIndex(i=>i===typename);if(index<0){throw["Unknown type filter ",typename]}return index}function convertTypeFilterOnElem(elem){if(elem.typeFilter!==null){let typeFilter=elem.typeFilter;if(typeFilter==="const"){typeFilter="constant"}elem.typeFilter=itemTypeFromName(typeFilter)}else{elem.typeFilter=NO_TYPE_FILTER}for(const elem2 of elem.generics){convertTypeFilterOnElem(elem2)}for(const constraints of elem.bindings.values()){for(const constraint of constraints){convertTypeFilterOnElem(constraint)}}}function newParsedQuery(userQuery){return{original:userQuery,userQuery:userQuery.toLowerCase(),elems:[],returned:[],foundElems:0,totalElems:0,literalSearch:false,hasReturnArrow:false,error:null,correction:null,proposeCorrectionFrom:null,proposeCorrectionTo:null,typeFingerprint:new Uint32Array(4),}}function parseInput(query,parserState){let foundStopChar=true;while(parserState.pos"){if(isReturnArrow(parserState)){query.hasReturnArrow=true;break}throw["Unexpected ",c," (did you mean ","->","?)"]}else 
if(parserState.pos>0){throw["Unexpected ",c," after ",parserState.userQuery[parserState.pos-1]]}throw["Unexpected ",c]}else if(c===" "){skipWhitespace(parserState);continue}if(!foundStopChar){let extra="";if(isLastElemGeneric(query.elems,parserState)){extra=[" after ",">"]}else if(prevIs(parserState,"\"")){throw["Cannot have more than one element if you use quotes"]}if(parserState.typeFilter!==null){throw["Expected ",","," or ","->",...extra,", found ",c,]}throw["Expected ",",",", ",":"," or ","->",...extra,", found ",c,]}const before=query.elems.length;getFilteredNextElem(query,parserState,query.elems,false);if(query.elems.length===before){parserState.pos+=1}foundStopChar=false}if(parserState.typeFilter!==null){throw["Unexpected ",":"," (expected path after type filter ",parserState.typeFilter+":",")",]}while(parserState.pos1}query.foundElems=query.elems.length+query.returned.length;query.totalElems=parserState.totalElems;return query}async execQuery(parsedQuery,filterCrates,currentCrate){const results_others=new Map(),results_in_args=new Map(),results_returned=new Map();function createQueryResults(results_in_args,results_returned,results_others,parsedQuery){return{"in_args":results_in_args,"returned":results_returned,"others":results_others,"query":parsedQuery,}}const buildHrefAndPath=item=>{let displayPath;let href;const type=itemTypes[item.ty];const name=item.name;let path=item.path;let exactPath=item.exactPath;if(type==="mod"){displayPath=path+"::";href=this.rootPath+path.replace(/::/g,"/")+"/"+name+"/index.html"}else if(type==="import"){displayPath=item.path+"::";href=this.rootPath+item.path.replace(/::/g,"/")+"/index.html#reexport."+name}else if(type==="primitive"||type==="keyword"){displayPath="";href=this.rootPath+path.replace(/::/g,"/")+"/"+type+"."+name+".html"}else if(type==="externcrate"){displayPath="";href=this.rootPath+name+"/index.html"}else if(item.parent!==undefined){const myparent=item.parent;let anchor=type+"."+name;const parentType=itemTypes[myparent.ty];let pageType=parentType;let pageName=myparent.name;exactPath=`${myparent.exactPath}::${myparent.name}`;if(parentType==="primitive"){displayPath=myparent.name+"::"}else if(type==="structfield"&&parentType==="variant"){const enumNameIdx=item.path.lastIndexOf("::");const enumName=item.path.substr(enumNameIdx+2);path=item.path.substr(0,enumNameIdx);displayPath=path+"::"+enumName+"::"+myparent.name+"::";anchor="variant."+myparent.name+".field."+name;pageType="enum";pageName=enumName}else{displayPath=path+"::"+myparent.name+"::"}if(item.implDisambiguator!==null){anchor=item.implDisambiguator+"/"+anchor}href=this.rootPath+path.replace(/::/g,"/")+"/"+pageType+"."+pageName+".html#"+anchor}else{displayPath=item.path+"::";href=this.rootPath+item.path.replace(/::/g,"/")+"/"+type+"."+name+".html"}return[displayPath,href,`${exactPath}::${name}`]};function pathSplitter(path){const tmp=""+path.replace(/::/g,"::");if(tmp.endsWith("")){return tmp.slice(0,tmp.length-6)}return tmp}const transformResults=results=>{const duplicates=new Set();const out=[];for(const result of results){if(result.id!==-1){const obj=this.searchIndex[result.id];obj.dist=result.dist;const res=buildHrefAndPath(obj);obj.displayPath=pathSplitter(res[0]);obj.fullPath=res[2]+"|"+obj.ty;if(duplicates.has(obj.fullPath)){continue}if(obj.ty===TY_IMPORT&&duplicates.has(res[2])){continue}if(duplicates.has(res[2]+"|"+TY_IMPORT)){continue}duplicates.add(obj.fullPath);duplicates.add(res[2]);obj.href=res[1];out.push(obj);if(out.length>=MAX_RESULTS){break}}}return out};const 
sortResults=async(results,isType,preferredCrate)=>{const userQuery=parsedQuery.userQuery;const casedUserQuery=parsedQuery.original;const result_list=[];for(const result of results.values()){result.item=this.searchIndex[result.id];result.word=this.searchIndex[result.id].word;result_list.push(result)}result_list.sort((aaa,bbb)=>{let a,b;a=(aaa.item.name!==casedUserQuery);b=(bbb.item.name!==casedUserQuery);if(a!==b){return a-b}a=(aaa.word!==userQuery);b=(bbb.word!==userQuery);if(a!==b){return a-b}a=(aaa.index<0);b=(bbb.index<0);if(a!==b){return a-b}a=aaa.path_dist;b=bbb.path_dist;if(a!==b){return a-b}a=aaa.index;b=bbb.index;if(a!==b){return a-b}a=(aaa.dist);b=(bbb.dist);if(a!==b){return a-b}a=this.searchIndexDeprecated.get(aaa.item.crate).contains(aaa.item.bitIndex);b=this.searchIndexDeprecated.get(bbb.item.crate).contains(bbb.item.bitIndex);if(a!==b){return a-b}a=(aaa.item.crate!==preferredCrate);b=(bbb.item.crate!==preferredCrate);if(a!==b){return a-b}a=aaa.word.length;b=bbb.word.length;if(a!==b){return a-b}a=aaa.word;b=bbb.word;if(a!==b){return(a>b?+1:-1)}a=this.searchIndexEmptyDesc.get(aaa.item.crate).contains(aaa.item.bitIndex);b=this.searchIndexEmptyDesc.get(bbb.item.crate).contains(bbb.item.bitIndex);if(a!==b){return a-b}a=aaa.item.ty;b=bbb.item.ty;if(a!==b){return a-b}a=aaa.item.path;b=bbb.item.path;if(a!==b){return(a>b?+1:-1)}return 0});return transformResults(result_list)};function unifyFunctionTypes(fnTypesIn,queryElems,whereClause,mgensIn,solutionCb,unboxingDepth,){if(unboxingDepth>=UNBOXING_LIMIT){return false}const mgens=mgensIn===null?null:new Map(mgensIn);if(queryElems.length===0){return!solutionCb||solutionCb(mgens)}if(!fnTypesIn||fnTypesIn.length===0){return false}const ql=queryElems.length;const fl=fnTypesIn.length;if(ql===1&&queryElems[0].generics.length===0&&queryElems[0].bindings.size===0){const queryElem=queryElems[0];for(const fnType of fnTypesIn){if(!unifyFunctionTypeIsMatchCandidate(fnType,queryElem,mgens)){continue}if(fnType.id<0&&queryElem.id<0){if(mgens&&mgens.has(fnType.id)&&mgens.get(fnType.id)!==queryElem.id){continue}const mgensScratch=new Map(mgens);mgensScratch.set(fnType.id,queryElem.id);if(!solutionCb||solutionCb(mgensScratch)){return true}}else if(!solutionCb||solutionCb(mgens?new Map(mgens):null)){return true}}for(const fnType of fnTypesIn){if(!unifyFunctionTypeIsUnboxCandidate(fnType,queryElem,whereClause,mgens,unboxingDepth+1,)){continue}if(fnType.id<0){if(mgens&&mgens.has(fnType.id)&&mgens.get(fnType.id)!==0){continue}const mgensScratch=new Map(mgens);mgensScratch.set(fnType.id,0);if(unifyFunctionTypes(whereClause[(-fnType.id)-1],queryElems,whereClause,mgensScratch,solutionCb,unboxingDepth+1,)){return true}}else if(unifyFunctionTypes([...fnType.generics,...Array.from(fnType.bindings.values()).flat()],queryElems,whereClause,mgens?new Map(mgens):null,solutionCb,unboxingDepth+1,)){return true}}return false}const fnTypes=fnTypesIn.slice();const flast=fl-1;const qlast=ql-1;const queryElem=queryElems[qlast];let queryElemsTmp=null;for(let i=flast;i>=0;i-=1){const fnType=fnTypes[i];if(!unifyFunctionTypeIsMatchCandidate(fnType,queryElem,mgens)){continue}let mgensScratch;if(fnType.id<0){mgensScratch=new Map(mgens);if(mgensScratch.has(fnType.id)&&mgensScratch.get(fnType.id)!==queryElem.id){continue}mgensScratch.set(fnType.id,queryElem.id)}else{mgensScratch=mgens}fnTypes[i]=fnTypes[flast];fnTypes.length=flast;if(!queryElemsTmp){queryElemsTmp=queryElems.slice(0,qlast)}const 
passesUnification=unifyFunctionTypes(fnTypes,queryElemsTmp,whereClause,mgensScratch,mgensScratch=>{if(fnType.generics.length===0&&queryElem.generics.length===0&&fnType.bindings.size===0&&queryElem.bindings.size===0){return!solutionCb||solutionCb(mgensScratch)}const solution=unifyFunctionTypeCheckBindings(fnType,queryElem,whereClause,mgensScratch,unboxingDepth,);if(!solution){return false}const simplifiedGenerics=solution.simplifiedGenerics;for(const simplifiedMgens of solution.mgens){const passesUnification=unifyFunctionTypes(simplifiedGenerics,queryElem.generics,whereClause,simplifiedMgens,solutionCb,unboxingDepth,);if(passesUnification){return true}}return false},unboxingDepth,);if(passesUnification){return true}fnTypes[flast]=fnTypes[i];fnTypes[i]=fnType;fnTypes.length=fl}for(let i=flast;i>=0;i-=1){const fnType=fnTypes[i];if(!unifyFunctionTypeIsUnboxCandidate(fnType,queryElem,whereClause,mgens,unboxingDepth+1,)){continue}let mgensScratch;if(fnType.id<0){mgensScratch=new Map(mgens);if(mgensScratch.has(fnType.id)&&mgensScratch.get(fnType.id)!==0){continue}mgensScratch.set(fnType.id,0)}else{mgensScratch=mgens}const generics=fnType.id<0?whereClause[(-fnType.id)-1]:fnType.generics;const bindings=fnType.bindings?Array.from(fnType.bindings.values()).flat():[];const passesUnification=unifyFunctionTypes(fnTypes.toSpliced(i,1,...generics,...bindings),queryElems,whereClause,mgensScratch,solutionCb,unboxingDepth+1,);if(passesUnification){return true}}return false}const unifyFunctionTypeIsMatchCandidate=(fnType,queryElem,mgensIn)=>{if(!typePassesFilter(queryElem.typeFilter,fnType.ty)){return false}if(fnType.id<0&&queryElem.id<0){if(mgensIn){if(mgensIn.has(fnType.id)&&mgensIn.get(fnType.id)!==queryElem.id){return false}for(const[fid,qid]of mgensIn.entries()){if(fnType.id!==fid&&queryElem.id===qid){return false}if(fnType.id===fid&&queryElem.id!==qid){return false}}}return true}else{if(queryElem.id===this.typeNameIdOfArrayOrSlice&&(fnType.id===this.typeNameIdOfSlice||fnType.id===this.typeNameIdOfArray)){}else if(queryElem.id===this.typeNameIdOfTupleOrUnit&&(fnType.id===this.typeNameIdOfTuple||fnType.id===this.typeNameIdOfUnit)){}else if(queryElem.id===this.typeNameIdOfHof&&(fnType.id===this.typeNameIdOfFn||fnType.id===this.typeNameIdOfFnMut||fnType.id===this.typeNameIdOfFnOnce)){}else if(fnType.id!==queryElem.id||queryElem.id===null){return false}if((fnType.generics.length+fnType.bindings.size)===0&&queryElem.generics.length!==0){return false}if(fnType.bindings.size0){const fnTypePath=fnType.path!==undefined&&fnType.path!==null?fnType.path.split("::"):[];if(queryElemPathLength>fnTypePath.length){return false}let i=0;for(const path of fnTypePath){if(path===queryElem.pathWithoutLast[i]){i+=1;if(i>=queryElemPathLength){break}}}if(i0){let mgensSolutionSet=[mgensIn];for(const[name,constraints]of queryElem.bindings.entries()){if(mgensSolutionSet.length===0){return false}if(!fnType.bindings.has(name)){return false}const fnTypeBindings=fnType.bindings.get(name);mgensSolutionSet=mgensSolutionSet.flatMap(mgens=>{const newSolutions=[];unifyFunctionTypes(fnTypeBindings,constraints,whereClause,mgens,newMgens=>{newSolutions.push(newMgens);return false},unboxingDepth,);return newSolutions})}if(mgensSolutionSet.length===0){return false}const binds=Array.from(fnType.bindings.entries()).flatMap(entry=>{const[name,constraints]=entry;if(queryElem.bindings.has(name)){return[]}else{return 
constraints}});if(simplifiedGenerics.length>0){simplifiedGenerics=[...simplifiedGenerics,...binds]}else{simplifiedGenerics=binds}return{simplifiedGenerics,mgens:mgensSolutionSet}}return{simplifiedGenerics,mgens:[mgensIn]}}function unifyFunctionTypeIsUnboxCandidate(fnType,queryElem,whereClause,mgens,unboxingDepth,){if(unboxingDepth>=UNBOXING_LIMIT){return false}if(fnType.id<0&&queryElem.id>=0){if(!whereClause){return false}if(mgens&&mgens.has(fnType.id)&&mgens.get(fnType.id)!==0){return false}const mgensTmp=new Map(mgens);mgensTmp.set(fnType.id,null);return checkIfInList(whereClause[(-fnType.id)-1],queryElem,whereClause,mgensTmp,unboxingDepth,)}else if(fnType.generics.length>0||fnType.bindings.size>0){const simplifiedGenerics=[...fnType.generics,...Array.from(fnType.bindings.values()).flat(),];return checkIfInList(simplifiedGenerics,queryElem,whereClause,mgens,unboxingDepth,)}return false}function checkIfInList(list,elem,whereClause,mgens,unboxingDepth){for(const entry of list){if(checkType(entry,elem,whereClause,mgens,unboxingDepth)){return true}}return false}const checkType=(row,elem,whereClause,mgens,unboxingDepth)=>{if(unboxingDepth>=UNBOXING_LIMIT){return false}if(row.bindings.size===0&&elem.bindings.size===0){if(elem.id<0&&mgens===null){return row.id<0||checkIfInList(row.generics,elem,whereClause,mgens,unboxingDepth+1,)}if(row.id>0&&elem.id>0&&elem.pathWithoutLast.length===0&&typePassesFilter(elem.typeFilter,row.ty)&&elem.generics.length===0&&elem.id!==this.typeNameIdOfArrayOrSlice&&elem.id!==this.typeNameIdOfTupleOrUnit&&elem.id!==this.typeNameIdOfHof){return row.id===elem.id||checkIfInList(row.generics,elem,whereClause,mgens,unboxingDepth,)}}return unifyFunctionTypes([row],[elem],whereClause,mgens,null,unboxingDepth)};function checkPath(contains,ty){if(contains.length===0){return 0}const maxPathEditDistance=Math.floor(contains.reduce((acc,next)=>acc+next.length,0)/3,);let ret_dist=maxPathEditDistance+1;const path=ty.path.split("::");if(ty.parent&&ty.parent.name){path.push(ty.parent.name.toLowerCase())}const length=path.length;const clength=contains.length;pathiter:for(let i=length-clength;i>=0;i-=1){let dist_total=0;for(let x=0;xmaxPathEditDistance){continue pathiter}dist_total+=dist}}ret_dist=Math.min(ret_dist,Math.round(dist_total/clength))}return ret_dist>maxPathEditDistance?null:ret_dist}function typePassesFilter(filter,type){if(filter<=NO_TYPE_FILTER||filter===type)return true;const name=itemTypes[type];switch(itemTypes[filter]){case"constant":return name==="associatedconstant";case"fn":return name==="method"||name==="tymethod";case"type":return name==="primitive"||name==="associatedtype";case"trait":return name==="traitalias"}return false}function createAliasFromItem(item){return{crate:item.crate,name:item.name,path:item.path,descShard:item.descShard,descIndex:item.descIndex,exactPath:item.exactPath,ty:item.ty,parent:item.parent,type:item.type,is_alias:true,bitIndex:item.bitIndex,implDisambiguator:item.implDisambiguator,}}const handleAliases=async(ret,query,filterCrates,currentCrate)=>{const lowerQuery=query.toLowerCase();const aliases=[];const crateAliases=[];if(filterCrates!==null){if(this.ALIASES.has(filterCrates)&&this.ALIASES.get(filterCrates).has(lowerQuery)){const query_aliases=this.ALIASES.get(filterCrates).get(lowerQuery);for(const alias of query_aliases){aliases.push(createAliasFromItem(this.searchIndex[alias]))}}}else{for(const[crate,crateAliasesIndex]of this.ALIASES){if(crateAliasesIndex.has(lowerQuery)){const pushTo=crate===currentCrate?crateAliases:aliases;const 
query_aliases=crateAliasesIndex.get(lowerQuery);for(const alias of query_aliases){pushTo.push(createAliasFromItem(this.searchIndex[alias]))}}}}const sortFunc=(aaa,bbb)=>{if(aaa.path{return this.searchIndexEmptyDesc.get(alias.crate).contains(alias.bitIndex)?"":this.searchState.loadDesc(alias)};const[crateDescs,descs]=await Promise.all([Promise.all(crateAliases.map(fetchDesc)),Promise.all(aliases.map(fetchDesc)),]);const pushFunc=alias=>{alias.alias=query;const res=buildHrefAndPath(alias);alias.displayPath=pathSplitter(res[0]);alias.fullPath=alias.displayPath+alias.name;alias.href=res[1];ret.others.unshift(alias);if(ret.others.length>MAX_RESULTS){ret.others.pop()}};aliases.forEach((alias,i)=>{alias.desc=descs[i]});aliases.forEach(pushFunc);crateAliases.forEach((alias,i)=>{alias.desc=crateDescs[i]});crateAliases.forEach(pushFunc)};function addIntoResults(results,fullId,id,index,dist,path_dist,maxEditDistance){if(dist<=maxEditDistance||index!==-1){if(results.has(fullId)){const result=results.get(fullId);if(result.dontValidate||result.dist<=dist){return}}results.set(fullId,{id:id,index:index,dontValidate:parsedQuery.literalSearch,dist:dist,path_dist:path_dist,})}}function handleSingleArg(row,pos,elem,results_others,results_in_args,results_returned,maxEditDistance,){if(!row||(filterCrates!==null&&row.crate!==filterCrates)){return}let path_dist=0;const fullId=row.id;const tfpDist=compareTypeFingerprints(fullId,parsedQuery.typeFingerprint,);if(tfpDist!==null){const in_args=row.type&&row.type.inputs&&checkIfInList(row.type.inputs,elem,row.type.where_clause,null,0);const returned=row.type&&row.type.output&&checkIfInList(row.type.output,elem,row.type.where_clause,null,0);if(in_args){results_in_args.max_dist=Math.max(results_in_args.max_dist||0,tfpDist);const maxDist=results_in_args.sizenormalizedIndex&&normalizedIndex!==-1)){index=normalizedIndex}if(elem.fullPath.length>1){path_dist=checkPath(elem.pathWithoutLast,row);if(path_dist===null){return}}if(parsedQuery.literalSearch){if(row.word===elem.pathLast){addIntoResults(results_others,fullId,pos,index,0,path_dist)}return}const dist=editDistance(row.normalizedName,elem.normalizedPathLast,maxEditDistance);if(index===-1&&dist>maxEditDistance){return}addIntoResults(results_others,fullId,pos,index,dist,path_dist,maxEditDistance)}function handleArgs(row,pos,results){if(!row||(filterCrates!==null&&row.crate!==filterCrates)||!row.type){return}const tfpDist=compareTypeFingerprints(row.id,parsedQuery.typeFingerprint,);if(tfpDist===null){return}if(results.size>=MAX_RESULTS&&tfpDist>results.max_dist){return}if(!unifyFunctionTypes(row.type.inputs,parsedQuery.elems,row.type.where_clause,null,mgens=>{return unifyFunctionTypes(row.type.output,parsedQuery.returned,row.type.where_clause,mgens,null,0,)},0,)){return}results.max_dist=Math.max(results.max_dist||0,tfpDist);addIntoResults(results,row.id,pos,0,tfpDist,0,Number.MAX_VALUE)}const compareTypeFingerprints=(fullId,queryFingerprint)=>{const fh0=this.functionTypeFingerprint[fullId*4];const fh1=this.functionTypeFingerprint[(fullId*4)+1];const fh2=this.functionTypeFingerprint[(fullId*4)+2];const[qh0,qh1,qh2]=queryFingerprint;const[in0,in1,in2]=[fh0&qh0,fh1&qh1,fh2&qh2];if((in0 ^ qh0)||(in1 ^ qh1)||(in2 ^ qh2)){return null}return this.functionTypeFingerprint[(fullId*4)+3]};const innerRunQuery=()=>{const queryLen=parsedQuery.elems.reduce((acc,next)=>acc+next.pathLast.length,0)+parsedQuery.returned.reduce((acc,next)=>acc+next.pathLast.length,0);const maxEditDistance=Math.floor(queryLen/3);const genericSymbols=new 
Map();const convertNameToId=(elem,isAssocType)=>{const loweredName=elem.pathLast.toLowerCase();if(this.typeNameIdMap.has(loweredName)&&(isAssocType||!this.typeNameIdMap.get(loweredName).assocOnly)){elem.id=this.typeNameIdMap.get(loweredName).id}else if(!parsedQuery.literalSearch){let match=null;let matchDist=maxEditDistance+1;let matchName="";for(const[name,{id,assocOnly}]of this.typeNameIdMap){const dist=Math.min(editDistance(name,loweredName,maxEditDistance),editDistance(name,elem.normalizedPathLast,maxEditDistance),);if(dist<=matchDist&&dist<=maxEditDistance&&(isAssocType||!assocOnly)){if(dist===matchDist&&matchName>name){continue}match=id;matchDist=dist;matchName=name}}if(match!==null){parsedQuery.correction=matchName}elem.id=match}if((elem.id===null&&parsedQuery.totalElems>1&&elem.typeFilter===-1&&elem.generics.length===0&&elem.bindings.size===0)||elem.typeFilter===TY_GENERIC){if(genericSymbols.has(elem.name)){elem.id=genericSymbols.get(elem.name)}else{elem.id=-(genericSymbols.size+1);genericSymbols.set(elem.name,elem.id)}if(elem.typeFilter===-1&&elem.name.length>=3){const maxPartDistance=Math.floor(elem.name.length/3);let matchDist=maxPartDistance+1;let matchName="";for(const name of this.typeNameIdMap.keys()){const dist=editDistance(name,elem.name,maxPartDistance);if(dist<=matchDist&&dist<=maxPartDistance){if(dist===matchDist&&matchName>name){continue}matchDist=dist;matchName=name}}if(matchName!==""){parsedQuery.proposeCorrectionFrom=elem.name;parsedQuery.proposeCorrectionTo=matchName}}elem.typeFilter=TY_GENERIC}if(elem.generics.length>0&&elem.typeFilter===TY_GENERIC){parsedQuery.error=["Generic type parameter ",elem.name," does not accept generic parameters",]}for(const elem2 of elem.generics){convertNameToId(elem2)}elem.bindings=new Map(Array.from(elem.bindings.entries()).map(entry=>{const[name,constraints]=entry;if(!this.typeNameIdMap.has(name)){parsedQuery.error=["Type parameter ",name," does not exist",];return[null,[]]}for(const elem2 of constraints){convertNameToId(elem2)}return[this.typeNameIdMap.get(name).id,constraints]}),)};const fps=new Set();for(const elem of parsedQuery.elems){convertNameToId(elem);this.buildFunctionTypeFingerprint(elem,parsedQuery.typeFingerprint,fps)}for(const elem of parsedQuery.returned){convertNameToId(elem);this.buildFunctionTypeFingerprint(elem,parsedQuery.typeFingerprint,fps)}if(parsedQuery.foundElems===1&&!parsedQuery.hasReturnArrow){if(parsedQuery.elems.length===1){const elem=parsedQuery.elems[0];const length=this.searchIndex.length;for(let i=0,nSearchIndex=length;i0){const sortQ=(a,b)=>{const ag=a.generics.length===0&&a.bindings.size===0;const bg=b.generics.length===0&&b.bindings.size===0;if(ag!==bg){return ag-bg}const ai=a.id>0;const bi=b.id>0;return ai-bi};parsedQuery.elems.sort(sortQ);parsedQuery.returned.sort(sortQ);for(let i=0,nSearchIndex=this.searchIndex.length;i{const descs=await Promise.all(list.map(result=>{return this.searchIndexEmptyDesc.get(result.crate).contains(result.bitIndex)?"":this.searchState.loadDesc(result)}));for(const[i,result]of list.entries()){result.desc=descs[i]}}));if(parsedQuery.error!==null&&ret.others.length!==0){ret.query.error=null}return ret}}let rawSearchIndex;let docSearch;const longItemTypes=["keyword","primitive type","module","extern crate","re-export","struct","enum","function","type alias","static","trait","","trait method","method","struct field","enum variant","macro","assoc type","constant","assoc const","union","foreign type","existential type","attribute macro","derive macro","trait alias",];let 
currentResults;function printTab(nb){let iter=0;let foundCurrentTab=false;let foundCurrentResultSet=false;onEachLazy(document.getElementById("search-tabs").childNodes,elem=>{if(nb===iter){addClass(elem,"selected");foundCurrentTab=true}else{removeClass(elem,"selected")}iter+=1});const isTypeSearch=(nb>0||iter===1);iter=0;onEachLazy(document.getElementById("results").childNodes,elem=>{if(nb===iter){addClass(elem,"active");foundCurrentResultSet=true}else{removeClass(elem,"active")}iter+=1});if(foundCurrentTab&&foundCurrentResultSet){searchState.currentTab=nb;const correctionsElem=document.getElementsByClassName("search-corrections");if(isTypeSearch){removeClass(correctionsElem[0],"hidden")}else{addClass(correctionsElem[0],"hidden")}}else if(nb!==0){printTab(0)}}function buildUrl(search,filterCrates){let extra="?search="+encodeURIComponent(search);if(filterCrates!==null){extra+="&filter-crate="+encodeURIComponent(filterCrates)}return getNakedUrl()+extra+window.location.hash}function getFilterCrates(){const elem=document.getElementById("crate-search");if(elem&&elem.value!=="all crates"&&window.searchIndex.has(elem.value)){return elem.value}return null}function nextTab(direction){const next=(searchState.currentTab+direction+3)%searchState.focusedByTab.length;searchState.focusedByTab[searchState.currentTab]=document.activeElement;printTab(next);focusSearchResult()}function focusSearchResult(){const target=searchState.focusedByTab[searchState.currentTab]||document.querySelectorAll(".search-results.active a").item(0)||document.querySelectorAll("#search-tabs button").item(searchState.currentTab);searchState.focusedByTab[searchState.currentTab]=null;if(target){target.focus()}}async function addTab(array,query,display){const extraClass=display?" active":"";const output=document.createElement("div");if(array.length>0){output.className="search-results "+extraClass;for(const item of array){const name=item.name;const type=itemTypes[item.ty];const longType=longItemTypes[item.ty];const typeName=longType.length!==0?`${longType}`:"?";const link=document.createElement("a");link.className="result-"+type;link.href=item.href;const resultName=document.createElement("div");resultName.className="result-name";resultName.insertAdjacentHTML("beforeend",`${typeName}`);link.appendChild(resultName);let alias=" ";if(item.is_alias){alias=`
\ +${item.alias} - see \ +
`}resultName.insertAdjacentHTML("beforeend",`
${alias}\ +${item.displayPath}${name}\ +
`);const description=document.createElement("div");description.className="desc";description.insertAdjacentHTML("beforeend",item.desc);link.appendChild(description);output.appendChild(link)}}else if(query.error===null){output.className="search-failed"+extraClass;output.innerHTML="No results :(
"+"Try on DuckDuckGo?

"+"Or try looking in one of these:"}return[output,array.length]}function makeTabHeader(tabNb,text,nbElems){const fmtNbElems=nbElems<10?`\u{2007}(${nbElems})\u{2007}\u{2007}`:nbElems<100?`\u{2007}(${nbElems})\u{2007}`:`\u{2007}(${nbElems})`;if(searchState.currentTab===tabNb){return""}return""}async function showResults(results,go_to_first,filterCrates){const search=searchState.outputElement();if(go_to_first||(results.others.length===1&&getSettingValue("go-to-only-result")==="true")){window.onunload=()=>{};searchState.removeQueryParameters();const elem=document.createElement("a");elem.href=results.others[0].href;removeClass(elem,"active");document.body.appendChild(elem);elem.click();return}if(results.query===undefined){results.query=DocSearch.parseQuery(searchState.input.value)}currentResults=results.query.userQuery;const[ret_others,ret_in_args,ret_returned]=await Promise.all([addTab(results.others,results.query,true),addTab(results.in_args,results.query,false),addTab(results.returned,results.query,false),]);let currentTab=searchState.currentTab;if((currentTab===0&&ret_others[1]===0)||(currentTab===1&&ret_in_args[1]===0)||(currentTab===2&&ret_returned[1]===0)){if(ret_others[1]!==0){currentTab=0}else if(ret_in_args[1]!==0){currentTab=1}else if(ret_returned[1]!==0){currentTab=2}}let crates="";if(rawSearchIndex.size>1){crates=" in 
"}let output=`

Results${crates}

`;if(results.query.error!==null){const error=results.query.error;error.forEach((value,index)=>{value=value.split("<").join("<").split(">").join(">");if(index%2!==0){error[index]=`${value.replaceAll(" ", " ")}`}else{error[index]=value}});output+=`

Query parser error: "${error.join("")}".

`;output+="
"+makeTabHeader(0,"In Names",ret_others[1])+"
";currentTab=0}else if(results.query.foundElems<=1&&results.query.returned.length===0){output+="
"+makeTabHeader(0,"In Names",ret_others[1])+makeTabHeader(1,"In Parameters",ret_in_args[1])+makeTabHeader(2,"In Return Types",ret_returned[1])+"
"}else{const signatureTabTitle=results.query.elems.length===0?"In Function Return Types":results.query.returned.length===0?"In Function Parameters":"In Function Signatures";output+="
"+makeTabHeader(0,signatureTabTitle,ret_others[1])+"
";currentTab=0}if(results.query.correction!==null){const orig=results.query.returned.length>0?results.query.returned[0].name:results.query.elems[0].name;output+="

"+`Type "${orig}" not found. `+"Showing results for closest type name "+`"${results.query.correction}" instead.

`}if(results.query.proposeCorrectionFrom!==null){const orig=results.query.proposeCorrectionFrom;const targ=results.query.proposeCorrectionTo;output+="

"+`Type "${orig}" not found and used as generic parameter. `+`Consider searching for "${targ}" instead.

`}const resultsElem=document.createElement("div");resultsElem.id="results";resultsElem.appendChild(ret_others[0]);resultsElem.appendChild(ret_in_args[0]);resultsElem.appendChild(ret_returned[0]);search.innerHTML=output;const crateSearch=document.getElementById("crate-search");if(crateSearch){crateSearch.addEventListener("input",updateCrate)}search.appendChild(resultsElem);searchState.showResults(search);const elems=document.getElementById("search-tabs").childNodes;searchState.focusedByTab=[];let i=0;for(const elem of elems){const j=i;elem.onclick=()=>printTab(j);searchState.focusedByTab.push(null);i+=1}printTab(currentTab)}function updateSearchHistory(url){if(!browserSupportsHistoryApi()){return}const params=searchState.getQueryStringParams();if(!history.state&&!params.search){history.pushState(null,"",url)}else{history.replaceState(null,"",url)}}async function search(forced){const query=DocSearch.parseQuery(searchState.input.value.trim());let filterCrates=getFilterCrates();if(!forced&&query.userQuery===currentResults){if(query.userQuery.length>0){putBackSearch()}return}searchState.setLoadingSearch();const params=searchState.getQueryStringParams();if(filterCrates===null&¶ms["filter-crate"]!==undefined){filterCrates=params["filter-crate"]}searchState.title="\""+query.original+"\" Search - Rust";updateSearchHistory(buildUrl(query.original,filterCrates));await showResults(await docSearch.execQuery(query,filterCrates,window.currentCrate),params.go_to_first,filterCrates)}function onSearchSubmit(e){e.preventDefault();searchState.clearInputTimeout();search()}function putBackSearch(){const search_input=searchState.input;if(!searchState.input){return}if(search_input.value!==""&&!searchState.isDisplayed()){searchState.showResults();if(browserSupportsHistoryApi()){history.replaceState(null,"",buildUrl(search_input.value,getFilterCrates()))}document.title=searchState.title}}function registerSearchEvents(){const params=searchState.getQueryStringParams();if(searchState.input.value===""){searchState.input.value=params.search||""}const searchAfter500ms=()=>{searchState.clearInputTimeout();if(searchState.input.value.length===0){searchState.hideResults()}else{searchState.timeout=setTimeout(search,500)}};searchState.input.onkeyup=searchAfter500ms;searchState.input.oninput=searchAfter500ms;document.getElementsByClassName("search-form")[0].onsubmit=onSearchSubmit;searchState.input.onchange=e=>{if(e.target!==document.activeElement){return}searchState.clearInputTimeout();setTimeout(search,0)};searchState.input.onpaste=searchState.input.onchange;searchState.outputElement().addEventListener("keydown",e=>{if(e.altKey||e.ctrlKey||e.shiftKey||e.metaKey){return}if(e.which===38){const previous=document.activeElement.previousElementSibling;if(previous){previous.focus()}else{searchState.focus()}e.preventDefault()}else if(e.which===40){const next=document.activeElement.nextElementSibling;if(next){next.focus()}const rect=document.activeElement.getBoundingClientRect();if(window.innerHeight-rect.bottom{if(e.which===40){focusSearchResult();e.preventDefault()}});searchState.input.addEventListener("focus",()=>{putBackSearch()});searchState.input.addEventListener("blur",()=>{searchState.input.placeholder=searchState.input.origPlaceholder});if(browserSupportsHistoryApi()){const previousTitle=document.title;window.addEventListener("popstate",e=>{const 
params=searchState.getQueryStringParams();document.title=previousTitle;currentResults=null;if(params.search&¶ms.search.length>0){searchState.input.value=params.search;e.preventDefault();search()}else{searchState.input.value="";searchState.hideResults()}})}window.onpageshow=()=>{const qSearch=searchState.getQueryStringParams().search;if(searchState.input.value===""&&qSearch){searchState.input.value=qSearch}search()}}function updateCrate(ev){if(ev.target.value==="all crates"){const query=searchState.input.value.trim();updateSearchHistory(buildUrl(query,null))}currentResults=null;search(true)}function initSearch(searchIndx){rawSearchIndex=searchIndx;if(typeof window!=="undefined"){docSearch=new DocSearch(rawSearchIndex,ROOT_PATH,searchState);registerSearchEvents();if(window.searchState.getQueryStringParams().search){search()}}else if(typeof exports!=="undefined"){docSearch=new DocSearch(rawSearchIndex,ROOT_PATH,searchState);exports.docSearch=docSearch;exports.parseQuery=DocSearch.parseQuery}}if(typeof exports!=="undefined"){exports.initSearch=initSearch}if(typeof window!=="undefined"){window.initSearch=initSearch;if(window.searchIndex!==undefined){initSearch(window.searchIndex)}}else{initSearch(new Map())}})() \ No newline at end of file diff --git a/static.files/settings-7e3bb6c46e92e77c.js b/static.files/settings-7e3bb6c46e92e77c.js new file mode 100644 index 000000000..049ee8374 --- /dev/null +++ b/static.files/settings-7e3bb6c46e92e77c.js @@ -0,0 +1,17 @@ +"use strict";(function(){const isSettingsPage=window.location.pathname.endsWith("/settings.html");function changeSetting(settingName,value){if(settingName==="theme"){const useSystem=value==="system preference"?"true":"false";updateLocalStorage("use-system-theme",useSystem)}updateLocalStorage(settingName,value);switch(settingName){case"theme":case"preferred-dark-theme":case"preferred-light-theme":updateTheme();updateLightAndDark();break;case"line-numbers":if(value===true){window.rustdoc_add_line_numbers_to_examples()}else{window.rustdoc_remove_line_numbers_from_examples()}break;case"hide-sidebar":if(value===true){addClass(document.documentElement,"hide-sidebar")}else{removeClass(document.documentElement,"hide-sidebar")}break;case"hide-toc":if(value===true){addClass(document.documentElement,"hide-toc")}else{removeClass(document.documentElement,"hide-toc")}break;case"hide-modnav":if(value===true){addClass(document.documentElement,"hide-modnav")}else{removeClass(document.documentElement,"hide-modnav")}break}}function showLightAndDark(){removeClass(document.getElementById("preferred-light-theme"),"hidden");removeClass(document.getElementById("preferred-dark-theme"),"hidden")}function hideLightAndDark(){addClass(document.getElementById("preferred-light-theme"),"hidden");addClass(document.getElementById("preferred-dark-theme"),"hidden")}function updateLightAndDark(){const useSystem=getSettingValue("use-system-theme");if(useSystem==="true"||(useSystem===null&&getSettingValue("theme")===null)){showLightAndDark()}else{hideLightAndDark()}}function setEvents(settingsElement){updateLightAndDark();onEachLazy(settingsElement.querySelectorAll("input[type=\"checkbox\"]"),toggle=>{const settingId=toggle.id;const settingValue=getSettingValue(settingId);if(settingValue!==null){toggle.checked=settingValue==="true"}toggle.onchange=()=>{changeSetting(toggle.id,toggle.checked)}});onEachLazy(settingsElement.querySelectorAll("input[type=\"radio\"]"),elem=>{const settingId=elem.name;let settingValue=getSettingValue(settingId);if(settingId==="theme"){const 
useSystem=getSettingValue("use-system-theme");if(useSystem==="true"||settingValue===null){settingValue=useSystem==="false"?"light":"system preference"}}if(settingValue!==null&&settingValue!=="null"){elem.checked=settingValue===elem.value}elem.addEventListener("change",ev=>{changeSetting(ev.target.name,ev.target.value)})})}function buildSettingsPageSections(settings){let output="";for(const setting of settings){if(setting==="hr"){output+="
";continue}const js_data_name=setting["js_name"];const setting_name=setting["name"];if(setting["options"]!==undefined){output+=`\ +
+
${setting_name}
+
`;onEach(setting["options"],option=>{const checked=option===setting["default"]?" checked":"";const full=`${js_data_name}-${option.replace(/ /g,"-")}`;output+=`\ + `});output+=`\ +
+
`}else{const checked=setting["default"]===true?" checked":"";output+=`\ +
\ + \ +
`}}return output}function buildSettingsPage(){const theme_names=getVar("themes").split(",").filter(t=>t);theme_names.push("light","dark","ayu");const settings=[{"name":"Theme","js_name":"theme","default":"system preference","options":theme_names.concat("system preference"),},{"name":"Preferred light theme","js_name":"preferred-light-theme","default":"light","options":theme_names,},{"name":"Preferred dark theme","js_name":"preferred-dark-theme","default":"dark","options":theme_names,},{"name":"Auto-hide item contents for large items","js_name":"auto-hide-large-items","default":true,},{"name":"Auto-hide item methods' documentation","js_name":"auto-hide-method-docs","default":false,},{"name":"Auto-hide trait implementation documentation","js_name":"auto-hide-trait-implementations","default":false,},{"name":"Directly go to item in search if there is only one result","js_name":"go-to-only-result","default":false,},{"name":"Show line numbers on code examples","js_name":"line-numbers","default":false,},{"name":"Hide persistent navigation bar","js_name":"hide-sidebar","default":false,},{"name":"Hide table of contents","js_name":"hide-toc","default":false,},{"name":"Hide module navigation","js_name":"hide-modnav","default":false,},{"name":"Disable keyboard shortcuts","js_name":"disable-shortcuts","default":false,},];const elementKind=isSettingsPage?"section":"div";const innerHTML=`
${buildSettingsPageSections(settings)}
`;const el=document.createElement(elementKind);el.id="settings";if(!isSettingsPage){el.className="popover"}el.innerHTML=innerHTML;if(isSettingsPage){document.getElementById(MAIN_ID).appendChild(el)}else{el.setAttribute("tabindex","-1");getSettingsButton().appendChild(el)}return el}const settingsMenu=buildSettingsPage();function displaySettings(){settingsMenu.style.display="";onEachLazy(settingsMenu.querySelectorAll("input[type='checkbox']"),el=>{const val=getSettingValue(el.id);const checked=val==="true";if(checked!==el.checked&&val!==null){el.checked=checked}})}function settingsBlurHandler(event){blurHandler(event,getSettingsButton(),window.hidePopoverMenus)}if(isSettingsPage){getSettingsButton().onclick=event=>{event.preventDefault()}}else{const settingsButton=getSettingsButton();const settingsMenu=document.getElementById("settings");settingsButton.onclick=event=>{if(settingsMenu.contains(event.target)){return}event.preventDefault();const shouldDisplaySettings=settingsMenu.style.display==="none";window.hideAllModals();if(shouldDisplaySettings){displaySettings()}};settingsButton.onblur=settingsBlurHandler;settingsButton.querySelector("a").onblur=settingsBlurHandler;onEachLazy(settingsMenu.querySelectorAll("input"),el=>{el.onblur=settingsBlurHandler});settingsMenu.onblur=settingsBlurHandler}setTimeout(()=>{setEvents(settingsMenu);if(!isSettingsPage){displaySettings()}removeClass(getSettingsButton(),"rotate")},0)})() \ No newline at end of file diff --git a/static.files/src-script-e66d777a5a92e9b2.js b/static.files/src-script-e66d777a5a92e9b2.js new file mode 100644 index 000000000..d0aebb851 --- /dev/null +++ b/static.files/src-script-e66d777a5a92e9b2.js @@ -0,0 +1 @@ +"use strict";(function(){const rootPath=getVar("root-path");const NAME_OFFSET=0;const DIRS_OFFSET=1;const FILES_OFFSET=2;const RUSTDOC_MOBILE_BREAKPOINT=700;function closeSidebarIfMobile(){if(window.innerWidth{removeClass(document.documentElement,"src-sidebar-expanded");updateLocalStorage("source-sidebar-show","false")};window.rustdocShowSourceSidebar=()=>{addClass(document.documentElement,"src-sidebar-expanded");updateLocalStorage("source-sidebar-show","true")};window.rustdocToggleSrcSidebar=()=>{if(document.documentElement.classList.contains("src-sidebar-expanded")){window.rustdocCloseSourceSidebar()}else{window.rustdocShowSourceSidebar()}};function createSrcSidebar(){const container=document.querySelector("nav.sidebar");const sidebar=document.createElement("div");sidebar.id="src-sidebar";let hasFoundFile=false;for(const[key,source]of srcIndex){source[NAME_OFFSET]=key;hasFoundFile=createDirEntry(source,sidebar,"",hasFoundFile)}container.appendChild(sidebar);const selected_elem=sidebar.getElementsByClassName("selected")[0];if(typeof selected_elem!=="undefined"){selected_elem.focus()}}function highlightSrcLines(){const match=window.location.hash.match(/^#?(\d+)(?:-(\d+))?$/);if(!match){return}let from=parseInt(match[1],10);let to=from;if(typeof match[2]!=="undefined"){to=parseInt(match[2],10)}if(to{onEachLazy(e.getElementsByTagName("a"),i_e=>{removeClass(i_e,"line-highlighted")})});for(let i=from;i<=to;++i){elem=document.getElementById(i);if(!elem){break}addClass(elem,"line-highlighted")}}const handleSrcHighlight=(function(){let prev_line_id=0;const set_fragment=name=>{const x=window.scrollX,y=window.scrollY;if(browserSupportsHistoryApi()){history.replaceState(null,null,"#"+name);highlightSrcLines()}else{location.replace("#"+name)}window.scrollTo(x,y)};return ev=>{let 
cur_line_id=parseInt(ev.target.id,10);if(isNaN(cur_line_id)||ev.ctrlKey||ev.altKey||ev.metaKey){return}ev.preventDefault();if(ev.shiftKey&&prev_line_id){if(prev_line_id>cur_line_id){const tmp=prev_line_id;prev_line_id=cur_line_id;cur_line_id=tmp}set_fragment(prev_line_id+"-"+cur_line_id)}else{prev_line_id=cur_line_id;set_fragment(cur_line_id)}}}());window.addEventListener("hashchange",highlightSrcLines);onEachLazy(document.getElementsByClassName("src-line-numbers"),el=>{el.addEventListener("click",handleSrcHighlight)});highlightSrcLines();window.createSrcSidebar=createSrcSidebar})() \ No newline at end of file diff --git a/static.files/storage-29b1d5a9048d38fe.js b/static.files/storage-29b1d5a9048d38fe.js new file mode 100644 index 000000000..688214da8 --- /dev/null +++ b/static.files/storage-29b1d5a9048d38fe.js @@ -0,0 +1,24 @@ +"use strict";const builtinThemes=["light","dark","ayu"];const darkThemes=["dark","ayu"];window.currentTheme=document.getElementById("themeStyle");const settingsDataset=(function(){const settingsElement=document.getElementById("default-settings");return settingsElement&&settingsElement.dataset?settingsElement.dataset:null})();function getSettingValue(settingName){const current=getCurrentValue(settingName);if(current===null&&settingsDataset!==null){const def=settingsDataset[settingName.replace(/-/g,"_")];if(def!==undefined){return def}}return current}const localStoredTheme=getSettingValue("theme");function hasClass(elem,className){return elem&&elem.classList&&elem.classList.contains(className)}function addClass(elem,className){if(elem&&elem.classList){elem.classList.add(className)}}function removeClass(elem,className){if(elem&&elem.classList){elem.classList.remove(className)}}function onEach(arr,func){for(const elem of arr){if(func(elem)){return true}}return false}function onEachLazy(lazyArray,func){return onEach(Array.prototype.slice.call(lazyArray),func)}function updateLocalStorage(name,value){try{window.localStorage.setItem("rustdoc-"+name,value)}catch(e){}}function getCurrentValue(name){try{return window.localStorage.getItem("rustdoc-"+name)}catch(e){return null}}const getVar=(function getVar(name){const el=document.querySelector("head > meta[name='rustdoc-vars']");return el?el.attributes["data-"+name].value:null});function switchTheme(newThemeName,saveTheme){const themeNames=getVar("themes").split(",").filter(t=>t);themeNames.push(...builtinThemes);if(themeNames.indexOf(newThemeName)===-1){return}if(saveTheme){updateLocalStorage("theme",newThemeName)}document.documentElement.setAttribute("data-theme",newThemeName);if(builtinThemes.indexOf(newThemeName)!==-1){if(window.currentTheme){window.currentTheme.parentNode.removeChild(window.currentTheme);window.currentTheme=null}}else{const newHref=getVar("root-path")+encodeURIComponent(newThemeName)+getVar("resource-suffix")+".css";if(!window.currentTheme){if(document.readyState==="loading"){document.write(``);window.currentTheme=document.getElementById("themeStyle")}else{window.currentTheme=document.createElement("link");window.currentTheme.rel="stylesheet";window.currentTheme.id="themeStyle";window.currentTheme.href=newHref;document.documentElement.appendChild(window.currentTheme)}}else if(newHref!==window.currentTheme.href){window.currentTheme.href=newHref}}}const updateTheme=(function(){const mql=window.matchMedia("(prefers-color-scheme: dark)");function updateTheme(){if(getSettingValue("use-system-theme")!=="false"){const lightTheme=getSettingValue("preferred-light-theme")||"light";const 
darkTheme=getSettingValue("preferred-dark-theme")||"dark";updateLocalStorage("use-system-theme","true");switchTheme(mql.matches?darkTheme:lightTheme,true)}else{switchTheme(getSettingValue("theme"),false)}}mql.addEventListener("change",updateTheme);return updateTheme})();if(getSettingValue("use-system-theme")!=="false"&&window.matchMedia){if(getSettingValue("use-system-theme")===null&&getSettingValue("preferred-dark-theme")===null&&darkThemes.indexOf(localStoredTheme)>=0){updateLocalStorage("preferred-dark-theme",localStoredTheme)}}updateTheme();if(getSettingValue("source-sidebar-show")==="true"){addClass(document.documentElement,"src-sidebar-expanded")}if(getSettingValue("hide-sidebar")==="true"){addClass(document.documentElement,"hide-sidebar")}if(getSettingValue("hide-toc")==="true"){addClass(document.documentElement,"hide-toc")}if(getSettingValue("hide-modnav")==="true"){addClass(document.documentElement,"hide-modnav")}function updateSidebarWidth(){const desktopSidebarWidth=getSettingValue("desktop-sidebar-width");if(desktopSidebarWidth&&desktopSidebarWidth!=="null"){document.documentElement.style.setProperty("--desktop-sidebar-width",desktopSidebarWidth+"px",)}const srcSidebarWidth=getSettingValue("src-sidebar-width");if(srcSidebarWidth&&srcSidebarWidth!=="null"){document.documentElement.style.setProperty("--src-sidebar-width",srcSidebarWidth+"px",)}}updateSidebarWidth();window.addEventListener("pageshow",ev=>{if(ev.persisted){setTimeout(updateTheme,0);setTimeout(updateSidebarWidth,0)}});class RustdocSearchElement extends HTMLElement{constructor(){super()}connectedCallback(){const rootPath=getVar("root-path");const currentCrate=getVar("current-crate");this.innerHTML=``}}window.customElements.define("rustdoc-search",RustdocSearchElement) \ No newline at end of file diff --git a/trait.impl/bytes/buf/buf_impl/trait.Buf.js b/trait.impl/bytes/buf/buf_impl/trait.Buf.js new file mode 100644 index 000000000..d1ce19268 --- /dev/null +++ b/trait.impl/bytes/buf/buf_impl/trait.Buf.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[12]} \ No newline at end of file diff --git a/trait.impl/bytes/buf/buf_mut/trait.BufMut.js b/trait.impl/bytes/buf/buf_mut/trait.BufMut.js new file mode 100644 index 000000000..d1ce19268 --- /dev/null +++ b/trait.impl/bytes/buf/buf_mut/trait.BufMut.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[12]} \ No newline at end of file diff --git a/trait.impl/core/borrow/trait.Borrow.js b/trait.impl/core/borrow/trait.Borrow.js new file mode 100644 index 000000000..3e7f459e2 --- /dev/null +++ b/trait.impl/core/borrow/trait.Borrow.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Borrow<[u8]> for Bytes"],["impl Borrow<[u8]> for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[720]} \ No newline at end of file diff --git a/trait.impl/core/borrow/trait.BorrowMut.js b/trait.impl/core/borrow/trait.BorrowMut.js new file mode 100644 index 
000000000..f2f6b86c9 --- /dev/null +++ b/trait.impl/core/borrow/trait.BorrowMut.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl BorrowMut<[u8]> for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[379]} \ No newline at end of file diff --git a/trait.impl/core/clone/trait.Clone.js b/trait.impl/core/clone/trait.Clone.js new file mode 100644 index 000000000..9394ab59e --- /dev/null +++ b/trait.impl/core/clone/trait.Clone.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Clone for Bytes"],["impl Clone for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[502]} \ No newline at end of file diff --git a/trait.impl/core/cmp/trait.Eq.js b/trait.impl/core/cmp/trait.Eq.js new file mode 100644 index 000000000..ec848b51d --- /dev/null +++ b/trait.impl/core/cmp/trait.Eq.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Eq for Bytes"],["impl Eq for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[476]} \ No newline at end of file diff --git a/trait.impl/core/cmp/trait.Ord.js b/trait.impl/core/cmp/trait.Ord.js new file mode 100644 index 000000000..219add10d --- /dev/null +++ b/trait.impl/core/cmp/trait.Ord.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Ord for Bytes"],["impl Ord for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[482]} \ No newline at end of file diff --git a/trait.impl/core/cmp/trait.PartialEq.js b/trait.impl/core/cmp/trait.PartialEq.js new file mode 100644 index 000000000..92044fd8a --- /dev/null +++ b/trait.impl/core/cmp/trait.PartialEq.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl PartialEq for Bytes"],["impl PartialEq for BytesMut"],["impl PartialEq<str> for Bytes"],["impl PartialEq<str> for BytesMut"],["impl PartialEq<Bytes> for &str"],["impl PartialEq<Bytes> for &[u8]"],["impl PartialEq<Bytes> for str"],["impl PartialEq<Bytes> for BytesMut"],["impl PartialEq<Bytes> for String"],["impl PartialEq<Bytes> for Vec<u8>"],["impl PartialEq<Bytes> for [u8]"],["impl PartialEq<BytesMut> for &str"],["impl PartialEq<BytesMut> for &[u8]"],["impl PartialEq<BytesMut> for str"],["impl PartialEq<BytesMut> for Bytes"],["impl PartialEq<BytesMut> for String"],["impl PartialEq<BytesMut> for Vec<u8>"],["impl PartialEq<BytesMut> for [u8]"],["impl PartialEq<String> for Bytes"],["impl PartialEq<String> for BytesMut"],["impl PartialEq<Vec<u8>> for Bytes"],["impl PartialEq<Vec<u8>> for BytesMut"],["impl PartialEq<[u8]> for Bytes"],["impl PartialEq<[u8]> for BytesMut"],["impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where\n Bytes: PartialEq<T>,
"],["impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where\n BytesMut: PartialEq<T>,
"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[10756]} \ No newline at end of file diff --git a/trait.impl/core/cmp/trait.PartialOrd.js b/trait.impl/core/cmp/trait.PartialOrd.js new file mode 100644 index 000000000..6eeaf5931 --- /dev/null +++ b/trait.impl/core/cmp/trait.PartialOrd.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl PartialOrd for Bytes"],["impl PartialOrd for BytesMut"],["impl PartialOrd<str> for Bytes"],["impl PartialOrd<str> for BytesMut"],["impl PartialOrd<Bytes> for &str"],["impl PartialOrd<Bytes> for &[u8]"],["impl PartialOrd<Bytes> for str"],["impl PartialOrd<Bytes> for String"],["impl PartialOrd<Bytes> for Vec<u8>"],["impl PartialOrd<Bytes> for [u8]"],["impl PartialOrd<BytesMut> for &str"],["impl PartialOrd<BytesMut> for &[u8]"],["impl PartialOrd<BytesMut> for str"],["impl PartialOrd<BytesMut> for String"],["impl PartialOrd<BytesMut> for Vec<u8>"],["impl PartialOrd<BytesMut> for [u8]"],["impl PartialOrd<String> for Bytes"],["impl PartialOrd<String> for BytesMut"],["impl PartialOrd<Vec<u8>> for Bytes"],["impl PartialOrd<Vec<u8>> for BytesMut"],["impl PartialOrd<[u8]> for Bytes"],["impl PartialOrd<[u8]> for BytesMut"],["impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where\n Bytes: PartialOrd<T>,
"],["impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where\n BytesMut: PartialOrd<T>,
"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[10118]} \ No newline at end of file diff --git a/trait.impl/core/convert/trait.AsMut.js b/trait.impl/core/convert/trait.AsMut.js new file mode 100644 index 000000000..29c951bab --- /dev/null +++ b/trait.impl/core/convert/trait.AsMut.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl AsMut<[u8]> for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[369]} \ No newline at end of file diff --git a/trait.impl/core/convert/trait.AsRef.js b/trait.impl/core/convert/trait.AsRef.js new file mode 100644 index 000000000..889b7ca39 --- /dev/null +++ b/trait.impl/core/convert/trait.AsRef.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl AsRef<[u8]> for Bytes"],["impl AsRef<[u8]> for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[718]} \ No newline at end of file diff --git a/trait.impl/core/convert/trait.From.js b/trait.impl/core/convert/trait.From.js new file mode 100644 index 000000000..b18d2b6d9 --- /dev/null +++ b/trait.impl/core/convert/trait.From.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl From<&'static str> for Bytes"],["impl From<&'static [u8]> for Bytes"],["impl From<Bytes> for BytesMut"],["impl From<Bytes> for Vec<u8>"],["impl From<BytesMut> for Bytes"],["impl From<BytesMut> for Vec<u8>"],["impl From<Box<[u8]>> for Bytes"],["impl From<String> for Bytes"],["impl From<Vec<u8>> for Bytes"],["impl<'a> From<&'a str> for BytesMut"],["impl<'a> From<&'a [u8]> for BytesMut"],["impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice"],["impl<'a> From<&'a mut [MaybeUninit<u8>]> for &'a mut UninitSlice"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[5507]} \ No newline at end of file diff --git a/trait.impl/core/default/trait.Default.js b/trait.impl/core/default/trait.Default.js new file mode 100644 index 000000000..670681d5f --- /dev/null +++ b/trait.impl/core/default/trait.Default.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Default for Bytes"],["impl Default for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[522]} \ No newline at end of file diff --git a/trait.impl/core/fmt/trait.Debug.js b/trait.impl/core/fmt/trait.Debug.js new file mode 100644 index 000000000..d3864262e --- /dev/null +++ b/trait.impl/core/fmt/trait.Debug.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Debug for UninitSlice"],["impl Debug for Bytes"],["impl Debug for BytesMut"],["impl<B: Debug> Debug for Reader<B>"],["impl<B: Debug> Debug for Writer<B>"],["impl<T: Debug> Debug for IntoIter<T>"],["impl<T: Debug> Debug for Limit<T>"],["impl<T: Debug> Debug for Take<T>"],["impl<T: Debug, U: Debug> Debug for Chain<T, U>"]]]]); + if 
(window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3284]} \ No newline at end of file diff --git a/trait.impl/core/fmt/trait.LowerHex.js b/trait.impl/core/fmt/trait.LowerHex.js new file mode 100644 index 000000000..db126e894 --- /dev/null +++ b/trait.impl/core/fmt/trait.LowerHex.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl LowerHex for Bytes"],["impl LowerHex for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[512]} \ No newline at end of file diff --git a/trait.impl/core/fmt/trait.UpperHex.js b/trait.impl/core/fmt/trait.UpperHex.js new file mode 100644 index 000000000..32176b1a0 --- /dev/null +++ b/trait.impl/core/fmt/trait.UpperHex.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl UpperHex for Bytes"],["impl UpperHex for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[512]} \ No newline at end of file diff --git a/trait.impl/core/fmt/trait.Write.js b/trait.impl/core/fmt/trait.Write.js new file mode 100644 index 000000000..45573fe58 --- /dev/null +++ b/trait.impl/core/fmt/trait.Write.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Write for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[257]} \ No newline at end of file diff --git a/trait.impl/core/hash/trait.Hash.js b/trait.impl/core/hash/trait.Hash.js new file mode 100644 index 000000000..4c5620c5d --- /dev/null +++ b/trait.impl/core/hash/trait.Hash.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Hash for Bytes"],["impl Hash for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[492]} \ No newline at end of file diff --git a/trait.impl/core/iter/traits/collect/trait.Extend.js b/trait.impl/core/iter/traits/collect/trait.Extend.js new file mode 100644 index 000000000..e6fb6cfd8 --- /dev/null +++ b/trait.impl/core/iter/traits/collect/trait.Extend.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Extend<u8> for BytesMut"],["impl Extend<Bytes> for BytesMut"],["impl<'a> Extend<&'a u8> for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[1182]} \ No newline at end of file diff --git a/trait.impl/core/iter/traits/collect/trait.FromIterator.js b/trait.impl/core/iter/traits/collect/trait.FromIterator.js new file mode 100644 index 000000000..9e739ba89 --- /dev/null +++ b/trait.impl/core/iter/traits/collect/trait.FromIterator.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl FromIterator<u8> for Bytes"],["impl FromIterator<u8> for BytesMut"],["impl<'a> FromIterator<&'a u8> for BytesMut"]]]]); + if 
(window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[1229]} \ No newline at end of file diff --git a/trait.impl/core/iter/traits/collect/trait.IntoIterator.js b/trait.impl/core/iter/traits/collect/trait.IntoIterator.js new file mode 100644 index 000000000..4a6cf5b27 --- /dev/null +++ b/trait.impl/core/iter/traits/collect/trait.IntoIterator.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl IntoIterator for Bytes"],["impl IntoIterator for BytesMut"],["impl<'a> IntoIterator for &'a Bytes"],["impl<'a> IntoIterator for &'a BytesMut"],["impl<T, U> IntoIterator for Chain<T, U>
where\n T: Buf,\n U: Buf,
"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[1794]} \ No newline at end of file diff --git a/trait.impl/core/iter/traits/exact_size/trait.ExactSizeIterator.js b/trait.impl/core/iter/traits/exact_size/trait.ExactSizeIterator.js new file mode 100644 index 000000000..09462086b --- /dev/null +++ b/trait.impl/core/iter/traits/exact_size/trait.ExactSizeIterator.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl<T: Buf> ExactSizeIterator for IntoIter<T>"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[454]} \ No newline at end of file diff --git a/trait.impl/core/iter/traits/iterator/trait.Iterator.js b/trait.impl/core/iter/traits/iterator/trait.Iterator.js new file mode 100644 index 000000000..b5147d0d8 --- /dev/null +++ b/trait.impl/core/iter/traits/iterator/trait.Iterator.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl<T: Buf> Iterator for IntoIter<T>"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[423]} \ No newline at end of file diff --git a/trait.impl/core/marker/trait.Freeze.js b/trait.impl/core/marker/trait.Freeze.js new file mode 100644 index 000000000..b7bcbc75f --- /dev/null +++ b/trait.impl/core/marker/trait.Freeze.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl !Freeze for Bytes",1,["bytes::bytes::Bytes"]],["impl Freeze for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]],["impl Freeze for BytesMut",1,["bytes::bytes_mut::BytesMut"]],["impl<B> Freeze for Reader<B>
where\n B: Freeze,
",1,["bytes::buf::reader::Reader"]],["impl<B> Freeze for Writer<B>
where\n B: Freeze,
",1,["bytes::buf::writer::Writer"]],["impl<T> Freeze for IntoIter<T>
where\n T: Freeze,
",1,["bytes::buf::iter::IntoIter"]],["impl<T> Freeze for Limit<T>
where\n T: Freeze,
",1,["bytes::buf::limit::Limit"]],["impl<T> Freeze for Take<T>
where\n T: Freeze,
",1,["bytes::buf::take::Take"]],["impl<T, U> Freeze for Chain<T, U>
where\n T: Freeze,\n U: Freeze,
",1,["bytes::buf::chain::Chain"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3970]} \ No newline at end of file diff --git a/trait.impl/core/marker/trait.Send.js b/trait.impl/core/marker/trait.Send.js new file mode 100644 index 000000000..fd3f3d24e --- /dev/null +++ b/trait.impl/core/marker/trait.Send.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Send for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]],["impl Send for Bytes"],["impl Send for BytesMut"],["impl<B> Send for Reader<B>
where\n B: Send,
",1,["bytes::buf::reader::Reader"]],["impl<B> Send for Writer<B>
where\n B: Send,
",1,["bytes::buf::writer::Writer"]],["impl<T> Send for IntoIter<T>
where\n T: Send,
",1,["bytes::buf::iter::IntoIter"]],["impl<T> Send for Limit<T>
where\n T: Send,
",1,["bytes::buf::limit::Limit"]],["impl<T> Send for Take<T>
where\n T: Send,
",1,["bytes::buf::take::Take"]],["impl<T, U> Send for Chain<T, U>
where\n T: Send,\n U: Send,
",1,["bytes::buf::chain::Chain"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3814]} \ No newline at end of file diff --git a/trait.impl/core/marker/trait.Sized.js b/trait.impl/core/marker/trait.Sized.js new file mode 100644 index 000000000..b286579c9 --- /dev/null +++ b/trait.impl/core/marker/trait.Sized.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl !Sized for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[326]} \ No newline at end of file diff --git a/trait.impl/core/marker/trait.Sync.js b/trait.impl/core/marker/trait.Sync.js new file mode 100644 index 000000000..dce33beb8 --- /dev/null +++ b/trait.impl/core/marker/trait.Sync.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Sync for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]],["impl Sync for Bytes"],["impl Sync for BytesMut"],["impl<B> Sync for Reader<B>
where\n B: Sync,
",1,["bytes::buf::reader::Reader"]],["impl<B> Sync for Writer<B>
where\n B: Sync,
",1,["bytes::buf::writer::Writer"]],["impl<T> Sync for IntoIter<T>
where\n T: Sync,
",1,["bytes::buf::iter::IntoIter"]],["impl<T> Sync for Limit<T>
where\n T: Sync,
",1,["bytes::buf::limit::Limit"]],["impl<T> Sync for Take<T>
where\n T: Sync,
",1,["bytes::buf::take::Take"]],["impl<T, U> Sync for Chain<T, U>
where\n T: Sync,\n U: Sync,
",1,["bytes::buf::chain::Chain"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3814]} \ No newline at end of file diff --git a/trait.impl/core/marker/trait.Unpin.js b/trait.impl/core/marker/trait.Unpin.js new file mode 100644 index 000000000..af0a8b2df --- /dev/null +++ b/trait.impl/core/marker/trait.Unpin.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Unpin for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]],["impl Unpin for Bytes",1,["bytes::bytes::Bytes"]],["impl Unpin for BytesMut",1,["bytes::bytes_mut::BytesMut"]],["impl<B> Unpin for Reader<B>
where\n B: Unpin,
",1,["bytes::buf::reader::Reader"]],["impl<B> Unpin for Writer<B>
where\n B: Unpin,
",1,["bytes::buf::writer::Writer"]],["impl<T> Unpin for IntoIter<T>
where\n T: Unpin,
",1,["bytes::buf::iter::IntoIter"]],["impl<T> Unpin for Limit<T>
where\n T: Unpin,
",1,["bytes::buf::limit::Limit"]],["impl<T> Unpin for Take<T>
where\n T: Unpin,
",1,["bytes::buf::take::Take"]],["impl<T, U> Unpin for Chain<T, U>
where\n T: Unpin,\n U: Unpin,
",1,["bytes::buf::chain::Chain"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3921]} \ No newline at end of file diff --git a/trait.impl/core/ops/deref/trait.Deref.js b/trait.impl/core/ops/deref/trait.Deref.js new file mode 100644 index 000000000..3e816c396 --- /dev/null +++ b/trait.impl/core/ops/deref/trait.Deref.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Deref for Bytes"],["impl Deref for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[520]} \ No newline at end of file diff --git a/trait.impl/core/ops/deref/trait.DerefMut.js b/trait.impl/core/ops/deref/trait.DerefMut.js new file mode 100644 index 000000000..51878a422 --- /dev/null +++ b/trait.impl/core/ops/deref/trait.DerefMut.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl DerefMut for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[279]} \ No newline at end of file diff --git a/trait.impl/core/ops/drop/trait.Drop.js b/trait.impl/core/ops/drop/trait.Drop.js new file mode 100644 index 000000000..e1aa98f1f --- /dev/null +++ b/trait.impl/core/ops/drop/trait.Drop.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Drop for Bytes"],["impl Drop for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[510]} \ No newline at end of file diff --git a/trait.impl/core/ops/index/trait.Index.js b/trait.impl/core/ops/index/trait.Index.js new file mode 100644 index 000000000..e8f39752a --- /dev/null +++ b/trait.impl/core/ops/index/trait.Index.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Index<Range<usize>> for UninitSlice"],["impl Index<RangeFrom<usize>> for UninitSlice"],["impl Index<RangeFull> for UninitSlice"],["impl Index<RangeInclusive<usize>> for UninitSlice"],["impl Index<RangeTo<usize>> for UninitSlice"],["impl Index<RangeToInclusive<usize>> for UninitSlice"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3227]} \ No newline at end of file diff --git a/trait.impl/core/ops/index/trait.IndexMut.js b/trait.impl/core/ops/index/trait.IndexMut.js new file mode 100644 index 000000000..be0084ab4 --- /dev/null +++ b/trait.impl/core/ops/index/trait.IndexMut.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl IndexMut<Range<usize>> for UninitSlice"],["impl IndexMut<RangeFrom<usize>> for UninitSlice"],["impl IndexMut<RangeFull> for UninitSlice"],["impl IndexMut<RangeInclusive<usize>> for UninitSlice"],["impl IndexMut<RangeTo<usize>> for UninitSlice"],["impl IndexMut<RangeToInclusive<usize>> for UninitSlice"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[3281]} \ No 
newline at end of file diff --git a/trait.impl/core/panic/unwind_safe/trait.RefUnwindSafe.js b/trait.impl/core/panic/unwind_safe/trait.RefUnwindSafe.js new file mode 100644 index 000000000..20d507cca --- /dev/null +++ b/trait.impl/core/panic/unwind_safe/trait.RefUnwindSafe.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl RefUnwindSafe for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]],["impl RefUnwindSafe for Bytes",1,["bytes::bytes::Bytes"]],["impl RefUnwindSafe for BytesMut",1,["bytes::bytes_mut::BytesMut"]],["impl<B> RefUnwindSafe for Reader<B>
where\n B: RefUnwindSafe,
",1,["bytes::buf::reader::Reader"]],["impl<B> RefUnwindSafe for Writer<B>
where\n B: RefUnwindSafe,
",1,["bytes::buf::writer::Writer"]],["impl<T> RefUnwindSafe for IntoIter<T>
where\n T: RefUnwindSafe,
",1,["bytes::buf::iter::IntoIter"]],["impl<T> RefUnwindSafe for Limit<T>
where\n T: RefUnwindSafe,
",1,["bytes::buf::limit::Limit"]],["impl<T> RefUnwindSafe for Take<T>
where\n T: RefUnwindSafe,
",1,["bytes::buf::take::Take"]],["impl<T, U> RefUnwindSafe for Chain<T, U>
where\n T: RefUnwindSafe,\n U: RefUnwindSafe,
",1,["bytes::buf::chain::Chain"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[4673]} \ No newline at end of file diff --git a/trait.impl/core/panic/unwind_safe/trait.UnwindSafe.js b/trait.impl/core/panic/unwind_safe/trait.UnwindSafe.js new file mode 100644 index 000000000..9456eed8a --- /dev/null +++ b/trait.impl/core/panic/unwind_safe/trait.UnwindSafe.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl UnwindSafe for UninitSlice",1,["bytes::buf::uninit_slice::UninitSlice"]],["impl UnwindSafe for Bytes",1,["bytes::bytes::Bytes"]],["impl UnwindSafe for BytesMut",1,["bytes::bytes_mut::BytesMut"]],["impl<B> UnwindSafe for Reader<B>
where\n B: UnwindSafe,
",1,["bytes::buf::reader::Reader"]],["impl<B> UnwindSafe for Writer<B>
where\n B: UnwindSafe,
",1,["bytes::buf::writer::Writer"]],["impl<T> UnwindSafe for IntoIter<T>
where\n T: UnwindSafe,
",1,["bytes::buf::iter::IntoIter"]],["impl<T> UnwindSafe for Limit<T>
where\n T: UnwindSafe,
",1,["bytes::buf::limit::Limit"]],["impl<T> UnwindSafe for Take<T>
where\n T: UnwindSafe,
",1,["bytes::buf::take::Take"]],["impl<T, U> UnwindSafe for Chain<T, U>
where\n T: UnwindSafe,\n U: UnwindSafe,
",1,["bytes::buf::chain::Chain"]]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[4529]} \ No newline at end of file diff --git a/trait.impl/serde/de/trait.Deserialize.js b/trait.impl/serde/de/trait.Deserialize.js new file mode 100644 index 000000000..38e07f0e7 --- /dev/null +++ b/trait.impl/serde/de/trait.Deserialize.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl<'de> Deserialize<'de> for Bytes"],["impl<'de> Deserialize<'de> for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[566]} \ No newline at end of file diff --git a/trait.impl/serde/ser/trait.Serialize.js b/trait.impl/serde/ser/trait.Serialize.js new file mode 100644 index 000000000..30506e6ab --- /dev/null +++ b/trait.impl/serde/ser/trait.Serialize.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl Serialize for Bytes"],["impl Serialize for BytesMut"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[514]} \ No newline at end of file diff --git a/trait.impl/std/io/trait.BufRead.js b/trait.impl/std/io/trait.BufRead.js new file mode 100644 index 000000000..c31c7ef3d --- /dev/null +++ b/trait.impl/std/io/trait.BufRead.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl<B: Buf + Sized> BufRead for Reader<B>"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[513]} \ No newline at end of file diff --git a/trait.impl/std/io/trait.Read.js b/trait.impl/std/io/trait.Read.js new file mode 100644 index 000000000..cbc4946da --- /dev/null +++ b/trait.impl/std/io/trait.Read.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl<B: Buf + Sized> Read for Reader<B>"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[504]} \ No newline at end of file diff --git a/trait.impl/std/io/trait.Write.js b/trait.impl/std/io/trait.Write.js new file mode 100644 index 000000000..2451fc4af --- /dev/null +++ b/trait.impl/std/io/trait.Write.js @@ -0,0 +1,9 @@ +(function() { + var implementors = Object.fromEntries([["bytes",[["impl<B: BufMut + Sized> Write for Writer<B>"]]]]); + if (window.register_implementors) { + window.register_implementors(implementors); + } else { + window.pending_implementors = implementors; + } +})() +//{"start":57,"fragment_lengths":[516]} \ No newline at end of file