Created: October 17, 2020 03:56
-
-
Save myrrlyn/02c46cc05dd8b881de52baf0eaabd37c to your computer and use it in GitHub Desktop.
Single-file implementation of `bitvec` that can be loaded into online environments without `crates.io` access
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//! 2020-10-16T23:40 | |
#![cfg_attr(not(feature = "std"), no_std)] | |
#[cfg(feature = "alloc")] | |
extern crate alloc; | |
#[macro_use] | |
pub mod macros { | |
#![allow(deprecated)] | |
#[macro_use] | |
pub mod internal { | |
#[macro_export]
/// Internal constructor: builds a `[$store; N]` array from a flat,
/// comma-separated list of bit values. Used by `bitarr!`/`bits!`; the
/// `#[macro_export]` is only required so `$crate::` paths resolve.
///
/// Strategy: an entry arm pads the bit list with 64 trailing zeros; the
/// per-width "muncher" arms then chew one element's worth of bits
/// (8/16/32/64) off the front per recursion, accumulating them as
/// parenthesized groups inside `[$($w:tt)*]`; once only padding zeros
/// remain, a terminal arm converts each group into one storage element via
/// `__elt_from_bits!`.
macro_rules! __bits_store_array {
	// `usize` entry: dispatch to the fixed-width integer matching the
	// target's pointer width, flagged `@ usz` so the terminal arm casts each
	// element back to `usize`.
	($order:tt, usize; $($val:expr),*) => {{
		const LEN: usize = $crate::__count_elts!(usize; $($val),*);
		#[cfg(target_pointer_width = "32")]
		let out: [usize; LEN] = $crate::__bits_store_array!(
			$order, u32 @ usz; $($val),*
		);
		#[cfg(target_pointer_width = "64")]
		let out: [usize; LEN] = $crate::__bits_store_array!(
			$order, u64 @ usz; $($val),*
		);
		out
	}};
	// Entry arm: append 64 zeros so the munchers can always take a full
	// element's worth of bits; leftover all-zero padding is absorbed by the
	// terminal arms below.
	($order:tt, $store:ident $(@ $usz:ident )?; $($val:expr),*) => {
		$crate::__bits_store_array!(
			$order, $store $(@ $usz)?, []; $($val,)*
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
		);
	};
	// Terminal arm (`usize` mode): only padding remains; emit every
	// collected group as an element, cast back to `usize`.
	($order:tt, $store:ident @ usz, [$( ($($elt:tt),*) )*]; $(0),*) => {
		[$(
			$crate::__elt_from_bits!($order, $store; $($elt),*) as usize
		),*]
	};
	// Terminal arm (fixed-width mode).
	($order:tt, $store:ident, [$( ($($elt:tt),*) )*]; $(0),*) => {
		[$(
			$crate::__elt_from_bits!($order, $store; $($elt),*)
		),*]
	};
	// Muncher: take eight bits as one `u8` group.
	(
		$order:tt, u8 $(@ $usz:ident)?, [$($w:tt)*];
		$a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt
		$(, $($t:tt)*)?
	) => {
		$crate::__bits_store_array!(
			$order, u8 $(@ $usz)?, [$($w)* (
				$a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0
			)];
			$($($t)*)?
		)
	};
	// Muncher: take sixteen bits as one `u16` group.
	(
		$order:tt, u16 $(@ $usz:ident)?, [$($w:tt)*];
		$a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
		$a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt
		$(, $($t:tt)*)?
	) => {
		$crate::__bits_store_array!(
			$order, u16 $(@ $usz)?, [$($w)* (
				$a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
				$a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1
			)];
			$($($t)*)?
		)
	};
	// Muncher: take thirty-two bits as one `u32` group.
	(
		$order:tt, u32 $(@ $usz:ident)?, [$($w:tt)*];
		$a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
		$a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt,
		$a2:tt, $b2:tt, $c2:tt, $d2:tt, $e2:tt, $f2:tt, $g2:tt, $h2:tt,
		$a3:tt, $b3:tt, $c3:tt, $d3:tt, $e3:tt, $f3:tt, $g3:tt, $h3:tt
		$(, $($t:tt)*)?
	) => {
		$crate::__bits_store_array!(
			$order, u32 $(@ $usz)?, [$($w)* (
				$a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
				$a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1,
				$a2, $b2, $c2, $d2, $e2, $f2, $g2, $h2,
				$a3, $b3, $c3, $d3, $e3, $f3, $g3, $h3
			)];
			$($($t)*)?
		)
	};
	// Muncher: take sixty-four bits as one `u64` group.
	(
		$order:tt, u64 $(@ $usz:ident)?, [$($w:tt)*];
		$a0:tt, $b0:tt, $c0:tt, $d0:tt, $e0:tt, $f0:tt, $g0:tt, $h0:tt,
		$a1:tt, $b1:tt, $c1:tt, $d1:tt, $e1:tt, $f1:tt, $g1:tt, $h1:tt,
		$a2:tt, $b2:tt, $c2:tt, $d2:tt, $e2:tt, $f2:tt, $g2:tt, $h2:tt,
		$a3:tt, $b3:tt, $c3:tt, $d3:tt, $e3:tt, $f3:tt, $g3:tt, $h3:tt,
		$a4:tt, $b4:tt, $c4:tt, $d4:tt, $e4:tt, $f4:tt, $g4:tt, $h4:tt,
		$a5:tt, $b5:tt, $c5:tt, $d5:tt, $e5:tt, $f5:tt, $g5:tt, $h5:tt,
		$a6:tt, $b6:tt, $c6:tt, $d6:tt, $e6:tt, $f6:tt, $g6:tt, $h6:tt,
		$a7:tt, $b7:tt, $c7:tt, $d7:tt, $e7:tt, $f7:tt, $g7:tt, $h7:tt
		$(, $($t:tt)*)?
	) => {
		$crate::__bits_store_array!(
			$order, u64 $(@ $usz)?, [$($w)* (
				$a0, $b0, $c0, $d0, $e0, $f0, $g0, $h0,
				$a1, $b1, $c1, $d1, $e1, $f1, $g1, $h1,
				$a2, $b2, $c2, $d2, $e2, $f2, $g2, $h2,
				$a3, $b3, $c3, $d3, $e3, $f3, $g3, $h3,
				$a4, $b4, $c4, $d4, $e4, $f4, $g4, $h4,
				$a5, $b5, $c5, $d5, $e5, $f5, $g5, $h5,
				$a6, $b6, $c6, $d6, $e6, $f6, $g6, $h6,
				$a7, $b7, $c7, $d7, $e7, $f7, $g7, $h7
			)];
			$($($t)*)?
		)
	};
}
#[macro_export]
/// Counts the expression arguments it receives, producing a `const` `usize`.
macro_rules! __count {
	// Internal arm: each single expression contributes exactly one.
	(@ $val:expr) => { 1 };
	// Public arm: sum one `1` per argument into a named constant so the
	// result is usable in const contexts (e.g. array lengths).
	($($val:expr),*) => {{
		const LEN: usize = 0usize $(+ $crate::__count!(@ $val))*;
		LEN
	}};
}
#[macro_export]
/// Computes how many `$t` storage elements are needed to hold one bit per
/// argument, by dividing the bit count through `mem::elts` (rounding up).
macro_rules! __count_elts {
	($t:ident; $($val:expr),*) => {{
		$crate::mem::elts::<$t>($crate::__count!($($val),*))
	}};
}
#[macro_export]
/// Internal constructor: packs a group of bit expressions into one `$store`
/// element, honoring the requested bit ordering.
///
/// The `Lsb0`/`Msb0`/`LocalBits` arms are const-friendly fast paths: each
/// run of eight bits is packed into a byte, and the bytes are reassembled
/// with the matching endian constructor via `__ty_from_bytes!`. These arms
/// require the bit count to be a multiple of eight — guaranteed by the
/// zero padding in `__bits_store_array!`.
macro_rules! __elt_from_bits {
	(
		Lsb0, $store:ident;
		$(
			$a:expr, $b:expr, $c:expr, $d:expr,
			$e:expr, $f:expr, $g:expr, $h:expr
		),*
	) => {
		$crate::__ty_from_bytes!(
			Lsb0, $store, [$($crate::macros::internal::u8_from_le_bits(
				$a != 0, $b != 0, $c != 0, $d != 0,
				$e != 0, $f != 0, $g != 0, $h != 0,
			)),*]
		)
	};
	(
		Msb0, $store:ident;
		$(
			$a:expr, $b:expr, $c:expr, $d:expr,
			$e:expr, $f:expr, $g:expr, $h:expr
		),*
	) => {
		$crate::__ty_from_bytes!(
			Msb0, $store, [$($crate::macros::internal::u8_from_be_bits(
				$a != 0, $b != 0, $c != 0, $d != 0,
				$e != 0, $f != 0, $g != 0, $h != 0,
			)),*]
		)
	};
	(
		LocalBits, $store:ident;
		$(
			$a:expr, $b:expr, $c:expr, $d:expr,
			$e:expr, $f:expr, $g:expr, $h:expr
		),*
	) => {
		$crate::__ty_from_bytes!(
			LocalBits, $store, [$($crate::macros::internal::u8_from_ne_bits(
				$a != 0, $b != 0, $c != 0, $d != 0,
				$e != 0, $f != 0, $g != 0, $h != 0,
			)),*]
		)
	};
	// Fallback for any other `BitOrder` token: build the element at runtime
	// by writing bits one at a time through a `BitSlice` view of a zeroed
	// element. Underscore names suppress unused warnings when no bits are
	// supplied.
	(
		$order:tt, $store:ident;
		$(
			$a:expr, $b:expr, $c:expr, $d:expr,
			$e:expr, $f:expr, $g:expr, $h:expr
		),*
	) => {{
		let mut tmp: $store = 0;
		let _tmp_bits = $crate::slice::BitSlice::<$order, $store>::from_element_mut(&mut tmp);
		let mut _idx = 0;
		$(
			_tmp_bits.set(_idx, $a != 0); _idx += 1;
			_tmp_bits.set(_idx, $b != 0); _idx += 1;
			_tmp_bits.set(_idx, $c != 0); _idx += 1;
			_tmp_bits.set(_idx, $d != 0); _idx += 1;
			_tmp_bits.set(_idx, $e != 0); _idx += 1;
			_tmp_bits.set(_idx, $f != 0); _idx += 1;
			_tmp_bits.set(_idx, $g != 0); _idx += 1;
			_tmp_bits.set(_idx, $h != 0); _idx += 1;
		)*
		tmp
	}};
}
#[macro_export]
/// Extends a boolean-ish expression across an entire `$typ` value: selects
/// `0` when `$val` compares equal to zero and `!0` (all ones) otherwise,
/// by indexing a two-element array with the comparison result.
macro_rules! __extend_bool {
	($val:expr, $typ:ident) => {
		[0 as $typ, !0][($val != 0) as usize]
	};
}
#[macro_export]
/// Reassembles an unsigned integer from its constituent bytes, using the
/// byte order implied by the ordering token: `Msb0` → big-endian, `Lsb0` →
/// little-endian, `LocalBits` → native. One arm per (ordering, type) pair
/// because the type name must be spelled literally for the associated
/// `from_??_bytes` constructors.
macro_rules! __ty_from_bytes {
	(Msb0, u8, [$($byte:expr),*]) => {
		u8::from_be_bytes([$($byte),*])
	};
	(Lsb0, u8, [$($byte:expr),*]) => {
		u8::from_le_bytes([$($byte),*])
	};
	(LocalBits, u8, [$($byte:expr),*]) => {
		u8::from_ne_bytes([$($byte),*])
	};
	(Msb0, u16, [$($byte:expr),*]) => {
		u16::from_be_bytes([$($byte),*])
	};
	(Lsb0, u16, [$($byte:expr),*]) => {
		u16::from_le_bytes([$($byte),*])
	};
	(LocalBits, u16, [$($byte:expr),*]) => {
		u16::from_ne_bytes([$($byte),*])
	};
	(Msb0, u32, [$($byte:expr),*]) => {
		u32::from_be_bytes([$($byte),*])
	};
	(Lsb0, u32, [$($byte:expr),*]) => {
		u32::from_le_bytes([$($byte),*])
	};
	(LocalBits, u32, [$($byte:expr),*]) => {
		u32::from_ne_bytes([$($byte),*])
	};
	(Msb0, u64, [$($byte:expr),*]) => {
		u64::from_be_bytes([$($byte),*])
	};
	(Lsb0, u64, [$($byte:expr),*]) => {
		u64::from_le_bytes([$($byte),*])
	};
	(LocalBits, u64, [$($byte:expr),*]) => {
		u64::from_ne_bytes([$($byte),*])
	};
	(Msb0, usize, [$($byte:expr),*]) => {
		usize::from_be_bytes([$($byte),*])
	};
	(Lsb0, usize, [$($byte:expr),*]) => {
		usize::from_le_bytes([$($byte),*])
	};
	(LocalBits, usize, [$($byte:expr),*]) => {
		usize::from_ne_bytes([$($byte),*])
	};
}
/// Packs eight `bool`s into a `u8`, least-significant bit first: `a`
/// becomes bit 0 (LSB) and `h` becomes bit 7 (MSB).
///
/// Written as a sum of disjoint powers of two, which is equivalent to the
/// shift-and-or form because each addend occupies a distinct bit.
#[inline]
#[allow(clippy::many_single_char_names)]
#[allow(clippy::too_many_arguments)]
pub const fn u8_from_le_bits(
	a: bool,
	b: bool,
	c: bool,
	d: bool,
	e: bool,
	f: bool,
	g: bool,
	h: bool,
) -> u8
{
	(a as u8)
		+ (b as u8) * 2
		+ (c as u8) * 4
		+ (d as u8) * 8
		+ (e as u8) * 16
		+ (f as u8) * 32
		+ (g as u8) * 64
		+ (h as u8) * 128
}
/// Packs eight `bool`s into a `u8`, most-significant bit first: `a`
/// becomes bit 7 (MSB) and `h` becomes bit 0 (LSB).
///
/// Written as a sum of disjoint powers of two, which is equivalent to the
/// shift-and-or form because each addend occupies a distinct bit.
#[inline]
#[allow(clippy::many_single_char_names)]
#[allow(clippy::too_many_arguments)]
pub const fn u8_from_be_bits(
	a: bool,
	b: bool,
	c: bool,
	d: bool,
	e: bool,
	f: bool,
	g: bool,
	h: bool,
) -> u8
{
	(a as u8) * 128
		+ (b as u8) * 64
		+ (c as u8) * 32
		+ (d as u8) * 16
		+ (e as u8) * 8
		+ (f as u8) * 4
		+ (g as u8) * 2
		+ (h as u8)
}
// Native-endian alias: pick the packer matching the target's byte order.
#[cfg(target_endian = "little")]
pub use self::u8_from_le_bits as u8_from_ne_bits;
#[cfg(target_endian = "big")]
pub use self::u8_from_be_bits as u8_from_ne_bits;
/// Deprecation canary: invoked (as a no-op) by the macro arms that receive
/// an ordering but no storage type, so that every such use site emits the
/// deprecation warning below at compile time.
#[deprecated = "Ordering-only macro constructors are deprecated. Specify a \
                storage type as well, or remove the ordering and use the \
                default."]
pub const fn __deprecated_order_no_store() {
}
} | |
#[macro_export]
/// Constructs `BitArray`s: the `for … in …` arms name a *type* able to hold
/// a given number of bits; the remaining arms construct *values* from
/// either a bit list (`bitarr![order, store; 0, 1, …]`) or a repetition
/// (`bitarr![order, store; bit; len]`). Ordering-without-storage arms are
/// deprecated and route through `__deprecated_order_no_store` to warn.
macro_rules! bitarr {
	// Type constructor: fully specified ordering and storage.
	(for $len:literal, in $order:ty, $store:ident) => {
		$crate::array::BitArray::<
			$order,
			[$store; $crate::mem::elts::<$store>($len)],
		>
	};
	// Type constructor: default ordering (`Lsb0`).
	(for $len:literal, in $store:ident) => {
		$crate::bitarr!(for $len, in $crate::order::Lsb0, $store)
	};
	// Type constructor: default ordering and storage (`Lsb0`, `usize`).
	(for $len:literal) => {
		$crate::bitarr!(for $len, in usize)
	};
	// Value from a bit list; `ident` and `path` arms are duplicated because
	// a macro cannot match both with one fragment specifier.
	($order:ident, $store:ident; $($val:expr),* $(,)?) => {
		$crate::array::BitArray::<
			$order,
			[$store; $crate::__count_elts!($store; $($val),*)],
		>::new(
			$crate::__bits_store_array!($order, $store; $($val),*)
		)
	};
	($order:path, $store:ident; $($val:expr),* $(,)?) => {
		$crate::array::BitArray::<
			$order,
			[$store; $crate::__count_elts!($store; $($val),*)],
		>::new(
			$crate::__bits_store_array!($order, $store; $($val),*)
		)
	};
	// Deprecated: ordering without storage; defaults storage to `usize`.
	($order:ident; $($val:expr),* $(,)?) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bitarr!($order, usize; $($val),*)
	}};
	($order:path; $($val:expr),* $(,)?) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bitarr!($order, usize; $($val),*)
	}};
	// Bit list with all defaults.
	($($val:expr),* $(,)?) => {
		$crate::bitarr!(Lsb0, usize; $($val),*)
	};
	// Value from repetition: fill every element with all-zeros or all-ones
	// via `__extend_bool!`, matching `[val; len]` array syntax.
	($order:ident, $store:ident; $val:expr; $len:expr) => {
		$crate::array::BitArray::<
			$order,
			[$store; $crate::mem::elts::<$store>($len)],
		>::new([
			$crate::__extend_bool!($val, $store);
			$crate::mem::elts::<$store>($len)
		])
	};
	($order:path, $store:ident; $val:expr; $len:expr) => {
		$crate::array::BitArray::<
			$order,
			[$store; $crate::mem::elts::<$store>($len)],
		>::new([
			$crate::__extend_bool!($val, $store);
			$crate::mem::elts::<$store>($len)
		])
	};
	// Deprecated repetition arms: ordering without storage.
	($order:ident; $val:expr; $len:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bitarr!($order, usize; $val; $len)
	}};
	($order:path; $val:expr; $len:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bitarr!($order, usize; $val; $len)
	}};
	// Repetition with all defaults.
	($val:expr; $len:expr) => {
		$crate::bitarr!(Lsb0, usize; $val; $len)
	};
}
#[macro_export]
/// Constructs borrowed `&BitSlice`/`&mut BitSlice` views over a hidden
/// `bitarr!` temporary, trimmed to exactly the number of bits requested
/// (the backing array rounds up to whole storage elements). `mut` arms come
/// first because `mut` must be matched before `$val:expr` could swallow it.
macro_rules! bits {
	// Mutable view over a bit list.
	(mut $order:ident, $store:ident; $($val:expr),* $(,)?) => {{
		&mut $crate::bitarr![$order, $store; $($val),*][.. $crate::__count!($($val),*)]
	}};
	(mut $order:path, $store:ident; $($val:expr),* $(,)?) => {{
		&mut $crate::bitarr![$order, $store; $($val),*][.. $crate::__count!($($val),*)]
	}};
	// Deprecated: ordering without storage.
	(mut $order:ident; $($val:expr),* $(,)?) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!(mut $order, usize; $($val),*)
	}};
	(mut $order:path; $($val:expr),* $(,)?) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!(mut $order, usize; $($val),*)
	}};
	// Mutable view, all defaults.
	(mut $($val:expr),* $(,)?) => {
		$crate::bits!(mut Lsb0, usize; $($val),*)
	};
	// Mutable view over a repetition, trimmed to `$len` bits.
	(mut $order:ident, $store:ident; $val:expr; $len:expr) => {{
		&mut $crate::bitarr![$order, $store; $val; $len][.. $len]
	}};
	(mut $order:path, $store:ident; $val:expr; $len:expr) => {{
		&mut $crate::bitarr![$order, $store; $val; $len][.. $len]
	}};
	(mut $order:ident; $val:expr; $len:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!(mut $order, usize; $val; $len)
	}};
	(mut $order:path; $val:expr; $len:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!(mut $order, usize; $val; $len)
	}};
	(mut $val:expr; $len:expr) => {
		$crate::bits!(mut Lsb0, usize; $val; $len)
	};
	// Shared view over a bit list.
	($order:ident, $store:ident; $($val:expr),* $(,)?) => {{
		&$crate::bitarr![$order, $store; $($val),*][.. $crate::__count!($($val),*)]
	}};
	($order:path, $store:ident; $($val:expr),* $(,)?) => {{
		&$crate::bitarr![$order, $store; $($val),*][.. $crate::__count!($($val),*)]
	}};
	($order:ident; $($val:expr),* $(,)?) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!($order, usize; $($val),*)
	}};
	($order:path; $($val:expr),* $(,)?) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!($order, usize; $($val),*)
	}};
	// Shared view, all defaults.
	($($val:expr),* $(,)?) => {
		$crate::bits!(Lsb0, usize; $($val),*)
	};
	// Shared view over a repetition, trimmed to `$len` bits.
	($order:ident, $store:ident; $val:expr; $len:expr) => {{
		&$crate::bitarr![$order, $store; $val; $len][.. $len]
	}};
	($order:path, $store:ident; $val:expr; $len:expr) => {{
		&$crate::bitarr![$order, $store; $val; $len][.. $len]
	}};
	($order:ident; $val:expr; $len:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!($order, usize; $val; $len)
	}};
	($order:path; $val:expr; $len:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::bits!($order, usize; $val; $len)
	}};
	($val:expr; $len:expr) => {
		$crate::bits!(Lsb0, usize; $val; $len)
	};
}
#[macro_export]
#[cfg(feature = "alloc")]
/// Constructs a heap-allocated `BitVec`. Repetition arms call
/// `BitVec::repeat` directly (here `$order` can be a `ty` fragment since no
/// further token dispatch occurs); everything else is forwarded to `bits!`
/// and copied into an owned vector.
macro_rules! bitvec {
	// Repetition with explicit ordering and storage.
	($order:ty, $store:ident; $val:expr; $rep:expr) => {
		$crate::vec::BitVec::<$order, $store>::repeat($val != 0, $rep)
	};
	// Deprecated: ordering without storage; defaults storage to `usize`.
	($order:ty; $val:expr; $rep:expr) => {{
		$crate::macros::internal::__deprecated_order_no_store();
		$crate::vec::BitVec::<$order, usize>::repeat($val != 0, $rep)
	}};
	// Repetition with all defaults (`Lsb0`, `usize`).
	($val:expr; $rep:expr) => {
		$crate::vec::BitVec::<$crate::order::Lsb0, usize>::repeat($val != 0, $rep)
	};
	// Bit-list forms: build a borrowed slice, then copy it to the heap.
	($($arg:tt)*) => {{
		$crate::vec::BitVec::from_bitslice($crate::bits!($($arg)*))
	}};
}
#[macro_export]
#[cfg(feature = "alloc")]
/// Constructs a `BitBox` by building a `BitVec` with the same argument
/// grammar as `bitvec!` and freezing it into a boxed slice.
macro_rules! bitbox {
	($($arg:tt)*) => {
		$crate::bitvec!($($arg)*).into_boxed_bitslice()
	};
}
} | |
pub mod access { | |
use crate::{ | |
index::{ | |
BitIdx, | |
BitMask, | |
}, | |
mem::BitRegister, | |
order::BitOrder, | |
}; | |
use core::{ | |
cell::Cell, | |
sync::atomic, | |
}; | |
use radium::Radium; | |
/// Abstracts over shared-mutable access to a single memory element.
///
/// Every method is a read/modify/write operation performed through the
/// `Radium` trait, so the same code serves both `Cell`-based (single-thread)
/// and atomic (multi-thread) storage. All operations use `Relaxed`
/// ordering. NOTE(review): this presumes callers only require per-element
/// atomicity and never inter-element ordering — confirm against the crate's
/// aliasing model before relying on it elsewhere.
pub trait BitAccess: Radium
where <Self as Radium>::Item: BitRegister
{
	/// Clears (to `0`) the single bit selected by `index` under ordering `O`.
	#[inline]
	fn clear_bit<O>(&self, index: BitIdx<Self::Item>)
	where O: BitOrder {
		self.fetch_and(!index.select::<O>().value(), atomic::Ordering::Relaxed);
	}
	/// Clears every bit set in `mask`.
	#[inline]
	fn clear_bits(&self, mask: BitMask<Self::Item>) {
		self.fetch_and(!mask.value(), atomic::Ordering::Relaxed);
	}
	/// Sets (to `1`) the single bit selected by `index` under ordering `O`.
	#[inline]
	fn set_bit<O>(&self, index: BitIdx<Self::Item>)
	where O: BitOrder {
		self.fetch_or(index.select::<O>().value(), atomic::Ordering::Relaxed);
	}
	/// Sets every bit set in `mask`.
	#[inline]
	fn set_bits(&self, mask: BitMask<Self::Item>) {
		self.fetch_or(mask.value(), atomic::Ordering::Relaxed);
	}
	/// Inverts the single bit selected by `index` under ordering `O`.
	#[inline]
	fn invert_bit<O>(&self, index: BitIdx<Self::Item>)
	where O: BitOrder {
		self.fetch_xor(index.select::<O>().value(), atomic::Ordering::Relaxed);
	}
	/// Inverts every bit set in `mask`.
	#[inline]
	fn invert_bits(&self, mask: BitMask<Self::Item>) {
		self.fetch_xor(mask.value(), atomic::Ordering::Relaxed);
	}
	/// Writes `value` into the single bit selected by `index`.
	#[inline]
	fn write_bit<O>(&self, index: BitIdx<Self::Item>, value: bool)
	where O: BitOrder {
		if value {
			self.set_bit::<O>(index);
		}
		else {
			self.clear_bit::<O>(index);
		}
	}
	/// Writes `value` into every bit set in `mask`.
	#[inline]
	fn write_bits(&self, mask: BitMask<Self::Item>, value: bool) {
		if value {
			self.set_bits(mask);
		}
		else {
			self.clear_bits(mask);
		}
	}
	/// Returns the single-bit writer (`set_bit` or `clear_bit`) matching
	/// `value`, so callers can hoist the branch out of a loop.
	#[inline]
	fn get_writer<O>(value: bool) -> for<'a> fn(&'a Self, BitIdx<Self::Item>)
	where O: BitOrder {
		if value {
			Self::set_bit::<O>
		}
		else {
			Self::clear_bit::<O>
		}
	}
	/// Returns the masked writer (`set_bits` or `clear_bits`) matching
	/// `value`, so callers can hoist the branch out of a loop.
	#[inline]
	fn get_writers(value: bool) -> for<'a> fn(&'a Self, BitMask<Self::Item>) {
		if value {
			Self::set_bits
		}
		else {
			Self::clear_bits
		}
	}
	/// Unconditionally overwrites the whole element with `value`.
	#[inline]
	fn store_value(&self, value: Self::Item) {
		self.store(value, atomic::Ordering::Relaxed);
	}
}
/// `BitAccess` is a pure extension trait: every `Radium` implementor whose
/// item type is a bit register receives it with no additional code.
impl<A> BitAccess for A
where
	A: Radium,
	A::Item: BitRegister,
{
}
/// A read-only handle to a shared memory element: exposes only `load`, so
/// holders can observe, but never modify, bits owned by other handles.
pub trait BitSafe {
	// The underlying register type being viewed.
	type Mem: BitRegister;
	/// Reads the element's current value.
	fn load(&self) -> Self::Mem;
}
/// Generates, for each `register => CellName => AtomName => AtomType` row,
/// a pair of `repr(transparent)` wrapper types — one over `Cell<register>`
/// and (behind the `atomic` feature) one over the atomic type — each
/// implementing the read-only `BitSafe` trait.
macro_rules! safe {
	($($t:ident => $cw:ident => $aw:ident => $a:path),+ $(,)?) => { $(
		#[derive(Debug)]
		#[repr(transparent)]
		pub struct $cw {
			inner: Cell<$t>,
		}
		#[derive(Debug)]
		#[repr(transparent)]
		#[cfg(feature = "atomic")]
		pub struct $aw {
			inner: $a,
		}
		impl BitSafe for $cw {
			type Mem = $t;
			#[inline(always)]
			fn load(&self) -> $t {
				self.inner.get()
			}
		}
		#[cfg(feature = "atomic")]
		impl BitSafe for $aw {
			type Mem = $t;
			#[inline(always)]
			fn load(&self) -> $t {
				self.inner.load(atomic::Ordering::Relaxed)
			}
		}
	)+ };
}
// Generate the read-only wrapper types for each register width.
safe! {
	u8 => BitSafeCellU8 => BitSafeAtomU8 => atomic::AtomicU8,
	u16 => BitSafeCellU16 => BitSafeAtomU16 => atomic::AtomicU16,
	u32 => BitSafeCellU32 => BitSafeAtomU32 => atomic::AtomicU32,
}
// `u64` wrappers exist only on 64-bit targets. NOTE(review): this gates on
// pointer width as a proxy for 64-bit atomic support — `cfg(target_has_atomic
// = "64")` would express the actual requirement; confirm intended targets.
#[cfg(target_pointer_width = "64")]
safe!(u64 => BitSafeCellU64 => BitSafeAtomU64 => atomic::AtomicU64);
safe!(usize => BitSafeCellUsize => BitSafeAtomUsize => atomic::AtomicUsize);
} | |
pub mod array { | |
use crate::{ | |
order::{ | |
BitOrder, | |
Lsb0, | |
}, | |
slice::BitSlice, | |
view::BitView, | |
}; | |
use core::{ | |
marker::PhantomData, | |
mem::MaybeUninit, | |
slice, | |
}; | |
/// A fixed-size region of memory viewed as individual bits — the crate's
/// analogue of `[bool; N]`.
///
/// `repr(transparent)` guarantees it has exactly the layout of its backing
/// store `V`; the pointer casts in the `TryFrom` impls below rely on this.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct BitArray<O = Lsb0, V = [usize; 1]>
where
	O: BitOrder,
	V: BitView,
{
	// Bit-ordering marker; occupies no space.
	_ord: PhantomData<O>,
	// The backing storage: a register, or an array of registers.
	data: V,
}
impl<O, V> BitArray<O, V>
where
	O: BitOrder,
	V: BitView,
{
	/// Constructs a new array with every bit cleared.
	#[inline(always)]
	pub fn zeroed() -> Self {
		Self {
			_ord: PhantomData,
			// SAFETY: NOTE(review): soundness relies on `BitView` being
			// implemented only for types (registers and register arrays)
			// for which the all-zero bit pattern is a valid value — confirm
			// against the `view` module.
			data: unsafe { MaybeUninit::zeroed().assume_init() },
		}
	}
	/// Wraps an existing buffer as a bit array.
	#[inline(always)]
	pub fn new(data: V) -> Self {
		Self {
			_ord: PhantomData,
			data,
		}
	}
	/// Consumes the array, returning the raw backing buffer.
	#[inline(always)]
	pub fn value(self) -> V {
		self.data
	}
	/// Views the array as an immutable bit slice over its whole storage.
	#[inline(always)]
	pub fn as_bitslice(&self) -> &BitSlice<O, V::Mem> {
		self.data.view_bits::<O>()
	}
	/// Views the array as a mutable bit slice over its whole storage.
	#[inline(always)]
	pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<O, V::Mem> {
		self.data.view_bits_mut::<O>()
	}
	/// Views the backing storage as an ordinary element slice.
	#[inline(always)]
	pub fn as_slice(&self) -> &[V::Mem] {
		// SAFETY: NOTE(review): assumes `V` is laid out as exactly
		// `V::const_elts()` contiguous `V::Mem` elements (the `BitView`
		// contract for registers and register arrays) — confirm.
		unsafe {
			slice::from_raw_parts(
				&self.data as *const V as *const V::Mem,
				V::const_elts(),
			)
		}
	}
	/// Views the backing storage as a mutable element slice.
	#[inline(always)]
	pub fn as_mut_slice(&mut self) -> &mut [V::Mem] {
		// SAFETY: same layout assumption as `as_slice`, plus `&mut self`
		// guarantees exclusive access.
		unsafe {
			slice::from_raw_parts_mut(
				&mut self.data as *mut V as *mut V::Mem,
				V::const_elts(),
			)
		}
	}
	/// Borrows the backing buffer directly.
	#[inline(always)]
	pub fn as_buffer(&self) -> &V {
		&self.data
	}
	/// Mutably borrows the backing buffer directly.
	#[inline(always)]
	pub fn as_mut_buffer(&mut self) -> &mut V {
		&mut self.data
	}
}
mod ops {
	//! Operator implementations for `BitArray`. Each Boolean operator
	//! forwards to the corresponding `BitSlice` assignment operator, so the
	//! supported `Rhs` types are exactly those `BitSlice` supports.
	use crate::{
		array::BitArray,
		order::BitOrder,
		slice::BitSlice,
		view::BitView,
	};
	use core::ops::{
		BitAnd,
		BitAndAssign,
		BitOr,
		BitOrAssign,
		BitXor,
		BitXorAssign,
		Deref,
		DerefMut,
		Index,
		IndexMut,
		Not,
	};
	/// `self & rhs` by value: AND-assigns into the owned copy and returns it.
	impl<O, V, Rhs> BitAnd<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: BitAndAssign<Rhs>,
	{
		type Output = Self;
		#[inline]
		fn bitand(mut self, rhs: Rhs) -> Self::Output {
			*self.as_mut_bitslice() &= rhs;
			self
		}
	}
	/// `self &= rhs` in place, delegating to the `BitSlice` view.
	impl<O, V, Rhs> BitAndAssign<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: BitAndAssign<Rhs>,
	{
		#[inline]
		fn bitand_assign(&mut self, rhs: Rhs) {
			*self.as_mut_bitslice() &= rhs;
		}
	}
	/// `self | rhs` by value.
	impl<O, V, Rhs> BitOr<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: BitOrAssign<Rhs>,
	{
		type Output = Self;
		#[inline]
		fn bitor(mut self, rhs: Rhs) -> Self::Output {
			*self.as_mut_bitslice() |= rhs;
			self
		}
	}
	/// `self |= rhs` in place.
	impl<O, V, Rhs> BitOrAssign<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: BitOrAssign<Rhs>,
	{
		#[inline]
		fn bitor_assign(&mut self, rhs: Rhs) {
			*self.as_mut_bitslice() |= rhs;
		}
	}
	/// `self ^ rhs` by value.
	impl<O, V, Rhs> BitXor<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: BitXorAssign<Rhs>,
	{
		type Output = Self;
		#[inline]
		fn bitxor(mut self, rhs: Rhs) -> Self::Output {
			*self.as_mut_bitslice() ^= rhs;
			self
		}
	}
	/// `self ^= rhs` in place.
	impl<O, V, Rhs> BitXorAssign<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: BitXorAssign<Rhs>,
	{
		#[inline]
		fn bitxor_assign(&mut self, rhs: Rhs) {
			*self.as_mut_bitslice() ^= rhs;
		}
	}
	/// Dereferences to the bit-slice view, giving `BitArray` the entire
	/// `BitSlice` API for free.
	impl<O, V> Deref for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type Target = BitSlice<O, V::Mem>;
		#[inline(always)]
		fn deref(&self) -> &Self::Target {
			self.as_bitslice()
		}
	}
	impl<O, V> DerefMut for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn deref_mut(&mut self) -> &mut Self::Target {
			self.as_mut_bitslice()
		}
	}
	/// Indexing by any index type the bit slice accepts (bit position or
	/// range).
	impl<O, V, Idx> Index<Idx> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: Index<Idx>,
	{
		type Output = <BitSlice<O, V::Mem> as Index<Idx>>::Output;
		#[inline]
		fn index(&self, index: Idx) -> &Self::Output {
			self.as_bitslice().index(index)
		}
	}
	impl<O, V, Idx> IndexMut<Idx> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		BitSlice<O, V::Mem>: IndexMut<Idx>,
	{
		#[inline]
		fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
			self.as_mut_bitslice().index_mut(index)
		}
	}
	/// `!self` by value: element-wise complement of the raw storage (every
	/// storage bit belongs to the array, so this flips all live bits).
	impl<O, V> Not for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type Output = Self;
		#[inline]
		fn not(mut self) -> Self::Output {
			for elem in self.as_mut_slice() {
				*elem = !*elem;
			}
			self
		}
	}
}
mod traits {
	//! Standard-library trait implementations for `BitArray`, almost all
	//! delegating to the equivalent `BitSlice` implementation over the
	//! array's bits.
	use crate::{
		array::BitArray,
		index::BitIdx,
		order::BitOrder,
		slice::BitSlice,
		store::BitStore,
		view::BitView,
	};
	use core::{
		borrow::{
			Borrow,
			BorrowMut,
		},
		cmp,
		convert::TryFrom,
		fmt::{
			self,
			Binary,
			Debug,
			Display,
			Formatter,
			LowerHex,
			Octal,
			UpperHex,
		},
		hash::{
			Hash,
			Hasher,
		},
	};
	/// Borrow as the bit-slice view.
	impl<O, V> Borrow<BitSlice<O, V::Mem>> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn borrow(&self) -> &BitSlice<O, V::Mem> {
			self.as_bitslice()
		}
	}
	impl<O, V> BorrowMut<BitSlice<O, V::Mem>> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn borrow_mut(&mut self) -> &mut BitSlice<O, V::Mem> {
			self.as_mut_bitslice()
		}
	}
	// Equality and ordering are fully delegated to `BitSlice`.
	impl<O, V> Eq for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
	}
	impl<O, V> Ord for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn cmp(&self, other: &Self) -> cmp::Ordering {
			self.as_bitslice().cmp(other.as_bitslice())
		}
	}
	/// Symmetric comparison: a bit slice can be compared against an array.
	impl<O, V, T> PartialEq<BitArray<O, V>> for BitSlice<O, T>
	where
		O: BitOrder,
		V: BitView,
		T: BitStore,
	{
		#[inline]
		fn eq(&self, other: &BitArray<O, V>) -> bool {
			self == other.as_bitslice()
		}
	}
	/// An array compares equal to anything its bit-slice view does.
	impl<O, V, Rhs> PartialEq<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		Rhs: ?Sized,
		BitSlice<O, V::Mem>: PartialEq<Rhs>,
	{
		#[inline]
		fn eq(&self, other: &Rhs) -> bool {
			self.as_bitslice() == other
		}
	}
	impl<O, V, T> PartialOrd<BitArray<O, V>> for BitSlice<O, T>
	where
		O: BitOrder,
		V: BitView,
		T: BitStore,
	{
		#[inline]
		fn partial_cmp(&self, other: &BitArray<O, V>) -> Option<cmp::Ordering> {
			self.partial_cmp(other.as_bitslice())
		}
	}
	impl<O, V, Rhs> PartialOrd<Rhs> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
		Rhs: ?Sized,
		BitSlice<O, V::Mem>: PartialOrd<Rhs>,
	{
		#[inline]
		fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
			self.as_bitslice().partial_cmp(other)
		}
	}
	impl<O, V> AsRef<BitSlice<O, V::Mem>> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn as_ref(&self) -> &BitSlice<O, V::Mem> {
			self.as_bitslice()
		}
	}
	impl<O, V> AsMut<BitSlice<O, V::Mem>> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn as_mut(&mut self) -> &mut BitSlice<O, V::Mem> {
			self.as_mut_bitslice()
		}
	}
	/// Any viewable buffer converts directly into an array over it.
	impl<O, V> From<V> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn from(data: V) -> Self {
			Self::new(data)
		}
	}
	/// Copying conversion: succeeds only when the slice's length exactly
	/// matches the array's bit capacity; the bits are copied into a fresh,
	/// zeroed array.
	impl<'a, O, V> TryFrom<&'a BitSlice<O, V::Mem>> for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type Error = TryFromBitSliceError<'a, O, V::Mem>;
		#[inline]
		fn try_from(src: &'a BitSlice<O, V::Mem>) -> Result<Self, Self::Error> {
			if src.len() != V::const_bits() {
				return Self::Error::err(src);
			}
			let mut out = Self::zeroed();
			out.copy_from_bitslice(src);
			Ok(out)
		}
	}
	/// Reborrowing conversion: additionally requires the slice to begin at
	/// bit 0 of its first element, so the storage region *is* a `V`.
	impl<'a, O, V> TryFrom<&'a BitSlice<O, V::Mem>> for &'a BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type Error = TryFromBitSliceError<'a, O, V::Mem>;
		#[inline]
		fn try_from(src: &'a BitSlice<O, V::Mem>) -> Result<Self, Self::Error> {
			let bitptr = src.bitptr();
			if src.len() != V::const_bits() || bitptr.head() != BitIdx::ZERO {
				return Self::Error::err(src);
			}
			// SAFETY: the checks above prove the slice spans exactly `V`'s
			// bits starting at element bit 0, so its base pointer addresses
			// a full `V`; `BitArray` is `repr(transparent)` over `V`, and
			// the produced reference inherits lifetime `'a` from `src`.
			Ok(unsafe { &*(bitptr.pointer().to_const() as *const BitArray<O, V>) })
		}
	}
	impl<'a, O, V> TryFrom<&'a mut BitSlice<O, V::Mem>> for &'a mut BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type Error = TryFromBitSliceError<'a, O, V::Mem>;
		#[inline]
		fn try_from(src: &'a mut BitSlice<O, V::Mem>) -> Result<Self, Self::Error> {
			let bitptr = src.bitptr();
			if src.len() != V::const_bits() || bitptr.head() != BitIdx::ZERO {
				return Self::Error::err(&*src);
			}
			// SAFETY: as in the shared conversion above; exclusivity is
			// carried over from the `&mut` source reference.
			Ok(unsafe { &mut *(bitptr.pointer().to_mut() as *mut BitArray<O, V>) })
		}
	}
	/// The default array has all bits cleared.
	impl<O, V> Default for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline(always)]
		fn default() -> Self {
			Self::zeroed()
		}
	}
	// Formatting delegates to the bit-slice renderers; `Display` reuses the
	// binary rendering.
	impl<O, V> Binary for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			Binary::fmt(self.as_bitslice(), fmt)
		}
	}
	impl<O, V> Debug for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			// Render the pointer/region header first, then the bit contents.
			self.bitptr().render(fmt, "Array", None)?;
			fmt.write_str(" ")?;
			Binary::fmt(self.as_bitslice(), fmt)
		}
	}
	impl<O, V> Display for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			Binary::fmt(self.as_bitslice(), fmt)
		}
	}
	impl<O, V> LowerHex for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			LowerHex::fmt(self.as_bitslice(), fmt)
		}
	}
	impl<O, V> Octal for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			Octal::fmt(self.as_bitslice(), fmt)
		}
	}
	impl<O, V> UpperHex for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			UpperHex::fmt(self.as_bitslice(), fmt)
		}
	}
	/// Hashes identically to the equivalent bit slice, preserving the
	/// `Eq`/`Hash` consistency contract.
	impl<O, V> Hash for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		#[inline]
		fn hash<H>(&self, hasher: &mut H)
		where H: Hasher {
			self.as_bitslice().hash(hasher)
		}
	}
	// Iteration borrows the bit-slice iterators.
	impl<'a, O, V> IntoIterator for &'a BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type IntoIter = <&'a BitSlice<O, V::Mem> as IntoIterator>::IntoIter;
		type Item = <&'a BitSlice<O, V::Mem> as IntoIterator>::Item;
		#[inline]
		fn into_iter(self) -> Self::IntoIter {
			self.as_bitslice().into_iter()
		}
	}
	impl<'a, O, V> IntoIterator for &'a mut BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
		type IntoIter = <&'a mut BitSlice<O, V::Mem> as IntoIterator>::IntoIter;
		type Item = <&'a mut BitSlice<O, V::Mem> as IntoIterator>::Item;
		#[inline]
		fn into_iter(self) -> Self::IntoIter {
			self.as_mut_bitslice().into_iter()
		}
	}
	impl<O, V> Unpin for BitArray<O, V>
	where
		O: BitOrder,
		V: BitView,
	{
	}
	/// Error produced when a bit slice's length (or alignment, for the
	/// reborrowing conversions) does not match the target array type. Holds
	/// the offending slice so callers can inspect or report it.
	#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
	pub struct TryFromBitSliceError<'a, O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		inner: &'a BitSlice<O, T>,
	}
	impl<'a, O, T> TryFromBitSliceError<'a, O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		/// Convenience constructor: wraps a slice in an `Err` of any `Ok`
		/// type, for direct use in `return` positions.
		#[inline(always)]
		fn err<A>(inner: &'a BitSlice<O, T>) -> Result<A, Self> {
			Err(Self { inner })
		}
	}
	impl<O, T> Debug for TryFromBitSliceError<'_, O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			fmt.debug_struct("TryFromBitSliceError")
				.field("inner", &self.inner)
				.finish()
		}
	}
	impl<O, T> Display for TryFromBitSliceError<'_, O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		#[inline]
		fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
			fmt.write_fmt(format_args!(
				"could not convert bitslice to bitarray: {:?}",
				self.inner
			))
		}
	}
	#[cfg(feature = "std")]
	impl<'a, O, T> std::error::Error for TryFromBitSliceError<'a, O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
	}
}
} | |
mod devel { | |
use core::ops::{ | |
Bound, | |
Range, | |
RangeBounds, | |
}; | |
/// Resolves any `RangeBounds<usize>` into a concrete half-open `Range`,
/// substituting `0` for a missing lower bound and `end` for a missing
/// upper bound, and shifting inclusive/exclusive endpoints as needed.
#[inline]
pub fn normalize_range<R>(bounds: R, end: usize) -> Range<usize>
where R: RangeBounds<usize> {
	let start = match bounds.start_bound() {
		Bound::Unbounded => 0,
		Bound::Included(&lo) => lo,
		Bound::Excluded(&lo) => lo + 1,
	};
	let stop = match bounds.end_bound() {
		Bound::Unbounded => end,
		Bound::Included(&hi) => hi + 1,
		Bound::Excluded(&hi) => hi,
	};
	start .. stop
}
/// Validates an already-normalized range: panics if it runs backwards, or
/// if an upper limit is supplied and the range exceeds it. `end` accepts
/// either a bare `usize` or an `Option<usize>` (pass `None` to skip the
/// bounds check).
///
/// # Panics
///
/// Panics when `range.start > range.end`, or when `range.end` exceeds a
/// provided `end` limit.
#[inline]
pub fn assert_range(range: Range<usize>, end: impl Into<Option<usize>>) {
	let (lo, hi) = (range.start, range.end);
	if lo > hi {
		panic!(
			"Malformed range: `{} .. {}` must run from lower to higher",
			lo, hi
		);
	}
	match end.into() {
		Some(limit) if hi > limit => panic!(
			"Range out of bounds: `{} .. {}` must not exceed `{}`",
			lo, hi, limit
		),
		_ => {},
	}
}
} | |
pub mod domain { | |
use crate::{ | |
index::{ | |
BitIdx, | |
BitTail, | |
}, | |
mem::BitMemory, | |
order::BitOrder, | |
slice::BitSlice, | |
store::BitStore, | |
}; | |
use core::{ | |
fmt::{ | |
self, | |
Binary, | |
Debug, | |
Formatter, | |
LowerHex, | |
Octal, | |
UpperHex, | |
}, | |
slice, | |
}; | |
use tap::{ | |
pipe::Pipe, | |
tap::Tap, | |
}; | |
use wyz::fmt::FmtForward; | |
// Generates the bit-region view enums `BitDomain` (shared) and
// `BitDomainMut` (exclusive). The optional `=> $m` fragment captures the
// literal token `mut`, which is spliced into every reference; `@ $a` is
// accepted for call-site symmetry with `domain!` but is unused in this
// transcriber.
macro_rules! bit_domain {
	($t:ident $(=> $m:ident)? $(@ $a:ident)?) => {
		// A bit-slice, split at its element boundaries.
		#[derive(Debug)]
		pub enum $t <'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			// The slice is wholly contained in one memory element, touching
			// neither edge of it.
			Enclave {
				head: BitIdx<T::Mem>,
				body: &'a $($m)? BitSlice<O, T>,
				tail: BitTail<T::Mem>,
			},
			// The slice spans elements: possibly-empty partial edges around
			// a run of fully-occupied interior elements.
			Region {
				head: &'a $($m)? BitSlice<O, T>,
				body: &'a $($m)? BitSlice<O, T::Mem>,
				tail: &'a $($m)? BitSlice<O, T>,
			},
		}
		impl<'a, O, T> $t <'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			// Destructures the `Enclave` variant, or returns `None`.
			#[inline]
			pub fn enclave(self) -> Option<(
				BitIdx<T::Mem>,
				&'a $($m)? BitSlice<O, T>,
				BitTail<T::Mem>,
			)> {
				if let Self::Enclave { head, body, tail } = self {
					Some((head, body, tail))
				}
				else {
					None
				}
			}
			// Destructures the `Region` variant, or returns `None`.
			#[inline]
			pub fn region(self) -> Option<(
				&'a $($m)? BitSlice<O, T>,
				&'a $($m)? BitSlice<O, T::Mem>,
				&'a $($m)? BitSlice<O, T>,
			)> {
				if let Self::Region { head, body, tail } = self {
					Some((head, body, tail))
				}
				else {
					None
				}
			}
			// Classifies `slice` by its head bit-index, spanned element
			// count, and tail bit-index.
			#[inline]
			pub(crate) fn new(slice: &'a $($m)? BitSlice<O, T>) -> Self {
				let bitptr = slice.bitptr();
				let h = bitptr.head();
				let (e, t) = h.span(bitptr.len());
				let w = T::Mem::BITS;
				match (h.value(), e, t.value()) {
					// Zero elements: nothing to view.
					(_, 0, _) => Self::empty(),
					// Begins and ends exactly on element boundaries.
					(0, _, t) if t == w => Self::spanning(slice),
					// Interior start, boundary end.
					(_, _, t) if t == w => Self::partial_head(slice, h),
					// Boundary start, interior end.
					(0, ..) => Self::partial_tail(slice, h, t),
					// Interior on both sides of a single element.
					(_, 1, _) => Self::minor(slice, h, t),
					// Interior on both sides, multiple elements.
					_ => Self::major(slice, h, t),
				}
			}
			// An empty domain: all three regions default to empty slices.
			#[inline]
			fn empty() -> Self {
				Self::Region {
					head: Default::default(),
					body: Default::default(),
					tail: Default::default(),
				}
			}
			// Multi-element slice with partial elements at both edges.
			#[inline]
			fn major(
				slice: &'a $($m)? BitSlice<O, T>,
				head: BitIdx<T::Mem>,
				tail: BitTail<T::Mem>,
			) -> Self {
				// Split off the bits remaining in the first element...
				let (head, rest) = bit_domain!(split $($m)?
					slice,
					(T::Mem::BITS - head.value()) as usize,
				);
				// ...then split the trailing partial element off the rest.
				let (body, tail) = bit_domain!(split $($m)?
					rest,
					rest.len() - (tail.value() as usize),
				);
				Self::Region {
					head: bit_domain!(retype $($m)? head),
					body: bit_domain!(retype $($m)? body),
					tail: bit_domain!(retype $($m)? tail),
				}
			}
			// Slice confined to the interior of a single element.
			#[inline]
			fn minor(
				slice: &'a $($m)? BitSlice<O, T>,
				head: BitIdx<T::Mem>,
				tail: BitTail<T::Mem>,
			) -> Self {
				Self::Enclave {
					head,
					body: slice,
					tail,
				}
			}
			// Slice starting inside an element but ending on a boundary.
			#[inline]
			fn partial_head(
				slice: &'a $($m)? BitSlice<O, T>,
				head: BitIdx<T::Mem>,
			) -> Self {
				let (head, rest) = bit_domain!(split $($m)?
					slice,
					(T::Mem::BITS - head.value()) as usize,
				);
				let (head, body) = (
					bit_domain!(retype $($m)? head),
					bit_domain!(retype $($m)? rest),
				);
				Self::Region {
					head,
					body,
					tail: Default::default(),
				}
			}
			// Slice starting on a boundary but ending inside an element.
			#[inline]
			fn partial_tail(
				slice: &'a $($m)? BitSlice<O, T>,
				_head: BitIdx<T::Mem>,
				tail: BitTail<T::Mem>,
			) -> Self {
				let (rest, tail) = bit_domain!(split $($m)?
					slice,
					slice.len() - (tail.value() as usize),
				);
				let (body, tail) = (
					bit_domain!(retype $($m)? rest),
					bit_domain!(retype $($m)? tail),
				);
				Self::Region {
					head: Default::default(),
					body,
					tail,
				}
			}
			// Slice covering its elements completely on both ends.
			#[inline]
			fn spanning(slice: &'a $($m)? BitSlice<O, T>) -> Self {
				Self::Region {
					head: Default::default(),
					body: bit_domain!(retype $($m)? slice),
					tail: Default::default(),
				}
			}
		}
	};
	// Helper arms: cast a bit-slice reference to a different storage
	// parameter (inferred at the call site), preserving mutability.
	(retype mut $slice:ident $(,)? ) => {
		unsafe { &mut *($slice as *mut BitSlice<O, _> as *mut BitSlice<O, _>) }
	};
	(retype $slice:ident $(,)? ) => {
		unsafe { &*($slice as *const BitSlice<O, _> as *const BitSlice<O, _>) }
	};
	// Helper arms: split without bounds checks; callers guarantee that
	// `$at` is within the slice.
	(split mut $slice:ident, $at:expr $(,)? ) => {
		unsafe { $slice.split_at_unchecked_mut($at) }
	};
	(split $slice:ident, $at:expr $(,)? ) => {
		unsafe { $slice.split_at_unchecked($at) }
	};
}
// Instantiate the shared and exclusive bit-domain views. Only the shared
// view can be duplicated, since the mutable variant holds `&mut` borrows.
bit_domain!(BitDomain);
bit_domain!(BitDomainMut => mut @ Alias);
impl<O, T> Clone for BitDomain<'_, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	// Shared-view domains are plain bundles of references: bitwise copy.
	#[inline(always)]
	fn clone(&self) -> Self {
		*self
	}
}
impl<O, T> Copy for BitDomain<'_, O, T>
where
	O: BitOrder,
	T: BitStore,
{
}
// Generates the element-domain view enums `Domain` (shared) and `DomainMut`
// (exclusive). `=> $m @ $a` adds `mut` to the interior-slice borrow and
// routes the partially-occupied edge elements through the `T::$a`
// associated type (e.g. `T::Access`) instead of bare `T`.
macro_rules! domain {
	($t:ident $(=> $m:ident @ $a:ident)?) => {
		// A bit-slice's backing memory elements, split at element
		// boundaries.
		#[derive(Debug)]
		pub enum $t <'a, T>
		where
			T: BitStore,
		{
			// All live bits reside in the interior of a single element.
			Enclave {
				head: BitIdx<T::Mem>,
				elem: &'a T $(::$a)?,
				tail: BitTail<T::Mem>,
			},
			// Optional partially-occupied edge elements around fully-live
			// interior memory.
			Region {
				head: Option<(BitIdx<T::Mem>, &'a T $(::$a)?)>,
				body: &'a $($m)? [T::Mem],
				tail: Option<(&'a T $(::$a)?, BitTail<T::Mem>)>,
			}
		}
		impl<'a, T> $t <'a, T>
		where
			T: BitStore,
		{
			// Destructures the `Enclave` variant, or returns `None`.
			#[inline]
			pub fn enclave(self) -> Option<(
				BitIdx<T::Mem>,
				&'a T $(::$a)?,
				BitTail<T::Mem>,
			)> {
				if let Self::Enclave { head, elem, tail } = self {
					Some((head, elem, tail))
				} else {
					None
				}
			}
			// Destructures the `Region` variant, or returns `None`.
			#[inline]
			pub fn region(self) -> Option<(
				Option<(BitIdx<T::Mem>, &'a T $(::$a)?)>,
				&'a $($m)? [T::Mem],
				Option<(&'a T $(::$a)?, BitTail<T::Mem>)>,
			)> {
				if let Self::Region { head, body, tail } = self {
					Some((head,body,tail))
				}
				else {
					None
				}
			}
			// Classifies `slice` by head index, element count, and tail
			// index, then builds the matching variant from its raw base
			// pointer.
			#[inline]
			pub(crate) fn new<O>(slice: &'a $($m)? BitSlice<O, T>) -> Self
			where O: BitOrder {
				let bitptr = slice.bitptr();
				let head = bitptr.head();
				let elts = bitptr.elements();
				let tail = bitptr.tail();
				let bits = T::Mem::BITS;
				let base = bitptr.pointer().to_const() as *const _;
				match (head.value(), elts, tail.value()) {
					// Same classification as `bit_domain!`, but over raw
					// element counts.
					(_, 0, _) => Self::empty(),
					(0, _, t) if t == bits => Self::spanning(base, elts),
					(_, _, t) if t == bits => Self::partial_head(base, elts, head),
					(0, ..) => Self::partial_tail(base, elts, tail),
					(_, 1, _) => Self::minor(base, head, tail),
					_ => Self::major(base, elts, head, tail),
				}
			}
			// No elements at all: an all-empty `Region`.
			#[inline]
			fn empty() -> Self {
				Self::Region {
					head: None,
					body: & $($m)? [],
					tail: None,
				}
			}
			// Partial elements on both edges, fully-live interior between.
			#[inline]
			fn major(
				base: *const T $(::$a)?,
				elts: usize,
				head: BitIdx<T::Mem>,
				tail: BitTail<T::Mem>,
			) -> Self {
				let h = unsafe { &*base };
				let t = unsafe { &*base.add(elts - 1) };
				// Interior excludes both edge elements.
				let body = domain!(slice $($m)? base.add(1), elts - 2);
				Self::Region {
					head: Some((head, h)),
					body,
					tail: Some((t, tail)),
				}
			}
			// All live bits inside one element.
			#[inline]
			fn minor(
				addr: *const T $(::$a)?,
				head: BitIdx<T::Mem>,
				tail: BitTail<T::Mem>,
			) -> Self {
				Self::Enclave {
					head,
					elem: unsafe { &*addr },
					tail,
				}
			}
			// Partial first element; ends on an element boundary.
			#[inline]
			fn partial_head(
				base: *const T $(::$a)?,
				elts: usize,
				head: BitIdx<T::Mem>,
			) -> Self {
				let h = unsafe { &*base };
				let body = domain!(slice $($m)? base.add(1), elts - 1);
				Self::Region {
					head: Some((head, h)),
					body,
					tail: None,
				}
			}
			// Starts on an element boundary; partial last element.
			#[inline]
			fn partial_tail(
				base: *const T $(::$a)?,
				elts: usize,
				tail: BitTail<T::Mem>,
			) -> Self {
				let t = unsafe { &*base.add(elts - 1) };
				let body = domain!(slice $($m)? base, elts - 1);
				Self::Region {
					head: None,
					body,
					tail: Some((t, tail)),
				}
			}
			// Every element fully live.
			#[inline]
			fn spanning(base: *const T $(::$a)?, elts: usize) -> Self {
				Self::Region {
					head: None,
					body: domain!(slice $($m)? base, elts),
					tail: None,
				}
			}
		}
	};
	// Helper arms: materialize the interior element slice from a raw base
	// pointer and a length; callers guarantee validity of the region.
	(slice mut $base:expr, $elts:expr) => {
		unsafe { slice::from_raw_parts_mut($base as *const _ as *mut _, $elts) }
	};
	(slice $base:expr, $elts:expr) => {
		unsafe { slice::from_raw_parts($base as *const _, $elts) }
	};
}
// Instantiate the shared and exclusive element-domain views; only the
// shared view can be freely duplicated.
domain!(Domain);
domain!(DomainMut => mut @ Access);
impl<T> Clone for Domain<'_, T>
where T: BitStore
{
	// Shared-view domains are plain bundles of references: bitwise copy.
	#[inline(always)]
	fn clone(&self) -> Self {
		*self
	}
}
impl<'a, T> Iterator for Domain<'a, T>
where T: BitStore
{
	// Yields a snapshot of each underlying memory element, front to back.
	type Item = T::Mem;
	#[inline]
	fn next(&mut self) -> Option<Self::Item> {
		match self {
			Self::Enclave { elem, .. } => {
				// Single element: yield it once, then become the empty
				// region so iteration terminates.
				elem.load_value().pipe(Some).tap(|_| *self = Self::empty())
			},
			Self::Region { head, body, tail } => {
				// Drain the partial head, then the interior, then the
				// partial tail, clearing each stage as it is consumed.
				if let Some((_, elem)) = *head {
					return elem.load_value().pipe(Some).tap(|_| *head = None);
				}
				if let Some((elem, rest)) = body.split_first() {
					*body = rest;
					return Some(*elem);
				}
				if let Some((elem, _)) = *tail {
					return elem.load_value().pipe(Some).tap(|_| *tail = None);
				}
				None
			},
		}
	}
}
impl<'a, T> DoubleEndedIterator for Domain<'a, T>
where T: BitStore
{
	#[inline]
	fn next_back(&mut self) -> Option<Self::Item> {
		match self {
			Self::Enclave { elem, .. } => {
				// Single element: yield it once, then become empty.
				elem.load_value().pipe(Some).tap(|_| *self = Self::empty())
			},
			Self::Region { head, body, tail } => {
				// Mirror of `next`: drain the partial tail, then the
				// interior from the back, then the partial head.
				if let Some((elem, _)) = *tail {
					return elem.load_value().pipe(Some).tap(|_| *tail = None);
				}
				if let Some((elem, rest)) = body.split_last() {
					*body = rest;
					return Some(*elem);
				}
				if let Some((_, elem)) = *head {
					return elem.load_value().pipe(Some).tap(|_| *head = None);
				}
				None
			},
		}
	}
}
impl<T> ExactSizeIterator for Domain<'_, T>
where T: BitStore
{
	// Remaining length: one for an enclave; otherwise each present edge
	// element plus the interior element count.
	#[inline]
	fn len(&self) -> usize {
		match self {
			Self::Enclave { .. } => 1,
			Self::Region { head, body, tail } => {
				head.is_some() as usize + body.len() + tail.is_some() as usize
			},
		}
	}
}
// Once exhausted, `Domain` stays exhausted (every stage is cleared as it is
// consumed), and it is a plain bundle of references, so it may be copied.
impl<T> core::iter::FusedIterator for Domain<'_, T> where T: BitStore
{
}
impl<T> Copy for Domain<'_, T> where T: BitStore
{
}
// Implements the listed numeric formatting traits for `Domain` by rendering
// each element through the matching `FmtForward` adapter inside a
// debug-list.
macro_rules! fmt {
	($($f:ty => $fwd:ident),+ $(,)?) => { $(
		impl<T> $f for Domain<'_, T>
		where T: BitStore
		{
			#[inline]
			fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
				fmt.debug_list()
					.entries(self.into_iter().map(FmtForward::$fwd))
					.finish()
			}
		}
	)+ };
}
fmt!(
	Binary => fmt_binary,
	LowerHex => fmt_lower_hex,
	Octal => fmt_octal,
	UpperHex => fmt_upper_hex,
);
} | |
pub mod field { | |
use crate::{ | |
access::BitAccess, | |
array::BitArray, | |
domain::{ | |
Domain, | |
DomainMut, | |
}, | |
index::BitMask, | |
mem::BitMemory, | |
order::{ | |
BitOrder, | |
Lsb0, | |
Msb0, | |
}, | |
slice::BitSlice, | |
store::BitStore, | |
view::BitView, | |
}; | |
use core::{ | |
mem, | |
ptr, | |
}; | |
use tap::pipe::Pipe; | |
#[cfg(feature = "alloc")] | |
use crate::{ | |
boxed::BitBox, | |
mem::BitRegister, | |
vec::BitVec, | |
}; | |
/// Reads integer values out of, and writes them into, regions of a
/// bit-slice.
pub trait BitField {
	/// Loads using the target's native endianness, chosen at compile time.
	#[inline(always)]
	fn load<M>(&self) -> M
	where M: BitMemory {
		#[cfg(target_endian = "little")]
		return self.load_le::<M>();
		#[cfg(target_endian = "big")]
		return self.load_be::<M>();
	}
	/// Stores using the target's native endianness, chosen at compile time.
	#[inline(always)]
	fn store<M>(&mut self, value: M)
	where M: BitMemory {
		#[cfg(target_endian = "little")]
		self.store_le(value);
		#[cfg(target_endian = "big")]
		self.store_be(value);
	}
	/// Loads, treating the lowest-addressed element as least significant.
	fn load_le<M>(&self) -> M
	where M: BitMemory;
	/// Loads, treating the lowest-addressed element as most significant.
	fn load_be<M>(&self) -> M
	where M: BitMemory;
	/// Stores, placing the least significant chunk at the lowest address.
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory;
	/// Stores, placing the least significant chunk at the highest address.
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory;
}
impl<T> BitField for BitSlice<Lsb0, T>
where T: BitStore
{
	/// Assembles an `M` from the slice, treating the element at the lowest
	/// address as the least significant chunk of the value.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		check::<M>("load", self.len());
		match self.domain() {
			// Single element: mask, shift down to bit zero, resize.
			Domain::Enclave { head, elem, tail } => {
				get::<T, M>(elem, Lsb0::mask(head, tail), head.value())
			},
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				// Highest chunk first: the partial tail element.
				if let Some((elem, tail)) = tail {
					accum = get::<T, M>(elem, Lsb0::mask(None, tail), 0);
				}
				// Interior elements, from highest address down.
				for elem in body.iter().rev().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				// Lowest chunk last: the partial head element.
				if let Some((head, elem)) = head {
					let shamt = head.value();
					accum <<= T::Mem::BITS - shamt;
					accum |= get::<T, M>(elem, Lsb0::mask(head, None), shamt);
				}
				accum
			},
		}
	}
	/// Assembles an `M` from the slice, treating the element at the lowest
	/// address as the most significant chunk of the value.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		check::<M>("load", self.len());
		match self.domain() {
			Domain::Enclave { head, elem, tail } => {
				get::<T, M>(elem, Lsb0::mask(head, tail), head.value())
			},
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				// Highest chunk first: the partial head element.
				if let Some((head, elem)) = head {
					accum =
						get::<T, M>(elem, Lsb0::mask(head, None), head.value());
				}
				// Interior elements in address order.
				for elem in body.iter().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				// Lowest chunk last: the partial tail element.
				if let Some((elem, tail)) = tail {
					accum <<= tail.value();
					accum |= get::<T, M>(elem, Lsb0::mask(None, tail), 0);
				}
				accum
			},
		}
	}
	/// Disassembles `value` into the slice, placing its least significant
	/// chunk in the element at the lowest address.
	#[inline]
	fn store_le<M>(&mut self, mut value: M)
	where M: BitMemory {
		check::<M>("store", self.len());
		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => {
				set::<T, M>(elem, value, Lsb0::mask(head, tail), head.value());
			},
			DomainMut::Region { head, body, tail } => {
				// Lowest chunk into the partial head element.
				if let Some((head, elem)) = head {
					let shamt = head.value();
					set::<T, M>(elem, value, Lsb0::mask(head, None), shamt);
					value >>= T::Mem::BITS - shamt;
				}
				// Successive chunks into the interior, in address order.
				for elem in body.iter_mut() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				// Remaining chunk into the partial tail element.
				if let Some((elem, tail)) = tail {
					set::<T, M>(elem, value, Lsb0::mask(None, tail), 0);
				}
			},
		}
	}
	/// Disassembles `value` into the slice, placing its least significant
	/// chunk in the element at the highest address.
	#[inline]
	fn store_be<M>(&mut self, mut value: M)
	where M: BitMemory {
		check::<M>("store", self.len());
		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => {
				set::<T, M>(elem, value, Lsb0::mask(head, tail), head.value());
			},
			DomainMut::Region { head, body, tail } => {
				// Lowest chunk into the partial tail element.
				if let Some((elem, tail)) = tail {
					set::<T, M>(elem, value, Lsb0::mask(None, tail), 0);
					value >>= tail.value()
				}
				// Successive chunks into the interior, highest address down.
				for elem in body.iter_mut().rev() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				// Remaining chunk into the partial head element.
				if let Some((head, elem)) = head {
					set::<T, M>(
						elem,
						value,
						Lsb0::mask(head, None),
						head.value(),
					);
				}
			},
		}
	}
}
impl<T> BitField for BitSlice<Msb0, T>
where T: BitStore
{
	/// Assembles an `M` from the slice, treating the element at the lowest
	/// address as the least significant chunk of the value.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		check::<M>("load", self.len());
		match self.domain() {
			// Under `Msb0`, live bits sit `BITS - tail` above bit zero, so
			// that is the down-shift needed after masking.
			Domain::Enclave { head, elem, tail } => get::<T, M>(
				elem,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				// Highest chunk first: the partial tail element.
				if let Some((elem, tail)) = tail {
					accum = get::<T, M>(
						elem,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
				}
				// Interior elements, from highest address down.
				for elem in body.iter().rev().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				// Lowest chunk last: the partial head element.
				if let Some((head, elem)) = head {
					accum <<= T::Mem::BITS - head.value();
					accum |= get::<T, M>(elem, Msb0::mask(head, None), 0);
				}
				accum
			},
		}
	}
	/// Assembles an `M` from the slice, treating the element at the lowest
	/// address as the most significant chunk of the value.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		check::<M>("load", self.len());
		match self.domain() {
			Domain::Enclave { head, elem, tail } => get::<T, M>(
				elem,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				// Highest chunk first: the partial head element.
				if let Some((head, elem)) = head {
					accum = get::<T, M>(elem, Msb0::mask(head, None), 0);
				}
				// Interior elements in address order.
				for elem in body.iter().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				// Lowest chunk last: the partial tail element.
				if let Some((elem, tail)) = tail {
					let width = tail.value();
					accum <<= width;
					accum |= get::<T, M>(
						elem,
						Msb0::mask(None, tail),
						T::Mem::BITS - width,
					);
				}
				accum
			},
		}
	}
	/// Disassembles `value` into the slice, placing its least significant
	/// chunk in the element at the lowest address.
	#[inline]
	fn store_le<M>(&mut self, mut value: M)
	where M: BitMemory {
		check::<M>("store", self.len());
		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => set::<T, M>(
				elem,
				value,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			DomainMut::Region { head, body, tail } => {
				// Lowest chunk into the partial head element.
				if let Some((head, elem)) = head {
					set::<T, M>(elem, value, Msb0::mask(head, None), 0);
					value >>= T::Mem::BITS - head.value();
				}
				// Successive chunks into the interior, in address order.
				for elem in body.iter_mut() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				// Remaining chunk into the partial tail element.
				if let Some((elem, tail)) = tail {
					set::<T, M>(
						elem,
						value,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
				}
			},
		}
	}
	/// Disassembles `value` into the slice, placing its least significant
	/// chunk in the element at the highest address.
	#[inline]
	fn store_be<M>(&mut self, mut value: M)
	where M: BitMemory {
		check::<M>("store", self.len());
		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => set::<T, M>(
				elem,
				value,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			DomainMut::Region { head, body, tail } => {
				// Lowest chunk into the partial tail element.
				if let Some((elem, tail)) = tail {
					set::<T, M>(
						elem,
						value,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
					value >>= tail.value();
				}
				// Successive chunks into the interior, highest address down.
				for elem in body.iter_mut().rev() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				// Remaining chunk into the partial head element.
				if let Some((head, elem)) = head {
					set::<T, M>(elem, value, Msb0::mask(head, None), 0);
				}
			},
		}
	}
}
impl<O, V> BitField for BitArray<O, V> | |
where | |
O: BitOrder, | |
V: BitView, | |
BitSlice<O, V::Mem>: BitField, | |
{ | |
#[inline] | |
fn load_le<M>(&self) -> M | |
where M: BitMemory { | |
self.as_bitslice().load_le() | |
} | |
#[inline] | |
fn load_be<M>(&self) -> M | |
where M: BitMemory { | |
self.as_bitslice().load_be() | |
} | |
#[inline] | |
fn store_le<M>(&mut self, value: M) | |
where M: BitMemory { | |
self.as_mut_bitslice().store_le(value) | |
} | |
#[inline] | |
fn store_be<M>(&mut self, value: M) | |
where M: BitMemory { | |
self.as_mut_bitslice().store_be(value) | |
} | |
} | |
#[cfg(feature = "alloc")]
impl<O, T> BitField for BitBox<O, T>
where
	O: BitOrder,
	T: BitRegister + BitStore,
	BitSlice<O, T>: BitField,
{
	/// Forwards to the boxed bit-slice.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		BitField::load_le(self.as_bitslice())
	}
	/// Forwards to the boxed bit-slice.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		BitField::load_be(self.as_bitslice())
	}
	/// Forwards to the boxed bit-slice.
	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_le(self.as_mut_bitslice(), value)
	}
	/// Forwards to the boxed bit-slice.
	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_be(self.as_mut_bitslice(), value)
	}
}
#[cfg(feature = "alloc")]
impl<O, T> BitField for BitVec<O, T>
where
	O: BitOrder,
	T: BitRegister + BitStore,
	BitSlice<O, T>: BitField,
{
	/// Forwards to the vector's live bit-slice.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		BitField::load_le(self.as_bitslice())
	}
	/// Forwards to the vector's live bit-slice.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		BitField::load_be(self.as_bitslice())
	}
	/// Forwards to the vector's live bit-slice.
	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_le(self.as_mut_bitslice(), value)
	}
	/// Forwards to the vector's live bit-slice.
	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_be(self.as_mut_bitslice(), value)
	}
}
#[inline] | |
fn check<M>(action: &'static str, len: usize) | |
where M: BitMemory { | |
if !(1 ..= M::BITS as usize).contains(&len) { | |
panic!( | |
"Cannot {} {} bits from a {}-bit region", | |
action, | |
M::BITS, | |
len | |
); | |
} | |
} | |
// Reads the bits selected by `mask` out of `elem`, shifts them down by
// `shamt`, and resizes the result into an `M` value. The `&`-reference
// operand forms are deliberate (hence the `clippy::op_ref` allow).
#[inline]
#[allow(clippy::op_ref)]
fn get<T, M>(elem: &T, mask: BitMask<T::Mem>, shamt: u8) -> M
where
	T: BitStore,
	M: BitMemory,
{
	elem.load_value()
		.pipe(|val| val & &mask.value())
		.pipe(|val| val >> &(shamt as usize))
		.pipe(resize::<T::Mem, M>)
}
// Writes `value` into the bits of `elem` selected by `mask`: the value is
// resized to the element type, shifted up by `shamt`, and masked, then the
// target bits are cleared and rewritten through the `BitAccess` interface.
#[inline]
fn set<T, M>(elem: &T::Access, value: M, mask: BitMask<T::Mem>, shamt: u8)
where
	T: BitStore,
	M: BitMemory,
{
	// Rebuild the mask value as a mask over the element's memory type.
	let mask = unsafe { BitMask::new(mask.value()) };
	let value = value
		.pipe(resize::<M, T::Mem>)
		.pipe(|val| val << &(shamt as usize))
		.pipe(|val| mask & val);
	// Clear the field, then OR in the new bits.
	elem.clear_bits(mask);
	elem.set_bits(value);
}
// Numerically resizes `value` from integer type `T` to `U`: high bits are
// truncated on narrowing, and zero-extended on widening, regardless of the
// target's byte order (see the `resize_inner` variants).
#[inline]
fn resize<T, U>(value: T) -> U
where
	T: BitMemory,
	U: BitMemory,
{
	let mut out = U::ZERO;
	let size_t = mem::size_of::<T>();
	let size_u = mem::size_of::<U>();
	// SAFETY: both references are valid for the copied byte count, which
	// never exceeds either type's size.
	unsafe {
		resize_inner::<T, U>(&value, &mut out, size_t, size_u);
	}
	out
}
// On little-endian targets the low-order bytes of both types share offset
// zero, so copying the overlapping byte prefix performs the resize
// directly.
#[inline(always)]
#[cfg(target_endian = "little")]
unsafe fn resize_inner<T, U>(
	src: &T,
	dst: &mut U,
	size_t: usize,
	size_u: usize,
)
{
	ptr::copy_nonoverlapping(
		src as *const T as *const u8,
		dst as *mut U as *mut u8,
		core::cmp::min(size_t, size_u),
	);
}
// On big-endian targets the low-order bytes sit at the high addresses, so
// the copy must be offset: skip the source's high bytes when narrowing, or
// leave the destination's high (zeroed) bytes untouched when widening.
#[inline(always)]
#[cfg(target_endian = "big")]
unsafe fn resize_inner<T, U>(
	src: &T,
	dst: &mut U,
	size_t: usize,
	size_u: usize,
)
{
	let src = src as *const T as *const u8;
	let dst = dst as *mut U as *mut u8;
	if size_t > size_u {
		ptr::copy_nonoverlapping(src.add(size_t - size_u), dst, size_u);
	}
	else {
		ptr::copy_nonoverlapping(src, dst.add(size_u - size_t), size_t);
	}
}
// Guard: the two `resize_inner` arms above cover every supported byte
// order; any other target is rejected at compile time. The `cfg` strips
// this item on real targets before the unknown macro could fire.
#[cfg(not(any(target_endian = "big", target_endian = "little")))]
compile_fail!(concat!(
	"This architecture is currently not supported. File an issue at ",
	env!(CARGO_PKG_REPOSITORY)
));
#[cfg(feature = "std")] | |
mod io { | |
#![cfg(feature = "std")] | |
use crate::{ | |
field::BitField, | |
mem::BitRegister, | |
order::BitOrder, | |
slice::BitSlice, | |
store::BitStore, | |
vec::BitVec, | |
}; | |
use core::mem; | |
use std::io::{ | |
self, | |
Read, | |
Write, | |
}; | |
impl<'a, O, T> Read for &'a BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	/// Fills `buf` with whole bytes taken from the front of the slice, then
	/// advances `self` past the bits consumed. A trailing span shorter than
	/// eight bits is never read.
	#[inline]
	fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
		let mut idx = 0;
		for (byte, slot) in self.chunks_exact(8).zip(buf.iter_mut()) {
			*slot = byte.load();
			idx += 1;
		}
		// Advance past the bytes just emitted; `idx * 8` is in bounds by
		// construction of the loop above.
		*self = unsafe { self.get_unchecked(idx * 8 ..) };
		Ok(idx)
	}
}
impl<'a, O, T> Write for &'a mut BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	/// Copies bytes from `buf` into the front of the slice, then advances
	/// `self` past the bits written. A trailing span shorter than eight
	/// bits is never written.
	#[inline]
	fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
		let mut idx = 0;
		for (slot, byte) in self
			.chunks_exact_mut(8)
			.remove_alias()
			.zip(buf.iter().copied())
		{
			slot.store(byte);
			idx += 1;
		}
		// `mem::take` detaches the exclusive borrow so the resliced
		// remainder can be stored back into `self`.
		*self = unsafe { mem::take(self).get_unchecked_mut(idx * 8 ..) };
		Ok(idx)
	}
	/// Bit-slice writes land directly in memory; nothing to flush.
	#[inline(always)]
	fn flush(&mut self) -> io::Result<()> {
		Ok(())
	}
}
impl<O, T> Write for BitVec<O, T>
where
	O: BitOrder,
	T: BitRegister + BitStore,
	BitSlice<O, T>: BitField,
{
	/// Appends the bytes of `buf` to the vector, growing it by exactly
	/// eight zeroed bits per byte before delegating to the slice writer.
	#[inline]
	fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
		let len = self.len();
		self.resize(len + buf.len() * 8, false);
		// The freshly reserved region starts at the old length.
		unsafe { self.get_unchecked_mut(len ..) }.write(buf)
	}
	/// Writes land in owned memory immediately; flushing is a no-op.
	#[inline(always)]
	fn flush(&mut self) -> io::Result<()> {
		Ok(())
	}
}
} | |
} | |
pub mod index { | |
use crate::{ | |
mem::BitRegister, | |
order::BitOrder, | |
}; | |
use core::{ | |
any, | |
convert::TryFrom, | |
fmt::{ | |
self, | |
Binary, | |
Debug, | |
Display, | |
Formatter, | |
}, | |
iter::{ | |
FusedIterator, | |
Sum, | |
}, | |
marker::PhantomData, | |
ops::{ | |
BitAnd, | |
BitOr, | |
Not, | |
}, | |
}; | |
/// A semantic index of a single live bit within a register type `R`.
/// Invariant: always in the range `0 .. R::BITS`.
#[repr(transparent)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitIdx<R>
where R: BitRegister
{
	// The counter value; must stay below `R::BITS`.
	idx: u8,
	// Binds the index to its register type without storing one.
	_ty: PhantomData<R>,
}
impl<R> BitIdx<R>
where R: BitRegister
{
	/// The highest valid index, `R::BITS - 1` (equal to `R::MASK`).
	pub const LAST: Self = Self {
		idx: R::MASK,
		_ty: PhantomData,
	};
	/// The lowest valid index, zero.
	pub const ZERO: Self = Self {
		idx: 0,
		_ty: PhantomData,
	};
	/// Checked constructor: rejects values at or above `R::BITS`.
	#[inline]
	pub(crate) fn new(value: u8) -> Result<Self, BitIdxErr<R>> {
		if value >= R::BITS {
			return Err(BitIdxErr::new(value));
		}
		Ok(unsafe { Self::new_unchecked(value) })
	}
	/// Unchecked constructor. Callers must guarantee `value < R::BITS`;
	/// the bound is verified only in debug builds.
	#[inline]
	pub(crate) unsafe fn new_unchecked(value: u8) -> Self {
		debug_assert!(
			value < R::BITS,
			"Bit index {} cannot exceed type width {}",
			value,
			R::BITS,
		);
		Self {
			idx: value,
			_ty: PhantomData,
		}
	}
	/// Unwraps the counter value.
	#[inline(always)]
	pub fn value(self) -> u8 {
		self.idx
	}
	/// Increments with wraparound: returns the successor index and whether
	/// the increment crossed into the next element.
	#[inline]
	pub(crate) fn next(self) -> (Self, bool) {
		let next = self.idx + 1;
		(
			// Masking keeps the wrapped value inside the valid range.
			unsafe { Self::new_unchecked(next & R::MASK) },
			next == R::BITS,
		)
	}
	/// Decrements with wraparound: returns the predecessor index and
	/// whether the decrement crossed into the previous element.
	#[inline]
	pub(crate) fn prev(self) -> (Self, bool) {
		let prev = self.idx.wrapping_sub(1);
		(
			unsafe { Self::new_unchecked(prev & R::MASK) },
			self.idx == 0,
		)
	}
	/// Translates this semantic index into an electrical position under
	/// the ordering `O`.
	#[inline(always)]
	pub fn position<O>(self) -> BitPos<R>
	where O: BitOrder {
		O::at::<R>(self)
	}
	/// Translates this semantic index into a one-hot selector under the
	/// ordering `O`.
	#[inline(always)]
	pub fn select<O>(self) -> BitSel<R>
	where O: BitOrder {
		O::select::<R>(self)
	}
	/// Widens the one-hot selector into a mask value.
	#[inline]
	pub fn mask<O>(self) -> BitMask<R>
	where O: BitOrder {
		self.select::<O>().mask()
	}
	/// Iterates over indexes from `self` up to, excluding, `upto`.
	#[inline]
	pub fn range(
		self,
		upto: BitTail<R>,
	) -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator
	{
		let (from, upto) = (self.value(), upto.value());
		debug_assert!(from <= upto, "Ranges must run from low to high");
		(from .. upto).map(|val| unsafe { Self::new_unchecked(val) })
	}
	/// Iterates over every valid index of `R`.
	#[inline]
	pub fn range_all() -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		(0 .. R::BITS).map(|val| unsafe { Self::new_unchecked(val) })
	}
	/// Moves the index by `by` bits in either direction, returning how many
	/// whole elements were crossed and the index within the final element.
	#[inline]
	pub fn offset(self, by: isize) -> (isize, Self) {
		let val = self.value();
		let (far, ovf) = by.overflowing_add(val as isize);
		if !ovf {
			if (0 .. R::BITS as isize).contains(&far) {
				// Still within the original element.
				(0, unsafe { Self::new_unchecked(far as u8) })
			}
			else {
				// Arithmetic shift preserves the sign for backward jumps.
				(far >> R::INDX, unsafe {
					Self::new_unchecked(far as u8 & R::MASK)
				})
			}
		}
		else {
			// Signed overflow: redo the division in unsigned space.
			let far = far as usize;
			((far >> R::INDX) as isize, unsafe {
				Self::new_unchecked(far as u8 & R::MASK)
			})
		}
	}
	/// Computes how many elements a span of `len` bits starting at `self`
	/// occupies, and the tail counter in the final element.
	#[inline]
	pub fn span(self, len: usize) -> (usize, BitTail<R>) {
		unsafe { BitTail::<R>::new_unchecked(self.value()) }.span(len)
	}
}
impl<R> TryFrom<u8> for BitIdx<R>
where R: BitRegister
{
	type Error = BitIdxErr<R>;
	/// Checked conversion: fails when `value` is not below `R::BITS`.
	#[inline(always)]
	fn try_from(value: u8) -> Result<Self, Self::Error> {
		Self::new(value)
	}
}
impl<R> Binary for BitIdx<R>
where R: BitRegister
{
	// Zero-padded binary rendering, wide enough for any index of `R`.
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(fmt, "{:0>1$b}", self.idx, R::INDX as usize)
	}
}
impl<R> Debug for BitIdx<R>
where R: BitRegister
{
	// Type-annotated form wrapping the binary rendering.
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(fmt, "BitIdx<{}>({:b})", any::type_name::<R>(), self)
	}
}
impl<R> Display for BitIdx<R>
where R: BitRegister
{
	// `Display` delegates to the binary rendering.
	#[inline(always)]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		Binary::fmt(&self, fmt)
	}
}
/// Error value: a `u8` that was rejected as a `BitIdx<R>` because it is
/// not below `R::BITS`.
#[repr(transparent)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitIdxErr<R>
where R: BitRegister
{
	// The rejected value; invariant: `err >= R::BITS`.
	err: u8,
	// Binds the error to its register type without storing one.
	_ty: PhantomData<R>,
}
impl<R> BitIdxErr<R>
where R: BitRegister
{
	/// Records an out-of-range index value. Debug builds verify that the
	/// value really is invalid for `R`: the assertion message fires when an
	/// error is constructed for a value that would have been a valid index.
	#[inline(always)]
	pub(crate) fn new(value: u8) -> Self {
		debug_assert!(
			value >= R::BITS,
			"Bit index {} is valid for type width {}",
			value,
			R::BITS
		);
		Self {
			err: value,
			_ty: PhantomData,
		}
	}
	/// Unwraps the rejected value.
	#[inline(always)]
	pub fn value(self) -> u8 {
		self.err
	}
}
impl<R> Debug for BitIdxErr<R>
where R: BitRegister
{
	// Type-annotated form showing the rejected value in decimal.
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(fmt, "BitIdxErr<{}>({})", any::type_name::<R>(), self.err)
	}
}
impl<R> Display for BitIdxErr<R>
where R: BitRegister
{
	// Human-readable explanation of why the value was rejected.
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(
			fmt,
			"The value {} is too large to index into {} ({} bits)",
			self.err,
			any::type_name::<R>(),
			R::BITS
		)
	}
}
// Marker impl so the error can be boxed as `dyn Error` under `std`.
#[cfg(feature = "std")]
impl<R> std::error::Error for BitIdxErr<R> where R: BitRegister
{
}
/// A one-past-the-end counter of live bits in a register type `R`.
/// Invariant: always in `0 ..= R::BITS` — unlike `BitIdx`, the upper bound
/// is inclusive, because a tail is an exclusive endpoint.
#[repr(transparent)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitTail<R>
where R: BitRegister
{
	// The exclusive endpoint; must not exceed `R::BITS`.
	end: u8,
	// Binds the counter to its register type without storing one.
	_ty: PhantomData<R>,
}
impl<R> BitTail<R>
where R: BitRegister
{
	/// The maximal tail: one past the last live bit, i.e. `R::BITS`.
	pub(crate) const LAST: Self = Self {
		end: R::BITS,
		_ty: PhantomData,
	};
	/// The minimal tail: zero live bits.
	pub(crate) const ZERO: Self = Self {
		end: 0,
		_ty: PhantomData,
	};
	/// Checked constructor: tails may equal `R::BITS` but not exceed it.
	#[inline]
	pub fn new(value: u8) -> Option<Self> {
		if value > R::BITS {
			return None;
		}
		Some(unsafe { Self::new_unchecked(value) })
	}
	/// Unchecked constructor. Callers must guarantee `value <= R::BITS`;
	/// the bound is verified only in debug builds.
	#[inline]
	pub(crate) unsafe fn new_unchecked(value: u8) -> Self {
		debug_assert!(
			value <= R::BITS,
			"Bit tail {} cannot exceed type width {}",
			value,
			R::BITS,
		);
		Self {
			end: value,
			_ty: PhantomData,
		}
	}
	/// Unwraps the counter value.
	#[inline(always)]
	pub fn value(self) -> u8 {
		self.end
	}
	/// Iterates over every tail reachable from the starting index `from`.
	pub fn range_from(
		from: BitIdx<R>,
	) -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		(from.idx ..= Self::LAST.end)
			.map(|tail| unsafe { BitTail::new_unchecked(tail) })
	}
	/// Computes the element count and closing tail of a span of `len` bits
	/// that begins at this tail position.
	#[inline]
	pub(crate) fn span(self, len: usize) -> (usize, Self) {
		if len == 0 {
			return (0, self);
		}
		let val = self.end;
		// Reinterpret the tail as a head index within the current element.
		let head = val & R::MASK;
		let bits_in_head = (R::BITS - head) as usize;
		// The span fits inside the current element.
		if len <= bits_in_head {
			return (1, unsafe { Self::new_unchecked(head + len as u8) });
		}
		let bits_after_head = len - bits_in_head;
		// Whole elements and leftover bits beyond the first element.
		let elts = bits_after_head >> R::INDX;
		let tail = bits_after_head as u8 & R::MASK;
		// A zero leftover means the span ends exactly on a boundary, so
		// its tail must be expressed as a full element (`R::BITS`), and
		// that final element is already counted in `elts`.
		let is_zero = (tail == 0) as u8;
		let edges = 2 - is_zero as usize;
		(elts + edges, unsafe {
			Self::new_unchecked((is_zero << R::INDX) | tail)
		})
	}
}
impl<R> Binary for BitTail<R>
where R: BitRegister
{
	// Zero-padded binary rendering; one digit wider than `BitIdx`, since a
	// tail may equal `R::BITS` itself.
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(fmt, "{:0>1$b}", self.end, R::INDX as usize + 1)
	}
}
impl<R> Debug for BitTail<R>
where R: BitRegister
{
	// Type-annotated form wrapping the binary rendering.
	#[inline]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		write!(fmt, "BitTail<{}>({:b})", any::type_name::<R>(), self)
	}
}
impl<R> Display for BitTail<R>
where R: BitRegister
{
	// `Display` delegates to the binary rendering.
	#[inline(always)]
	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
		Binary::fmt(&self, fmt)
	}
}
/// An electrical bit position within a register `R`.
///
/// Unlike `BitIdx` (a semantic counter), a `BitPos` names the actual
/// hardware bit; `new` enforces `pos < R::BITS`.
#[repr(transparent)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitPos<R>
where R: BitRegister
{
	// Invariant (checked in `new`): always less than `R::BITS`.
	pos: u8,
	// Marks the register type without storing one.
	_ty: PhantomData<R>,
}
impl<R> BitPos<R>
where R: BitRegister
{
	/// Constructs a position, rejecting values at or beyond the register
	/// width.
	#[inline]
	pub fn new(value: u8) -> Option<Self> {
		if value >= R::BITS {
			return None;
		}
		Some(unsafe { Self::new_unchecked(value) })
	}
	/// Constructs a position without checking the bound.
	///
	/// # Safety
	///
	/// `value` must be less than `R::BITS`; violations only trip a
	/// `debug_assert!` in debug builds.
	#[inline(always)]
	pub unsafe fn new_unchecked(value: u8) -> Self {
		debug_assert!(
			value < R::BITS,
			"Bit position {} cannot exceed type width {}",
			value,
			R::BITS,
		);
		Self {
			pos: value,
			_ty: PhantomData,
		}
	}
	/// Unwraps the position into its raw `u8` value.
	#[inline(always)]
	pub fn value(self) -> u8 {
		self.pos
	}
	/// Produces the one-hot selector with only this bit set.
	#[inline]
	pub fn select(self) -> BitSel<R> {
		unsafe { BitSel::new_unchecked(R::ONE << self.pos) }
	}
	/// Produces a single-bit mask for this position.
	#[inline]
	pub fn mask(self) -> BitMask<R> {
		self.select().mask()
	}
	/// Iterates over every valid position in the register, in index order.
	pub(crate) fn range_all() -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		BitIdx::<R>::range_all()
			.map(|idx| unsafe { Self::new_unchecked(idx.value()) })
	}
}
impl<R> Binary for BitPos<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
write!(fmt, "{:0>1$b}", self.pos, R::INDX as usize) | |
} | |
} | |
impl<R> Debug for BitPos<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
write!(fmt, "BitPos<{}>({:b})", any::type_name::<R>(), self) | |
} | |
} | |
impl<R> Display for BitPos<R> | |
where R: BitRegister | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
Binary::fmt(&self, fmt) | |
} | |
} | |
/// A one-hot selector: a register value with exactly one bit set.
///
/// Produced by `BitPos::select`; `new` enforces `count_ones() == 1`.
#[repr(transparent)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitSel<R>
where R: BitRegister
{
	// Invariant (checked in `new`): exactly one bit set.
	sel: R,
}
impl<R> BitSel<R>
where R: BitRegister
{
	/// Constructs a selector, rejecting any value that is not one-hot.
	#[inline]
	pub fn new(value: R) -> Option<Self> {
		if value.count_ones() != 1 {
			return None;
		}
		Some(unsafe { Self::new_unchecked(value) })
	}
	/// Constructs a selector without checking the one-hot invariant.
	///
	/// # Safety
	///
	/// `value` must have exactly one bit set; violations only trip a
	/// `debug_assert!` in debug builds.
	#[inline]
	pub unsafe fn new_unchecked(value: R) -> Self {
		debug_assert!(
			value.count_ones() == 1,
			"Selections are required to have exactly one set bit: {:0>1$b}",
			value,
			R::BITS as usize,
		);
		Self { sel: value }
	}
	/// Unwraps the selector into its raw register value.
	#[inline(always)]
	pub fn value(self) -> R {
		self.sel
	}
	/// Widens the selector into a (single-bit) multi-bit mask.
	#[inline(always)]
	pub fn mask(self) -> BitMask<R> {
		unsafe { BitMask::new(self.sel) }
	}
	/// Iterates over every valid one-hot selector, in position order.
	pub fn range_all() -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		BitPos::<R>::range_all().map(BitPos::select)
	}
}
impl<R> Binary for BitSel<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
write!(fmt, "{:0>1$b}", self.sel, R::BITS as usize) | |
} | |
} | |
impl<R> Debug for BitSel<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
write!(fmt, "BitSel<{}>({:b})", any::type_name::<R>(), self) | |
} | |
} | |
impl<R> Display for BitSel<R> | |
where R: BitRegister | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
Binary::fmt(&self, fmt) | |
} | |
} | |
/// A multi-bit mask over a register `R`: any combination of set bits,
/// including none (`ZERO`) and all (`ALL`).
#[repr(transparent)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitMask<R>
where R: BitRegister
{
	// The raw mask bits; no invariant beyond being a valid `R` value.
	mask: R,
}
impl<R> BitMask<R>
where R: BitRegister
{
	/// The mask with every bit set.
	pub const ALL: Self = Self { mask: R::ALL };
	/// The mask with no bits set.
	pub const ZERO: Self = Self { mask: R::ZERO };
	/// Wraps an arbitrary register value as a mask.
	///
	/// # Safety
	///
	/// Marked `unsafe` so callers acknowledge the value is being used as a
	/// mask for a specific ordering; no memory invariant is involved.
	#[inline(always)]
	pub unsafe fn new(value: R) -> Self {
		Self { mask: value }
	}
	/// Unwraps the mask into its raw register value.
	#[inline(always)]
	pub fn value(self) -> R {
		self.mask
	}
	/// Tests whether the bit named by `sel` is set in the mask.
	#[inline(always)]
	pub fn test(&self, sel: BitSel<R>) -> bool {
		self.mask & sel.sel != R::ZERO
	}
	/// Sets the bit named by `sel` in place.
	#[inline(always)]
	pub fn insert(&mut self, sel: BitSel<R>) {
		self.mask |= sel.sel;
	}
	/// Returns a new mask with the bit named by `sel` also set.
	#[inline(always)]
	pub fn combine(self, sel: BitSel<R>) -> Self {
		Self {
			mask: self.mask | sel.sel,
		}
	}
}
impl<R> Binary for BitMask<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
write!(fmt, "{:0>1$b}", self.mask, R::BITS as usize) | |
} | |
} | |
impl<R> Debug for BitMask<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
write!(fmt, "BitMask<{}>({:b})", any::type_name::<R>(), self) | |
} | |
} | |
impl<R> Display for BitMask<R> | |
where R: BitRegister | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
Binary::fmt(&self, fmt) | |
} | |
} | |
impl<R> Sum<BitSel<R>> for BitMask<R> | |
where R: BitRegister | |
{ | |
#[inline] | |
fn sum<I>(iter: I) -> Self | |
where I: Iterator<Item = BitSel<R>> { | |
iter.fold(Self::ZERO, Self::combine) | |
} | |
} | |
impl<R> BitAnd<R> for BitMask<R> | |
where R: BitRegister | |
{ | |
type Output = Self; | |
#[inline] | |
fn bitand(self, rhs: R) -> Self::Output { | |
Self { | |
mask: self.mask & rhs, | |
} | |
} | |
} | |
impl<R> BitOr<R> for BitMask<R> | |
where R: BitRegister | |
{ | |
type Output = Self; | |
#[inline] | |
fn bitor(self, rhs: R) -> Self::Output { | |
Self { | |
mask: self.mask | rhs, | |
} | |
} | |
} | |
impl<R> Not for BitMask<R> | |
where R: BitRegister | |
{ | |
type Output = Self; | |
#[inline] | |
fn not(self) -> Self::Output { | |
Self { mask: !self.mask } | |
} | |
} | |
} | |
pub mod mem {
	//! Descriptions of register element types and their bit-level geometry.
	use core::mem;
	use funty::IsUnsigned;
	use radium::marker::BitOps;
	/// An unsigned integer usable as the memory backing of bit storage.
	///
	/// Sealed so that only the fundamental unsigned integers below may
	/// implement it.
	pub trait BitMemory: IsUnsigned + seal::Sealed {
		/// The width of the type, in bits.
		const BITS: u8 = mem::size_of::<Self>() as u8 * 8;
		/// The number of bits needed to index a bit inside one element
		/// (`log2(BITS)`; valid because `BITS` is a power of two).
		const INDX: u8 = Self::BITS.trailing_zeros() as u8;
		/// A mask over the in-element index range `0 .. BITS`.
		const MASK: u8 = Self::BITS - 1;
	}
	/// A memory type the target processor can use directly as a shift/mask
	/// register.
	pub trait BitRegister: BitMemory + BitOps {
		/// The value with only the lowest bit set.
		const ONE: Self;
		/// The value with every bit set.
		const ALL: Self;
	}
	macro_rules! memory {
		($($t:ident),+ $(,)?) => { $(
			impl BitMemory for $t {}
			impl seal::Sealed for $t {}
		)+ };
	}
	memory!(u8, u16, u32, u64, u128, usize);
	macro_rules! register {
		($($t:ident),+ $(,)?) => { $(
			impl BitRegister for $t {
				const ONE: Self = 1;
				const ALL: Self = !0;
			}
		)+ };
	}
	register!(u8, u16, u32);
	// `u64` is only a register on 64-bit targets, where it is natively
	// shiftable in one instruction.
	#[cfg(target_pointer_width = "64")]
	impl BitRegister for u64 {
		const ALL: Self = !0;
		const ONE: Self = 1;
	}
	register!(usize);
	/// Computes the number of `T` elements required to hold `bits` bits
	/// (ceiling division by the bit width of `T`).
	pub const fn elts<T>(bits: usize) -> usize {
		let width = mem::size_of::<T>() * 8;
		bits / width + (bits % width != 0) as usize
	}
	/// `1` when `T`'s alignment is smaller than its size, `0` otherwise.
	pub(crate) const fn aligned_to_size<T>() -> usize {
		(mem::align_of::<T>() < mem::size_of::<T>()) as usize
	}
	/// Counts layout disagreements (alignment, size) between `A` and `B`;
	/// `0` means the layouts match exactly.
	pub(crate) const fn cmp_layout<A, B>() -> usize {
		(mem::align_of::<A>() != mem::align_of::<B>()) as usize
			+ (mem::size_of::<A>() != mem::size_of::<B>()) as usize
	}
	mod seal {
		/// Prevents downstream crates from implementing `BitMemory`.
		pub trait Sealed {}
	}
}
pub mod order { | |
use crate::{ | |
index::{ | |
BitIdx, | |
BitMask, | |
BitPos, | |
BitSel, | |
BitTail, | |
}, | |
mem::BitRegister, | |
}; | |
pub unsafe trait BitOrder: 'static { | |
fn at<R>(index: BitIdx<R>) -> BitPos<R> | |
where R: BitRegister; | |
#[inline] | |
fn select<R>(index: BitIdx<R>) -> BitSel<R> | |
where R: BitRegister { | |
Self::at::<R>(index).select() | |
} | |
#[inline] | |
fn mask<R>( | |
from: impl Into<Option<BitIdx<R>>>, | |
upto: impl Into<Option<BitTail<R>>>, | |
) -> BitMask<R> | |
where | |
R: BitRegister, | |
{ | |
let (from, upto) = match (from.into(), upto.into()) { | |
(None, None) => return BitMask::ALL, | |
(Some(from), None) => (from, BitTail::LAST), | |
(None, Some(upto)) => (BitIdx::ZERO, upto), | |
(Some(from), Some(upto)) => (from, upto), | |
}; | |
from.range(upto).map(Self::select::<R>).sum() | |
} | |
} | |
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] | |
pub struct Lsb0; | |
unsafe impl BitOrder for Lsb0 { | |
#[inline] | |
fn at<R>(index: BitIdx<R>) -> BitPos<R> | |
where R: BitRegister { | |
unsafe { BitPos::new_unchecked(index.value()) } | |
} | |
#[inline] | |
fn select<R>(index: BitIdx<R>) -> BitSel<R> | |
where R: BitRegister { | |
unsafe { BitSel::new_unchecked(R::ONE << index.value()) } | |
} | |
#[inline] | |
fn mask<R>( | |
from: impl Into<Option<BitIdx<R>>>, | |
upto: impl Into<Option<BitTail<R>>>, | |
) -> BitMask<R> | |
where | |
R: BitRegister, | |
{ | |
let from = from.into().unwrap_or(BitIdx::ZERO).value(); | |
let upto = upto.into().unwrap_or(BitTail::LAST).value(); | |
debug_assert!( | |
from <= upto, | |
"Ranges must run from low index ({}) to high ({})", | |
from, | |
upto | |
); | |
let ct = upto - from; | |
if ct == R::BITS { | |
return BitMask::ALL; | |
} | |
unsafe { BitMask::new(!(R::ALL << ct) << from) } | |
} | |
} | |
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] | |
pub struct Msb0; | |
unsafe impl BitOrder for Msb0 { | |
#[inline] | |
fn at<R>(index: BitIdx<R>) -> BitPos<R> | |
where R: BitRegister { | |
unsafe { BitPos::new_unchecked(R::MASK - index.value()) } | |
} | |
#[inline] | |
fn select<R>(index: BitIdx<R>) -> BitSel<R> | |
where R: BitRegister { | |
let msbit: R = R::ONE << R::MASK; | |
unsafe { BitSel::new_unchecked(msbit >> index.value()) } | |
} | |
#[inline] | |
fn mask<R>( | |
from: impl Into<Option<BitIdx<R>>>, | |
upto: impl Into<Option<BitTail<R>>>, | |
) -> BitMask<R> | |
where | |
R: BitRegister, | |
{ | |
let from = from.into().unwrap_or(BitIdx::ZERO).value(); | |
let upto = upto.into().unwrap_or(BitTail::LAST).value(); | |
debug_assert!( | |
from <= upto, | |
"Ranges must run from low index ({}) to high ({})", | |
from, | |
upto | |
); | |
let ct = upto - from; | |
if ct == R::BITS { | |
return BitMask::ALL; | |
} | |
unsafe { BitMask::new(!(R::ALL >> ct) >> from) } | |
} | |
} | |
#[cfg(target_endian = "little")] | |
pub use self::Lsb0 as LocalBits; | |
#[cfg(target_endian = "big")] | |
pub use self::Msb0 as LocalBits; | |
#[cfg(not(any(target_endian = "big", target_endian = "little")))] | |
compile_fail!(concat!( | |
"This architecture is currently not supported. File an issue at ", | |
env!(CARGO_PKG_REPOSITORY) | |
)); | |
pub fn verify<O>(verbose: bool) | |
where O: BitOrder { | |
verify_for_type::<O, u8>(verbose); | |
verify_for_type::<O, u16>(verbose); | |
verify_for_type::<O, u32>(verbose); | |
verify_for_type::<O, usize>(verbose); | |
#[cfg(target_pointer_width = "64")] | |
verify_for_type::<O, u64>(verbose); | |
} | |
pub fn verify_for_type<O, R>(verbose: bool) | |
where | |
O: BitOrder, | |
R: BitRegister, | |
{ | |
use core::any::type_name; | |
let mut accum = BitMask::<R>::ZERO; | |
let oname = type_name::<O>(); | |
let mname = type_name::<R>(); | |
for n in 0 .. R::BITS { | |
let idx = unsafe { BitIdx::<R>::new_unchecked(n) }; | |
let pos = O::at::<R>(idx); | |
if verbose { | |
#[cfg(feature = "std")] | |
println!( | |
"`<{} as BitOrder>::at::<{}>({})` produces {}", | |
oname, | |
mname, | |
n, | |
pos.value(), | |
); | |
} | |
assert!( | |
pos.value() < R::BITS, | |
"Error when verifying the implementation of `BitOrder` for `{}`: \ | |
Index {} produces a bit position ({}) that exceeds the type width \ | |
{}", | |
oname, | |
n, | |
pos.value(), | |
R::BITS, | |
); | |
let sel = O::select::<R>(idx); | |
if verbose { | |
#[cfg(feature = "std")] | |
println!( | |
"`<{} as BitOrder>::select::<{}>({})` produces {:b}", | |
oname, mname, n, sel, | |
); | |
} | |
assert_eq!( | |
sel.value().count_ones(), | |
1, | |
"Error when verifying the implementation of `BitOrder` for `{}`: \ | |
Index {} produces a bit selector ({:b}) that is not a one-hot mask", | |
oname, | |
n, | |
sel, | |
); | |
let shl = pos.select(); | |
assert_eq!( | |
sel, | |
shl, | |
"Error when verifying the implementation of `BitOrder` for `{}`: \ | |
Index {} produces a bit selector ({:b}) that is not equal to `1 \ | |
<< {}` ({:b})", | |
oname, | |
n, | |
sel, | |
pos.value(), | |
shl, | |
); | |
assert!( | |
!accum.test(sel), | |
"Error when verifying the implementation of `BitOrder` for `{}`: \ | |
Index {} produces a bit position ({}) that has already been \ | |
produced by a prior index", | |
oname, | |
n, | |
pos.value(), | |
); | |
accum.insert(sel); | |
if verbose { | |
#[cfg(feature = "std")] | |
println!( | |
"`<{} as BitOrder>::at::<{}>({})` accumulates {:b}", | |
oname, mname, n, accum, | |
); | |
} | |
} | |
assert_eq!( | |
accum, | |
BitMask::ALL, | |
"Error when verifying the implementation of `BitOrder` for `{}`: The \ | |
bit positions marked with a `0` here were never produced from an \ | |
index, despite all possible indices being passed in for translation: \ | |
{:b}", | |
oname, | |
accum, | |
); | |
for from in BitIdx::<R>::range_all() { | |
for upto in BitTail::<R>::range_from(from) { | |
let mask = O::mask(from, upto); | |
let check = BitIdx::<R>::range(from, upto) | |
.map(O::at::<R>) | |
.map(BitPos::<R>::select) | |
.sum::<BitMask<R>>(); | |
assert_eq!( | |
mask, | |
check, | |
"Error when verifying the implementation of `BitOrder` for \ | |
`{o}`: `{o}::mask::<{m}>({f}, {u})` produced {bad:b}, but \ | |
expected {good:b}", | |
o = oname, | |
m = mname, | |
f = from, | |
u = upto, | |
bad = mask, | |
good = check, | |
); | |
} | |
} | |
} | |
} | |
pub mod prelude {
	//! Glob-import target collecting the crate's most commonly used items.
	pub use crate::{
		array::BitArray,
		bitarr,
		bits,
		field::BitField,
		order::{
			BitOrder,
			LocalBits,
			Lsb0,
			Msb0,
		},
		slice::BitSlice,
		store::BitStore,
		view::BitView,
	};
	// Heap-backed containers are only available with an allocator.
	#[cfg(feature = "alloc")]
	pub use crate::{
		bitbox,
		bitvec,
		boxed::BitBox,
		vec::BitVec,
	};
}
pub mod ptr { | |
use crate::{ | |
access::BitAccess, | |
domain::Domain, | |
index::{ | |
BitIdx, | |
BitTail, | |
}, | |
mem::BitMemory, | |
order::BitOrder, | |
slice::BitSlice, | |
store::BitStore, | |
}; | |
use core::{ | |
any, | |
fmt::{ | |
self, | |
Debug, | |
Formatter, | |
Pointer, | |
}, | |
marker::PhantomData, | |
ptr::{ | |
self, | |
NonNull, | |
}, | |
}; | |
use wyz::fmt::FmtForward; | |
/// A non-null, well-typed element address.
///
/// Wraps `NonNull<T>` so that pointer arithmetic and casts elsewhere in
/// this module always start from a provably non-null address.
#[repr(transparent)]
#[derive(Eq, Hash, Ord, PartialEq, PartialOrd)]
pub(crate) struct Address<T>
where T: BitStore
{
	// The wrapped non-null pointer.
	inner: NonNull<T>,
}
impl<T> Address<T>
where T: BitStore
{
	/// Wraps a numeric address, returning `None` for zero.
	#[inline(always)]
	pub(crate) fn new(addr: usize) -> Option<Self> {
		NonNull::new(addr as *mut T).map(|inner| Self { inner })
	}
	/// Wraps a numeric address without the null check.
	///
	/// # Safety
	///
	/// `addr` must be non-zero.
	#[inline(always)]
	pub(crate) unsafe fn new_unchecked(addr: usize) -> Self {
		Self {
			inner: NonNull::new_unchecked(addr as *mut T),
		}
	}
	/// Casts the address to the element's alias-safe access type.
	#[inline(always)]
	pub(crate) fn to_access(self) -> *const T::Access {
		self.inner.as_ptr() as *const T::Access
	}
	/// Views the address as a shared raw pointer.
	#[inline(always)]
	pub(crate) fn to_const(self) -> *const T {
		self.inner.as_ptr() as *const T
	}
	/// Views the address as an unshared raw pointer.
	// `to_mut` on a `Copy` type takes `self` by value; the lint fires on
	// the `to_` prefix with a non-reference receiver.
	#[inline(always)]
	#[allow(clippy::wrong_self_convention)]
	pub(crate) fn to_mut(self) -> *mut T {
		self.inner.as_ptr()
	}
	/// Unwraps back into the underlying `NonNull` (allocator APIs need it).
	#[cfg(feature = "alloc")]
	pub(crate) fn to_nonnull(self) -> NonNull<T> {
		self.inner
	}
	/// Views the address as a bare integer.
	#[inline(always)]
	pub(crate) fn value(self) -> usize {
		self.inner.as_ptr() as usize
	}
}
impl<T> Clone for Address<T> | |
where T: BitStore | |
{ | |
#[inline(always)] | |
fn clone(&self) -> Self { | |
Self { ..*self } | |
} | |
} | |
impl<T> From<&T> for Address<T> | |
where T: BitStore | |
{ | |
#[inline(always)] | |
fn from(addr: &T) -> Self { | |
(addr as *const T).into() | |
} | |
} | |
impl<T> From<*const T> for Address<T> | |
where T: BitStore | |
{ | |
#[inline] | |
fn from(addr: *const T) -> Self { | |
Self::new(addr as usize).expect("Cannot use a null pointer") | |
} | |
} | |
impl<T> From<&mut T> for Address<T> | |
where T: BitStore | |
{ | |
#[inline(always)] | |
fn from(addr: &mut T) -> Self { | |
(addr as *mut T).into() | |
} | |
} | |
impl<T> From<*mut T> for Address<T> | |
where T: BitStore | |
{ | |
#[inline] | |
fn from(addr: *mut T) -> Self { | |
Self::new(addr as usize).expect("Cannot use a null pointer") | |
} | |
} | |
impl<T> Debug for Address<T> | |
where T: BitStore | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
Pointer::fmt(self, fmt) | |
} | |
} | |
impl<T> Pointer for Address<T> | |
where T: BitStore | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
Pointer::fmt(&self.to_const(), fmt) | |
} | |
} | |
impl<T> Copy for Address<T> where T: BitStore | |
{ | |
} | |
/// Builds a shared `*const BitSlice` pointer from an element address, a
/// starting bit index, and a bit count.
///
/// Returns `None` when `BitPtr::new` rejects the inputs (null or
/// under-aligned address, or an over-large bit count).
pub fn bitslice_from_raw_parts<O, T>(
	addr: *const T,
	head: BitIdx<T::Mem>,
	bits: usize,
) -> Option<*const BitSlice<O, T>>
where
	O: BitOrder,
	T: BitStore,
{
	BitPtr::new(addr, head, bits).map(BitPtr::to_bitslice_ptr)
}
/// Builds a mutable `*mut BitSlice` pointer from an element address, a
/// starting bit index, and a bit count.
///
/// Returns `None` under the same conditions as `bitslice_from_raw_parts`.
pub fn bitslice_from_raw_parts_mut<O, T>(
	addr: *mut T,
	head: BitIdx<T::Mem>,
	bits: usize,
) -> Option<*mut BitSlice<O, T>>
where
	O: BitOrder,
	T: BitStore,
{
	BitPtr::new(addr, head, bits).map(BitPtr::to_bitslice_ptr_mut)
}
/// The packed encoding of a bit-region pointer: element address, starting
/// bit index, and bit length, folded into two words.
///
/// Layout (see the `PTR_*`/`LEN_*` constants on the impl): the element
/// address occupies the high bits of `ptr`, with the high bits of the head
/// index packed into `ptr`'s low alignment bits; `len` holds the bit count
/// shifted left by three, with the low three head bits in its low bits.
#[repr(C)]
pub struct BitPtr<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	// Address + high head bits. Stored as `u8` so arithmetic is byte-wise.
	ptr: NonNull<u8>,
	// (bit count << 3) | low three head bits.
	len: usize,
	// The ordering is purely a type-level parameter.
	_or: PhantomData<O>,
	// Logically owns an element address of `T`.
	_ty: PhantomData<Address<T>>,
}
impl<O, T> BitPtr<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The canonical empty pointer: dangling but well-aligned, zero length.
	pub(crate) const EMPTY: Self = Self {
		ptr: unsafe {
			NonNull::new_unchecked(NonNull::<T>::dangling().as_ptr() as *mut u8)
		},
		len: 0,
		_or: PhantomData,
		_ty: PhantomData,
	};
	/// Number of low bits of `len` holding the low bits of the head index.
	pub(crate) const LEN_HEAD_BITS: usize = 3;
	/// Mask over the head bits stored in `len`.
	pub(crate) const LEN_HEAD_MASK: usize = 0b111;
	/// Mask over the address bits stored in `ptr`.
	pub(crate) const PTR_ADDR_MASK: usize = !0 << Self::PTR_HEAD_BITS;
	/// Number of low bits of `ptr` holding the high bits of the head index
	/// (zero for `u8` storage, whose head fits entirely in `len`).
	pub(crate) const PTR_HEAD_BITS: usize =
		T::Mem::INDX as usize - Self::LEN_HEAD_BITS;
	/// Mask over the head bits stored in `ptr`.
	pub(crate) const PTR_HEAD_MASK: usize = !Self::PTR_ADDR_MASK;
	/// The largest representable bit count, after ceding three `len` bits.
	pub(crate) const REGION_MAX_BITS: usize = !0 >> Self::LEN_HEAD_BITS;
	/// The largest element count a maximal region can touch (+1 for a
	/// non-zero head offset).
	pub(crate) const REGION_MAX_ELTS: usize =
		crate::mem::elts::<T::Mem>(Self::REGION_MAX_BITS) + 1;
	/// Builds a zero-length pointer at `addr`, preserving the address for
	/// later buffer reuse by owning containers.
	///
	/// A null `addr` degrades to `EMPTY`; an under-aligned one panics.
	#[cfg(feature = "alloc")]
	pub(crate) fn uninhabited(addr: impl Into<Address<T>>) -> Self {
		let addr = addr.into();
		// The low pointer bits must be free to carry head-index data.
		assert!(
			addr.value().trailing_zeros() as usize >= Self::PTR_HEAD_BITS,
			"Pointer {:p} does not satisfy minimum alignment requirements {}",
			addr.to_const(),
			Self::PTR_HEAD_BITS
		);
		Self {
			ptr: match NonNull::new(addr.to_mut() as *mut u8) {
				Some(nn) => nn,
				None => return Self::EMPTY,
			},
			len: 0,
			_or: PhantomData,
			_ty: PhantomData,
		}
	}
	/// Checked constructor: rejects null or under-aligned addresses,
	/// over-large bit counts, and regions that would wrap the address space.
	pub(crate) fn new(
		addr: impl Into<Address<T>>,
		head: BitIdx<T::Mem>,
		bits: usize,
	) -> Option<Self>
	{
		let addr = addr.into();
		if addr.to_const().is_null()
			|| (addr.value().trailing_zeros() as usize) < Self::PTR_HEAD_BITS
			|| bits > Self::REGION_MAX_BITS
		{
			return None;
		}
		// Reject regions whose last element would wrap past the end of the
		// address space.
		let elts = head.span(bits).0;
		let last = addr.to_const().wrapping_add(elts);
		if last < addr.to_const() {
			return None;
		}
		Some(unsafe { Self::new_unchecked(addr, head, bits) })
	}
	/// Packs the raw parts into the two-word encoding without validation.
	///
	/// # Safety
	///
	/// `addr` must be non-null and aligned to `T`; `bits` must not exceed
	/// `REGION_MAX_BITS`; the described region must be in bounds.
	#[inline]
	pub(crate) unsafe fn new_unchecked(
		addr: impl Into<Address<T>>,
		head: BitIdx<T::Mem>,
		bits: usize,
	) -> Self
	{
		let (addr, head) = (addr.into(), head.value() as usize);
		// High head bits ride in the pointer's low (alignment) bits; low
		// three head bits ride in the length's low bits.
		let ptr_data = addr.value() & Self::PTR_ADDR_MASK;
		let ptr_head = head >> Self::LEN_HEAD_BITS;
		let len_head = head & Self::LEN_HEAD_MASK;
		let len_bits = bits << Self::LEN_HEAD_BITS;
		let ptr = Address::new(ptr_data | ptr_head)
			.expect("Cannot use a null pointer");
		Self {
			ptr: NonNull::new_unchecked(ptr.to_mut()),
			len: len_bits | len_head,
			_or: PhantomData,
			_ty: PhantomData,
		}
	}
	/// Recovers the encoding from a `*const BitSlice` fat pointer, which
	/// carries the same two words as (data, len).
	#[inline]
	pub(crate) fn from_bitslice_ptr(raw: *const BitSlice<O, T>) -> Self {
		let slice_nn = match NonNull::new(raw as *const [()] as *mut [()]) {
			Some(nn) => nn,
			None => return Self::EMPTY,
		};
		let ptr =
			unsafe { NonNull::new_unchecked(slice_nn.as_ptr() as *mut u8) };
		// `[()]` occupies no memory, so `.len()` only reads the fat
		// pointer's length word.
		let len = unsafe { slice_nn.as_ref() }.len();
		Self {
			ptr,
			len,
			_or: PhantomData,
			_ty: PhantomData,
		}
	}
	/// Recovers the encoding from a `*mut BitSlice` fat pointer.
	#[inline(always)]
	#[cfg(feature = "alloc")]
	pub(crate) fn from_bitslice_ptr_mut(raw: *mut BitSlice<O, T>) -> Self {
		Self::from_bitslice_ptr(raw as *const BitSlice<O, T>)
	}
	/// Re-emits the encoding as a `*const BitSlice` fat pointer.
	#[inline]
	pub(crate) fn to_bitslice_ptr(self) -> *const BitSlice<O, T> {
		ptr::slice_from_raw_parts(
			self.ptr.as_ptr() as *const u8 as *const (),
			self.len,
		) as *const BitSlice<O, T>
	}
	/// Re-emits the encoding as a `*mut BitSlice` fat pointer.
	#[inline(always)]
	pub(crate) fn to_bitslice_ptr_mut(self) -> *mut BitSlice<O, T> {
		self.to_bitslice_ptr() as *mut BitSlice<O, T>
	}
	/// Re-emits the encoding as a shared `BitSlice` reference.
	#[inline(always)]
	pub(crate) fn to_bitslice_ref<'a>(self) -> &'a BitSlice<O, T> {
		unsafe { &*self.to_bitslice_ptr() }
	}
	/// Re-emits the encoding as an exclusive `BitSlice` reference.
	#[inline(always)]
	pub(crate) fn to_bitslice_mut<'a>(self) -> &'a mut BitSlice<O, T> {
		unsafe { &mut *self.to_bitslice_ptr_mut() }
	}
	/// Re-emits the encoding as a `NonNull<BitSlice>` for allocator use.
	#[inline]
	#[cfg(feature = "alloc")]
	pub(crate) fn to_nonnull(self) -> NonNull<BitSlice<O, T>> {
		self.to_bitslice_mut().into()
	}
	/// Splits the region into a `T`-typed prefix, a `U`-typed middle, and a
	/// `T`-typed suffix, mirroring `slice::align_to`.
	///
	/// # Safety
	///
	/// Same contract as `slice::align_to`: the caller must treat the
	/// reinterpreted middle region as a transmute of the underlying memory.
	pub(crate) unsafe fn align_to<U>(self) -> (Self, BitPtr<O, U>, Self)
	where U: BitStore {
		match self.to_bitslice_ref().domain() {
			// A region confined to one partial element cannot be realigned.
			Domain::Enclave { .. } => {
				return (self, BitPtr::EMPTY, BitPtr::EMPTY);
			},
			Domain::Region { head, body, tail } => {
				let (l, c, r) = body.align_to::<U::Mem>();
				let t_bits = T::Mem::BITS as usize;
				let u_bits = U::Mem::BITS as usize;
				let l_bits = l.len() * t_bits;
				let c_bits = c.len() * u_bits;
				let r_bits = r.len() * t_bits;
				let l_addr = l.as_ptr() as *const T;
				let c_addr = c.as_ptr() as *const U;
				let r_addr = r.as_ptr() as *const T;
				// The prefix keeps any partial head element plus the
				// unaligned leading full elements.
				let l_ptr = match head {
					Some((head, addr)) => BitPtr::new_unchecked(
						addr,
						head,
						t_bits - head.value() as usize + l_bits,
					),
					None => {
						if l_bits == 0 {
							BitPtr::EMPTY
						}
						else {
							BitPtr::new_unchecked(l_addr, BitIdx::ZERO, l_bits)
						}
					},
				};
				let c_ptr = if c_bits == 0 {
					BitPtr::EMPTY
				}
				else {
					BitPtr::new_unchecked(c_addr, BitIdx::ZERO, c_bits)
				};
				// The suffix keeps the unaligned trailing full elements
				// plus any partial tail element.
				let r_ptr = match tail {
					Some((addr, tail)) => BitPtr::new_unchecked(
						if r.is_empty() { addr } else { r_addr },
						BitIdx::ZERO,
						tail.value() as usize + r_bits,
					),
					None => {
						if !r.is_empty() {
							BitPtr::new_unchecked(r_addr, BitIdx::ZERO, r_bits)
						}
						else {
							BitPtr::EMPTY
						}
					},
				};
				(l_ptr, c_ptr, r_ptr)
			},
		}
	}
	/// Extracts the element address, clearing the packed head bits.
	#[inline]
	pub(crate) fn pointer(&self) -> Address<T> {
		unsafe {
			Address::new_unchecked(
				self.ptr.as_ptr() as usize & Self::PTR_ADDR_MASK,
			)
		}
	}
	/// Replaces the element address, preserving the packed head bits.
	///
	/// # Safety
	///
	/// `addr` must be a valid, suitably aligned element address for the
	/// region this pointer describes.
	#[inline]
	#[cfg(feature = "alloc")]
	pub(crate) unsafe fn set_pointer(&mut self, addr: impl Into<Address<T>>) {
		let addr = addr.into();
		if addr.to_const().is_null() {
			*self = Self::EMPTY;
			return;
		}
		let mut addr_value = addr.value();
		addr_value &= Self::PTR_ADDR_MASK;
		// Carry over the head bits currently stored in the pointer word.
		addr_value |= self.ptr.as_ptr() as usize & Self::PTR_HEAD_MASK;
		let addr = Address::new_unchecked(addr_value);
		self.ptr = NonNull::new_unchecked(addr.to_mut() as *mut u8);
	}
	/// Reassembles the head index from its two packed halves.
	pub(crate) fn head(&self) -> BitIdx<T::Mem> {
		let ptr = self.ptr.as_ptr() as usize;
		let ptr_head = (ptr & Self::PTR_HEAD_MASK) << Self::LEN_HEAD_BITS;
		let len_head = self.len & Self::LEN_HEAD_MASK;
		unsafe { BitIdx::new_unchecked((ptr_head | len_head) as u8) }
	}
	/// Overwrites the packed head index in both words.
	///
	/// # Safety
	///
	/// `head` must remain a valid starting index for the region.
	#[cfg(feature = "alloc")]
	pub(crate) unsafe fn set_head(&mut self, head: BitIdx<T::Mem>) {
		let head = head.value() as usize;
		let mut ptr = self.ptr.as_ptr() as usize;
		ptr &= Self::PTR_ADDR_MASK;
		ptr |= head >> Self::LEN_HEAD_BITS;
		self.ptr = NonNull::new_unchecked(ptr as *mut u8);
		self.len &= !Self::LEN_HEAD_MASK;
		self.len |= head & Self::LEN_HEAD_MASK;
	}
	/// Extracts the bit length from the packed length word.
	#[inline]
	pub(crate) fn len(&self) -> usize {
		self.len >> Self::LEN_HEAD_BITS
	}
	/// Overwrites the bit length, preserving the packed head bits.
	///
	/// # Safety
	///
	/// `new_len` must describe a region that stays inside the allocation.
	#[inline]
	pub(crate) unsafe fn set_len(&mut self, new_len: usize) {
		debug_assert!(
			new_len <= Self::REGION_MAX_BITS,
			"Length {} out of range",
			new_len,
		);
		self.len &= Self::LEN_HEAD_MASK;
		self.len |= new_len << Self::LEN_HEAD_BITS;
	}
	/// Unpacks all three components at once.
	#[inline]
	pub(crate) fn raw_parts(&self) -> (Address<T>, BitIdx<T::Mem>, usize) {
		(self.pointer(), self.head(), self.len())
	}
	/// Counts the memory elements the region touches (ceiling of total bits
	/// over the element width).
	pub(crate) fn elements(&self) -> usize {
		let total = self.len() + self.head().value() as usize;
		let base = total >> T::Mem::INDX;
		let tail = total as u8 & T::Mem::MASK;
		base + (tail != 0) as usize
	}
	/// Computes the tail counter of the region's final element.
	#[inline]
	pub(crate) fn tail(&self) -> BitTail<T::Mem> {
		let (head, len) = (self.head(), self.len());
		if head.value() == 0 && len == 0 {
			return BitTail::ZERO;
		}
		// An end on an element boundary reports the full width, not zero.
		let tail = (head.value() as usize + len) & T::Mem::MASK as usize;
		unsafe {
			BitTail::new_unchecked(
				(((tail == 0) as u8) << T::Mem::INDX) | tail as u8,
			)
		}
	}
	/// Advances the head by one bit, stepping the element address forward
	/// on wrap.
	///
	/// # Safety
	///
	/// The region must have at least one live bit; the caller must also
	/// shrink the length separately.
	#[inline]
	pub(crate) unsafe fn incr_head(&mut self) {
		let head = self.head().value() as usize + 1;
		self.len &= !Self::LEN_HEAD_MASK;
		self.len |= head & Self::LEN_HEAD_MASK;
		// Any carry out of the three low bits moves the pointer word.
		let head = head >> Self::LEN_HEAD_BITS;
		let mut ptr = self.ptr.as_ptr() as usize;
		ptr &= Self::PTR_ADDR_MASK;
		ptr += head;
		self.ptr = NonNull::new_unchecked(ptr as *mut u8);
	}
	/// Reads the bit at `index` (relative to the head).
	///
	/// # Safety
	///
	/// `index` must be within the region's length.
	#[inline]
	pub(crate) unsafe fn read(&self, index: usize) -> bool {
		let (elt, bit) = self.head().offset(index as isize);
		let base = self.pointer().to_const();
		(&*base.offset(elt)).get_bit::<O>(bit)
	}
	/// Writes the bit at `index` (relative to the head) through the
	/// alias-safe access type.
	///
	/// # Safety
	///
	/// `index` must be within the region's length, and the caller must hold
	/// write permission for the region.
	#[inline]
	pub(crate) unsafe fn write(&self, index: usize, value: bool) {
		let (elt, bit) = self.head().offset(index as isize);
		let base = self.pointer().to_access();
		(&*base.offset(elt)).write_bit::<O>(bit, value);
	}
	/// Measures the (element, bit) distance from `self`'s start to
	/// `other`'s start.
	///
	/// # Safety
	///
	/// Both pointers must describe regions within the same allocation for
	/// the difference to be meaningful.
	pub(crate) unsafe fn ptr_diff(&self, other: &Self) -> (isize, i8) {
		let self_ptr = self.pointer();
		let other_ptr = other.pointer();
		let elts = other_ptr
			.value()
			.wrapping_sub(self_ptr.value())
			.wrapping_div(core::mem::size_of::<T>()) as isize;
		let bits = other.head().value() as i8 - self.head().value() as i8;
		(elts, bits)
	}
	/// Shared debug renderer for the pointer family: writes the type
	/// header, then the addr/head/bits fields plus any caller extras.
	#[inline]
	pub(crate) fn render<'a>(
		&'a self,
		fmt: &'a mut Formatter,
		name: &'a str,
		fields: impl IntoIterator<Item = &'a (&'a str, &'a dyn Debug)>,
	) -> fmt::Result
	{
		write!(
			fmt,
			"Bit{}<{}, {}>",
			name,
			any::type_name::<O>(),
			any::type_name::<T::Mem>()
		)?;
		let mut builder = fmt.debug_struct("");
		builder
			.field("addr", &self.pointer().fmt_pointer())
			.field("head", &self.head().fmt_binary())
			.field("bits", &self.len());
		for (name, value) in fields {
			builder.field(name, value);
		}
		builder.finish()
	}
}
impl<O, T> Clone for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
fn clone(&self) -> Self { | |
Self { ..*self } | |
} | |
} | |
impl<O, T> Eq for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
} | |
impl<O, T, U> PartialEq<BitPtr<O, U>> for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
U: BitStore, | |
{ | |
fn eq(&self, other: &BitPtr<O, U>) -> bool { | |
let (addr_a, head_a, bits_a) = self.raw_parts(); | |
let (addr_b, head_b, bits_b) = other.raw_parts(); | |
T::Mem::BITS == U::Mem::BITS | |
&& addr_a.value() == addr_b.value() | |
&& head_a.value() == head_b.value() | |
&& bits_a == bits_b | |
} | |
} | |
impl<O, T> Default for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline(always)] | |
fn default() -> Self { | |
Self::EMPTY | |
} | |
} | |
impl<O, T> Debug for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
Pointer::fmt(self, fmt) | |
} | |
} | |
impl<O, T> Pointer for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline(always)] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
self.render(fmt, "Ptr", None) | |
} | |
} | |
impl<O, T> Copy for BitPtr<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
} | |
} | |
pub mod slice { | |
use crate::{ | |
access::{ | |
BitAccess, | |
BitSafe, | |
}, | |
devel as dvl, | |
domain::{ | |
BitDomain, | |
BitDomainMut, | |
Domain, | |
DomainMut, | |
}, | |
index::{ | |
BitIdx, | |
BitMask, | |
}, | |
mem::{ | |
BitMemory, | |
BitRegister, | |
}, | |
order::{ | |
BitOrder, | |
Lsb0, | |
Msb0, | |
}, | |
ptr::BitPtr, | |
store::BitStore, | |
}; | |
use core::{ | |
any::TypeId, | |
marker::PhantomData, | |
ops::RangeBounds, | |
ptr, | |
slice, | |
}; | |
use funty::IsInteger; | |
use tap::pipe::Pipe; | |
#[cfg(feature = "alloc")] | |
use crate::vec::BitVec; | |
/// A region of memory addressable at single-bit granularity.
///
/// This is an unsized slice type; values exist only behind references,
/// whose fat-pointer words carry the `BitPtr` encoding. The `[()]` payload
/// keeps the type unsized without occupying memory.
#[repr(transparent)]
pub struct BitSlice<O = Lsb0, T = usize>
where
	O: BitOrder,
	T: BitStore,
{
	// Type-level ordering parameter.
	_ord: PhantomData<O>,
	// Type-level storage-element parameter.
	_typ: PhantomData<[T]>,
	// Zero-sized unsized payload; the real data lives behind the pointer.
	_mem: [()],
}
impl<O, T> BitSlice<O, T> | |
where | |
O: BitOrder, | |
T: BitRegister + BitStore, | |
{ | |
#[inline] | |
pub fn from_element(elem: &T) -> &Self { | |
unsafe { | |
BitPtr::new_unchecked(elem, BitIdx::ZERO, T::Mem::BITS as usize) | |
} | |
.to_bitslice_ref() | |
} | |
#[inline] | |
pub fn from_element_mut(elem: &mut T) -> &mut Self { | |
unsafe { | |
BitPtr::new_unchecked(elem, BitIdx::ZERO, T::Mem::BITS as usize) | |
} | |
.to_bitslice_mut() | |
} | |
#[inline] | |
pub fn from_slice(slice: &[T]) -> Option<&Self> { | |
let elts = slice.len(); | |
if elts >= Self::MAX_ELTS { | |
return None; | |
} | |
Some(unsafe { Self::from_slice_unchecked(slice) }) | |
} | |
#[inline] | |
pub fn from_slice_mut(slice: &mut [T]) -> Option<&mut Self> { | |
let elts = slice.len(); | |
if elts >= Self::MAX_ELTS { | |
return None; | |
} | |
Some(unsafe { Self::from_slice_unchecked_mut(slice) }) | |
} | |
#[inline] | |
pub unsafe fn from_slice_unchecked(slice: &[T]) -> &Self { | |
let bits = slice.len().wrapping_mul(T::Mem::BITS as usize); | |
BitPtr::new_unchecked(slice.as_ptr(), BitIdx::ZERO, bits) | |
.to_bitslice_ref() | |
} | |
#[inline] | |
pub unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self { | |
let bits = slice.len().wrapping_mul(T::Mem::BITS as usize); | |
BitPtr::new_unchecked(slice.as_ptr(), BitIdx::ZERO, bits) | |
.to_bitslice_mut() | |
} | |
} | |
impl<O, T> BitSlice<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
	/// Produces the canonical empty shared bit-slice.
	#[inline(always)]
	pub fn empty<'a>() -> &'a Self {
		BitPtr::EMPTY.to_bitslice_ref()
	}
	/// Produces the canonical empty mutable bit-slice.
	#[inline(always)]
	pub fn empty_mut<'a>() -> &'a mut Self {
		BitPtr::EMPTY.to_bitslice_mut()
	}
	/// Sets the bit at `index` to `value`.
	///
	/// # Panics
	///
	/// Panics when `index` is out of range.
	#[inline]
	pub fn set(&mut self, index: usize, value: bool) {
		let len = self.len();
		assert!(index < len, "Index out of range: {} >= {}", index, len);
		unsafe {
			self.set_unchecked(index, value);
		}
	}
	/// Tests whether any bit in the slice is set.
	///
	/// Works element-at-a-time over the memory domain, masking off the
	/// dead bits of partial head/tail elements.
	#[inline]
	pub fn any(&self) -> bool {
		match self.domain() {
			// Region confined to one element: mask and test it directly.
			Domain::Enclave { head, elem, tail } => {
				O::mask(head, tail) & elem.load_value() != BitMask::ZERO
			},
			// Partial head, full body, partial tail — short-circuits.
			Domain::Region { head, body, tail } => {
				head.map_or(false, |(head, elem)| {
					O::mask(head, None) & elem.load_value() != BitMask::ZERO
				}) || body.iter().copied().any(|e| e != T::Mem::ZERO)
					|| tail.map_or(false, |(elem, tail)| {
						O::mask(None, tail) & elem.load_value() != BitMask::ZERO
					})
			},
		}
	}
	/// Tests whether every bit in the slice is set.
	///
	/// Dead bits in partial elements are force-set by `!mask` so they do
	/// not cause false negatives.
	#[inline]
	pub fn all(&self) -> bool {
		match self.domain() {
			Domain::Enclave { head, elem, tail } => {
				!O::mask(head, tail) | elem.load_value() == BitMask::ALL
			},
			Domain::Region { head, body, tail } => {
				head.map_or(true, |(head, elem)| {
					!O::mask(head, None) | elem.load_value() == BitMask::ALL
				}) && body.iter().copied().all(|e| e == T::Mem::ALL)
					&& tail.map_or(true, |(elem, tail)| {
						!O::mask(None, tail) | elem.load_value() == BitMask::ALL
					})
			},
		}
	}
	/// Tests whether no bit in the slice is set.
	#[inline(always)]
	pub fn not_any(&self) -> bool {
		!self.any()
	}
	/// Tests whether at least one bit in the slice is cleared.
	#[inline(always)]
	pub fn not_all(&self) -> bool {
		!self.all()
	}
	/// Tests whether the slice has both set and cleared bits.
	#[inline]
	pub fn some(&self) -> bool {
		self.any() && self.not_all()
	}
#[inline] | |
pub fn count_ones(&self) -> usize { | |
match self.domain() { | |
Domain::Enclave { head, elem, tail } => (O::mask(head, tail) | |
& elem.load_value()) | |
.value() | |
.count_ones() as usize, | |
Domain::Region { head, body, tail } => { | |
head.map_or(0, |(head, elem)| { | |
(O::mask(head, None) & elem.load_value()) | |
.value() | |
.count_ones() as usize | |
}) + body | |
.iter() | |
.copied() | |
.map(|e| e.count_ones() as usize) | |
.sum::<usize>() + tail.map_or(0, |(elem, tail)| { | |
(O::mask(None, tail) & elem.load_value()) | |
.value() | |
.count_ones() as usize | |
}) | |
}, | |
} | |
} | |
#[inline] | |
pub fn count_zeros(&self) -> usize { | |
match self.domain() { | |
Domain::Enclave { head, elem, tail } => (!O::mask(head, tail) | |
| elem.load_value()) | |
.value() | |
.count_zeros() as usize, | |
Domain::Region { head, body, tail } => { | |
head.map_or(0, |(head, elem)| { | |
(!O::mask(head, None) | elem.load_value()) | |
.value() | |
.count_zeros() as usize | |
}) + body | |
.iter() | |
.copied() | |
.map(|e| e.count_zeros() as usize) | |
.sum::<usize>() + tail.map_or(0, |(elem, tail)| { | |
(!O::mask(None, tail) | elem.load_value()) | |
.value() | |
.count_zeros() as usize | |
}) | |
}, | |
} | |
} | |
#[inline] | |
pub fn clone_from_bitslice<O2, T2>(&mut self, src: &BitSlice<O2, T2>) | |
where | |
O2: BitOrder, | |
T2: BitStore, | |
{ | |
assert_eq!( | |
self.len(), | |
src.len(), | |
"Cloning between slices requires equal lengths" | |
); | |
if TypeId::of::<O>() == TypeId::of::<O2>() | |
&& TypeId::of::<T>() == TypeId::of::<T2>() | |
{ | |
let that = src as *const _ as *const _; | |
unsafe { | |
self.copy_from_bitslice(&*that); | |
} | |
} | |
else { | |
for (to, from) in | |
self.iter_mut().remove_alias().zip(src.iter().copied()) | |
{ | |
to.set(from); | |
} | |
} | |
} | |
#[inline] | |
pub fn copy_from_bitslice(&mut self, src: &Self) { | |
assert_eq!( | |
self.len(), | |
src.len(), | |
"Copying between slices requires equal lengths" | |
); | |
let (d_head, s_head) = (self.bitptr().head(), src.bitptr().head()); | |
if d_head == s_head { | |
match (self.domain_mut(), src.domain()) { | |
( | |
DomainMut::Enclave { | |
elem: d_elem, tail, .. | |
}, | |
Domain::Enclave { elem: s_elem, .. }, | |
) => { | |
let mask = O::mask(d_head, tail); | |
d_elem.clear_bits(mask); | |
d_elem.set_bits(mask & s_elem.load_value()); | |
}, | |
( | |
DomainMut::Region { | |
head: d_head, | |
body: d_body, | |
tail: d_tail, | |
}, | |
Domain::Region { | |
head: s_head, | |
body: s_body, | |
tail: s_tail, | |
}, | |
) => { | |
if let (Some((h_idx, dh_elem)), Some((_, sh_elem))) = | |
(d_head, s_head) | |
{ | |
let mask = O::mask(h_idx, None); | |
dh_elem.clear_bits(mask); | |
dh_elem.set_bits(mask & sh_elem.load_value()); | |
} | |
d_body.copy_from_slice(s_body); | |
if let (Some((dt_elem, t_idx)), Some((st_elem, _))) = | |
(d_tail, s_tail) | |
{ | |
let mask = O::mask(None, t_idx); | |
dt_elem.clear_bits(mask); | |
dt_elem.set_bits(mask & st_elem.load_value()); | |
} | |
}, | |
_ => unreachable!( | |
"Slices with equal type parameters, lengths, and heads \ | |
will always have equal domains" | |
), | |
} | |
} | |
else if TypeId::of::<O>() == TypeId::of::<Lsb0>() { | |
let this: &mut BitSlice<Lsb0, T> = | |
unsafe { &mut *(self as *mut _ as *mut _) }; | |
let that: &BitSlice<Lsb0, T> = | |
unsafe { &*(src as *const _ as *const _) }; | |
this.sp_copy_from_bitslice(that); | |
} | |
else if TypeId::of::<O>() == TypeId::of::<Msb0>() { | |
let this: &mut BitSlice<Msb0, T> = | |
unsafe { &mut *(self as *mut _ as *mut _) }; | |
let that: &BitSlice<Msb0, T> = | |
unsafe { &*(src as *const _ as *const _) }; | |
this.sp_copy_from_bitslice(that); | |
} | |
else { | |
for (to, from) in | |
self.iter_mut().remove_alias().zip(src.iter().copied()) | |
{ | |
to.set(from); | |
} | |
} | |
} | |
#[inline] | |
pub fn swap_with_bitslice<O2, T2>(&mut self, other: &mut BitSlice<O2, T2>) | |
where | |
O2: BitOrder, | |
T2: BitStore, | |
{ | |
let len = self.len(); | |
assert_eq!(len, other.len()); | |
for (to, from) in self.iter_mut().zip(other.iter_mut()) { | |
let (this, that) = (*to, *from); | |
unsafe { BitMut::<O, T>::remove_alias(to) }.set(that); | |
unsafe { BitMut::<O2, T2>::remove_alias(from) }.set(this); | |
} | |
} | |
#[inline] | |
pub fn shift_left(&mut self, by: usize) { | |
let len = self.len(); | |
if by == 0 { | |
return; | |
} | |
assert!( | |
by < len, | |
"Cannot shift a slice by more than its length: {} exceeds {}", | |
by, | |
len | |
); | |
unsafe { | |
self.copy_within_unchecked(by .., 0); | |
} | |
let trunc = len - by; | |
self[trunc ..].set_all(false); | |
} | |
#[inline] | |
pub fn shift_right(&mut self, by: usize) { | |
let len = self.len(); | |
if by == 0 { | |
return; | |
} | |
assert!( | |
by < len, | |
"Cannot shift a slice by more than its length: {} exceeds {}", | |
by, | |
len | |
); | |
let trunc = len - by; | |
unsafe { | |
self.copy_within_unchecked(.. trunc, by); | |
} | |
self[.. by].set_all(false); | |
} | |
#[inline] | |
pub fn set_all(&mut self, value: bool) { | |
let setter = <T::Access>::get_writers(value); | |
match self.domain_mut() { | |
DomainMut::Enclave { head, elem, tail } => { | |
setter(elem, O::mask(head, tail)); | |
}, | |
DomainMut::Region { head, body, tail } => { | |
if let Some((head, elem)) = head { | |
setter(elem, O::mask(head, None)); | |
} | |
unsafe { | |
ptr::write_bytes( | |
body.as_mut_ptr(), | |
[0, !0][value as usize], | |
body.len(), | |
); | |
} | |
if let Some((elem, tail)) = tail { | |
setter(elem, O::mask(None, tail)); | |
} | |
}, | |
} | |
} | |
#[inline] | |
pub fn for_each<F>(&mut self, mut func: F) | |
where F: FnMut(usize, bool) -> bool { | |
for idx in 0 .. self.len() { | |
unsafe { | |
let tmp = *self.get_unchecked(idx); | |
let new = func(idx, tmp); | |
self.set_unchecked(idx, new); | |
} | |
} | |
} | |
pub fn offset_from(&self, other: &Self) -> isize { | |
let (elts, bits) = unsafe { self.bitptr().ptr_diff(&other.bitptr()) }; | |
elts.saturating_mul(T::Mem::BITS as isize) | |
.saturating_add(bits as isize) | |
} | |
pub fn electrical_distance(&self, other: &Self) -> isize { | |
let this = self.bitptr(); | |
let that = other.bitptr(); | |
let (elts, bits) = unsafe { | |
let this = BitPtr::<O, T>::new_unchecked( | |
this.pointer(), | |
BitIdx::new_unchecked(this.head().position::<O>().value()), | |
1, | |
); | |
let that = BitPtr::<O, T>::new_unchecked( | |
that.pointer(), | |
BitIdx::new_unchecked(that.head().position::<O>().value()), | |
1, | |
); | |
this.ptr_diff(&that) | |
}; | |
elts.saturating_mul(T::Mem::BITS as isize) | |
.saturating_add(bits as isize) | |
} | |
} | |
/// Unchecked operations: callers are responsible for bounds.
impl<O, T> BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Writes a bit without bounds checking. Caller must ensure
	/// `index < self.len()`.
	#[inline]
	pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
		self.bitptr().write(index, value);
	}
	/// Swaps two bits without bounds checking. Caller must ensure both
	/// `a` and `b` are in bounds.
	#[inline]
	pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
		let bit_a = *self.get_unchecked(a);
		let bit_b = *self.get_unchecked(b);
		self.set_unchecked(a, bit_b);
		self.set_unchecked(b, bit_a);
	}
	/// Splits at `mid` without bounds checking. Caller must ensure
	/// `mid <= self.len()`.
	#[inline]
	pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&Self, &Self) {
		match mid {
			0 => (Self::empty(), self),
			n if n == self.len() => (self, Self::empty()),
			_ => (self.get_unchecked(.. mid), self.get_unchecked(mid ..)),
		}
	}
	/// Mutable counterpart of `split_at_unchecked`; both halves are
	/// alias-tainted because they may share an edge element.
	#[inline]
	#[allow(clippy::type_complexity)]
	pub unsafe fn split_at_unchecked_mut(
		&mut self,
		mid: usize,
	) -> (&mut BitSlice<O, T::Alias>, &mut BitSlice<O, T::Alias>)
	{
		let bp = self.alias_mut().bitptr();
		match mid {
			0 => (BitSlice::empty_mut(), bp.to_bitslice_mut()),
			n if n == self.len() => {
				(bp.to_bitslice_mut(), BitSlice::empty_mut())
			},
			_ => (
				bp.to_bitslice_mut().get_unchecked_mut(.. mid),
				bp.to_bitslice_mut().get_unchecked_mut(mid ..),
			),
		}
	}
	/// Copies `src` to `dest ..` within the slice, without bounds
	/// checks. Known orderings use specialized batch moves; the generic
	/// path copies bit-by-bit, reversed when the ranges overlap so the
	/// source is never clobbered before it is read.
	#[inline]
	pub unsafe fn copy_within_unchecked<R>(&mut self, src: R, dest: usize)
	where R: RangeBounds<usize> {
		if TypeId::of::<O>() == TypeId::of::<Lsb0>() {
			// `O` proven to be `Lsb0`; recast for the specialization.
			let this: &mut BitSlice<Lsb0, T> = &mut *(self as *mut _ as *mut _);
			this.sp_copy_within_unchecked(src, dest);
		}
		else if TypeId::of::<O>() == TypeId::of::<Msb0>() {
			let this: &mut BitSlice<Msb0, T> = &mut *(self as *mut _ as *mut _);
			this.sp_copy_within_unchecked(src, dest);
		}
		else {
			let source = dvl::normalize_range(src, self.len());
			let source_len = source.len();
			// Overlap test: iterate back-to-front when `dest` lands
			// inside the source range.
			let rev = source.contains(&dest);
			let iter = source.zip(dest .. dest + source_len);
			if rev {
				for (from, to) in iter.rev() {
					let bit = *self.get_unchecked(from);
					self.set_unchecked(to, bit);
				}
			}
			else {
				for (from, to) in iter {
					let bit = *self.get_unchecked(from);
					self.set_unchecked(to, bit);
				}
			}
		}
	}
}
/// Pointer and memory-domain views.
impl<O, T> BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Returns the encoded region pointer as a raw `*const Self`.
	#[inline(always)]
	pub fn as_bitptr(&self) -> *const Self {
		self as *const Self
	}
	/// Returns the encoded region pointer as a raw `*mut Self`.
	#[inline(always)]
	pub fn as_mut_bitptr(&mut self) -> *mut Self {
		self as *mut Self
	}
	/// Views the slice partitioned into edge/body sub-bitslices.
	#[inline(always)]
	pub fn bit_domain(&self) -> BitDomain<O, T> {
		BitDomain::new(self)
	}
	/// Mutable counterpart of `bit_domain`.
	#[inline(always)]
	pub fn bit_domain_mut(&mut self) -> BitDomainMut<O, T> {
		BitDomainMut::new(self)
	}
	/// Views the underlying memory elements: partial edges vs. the
	/// fully-spanned interior.
	#[inline(always)]
	pub fn domain(&self) -> Domain<T> {
		Domain::new(self)
	}
	/// Mutable counterpart of `domain`.
	#[inline(always)]
	pub fn domain_mut(&mut self) -> DomainMut<T> {
		DomainMut::new(self)
	}
	/// Views every element touched by the slice, including dead bits in
	/// partial edge elements.
	#[inline]
	pub fn as_slice(&self) -> &[T] {
		let bitptr = self.bitptr();
		let (base, elts) = (bitptr.pointer().to_const(), bitptr.elements());
		// SAFETY: the encoded pointer always describes `elts` valid,
		// contiguous elements starting at `base`.
		unsafe { slice::from_raw_parts(base, elts) }
	}
}
/// Crate-internal plumbing: pointer decode and alias-marker casts.
impl<O, T> BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Decodes `&self` back into the structured `BitPtr` descriptor.
	#[inline]
	pub(crate) fn bitptr(&self) -> BitPtr<O, T> {
		self.as_bitptr().pipe(BitPtr::from_bitslice_ptr)
	}
	/// Reinterprets the store type as its alias-tainted form. Sound
	/// because `T::Alias` has identical layout with added synchrony.
	#[inline(always)]
	pub(crate) fn alias(&self) -> &BitSlice<O, T::Alias> {
		unsafe { &*(self.as_bitptr() as *const BitSlice<O, T::Alias>) }
	}
	/// Mutable counterpart of `alias`.
	#[inline(always)]
	pub(crate) fn alias_mut(&mut self) -> &mut BitSlice<O, T::Alias> {
		unsafe { &mut *(self.as_mut_bitptr() as *mut BitSlice<O, T::Alias>) }
	}
	/// Strips the alias marker. Caller must guarantee the region is not
	/// actually shared with another handle.
	#[inline(always)]
	pub(crate) unsafe fn unalias_mut(
		this: &mut BitSlice<O, T::Alias>,
	) -> &mut Self {
		&mut *(this as *mut BitSlice<O, T::Alias> as *mut Self)
	}
	/// `split_at_unchecked_mut` followed by alias removal, for callers
	/// that already know the halves cannot contend on an edge element.
	#[inline]
	pub(crate) unsafe fn split_at_unchecked_mut_noalias(
		&mut self,
		mid: usize,
	) -> (&mut Self, &mut Self)
	{
		let (head, tail) = self.split_at_unchecked_mut(mid);
		(Self::unalias_mut(head), Self::unalias_mut(tail))
	}
}
/// Available only when `T` is already an alias-safe store, so splitting
/// cannot introduce unsynchronized edge contention.
impl<O, T> BitSlice<O, T>
where
	O: BitOrder,
	T: BitSafe + BitStore,
{
	/// Checked split that keeps the original (already alias-safe) store
	/// type on both halves instead of re-tainting it.
	#[inline]
	pub fn split_at_aliased_mut(
		&mut self,
		mid: usize,
	) -> (&mut Self, &mut Self)
	{
		let (head, tail) = self.split_at_mut(mid);
		// SAFETY: `T: BitSafe` means the store is already synchronized,
		// so collapsing `T::Alias` back to `T` loses nothing.
		unsafe { (Self::unalias_mut(head), Self::unalias_mut(tail)) }
	}
}
/// Capacity limits imposed by the encoded-pointer representation.
impl<O, T> BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Maximum number of bits a single `BitSlice` region may contain.
	pub const MAX_BITS: usize = BitPtr::<O, T>::REGION_MAX_BITS;
	/// Maximum number of `T` elements a single region may touch.
	pub const MAX_ELTS: usize = BitPtr::<O, T>::REGION_MAX_ELTS;
}
#[cfg(feature = "alloc")] | |
impl<O, T> BitSlice<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline] | |
pub fn to_bitvec(&self) -> BitVec<O, T::Mem> { | |
let vec: alloc::vec::Vec<_> = | |
self.as_slice().iter().map(BitStore::load_value).collect(); | |
let mut bitptr = self.bitptr(); | |
unsafe { | |
bitptr.set_pointer(vec.as_ptr() as *const T); | |
} | |
let ptr = bitptr.to_bitslice_ptr_mut(); | |
let capa = vec.capacity(); | |
core::mem::forget(vec); | |
unsafe { BitVec::from_raw_parts(ptr as *mut BitSlice<O, T::Mem>, capa) } | |
} | |
} | |
/// Constructs a bit-slice reference from raw parts: an element address,
/// a starting bit index within the first element, and a total bit count.
/// Returns `None` when `head` is not a valid index for `T` or when
/// `BitPtr::new` rejects the address/length combination.
///
/// Caller must guarantee `addr` is valid for reads of the whole region
/// for the produced lifetime `'a`.
#[inline]
pub unsafe fn bits_from_raw_parts<'a, O, T>(
	addr: *const T,
	head: u8,
	bits: usize,
) -> Option<&'a BitSlice<O, T>>
where
	O: BitOrder,
	T: BitStore,
{
	let head = crate::index::BitIdx::new(head).ok()?;
	BitPtr::new(addr, head, bits).map(BitPtr::to_bitslice_ref)
}
/// Mutable counterpart of `bits_from_raw_parts`.
///
/// Caller must guarantee `addr` is valid for reads and writes of the
/// whole region, and exclusively held, for the produced lifetime `'a`.
#[inline]
pub unsafe fn bits_from_raw_parts_mut<'a, O, T>(
	addr: *mut T,
	head: u8,
	bits: usize,
) -> Option<&'a mut BitSlice<O, T>>
where
	O: BitOrder,
	T: BitStore,
{
	let head = crate::index::BitIdx::new(head).ok()?;
	BitPtr::new(addr, head, bits).map(BitPtr::to_bitslice_mut)
}
mod api { | |
use crate::{ | |
array::BitArray, | |
devel as dvl, | |
mem::{ | |
BitMemory, | |
BitRegister, | |
}, | |
order::BitOrder, | |
ptr::BitPtr, | |
slice::{ | |
iter::{ | |
Chunks, | |
ChunksExact, | |
ChunksExactMut, | |
ChunksMut, | |
Iter, | |
IterMut, | |
RChunks, | |
RChunksExact, | |
RChunksExactMut, | |
RChunksMut, | |
RSplit, | |
RSplitMut, | |
RSplitN, | |
RSplitNMut, | |
Split, | |
SplitMut, | |
SplitN, | |
SplitNMut, | |
Windows, | |
}, | |
BitMut, | |
BitSlice, | |
}, | |
store::BitStore, | |
}; | |
use core::{ | |
cmp, | |
ops::{ | |
Range, | |
RangeBounds, | |
RangeFrom, | |
RangeFull, | |
RangeInclusive, | |
RangeTo, | |
RangeToInclusive, | |
}, | |
}; | |
use tap::tap::Tap; | |
#[cfg(feature = "alloc")] | |
use crate::vec::BitVec; | |
	/// Port of the `[bool]` inherent API onto `BitSlice`. Signatures and
	/// panic conditions mirror `core::slice` wherever possible.
	impl<O, T> BitSlice<O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		/// Returns the number of bits in the slice.
		#[inline]
		pub fn len(&self) -> usize {
			self.bitptr().len()
		}
		/// Tests whether the slice contains no bits.
		#[inline]
		pub fn is_empty(&self) -> bool {
			self.bitptr().len() == 0
		}
		/// Returns the first bit, if any.
		#[inline]
		pub fn first(&self) -> Option<&bool> {
			self.get(0)
		}
		/// Returns a write proxy to the first bit, if any.
		#[inline]
		pub fn first_mut(&mut self) -> Option<BitMut<O, T>> {
			self.get_mut(0)
		}
		/// Splits off the first bit and the remainder, if non-empty.
		#[inline]
		pub fn split_first(&self) -> Option<(&bool, &Self)> {
			match self.len() {
				0 => None,
				_ => unsafe {
					// SAFETY: non-empty, so `1 <= len`.
					let (head, rest) = self.split_at_unchecked(1);
					Some((head.get_unchecked(0), rest))
				},
			}
		}
		/// Mutable counterpart of `split_first`; both parts are
		/// alias-tainted by the split.
		#[inline]
		#[allow(clippy::type_complexity)]
		pub fn split_first_mut(
			&mut self,
		) -> Option<(BitMut<O, T::Alias>, &mut BitSlice<O, T::Alias>)> {
			match self.len() {
				0 => None,
				_ => unsafe {
					let (head, rest) = self.split_at_unchecked_mut(1);
					Some((head.get_unchecked_mut(0), rest))
				},
			}
		}
		/// Splits off the last bit and the remainder, if non-empty.
		#[inline]
		pub fn split_last(&self) -> Option<(&bool, &Self)> {
			match self.len() {
				0 => None,
				len => unsafe {
					let (rest, tail) = self.split_at_unchecked(len.wrapping_sub(1));
					Some((tail.get_unchecked(0), rest))
				},
			}
		}
		/// Mutable counterpart of `split_last`.
		#[inline]
		#[allow(clippy::type_complexity)]
		pub fn split_last_mut(
			&mut self,
		) -> Option<(BitMut<O, T::Alias>, &mut BitSlice<O, T::Alias>)> {
			match self.len() {
				0 => None,
				len => unsafe {
					let (rest, tail) = self.split_at_unchecked_mut(len - 1);
					Some((tail.get_unchecked_mut(0), rest))
				},
			}
		}
		/// Returns the last bit, if any.
		#[inline]
		pub fn last(&self) -> Option<&bool> {
			match self.len() {
				0 => None,
				len => Some(unsafe { self.get_unchecked(len - 1) }),
			}
		}
		/// Returns a write proxy to the last bit, if any.
		#[inline]
		pub fn last_mut(&mut self) -> Option<BitMut<O, T>> {
			match self.len() {
				0 => None,
				len => Some(unsafe { self.get_unchecked_mut(len - 1) }),
			}
		}
		/// Checked indexing by `usize` or any range type.
		#[inline]
		pub fn get<'a, I>(&'a self, index: I) -> Option<I::Immut>
		where I: BitSliceIndex<'a, O, T> {
			index.get(self)
		}
		/// Checked mutable indexing.
		#[inline]
		pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option<I::Mut>
		where I: BitSliceIndex<'a, O, T> {
			index.get_mut(self)
		}
		/// Unchecked indexing; caller must keep `index` in bounds.
		#[inline]
		#[allow(clippy::missing_safety_doc)]
		pub unsafe fn get_unchecked<'a, I>(&'a self, index: I) -> I::Immut
		where I: BitSliceIndex<'a, O, T> {
			index.get_unchecked(self)
		}
		/// Unchecked mutable indexing; caller must keep `index` in bounds.
		#[inline]
		#[allow(clippy::missing_safety_doc)]
		pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::Mut
		where I: BitSliceIndex<'a, O, T> {
			index.get_unchecked_mut(self)
		}
		#[inline(always)]
		#[deprecated = "Use `.as_bitptr()` to access the region pointer"]
		pub fn as_ptr(&self) -> *const Self {
			self.as_bitptr()
		}
		#[inline(always)]
		#[deprecated = "Use `.as_mut_bitptr()` to access the region pointer"]
		pub fn as_mut_ptr(&mut self) -> *mut Self {
			self.as_mut_bitptr()
		}
		/// Swaps the bits at `a` and `b`, panicking on overrun.
		#[inline]
		pub fn swap(&mut self, a: usize, b: usize) {
			let len = self.len();
			assert!(a < len, "Index {} out of bounds: {}", a, len);
			assert!(b < len, "Index {} out of bounds: {}", b, len);
			unsafe {
				self.swap_unchecked(a, b);
			}
		}
		/// Reverses the bit order in place by swapping the outermost
		/// pair, then shrinking the working pointer inward from both
		/// ends until at most one bit remains.
		#[inline]
		pub fn reverse(&mut self) {
			let mut bitptr = self.bitptr();
			let mut len = bitptr.len();
			while len > 1 {
				unsafe {
					len -= 1;
					bitptr.to_bitslice_mut().swap_unchecked(0, len);
					// Advance the front edge; the back edge shrank via `len`.
					bitptr.incr_head();
					len -= 1;
				}
			}
		}
		/// Returns a borrowing iterator over the bits.
		#[inline]
		pub fn iter(&self) -> Iter<O, T> {
			self.into_iter()
		}
		/// Returns a mutably-borrowing iterator over the bits.
		#[inline]
		pub fn iter_mut(&mut self) -> IterMut<O, T> {
			self.into_iter()
		}
		/// Overlapping windows of `size` bits; panics if `size == 0`.
		#[inline]
		pub fn windows(&self, size: usize) -> Windows<O, T> {
			assert_ne!(size, 0, "Window width cannot be 0");
			Windows::new(self, size)
		}
		/// Non-overlapping chunks; the last may be shorter.
		#[inline]
		pub fn chunks(&self, chunk_size: usize) -> Chunks<O, T> {
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			Chunks::new(self, chunk_size)
		}
		/// Mutable counterpart of `chunks`.
		#[inline]
		pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<O, T> {
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			ChunksMut::new(self, chunk_size)
		}
		/// Chunks of exactly `chunk_size`; the remainder is set aside.
		#[inline]
		pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<O, T> {
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			ChunksExact::new(self, chunk_size)
		}
		/// Mutable counterpart of `chunks_exact`.
		#[inline]
		pub fn chunks_exact_mut(
			&mut self,
			chunk_size: usize,
		) -> ChunksExactMut<O, T>
		{
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			ChunksExactMut::new(self, chunk_size)
		}
		/// Chunks taken from the back of the slice.
		#[inline]
		pub fn rchunks(&self, chunk_size: usize) -> RChunks<O, T> {
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			RChunks::new(self, chunk_size)
		}
		/// Mutable counterpart of `rchunks`.
		#[inline]
		pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<O, T> {
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			RChunksMut::new(self, chunk_size)
		}
		/// Exact back-to-front chunks; the remainder is set aside.
		#[inline]
		pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<O, T> {
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			RChunksExact::new(self, chunk_size)
		}
		/// Mutable counterpart of `rchunks_exact`.
		#[inline]
		pub fn rchunks_exact_mut(
			&mut self,
			chunk_size: usize,
		) -> RChunksExactMut<O, T>
		{
			assert_ne!(chunk_size, 0, "Chunk width cannot be 0");
			RChunksExactMut::new(self, chunk_size)
		}
		/// Checked split at `mid`; panics when `mid > len`.
		#[inline]
		pub fn split_at(&self, mid: usize) -> (&Self, &Self) {
			let len = self.len();
			assert!(mid <= len, "Index {} out of bounds: {}", mid, len);
			unsafe { self.split_at_unchecked(mid) }
		}
		/// Mutable counterpart of `split_at`; halves are alias-tainted.
		#[inline]
		#[allow(clippy::type_complexity)]
		pub fn split_at_mut(
			&mut self,
			mid: usize,
		) -> (&mut BitSlice<O, T::Alias>, &mut BitSlice<O, T::Alias>)
		{
			let len = self.len();
			assert!(mid <= len, "Index {} out of bounds: {}", mid, len);
			unsafe { self.split_at_unchecked_mut(mid) }
		}
		/// Splits on bits matching `pred`; the predicate also receives
		/// the bit's index (a `bitvec` extension over `[T]::split`).
		#[inline]
		pub fn split<F>(&self, pred: F) -> Split<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			Split::new(self, pred)
		}
		/// Mutable counterpart of `split`.
		#[inline]
		pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			SplitMut::new(self.alias_mut(), pred)
		}
		/// Back-to-front counterpart of `split`.
		#[inline]
		pub fn rsplit<F>(&self, pred: F) -> RSplit<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			RSplit::new(self, pred)
		}
		/// Mutable counterpart of `rsplit`.
		#[inline]
		pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			RSplitMut::new(self.alias_mut(), pred)
		}
		/// Like `split`, limited to at most `n` pieces.
		#[inline]
		pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			SplitN::new(self, pred, n)
		}
		/// Mutable counterpart of `splitn`.
		#[inline]
		pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			SplitNMut::new(self.alias_mut(), pred, n)
		}
		/// Back-to-front counterpart of `splitn`.
		#[inline]
		pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			RSplitN::new(self, pred, n)
		}
		/// Mutable counterpart of `rsplitn`.
		#[inline]
		pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<O, T, F>
		where F: FnMut(usize, &bool) -> bool {
			RSplitNMut::new(self.alias_mut(), pred, n)
		}
		/// Tests whether `x` occurs anywhere in the slice (windowed scan).
		#[inline]
		pub fn contains<O2, T2>(&self, x: &BitSlice<O2, T2>) -> bool
		where
			O2: BitOrder,
			T2: BitStore,
		{
			let len = x.len();
			if len > self.len() {
				return false;
			};
			self.windows(len).any(|s| s == x)
		}
		/// Tests whether the slice begins with `needle`.
		#[inline]
		pub fn starts_with<O2, T2>(&self, needle: &BitSlice<O2, T2>) -> bool
		where
			O2: BitOrder,
			T2: BitStore,
		{
			let len = needle.len();
			self.len() >= len && needle == unsafe { self.get_unchecked(.. len) }
		}
		/// Tests whether the slice ends with `needle`.
		#[inline]
		pub fn ends_with<O2, T2>(&self, needle: &BitSlice<O2, T2>) -> bool
		where
			O2: BitOrder,
			T2: BitStore,
		{
			let nlen = needle.len();
			let len = self.len();
			len >= nlen && needle == unsafe { self.get_unchecked(len - nlen ..) }
		}
		/// Rotates the slice down by `by` bits, wrapping the evicted
		/// front bits around to the back. Works in word-sized strides
		/// through a stack temporary. Panics when `by > len`.
		#[inline]
		pub fn rotate_left(&mut self, mut by: usize) {
			let len = self.len();
			assert!(
				by <= len,
				"Slices cannot be rotated by more than their length"
			);
			if by == 0 || by == len {
				return;
			}
			let mut tmp = BitArray::<O, usize>::zeroed();
			while by > 0 {
				// At most one `usize` of bits is staged per pass.
				let shamt = cmp::min(<usize as BitMemory>::BITS as usize, by);
				unsafe {
					let tmp_bits = tmp.get_unchecked_mut(.. shamt);
					tmp_bits.clone_from_bitslice(self.get_unchecked(.. shamt));
					self.copy_within_unchecked(shamt .., 0);
					self.get_unchecked_mut(len - shamt ..)
						.clone_from_bitslice(tmp_bits);
				}
				by -= shamt;
			}
		}
		/// Rotates the slice up by `by` bits, wrapping the evicted back
		/// bits around to the front. Panics when `by > len`.
		#[inline]
		pub fn rotate_right(&mut self, mut by: usize) {
			let len = self.len();
			assert!(
				by <= len,
				"Slices cannot be rotated by more than their length"
			);
			if by == 0 || by == len {
				return;
			}
			let mut tmp = BitArray::<O, usize>::zeroed();
			while by > 0 {
				let shamt = cmp::min(<usize as BitMemory>::BITS as usize, by);
				let mid = len - shamt;
				unsafe {
					let tmp_bits = tmp.get_unchecked_mut(.. shamt);
					tmp_bits.clone_from_bitslice(self.get_unchecked(mid ..));
					self.copy_within_unchecked(.. mid, shamt);
					self.get_unchecked_mut(.. shamt)
						.clone_from_bitslice(tmp_bits);
				}
				by -= shamt;
			}
		}
		#[inline(always)]
		#[deprecated = "Use `.clone_from_bitslice()` to copy between bitslices"]
		pub fn clone_from_slice<O2, T2>(&mut self, src: &BitSlice<O2, T2>)
		where
			O2: BitOrder,
			T2: BitStore,
		{
			self.clone_from_bitslice(src)
		}
		#[inline(always)]
		#[deprecated = "Use `.copy_from_bitslice()` to copy between bitslices"]
		pub fn copy_from_slice(&mut self, src: &Self) {
			self.copy_from_bitslice(src)
		}
		/// Checked in-place copy of range `src` to `dest ..`; panics when
		/// either the source or destination range overruns the slice.
		#[inline]
		pub fn copy_within<R>(&mut self, src: R, dest: usize)
		where R: RangeBounds<usize> {
			let len = self.len();
			let src = dvl::normalize_range(src, len);
			dvl::assert_range(src.clone(), len);
			dvl::assert_range(dest .. dest + (src.end - src.start), len);
			unsafe {
				self.copy_within_unchecked(src, dest);
			}
		}
		#[inline(always)]
		#[deprecated = "Use `.swap_with_bitslice()` to swap between bitslices"]
		pub fn swap_with_slice<O2, T2>(&mut self, other: &mut BitSlice<O2, T2>)
		where
			O2: BitOrder,
			T2: BitStore,
		{
			self.swap_with_bitslice(other);
		}
		/// Re-views the slice over a differently-sized store type `U`,
		/// yielding unaligned prefix/suffix remainders in `T`. Caller
		/// must accept that the split point is alignment-dependent.
		#[inline]
		pub unsafe fn align_to<U>(&self) -> (&Self, &BitSlice<O, U>, &Self)
		where U: BitStore {
			let (l, c, r) = self.bitptr().align_to::<U>();
			(
				l.to_bitslice_ref(),
				c.to_bitslice_ref(),
				r.to_bitslice_ref(),
			)
		}
		/// Mutable counterpart of `align_to`.
		#[inline]
		pub unsafe fn align_to_mut<U>(
			&mut self,
		) -> (&mut Self, &mut BitSlice<O, U>, &mut Self)
		where U: BitStore {
			let (l, c, r) = self.bitptr().align_to::<U>();
			(
				l.to_bitslice_mut(),
				c.to_bitslice_mut(),
				r.to_bitslice_mut(),
			)
		}
	}
#[cfg(feature = "alloc")] | |
impl<O, T> BitSlice<O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline] | |
#[deprecated = "Prefer `.to_bitvec()`"] | |
pub fn to_vec(&self) -> BitVec<O, T::Mem> { | |
self.to_bitvec() | |
} | |
#[inline] | |
pub fn repeat(&self, n: usize) -> BitVec<O, T::Mem> { | |
let len = self.len(); | |
let total = len.checked_mul(n).expect("capacity overflow"); | |
let mut out = BitVec::repeat(false, total); | |
for chunk in out.chunks_exact_mut(len).remove_alias() { | |
chunk.clone_from_bitslice(self); | |
} | |
out | |
} | |
} | |
	/// Free-function form of `BitSlice::from_element`.
	#[inline(always)]
	pub fn from_ref<O, T>(elem: &T) -> &BitSlice<O, T>
	where
		O: BitOrder,
		T: BitRegister + BitStore,
	{
		BitSlice::from_element(elem)
	}
	/// Free-function form of `BitSlice::from_element_mut`.
	#[inline(always)]
	pub fn from_mut<O, T>(elem: &mut T) -> &mut BitSlice<O, T>
	where
		O: BitOrder,
		T: BitRegister + BitStore,
	{
		BitSlice::from_element_mut(elem)
	}
	/// Analogue of `slice::from_raw_parts`: views `len` elements starting
	/// at `data` as a bit-slice spanning every bit of every element.
	/// Panics (rather than returning `None`) on an invalid pointer or an
	/// element count exceeding the encodable maximum. Caller must uphold
	/// the usual `slice::from_raw_parts` validity rules for `'a`.
	#[inline]
	pub unsafe fn from_raw_parts<'a, O, T>(
		data: *const T,
		len: usize,
	) -> &'a BitSlice<O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		super::bits_from_raw_parts(data, 0, len * T::Mem::BITS as usize)
			.unwrap_or_else(|| {
				panic!(
					"Failed to construct `&{}BitSlice` from invalid pointer {:p} \
					or element count {}",
					"", data, len
				)
			})
	}
	/// Mutable analogue of `from_raw_parts`; same panic and validity
	/// rules, plus exclusivity of the region for `'a`.
	#[inline]
	pub unsafe fn from_raw_parts_mut<'a, O, T>(
		data: *mut T,
		len: usize,
	) -> &'a mut BitSlice<O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		super::bits_from_raw_parts_mut(data, 0, len * T::Mem::BITS as usize)
			.unwrap_or_else(|| {
				panic!(
					"Failed to construct `&{}BitSlice` from invalid pointer {:p} \
					or element count {}",
					"mut ", data, len
				)
			})
	}
	/// Port of `core::slice::SliceIndex` for `BitSlice`, implemented for
	/// `usize` (single bit) and the standard range types (sub-slices).
	pub trait BitSliceIndex<'a, O, T>
	where
		O: BitOrder,
		T: BitStore,
	{
		/// Output of immutable indexing (`&bool` or `&BitSlice`).
		type Immut;
		/// Output of mutable indexing (`BitMut` proxy or `&mut BitSlice`).
		type Mut;
		/// Checked immutable lookup.
		fn get(self, slice: &'a BitSlice<O, T>) -> Option<Self::Immut>;
		/// Checked mutable lookup.
		fn get_mut(self, slice: &'a mut BitSlice<O, T>) -> Option<Self::Mut>;
		/// Unchecked immutable lookup; caller must keep `self` in bounds.
		unsafe fn get_unchecked(self, slice: &'a BitSlice<O, T>) -> Self::Immut;
		/// Unchecked mutable lookup; caller must keep `self` in bounds.
		unsafe fn get_unchecked_mut(
			self,
			slice: &'a mut BitSlice<O, T>,
		) -> Self::Mut;
		/// Panicking immutable lookup (backs `Index`).
		fn index(self, slice: &'a BitSlice<O, T>) -> Self::Immut;
		/// Panicking mutable lookup (backs `IndexMut`-style access).
		fn index_mut(self, slice: &'a mut BitSlice<O, T>) -> Self::Mut;
	}
	/// Single-bit indexing: immutable access yields `&'static`-style
	/// references to the literals `true`/`false`, mutable access yields
	/// a `BitMut` write proxy.
	impl<'a, O, T> BitSliceIndex<'a, O, T> for usize
	where
		O: BitOrder,
		T: BitStore,
	{
		type Immut = &'a bool;
		type Mut = BitMut<'a, O, T>;
		#[inline]
		fn get(self, slice: &'a BitSlice<O, T>) -> Option<Self::Immut> {
			if self < slice.len() {
				Some(unsafe { self.get_unchecked(slice) })
			}
			else {
				None
			}
		}
		#[inline]
		fn get_mut(self, slice: &'a mut BitSlice<O, T>) -> Option<Self::Mut> {
			if self < slice.len() {
				Some(unsafe { self.get_unchecked_mut(slice) })
			}
			else {
				None
			}
		}
		#[inline]
		unsafe fn get_unchecked(self, slice: &'a BitSlice<O, T>) -> Self::Immut {
			// Bits are not addressable, so a read is materialized as a
			// reference to one of the two `bool` literals.
			if slice.bitptr().read(self) {
				&true
			}
			else {
				&false
			}
		}
		#[inline]
		unsafe fn get_unchecked_mut(
			self,
			slice: &'a mut BitSlice<O, T>,
		) -> Self::Mut
		{
			// Resolve the bit's element address and in-element index,
			// then build a write proxy over that single bit.
			let bitptr = slice.bitptr();
			let (elt, bit) = bitptr.head().offset(self as isize);
			let addr = bitptr.pointer().to_access().offset(elt);
			BitMut::new_unchecked(addr, bit)
		}
		#[inline]
		fn index(self, slice: &'a BitSlice<O, T>) -> Self::Immut {
			self.get(slice).unwrap_or_else(|| {
				panic!("Index {} out of bounds: {}", self, slice.len())
			})
		}
		#[inline]
		fn index_mut(self, slice: &'a mut BitSlice<O, T>) -> Self::Mut {
			// `len` is captured first because `get_mut` borrows `slice`.
			let len = slice.len();
			self.get_mut(slice)
				.unwrap_or_else(|| panic!("Index {} out of bounds: {}", self, len))
		}
	}
macro_rules! range_impl { | |
( $r:ty { $get:item $unchecked:item } ) => { | |
impl<'a, O, T> BitSliceIndex<'a, O, T> for $r | |
where O: BitOrder, T: BitStore { | |
type Immut = &'a BitSlice<O, T>; | |
type Mut = &'a mut BitSlice<O, T>; | |
#[inline] | |
$get | |
#[inline] | |
fn get_mut(self, slice: Self::Mut) -> Option<Self::Mut> { | |
self.get(slice).map(|s| s.bitptr().to_bitslice_mut()) | |
} | |
#[inline] | |
$unchecked | |
#[inline] | |
unsafe fn get_unchecked_mut(self, slice: Self::Mut) -> Self::Mut { | |
self.get_unchecked(slice).bitptr().to_bitslice_mut() | |
} | |
fn index(self, slice: Self::Immut) -> Self::Immut { | |
let r = self.clone(); | |
let l = slice.len(); | |
self.get(slice) | |
.unwrap_or_else(|| { | |
panic!("Range {:?} out of bounds: {}", r, l) | |
}) | |
} | |
#[inline] | |
fn index_mut(self, slice: Self::Mut) -> Self::Mut { | |
self.index(slice).bitptr().to_bitslice_mut() | |
} | |
} | |
}; | |
( $( $r:ty => map $func:expr; )* ) => { $( | |
impl<'a, O, T> BitSliceIndex<'a, O, T> for $r | |
where O: BitOrder, T: BitStore { | |
type Immut = &'a BitSlice<O, T>; | |
type Mut = &'a mut BitSlice<O, T>; | |
#[inline] | |
fn get(self, slice: Self::Immut) -> Option<Self::Immut> { | |
$func(self).get(slice) | |
} | |
#[inline] | |
fn get_mut(self, slice: Self::Mut) -> Option<Self::Mut> { | |
$func(self).get_mut(slice) | |
} | |
#[inline] | |
unsafe fn get_unchecked(self, slice: Self::Immut) -> Self::Immut { | |
$func(self).get_unchecked(slice) | |
} | |
#[inline] | |
unsafe fn get_unchecked_mut(self, slice: Self::Mut) -> Self::Mut { | |
$func(self).get_unchecked_mut(slice) | |
} | |
#[inline] | |
fn index(self, slice: Self::Immut) -> Self::Immut { | |
$func(self).index(slice) | |
} | |
#[inline] | |
fn index_mut(self, slice: Self::Mut) -> Self::Mut { | |
$func(self).index_mut(slice) | |
} | |
} | |
)* }; | |
} | |
	// `Range<usize>`: checked bounds require start/end within the slice
	// and a non-inverted range; unchecked construction advances the head
	// by `start` and sets the length to the range width.
	range_impl!(Range<usize> {
		fn get(self, slice: Self::Immut) -> Option<Self::Immut> {
			let len = slice.len();
			if self.start > len || self.end > len || self.start > self.end {
				return None;
			}
			Some(unsafe { (self.start .. self.end).get_unchecked(slice) })
		}
		unsafe fn get_unchecked(self, slice: Self::Immut) -> Self::Immut {
			let (addr, head, _) = slice.bitptr().raw_parts();
			// Translate the bit offset into whole-element skip + new head.
			let (skip, new_head) = head.offset(self.start as isize);
			BitPtr::new_unchecked(
				addr.to_const().offset(skip),
				new_head,
				self.end - self.start,
			).to_bitslice_ref()
		}
	});
	// `RangeFrom<usize>`: `start ..` keeps everything from `start` to the
	// end; `start == len` yields the empty tail slice.
	range_impl!(RangeFrom<usize> {
		fn get(self, slice: Self::Immut) -> Option<Self::Immut> {
			let len = slice.len();
			if self.start <= len {
				Some(unsafe { (self.start ..).get_unchecked(slice) })
			}
			else {
				None
			}
		}
		unsafe fn get_unchecked(self, slice: Self::Immut) -> Self::Immut {
			let (addr, head, bits) = slice.bitptr().raw_parts();
			let (skip, new_head) = head.offset(self.start as isize);
			BitPtr::new_unchecked(
				addr.to_const().offset(skip),
				new_head,
				bits - self.start,
			).to_bitslice_ref()
		}
	});
// `RangeTo<usize>` indexing: the base address and head never move, so the
// unchecked path only truncates the recorded length.
range_impl!(RangeTo<usize> {
	fn get(self, slice: Self::Immut) -> Option<Self::Immut> {
		let len = slice.len();
		if self.end <= len {
			Some(unsafe { (.. self.end).get_unchecked(slice) })
		}
		else {
			None
		}
	}
	unsafe fn get_unchecked(self, slice: Self::Immut) -> Self::Immut {
		// Copy the slice pointer, shorten it in place, and reborrow.
		slice.bitptr().tap_mut(|bp| bp.set_len(self.end)).to_bitslice_ref()
	}
});
// Inclusive ranges delegate to the half-open forms by bumping the upper
// bound. NOTE(review): `end + 1` would overflow for `end == usize::MAX`;
// presumably unreachable for any in-bounds bit-slice index — confirm.
range_impl! {
	RangeInclusive<usize> => map |this: Self| {
		#[allow(clippy::range_plus_one)]
		(*this.start() .. *this.end() + 1)
	};
	RangeToInclusive<usize> => map |RangeToInclusive { end }| {
		#[allow(clippy::range_plus_one)]
		(.. end + 1)
	};
}
/// Full-range (`..`) indexing is the identity: every accessor returns the
/// whole slice unchanged, and checked lookup can never fail.
impl<'a, O, T> BitSliceIndex<'a, O, T> for RangeFull
where
	O: BitOrder,
	T: BitStore,
{
	type Immut = &'a BitSlice<O, T>;
	type Mut = &'a mut BitSlice<O, T>;
	#[inline(always)]
	fn get(self, slice: Self::Immut) -> Option<Self::Immut> {
		Some(slice)
	}
	#[inline(always)]
	fn get_mut(self, slice: Self::Mut) -> Option<Self::Mut> {
		Some(slice)
	}
	#[inline(always)]
	unsafe fn get_unchecked(self, slice: Self::Immut) -> Self::Immut {
		slice
	}
	#[inline(always)]
	unsafe fn get_unchecked_mut(self, slice: Self::Mut) -> Self::Mut {
		slice
	}
	#[inline(always)]
	fn index(self, slice: Self::Immut) -> Self::Immut {
		slice
	}
	#[inline(always)]
	fn index_mut(self, slice: Self::Mut) -> Self::Mut {
		slice
	}
}
} | |
mod iter { | |
use crate::{ | |
index::BitIdx, | |
mem::BitMemory, | |
order::BitOrder, | |
ptr::BitPtr, | |
slice::{ | |
proxy::BitMut, | |
BitSlice, | |
BitSliceIndex, | |
}, | |
store::BitStore, | |
}; | |
use core::{ | |
cmp, | |
fmt::{ | |
self, | |
Debug, | |
Formatter, | |
}, | |
iter::FusedIterator, | |
marker::PhantomData, | |
mem, | |
ptr::NonNull, | |
}; | |
/// Immutable bit iterator over a `&BitSlice`.
///
/// Holds raw element pointers and bit cursors for both ends, so stepping
/// only moves a pointer and a `BitIdx` (see `pop_front`/`pop_back`).
#[repr(C)]
pub struct Iter<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Element containing the front cursor.
	base: NonNull<T>,
	/// Element containing the back cursor.
	last: NonNull<T>,
	/// Bit index of the next bit yielded from the front.
	head: BitIdx<T::Mem>,
	/// Bit index one past the next bit yielded from the back.
	tail: BitIdx<T::Mem>,
	/// Ties the iterator to the source slice's lifetime.
	_ref: PhantomData<&'a BitSlice<O, T>>,
}
/// Mutable bit iterator over a `&mut BitSlice`.
///
/// Mirrors `Iter`, but the pointers use the `T::Alias` access type —
/// presumably so yielded `BitMut` proxies remain sound while the source
/// region is split up; confirm against the `BitStore` aliasing model.
#[repr(C)]
pub struct IterMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Element containing the front cursor.
	base: NonNull<<T::Alias as BitStore>::Access>,
	/// Element containing the back cursor.
	last: NonNull<<T::Alias as BitStore>::Access>,
	/// Bit index of the next bit yielded from the front.
	head: BitIdx<<T::Alias as BitStore>::Mem>,
	/// Bit index one past the next bit yielded from the back.
	tail: BitIdx<<T::Alias as BitStore>::Mem>,
	/// Ties the iterator to the aliased source slice's lifetime.
	_ref: PhantomData<&'a mut BitSlice<O, T::Alias>>,
}
impl<'a, O, T> Iter<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The canonical exhausted iterator: coincident cursors on a dangling
	/// (never dereferenced) pointer.
	const EMPTY: Self = Self {
		base: NonNull::dangling(),
		last: NonNull::dangling(),
		head: BitIdx::ZERO,
		tail: BitIdx::ZERO,
		_ref: PhantomData,
	};
	/// Views the bits not yet yielded as a `BitSlice`.
	#[inline]
	pub fn as_bitslice(&self) -> &'a BitSlice<O, T> {
		unsafe {
			BitPtr::new_unchecked(self.base.as_ptr(), self.head, self.len())
		}
		.to_bitslice_ref()
	}
	#[inline(always)]
	#[deprecated = "Use `.as_bitslice()` to view the underlying slice"]
	pub fn as_slice(&self) -> &'a BitSlice<O, T> {
		self.as_bitslice()
	}
	// Advances the front cursor one bit and yields the bit it crossed.
	// Callers must ensure the iterator is non-empty.
	fn pop_front(&mut self) -> <Self as Iterator>::Item {
		let base_raw = self.base.as_ptr() as *const T;
		let out = unsafe { &*base_raw }.get_bit::<O>(self.head);
		// `next` wraps the bit index; `incr` marks an element crossing.
		let (head, incr) = self.head.next();
		self.set_base(unsafe { base_raw.add(incr as usize) as *mut T });
		self.head = head;
		// Yield references to constants so `Item` can be `&bool`.
		if out { &true } else { &false }
	}
	// Retreats the back cursor one bit and yields the bit it crossed.
	// Callers must ensure the iterator is non-empty.
	fn pop_back(&mut self) -> <Self as Iterator>::Item {
		let (tail, offset) = self.tail.prev();
		self.set_last(unsafe { self.last.as_ptr().offset(-(offset as isize)) });
		self.tail = tail;
		if unsafe { &*self.last.as_ptr() }.get_bit::<O>(self.tail) {
			&true
		}
		else {
			&false
		}
	}
	// Raw cursor accessors, used by this impl and the `iter!` expansion.
	#[inline(always)]
	fn get_base(&self) -> *const T {
		self.base.as_ptr() as *const T
	}
	#[inline(always)]
	fn get_last(&self) -> *const T {
		self.last.as_ptr() as *const T
	}
	#[inline(always)]
	fn set_base(&mut self, base: *const T) {
		self.base = unsafe { NonNull::new_unchecked(base as *mut T) }
	}
	#[inline(always)]
	fn set_last(&mut self, last: *const T) {
		self.last = unsafe { NonNull::new_unchecked(last as *mut T) }
	}
}
impl<'a, O, T> IterMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The canonical exhausted iterator: coincident cursors on a dangling
	/// (never dereferenced) pointer.
	const EMPTY: Self = Self {
		base: NonNull::dangling(),
		last: NonNull::dangling(),
		head: BitIdx::ZERO,
		tail: BitIdx::ZERO,
		_ref: PhantomData,
	};
	/// Consumes the iterator, returning the bits not yet yielded as an
	/// aliased mutable `BitSlice`.
	#[inline]
	pub fn into_bitslice(self) -> &'a mut BitSlice<O, T::Alias> {
		unsafe {
			BitPtr::new_unchecked(
				self.base.as_ptr()
					as *const <<T as BitStore>::Alias as BitStore>::Access
					as *const <T as BitStore>::Alias,
				self.head,
				self.len(),
			)
		}
		.to_bitslice_mut()
	}
	#[inline(always)]
	#[deprecated = "Use `.into_bitslice()` to view the underlying slice"]
	pub fn into_slice(self) -> &'a mut BitSlice<O, T::Alias> {
		self.into_bitslice()
	}
	// Views the remainder without consuming `self`: `ptr::read` makes a
	// shallow copy solely so the by-value `into_bitslice` can run. The
	// copy is only read, so no double-use of the exclusive region occurs.
	fn as_bitslice(&self) -> &BitSlice<O, T::Alias> {
		unsafe { core::ptr::read(self) }.into_bitslice()
	}
	// Advances the front cursor one bit and yields a write proxy for it.
	// Callers must ensure the iterator is non-empty.
	fn pop_front(&mut self) -> <Self as Iterator>::Item {
		let out =
			unsafe { BitMut::new_unchecked(self.base.as_ptr(), self.head) };
		let (head, incr) = self.head.next();
		self.set_base(unsafe { self.base.as_ptr().add(incr as usize) });
		self.head = head;
		out
	}
	// Retreats the back cursor one bit and yields a write proxy for it.
	// Callers must ensure the iterator is non-empty.
	fn pop_back(&mut self) -> <Self as Iterator>::Item {
		let (tail, decr) = self.tail.prev();
		self.set_last(unsafe { self.last.as_ptr().sub(decr as usize) });
		self.tail = tail;
		unsafe { BitMut::new_unchecked(self.last.as_ptr(), self.tail) }
	}
	// Raw cursor accessors, used by this impl and the `iter!` expansion.
	#[inline(always)]
	fn get_base(&self) -> *mut <T::Alias as BitStore>::Access {
		self.base.as_ptr()
	}
	#[inline(always)]
	fn get_last(&self) -> *mut <T::Alias as BitStore>::Access {
		self.last.as_ptr()
	}
	#[inline(always)]
	fn set_base(&mut self, base: *mut <T::Alias as BitStore>::Access) {
		self.base = unsafe { NonNull::new_unchecked(base) }
	}
	#[inline(always)]
	fn set_last(&mut self, last: *mut <T::Alias as BitStore>::Access) {
		self.last = unsafe { NonNull::new_unchecked(last) }
	}
}
/// `Iter` is `Copy` (shared view, no exclusive state), so `clone` is a
/// bitwise copy.
impl<O, T> Clone for Iter<'_, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	#[inline(always)]
	fn clone(&self) -> Self {
		*self
	}
}
/// Borrows the not-yet-yielded remainder as a `BitSlice`.
impl<O, T> AsRef<BitSlice<O, T>> for Iter<'_, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	#[inline(always)]
	fn as_ref(&self) -> &BitSlice<O, T> {
		self.as_bitslice()
	}
}
impl<O, T> Debug for Iter<'_, O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
fmt.debug_tuple("Iter").field(&self.as_bitslice()).finish() | |
} | |
} | |
// `Iter` holds only pointers, indices, and a `PhantomData` of a shared
// reference, so a bitwise copy is always valid.
impl<O, T> Copy for Iter<'_, O, T>
where
	O: BitOrder,
	T: BitStore,
{
}
impl<O, T> Debug for IterMut<'_, O, T> | |
where | |
O: BitOrder, | |
T: BitStore, | |
{ | |
#[inline] | |
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { | |
fmt.debug_tuple("IterMut") | |
.field(&self.as_bitslice()) | |
.finish() | |
} | |
} | |
impl<'a, O, T> IntoIterator for &'a BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	type IntoIter = Iter<'a, O, T>;
	type Item = <Self::IntoIter as Iterator>::Item;
	fn into_iter(self) -> Self::IntoIter {
		// Decompose the slice pointer into element address, head bit, and
		// bit length, then precompute the back cursor.
		let (addr, head, bits) = self.bitptr().raw_parts();
		let addr = addr.to_mut();
		let base = unsafe { NonNull::new_unchecked(addr) };
		// `elts` is how many whole elements the span crosses; `tail` is the
		// bit index one past the final live bit.
		let (elts, tail) = head.offset(bits as isize);
		let last = unsafe { NonNull::new_unchecked(addr.offset(elts)) };
		Self::IntoIter {
			base,
			last,
			head,
			tail,
			_ref: PhantomData,
		}
	}
}
impl<'a, O, T> IntoIterator for &'a mut BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
{
	type IntoIter = IterMut<'a, O, T>;
	type Item = <Self::IntoIter as Iterator>::Item;
	fn into_iter(self) -> Self::IntoIter {
		// Same decomposition as the shared impl, but the slice is first
		// retyped through `alias()` and the address through the `Access`
		// type, matching `IterMut`'s field types.
		let (addr, head, bits) = self.alias().bitptr().raw_parts();
		let addr = addr.to_access()
			as *mut <<T as BitStore>::Alias as BitStore>::Access;
		let base = unsafe { NonNull::new_unchecked(addr) };
		let (elts, tail) = head.offset(bits as isize);
		let last = unsafe { NonNull::new_unchecked(addr.offset(elts)) };
		Self::IntoIter {
			base,
			last,
			head,
			tail,
			_ref: PhantomData,
		}
	}
}
// Generates the full iterator trait stack (`Iterator`,
// `DoubleEndedIterator`, `ExactSizeIterator`, `FusedIterator`, `Send`,
// `Sync`) for the bit-wise iterators. `$t` is the iterator type and `$i`
// its yielded item; the motion primitives (`pop_front`, `pop_back`,
// cursor get/set) come from the type's inherent impl above.
macro_rules! iter {
	($($t:ident => $i:ty),+ $(,)?) => { $(
		impl<'a, O, T> $t<'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			// Exhausted when both cursors (pointer and bit index) coincide.
			fn inherent_is_empty(&self) -> bool {
				self.base == self.last && self.head == self.tail
			}
		}
		impl<'a, O, T> Iterator for $t<'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			type Item = $i;
			#[inline]
			fn next(&mut self) -> Option<Self::Item> {
				if self.inherent_is_empty() {
					return None;
				}
				Some(self.pop_front())
			}
			#[inline]
			fn size_hint(&self) -> (usize, Option<usize>) {
				// Exact: the remaining bit count is always known.
				let len = self.len();
				(len, Some(len))
			}
			#[inline]
			fn count(self) -> usize {
				self.len()
			}
			#[inline]
			fn nth(&mut self, n: usize) -> Option<Self::Item> {
				if n >= self.len() {
					// Overshoot fully drains the iterator, per the
					// `Iterator::nth` contract.
					*self = Self::EMPTY;
					return None;
				}
				// Jump the front cursor forward `n` bits, then yield.
				let (elts, head) = self.head.offset(n as isize);
				self.set_base(unsafe{self.get_base().offset(elts)});
				self.head = head;
				Some(self.pop_front())
			}
			#[inline]
			fn last(mut self) -> Option<Self::Item> {
				self.next_back()
			}
		}
		impl<'a, O, T> DoubleEndedIterator for $t <'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			#[inline]
			fn next_back(&mut self) -> Option<Self::Item> {
				if self.inherent_is_empty() {
					return None;
				}
				Some(self.pop_back())
			}
			#[inline]
			fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
				if n >= self.len() {
					*self = Self::EMPTY;
					return None;
				}
				// Jump the back cursor backward `n` bits (`elts` is ≤ 0).
				let (elts, tail) = self.tail.offset(-(n as isize));
				self.set_last(unsafe{self.get_last().offset(elts)});
				self.tail = tail;
				Some(self.pop_back())
			}
		}
		impl<O, T> ExactSizeIterator for $t <'_, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			fn len(&self) -> usize {
				// Byte distance between the cursors' elements, scaled to
				// bits (shl by log2(8)), then adjusted by the partial bit
				// cursors at each end. Wrapping math keeps the
				// intermediate underflow harmless.
				let (base, last) =
					(self.get_base() as usize, self.get_last() as usize);
				last.wrapping_sub(base)
					.wrapping_shl(<u8 as BitMemory>::INDX as u32)
					.wrapping_add(self.tail.value() as usize)
					.wrapping_sub(self.head.value() as usize)
			}
		}
		impl<O, T> FusedIterator for $t <'_, O, T>
		where
			O: BitOrder,
			T: BitStore
		{
		}
		// NOTE(review): these unconditional `Send`/`Sync` impls mirror the
		// raw-pointer fields standing in for references; soundness rests on
		// the `BitStore` aliasing rules — confirm against upstream.
		unsafe impl<O, T> Send for $t <'_, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
		}
		unsafe impl<O, T> Sync for $t <'_, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
		}
	)+ };
}
// Instantiate the stack: `Iter` yields `&bool` (see `pop_front`), and
// `IterMut` yields `BitMut` write proxies over the aliased store.
iter!(
	Iter => <usize as BitSliceIndex<'a, O, T>>::Immut,
	IterMut => <usize as BitSliceIndex<'a, O, T::Alias>>::Mut,
);
// Generates the iterator trait stack for the chunking/windowing
// iterators. The caller supplies the five motion methods (`next`, `nth`,
// `next_back`, `nth_back`, `len`) as items; the macro fills in the
// derived methods and the `DoubleEndedIterator` / `ExactSizeIterator` /
// `FusedIterator` impls. NOTE(review): the `$( where $alias:ident )?`
// capture is matched but never used in the expansion.
macro_rules! group {
	(
		$iter:ident => $item:ty $( where $alias:ident )? {
			$next:item
			$nth:item
			$next_back:item
			$nth_back:item
			$len:item
		}
	) => {
		impl<'a, O, T> Iterator for $iter <'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			type Item = $item;
			#[inline]
			$next
			#[inline]
			$nth
			#[inline]
			fn size_hint(&self) -> (usize, Option<usize>) {
				// Exact: chunk/window counts are always computable.
				let len = self.len();
				(len, Some(len))
			}
			#[inline]
			fn count(self) -> usize {
				self.len()
			}
			#[inline]
			fn last(mut self) -> Option<Self::Item> {
				self.next_back()
			}
		}
		impl<'a, O, T> DoubleEndedIterator for $iter <'a, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			#[inline]
			$next_back
			#[inline]
			$nth_back
		}
		impl<O, T> ExactSizeIterator for $iter <'_, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
			#[inline]
			$len
		}
		impl<O, T> FusedIterator for $iter <'_, O, T>
		where
			O: BitOrder,
			T: BitStore,
		{
		}
	}
}
/// Iterator over all overlapping subslices of fixed `width`, front to
/// back, analogous to `core::slice::Windows`.
#[derive(Clone, Debug)]
pub struct Windows<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The remaining region still to be windowed.
	slice: &'a BitSlice<O, T>,
	/// Window width in bits.
	width: usize,
}
group!(Windows => &'a BitSlice<O, T> {
	fn next(&mut self) -> Option<Self::Item> {
		// Fewer bits than one window left: drain and stop.
		if self.width > self.slice.len() {
			self.slice = Default::default();
			return None;
		}
		unsafe {
			let out = self.slice.get_unchecked(.. self.width);
			// Windows overlap: advance by one bit, not one window.
			self.slice = self.slice.get_unchecked(1 ..);
			Some(out)
		}
	}
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
		let (end, ovf) = self.width.overflowing_add(n);
		if end > self.slice.len() || ovf {
			self.slice = Default::default();
			return None;
		}
		unsafe {
			let out = self.slice.get_unchecked(n .. end);
			self.slice = self.slice.get_unchecked(n + 1 ..);
			Some(out)
		}
	}
	fn next_back(&mut self) -> Option<Self::Item> {
		let len = self.slice.len();
		if self.width > len {
			self.slice = Default::default();
			return None;
		}
		unsafe {
			let out = self.slice.get_unchecked(len - self.width ..);
			// Retreat the back edge by one bit.
			self.slice = self.slice.get_unchecked(.. len - 1);
			Some(out)
		}
	}
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
		let (end, ovf) = self.slice.len().overflowing_sub(n);
		if end < self.width || ovf {
			self.slice = Default::default();
			return None;
		}
		unsafe {
			let out = self.slice.get_unchecked(end - self.width .. end);
			self.slice = self.slice.get_unchecked(.. end - 1);
			Some(out)
		}
	}
	fn len(&self) -> usize {
		// A slice of `len` bits holds `len - width + 1` windows.
		let len = self.slice.len();
		if self.width > len {
			return 0;
		}
		len - self.width + 1
	}
});
/// Iterator over non-overlapping subslices of `width` bits, front to
/// back; the final chunk may be shorter. Analogous to
/// `core::slice::Chunks`.
#[derive(Clone, Debug)]
pub struct Chunks<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The remaining region still to be chunked.
	slice: &'a BitSlice<O, T>,
	/// Chunk width in bits.
	width: usize,
}
group!(Chunks => &'a BitSlice<O, T> { | |
fn next(&mut self) -> Option<Self::Item> { | |
let len = self.slice.len(); | |
if len == 0 { | |
return None; | |
} | |
let mid = cmp::min(len, self.width); | |
let (out, rest) = unsafe { self.slice.split_at_unchecked(mid) }; | |
self.slice = rest; | |
Some(out) | |
} | |
fn nth(&mut self, n: usize) -> Option<Self::Item> { | |
let len = self.slice.len(); | |
let (start, ovf) = n.overflowing_mul(self.width); | |
if start >= len || ovf { | |
self.slice = Default::default(); | |
return None; | |
} | |
let (out, rest) = unsafe { | |
self.slice | |
.get_unchecked(start ..) | |
.split_at_unchecked(cmp::min(len, self.width)) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn next_back(&mut self) -> Option<Self::Item> { | |
match self.slice.len() { | |
0 => None, | |
len => { | |
let rem = len % self.width; | |
let size = if rem == 0 { self.width } else { rem }; | |
let (rest, out) = | |
unsafe { self.slice.split_at_unchecked(len - size) }; | |
self.slice = rest; | |
Some(out) | |
}, | |
} | |
} | |
fn nth_back(&mut self, n: usize) -> Option<Self::Item> { | |
let len = self.len(); | |
if n >= len { | |
self.slice = Default::default(); | |
return None; | |
} | |
let start = (len - 1 - n) * self.width; | |
let width = cmp::min(start + self.width, self.slice.len()); | |
let (rest, out) = unsafe { | |
self.slice | |
.get_unchecked(.. start + width) | |
.split_at_unchecked(start) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn len(&self) -> usize { | |
match self.slice.len() { | |
0 => 0, | |
len => { | |
let (n, r) = (len / self.width, len % self.width); | |
n + (r > 0) as usize | |
}, | |
} | |
} | |
}); | |
/// Mutable analog of `Chunks`: non-overlapping `width`-bit subslices over
/// an aliased (`T::Alias`) region, front to back.
#[derive(Debug)]
pub struct ChunksMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The remaining region still to be chunked.
	slice: &'a mut BitSlice<O, T::Alias>,
	/// Chunk width in bits.
	width: usize,
}
group!(ChunksMut => &'a mut BitSlice<O, T::Alias> { | |
fn next(&mut self) -> Option<Self::Item> { | |
let slice = mem::take(&mut self.slice); | |
let len = slice.len(); | |
if len == 0 { | |
return None; | |
} | |
let mid = cmp::min(len, self.width); | |
let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(mid) }; | |
self.slice = rest; | |
Some(out) | |
} | |
fn nth(&mut self, n: usize) -> Option<Self::Item> { | |
let slice = mem::take(&mut self.slice); | |
let len = slice.len(); | |
let (start, ovf) = n.overflowing_mul(self.width); | |
if start >= len || ovf { | |
return None; | |
} | |
let (out, rest) = unsafe { | |
slice | |
.get_unchecked_mut(start ..) | |
.split_at_unchecked_mut_noalias(cmp::min(len, self.width)) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn next_back(&mut self) -> Option<Self::Item> { | |
let slice = mem::take(&mut self.slice); | |
match slice.len() { | |
0 => None, | |
len => { | |
let rem = len % self.width; | |
let size = if rem == 0 { self.width } else { rem }; | |
let (rest, out) = | |
unsafe { slice.split_at_unchecked_mut_noalias(len - size) }; | |
self.slice = rest; | |
Some(out) | |
}, | |
} | |
} | |
fn nth_back(&mut self, n: usize) -> Option<Self::Item> { | |
let len = self.len(); | |
let slice = mem::take(&mut self.slice); | |
if n >= len { | |
return None; | |
} | |
let start = (len - 1 - n) * self.width; | |
let width = cmp::min(start + self.width, slice.len()); | |
let (rest, out) = unsafe { | |
slice | |
.get_unchecked_mut(.. start + width) | |
.split_at_unchecked_mut_noalias(start) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn len(&self) -> usize { | |
match self.slice.len() { | |
0 => 0, | |
len => { | |
let (n, r) = (len / self.width, len % self.width); | |
n + (r > 0) as usize | |
}, | |
} | |
} | |
}); | |
/// Iterator over non-overlapping subslices of exactly `width` bits; any
/// trailing remainder is split off at construction and exposed via
/// `remainder`. Analogous to `core::slice::ChunksExact`.
#[derive(Clone, Debug)]
pub struct ChunksExact<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The region still to be chunked; always a multiple of `width` long.
	slice: &'a BitSlice<O, T>,
	/// The trailing bits (fewer than `width`) never yielded by iteration.
	extra: &'a BitSlice<O, T>,
	/// Chunk width in bits.
	width: usize,
}
impl<'a, O, T> ChunksExact<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Splits the trailing `len % width` bits off up front so iteration
	/// only ever sees whole chunks. `width` must be nonzero: `len % width`
	/// panics on zero (callers are expected to enforce this).
	#[inline]
	pub(super) fn new(slice: &'a BitSlice<O, T>, width: usize) -> Self {
		let len = slice.len();
		let rem = len % width;
		let (slice, extra) = unsafe { slice.split_at_unchecked(len - rem) };
		Self {
			slice,
			extra,
			width,
		}
	}
	/// The trailing bits not covered by any exact chunk.
	pub fn remainder(&self) -> &'a BitSlice<O, T> {
		self.extra
	}
}
group!(ChunksExact => &'a BitSlice<O, T> { | |
fn next(&mut self) -> Option<Self::Item> { | |
if self.slice.len() < self.width { | |
return None; | |
} | |
let (out, rest) = unsafe { self.slice.split_at_unchecked(self.width) }; | |
self.slice = rest; | |
Some(out) | |
} | |
fn nth(&mut self, n: usize) -> Option<Self::Item> { | |
let (start, ovf) = n.overflowing_mul(self.width); | |
if start + self.width >= self.slice.len() || ovf { | |
self.slice = Default::default(); | |
return None; | |
} | |
let (out, rest) = unsafe { | |
self.slice | |
.get_unchecked(start ..) | |
.split_at_unchecked(self.width) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn next_back(&mut self) -> Option<Self::Item> { | |
let len = self.slice.len(); | |
if len < self.width { | |
return None; | |
} | |
let (rest, out) = | |
unsafe { self.slice.split_at_unchecked(len - self.width) }; | |
self.slice = rest; | |
Some(out) | |
} | |
fn nth_back(&mut self, n: usize) -> Option<Self::Item> { | |
let len = self.len(); | |
if n >= len { | |
self.slice = Default::default(); | |
return None; | |
} | |
let end = (len - n) * self.width; | |
let (rest, out) = unsafe { | |
self.slice | |
.get_unchecked(.. end) | |
.split_at_unchecked(end - self.width) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn len(&self) -> usize { | |
self.slice.len() / self.width | |
} | |
}); | |
/// Mutable analog of `ChunksExact` over an aliased (`T::Alias`) region;
/// the trailing remainder is recoverable via `into_remainder`.
#[derive(Debug)]
pub struct ChunksExactMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The region still to be chunked; always a multiple of `width` long.
	slice: &'a mut BitSlice<O, T::Alias>,
	/// The trailing bits (fewer than `width`) never yielded by iteration.
	extra: &'a mut BitSlice<O, T::Alias>,
	/// Chunk width in bits.
	width: usize,
}
impl<'a, O, T> ChunksExactMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Splits the trailing `len % width` bits off up front so iteration
	/// only ever sees whole chunks. `width` must be nonzero: `len % width`
	/// panics on zero (callers are expected to enforce this).
	#[inline]
	pub(super) fn new(slice: &'a mut BitSlice<O, T>, width: usize) -> Self {
		let len = slice.len();
		let rem = len % width;
		let (slice, extra) = unsafe { slice.split_at_unchecked_mut(len - rem) };
		Self {
			slice,
			extra,
			width,
		}
	}
	/// Consumes the iterator, returning the trailing remainder.
	#[inline]
	pub fn into_remainder(self) -> &'a mut BitSlice<O, T::Alias> {
		self.extra
	}
}
group!(ChunksExactMut => &'a mut BitSlice<O, T::Alias> { | |
fn next(&mut self) -> Option<Self::Item> { | |
let slice = mem::take(&mut self.slice); | |
if slice.len() < self.width { | |
return None; | |
} | |
let (out, rest) = | |
unsafe { slice.split_at_unchecked_mut_noalias(self.width) }; | |
self.slice = rest; | |
Some(out) | |
} | |
fn nth(&mut self, n: usize) -> Option<Self::Item> { | |
let slice = mem::take(&mut self.slice); | |
let (start, ovf) = n.overflowing_mul(self.width); | |
if start + self.width >= slice.len() || ovf { | |
return None; | |
} | |
let (out, rest) = unsafe { | |
slice.get_unchecked_mut(start ..) | |
.split_at_unchecked_mut_noalias(self.width) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn next_back(&mut self) -> Option<Self::Item> { | |
let slice = mem::take(&mut self.slice); | |
let len = slice.len(); | |
if len < self.width { | |
return None; | |
} | |
let (rest, out) = | |
unsafe { slice.split_at_unchecked_mut_noalias(len - self.width) }; | |
self.slice = rest; | |
Some(out) | |
} | |
fn nth_back(&mut self, n: usize) -> Option<Self::Item> { | |
let len = self.len(); | |
let slice = mem::take(&mut self.slice); | |
if n >= len { | |
return None; | |
} | |
let end = (len - n) * self.width; | |
let (rest, out) = unsafe { | |
slice.get_unchecked_mut(.. end) | |
.split_at_unchecked_mut_noalias(end - self.width) | |
}; | |
self.slice = rest; | |
Some(out) | |
} | |
fn len(&self) -> usize { | |
self.slice.len() / self.width | |
} | |
}); | |
/// Iterator over non-overlapping `width`-bit subslices taken from the
/// back of the slice toward the front; the final (front) chunk may be
/// shorter. Analogous to `core::slice::RChunks`.
#[derive(Clone, Debug)]
pub struct RChunks<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The remaining region still to be chunked.
	slice: &'a BitSlice<O, T>,
	/// Chunk width in bits.
	width: usize,
}
group!(RChunks => &'a BitSlice<O, T> {
	fn next(&mut self) -> Option<Self::Item> {
		let len = self.slice.len();
		if len == 0 {
			return None;
		}
		// Take the chunk off the back; the front chunk may be short.
		let mid = len - cmp::min(len, self.width);
		let (rest, out) = unsafe { self.slice.split_at_unchecked(mid) };
		self.slice = rest;
		Some(out)
	}
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
		let len = self.slice.len();
		let (num, ovf) = n.overflowing_mul(self.width);
		if num >= len || ovf {
			self.slice = Default::default();
			return None;
		}
		// `end` is the upper boundary of chunk `n` counted from the back;
		// `saturating_sub` clamps the short front chunk's start to zero.
		let end = len - num;
		let mid = end.saturating_sub(self.width);
		let (rest, out) = unsafe {
			self.slice
				.get_unchecked(.. end)
				.split_at_unchecked(mid)
		};
		self.slice = rest;
		Some(out)
	}
	fn next_back(&mut self) -> Option<Self::Item> {
		match self.slice.len() {
			0 => None,
			n => {
				// The back of this reversed iteration is the front of the
				// slice: the (possibly short) remainder chunk.
				let rem = n % self.width;
				let len = if rem == 0 { self.width } else { rem };
				let (out, rest) = unsafe { self.slice.split_at_unchecked(len) };
				self.slice = rest;
				Some(out)
			},
		}
	}
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
		let len = self.len();
		if n >= len {
			self.slice = Default::default();
			return None;
		}
		// Locate chunk `n` counted from the front of the slice, then keep
		// only the region behind it for further iteration.
		let from_end = (len - 1 - n) * self.width;
		let end = self.slice.len() - from_end;
		let start = end.saturating_sub(self.width);
		let (out, rest) = unsafe { self.slice.split_at_unchecked(end) };
		self.slice = rest;
		Some(unsafe { out.get_unchecked(start ..) })
	}
	fn len(&self) -> usize {
		match self.slice.len() {
			0 => 0,
			len => {
				// Whole chunks plus one for any remainder.
				let (n, r) = (len / self.width, len % self.width);
				n + (r > 0) as usize
			},
		}
	}
});
/// Mutable analog of `RChunks`: back-to-front `width`-bit subslices over
/// an aliased (`T::Alias`) region.
#[derive(Debug)]
pub struct RChunksMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The remaining region still to be chunked.
	slice: &'a mut BitSlice<O, T::Alias>,
	/// Chunk width in bits.
	width: usize,
}
group!(RChunksMut => &'a mut BitSlice<O, T::Alias> {
	fn next(&mut self) -> Option<Self::Item> {
		// `mem::take` detaches the stored borrow so it can be split with
		// the yielded lifetime; a `None` return leaves the field drained.
		let slice = mem::take(&mut self.slice);
		let len = slice.len();
		if len == 0 {
			return None;
		}
		// Take the chunk off the back; the front chunk may be short.
		let mid = len - cmp::min(len, self.width);
		let (rest, out) = unsafe { slice.split_at_unchecked_mut_noalias(mid) };
		self.slice = rest;
		Some(out)
	}
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
		let slice = mem::take(&mut self.slice);
		let len = slice.len();
		let (num, ovf) = n.overflowing_mul(self.width);
		if num >= len || ovf {
			return None;
		}
		// `end` is the upper boundary of chunk `n` counted from the back;
		// `saturating_sub` clamps the short front chunk's start to zero.
		let end = len - num;
		let mid = end.saturating_sub(self.width);
		let (rest, out) = unsafe {
			slice.get_unchecked_mut(.. end)
				.split_at_unchecked_mut_noalias(mid)
		};
		self.slice = rest;
		Some(out)
	}
	fn next_back(&mut self) -> Option<Self::Item> {
		let slice = mem::take(&mut self.slice);
		match slice.len() {
			0 => None,
			n => {
				// The back of this reversed iteration is the front of the
				// slice: the (possibly short) remainder chunk.
				let rem = n % self.width;
				let len = if rem == 0 { self.width } else { rem };
				let (out, rest) =
					unsafe { slice.split_at_unchecked_mut_noalias(len) };
				self.slice = rest;
				Some(out)
			},
		}
	}
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
		let len = self.len();
		let slice = mem::take(&mut self.slice);
		if n >= len {
			return None;
		}
		// Locate chunk `n` counted from the front of the slice, then keep
		// only the region behind it for further iteration.
		let from_end = (len - 1 - n) * self.width;
		let end = slice.len() - from_end;
		let start = end.saturating_sub(self.width);
		let (out, rest) = unsafe { slice.split_at_unchecked_mut_noalias(end) };
		self.slice = rest;
		Some(unsafe { out.get_unchecked_mut(start ..) })
	}
	fn len(&self) -> usize {
		match self.slice.len() {
			0 => 0,
			len => {
				// Whole chunks plus one for any remainder.
				let (n, r) = (len / self.width, len % self.width);
				n + (r > 0) as usize
			},
		}
	}
});
/// Back-to-front iterator over exactly-`width`-bit subslices; any
/// under-length *leading* remainder is split off at construction and
/// exposed via `remainder`. Analogous to `core::slice::RChunksExact`.
#[derive(Clone, Debug)]
pub struct RChunksExact<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The region still to be chunked; always a multiple of `width` long.
	slice: &'a BitSlice<O, T>,
	/// The leading bits (fewer than `width`) never yielded by iteration.
	extra: &'a BitSlice<O, T>,
	/// Chunk width in bits.
	width: usize,
}
impl<'a, O, T> RChunksExact<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Splits the leading `len % width` bits off up front so iteration
	/// only ever sees whole chunks. `width` must be nonzero: `len % width`
	/// panics on zero (callers are expected to enforce this).
	#[inline]
	pub(super) fn new(slice: &'a BitSlice<O, T>, width: usize) -> Self {
		let (extra, slice) =
			unsafe { slice.split_at_unchecked(slice.len() % width) };
		Self {
			slice,
			extra,
			width,
		}
	}
	/// The leading bits not covered by any exact chunk.
	#[inline]
	pub fn remainder(&self) -> &'a BitSlice<O, T> {
		self.extra
	}
}
group!(RChunksExact => &'a BitSlice<O, T> {
	fn next(&mut self) -> Option<Self::Item> {
		let len = self.slice.len();
		if len < self.width {
			return None;
		}
		// Forward iteration takes chunks off the back.
		let (rest, out) =
			unsafe { self.slice.split_at_unchecked(len - self.width) };
		self.slice = rest;
		Some(out)
	}
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
		let len = self.slice.len();
		let (split, ovf) = n.overflowing_mul(self.width);
		if split >= len || ovf {
			self.slice = Default::default();
			return None;
		}
		// `end` is the upper boundary of chunk `n` counted from the back;
		// `slice` is a multiple of `width`, so `end - width` cannot underflow.
		let end = len - split;
		let (rest, out) = unsafe {
			self.slice
				.get_unchecked(.. end)
				.split_at_unchecked(end - self.width)
		};
		self.slice = rest;
		Some(out)
	}
	fn next_back(&mut self) -> Option<Self::Item> {
		if self.slice.len() < self.width {
			return None;
		}
		// Reverse iteration takes chunks off the front.
		let (out, rest) = unsafe { self.slice.split_at_unchecked(self.width) };
		self.slice = rest;
		Some(out)
	}
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
		let len = self.slice.len();
		let (start, ovf) = n.overflowing_mul(self.width);
		if start >= len || ovf {
			self.slice = Default::default();
			return None;
		}
		// Chunk `n` from the front begins at `n * width` because `slice`
		// is an exact multiple of `width`.
		let (out, rest) = unsafe {
			self.slice.get_unchecked(start ..).split_at_unchecked(self.width)
		};
		self.slice = rest;
		Some(out)
	}
	fn len(&self) -> usize {
		// `slice` is pre-trimmed to a multiple of `width`.
		self.slice.len() / self.width
	}
});
/// Mutable analog of `RChunksExact` over an aliased (`T::Alias`) region;
/// the leading remainder is recoverable via `into_remainder`.
#[derive(Debug)]
pub struct RChunksExactMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// The region still to be chunked; always a multiple of `width` long.
	slice: &'a mut BitSlice<O, T::Alias>,
	/// The leading bits (fewer than `width`) never yielded by iteration.
	extra: &'a mut BitSlice<O, T::Alias>,
	/// Chunk width in bits.
	width: usize,
}
impl<'a, O, T> RChunksExactMut<'a, O, T>
where
	O: BitOrder,
	T: BitStore,
{
	/// Splits the leading `len % width` bits off up front so iteration
	/// only ever sees whole chunks. `width` must be nonzero: `len % width`
	/// panics on zero (callers are expected to enforce this).
	#[inline]
	pub(super) fn new(slice: &'a mut BitSlice<O, T>, width: usize) -> Self {
		let (extra, slice) =
			unsafe { slice.split_at_unchecked_mut(slice.len() % width) };
		Self {
			slice,
			extra,
			width,
		}
	}
	/// Consumes the iterator, returning the leading remainder.
	#[inline]
	pub fn into_remainder(self) -> &'a mut BitSlice<O, T::Alias> {
		self.extra
	}
}
group!(RChunksExactMut => &'a mut BitSlice<O, T::Alias> {
	fn next(&mut self) -> Option<Self::Item> {
		// `mem::take` detaches the stored borrow so it can be split with
		// the yielded lifetime; a `None` return leaves the field drained.
		let slice = mem::take(&mut self.slice);
		let len = slice.len();
		if len < self.width {
			return None;
		}
		// Forward iteration takes chunks off the back.
		let (rest, out) =
			unsafe { slice.split_at_unchecked_mut_noalias(len - self.width) };
		self.slice = rest;
		Some(out)
	}
	fn nth(&mut self, n: usize) -> Option<Self::Item> {
		let slice = mem::take(&mut self.slice);
		let len = slice.len();
		let (split, ovf) = n.overflowing_mul(self.width);
		if split >= len || ovf {
			return None;
		}
		// `end` is the upper boundary of chunk `n` counted from the back;
		// `slice` is a multiple of `width`, so `end - width` cannot underflow.
		let end = len - split;
		let (rest, out) = unsafe {
			slice.get_unchecked_mut(.. end)
				.split_at_unchecked_mut_noalias(end - self.width)
		};
		self.slice = rest;
		Some(out)
	}
	fn next_back(&mut self) -> Option<Self::Item> {
		let slice = mem::take(&mut self.slice);
		if slice.len() < self.width {
			return None;
		}
		// Reverse iteration takes chunks off the front.
		let (out, rest) =
			unsafe { slice.split_at_unchecked_mut_noalias(self.width) };
		self.slice = rest;
		Some(out)
	}
	fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
		let slice = mem::take(&mut self.slice);
		let len = slice.len();
		let (start, ovf) = n.overflowing_mul(self.width);
		if start >= len || ovf {
			return None;
		}
		// Chunk `n` from the front begins at `n * width` because `slice`
		// is an exact multiple of `width`.
		let (out, rest) = unsafe {
			slice.get_unchecked_mut(start ..)
				.split_at_unchecked_mut_noalias(self.width)
		};
		self.slice = rest;
		Some(out)
	}
	fn len(&self) -> usize {
		// `slice` is pre-trimmed to a multiple of `width`.
		self.slice.len() / self.width
	}
});
// Generates the `new` constructor shared by the windowing/chunking
// iterators. The optional `$m` token adds `mut` to the borrowed slice;
// the optional trailing `. $a ()` call (e.g. `.alias_mut()`) is applied
// to the slice before storage — presumably to retype it as aliased for
// the mutable iterators; confirm against `alias_mut`'s definition.
macro_rules! new_group {
	($($t:ident $($m:ident)? $( . $a:ident ())?),+ $(,)?) => { $(
		impl<'a, O, T> $t <'a, O, T>
		where
			O: BitOrder,
			T: BitStore
		{
			#[inline(always)]
			#[allow(clippy::redundant_field_names)]
			pub(super) fn new(
				slice: &'a $($m)? BitSlice<O, T>,
				width: usize,
			) -> Self {
				Self { slice: slice $( . $a () )?, width } 
			}
		}
	)+ };
}
// The `*Exact*` iterators have hand-written constructors above (they must
// pre-split the remainder), so only these five use the generated `new`.
new_group!(
	Windows,
	Chunks,
	ChunksMut mut .alias_mut(),
	RChunks,
	RChunksMut mut .alias_mut(),
);
// Generates the constructor and trait stack for the predicate-splitting
// iterators. The caller supplies `next`/`next_back`; the macro adds
// `Debug`, `size_hint`, `FusedIterator`, and the internal `SplitIter`
// `finish` hook that yields the final segment exactly once.
// NOTE(review): the `$( where $alias:ident )?` capture is matched but
// never used in the expansion.
macro_rules! split {
	($iter:ident => $item:ty $( where $alias:ident )? {
		$next:item
		$next_back:item
	}) => {
		impl<'a, O, T, P> $iter <'a, O, T, P>
		where
			O: BitOrder,
			T: BitStore,
			P: FnMut(usize, &bool) -> bool,
		{
			#[inline]
			pub(super) fn new(slice: $item, pred: P) -> Self {
				Self {
					slice,
					pred,
					done: false,
				}
			}
		}
		// Hand-written `Debug`: `P` is an arbitrary closure and cannot be
		// formatted, so only `slice` and `done` are shown.
		impl<O, T, P> Debug for $iter <'_, O, T, P>
		where
			O: BitOrder,
			T: BitStore,
			P: FnMut(usize, &bool) -> bool,
		{
			fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
				fmt.debug_struct(stringify!($iter))
					.field("slice", &self.slice)
					.field("done", &self.done)
					.finish()
			}
		}
		impl<'a, O, T, P> Iterator for $iter <'a, O, T, P>
		where
			O: BitOrder,
			T: BitStore,
			P: FnMut(usize, &bool) -> bool,
		{
			type Item = $item;
			#[inline]
			$next
			#[inline]
			fn size_hint(&self) -> (usize, Option<usize>) {
				if self.done {
					(0, Some(0))
				}
				else {
					// At least the final segment; at most one segment per
					// bit plus the trailing empty segment.
					(1, Some(self.slice.len() + 1))
				}
			}
		}
		impl<'a, O, T, P> DoubleEndedIterator for $iter <'a, O, T, P>
		where
			O: BitOrder,
			T: BitStore,
			P: FnMut(usize, &bool) -> bool,
		{
			#[inline]
			$next_back
		}
		impl<'a, O, T, P> core::iter::FusedIterator for $iter <'a, O, T, P>
		where
			O: BitOrder,
			T: BitStore,
			P: FnMut(usize, &bool) -> bool,
		{
		}
		impl<'a, O, T, P> SplitIter for $iter <'a, O, T, P>
		where
			O: BitOrder,
			T: BitStore,
			P: FnMut(usize, &bool) -> bool,
		{
			// Yields the remaining slice as the last segment, then fuses.
			#[inline]
			fn finish(&mut self) -> Option<Self::Item> {
				if self.done {
					None
				}
				else {
					self.done = true;
					Some(mem::take(&mut self.slice))
				}
			}
		}
	};
}
/// Immutable splitting iterator, analogous to `core::slice::Split`.
///
/// Yields the (possibly empty) runs of bits between bits matching `pred`.
#[derive(Clone)]
pub struct Split<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The remaining, not-yet-yielded, portion of the source slice.
    slice: &'a BitSlice<O, T>,
    /// Predicate over `(index, &bit)` selecting separator bits.
    pred: P,
    /// Marks exhaustion, since the final yielded segment may itself be empty.
    done: bool,
}
split!(Split => &'a BitSlice<O, T> {
    fn next(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }
        // Find the first separator bit; everything before it is the segment.
        match self.slice
            .iter()
            .enumerate()
            .position(|(idx, bit)| (self.pred)(idx, bit))
        {
            // No separator left: the remainder is the final segment.
            None => self.finish(),
            Some(idx) => unsafe {
                // `idx` came from `position`, so both subranges are in bounds.
                let out = self.slice.get_unchecked(.. idx);
                // Skip over the separator bit itself.
                self.slice = self.slice.get_unchecked(idx + 1 ..);
                Some(out)
            },
        }
    }
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }
        // Find the last separator bit; everything after it is the segment.
        match self.slice
            .iter()
            .enumerate()
            .rposition(|(idx, bit)| (self.pred)(idx, bit))
        {
            None => self.finish(),
            Some(idx) => unsafe {
                // `idx` came from `rposition`, so both subranges are in bounds.
                let out = self.slice.get_unchecked(idx + 1 ..);
                self.slice = self.slice.get_unchecked(.. idx);
                Some(out)
            },
        }
    }
});
/// Mutable splitting iterator, analogous to `core::slice::SplitMut`.
///
/// The element type is tainted to `T::Alias` because yielded segments may
/// share storage elements with the retained remainder.
pub struct SplitMut<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The remaining, not-yet-yielded, portion of the source slice.
    slice: &'a mut BitSlice<O, T::Alias>,
    /// Predicate over `(index, &bit)` selecting separator bits.
    pred: P,
    /// Marks exhaustion, since the final yielded segment may itself be empty.
    done: bool,
}
split!(SplitMut => &'a mut BitSlice<O, T::Alias> {
    fn next(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }
        // Scope the predicate borrow so `self.slice` can be moved below.
        let idx_opt = {
            let pred = &mut self.pred;
            self.slice
                .iter()
                .enumerate()
                .position(|(idx, bit)| (pred)(idx, bit))
        };
        match idx_opt
        {
            None => self.finish(),
            Some(idx) => unsafe {
                // Take ownership of the borrow in order to split it into two
                // non-overlapping `&mut` regions.
                let slice = mem::take(&mut self.slice);
                let (out, rest) = slice.split_at_unchecked_mut_noalias(idx);
                // Drop the separator bit from the retained remainder.
                self.slice = rest.get_unchecked_mut(1 ..);
                Some(out)
            },
        }
    }
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.done {
            return None;
        }
        let idx_opt = {
            let pred = &mut self.pred;
            self.slice
                .iter()
                .enumerate()
                .rposition(|(idx, bit)| (pred)(idx, bit))
        };
        match idx_opt
        {
            None => self.finish(),
            Some(idx) => unsafe {
                let slice = mem::take(&mut self.slice);
                let (rest, out) = slice.split_at_unchecked_mut_noalias(idx);
                self.slice = rest;
                // Drop the separator bit from the front of the yielded tail.
                Some(out.get_unchecked_mut(1 ..))
            },
        }
    }
});
/// Reverse immutable splitting iterator, analogous to `core::slice::RSplit`.
///
/// Identical to `Split`, but yields segments from the back first.
#[derive(Clone)]
pub struct RSplit<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The remaining, not-yet-yielded, portion of the source slice.
    slice: &'a BitSlice<O, T>,
    /// Predicate over `(index, &bit)` selecting separator bits.
    pred: P,
    /// Marks exhaustion, since the final yielded segment may itself be empty.
    done: bool,
}
// `RSplit` delegates to a temporary `Split` with swapped direction: its
// `next` is `Split::next_back` and vice versa, with state moved in and out
// around each call.
split!(RSplit => &'a BitSlice<O, T> {
    fn next(&mut self) -> Option<Self::Item> {
        let mut split = Split::<'a, O, T, &mut P> {
            slice: mem::take(&mut self.slice),
            pred: &mut self.pred,
            done: self.done,
        };
        let out = split.next_back();
        // Restore whatever state the delegate left behind.
        self.slice = mem::take(&mut split.slice);
        self.done = split.done;
        out
    }
    fn next_back(&mut self) -> Option<Self::Item> {
        let mut split = Split::<'a, O, T, &mut P> {
            slice: mem::take(&mut self.slice),
            pred: &mut self.pred,
            done: self.done,
        };
        let out = split.next();
        self.slice = mem::take(&mut split.slice);
        self.done = split.done;
        out
    }
});
/// Reverse mutable splitting iterator, analogous to `core::slice::RSplitMut`.
///
/// The element type is tainted to `T::Alias` because yielded segments may
/// share storage elements with the retained remainder.
pub struct RSplitMut<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The remaining, not-yet-yielded, portion of the source slice.
    slice: &'a mut BitSlice<O, T::Alias>,
    /// Predicate over `(index, &bit)` selecting separator bits.
    pred: P,
    /// Marks exhaustion, since the final yielded segment may itself be empty.
    done: bool,
}
// As with `RSplit`, delegate to a direction-swapped `SplitMut`.
split!(RSplitMut => &'a mut BitSlice<O, T::Alias> {
    fn next(&mut self) -> Option<Self::Item> {
        let mut split = SplitMut::<'a, O, T, &mut P> {
            slice: mem::take(&mut self.slice),
            pred: &mut self.pred,
            done: self.done,
        };
        let out = split.next_back();
        // Restore whatever state the delegate left behind.
        self.slice = mem::take(&mut split.slice);
        self.done = split.done;
        out
    }
    fn next_back(&mut self) -> Option<Self::Item> {
        let mut split = SplitMut::<'a, O, T, &mut P> {
            slice: mem::take(&mut self.slice),
            pred: &mut self.pred,
            done: self.done,
        };
        let out = split.next();
        self.slice = mem::take(&mut split.slice);
        self.done = split.done;
        out
    }
});
/// Internal extension for splitting iterators: emission of the final segment.
trait SplitIter: DoubleEndedIterator {
    /// Yields the remaining slice as the last item, then marks exhaustion.
    fn finish(&mut self) -> Option<Self::Item>;
}
/// Count-limited forward splitting iterator, analogous to
/// `core::slice::SplitN`.
pub struct SplitN<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The unlimited splitting iterator being wrapped.
    inner: Split<'a, O, T, P>,
    /// Number of segments still permitted to be yielded.
    count: usize,
}
/// Count-limited forward mutable splitting iterator, analogous to
/// `core::slice::SplitNMut`.
pub struct SplitNMut<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The unlimited splitting iterator being wrapped.
    inner: SplitMut<'a, O, T, P>,
    /// Number of segments still permitted to be yielded.
    count: usize,
}
/// Count-limited reverse splitting iterator, analogous to
/// `core::slice::RSplitN`.
pub struct RSplitN<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The unlimited splitting iterator being wrapped.
    inner: RSplit<'a, O, T, P>,
    /// Number of segments still permitted to be yielded.
    count: usize,
}
/// Count-limited reverse mutable splitting iterator, analogous to
/// `core::slice::RSplitNMut`.
pub struct RSplitNMut<'a, O, T, P>
where
    O: BitOrder,
    T: BitStore,
    P: FnMut(usize, &bool) -> bool,
{
    /// The unlimited splitting iterator being wrapped.
    inner: RSplitMut<'a, O, T, P>,
    /// Number of segments still permitted to be yielded.
    count: usize,
}
/// Generates the `…SplitN` wrappers around the corresponding splitting
/// iterators, limiting them to at most `count` yielded segments.
///
/// The final permitted segment is the undivided remainder (via
/// `SplitIter::finish`), matching the behavior of `core::slice::SplitN`.
macro_rules! split_n {
    ($outer:ident => $inner:ident => $item:ty $( where $alias:ident )?) => {
        impl<'a, O, T, P> $outer <'a, O, T, P>
        where
            O: BitOrder,
            T: BitStore,
            P: FnMut(usize, &bool) -> bool,
        {
            /// Wraps a splitting iterator with a segment-count budget.
            pub(super) fn new(
                slice: $item,
                pred: P,
                count: usize,
            ) -> Self
            {Self{
                inner: <$inner<'a, O, T, P>>::new(slice, pred),
                count,
            }}
        }
        // The predicate closure is not `Debug`, so it is omitted here.
        impl<O, T, P> Debug for $outer <'_, O, T, P>
        where
            O: BitOrder,
            T: BitStore,
            P: FnMut(usize, &bool) -> bool
        {
            fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
                fmt.debug_struct(stringify!($outer))
                    .field("slice", &self.inner.slice)
                    .field("count", &self.count)
                    .finish()
            }
        }
        impl<'a, O, T, P> Iterator for $outer <'a, O, T, P>
        where
            O: BitOrder,
            T: BitStore,
            P: FnMut(usize, &bool) -> bool,
            $( T::$alias: radium::Radium<<<T as BitStore>::Alias as BitStore>::Mem>, )?
        {
            type Item = <$inner <'a, O, T, P> as Iterator>::Item;
            #[inline]
            fn next(&mut self) -> Option<Self::Item> {
                match self.count {
                    // Budget exhausted: yield nothing further.
                    0 => None,
                    // Last permitted segment: emit the remainder undivided.
                    1 => {
                        self.count -= 1;
                        self.inner.finish()
                    },
                    _ => {
                        self.count -= 1;
                        self.inner.next()
                    },
                }
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                let (low, hi) = self.inner.size_hint();
                // Clamp BOTH bounds by the remaining budget: when `count` is
                // zero, `next` returns `None` even though the inner iterator
                // would still report a lower bound of 1. Leaving the lower
                // bound unclamped violates the `size_hint` contract.
                (cmp::min(self.count, low), hi.map(|h| cmp::min(self.count, h)))
            }
        }
        impl<O, T, P> core::iter::FusedIterator for $outer <'_, O, T, P>
        where
            O: BitOrder,
            T: BitStore,
            P: FnMut(usize, &bool) -> bool,
            $( T::$alias: radium::Radium<<<T as BitStore>::Alias as BitStore>::Mem>, )?
        {
        }
    };
}
// Instantiate the count-limited wrappers over their unlimited counterparts.
split_n!(SplitN => Split => &'a BitSlice<O, T>);
split_n!(SplitNMut => SplitMut => &'a mut BitSlice<O, T::Alias> );
split_n!(RSplitN => RSplit => &'a BitSlice<O, T>);
split_n!(RSplitNMut => RSplitMut => &'a mut BitSlice<O, T::Alias> );
/// Generates `…NoAlias` adapter types for the mutable iterators.
///
/// Each entry maps a `$from` iterator (whose items are alias-tainted) to a
/// `$to` wrapper whose items have the taint removed via the unsafe `$map`
/// function. The wrapper forwards every iterator method to the inner type.
///
/// The `remove_alias` methods are safe to expose only because the caller of
/// the original iterator promises, by invoking them, that yielded regions are
/// not retained across iterations (see the crate's aliasing documentation).
macro_rules! noalias {
    ( $(
        $from:ident $( ( $p:ident ) )?
        => $alias:ty
        => $to:ident
        => $item:ty
        => $map:path
        ;
    )+ ) => { $(
        pub struct $to <'a, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
            inner: $from <'a, O, T $( , $p )? >,
        }
        impl<'a, O, T $( , $p )? > $from <'a, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
            #[inline(always)]
            pub fn remove_alias(self) -> $to <'a, O, T $( , $p )? > {
                $to ::new(self)
            }
        }
        impl<'a, O, T $( , $p )? > $to <'a, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
            #[inline(always)]
            fn new(inner: $from<'a, O, T $( , $p )? >) -> Self {
                Self { inner }
            }
        }
        // Forward every item through the de-aliasing `$map` function.
        impl<'a, O, T $( , $p )? > Iterator for $to <'a, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
            type Item = $item;
            #[inline(always)]
            fn next(&mut self) -> Option<Self::Item> {
                self.inner.next().map(|item| unsafe { $map(item) })
            }
            #[inline(always)]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.inner.size_hint()
            }
            #[inline(always)]
            fn count(self) -> usize {
                self.inner.count()
            }
            #[inline(always)]
            fn nth(&mut self, n: usize) -> Option<Self::Item> {
                self.inner.nth(n).map(|item| unsafe { $map(item) })
            }
            #[inline(always)]
            fn last(self) -> Option<Self::Item> {
                self.inner.last().map(|item| unsafe { $map(item) })
            }
        }
        impl<'a, O, T $( , $p )? > DoubleEndedIterator for $to <'a, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $from <'a, O, T $( , $p )? >: DoubleEndedIterator<Item = $alias >,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
            #[inline(always)]
            fn next_back(&mut self) -> Option<Self::Item> {
                self.inner
                    .next_back()
                    .map(|item| unsafe { $map(item) })
            }
            #[inline(always)]
            fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
                self.inner
                    .nth_back(n)
                    .map(|item| unsafe { $map(item) })
            }
        }
        impl<'a, O, T $( , $p )? > ExactSizeIterator for $to <'a, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $from <'a, O, T $( , $p )? >: ExactSizeIterator,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
            #[inline(always)]
            fn len(&self) -> usize {
                self.inner.len()
            }
        }
        impl<O, T $( , $p )? > FusedIterator for $to <'_, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
        }
        unsafe impl<O, T $( , $p )? > Send for $to <'_, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
        }
        unsafe impl<O, T $( , $p )? > Sync for $to <'_, O, T $( , $p )? >
        where
            O: BitOrder,
            T: BitStore,
            $( $p : FnMut(usize, &bool) -> bool, )?
        {
        }
    )+ };
}
// `IterMut` items are single-bit proxies and de-alias via `BitMut`;
// every other mutable iterator yields subslices and de-aliases via
// `BitSlice::unalias_mut`.
noalias! {
    IterMut => <usize as BitSliceIndex<'a, O, T::Alias>>::Mut
    => IterMutNoAlias => <usize as BitSliceIndex<'a, O, T>>::Mut
    => BitMut::remove_alias;
    ChunksMut => &'a mut BitSlice<O, T::Alias>
    => ChunksMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    ChunksExactMut => &'a mut BitSlice<O, T::Alias>
    => ChunksExactMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    RChunksMut => &'a mut BitSlice<O, T::Alias>
    => RChunksMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    RChunksExactMut => &'a mut BitSlice<O, T::Alias>
    => RChunksExactMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    SplitMut (P) => &'a mut BitSlice<O, T::Alias>
    => SplitMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    RSplitMut (P) => &'a mut BitSlice<O, T::Alias>
    => RSplitMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    SplitNMut (P) => &'a mut BitSlice<O, T::Alias>
    => SplitNMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
    RSplitNMut (P) => &'a mut BitSlice<O, T::Alias>
    => RSplitNMutNoAlias => &'a mut BitSlice<O, T>
    => BitSlice::unalias_mut;
}
} | |
/// Operator-trait implementations for `BitSlice`.
mod ops {
    use crate::{
        access::BitAccess,
        domain::DomainMut,
        order::BitOrder,
        slice::{
            BitSlice,
            BitSliceIndex,
        },
        store::BitStore,
    };
    use core::ops::{
        BitAndAssign,
        BitOrAssign,
        BitXorAssign,
        Index,
        IndexMut,
        Not,
        Range,
        RangeFrom,
        RangeFull,
        RangeInclusive,
        RangeTo,
        RangeToInclusive,
    };
    // The boolean-assign operators accept any `bool` stream. Bits of `self`
    // past the end of `rhs` are combined with `false`: `&=` clears them,
    // while `|=` and `^=` leave them unchanged.
    impl<O, T, Rhs> BitAndAssign<Rhs> for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
        Rhs: IntoIterator<Item = bool>,
    {
        fn bitand_assign(&mut self, rhs: Rhs) {
            let mut iter = rhs.into_iter();
            self.for_each(|_, bit| bit & iter.next().unwrap_or(false));
        }
    }
    impl<O, T, Rhs> BitOrAssign<Rhs> for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
        Rhs: IntoIterator<Item = bool>,
    {
        fn bitor_assign(&mut self, rhs: Rhs) {
            let mut iter = rhs.into_iter();
            self.for_each(|_, bit| bit | iter.next().unwrap_or(false));
        }
    }
    impl<O, T, Rhs> BitXorAssign<Rhs> for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
        Rhs: IntoIterator<Item = bool>,
    {
        fn bitxor_assign(&mut self, rhs: Rhs) {
            let mut iter = rhs.into_iter();
            self.for_each(|_, bit| bit ^ iter.next().unwrap_or(false));
        }
    }
    // Single-bit indexing yields `&bool` (a bit cannot be referenced
    // directly); delegation goes through `BitSliceIndex`.
    impl<O, T> Index<usize> for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        type Output = bool;
        fn index(&self, index: usize) -> &Self::Output {
            index.index(self)
        }
    }
    /// Generates range-indexing impls; each range type yields a subslice.
    macro_rules! index {
        ($($t:ty),+ $(,)?) => { $(
            impl<O, T> Index<$t> for BitSlice<O, T>
            where
                O: BitOrder,
                T: BitStore,
            {
                type Output = Self;
                fn index(&self, index: $t) -> &Self::Output {
                    index.index(self)
                }
            }
            impl<O, T> IndexMut<$t> for BitSlice<O, T>
            where
                O: BitOrder,
                T: BitStore,
            {
                fn index_mut(&mut self, index: $t) -> &mut Self::Output {
                    index.index_mut(self)
                }
            }
        )+ };
    }
    index!(
        Range<usize>,
        RangeFrom<usize>,
        RangeFull,
        RangeInclusive<usize>,
        RangeTo<usize>,
        RangeToInclusive<usize>,
    );
    // In-place inversion. Works element-at-a-time over the slice's domain:
    // partial edge elements are inverted only under the live-bit mask, and
    // fully-spanned body elements are inverted wholesale.
    impl<'a, O, T> Not for &'a mut BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        type Output = Self;
        fn not(self) -> Self::Output {
            match self.domain_mut() {
                // Slice lives entirely inside one element.
                DomainMut::Enclave { head, elem, tail } => {
                    elem.invert_bits(O::mask(head, tail));
                },
                DomainMut::Region { head, body, tail } => {
                    if let Some((head, elem)) = head {
                        elem.invert_bits(O::mask(head, None));
                    }
                    for elem in body {
                        *elem = !*elem;
                    }
                    if let Some((elem, tail)) = tail {
                        elem.invert_bits(O::mask(None, tail));
                    }
                },
            }
            self
        }
    }
}
/// The `BitMut` proxy type, emulating `&mut bool` for a single bit.
mod proxy {
    use crate::{
        access::BitAccess,
        index::BitIdx,
        order::{
            BitOrder,
            Lsb0,
        },
        ptr::BitPtr,
        slice::BitSlice,
        store::BitStore,
    };
    use core::{
        fmt::{
            self,
            Debug,
            Formatter,
        },
        marker::PhantomData,
        mem,
        ops::{
            Deref,
            DerefMut,
        },
        ptr::NonNull,
    };
    /// Write-back proxy for a single bit in a `BitSlice`.
    ///
    /// Caches the bit's value in `data` on construction; `Deref`/`DerefMut`
    /// operate on the cache, and `Drop` (or `set`) commits it back to memory.
    #[repr(C)]
    pub struct BitMut<'a, O = Lsb0, T = usize>
    where
        O: BitOrder,
        T: BitStore,
    {
        /// Location of the element containing the referent bit.
        addr: NonNull<T::Access>,
        /// Index of the referent bit within `*addr`.
        head: BitIdx<T::Mem>,
        /// Locally-cached copy of the bit, written back on drop.
        data: bool,
        /// This type acts as an exclusive borrow of the referent slice.
        _ref: PhantomData<&'a mut BitSlice<O, T>>,
    }
    impl<O, T> BitMut<'_, O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        /// Constructs the proxy from a raw element address and bit index.
        ///
        /// Caller must guarantee `addr` is non-null, valid for reads and
        /// writes, and that `head` is a live index for the ordering `O`.
        #[inline]
        pub(crate) unsafe fn new_unchecked(
            addr: *const T::Access,
            head: BitIdx<T::Mem>,
        ) -> Self
        {
            Self {
                addr: NonNull::new_unchecked(addr as *mut T::Access),
                head,
                // Snapshot the current value of the referent bit.
                data: (&*(addr as *const T)).get_bit::<O>(head),
                _ref: PhantomData,
            }
        }
        /// Views the referent bit as a one-bit immutable slice.
        #[inline]
        pub fn as_bitslice(&self) -> &BitSlice<O, T> {
            unsafe {
                BitPtr::new_unchecked(self.addr.as_ptr() as *const T, self.head, 1)
            }
            .to_bitslice_ref()
        }
        /// Views the referent bit as a one-bit mutable slice.
        #[inline]
        pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<O, T> {
            unsafe {
                BitPtr::new_unchecked(self.addr.as_ptr() as *mut T, self.head, 1)
            }
            .to_bitslice_mut()
        }
        /// Strips alias tainting. Sound because the two types share layout
        /// (`#[repr(C)]`); used by the `…NoAlias` iterator adapters.
        pub(crate) unsafe fn remove_alias(this: BitMut<O, T::Alias>) -> Self {
            core::mem::transmute(this)
        }
        /// Writes `value` into the referent bit, consuming the proxy.
        #[inline]
        pub fn set(mut self, value: bool) {
            self.write(value);
            // Skip `Drop`, which would redundantly write the cache again.
            mem::forget(self);
        }
        /// Commits `value` to the referent bit in memory.
        #[inline]
        fn write(&mut self, value: bool) {
            unsafe { (&*self.addr.as_ptr()).write_bit::<O>(self.head, value) }
        }
    }
    impl<O, T> Debug for BitMut<'_, O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            let bitptr = self.as_bitslice().bitptr();
            bitptr.render(fmt, "Mut", &[("bit", &self.data as &dyn Debug)])
        }
    }
    impl<O, T> Deref for BitMut<'_, O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        type Target = bool;
        #[inline]
        fn deref(&self) -> &Self::Target {
            &self.data
        }
    }
    impl<O, T> DerefMut for BitMut<'_, O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline]
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.data
        }
    }
    impl<O, T> Drop for BitMut<'_, O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline(always)]
        fn drop(&mut self) {
            // Commit the cached (possibly modified) value back to memory.
            let value = self.data;
            self.write(value);
        }
    }
}
/// Batch-accelerated method variants for the `Lsb0` and `Msb0` orderings,
/// which implement `BitField` and can therefore move whole `usize` chunks.
mod specialization {
    use crate::{
        devel as dvl,
        field::BitField,
        mem::BitMemory,
        order::{
            Lsb0,
            Msb0,
        },
        slice::BitSlice,
        store::BitStore,
    };
    use core::ops::RangeBounds;
    /// Duplicates the given functions into `impl` blocks for both known
    /// orderings (true specialization is not available on stable Rust).
    macro_rules! specialize {
        ($($func:item)*) => {
            impl<T> BitSlice<Lsb0, T>
            where T: BitStore {
                $( #[inline] $func )*
            }
            impl<T> BitSlice<Msb0, T>
            where T: BitStore {
                $( #[inline] $func )*
            }
        };
    }
    specialize! {
        // Chunk-at-a-time copy; panics when the lengths differ.
        pub(crate) fn sp_copy_from_bitslice(&mut self, src: &Self) {
            assert_eq!(
                self.len(),
                src.len(),
                "Copying between slices requires equal lengths"
            );
            let chunk_size = <usize as BitMemory>::BITS as usize;
            for (to, from) in self
                .chunks_mut(chunk_size)
                .remove_alias()
                .zip(src.chunks(chunk_size))
            {
                to.store::<usize>(from.load::<usize>())
            }
        }
        // Overlapping-region copy within one slice. Caller must ensure both
        // `src` and `dest .. dest + src.len()` are in bounds.
        pub(crate) unsafe fn sp_copy_within_unchecked<R>(
            &mut self,
            src: R,
            dest: usize,
        )
        where R:RangeBounds<usize> {
            let source = dvl::normalize_range(src, self.len());
            // When the destination start lies inside the source range, a
            // forward copy would clobber not-yet-read bits, so copy from the
            // back instead.
            let rev = source.contains(&dest);
            let dest = dest .. dest + source.len();
            let from: *const Self = self.get_unchecked(source) as *const _;
            let to: *mut Self = self.get_unchecked_mut(dest) as *mut _;
            let chunk_size = <usize as BitMemory>::BITS as usize;
            if rev {
                for (src, dst) in (&*from).alias()
                    .rchunks(chunk_size)
                    .zip((&mut *to).rchunks_mut(chunk_size))
                {
                    dst.store::<usize>(src.load::<usize>());
                }
            }
            else {
                for (src, dst) in (&*from).alias()
                    .chunks(chunk_size)
                    .zip((&mut *to).chunks_mut(chunk_size))
                {
                    dst.store::<usize>(src.load::<usize>());
                }
            }
        }
        // Chunk-at-a-time equality comparison.
        pub(crate) fn sp_eq(&self, other: &Self) -> bool {
            if self.len() != other.len() {
                return false;
            }
            let chunk_size = <usize as BitMemory>::BITS as usize;
            self.chunks(chunk_size)
                .zip(other.chunks(chunk_size))
                .all(|(a, b)| a.load::<usize>() == b.load::<usize>())
        }
    }
}
/// General trait implementations for `BitSlice`.
mod traits {
    use crate::{
        domain::Domain,
        mem::{
            BitMemory,
            BitRegister,
        },
        order::{
            BitOrder,
            Lsb0,
            Msb0,
        },
        slice::BitSlice,
        store::BitStore,
        view::BitView,
    };
    use core::{
        any::TypeId,
        cmp,
        convert::TryFrom,
        fmt::{
            self,
            Binary,
            Debug,
            Display,
            Formatter,
            LowerHex,
            Octal,
            UpperHex,
        },
        hash::{
            Hash,
            Hasher,
        },
        str,
    };
    use tap::pipe::Pipe;
    #[cfg(feature = "alloc")]
    use crate::vec::BitVec;
    #[cfg(feature = "alloc")]
    use alloc::borrow::ToOwned;
    impl<O, T> Eq for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
    }
    impl<O, T> Ord for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline]
        fn cmp(&self, rhs: &Self) -> cmp::Ordering {
            // `partial_cmp` below never returns `None` for bit slices.
            self.partial_cmp(rhs)
                .expect("BitSlice has a total ordering")
        }
    }
    impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
            // Generic path: bit-by-bit comparison.
            let fallback = || {
                if self.len() != rhs.len() {
                    return false;
                }
                self.iter()
                    .copied()
                    .zip(rhs.iter().copied())
                    .all(|(l, r)| l == r)
            };
            // When both sides share ordering and storage, dispatch to the
            // chunk-at-a-time `sp_eq` specialization. The pointer casts only
            // rewrite type parameters proven identical by the `TypeId` checks.
            if TypeId::of::<O1>() == TypeId::of::<O2>()
                && TypeId::of::<T1>() == TypeId::of::<T2>()
            {
                if TypeId::of::<O1>() == TypeId::of::<Lsb0>() {
                    let this: &BitSlice<Lsb0, T1> =
                        unsafe { &*(self as *const _ as *const _) };
                    let that: &BitSlice<Lsb0, T1> =
                        unsafe { &*(rhs as *const _ as *const _) };
                    this.sp_eq(that)
                }
                else if TypeId::of::<O1>() == TypeId::of::<Msb0>() {
                    let this: &BitSlice<Msb0, T1> =
                        unsafe { &*(self as *const _ as *const _) };
                    let that: &BitSlice<Msb0, T1> =
                        unsafe { &*(rhs as *const _ as *const _) };
                    this.sp_eq(that)
                }
                else {
                    fallback()
                }
            }
            else {
                fallback()
            }
        }
    }
    // Reference-forwarding equality impls so mixed reference levels compare.
    impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for &BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
            **self == rhs
        }
    }
    impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
            **self == rhs
        }
    }
    impl<O1, O2, T1, T2> PartialEq<&BitSlice<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn eq(&self, rhs: &&BitSlice<O2, T2>) -> bool {
            *self == **rhs
        }
    }
    impl<O1, O2, T1, T2> PartialEq<&mut BitSlice<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn eq(&self, rhs: &&mut BitSlice<O2, T2>) -> bool {
            *self == **rhs
        }
    }
    // Lexicographic ordering: first differing bit decides; ties break by
    // length, so a prefix orders before its extension.
    impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            for (l, r) in self.iter().zip(rhs.iter()) {
                match (l, r) {
                    (true, false) => return Some(cmp::Ordering::Greater),
                    (false, true) => return Some(cmp::Ordering::Less),
                    _ => continue,
                }
            }
            self.len().partial_cmp(&rhs.len())
        }
    }
    // Reference-forwarding ordering impls.
    impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for &BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            (*self).partial_cmp(rhs)
        }
    }
    impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            (**self).partial_cmp(rhs)
        }
    }
    impl<O1, O2, T1, T2> PartialOrd<&BitSlice<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &&BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            (*self).partial_cmp(&**rhs)
        }
    }
    impl<O1, O2, T1, T2> PartialOrd<&mut BitSlice<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &&mut BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            (*self).partial_cmp(&**rhs)
        }
    }
    impl<O1, O2, T1, T2> PartialOrd<&mut BitSlice<O2, T2>> for &BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &&mut BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            (**self).partial_cmp(&**rhs)
        }
    }
    impl<O1, O2, T1, T2> PartialOrd<&BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitStore,
    {
        #[inline]
        fn partial_cmp(&self, rhs: &&BitSlice<O2, T2>) -> Option<cmp::Ordering> {
            (**self).partial_cmp(&**rhs)
        }
    }
    // Fails (returning the input) when the slice exceeds the maximum
    // representable bit-slice length.
    impl<'a, O, T> TryFrom<&'a [T]> for &'a BitSlice<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Error = &'a [T];
        #[inline]
        fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> {
            BitSlice::from_slice(slice).ok_or(slice)
        }
    }
    impl<O, T> Default for &BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline(always)]
        fn default() -> Self {
            BitSlice::empty()
        }
    }
    impl<O, T> Default for &mut BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline(always)]
        fn default() -> Self {
            BitSlice::empty_mut()
        }
    }
    // `Debug` renders the pointer structure, then the binary contents.
    impl<O, T> Debug for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            self.bitptr().render(fmt, "Slice", None)?;
            fmt.write_str(" ")?;
            Binary::fmt(self, fmt)
        }
    }
    impl<O, T> Display for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline(always)]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            Binary::fmt(self, fmt)
        }
    }
    /// Generates the numeric formatting impls. Each storage element is
    /// rendered as one list entry such as `0b0110…`, built by grouping
    /// `$blksz` bits per output digit and using `$base` for digits ≥ 10.
    macro_rules! fmt {
        ($trait:ident, $base:expr, $pfx:expr, $blksz:expr) => {
            impl<O, T> $trait for BitSlice<O, T>
            where
                O: BitOrder,
                T: BitStore,
            {
                fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
                    // Adapter that lets a raw byte buffer be emitted through
                    // `debug_list` without quoting.
                    struct Seq<'a>(&'a [u8]);
                    impl Debug for Seq<'_> {
                        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
                            fmt.write_str(unsafe {
                                str::from_utf8_unchecked(self.0)
                            })
                        }
                    }
                    // Alternate mode keeps the `0b`/`0o`/`0x` prefix bytes.
                    let start = if fmt.alternate() { 0 } else { 2 };
                    let mut dbg = fmt.debug_list();
                    const W: usize = <usize as BitMemory>::BITS as usize / $blksz;
                    // `w[0]` is the literal '0', `w[1]` the radix letter,
                    // and the rest receives the rendered digits.
                    let mut w: [u8; W + 2] = [b'0'; W + 2];
                    w[1] = $pfx;
                    let mut writer = |bits: &BitSlice<O, T::Mem>| {
                        let mut end = 2;
                        // Group from the back so a short leading group sits
                        // at the most-significant position.
                        for (index, chunk) in bits.rchunks($blksz).rev().enumerate()
                        {
                            let mut val = 0u8;
                            for bit in chunk {
                                val <<= 1;
                                val |= *bit as u8;
                            }
                            w[2 + index] = match val {
                                v @ 0 ..= 9 => b'0' + v,
                                // NOTE(review): a chunk of at most 4 bits
                                // cannot exceed 15; `10 ..= 15` would be the
                                // tighter bound here, though 16 is harmless.
                                v @ 10 ..= 16 => $base + (v - 10),
                                _ => unsafe { core::hint::unreachable_unchecked() },
                            };
                            end += 1;
                        }
                        dbg.entry(&Seq(&w[start .. end]));
                    };
                    // Walk the slice's domain, rendering only live bits of
                    // the partial edge elements.
                    match self.domain() {
                        Domain::Enclave { head, elem, tail } => {
                            let tmp = elem.load_value();
                            let bits = tmp.view_bits::<O>();
                            unsafe {
                                bits.get_unchecked(
                                    head.value() as usize .. tail.value() as usize,
                                )
                            }
                            .pipe(writer);
                        },
                        Domain::Region { head, body, tail } => {
                            if let Some((head, elem)) = head {
                                let tmp = elem.load_value();
                                let bits = tmp.view_bits::<O>();
                                unsafe {
                                    bits.get_unchecked(head.value() as usize ..)
                                }
                                .pipe(&mut writer);
                            }
                            for elem in body.iter() {
                                elem.view_bits::<O>().pipe(&mut writer);
                            }
                            if let Some((elem, tail)) = tail {
                                let tmp = elem.load_value();
                                let bits = tmp.view_bits::<O>();
                                unsafe {
                                    bits.get_unchecked(.. tail.value() as usize)
                                }
                                .pipe(&mut writer);
                            }
                        },
                    }
                    dbg.finish()
                }
            }
        };
    }
    fmt!(Binary, b'0', b'b', 1);
    fmt!(Octal, b'0', b'o', 3);
    fmt!(LowerHex, b'a', b'x', 4);
    fmt!(UpperHex, b'A', b'x', 4);
    // Hashes each bit as one byte; not compatible with hashing the raw
    // underlying elements.
    impl<O, T> Hash for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
    {
        #[inline]
        fn hash<H>(&self, hasher: &mut H)
        where H: Hasher {
            for bit in self {
                hasher.write_u8(*bit as u8);
            }
        }
    }
    // Thread-safety is delegated to the store's `Threadsafe` marker, which
    // is a raw pointer (and thus `!Send`/`!Sync`) for non-atomic aliased
    // storage.
    unsafe impl<O, T> Send for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
        T::Threadsafe: Send,
    {
    }
    unsafe impl<O, T> Sync for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitStore,
        T::Threadsafe: Sync,
    {
    }
    #[cfg(feature = "alloc")]
    impl<O, T> ToOwned for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Owned = BitVec<O, T>;
        #[inline]
        fn to_owned(&self) -> Self::Owned {
            BitVec::from_bitslice(self)
        }
    }
}
pub use self::{ | |
api::{ | |
from_mut, | |
from_raw_parts, | |
from_raw_parts_mut, | |
from_ref, | |
BitSliceIndex, | |
}, | |
iter::{ | |
Chunks, | |
ChunksExact, | |
ChunksExactMut, | |
ChunksMut, | |
Iter, | |
IterMut, | |
RChunks, | |
RChunksExact, | |
RChunksExactMut, | |
RChunksMut, | |
RSplit, | |
RSplitMut, | |
RSplitN, | |
RSplitNMut, | |
Split, | |
SplitMut, | |
SplitN, | |
SplitNMut, | |
Windows, | |
}, | |
proxy::BitMut, | |
}; | |
} | |
pub mod store { | |
use crate::{ | |
access::*, | |
index::{ | |
BitIdx, | |
BitMask, | |
}, | |
mem::{ | |
self, | |
BitRegister, | |
}, | |
order::BitOrder, | |
}; | |
use core::{ | |
cell::Cell, | |
fmt::Debug, | |
}; | |
#[cfg(feature = "atomic")] | |
use core::sync::atomic; | |
pub trait BitStore: 'static + seal::Sealed + Debug { | |
type Mem: BitRegister + BitStore; | |
type Access: BitAccess<Item = Self::Mem>; | |
type Alias: BitSafe<Mem = Self::Mem> + BitStore<Mem = Self::Mem>; | |
fn load_value(&self) -> Self::Mem; | |
fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool | |
where O: BitOrder { | |
unsafe { BitMask::new(self.load_value()) }.test(index.select::<O>()) | |
} | |
#[inline] | |
fn get_bits(&self, mask: BitMask<Self::Mem>) -> Self::Mem { | |
self.load_value() & mask.value() | |
} | |
type Threadsafe; | |
const __ALIGNED_TO_SIZE: [(); 0]; | |
const __ALIAS_WIDTH: [(); 0]; | |
} | |
macro_rules! store { | |
($($t:ident => $cw:ident => $aw:ident => $a:path),+ $(,)?) => { $( | |
impl BitStore for $t { | |
type Mem = $t; | |
type Access = Cell<$t>; | |
#[cfg(feature = "atomic")] | |
type Alias = $aw; | |
#[cfg(not(feature = "atomic"))] | |
type Alias = $cw; | |
#[inline(always)] | |
fn load_value(&self) -> $t { | |
*self | |
} | |
type Threadsafe = Self; | |
const __ALIGNED_TO_SIZE: [(); 0] | |
= [(); mem::aligned_to_size::<Self>()]; | |
const __ALIAS_WIDTH: [(); 0] | |
= [(); mem::cmp_layout::<Self::Mem, Self::Alias>()]; | |
} | |
impl BitStore for $cw { | |
type Mem = $t; | |
type Access = Cell<$t>; | |
type Alias = $cw; | |
#[inline(always)] | |
fn load_value(&self) -> $t { | |
self.load() | |
} | |
type Threadsafe = *const Self; | |
const __ALIAS_WIDTH: [(); 0] = []; | |
const __ALIGNED_TO_SIZE: [(); 0] = []; | |
} | |
#[cfg(feature = "atomic")] | |
impl BitStore for $aw { | |
type Mem = $t; | |
type Access = $a; | |
type Alias = $aw; | |
#[inline(always)] | |
fn load_value(&self) -> $t { | |
self.load() | |
} | |
type Threadsafe = Self; | |
const __ALIGNED_TO_SIZE: [(); 0] | |
= [(); mem::aligned_to_size::<Self>()]; | |
const __ALIAS_WIDTH: [(); 0] | |
= [(); mem::cmp_layout::<Self::Mem, Self::Alias>()]; | |
} | |
impl seal::Sealed for $t {} | |
impl seal::Sealed for $cw {} | |
#[cfg(feature = "atomic")] | |
impl seal::Sealed for $aw {} | |
)+ }; | |
} | |
store! { | |
u8 => BitSafeCellU8 => BitSafeAtomU8 => atomic::AtomicU8, | |
u16 => BitSafeCellU16 => BitSafeAtomU16 => atomic::AtomicU16, | |
u32 => BitSafeCellU32 => BitSafeAtomU32 => atomic::AtomicU32, | |
} | |
#[cfg(target_pointer_width = "64")] | |
store!(u64 => BitSafeCellU64 => BitSafeAtomU64 => atomic::AtomicU64); | |
store!(usize => BitSafeCellUsize => BitSafeAtomUsize => atomic::AtomicUsize); | |
#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] | |
compile_fail!(concat!( | |
"This architecture is currently not supported. File an issue at ", | |
env!("CARGO_PKG_REPOSITORY") | |
)); | |
mod seal { | |
pub trait Sealed {} | |
} | |
} | |
pub mod view { | |
use crate::{ | |
index::BitIdx, | |
mem::{ | |
BitMemory, | |
BitRegister, | |
}, | |
order::BitOrder, | |
ptr::BitPtr, | |
slice::BitSlice, | |
store::BitStore, | |
}; | |
pub trait BitView { | |
type Mem: BitRegister + BitStore; | |
fn view_bits<O>(&self) -> &BitSlice<O, Self::Mem> | |
where O: BitOrder; | |
#[inline(always)] | |
#[deprecated( | |
since = "0.18.0", | |
note = "The method is renamed to `.view_bits`" | |
)] | |
fn bits<O>(&self) -> &BitSlice<O, Self::Mem> | |
where O: BitOrder { | |
self.view_bits::<O>() | |
} | |
fn view_bits_mut<O>(&mut self) -> &mut BitSlice<O, Self::Mem> | |
where O: BitOrder; | |
#[inline(always)] | |
#[deprecated( | |
since = "0.18.0", | |
note = "The method is renamed to `.view_bits_mut`" | |
)] | |
fn bits_mut<O>(&mut self) -> &BitSlice<O, Self::Mem> | |
where O: BitOrder { | |
self.view_bits_mut::<O>() | |
} | |
fn const_bits() -> usize | |
where Self: Sized { | |
Self::const_elts() << <Self::Mem as BitMemory>::INDX | |
} | |
fn const_elts() -> usize | |
where Self: Sized; | |
} | |
impl<T> BitView for T | |
where T: BitRegister + BitStore | |
{ | |
type Mem = T; | |
#[inline(always)] | |
fn view_bits<O>(&self) -> &BitSlice<O, T> | |
where O: BitOrder { | |
BitSlice::from_element(self) | |
} | |
#[inline(always)] | |
fn view_bits_mut<O>(&mut self) -> &mut BitSlice<O, T> | |
where O: BitOrder { | |
BitSlice::from_element_mut(self) | |
} | |
#[inline(always)] | |
fn const_elts() -> usize { | |
1 | |
} | |
} | |
// Unsized slices can be viewed as bits, but their length is a runtime
// property, so construction can fail when the slice exceeds the maximum
// encodable span.
impl<T> BitView for [T]
where T: BitRegister + BitStore
{
type Mem = T;
#[inline]
fn view_bits<O>(&self) -> &BitSlice<O, T>
where O: BitOrder {
BitSlice::from_slice(self).expect("slice was too long to view as bits")
}
#[inline]
fn view_bits_mut<O>(&mut self) -> &mut BitSlice<O, T>
where O: BitOrder {
BitSlice::from_slice_mut(self)
.expect("slice was too long to view as bits")
}
// The trait declares `const_elts` with `where Self: Sized`, so it can
// never be invoked through `[T]`; this body exists only to satisfy the
// trait and is statically unreachable.
#[cold]
#[inline(never)]
fn const_elts() -> usize {
unreachable!("This cannot be called on unsized slices")
}
}
impl<T> BitView for [T; 0] | |
where T: BitRegister + BitStore | |
{ | |
type Mem = T; | |
#[inline(always)] | |
fn view_bits<O>(&self) -> &BitSlice<O, T> | |
where O: BitOrder { | |
BitSlice::empty() | |
} | |
#[inline(always)] | |
fn view_bits_mut<O>(&mut self) -> &mut BitSlice<O, T> | |
where O: BitOrder { | |
BitSlice::empty_mut() | |
} | |
#[inline(always)] | |
fn const_elts() -> usize { | |
0 | |
} | |
} | |
// Implements `BitView` for arrays `[T; N]`. Array lengths are compile-time
// constants, so the bit-span can be constructed unchecked from the array's
// base address with a statically-known bit count. (A macro is used because
// this predates stable const-generics.)
macro_rules! view_bits {
($($n:expr),+ $(,)?) => { $(
impl<T> BitView for [T; $n]
where T: BitRegister + BitStore {
type Mem = T;
#[inline]
fn view_bits<O>(&self) -> &BitSlice<O, T>
where O: BitOrder {
unsafe {
BitPtr::new_unchecked(
self.as_ptr(),
BitIdx::ZERO,
$n * T::Mem::BITS as usize,
)
}
.to_bitslice_ref()
}
#[inline]
fn view_bits_mut<O>(&mut self) -> &mut BitSlice<O, T>
where O: BitOrder {
unsafe {
BitPtr::new_unchecked(
self.as_mut_ptr(),
BitIdx::ZERO,
$n * T::Mem::BITS as usize,
)
}
.to_bitslice_mut()
}
#[inline(always)]
fn const_elts() -> usize {
$n
}
}
)+ };
}
// Arrays of lengths 1 through 64 receive `BitView` implementations; longer
// arrays must be viewed through their `[T]` slice form.
view_bits!(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64
);
/// Immutable bit view for any type that borrows as a `[T]` slice.
pub trait AsBits<T>
where T: BitRegister + BitStore
{
/// Views `self` as an immutable bit-slice with ordering `O`.
fn as_bits<O>(&self) -> &BitSlice<O, T>
where O: BitOrder;
}
/// Mutable bit view for any type that borrows as a `[T]` slice.
pub trait AsBitsMut<T>
where T: BitRegister + BitStore
{
/// Views `self` as a mutable bit-slice with ordering `O`.
fn as_bits_mut<O>(&mut self) -> &mut BitSlice<O, T>
where O: BitOrder;
}
impl<A, T> AsBits<T> for A | |
where | |
A: AsRef<[T]>, | |
T: BitStore + BitRegister, | |
{ | |
#[inline] | |
fn as_bits<O>(&self) -> &BitSlice<O, T> | |
where O: BitOrder { | |
self.as_ref().view_bits::<O>() | |
} | |
} | |
impl<A, T> AsBitsMut<T> for A | |
where | |
A: AsMut<[T]>, | |
T: BitStore + BitRegister, | |
{ | |
#[inline] | |
fn as_bits_mut<O>(&mut self) -> &mut BitSlice<O, T> | |
where O: BitOrder { | |
self.as_mut().view_bits_mut::<O>() | |
} | |
} | |
} | |
#[cfg(feature = "alloc")] | |
pub mod boxed { | |
#![cfg(feature = "alloc")] | |
use crate::{ | |
index::BitIdx, | |
mem::{ | |
BitMemory, | |
BitRegister, | |
}, | |
order::{ | |
BitOrder, | |
Lsb0, | |
}, | |
ptr::BitPtr, | |
slice::BitSlice, | |
store::BitStore, | |
vec::BitVec, | |
}; | |
use alloc::boxed::Box; | |
use core::{ | |
mem::{ | |
self, | |
ManuallyDrop, | |
}, | |
ptr::NonNull, | |
slice, | |
}; | |
use tap::pipe::Pipe; | |
/// Owning handle to a fixed-size heap-allocated bit region; the analogue of
/// `Box<[bool]>`. `repr(transparent)` guarantees it is exactly one (encoded)
/// bit-slice pointer wide.
#[repr(transparent)]
pub struct BitBox<O = Lsb0, T = usize>
where
O: BitOrder,
T: BitRegister + BitStore,
{
// Encoded `BitPtr` carrying base address, head-bit index, and bit length.
pointer: NonNull<BitSlice<O, T>>,
}
impl<O, T> BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
/// Copies a bit-slice into a new heap allocation.
#[inline]
pub fn from_bitslice(slice: &BitSlice<O, T>) -> Self {
BitVec::from_bitslice(slice).into_boxed_bitslice()
}
/// Converts a boxed element slice into a `BitBox`, panicking if the slice
/// has more elements than a bit-span can encode.
#[inline]
pub fn from_boxed_slice(boxed: Box<[T]>) -> Self {
Self::try_from_boxed_slice(boxed)
.expect("Slice was too long to be converted into a `BitBox`")
}
/// Fallible `from_boxed_slice`: returns the box unchanged when it is too
/// long to encode.
#[inline]
pub fn try_from_boxed_slice(boxed: Box<[T]>) -> Result<Self, Box<[T]>> {
let len = boxed.len();
if len > BitSlice::<O, T>::MAX_ELTS {
return Err(boxed);
}
let base = boxed.as_ptr();
// Take ownership of the allocation: the address is re-encoded into a
// `BitPtr` below, so the `Box` destructor must never run.
mem::forget(boxed);
Ok(Self {
pointer: unsafe {
BitPtr::new_unchecked(
base,
BitIdx::ZERO,
len * T::Mem::BITS as usize,
)
}
.to_nonnull(),
})
}
/// Discards the bit-span information and returns the raw element box.
#[inline]
pub fn into_boxed_slice(self) -> Box<[T]> {
// `self` must not drop, or the allocation being handed out would be
// freed; `ManuallyDrop` suppresses the destructor.
let mut this = ManuallyDrop::new(self);
unsafe { Box::from_raw(this.as_mut_slice()) }
}
/// Converts into a `BitVec` whose capacity equals the element count.
#[inline]
pub fn into_bitvec(self) -> BitVec<O, T> {
let mut bitptr = self.bitptr();
let raw = self
.pipe(ManuallyDrop::new)
.with_box(|b| unsafe { ManuallyDrop::take(b) })
.into_vec()
.pipe(ManuallyDrop::new);
unsafe {
// `into_vec` may move the allocation; re-aim the span descriptor
// at the vector's buffer before assembling the `BitVec`.
bitptr.set_pointer(raw.as_ptr() as *const T as *mut T);
BitVec::from_raw_parts(bitptr.to_bitslice_ptr_mut(), raw.capacity())
}
}
/// Borrows the buffer as an immutable bit-slice.
#[inline]
pub fn as_bitslice(&self) -> &BitSlice<O, T> {
self.bitptr().to_bitslice_ref()
}
/// Borrows the buffer as a mutable bit-slice.
#[inline]
pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<O, T> {
self.bitptr().to_bitslice_mut()
}
/// Views the storage elements spanned by the bit region.
#[inline]
pub fn as_slice(&self) -> &[T] {
let bitptr = self.bitptr();
let (base, elts) = (bitptr.pointer().to_const(), bitptr.elements());
unsafe { slice::from_raw_parts(base, elts) }
}
/// Mutably views the storage elements spanned by the bit region.
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [T] {
let bitptr = self.bitptr();
let (base, elts) = (bitptr.pointer().to_mut(), bitptr.elements());
unsafe { slice::from_raw_parts_mut(base, elts) }
}
/// Writes `value` into every bit of the allocation that lies outside the
/// live `head .. tail` region (the "dead" bits).
#[inline]
pub fn set_uninitialized(&mut self, value: bool) {
let head = self.bitptr().head().value() as usize;
let tail = head + self.len();
// NOTE: despite the name, `elts` is a bit count (elements multiplied
// by bits-per-element), used as the widened span length below.
let elts = self.bitptr().elements() * T::Mem::BITS as usize;
let mut bp = self.bitptr();
unsafe {
// Temporarily widen the span to cover the whole allocation so the
// regions before `head` and after `tail` become addressable.
bp.set_head(BitIdx::ZERO);
bp.set_len(elts);
let bits = bp.to_bitslice_mut();
bits.get_unchecked_mut(.. head).set_all(value);
bits.get_unchecked_mut(tail ..).set_all(value);
}
}
/// Decodes the stored pointer into its structured `BitPtr` form.
#[inline]
pub(crate) fn bitptr(&self) -> BitPtr<O, T> {
self.pointer.as_ptr().pipe(BitPtr::from_bitslice_ptr_mut)
}
// Temporarily reconstructs the underlying `Box<[T]>` (without taking
// ownership — the `ManuallyDrop` wrapper keeps the allocation alive) and
// runs `func` on it.
fn with_box<F, R>(&mut self, func: F) -> R
where F: FnOnce(&mut ManuallyDrop<Box<[T]>>) -> R {
self.as_mut_slice()
.pipe(|raw| unsafe { Box::from_raw(raw) })
.pipe(ManuallyDrop::new)
.pipe_ref_mut(func)
}
}
// Port of the `Box<[T]>` inherent API surface onto `BitBox`.
mod api {
use crate::{
boxed::BitBox,
mem::BitRegister,
order::BitOrder,
ptr::BitPtr,
slice::BitSlice,
store::BitStore,
vec::BitVec,
};
use core::{
marker::Unpin,
mem::ManuallyDrop,
pin::Pin,
};
use tap::pipe::Pipe;
impl<O, T> BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
/// Copies a bit-slice into a new `BitBox`.
#[inline(always)]
#[deprecated = "Prefer `::from_bitslice()`"]
pub fn new(x: &BitSlice<O, T>) -> Self {
Self::from_bitslice(x)
}
/// Copies a bit-slice into a new pinned `BitBox`.
#[inline]
pub fn pin(x: &BitSlice<O, T>) -> Pin<Self>
where
O: Unpin,
T: Unpin,
{
x.pipe(Self::from_bitslice).pipe(Pin::new)
}
/// Reconstructs a `BitBox` from a pointer previously produced by
/// `into_raw`/`leak`; any other pointer is undefined behavior.
#[inline]
pub unsafe fn from_raw(raw: *mut BitSlice<O, T>) -> Self {
raw.pipe(BitPtr::from_bitslice_ptr_mut)
.to_nonnull()
.pipe(|pointer| Self { pointer })
}
/// Disassembles the box into its raw pointer without deallocating.
#[inline(always)]
pub fn into_raw(b: Self) -> *mut BitSlice<O, T> {
Self::leak(b)
}
/// Leaks the allocation, returning a mutable slice reference of
/// caller-chosen lifetime.
#[inline]
pub fn leak<'a>(b: Self) -> &'a mut BitSlice<O, T>
where T: 'a {
// `ManuallyDrop` suppresses the destructor so the buffer outlives
// the box handle.
b.pipe(ManuallyDrop::new).bitptr().to_bitslice_mut()
}
/// Converts into a `BitVec`.
#[inline(always)]
#[deprecated = "Prefer `.into_bitvec()`"]
pub fn into_vec(self) -> BitVec<O, T> {
self.into_bitvec()
}
}
}
// Operator-trait implementations: each bitwise operator on `BitBox` defers to
// the corresponding `BitSlice` implementation, so the box behaves exactly
// like its dereferenced slice.
mod ops {
use crate::{
boxed::BitBox,
mem::BitRegister,
order::BitOrder,
slice::BitSlice,
store::BitStore,
};
use core::{
mem::ManuallyDrop,
ops::{
BitAnd,
BitAndAssign,
BitOr,
BitOrAssign,
BitXor,
BitXorAssign,
Deref,
DerefMut,
Index,
IndexMut,
Not,
},
};
impl<O, T, Rhs> BitAnd<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: BitAndAssign<Rhs>,
{
type Output = Self;
#[inline]
fn bitand(mut self, rhs: Rhs) -> Self::Output {
*self.as_mut_bitslice() &= rhs;
self
}
}
impl<O, T, Rhs> BitAndAssign<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: BitAndAssign<Rhs>,
{
#[inline]
fn bitand_assign(&mut self, rhs: Rhs) {
*self.as_mut_bitslice() &= rhs;
}
}
impl<O, T, Rhs> BitOr<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: BitOrAssign<Rhs>,
{
type Output = Self;
#[inline]
fn bitor(mut self, rhs: Rhs) -> Self::Output {
*self.as_mut_bitslice() |= rhs;
self
}
}
impl<O, T, Rhs> BitOrAssign<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: BitOrAssign<Rhs>,
{
#[inline]
fn bitor_assign(&mut self, rhs: Rhs) {
*self.as_mut_bitslice() |= rhs;
}
}
impl<O, T, Rhs> BitXor<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: BitXorAssign<Rhs>,
{
type Output = Self;
#[inline]
fn bitxor(mut self, rhs: Rhs) -> Self::Output {
*self.as_mut_bitslice() ^= rhs;
self
}
}
impl<O, T, Rhs> BitXorAssign<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: BitXorAssign<Rhs>,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Rhs) {
*self.as_mut_bitslice() ^= rhs;
}
}
impl<O, T> Deref for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
type Target = BitSlice<O, T>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
self.as_bitslice()
}
}
impl<O, T> DerefMut for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut_bitslice()
}
}
impl<O, T> Drop for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn drop(&mut self) {
// Reconstruct the owning `Box<[T]>` and drop it exactly once,
// freeing the allocation.
self.with_box(|slot| unsafe { ManuallyDrop::drop(slot) });
}
}
impl<O, T, Idx> Index<Idx> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: Index<Idx>,
{
type Output = <BitSlice<O, T> as Index<Idx>>::Output;
#[inline]
fn index(&self, index: Idx) -> &Self::Output {
self.as_bitslice().index(index)
}
}
impl<O, T, Idx> IndexMut<Idx> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
BitSlice<O, T>: IndexMut<Idx>,
{
#[inline]
fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
self.as_mut_bitslice().index_mut(index)
}
}
impl<O, T> Not for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
type Output = Self;
// Inverts every storage element in full; any dead bits outside the
// live span are inverted along with the live ones.
#[inline]
fn not(mut self) -> Self::Output {
for elem in self.as_mut_slice().iter_mut() {
*elem = !*elem;
}
self
}
}
}
// Standard-library trait implementations for `BitBox`: borrowing, ordering,
// comparison, conversion, formatting, hashing, and thread-safety markers.
mod traits {
use crate::{
boxed::BitBox,
mem::BitRegister,
order::BitOrder,
ptr::BitPtr,
slice::BitSlice,
store::BitStore,
vec::BitVec,
};
use alloc::boxed::Box;
use core::{
borrow::{
Borrow,
BorrowMut,
},
cmp,
convert::TryFrom,
fmt::{
self,
Binary,
Debug,
Display,
Formatter,
LowerHex,
Octal,
Pointer,
UpperHex,
},
hash::{
Hash,
Hasher,
},
};
use tap::pipe::Pipe;
impl<O, T> Borrow<BitSlice<O, T>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn borrow(&self) -> &BitSlice<O, T> {
self.as_bitslice()
}
}
impl<O, T> BorrowMut<BitSlice<O, T>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn borrow_mut(&mut self) -> &mut BitSlice<O, T> {
self.as_mut_bitslice()
}
}
impl<O, T> Clone for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
// Deep copy: clones the bit contents into a fresh allocation.
#[inline]
fn clone(&self) -> Self {
self.as_bitslice().pipe(Self::from_bitslice)
}
}
impl<O, T> Eq for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
}
impl<O, T> Ord for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.as_bitslice().cmp(other.as_bitslice())
}
}
// Symmetric equality: allow comparing slices (in any reference form)
// against boxes, across differing orderings and storage types.
impl<O1, O2, T1, T2> PartialEq<BitBox<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitRegister + BitStore,
{
#[inline]
fn eq(&self, other: &BitBox<O2, T2>) -> bool {
self == other.as_bitslice()
}
}
impl<O1, O2, T1, T2> PartialEq<BitBox<O2, T2>> for &BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitRegister + BitStore,
{
#[inline]
fn eq(&self, other: &BitBox<O2, T2>) -> bool {
*self == other.as_bitslice()
}
}
impl<O1, O2, T1, T2> PartialEq<BitBox<O2, T2>> for &mut BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitRegister + BitStore,
{
#[inline]
fn eq(&self, other: &BitBox<O2, T2>) -> bool {
**self == other.as_bitslice()
}
}
// Comparison against any `Rhs` is delegated by flipping the operands,
// so only `Rhs: PartialEq<BitSlice>` is required.
impl<O, T, Rhs> PartialEq<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
Rhs: ?Sized + PartialEq<BitSlice<O, T>>,
{
#[inline]
fn eq(&self, other: &Rhs) -> bool {
other == self.as_bitslice()
}
}
impl<O, T> PartialOrd<BitBox<O, T>> for BitSlice<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn partial_cmp(&self, other: &BitBox<O, T>) -> Option<cmp::Ordering> {
self.partial_cmp(other.as_bitslice())
}
}
impl<O, T, Rhs> PartialOrd<Rhs> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
Rhs: ?Sized + PartialOrd<BitSlice<O, T>>,
{
#[inline]
fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
other.partial_cmp(self.as_bitslice())
}
}
impl<O, T> AsRef<BitSlice<O, T>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn as_ref(&self) -> &BitSlice<O, T> {
self.as_bitslice()
}
}
impl<O, T> AsMut<BitSlice<O, T>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn as_mut(&mut self) -> &mut BitSlice<O, T> {
self.as_mut_bitslice()
}
}
impl<'a, O, T> From<&'a BitSlice<O, T>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn from(slice: &'a BitSlice<O, T>) -> Self {
Self::from_bitslice(slice)
}
}
impl<O, T> From<BitVec<O, T>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn from(bv: BitVec<O, T>) -> Self {
bv.into_boxed_bitslice()
}
}
impl<O, T> Into<Box<[T]>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline(always)]
fn into(self) -> Box<[T]> {
self.into_boxed_slice()
}
}
impl<O, T> TryFrom<Box<[T]>> for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
type Error = Box<[T]>;
#[inline(always)]
fn try_from(boxed: Box<[T]>) -> Result<Self, Self::Error> {
Self::try_from_boxed_slice(boxed)
}
}
impl<O, T> Default for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
// The default box is the canonical empty span; no allocation occurs.
#[inline(always)]
fn default() -> Self {
Self {
pointer: BitPtr::EMPTY.to_nonnull(),
}
}
}
impl<O, T> Debug for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
// Debug output shows the decoded pointer structure followed by the
// binary rendering of the bits.
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
self.bitptr().render(fmt, "Box", None)?;
fmt.write_str(" ")?;
Binary::fmt(self.as_bitslice(), fmt)
}
}
impl<O, T> Display for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
Display::fmt(self.as_bitslice(), fmt)
}
}
impl<O, T> Binary for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
Binary::fmt(self.as_bitslice(), fmt)
}
}
impl<O, T> LowerHex for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
LowerHex::fmt(self.as_bitslice(), fmt)
}
}
impl<O, T> Octal for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
Octal::fmt(self.as_bitslice(), fmt)
}
}
impl<O, T> Pointer for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
self.bitptr().render(fmt, "Box", None)
}
}
impl<O, T> UpperHex for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
UpperHex::fmt(self.as_bitslice(), fmt)
}
}
impl<O, T> Hash for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
#[inline]
fn hash<H>(&self, state: &mut H)
where H: Hasher {
self.as_bitslice().hash(state)
}
}
// `BitBox` uniquely owns its allocation, so it may cross and be shared
// between threads whenever the element type permits.
unsafe impl<O, T> Send for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
}
unsafe impl<O, T> Sync for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
}
impl<O, T> Unpin for BitBox<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
}
}
} | |
#[cfg(feature = "alloc")] | |
pub mod vec { | |
#![cfg(feature = "alloc")] | |
use crate::{ | |
boxed::BitBox, | |
domain::Domain, | |
index::BitIdx, | |
mem::{ | |
BitMemory, | |
BitRegister, | |
}, | |
order::{ | |
BitOrder, | |
Lsb0, | |
}, | |
ptr::BitPtr, | |
slice::BitSlice, | |
store::BitStore, | |
}; | |
use alloc::vec::Vec; | |
use core::{ | |
mem::ManuallyDrop, | |
ptr::NonNull, | |
}; | |
use tap::{ | |
pipe::Pipe, | |
tap::Tap, | |
}; | |
/// Dynamically-sized bit array; the analogue of `Vec<bool>`.
#[repr(C)]
pub struct BitVec<O = Lsb0, T = usize>
where
O: BitOrder,
T: BitRegister + BitStore,
{
// Encoded `BitPtr` carrying base address, head-bit index, and bit length.
pointer: NonNull<BitSlice<O, T>>,
// Allocation capacity, measured in `T` elements (not bits).
capacity: usize,
}
impl<O, T> BitVec<O, T>
where
O: BitOrder,
T: BitRegister + BitStore,
{
/// Creates a vector of `len` bits, all set to `bit`.
#[inline]
pub fn repeat(bit: bool, len: usize) -> Self {
let mut out = Self::with_capacity(len);
unsafe {
out.set_len(len);
}
// Filling whole elements also writes the dead bits past `len`.
out.set_elements(if bit { T::ALL } else { T::ZERO });
out
}
/// Copies a bit-slice (preserving its head-bit offset) into a new vector.
#[inline]
pub fn from_bitslice(slice: &BitSlice<O, T>) -> Self {
let mut bitptr = slice.bitptr();
let mut vec = bitptr
.elements()
.pipe(Vec::with_capacity)
.pipe(ManuallyDrop::new);
// Copy element-by-element through the domain view; partially-live
// edge elements are read through their alias-safe wrappers.
match slice.domain() {
Domain::Enclave { elem, .. } => vec.push(elem.load_value()),
Domain::Region { head, body, tail } => {
if let Some((_, elem)) = head {
vec.push(elem.load_value());
}
vec.extend_from_slice(body);
if let Some((elem, _)) = tail {
vec.push(elem.load_value());
}
},
}
unsafe {
// Re-aim the copied span descriptor at the freshly filled buffer.
bitptr.set_pointer(vec.as_ptr() as *const T);
}
let capacity = vec.capacity();
Self {
pointer: bitptr.to_nonnull(),
capacity,
}
}
/// Converts a `Vec<T>` into a `BitVec`, panicking if it is too long.
#[inline]
pub fn from_vec(vec: Vec<T>) -> Self {
Self::try_from_vec(vec)
.expect("Vector was too long to be converted into a `BitVec`")
}
/// Fallible `from_vec`: returns the vector unchanged when it has more
/// elements than a bit-span can encode.
#[inline]
pub fn try_from_vec(vec: Vec<T>) -> Result<Self, Vec<T>> {
let len = vec.len();
if len > BitSlice::<O, T>::MAX_ELTS {
return Err(vec);
}
// Suppress the vector's destructor; its buffer is adopted below.
let vec = ManuallyDrop::new(vec);
let (base, capacity) = (vec.as_ptr(), vec.capacity());
Ok(Self {
pointer: unsafe {
BitPtr::new_unchecked(
base,
BitIdx::ZERO,
len * T::Mem::BITS as usize,
)
}
.to_nonnull(),
capacity,
})
}
/// Appends a copy of `other` (any ordering/storage type) to `self`.
#[inline]
pub fn extend_from_bitslice<O2, T2>(&mut self, other: &BitSlice<O2, T2>)
where
O2: BitOrder,
T2: BitStore,
{
let len = self.len();
let olen = other.len();
self.resize(len + olen, false);
unsafe { self.get_unchecked_mut(len ..) }.clone_from_bitslice(other);
}
/// Allocation capacity, measured in `T` elements (not bits).
#[inline]
pub fn alloc_capacity(&self) -> usize {
self.capacity
}
/// Number of `T` elements currently spanned by the live region.
#[inline]
pub fn elements(&self) -> usize {
self.bitptr().elements()
}
/// Shrinks the allocation to fit and converts into a `BitBox`.
#[inline]
pub fn into_boxed_bitslice(self) -> BitBox<O, T> {
let mut bitptr = self.bitptr();
let boxed = self.into_boxed_slice().pipe(ManuallyDrop::new);
unsafe {
// The box conversion may reallocate; re-aim the span descriptor.
bitptr.set_pointer(boxed.as_ptr());
}
unsafe { BitBox::from_raw(bitptr.to_bitslice_ptr_mut()) }
}
/// Discards the bit-span information and returns the element vector.
#[inline]
pub fn into_vec(self) -> Vec<T> {
let mut this = ManuallyDrop::new(self);
let buf = this.as_mut_slice();
unsafe {
Vec::from_raw_parts(
buf.as_mut_ptr() as *mut T,
buf.len(),
this.capacity,
)
}
}
/// Overwrites every live storage element (including dead bits) with
/// `element`.
#[inline]
pub fn set_elements(&mut self, element: T) {
self.as_mut_slice()
.iter_mut()
.for_each(|elt| *elt = element);
}
/// Writes `value` into all allocated bits outside the live span.
#[inline]
pub fn set_uninitialized(&mut self, value: bool) {
let head = self.bitptr().head().value() as usize;
let tail = head + self.len();
let capa = self.capacity();
let mut bp = self.bitptr();
unsafe {
// Temporarily widen the span to the whole allocation so regions
// before `head` and after `tail` become addressable.
bp.set_head(BitIdx::ZERO);
bp.set_len(capa);
let bits = bp.to_bitslice_mut();
bits.get_unchecked_mut(.. head).set_all(value);
bits.get_unchecked_mut(tail ..).set_all(value);
}
}
/// Shifts the live bits down so the span begins at bit 0 of element 0.
#[inline]
pub fn force_align(&mut self) {
let bitptr = self.bitptr();
let head = bitptr.head().value() as usize;
if head == 0 {
return;
}
let last = bitptr.len() + head;
unsafe {
// Zero the head first, then slide the old contents into place.
self.pointer =
bitptr.tap_mut(|bp| bp.set_head(BitIdx::ZERO)).to_nonnull();
self.copy_within_unchecked(head .. last, 0);
}
}
/// Sets the encoded length field without any bounds checking.
#[inline]
pub(crate) unsafe fn set_len_unchecked(&mut self, new_len: usize) {
self.pointer =
self.bitptr().tap_mut(|bp| bp.set_len(new_len)).to_nonnull()
}
/// Borrows the live region as an immutable bit-slice.
#[inline]
pub fn as_bitslice(&self) -> &BitSlice<O, T> {
unsafe { &*self.pointer.as_ptr() }
}
/// Borrows the live region as a mutable bit-slice.
#[inline]
pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<O, T> {
unsafe { &mut *self.pointer.as_ptr() }
}
/// Raw (encoded) bit-slice pointer, immutable.
#[inline]
pub fn as_bitptr(&self) -> *const BitSlice<O, T> {
self.pointer.as_ptr() as *const BitSlice<O, T>
}
/// Raw (encoded) bit-slice pointer, mutable.
#[inline]
pub fn as_mut_bitptr(&mut self) -> *mut BitSlice<O, T> {
self.pointer.as_ptr()
}
/// Decodes the stored pointer into its structured `BitPtr` form.
#[inline]
pub(crate) fn bitptr(&self) -> BitPtr<O, T> {
self.pointer.as_ptr().pipe(BitPtr::from_bitslice_ptr_mut)
}
// Temporarily reconstructs the underlying `Vec<T>` (without taking
// ownership), runs `func`, then re-encodes the pointer and capacity in
// case the vector reallocated during `func`.
fn with_vec<F, R>(&mut self, func: F) -> R
where F: FnOnce(&mut ManuallyDrop<Vec<T>>) -> R {
let cap = self.capacity;
let mut bitptr = self.bitptr();
let (base, elts) = (bitptr.pointer().to_mut(), bitptr.elements());
let mut vec = unsafe { Vec::from_raw_parts(base, elts, cap) }
.pipe(ManuallyDrop::new);
let out = func(&mut vec);
unsafe {
bitptr.set_pointer(vec.as_ptr());
}
self.pointer = bitptr.to_nonnull();
self.capacity = vec.capacity();
out
}
}
mod api { | |
use crate::{ | |
mem::{ | |
BitMemory, | |
BitRegister, | |
}, | |
order::BitOrder, | |
ptr::BitPtr, | |
slice::BitSlice, | |
store::BitStore, | |
vec::{ | |
iter::{ | |
Drain, | |
Splice, | |
}, | |
BitVec, | |
}, | |
}; | |
use alloc::{ | |
boxed::Box, | |
vec::Vec, | |
}; | |
use core::{ | |
mem::{ | |
self, | |
ManuallyDrop, | |
}, | |
ops::RangeBounds, | |
slice, | |
}; | |
use tap::pipe::Pipe; | |
impl<O, T> BitVec<O, T> | |
where | |
O: BitOrder, | |
T: BitRegister + BitStore, | |
{ | |
/// Constructs an empty vector without allocating.
#[inline]
pub fn new() -> Self {
Self {
pointer: BitPtr::<O, T>::EMPTY.to_nonnull(),
capacity: 0,
}
}
/// Allocates room for at least `capacity` bits; panics past `MAX_BITS`.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
assert!(
capacity <= BitSlice::<O, T>::MAX_BITS,
"Vector capacity exceeded: {} > {}",
capacity,
BitSlice::<O, T>::MAX_BITS
);
// Round the bit request up to whole elements, then adopt the buffer.
let vec = capacity
.pipe(crate::mem::elts::<T>)
.pipe(Vec::<T>::with_capacity)
.pipe(ManuallyDrop::new);
let (ptr, capacity) = (vec.as_ptr(), vec.capacity());
let pointer = ptr.pipe(BitPtr::uninhabited).pipe(BitPtr::to_nonnull);
Self { pointer, capacity }
}
/// Disassembles into `(encoded slice pointer, element capacity)` without
/// deallocating.
#[inline]
pub fn into_raw_parts(self) -> (*mut BitSlice<O, T>, usize) {
let mut this = ManuallyDrop::new(self);
(this.as_mut_bitptr(), this.alloc_capacity())
}
/// Reassembles a vector from `into_raw_parts` output. The pointer must
/// originate from a `BitVec` with the same type parameters.
#[inline]
pub unsafe fn from_raw_parts(
pointer: *mut BitSlice<O, T>,
capacity: usize,
) -> Self
{
// The cast discards the length metadata so only the data address is
// null-checked.
if (pointer as *mut [()]).is_null() {
panic!("Attempted to reconstruct a `BitVec` from a null pointer");
}
let pointer = pointer.pipe(BitPtr::from_bitslice_ptr_mut).to_nonnull();
Self { pointer, capacity }
}
/// Capacity in bits usable from the current head-bit offset.
#[inline]
pub fn capacity(&self) -> usize {
self.capacity
.checked_mul(T::Mem::BITS as usize)
.expect("Vector capacity exceeded")
.saturating_sub(self.bitptr().head().value() as usize)
}
/// Reserves capacity for at least `additional` more bits, zeroing any
/// newly allocated storage.
#[inline]
pub fn reserve(&mut self, additional: usize) {
let len = self.len();
let new_len = len
.checked_add(additional)
.expect("Vector capacity exceeded");
assert!(
new_len <= BitSlice::<O, T>::MAX_BITS,
"Vector capacity exceeded: {} > {}",
new_len,
BitSlice::<O, T>::MAX_BITS
);
let bitptr = self.bitptr();
let head = bitptr.head();
let elts = bitptr.elements();
// Only grow when the widened span needs more elements than are held.
if let Some(extra) = head.span(new_len).0.checked_sub(elts) {
self.with_vec(|v| v.reserve(extra));
let capa = self.capacity();
unsafe { self.get_unchecked_mut(len .. capa) }.set_all(false);
}
}
/// Like `reserve`, but asks the allocator for the exact amount and does
/// not pre-zero the new storage.
#[inline]
pub fn reserve_exact(&mut self, additional: usize) {
let new_len = self
.len()
.checked_add(additional)
.expect("Vector capacity exceeded");
assert!(
new_len <= BitSlice::<O, T>::MAX_BITS,
"Vector capacity exceeded: {} > {}",
new_len,
BitSlice::<O, T>::MAX_BITS
);
let bitptr = self.bitptr();
let head = bitptr.head();
let elts = bitptr.elements();
if let Some(extra) = head.span(new_len).0.checked_sub(elts) {
self.with_vec(|v| v.reserve_exact(extra));
}
}
/// Shrinks the allocation as close to the live element count as the
/// allocator permits.
#[inline]
pub fn shrink_to_fit(&mut self) {
self.with_vec(|v| v.shrink_to_fit());
}
/// Discards the bit-span information and returns the boxed elements.
#[inline]
pub fn into_boxed_slice(self) -> Box<[T]> {
self.into_vec().into_boxed_slice()
}
/// Shortens the vector to `len` bits; no-op when already shorter. The
/// allocation is untouched.
#[inline]
pub fn truncate(&mut self, len: usize) {
if len < self.len() {
unsafe { self.set_len_unchecked(len) }
}
}
/// Views the storage elements spanned by the live region.
#[inline]
pub fn as_slice(&self) -> &[T] {
let bitptr = self.bitptr();
let (base, elts) = (bitptr.pointer().to_const(), bitptr.elements());
unsafe { slice::from_raw_parts(base, elts) }
}
/// Mutably views the storage elements spanned by the live region.
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [T] {
let bitptr = self.bitptr();
let (base, elts) = (bitptr.pointer().to_mut(), bitptr.elements());
unsafe { slice::from_raw_parts_mut(base, elts) }
}
/// Raw pointer to the first storage element.
#[inline]
pub fn as_ptr(&self) -> *const T {
self.bitptr().pointer().to_const()
}
/// Raw mutable pointer to the first storage element.
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
self.bitptr().pointer().to_mut()
}
/// Sets the bit length directly. Caller must ensure bits up to `new_len`
/// are initialized; the length/capacity assertions still apply.
#[inline]
pub unsafe fn set_len(&mut self, new_len: usize) {
assert!(
new_len <= BitSlice::<O, T>::MAX_BITS,
"Capacity exceeded: {} exceeds maximum length {}",
new_len,
BitSlice::<O, T>::MAX_BITS,
);
let cap = self.capacity();
assert!(
new_len <= cap,
"Capacity exceeded: {} exceeds allocation size {}",
new_len,
cap,
);
self.set_len_unchecked(new_len);
}
/// Removes and returns the bit at `index`, replacing it with the last
/// bit (O(1), does not preserve ordering).
#[inline]
pub fn swap_remove(&mut self, index: usize) -> bool {
let len = self.len();
assert!(index < len, "Index {} out of bounds: {}", index, len);
let last = len - 1;
unsafe {
self.swap_unchecked(index, last);
self.set_len(last);
// Position `last` is just past the new length but still inside
// the allocation; it holds the removed bit after the swap.
*self.get_unchecked(last)
}
}
/// Inserts `value` at `index`, shifting later bits up by one.
// NOTE(review): unlike `Vec::insert`, an out-of-bounds index does not
// panic here — the vector is zero-extended to `index` and then `value`
// is pushed. Confirm this extension behavior is intended.
#[inline]
pub fn insert(&mut self, index: usize, value: bool) {
let len = self.len();
if index <= len {
self.push(value);
unsafe { self.get_unchecked_mut(index ..) }.rotate_right(1);
}
else {
self.resize(index, false);
self.push(value);
}
}
/// Removes and returns the bit at `index`, shifting later bits down.
#[inline]
pub fn remove(&mut self, index: usize) -> bool {
let len = self.len();
assert!(index < len, "Index {} out of bounds: {}", index, len);
let last = len - 1;
unsafe {
// Rotate the removed bit to the end, then shrink past it.
self.get_unchecked_mut(index ..).rotate_left(1);
self.set_len(last);
*self.get_unchecked(last)
}
}
/// Keeps only bits for which `func(index, &bit)` returns `true`.
#[inline]
pub fn retain<F>(&mut self, mut func: F)
where F: FnMut(usize, &bool) -> bool {
// Iterate in reverse so removals do not disturb unvisited indices.
for n in (0 .. self.len()).rev() {
if !func(n, unsafe { self.get_unchecked(n) }) {
self.remove(n);
}
}
}
#[inline] | |
pub fn push(&mut self, value: bool) { | |
let len = self.len(); | |
assert!( | |
len <= BitSlice::<O, T>::MAX_BITS, | |
"Exceeded capacity: {} >= {}", | |
len, | |
BitSlice::<O, T>::MAX_BITS, | |
); | |
if self.is_empty() || self.bitptr().tail().value() == T::BITS { | |
self.with_vec(|v| v.push(T::ZERO)); | |
} | |
unsafe { | |
self.set_len_unchecked(len + 1); | |
self.set_unchecked(len, value); | |
} | |
} | |
#[inline] | |
pub fn pop(&mut self) -> Option<bool> { | |
match self.len() { | |
0 => None, | |
n => unsafe { | |
let new_len = n - 1; | |
self.set_len_unchecked(new_len); | |
Some(*self.get_unchecked(new_len)) | |
}, | |
} | |
} | |
#[inline]
/// Moves all bits of `other` into `self`, leaving `other` empty.
///
/// `other` may use a different ordering and storage type; its bits are
/// copied one region at a time via `clone_from_bitslice`.
pub fn append<O2, T2>(&mut self, other: &mut BitVec<O2, T2>)
where
    O2: BitOrder,
    T2: BitRegister + BitStore,
{
    let this_len = self.len();
    let new_len = this_len + other.len();
    // Grow (zero-filled) to make room, then overwrite the new region.
    self.resize(new_len, false);
    unsafe { self.get_unchecked_mut(this_len .. new_len) }
        .clone_from_bitslice(other.as_bitslice());
    // `other` keeps its allocation but is logically emptied.
    other.clear();
}
#[inline]
/// Creates a draining iterator over `range`: yielded bits are removed from
/// the vector, and any tail after the range is shifted down when the `Drain`
/// is dropped.
pub fn drain<R>(&mut self, range: R) -> Drain<O, T>
where R: RangeBounds<usize> {
    Drain::new(self, range)
}
#[inline]
/// Empties the vector. The allocation is retained; only the bit-pointer is
/// reset to describe an uninhabited region over the same buffer.
pub fn clear(&mut self) {
    self.pointer = BitPtr::uninhabited(self.as_mut_ptr()).to_nonnull();
}
#[inline]
/// Returns the number of live bits in the vector.
pub fn len(&self) -> usize {
    self.as_bitslice().len()
}
#[inline]
/// Returns `true` when the vector contains no live bits.
pub fn is_empty(&self) -> bool {
    self.as_bitslice().is_empty()
}
#[inline]
#[must_use = "use `.truncate()` if you don't need the other half"]
/// Splits the vector at `at`, returning a newly-allocated vector containing
/// the bits `[at, len)` while `self` keeps `[0, at)`.
///
/// # Panics
///
/// Panics if `at > self.len()`.
pub fn split_off(&mut self, at: usize) -> Self {
    let len = self.len();
    assert!(at <= len, "Index {} out of bounds: {}", at, len);
    match at {
        // Splitting at the front hands the whole vector away.
        0 => mem::replace(self, Self::new()),
        // Splitting at the back hands away nothing.
        n if n == len => Self::new(),
        _ => unsafe {
            // Truncate self first, then deep-copy the tail region into a new
            // allocation and reassemble it from its raw parts.
            self.set_len(at);
            let (ptr, capa) =
                self.get_unchecked(at .. len).to_bitvec().into_raw_parts();
            Self::from_raw_parts(ptr as *mut BitSlice<O, T>, capa)
        },
    }
}
#[inline]
/// Resizes the vector to `new_len` bits. When growing, each new bit is
/// produced by calling `func`; when shrinking, excess bits are discarded.
pub fn resize_with<F>(&mut self, new_len: usize, mut func: F)
where F: FnMut() -> bool {
    let len = self.len();
    if new_len > len {
        let ext = new_len - len;
        self.reserve(ext);
        unsafe {
            // Fill the reserved-but-not-yet-live region; the closure ignores
            // the (index, old-value) arguments `for_each` supplies.
            self.get_unchecked_mut(len .. new_len)
                .for_each(|_, _| func());
        }
    }
    unsafe {
        // SAFETY: the region up to `new_len` was either already live or was
        // just initialized above.
        self.set_len(new_len);
    }
}
#[inline]
/// Consumes the vector without running its destructor, returning a mutable
/// bit-slice reference to its contents with an arbitrary caller-chosen
/// lifetime. The allocation is never freed.
pub fn leak<'a>(self) -> &'a mut BitSlice<O, T> {
    self.pipe(ManuallyDrop::new)
        .as_mut_bitslice()
        .bitptr()
        .to_bitslice_mut()
}
#[inline]
/// Resizes the vector to `new_len` bits. When growing, the new bits are all
/// set to `value`; when shrinking, excess bits are discarded.
pub fn resize(&mut self, new_len: usize, value: bool) {
    let len = self.len();
    if new_len > len {
        let ext = new_len - len;
        self.reserve(ext);
        unsafe {
            // Initialize the freshly reserved region before it becomes live.
            self.get_unchecked_mut(len .. new_len).set_all(value);
        }
    }
    unsafe {
        // SAFETY: all bits up to `new_len` are initialized at this point.
        self.set_len(new_len);
    }
}
#[inline]
#[deprecated = "Prefer `.extend()`, or converting your `[bool]` into a \
                `BitSlice`"]
/// Deprecated compatibility shim: appends each `bool` in `other`.
/// Simply forwards to the `Extend<bool>` implementation.
pub fn extend_from_slice(&mut self, other: &[bool]) {
    self.extend(other.iter().copied());
}
#[inline]
/// Creates a splicing iterator: it drains `range`, yields the removed bits,
/// and replaces the drained region with the bits produced by `replace_with`.
/// The replacement is completed when the returned `Splice` is dropped.
pub fn splice<R, I>(
    &mut self,
    range: R,
    replace_with: I,
) -> Splice<O, T, I::IntoIter>
where
    R: RangeBounds<usize>,
    I: IntoIterator<Item = bool>,
{
    Splice::new(self.drain(range), replace_with)
}
} | |
} | |
//! Iteration support for `BitVec`: `Extend`/`FromIterator` construction and
//! the owning (`IntoIter`), draining (`Drain`), and splicing (`Splice`)
//! iterator types. These types launder lifetimes through raw bit-pointers,
//! so the statement ordering inside them is load-bearing.
mod iter {
    use crate::{
        devel as dvl,
        mem::BitRegister,
        order::BitOrder,
        slice::{
            BitSlice,
            Iter,
        },
        store::BitStore,
        vec::BitVec,
    };
    use alloc::vec::Vec;
    use core::{
        fmt::{
            self,
            Debug,
            Formatter,
        },
        iter::{
            FromIterator,
            FusedIterator,
        },
        mem::{
            self,
            ManuallyDrop,
        },
        ops::{
            Range,
            RangeBounds,
        },
        ptr::NonNull,
    };
    use tap::{
        pipe::Pipe,
        tap::TapOptional,
    };
    /// Bulk append from a `bool` iterator: reserve according to the size
    /// hint, write directly into the reserved region, then fall back to
    /// `push` for any items the hint under-reported.
    impl<O, T> Extend<bool> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn extend<I>(&mut self, iter: I)
        where I: IntoIterator<Item = bool> {
            let mut iter = iter.into_iter();
            match iter.size_hint() {
                // Use the upper bound when present, else the lower bound.
                (n, None) | (_, Some(n)) => {
                    self.reserve(n);
                    let len = self.len();
                    let new_len = len + n;
                    // View over the reserved, not-yet-live region.
                    let new = unsafe { self.get_unchecked_mut(len .. new_len) };
                    let mut pulled = 0;
                    for (slot, bit) in
                        new.iter_mut().remove_alias().zip(iter.by_ref())
                    {
                        slot.set(bit);
                        pulled += 1;
                    }
                    unsafe {
                        // Only the bits actually written become live; the
                        // iterator may have ended before the hint.
                        self.set_len(len + pulled);
                    }
                },
            }
            // Drain anything the size hint did not cover.
            iter.for_each(|bit| self.push(bit));
        }
    }
    /// Convenience forwarding: extend from `&bool` by copying.
    impl<'a, O, T> Extend<&'a bool> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn extend<I>(&mut self, iter: I)
        where I: IntoIterator<Item = &'a bool> {
            self.extend(iter.into_iter().copied());
        }
    }
    /// Builds a vector by pre-allocating from the size hint, then extending.
    impl<O, T> FromIterator<bool> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn from_iter<I>(iter: I) -> Self
        where I: IntoIterator<Item = bool> {
            let iter = iter.into_iter();
            let mut out = match iter.size_hint() {
                // Prefer the upper bound; fall back to the lower.
                (n, None) | (_, Some(n)) => Self::with_capacity(n),
            };
            out.extend(iter);
            out
        }
    }
    /// Convenience forwarding: collect from `&bool` by copying.
    impl<'a, O, T> FromIterator<&'a bool> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn from_iter<I>(iter: I) -> Self
        where I: IntoIterator<Item = &'a bool> {
            iter.into_iter().copied().pipe(Self::from_iter)
        }
    }
    /// By-value iteration consumes the vector into an owning `IntoIter`.
    impl<O, T> IntoIterator for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type IntoIter = IntoIter<O, T>;
        type Item = bool;
        #[inline(always)]
        fn into_iter(self) -> Self::IntoIter {
            IntoIter::new(self)
        }
    }
    /// Shared iteration delegates to the underlying bit-slice iterator.
    impl<'a, O, T> IntoIterator for &'a BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type IntoIter = <&'a BitSlice<O, T> as IntoIterator>::IntoIter;
        type Item = <&'a BitSlice<O, T> as IntoIterator>::Item;
        #[inline]
        fn into_iter(self) -> Self::IntoIter {
            self.as_bitslice().into_iter()
        }
    }
    /// Exclusive iteration delegates to the underlying bit-slice iterator.
    impl<'a, O, T> IntoIterator for &'a mut BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type IntoIter = <&'a mut BitSlice<O, T> as IntoIterator>::IntoIter;
        type Item = <&'a mut BitSlice<O, T> as IntoIterator>::Item;
        #[inline]
        fn into_iter(self) -> Self::IntoIter {
            self.as_mut_bitslice().into_iter()
        }
    }
    /// Owning iterator over a consumed `BitVec`. Holds the raw allocation
    /// handle (`base`, `capa`) so its `Drop` can free the buffer, while
    /// `iter` walks the bits through a `'static`-laundered slice iterator.
    pub struct IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        // Start of the heap allocation, kept alive for the eventual free.
        base: NonNull<T>,
        // Allocation capacity in elements, needed to rebuild the `Vec`.
        capa: usize,
        // Bit iterator over the (leaked-for-now) buffer. The `'static`
        // lifetime is a laundering artifact; the data lives only as long as
        // this struct.
        iter: Iter<'static, O, T>,
    }
    impl<O, T> IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        /// Disassembles a `BitVec` into its allocation handle and a
        /// lifetime-erased bit iterator, suppressing the vector's destructor
        /// (this type's `Drop` frees the buffer instead).
        fn new(bv: BitVec<O, T>) -> Self {
            let bv = ManuallyDrop::new(bv);
            let iter = bv.as_bitslice().bitptr().to_bitslice_ref().iter();
            let base = bv.bitptr().pointer().to_nonnull();
            let capa = bv.alloc_capacity();
            Self { base, capa, iter }
        }
        #[inline(always)]
        /// Views the bits not yet yielded.
        pub fn as_bitslice(&self) -> &BitSlice<O, T> {
            self.iter.as_bitslice()
        }
        #[inline(always)]
        #[deprecated = "Use `.as_bitslice()` to view the underlying slice"]
        pub fn as_slice(&self) -> &BitSlice<O, T> {
            self.as_bitslice()
        }
        #[inline]
        /// Mutably views the bits not yet yielded. Sound because this
        /// iterator owns the allocation exclusively.
        pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<O, T> {
            self.iter.as_bitslice().bitptr().to_bitslice_mut()
        }
        #[inline(always)]
        #[deprecated = "Use `.as_mut_bitslice()` to view the underlying slice"]
        pub fn as_mut_slice(&mut self) -> &mut BitSlice<O, T> {
            self.as_mut_bitslice()
        }
    }
    impl<O, T> Debug for IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            fmt.debug_tuple("IntoIter")
                .field(&self.as_bitslice())
                .finish()
        }
    }
    /// All iterator behavior forwards to the inner bit-slice iterator,
    /// copying the yielded `&bool` references out by value.
    impl<O, T> Iterator for IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Item = bool;
        #[inline(always)]
        fn next(&mut self) -> Option<Self::Item> {
            self.iter.next().copied()
        }
        #[inline(always)]
        fn size_hint(&self) -> (usize, Option<usize>) {
            self.iter.size_hint()
        }
        #[inline(always)]
        fn count(self) -> usize {
            self.len()
        }
        #[inline(always)]
        fn nth(&mut self, n: usize) -> Option<Self::Item> {
            self.iter.nth(n).copied()
        }
        #[inline(always)]
        fn last(mut self) -> Option<Self::Item> {
            self.next_back()
        }
    }
    impl<O, T> DoubleEndedIterator for IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn next_back(&mut self) -> Option<Self::Item> {
            self.iter.next_back().copied()
        }
        #[inline(always)]
        fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
            self.iter.nth_back(n).copied()
        }
    }
    impl<O, T> ExactSizeIterator for IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn len(&self) -> usize {
            self.iter.len()
        }
    }
    impl<O, T> FusedIterator for IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    impl<O, T> Drop for IntoIter<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn drop(&mut self) {
            // SAFETY: `base`/`capa` came from the consumed BitVec's own
            // allocation in `new`, which suppressed the vector's destructor.
            // Length 0 is used because `T` elements need no per-item drop.
            drop(unsafe { Vec::from_raw_parts(self.base.as_ptr(), 0, self.capa) });
        }
    }
    /// Draining iterator: yields the bits of a sub-range and, on drop,
    /// shifts the trailing bits down to close the gap.
    pub struct Drain<'a, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        // The vector being drained; a raw pointer so `Drain` can mutate it
        // from `Drop`/`fill` while the drain iterator also borrows its data.
        source: NonNull<BitVec<O, T>>,
        // Iterator over the drained region (lifetime laundered from source).
        drain: Iter<'a, O, T>,
        // Index range of the bits AFTER the drained region, which must be
        // reattached when the drain ends.
        tail: Range<usize>,
    }
    impl<'a, O, T> Drain<'a, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        /// Detaches `range` from `source`: the vector's length is cut down
        /// to the range start, the drained bits are captured in an iterator,
        /// and the tail indices are remembered for reattachment on drop.
        pub(super) fn new<R>(source: &'a mut BitVec<O, T>, range: R) -> Self
        where R: RangeBounds<usize> {
            let len = source.len();
            let drain = dvl::normalize_range(range, len);
            dvl::assert_range(drain.clone(), len);
            let tail = drain.end .. len;
            let drain = unsafe {
                // Shrink first so the drained/tail bits are outside the
                // vector's live region while the Drain is active.
                source.set_len(drain.start);
                source
                    .as_bitslice()
                    .get_unchecked(drain)
                    .bitptr()
                    .to_bitslice_ref()
                    .iter()
            };
            let source = source.into();
            Self {
                source,
                drain,
                tail,
            }
        }
        #[inline(always)]
        /// Views the bits not yet drained.
        pub fn as_bitslice(&self) -> &'a BitSlice<O, T> {
            self.drain.as_bitslice()
        }
        #[inline]
        #[allow(deprecated)]
        #[deprecated = "Use `.as_bitslice()` to view the underlying slice"]
        pub fn as_slice(&self) -> &BitSlice<O, T> {
            self.drain.as_slice()
        }
        #[inline]
        /// Writes bits from `iter` into the gap between the vector's current
        /// end and the start of the preserved tail, growing the live length
        /// as it goes. Reports whether the gap or the input ran out first.
        fn fill<I>(&mut self, iter: &mut I) -> FillStatus
        where I: Iterator<Item = bool> {
            let bitvec = unsafe { self.source.as_mut() };
            let mut len = bitvec.len();
            // The writable gap: from the live end up to the tail start.
            let span = unsafe { bitvec.get_unchecked_mut(len .. self.tail.start) };
            let mut out = FillStatus::FullSpan;
            for slot in span {
                if let Some(bit) = iter.next() {
                    slot.set(bit);
                    len += 1;
                }
                else {
                    out = FillStatus::EmptyInput;
                    break;
                }
            }
            unsafe {
                bitvec.set_len(len);
            }
            out
        }
        #[inline]
        /// Moves the preserved tail `additional` bits further back to make
        /// room for more replacement bits.
        ///
        /// # Safety
        ///
        /// Caller must ensure the source vector is in the mid-drain state
        /// this type maintains; the length is temporarily inflated to
        /// perform the copy and then restored.
        unsafe fn move_tail(&mut self, additional: usize) {
            if additional == 0 {
                return;
            }
            let bitvec = self.source.as_mut();
            let tail_len = self.tail.end - self.tail.start;
            let full_len = additional + tail_len;
            bitvec.reserve(full_len);
            let new_tail_start = additional + self.tail.start;
            let orig_tail = mem::replace(
                &mut self.tail,
                new_tail_start .. new_tail_start + tail_len,
            );
            let len = bitvec.len();
            // Temporarily extend the live region so the copy stays
            // in-bounds, then restore the real length.
            bitvec.set_len(full_len);
            bitvec.copy_within_unchecked(orig_tail, new_tail_start);
            bitvec.set_len(len);
        }
    }
    impl<O, T> AsRef<BitSlice<O, T>> for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn as_ref(&self) -> &BitSlice<O, T> {
            self.as_bitslice()
        }
    }
    impl<'a, O, T> Debug for Drain<'a, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            fmt.debug_tuple("Drain")
                .field(&self.drain.as_bitslice())
                .finish()
        }
    }
    /// Iterator behavior forwards to the inner drained-region iterator.
    impl<O, T> Iterator for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Item = bool;
        #[inline(always)]
        fn next(&mut self) -> Option<Self::Item> {
            self.drain.next().copied()
        }
        #[inline(always)]
        fn size_hint(&self) -> (usize, Option<usize>) {
            self.drain.size_hint()
        }
        #[inline(always)]
        fn count(self) -> usize {
            self.len()
        }
        #[inline(always)]
        fn nth(&mut self, n: usize) -> Option<Self::Item> {
            self.drain.nth(n).copied()
        }
        #[inline(always)]
        fn last(mut self) -> Option<Self::Item> {
            self.next_back()
        }
    }
    impl<O, T> DoubleEndedIterator for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn next_back(&mut self) -> Option<Self::Item> {
            self.drain.next_back().copied()
        }
        #[inline(always)]
        fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
            self.drain.nth_back(n).copied()
        }
    }
    impl<O, T> ExactSizeIterator for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn len(&self) -> usize {
            self.drain.len()
        }
    }
    impl<O, T> FusedIterator for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    // SAFETY(review): these mirror the Send/Sync of `&mut BitVec`; the raw
    // `NonNull<BitVec>` field defeats the auto-traits, so they are restored
    // manually here — confirm against upstream `bitvec`'s bounds.
    unsafe impl<O, T> Send for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    unsafe impl<O, T> Sync for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    impl<O, T> Drop for Drain<'_, O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        /// Reattaches the preserved tail: copies it down to the current live
        /// end and extends the vector's length over it.
        fn drop(&mut self) {
            let tail = self.tail.clone();
            let tail_len = tail.end - tail.start;
            if tail_len == 0 {
                return;
            }
            let bitvec = unsafe { self.source.as_mut() };
            let old_len = bitvec.len();
            let new_len = old_len + tail_len;
            unsafe {
                // Extend first so the copy destination is in the live
                // region, then move the tail into place.
                bitvec.set_len(new_len);
                bitvec.copy_within_unchecked(tail, old_len);
            }
        }
    }
    /// Outcome of `Drain::fill`: whether the writable gap was fully used
    /// (`FullSpan`) or the replacement iterator ran dry first (`EmptyInput`).
    #[repr(u8)]
    #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    enum FillStatus {
        FullSpan = 0,
        EmptyInput = 1,
    }
    /// Splicing iterator: drains a range while lazily writing replacement
    /// bits into the vacated space; finishes the replacement on drop.
    #[derive(Debug)]
    pub struct Splice<'a, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
        // The drain over the region being replaced.
        drain: Drain<'a, O, T>,
        // The stream of replacement bits.
        splice: I,
    }
    impl<'a, O, T, I> Splice<'a, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
        /// Pairs a drain with its replacement iterator.
        pub(super) fn new<II>(drain: Drain<'a, O, T>, splice: II) -> Self
        where II: IntoIterator<IntoIter = I, Item = bool> {
            let splice = splice.into_iter();
            Self { drain, splice }
        }
    }
    impl<O, T, I> Iterator for Splice<'_, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
        type Item = bool;
        #[inline]
        /// Yields the next drained bit; for each bit drained, one replacement
        /// bit (if available) is written into the slot just vacated and the
        /// vector's live length is advanced over it.
        fn next(&mut self) -> Option<Self::Item> {
            self.drain.next().tap_some(|_| {
                if let Some(bit) = self.splice.next() {
                    unsafe {
                        let bv = self.drain.source.as_mut();
                        let len = bv.len();
                        bv.set_len_unchecked(len + 1);
                        bv.set_unchecked(len, bit);
                    }
                }
            })
        }
        #[inline(always)]
        fn size_hint(&self) -> (usize, Option<usize>) {
            self.drain.size_hint()
        }
        #[inline(always)]
        fn count(self) -> usize {
            self.drain.len()
        }
    }
    impl<O, T, I> DoubleEndedIterator for Splice<'_, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
        #[inline(always)]
        fn next_back(&mut self) -> Option<Self::Item> {
            self.drain.next_back()
        }
        #[inline(always)]
        fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
            self.drain.nth_back(n)
        }
    }
    impl<O, T, I> ExactSizeIterator for Splice<'_, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
        #[inline(always)]
        fn len(&self) -> usize {
            self.drain.len()
        }
    }
    impl<O, T, I> FusedIterator for Splice<'_, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
    }
    impl<O, T, I> Drop for Splice<'_, O, T, I>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        I: Iterator<Item = bool>,
    {
        #[inline]
        /// Completes the splice: writes any remaining replacement bits into
        /// the gap, moving the tail back as needed, then lets the inner
        /// `Drain`'s destructor reattach the tail.
        fn drop(&mut self) {
            let tail = self.drain.tail.clone();
            let tail_len = tail.end - tail.start;
            let bitvec = unsafe { self.drain.source.as_mut() };
            // No tail to protect: just append the rest of the replacement.
            if tail_len == 0 {
                bitvec.extend(self.splice.by_ref());
                return;
            }
            // Try to fit the replacement into the existing gap.
            if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) {
                return;
            }
            // Gap exhausted; widen it by the replacement's size hint.
            let len = match self.splice.size_hint() {
                (n, None) | (_, Some(n)) => n,
            };
            unsafe {
                self.drain.move_tail(len);
            }
            if let FillStatus::EmptyInput = self.drain.fill(&mut self.splice) {
                return;
            }
            // The hint under-reported: buffer the remainder, then make room
            // for its exact size and flush it.
            let mut collected = self.splice.by_ref().collect::<BitVec>().into_iter();
            let len = collected.len();
            if len > 0 {
                unsafe {
                    self.drain.move_tail(len);
                }
                let filled = self.drain.fill(&mut collected);
                debug_assert_eq!(filled, FillStatus::EmptyInput);
                debug_assert_eq!(collected.len(), 0);
            }
        }
    }
}
//! Operator-trait implementations for `BitVec`. The bitwise operators and
//! indexing all forward to the corresponding `BitSlice` implementations;
//! `Drop` is the vector's actual destructor.
mod ops {
    use crate::{
        mem::BitRegister,
        order::BitOrder,
        slice::BitSlice,
        store::BitStore,
        vec::BitVec,
    };
    use core::{
        mem::ManuallyDrop,
        ops::{
            BitAnd,
            BitAndAssign,
            BitOr,
            BitOrAssign,
            BitXor,
            BitXorAssign,
            Deref,
            DerefMut,
            Index,
            IndexMut,
            Not,
        },
    };
    /// `bv & rhs`: consumes the vector, AND-assigns in place, returns it.
    impl<O, T, Rhs> BitAnd<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: BitAndAssign<Rhs>,
    {
        type Output = Self;
        #[inline]
        fn bitand(mut self, rhs: Rhs) -> Self::Output {
            *self.as_mut_bitslice() &= rhs;
            self
        }
    }
    /// `bv &= rhs`: forwards to the slice's `BitAndAssign`.
    impl<O, T, Rhs> BitAndAssign<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: BitAndAssign<Rhs>,
    {
        #[inline]
        fn bitand_assign(&mut self, rhs: Rhs) {
            *self.as_mut_bitslice() &= rhs;
        }
    }
    /// `bv | rhs`: consumes the vector, OR-assigns in place, returns it.
    impl<O, T, Rhs> BitOr<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: BitOrAssign<Rhs>,
    {
        type Output = Self;
        #[inline]
        fn bitor(mut self, rhs: Rhs) -> Self::Output {
            *self.as_mut_bitslice() |= rhs;
            self
        }
    }
    /// `bv |= rhs`: forwards to the slice's `BitOrAssign`.
    impl<O, T, Rhs> BitOrAssign<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: BitOrAssign<Rhs>,
    {
        #[inline]
        fn bitor_assign(&mut self, rhs: Rhs) {
            *self.as_mut_bitslice() |= rhs;
        }
    }
    /// `bv ^ rhs`: consumes the vector, XOR-assigns in place, returns it.
    impl<O, T, Rhs> BitXor<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: BitXorAssign<Rhs>,
    {
        type Output = Self;
        #[inline]
        fn bitxor(mut self, rhs: Rhs) -> Self::Output {
            *self.as_mut_bitslice() ^= rhs;
            self
        }
    }
    /// `bv ^= rhs`: forwards to the slice's `BitXorAssign`.
    impl<O, T, Rhs> BitXorAssign<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: BitXorAssign<Rhs>,
    {
        #[inline]
        fn bitxor_assign(&mut self, rhs: Rhs) {
            *self.as_mut_bitslice() ^= rhs;
        }
    }
    /// `BitVec` dereferences to its bit-slice, like `Vec` to `[T]`.
    impl<O, T> Deref for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Target = BitSlice<O, T>;
        #[inline(always)]
        fn deref(&self) -> &Self::Target {
            self.as_bitslice()
        }
    }
    impl<O, T> DerefMut for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn deref_mut(&mut self) -> &mut Self::Target {
            self.as_mut_bitslice()
        }
    }
    /// Destructor: reconstitutes the inner `Vec` handle and drops it,
    /// releasing the heap allocation.
    impl<O, T> Drop for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn drop(&mut self) {
            self.with_vec(|slot| unsafe { ManuallyDrop::drop(slot) });
        }
    }
    /// Indexing forwards to the slice (so `bv[i]` and `bv[a..b]` both work
    /// wherever `BitSlice` supports that index type).
    impl<O, T, Idx> Index<Idx> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: Index<Idx>,
    {
        type Output = <BitSlice<O, T> as Index<Idx>>::Output;
        #[inline]
        fn index(&self, index: Idx) -> &Self::Output {
            self.as_bitslice().index(index)
        }
    }
    impl<O, T, Idx> IndexMut<Idx> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        BitSlice<O, T>: IndexMut<Idx>,
    {
        #[inline]
        fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
            self.as_mut_bitslice().index_mut(index)
        }
    }
    /// `!bv`: inverts every bit by flipping whole storage elements.
    /// NOTE(review): this also inverts any dead bits past `len` in the final
    /// element — presumably unobservable through the bit-slice API; confirm
    /// against the crate's dead-bit invariants.
    impl<O, T> Not for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Output = Self;
        #[inline]
        fn not(mut self) -> Self::Output {
            for elem in self.as_mut_slice() {
                *elem = !*elem;
            }
            self
        }
    }
}
//! Standard-library trait implementations for `BitVec`. Nearly everything
//! forwards to the equivalent `BitSlice` implementation.
mod traits {
    use crate::{
        boxed::BitBox,
        mem::BitRegister,
        order::BitOrder,
        slice::BitSlice,
        store::BitStore,
        vec::BitVec,
    };
    use alloc::vec::Vec;
    use core::{
        borrow::{
            Borrow,
            BorrowMut,
        },
        cmp,
        convert::TryFrom,
        fmt::{
            self,
            Binary,
            Debug,
            Display,
            Formatter,
            LowerHex,
            Octal,
            UpperHex,
        },
        hash::{
            Hash,
            Hasher,
        },
    };
    impl<O, T> Borrow<BitSlice<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn borrow(&self) -> &BitSlice<O, T> {
            self.as_bitslice()
        }
    }
    impl<O, T> BorrowMut<BitSlice<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn borrow_mut(&mut self) -> &mut BitSlice<O, T> {
            self.as_mut_bitslice()
        }
    }
    impl<O, T> Clone for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        /// Clones by allocating a zeroed vector of equal length and copying
        /// the bits in.
        fn clone(&self) -> Self {
            let mut out = Self::repeat(false, self.len());
            out.copy_from_bitslice(self.as_bitslice());
            out
        }
        #[inline]
        /// Clones into an existing vector, reusing its allocation when
        /// possible: resets, resizes to the source length, then copies.
        fn clone_from(&mut self, source: &Self) {
            self.clear();
            self.resize(source.len(), false);
            self.copy_from_bitslice(source.as_bitslice());
        }
    }
    impl<O, T> Eq for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    impl<O, T> Ord for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn cmp(&self, other: &Self) -> cmp::Ordering {
            self.as_bitslice().cmp(other.as_bitslice())
        }
    }
    // Cross-type equality: a bit-slice (in any of its reference forms)
    // compares against a bit-vector by viewing the vector as a slice.
    impl<O1, O2, T1, T2> PartialEq<BitVec<O2, T2>> for BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitRegister + BitStore,
    {
        #[inline]
        fn eq(&self, other: &BitVec<O2, T2>) -> bool {
            self == other.as_bitslice()
        }
    }
    impl<O1, O2, T1, T2> PartialEq<BitVec<O2, T2>> for &BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitRegister + BitStore,
    {
        #[inline]
        fn eq(&self, other: &BitVec<O2, T2>) -> bool {
            *self == other.as_bitslice()
        }
    }
    impl<O1, O2, T1, T2> PartialEq<BitVec<O2, T2>> for &mut BitSlice<O1, T1>
    where
        O1: BitOrder,
        O2: BitOrder,
        T1: BitStore,
        T2: BitRegister + BitStore,
    {
        #[inline]
        fn eq(&self, other: &BitVec<O2, T2>) -> bool {
            **self == other.as_bitslice()
        }
    }
    // A bit-vector equals anything that knows how to equal its slice view;
    // the comparison is delegated in the flipped direction.
    impl<O, T, Rhs> PartialEq<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        Rhs: ?Sized + PartialEq<BitSlice<O, T>>,
    {
        #[inline]
        fn eq(&self, other: &Rhs) -> bool {
            other == self.as_bitslice()
        }
    }
    impl<O, T> PartialOrd<BitVec<O, T>> for BitSlice<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn partial_cmp(&self, other: &BitVec<O, T>) -> Option<cmp::Ordering> {
            self.partial_cmp(other.as_bitslice())
        }
    }
    impl<O, T, Rhs> PartialOrd<Rhs> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
        Rhs: ?Sized + PartialOrd<BitSlice<O, T>>,
    {
        #[inline]
        fn partial_cmp(&self, other: &Rhs) -> Option<cmp::Ordering> {
            other.partial_cmp(self.as_bitslice())
        }
    }
    impl<O, T> AsRef<BitSlice<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn as_ref(&self) -> &BitSlice<O, T> {
            self.as_bitslice()
        }
    }
    impl<O, T> AsMut<BitSlice<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn as_mut(&mut self) -> &mut BitSlice<O, T> {
            self.as_mut_bitslice()
        }
    }
    /// Conversion from a borrowed slice performs a deep copy.
    impl<'a, O, T> From<&'a BitSlice<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn from(slice: &'a BitSlice<O, T>) -> Self {
            Self::from_bitslice(slice)
        }
    }
    impl<'a, O, T> From<&'a mut BitSlice<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn from(slice: &'a mut BitSlice<O, T>) -> Self {
            Self::from_bitslice(slice)
        }
    }
    /// Conversion from a `BitBox` reuses its allocation.
    impl<O, T> From<BitBox<O, T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn from(boxed: BitBox<O, T>) -> Self {
            boxed.into_bitvec()
        }
    }
    /// Unwraps the vector into its raw element buffer.
    /// (Kept as `Into` rather than `From` because `Vec<T>` is the target.)
    impl<O, T> Into<Vec<T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn into(self) -> Vec<T> {
            self.into_vec()
        }
    }
    /// Fallible conversion from an element buffer; on failure the original
    /// `Vec` is handed back as the error.
    impl<O, T> TryFrom<Vec<T>> for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        type Error = Vec<T>;
        #[inline(always)]
        fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
            Self::try_from_vec(vec)
        }
    }
    impl<O, T> Default for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline(always)]
        fn default() -> Self {
            Self::new()
        }
    }
    /// Debug output: the bit-pointer header (with capacity) followed by the
    /// binary rendering of the contents.
    impl<O, T> Debug for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            self.bitptr().render(fmt, "Vec", &[(
                "capacity",
                &self.capacity() as &dyn Debug,
            )])?;
            fmt.write_str(" ")?;
            Binary::fmt(self.as_bitslice(), fmt)
        }
    }
    // All user-facing formatting forwards to the slice's implementations.
    impl<O, T> Display for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            Display::fmt(self.as_bitslice(), fmt)
        }
    }
    impl<O, T> Binary for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            Binary::fmt(self.as_bitslice(), fmt)
        }
    }
    impl<O, T> LowerHex for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            LowerHex::fmt(self.as_bitslice(), fmt)
        }
    }
    impl<O, T> Octal for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            Octal::fmt(self.as_bitslice(), fmt)
        }
    }
    impl<O, T> UpperHex for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
            UpperHex::fmt(self.as_bitslice(), fmt)
        }
    }
    /// Hashing matches the slice view, so `bv` and `&bv[..]` hash alike.
    impl<O, T> Hash for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
        #[inline]
        fn hash<H>(&self, state: &mut H)
        where H: Hasher {
            self.as_bitslice().hash(state)
        }
    }
    // SAFETY(review): `BitVec` owns its allocation exclusively; the raw
    // bit-pointer field defeats the auto-traits, so `Send`/`Sync` are
    // restored manually — mirrors upstream `bitvec`; confirm the bounds.
    unsafe impl<O, T> Send for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    unsafe impl<O, T> Sync for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
    impl<O, T> Unpin for BitVec<O, T>
    where
        O: BitOrder,
        T: BitRegister + BitStore,
    {
    }
}
pub use self::iter::{ | |
Drain, | |
IntoIter, | |
Splice, | |
}; | |
} | |
#[cfg(feature = "serde")] | |
mod serdes { | |
#![cfg(feature = "serde")] | |
use crate::{ | |
array::BitArray, | |
domain::Domain, | |
index::BitIdxErr, | |
mem::BitRegister, | |
order::BitOrder, | |
ptr::BitPtr, | |
slice::BitSlice, | |
store::BitStore, | |
view::BitView, | |
}; | |
use core::{ | |
cmp, | |
convert::TryInto, | |
fmt::{ | |
self, | |
Formatter, | |
}, | |
marker::PhantomData, | |
mem::ManuallyDrop, | |
}; | |
use serde::{ | |
de::{ | |
self, | |
Deserialize, | |
Deserializer, | |
MapAccess, | |
SeqAccess, | |
Unexpected, | |
Visitor, | |
}, | |
ser::{ | |
Serialize, | |
SerializeSeq, | |
SerializeStruct, | |
Serializer, | |
}, | |
}; | |
use tap::pipe::Pipe; | |
#[cfg(feature = "alloc")] | |
use crate::{ | |
boxed::BitBox, | |
vec::BitVec, | |
}; | |
/// Serialization: a bit-slice is written as a `BitSeq` struct with three
/// fields — `head` (starting bit index in the first element), `bits` (the
/// length as `u64`), and `data` (the raw storage elements). The container
/// types all reduce to this slice form.
impl<O, T> Serialize for BitSlice<O, T>
where
    O: BitOrder,
    T: BitStore,
    T::Mem: Serialize,
{
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: Serializer {
        let head = self.bitptr().head();
        let mut state = serializer.serialize_struct("BitSeq", 3)?;
        state.serialize_field("head", &head.value())?;
        // Widened to `u64` so the format is stable across pointer widths.
        state.serialize_field("bits", &(self.len() as u64))?;
        state.serialize_field("data", &self.domain())?;
        state.end()
    }
}
/// The memory domain serializes as a plain sequence of elements.
impl<T> Serialize for Domain<'_, T>
where
    T: BitStore,
    T::Mem: Serialize,
{
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: Serializer {
        let mut state = serializer.serialize_seq(Some(self.len()))?;
        for elem in *self {
            state.serialize_element(&elem)?;
        }
        state.end()
    }
}
/// A bit-array serializes as its slice view.
impl<O, V> Serialize for BitArray<O, V>
where
    O: BitOrder,
    V: BitView,
    V::Mem: Serialize,
{
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: Serializer {
        self.as_slice().serialize(serializer)
    }
}
/// A bit-box serializes as its slice view.
#[cfg(feature = "alloc")]
impl<O, T> Serialize for BitBox<O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore,
    T::Mem: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: Serializer {
        self.as_bitslice().serialize(serializer)
    }
}
/// A bit-vector serializes as its slice view.
#[cfg(feature = "alloc")]
impl<O, T> Serialize for BitVec<O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore,
    T::Mem: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where S: Serializer {
        self.as_bitslice().serialize(serializer)
    }
}
/// A bit-array deserializes directly from its backing storage value.
impl<'de, O, V> Deserialize<'de> for BitArray<O, V>
where
    O: BitOrder,
    V: BitView + Deserialize<'de>,
{
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where D: Deserializer<'de> {
        deserializer
            .pipe(<V as Deserialize<'de>>::deserialize)
            .map(Self::new)
    }
}
/// Visitor that reassembles a `BitVec` from the serialized
/// `{head, bits, data}` triple. Carries no state; the `PhantomData` fields
/// pin the deserializer lifetime and the output type.
#[cfg(feature = "alloc")]
#[derive(Clone, Copy, Debug, Default)]
struct BitVecVisitor<'de, O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore + Deserialize<'de>,
{
    _lt: PhantomData<&'de ()>,
    _bv: PhantomData<BitVec<O, T>>,
}
#[cfg(feature = "alloc")]
impl<'de, O, T> BitVecVisitor<'de, O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore + Deserialize<'de>,
{
    // The visitor is stateless, so one const instance serves all calls.
    const THIS: Self = Self {
        _lt: PhantomData,
        _bv: PhantomData,
    };
    /// Builds a `BitVec` from the deserialized components: takes ownership
    /// of `data`'s allocation (via `ManuallyDrop`), validates `head`, clamps
    /// `bits` to what the buffer can actually hold, and reconstitutes the
    /// vector from raw parts.
    fn assemble<E>(
        &self,
        head: u8,
        bits: usize,
        data: Vec<T>,
    ) -> Result<<Self as Visitor<'de>>::Value, E>
    where
        E: de::Error,
    {
        let data = ManuallyDrop::new(data);
        BitPtr::new(
            data.as_ptr() as *mut T,
            // `head` must be a valid in-element bit index for `T`.
            head.try_into().map_err(|val: BitIdxErr<_>| {
                de::Error::invalid_value(
                    Unexpected::Unsigned(val.value() as u64),
                    &"a head-bit index less than the deserialized element \
                      type’s bit width",
                )
            })?,
            // Never trust the serialized length beyond the buffer's size.
            cmp::min(bits, data.len().saturating_mul(T::BITS as usize)),
        )
        .ok_or_else(|| {
            de::Error::invalid_value(
                Unexpected::Other("invalid bit-region source data"),
                self,
            )
        })
        .map(BitPtr::to_bitslice_ptr_mut)
        // SAFETY-relevant: `data` was ManuallyDrop'd above, so the new
        // BitVec becomes the sole owner of the allocation.
        .map(|bp| unsafe { BitVec::from_raw_parts(bp, data.capacity()) })
    }
}
/// Drives deserialization of the `"BitSeq"` structure in either its
/// sequence form (tuple of three) or its map form (three named fields).
#[cfg(feature = "alloc")]
impl<'de, O, T> Visitor<'de> for BitVecVisitor<'de, O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore + Deserialize<'de>,
{
    type Value = BitVec<O, T>;
    #[inline]
    fn expecting(&self, fmt: &mut Formatter) -> fmt::Result {
        fmt.write_str("a BitSeq data series")
    }
    /// Sequence form: `head`, `bits`, `data`, in exactly that order.
    #[inline]
    fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
    where V: SeqAccess<'de> {
        let head = seq
            .next_element::<u8>()?
            .ok_or_else(|| de::Error::invalid_length(0, &self))?;
        let bits = seq
            .next_element::<u64>()?
            .ok_or_else(|| de::Error::invalid_length(1, &self))?;
        let data = seq
            .next_element::<Vec<T>>()?
            .ok_or_else(|| de::Error::invalid_length(2, &self))?;
        // NOTE(review): `bits as u64 -> usize` silently truncates on
        // 32-bit targets for oversized serialized lengths; `assemble`
        // clamps to the buffer size afterwards, so this appears benign —
        // confirm against the serializer.
        self.assemble(head, bits as usize, data)
    }
    /// Map form: `"head"`, `"bits"`, and `"data"` keys, in any order,
    /// each required exactly once.
    fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
    where V: MapAccess<'de> {
        let mut head: Option<u8> = None;
        let mut bits: Option<u64> = None;
        let mut data: Option<Vec<T>> = None;
        while let Some(key) = map.next_key()? {
            match key {
                // `replace(..).is_some()` consumes the value before the
                // duplicate check, keeping the map access in sync.
                "head" => {
                    if head.replace(map.next_value()?).is_some() {
                        return Err(de::Error::duplicate_field("head"));
                    }
                },
                "bits" => {
                    if bits.replace(map.next_value()?).is_some() {
                        return Err(de::Error::duplicate_field("bits"));
                    }
                },
                "data" => {
                    if data.replace(map.next_value()?).is_some() {
                        return Err(de::Error::duplicate_field("data"));
                    }
                },
                f => {
                    // Best-effort drain of the unknown value (its result
                    // is deliberately discarded) before rejecting the key.
                    let _ = map.next_value::<()>();
                    return Err(de::Error::unknown_field(f, &[
                        "head", "bits", "data",
                    ]));
                },
            }
        }
        let head = head.ok_or_else(|| de::Error::missing_field("head"))?;
        let bits = bits.ok_or_else(|| de::Error::missing_field("bits"))?;
        let data = data.ok_or_else(|| de::Error::missing_field("data"))?;
        self.assemble(head, bits as usize, data)
    }
}
/// `BitBox` deserializes through `BitVec` and then freezes the result
/// into a boxed bit-slice.
#[cfg(feature = "alloc")]
impl<'de, O, T> Deserialize<'de> for BitBox<O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore + Deserialize<'de>,
{
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where D: Deserializer<'de> {
        let bv = <BitVec<O, T> as Deserialize<'de>>::deserialize(deserializer)?;
        Ok(bv.into_boxed_bitslice())
    }
}
/// `BitVec` deserializes the three-field `"BitSeq"` structure through
/// `BitVecVisitor`, accepting both sequence and map encodings.
#[cfg(feature = "alloc")]
impl<'de, O, T> Deserialize<'de> for BitVec<O, T>
where
    O: BitOrder,
    T: BitRegister + BitStore + Deserialize<'de>,
{
    // `#[inline]` added for consistency: every sibling `deserialize` in
    // this module carries it.
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where D: Deserializer<'de> {
        deserializer.deserialize_struct(
            "BitSeq",
            &["head", "bits", "data"],
            BitVecVisitor::THIS,
        )
    }
}
} | |
pub mod funty { | |
use core::{ | |
convert::{ | |
TryFrom, | |
TryInto, | |
}, | |
fmt::{ | |
Binary, | |
Debug, | |
Display, | |
LowerExp, | |
LowerHex, | |
Octal, | |
UpperExp, | |
UpperHex, | |
}, | |
hash::Hash, | |
iter::{ | |
Product, | |
Sum, | |
}, | |
num::{ | |
FpCategory, | |
ParseIntError, | |
}, | |
ops::{ | |
Add, | |
AddAssign, | |
BitAnd, | |
BitAndAssign, | |
BitOr, | |
BitOrAssign, | |
BitXor, | |
BitXorAssign, | |
Div, | |
DivAssign, | |
Mul, | |
MulAssign, | |
Neg, | |
Not, | |
Rem, | |
RemAssign, | |
Shl, | |
ShlAssign, | |
Shr, | |
ShrAssign, | |
Sub, | |
SubAssign, | |
}, | |
str::FromStr, | |
}; | |
/// Declares that a type is one of the language's fundamental numeric
/// types. Mirrors the upstream `funty` crate's `IsNumber` trait; the
/// supertrait list enumerates every capability shared by all of the
/// integer and floating-point fundamentals, by value and by reference.
pub trait IsNumber: Sized
    + Send
    + Sync
    + Unpin
    + Clone
    + Copy
    + Default
    + FromStr
    + PartialEq<Self>
    + PartialOrd<Self>
    + Debug
    + Display
    // All fundamentals fold under product/sum, by value and reference.
    + Product<Self>
    + for<'a> Product<&'a Self>
    + Sum<Self>
    + for<'a> Sum<&'a Self>
    // The basic arithmetic operators, by value and by reference.
    + Add<Self, Output = Self>
    + for<'a> Add<&'a Self, Output = Self>
    + AddAssign<Self>
    + for<'a> AddAssign<&'a Self>
    + Sub<Self, Output = Self>
    + for<'a> Sub<&'a Self, Output = Self>
    + SubAssign<Self>
    + for<'a> SubAssign<&'a Self>
    + Mul<Self, Output = Self>
    + for<'a> Mul<&'a Self, Output = Self>
    + MulAssign<Self>
    + for<'a> MulAssign<&'a Self>
    + Div<Self, Output = Self>
    + for<'a> Div<&'a Self, Output = Self>
    + DivAssign<Self>
    + for<'a> DivAssign<&'a Self>
    + Rem<Self, Output = Self>
    + for<'a> Rem<&'a Self, Output = Self>
    + RemAssign<Self>
    + for<'a> RemAssign<&'a Self>
{
    /// The byte-array form of the value: `[u8; mem::size_of::<Self>()]`.
    type Bytes;
    /// Returns the big-endian byte representation of `self`.
    fn to_be_bytes(self) -> Self::Bytes;
    /// Returns the little-endian byte representation of `self`.
    fn to_le_bytes(self) -> Self::Bytes;
    /// Returns the native-endian byte representation of `self`.
    fn to_ne_bytes(self) -> Self::Bytes;
    /// Builds a value from its big-endian byte representation.
    fn from_be_bytes(bytes: Self::Bytes) -> Self;
    /// Builds a value from its little-endian byte representation.
    fn from_le_bytes(bytes: Self::Bytes) -> Self;
    /// Builds a value from its native-endian byte representation.
    fn from_ne_bytes(bytes: Self::Bytes) -> Self;
}
/// Declares that a type is one of the fundamental integer types.
///
/// The methods below are one-to-one mirrors of the inherent methods on
/// the integer fundamentals (`i8` through `u128`, `isize`, `usize`);
/// see the standard library's integer documentation for their
/// semantics. The supertrait list requires every bit-wise operator and
/// shifts by *every* integer type, each by value and by reference.
pub trait IsInteger:
    IsNumber
    + Hash
    + Eq
    + Ord
    // Integers have exact textual and bit-pattern renderings.
    + Binary
    + LowerHex
    + UpperHex
    + Octal
    // Bit-wise operators, by value and by reference.
    + BitAnd<Self, Output = Self>
    + for<'a> BitAnd<&'a Self, Output = Self>
    + BitAndAssign<Self>
    + for<'a> BitAndAssign<&'a Self>
    + BitOr<Self, Output = Self>
    + for<'a> BitOr<&'a Self, Output = Self>
    + BitOrAssign<Self>
    + for<'a> BitOrAssign<&'a Self>
    + BitXor<Self, Output = Self>
    + for<'a> BitXor<&'a Self, Output = Self>
    + BitXorAssign<Self>
    + for<'a> BitXorAssign<&'a Self>
    + Not<Output = Self>
    // Fallible conversions to and from every other integer fundamental.
    + TryFrom<i8>
    + TryFrom<u8>
    + TryFrom<i16>
    + TryFrom<u16>
    + TryFrom<i32>
    + TryFrom<u32>
    + TryFrom<i64>
    + TryFrom<u64>
    + TryFrom<i128>
    + TryFrom<u128>
    + TryFrom<isize>
    + TryFrom<usize>
    + TryInto<i8>
    + TryInto<u8>
    + TryInto<i16>
    + TryInto<u16>
    + TryInto<i32>
    + TryInto<u32>
    + TryInto<i64>
    + TryInto<u64>
    + TryInto<i128>
    + TryInto<u128>
    + TryInto<isize>
    + TryInto<usize>
    // Shifts by every integer type, by value and by reference.
    + Shl<i8, Output = Self>
    + for<'a> Shl<&'a i8, Output = Self>
    + ShlAssign<i8>
    + for<'a> ShlAssign<&'a i8>
    + Shr<i8, Output = Self>
    + for<'a> Shr<&'a i8, Output = Self>
    + ShrAssign<i8>
    + for<'a> ShrAssign<&'a i8>
    + Shl<u8, Output = Self>
    + for<'a> Shl<&'a u8, Output = Self>
    + ShlAssign<u8>
    + for<'a> ShlAssign<&'a u8>
    + Shr<u8, Output = Self>
    + for<'a> Shr<&'a u8, Output = Self>
    + ShrAssign<u8>
    + for<'a> ShrAssign<&'a u8>
    + Shl<i16, Output = Self>
    + for<'a> Shl<&'a i16, Output = Self>
    + ShlAssign<i16>
    + for<'a> ShlAssign<&'a i16>
    + Shr<i16, Output = Self>
    + for<'a> Shr<&'a i16, Output = Self>
    + ShrAssign<i16>
    + for<'a> ShrAssign<&'a i16>
    + Shl<u16, Output = Self>
    + for<'a> Shl<&'a u16, Output = Self>
    + ShlAssign<u16>
    + for<'a> ShlAssign<&'a u16>
    + Shr<u16, Output = Self>
    + for<'a> Shr<&'a u16, Output = Self>
    + ShrAssign<u16>
    + for<'a> ShrAssign<&'a u16>
    + Shl<i32, Output = Self>
    + for<'a> Shl<&'a i32, Output = Self>
    + ShlAssign<i32>
    + for<'a> ShlAssign<&'a i32>
    + Shr<i32, Output = Self>
    + for<'a> Shr<&'a i32, Output = Self>
    + ShrAssign<i32>
    + for<'a> ShrAssign<&'a i32>
    + Shl<u32, Output = Self>
    + for<'a> Shl<&'a u32, Output = Self>
    + ShlAssign<u32>
    + for<'a> ShlAssign<&'a u32>
    + Shr<u32, Output = Self>
    + for<'a> Shr<&'a u32, Output = Self>
    + ShrAssign<u32>
    + for<'a> ShrAssign<&'a u32>
    + Shl<i64, Output = Self>
    + for<'a> Shl<&'a i64, Output = Self>
    + ShlAssign<i64>
    + for<'a> ShlAssign<&'a i64>
    + Shr<i64, Output = Self>
    + for<'a> Shr<&'a i64, Output = Self>
    + ShrAssign<i64>
    + for<'a> ShrAssign<&'a i64>
    + Shl<u64, Output = Self>
    + for<'a> Shl<&'a u64, Output = Self>
    + ShlAssign<u64>
    + for<'a> ShlAssign<&'a u64>
    + Shr<u64, Output = Self>
    + for<'a> Shr<&'a u64, Output = Self>
    + ShrAssign<u64>
    + for<'a> ShrAssign<&'a u64>
    + Shl<i128, Output = Self>
    + for<'a> Shl<&'a i128, Output = Self>
    + ShlAssign<i128>
    + for<'a> ShlAssign<&'a i128>
    + Shr<i128, Output = Self>
    + for<'a> Shr<&'a i128, Output = Self>
    + ShrAssign<i128>
    + for<'a> ShrAssign<&'a i128>
    + Shl<u128, Output = Self>
    + for<'a> Shl<&'a u128, Output = Self>
    + ShlAssign<u128>
    + for<'a> ShlAssign<&'a u128>
    + Shr<u128, Output = Self>
    + for<'a> Shr<&'a u128, Output = Self>
    + ShrAssign<u128>
    + for<'a> ShrAssign<&'a u128>
    + Shl<isize, Output = Self>
    + for<'a> Shl<&'a isize, Output = Self>
    + ShlAssign<isize>
    + for<'a> ShlAssign<&'a isize>
    + Shr<isize, Output = Self>
    + for<'a> Shr<&'a isize, Output = Self>
    + ShrAssign<isize>
    + for<'a> ShrAssign<&'a isize>
    + Shl<usize, Output = Self>
    + for<'a> Shl<&'a usize, Output = Self>
    + ShlAssign<usize>
    + for<'a> ShlAssign<&'a usize>
    + Shr<usize, Output = Self>
    + for<'a> Shr<&'a usize, Output = Self>
    + ShrAssign<usize>
    + for<'a> ShrAssign<&'a usize>
{
    /// The zero value of the type.
    const ZERO: Self;
    /// The minimum representable value.
    const MIN: Self;
    /// The maximum representable value.
    const MAX: Self;
    // Everything below forwards to the identically-named inherent
    // methods on the implementing fundamental.
    fn min_value() -> Self;
    fn max_value() -> Self;
    fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError>;
    fn count_ones(self) -> u32;
    fn count_zeros(self) -> u32;
    fn leading_zeros(self) -> u32;
    fn trailing_zeros(self) -> u32;
    fn rotate_left(self, n: u32) -> Self;
    fn rotate_right(self, n: u32) -> Self;
    fn swap_bytes(self) -> Self;
    fn reverse_bits(self) -> Self;
    fn from_be(self) -> Self;
    fn from_le(self) -> Self;
    fn to_be(self) -> Self;
    fn to_le(self) -> Self;
    fn checked_add(self, rhs: Self) -> Option<Self>;
    fn checked_sub(self, rhs: Self) -> Option<Self>;
    fn checked_mul(self, rhs: Self) -> Option<Self>;
    fn checked_div(self, rhs: Self) -> Option<Self>;
    fn checked_div_euclid(self, rhs: Self) -> Option<Self>;
    fn checked_rem(self, rhs: Self) -> Option<Self>;
    fn checked_rem_euclid(self, rhs: Self) -> Option<Self>;
    fn checked_neg(self) -> Option<Self>;
    fn checked_shl(self, rhs: u32) -> Option<Self>;
    fn checked_shr(self, rhs: u32) -> Option<Self>;
    fn checked_pow(self, rhs: u32) -> Option<Self>;
    fn saturating_add(self, rhs: Self) -> Self;
    fn saturating_sub(self, rhs: Self) -> Self;
    fn saturating_mul(self, rhs: Self) -> Self;
    fn saturating_pow(self, rhs: u32) -> Self;
    fn wrapping_add(self, rhs: Self) -> Self;
    fn wrapping_sub(self, rhs: Self) -> Self;
    fn wrapping_mul(self, rhs: Self) -> Self;
    fn wrapping_div(self, rhs: Self) -> Self;
    fn wrapping_div_euclid(self, rhs: Self) -> Self;
    fn wrapping_rem(self, rhs: Self) -> Self;
    fn wrapping_rem_euclid(self, rhs: Self) -> Self;
    fn wrapping_neg(self) -> Self;
    fn wrapping_shl(self, rhs: u32) -> Self;
    fn wrapping_shr(self, rhs: u32) -> Self;
    fn wrapping_pow(self, rhs: u32) -> Self;
    fn overflowing_add(self, rhs: Self) -> (Self, bool);
    fn overflowing_sub(self, rhs: Self) -> (Self, bool);
    fn overflowing_mul(self, rhs: Self) -> (Self, bool);
    fn overflowing_div(self, rhs: Self) -> (Self, bool);
    fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool);
    fn overflowing_rem(self, rhs: Self) -> (Self, bool);
    fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool);
    fn overflowing_neg(self) -> (Self, bool);
    fn overflowing_shl(self, rhs: u32) -> (Self, bool);
    fn overflowing_shr(self, rhs: u32) -> (Self, bool);
    fn overflowing_pow(self, rhs: u32) -> (Self, bool);
    fn pow(self, rhs: u32) -> Self;
    fn div_euclid(self, rhs: Self) -> Self;
    fn rem_euclid(self, rhs: Self) -> Self;
}
/// Declares that a type is one of the fundamental *signed* integers.
/// All methods forward to the identically-named inherent methods.
pub trait IsSigned: IsInteger + Neg {
    fn checked_abs(self) -> Option<Self>;
    fn wrapping_abs(self) -> Self;
    fn overflowing_abs(self) -> (Self, bool);
    fn abs(self) -> Self;
    fn signum(self) -> Self;
    fn is_positive(self) -> bool;
    fn is_negative(self) -> bool;
}
/// Declares that a type is one of the fundamental *unsigned* integers.
/// All methods forward to the identically-named inherent methods.
pub trait IsUnsigned: IsInteger {
    fn is_power_of_two(self) -> bool;
    fn next_power_of_two(self) -> Self;
    fn checked_next_power_of_two(self) -> Option<Self>;
}
/// Declares that a type is one of the fundamental floating-point types
/// (`f32`, `f64`). Constants mirror the `core::{f32,f64}` module
/// constants; methods mirror the inherent float methods.
pub trait IsFloat:
    IsNumber
    + LowerExp
    + UpperExp
    + Neg
    // Both floats losslessly widen from these fundamentals.
    + From<f32>
    + From<i8>
    + From<i16>
    + From<u8>
    + From<u16>
{
    /// The unsigned integer with the same bit width as the float,
    /// used for raw bit-pattern transmutation.
    type Raw: IsUnsigned;
    const RADIX: u32;
    const MANTISSA_DIGITS: u32;
    const DIGITS: u32;
    const EPSILON: Self;
    const MIN: Self;
    const MIN_POSITIVE: Self;
    const MAX: Self;
    const MIN_EXP: i32;
    const MAX_EXP: i32;
    const MIN_10_EXP: i32;
    const MAX_10_EXP: i32;
    const NAN: Self;
    const INFINITY: Self;
    const NEG_INFINITY: Self;
    // Mathematical constants, from `core::{f32,f64}::consts`.
    const PI: Self;
    const FRAC_PI_2: Self;
    const FRAC_PI_3: Self;
    const FRAC_PI_4: Self;
    const FRAC_PI_6: Self;
    const FRAC_PI_8: Self;
    const FRAC_1_PI: Self;
    const FRAC_2_PI: Self;
    const FRAC_2_SQRT_PI: Self;
    const SQRT_2: Self;
    const FRAC_1_SQRT_2: Self;
    const E: Self;
    const LOG2_E: Self;
    const LOG10_E: Self;
    const LN_2: Self;
    const LN_10: Self;
    // The math routines below are provided by the platform math library,
    // so they are only available when the "std" feature is active.
    #[cfg(feature = "std")]
    fn floor(self) -> Self;
    #[cfg(feature = "std")]
    fn ceil(self) -> Self;
    #[cfg(feature = "std")]
    fn round(self) -> Self;
    #[cfg(feature = "std")]
    fn trunc(self) -> Self;
    #[cfg(feature = "std")]
    fn fract(self) -> Self;
    #[cfg(feature = "std")]
    fn abs(self) -> Self;
    #[cfg(feature = "std")]
    fn signum(self) -> Self;
    #[cfg(feature = "std")]
    fn copysign(self, sign: Self) -> Self;
    #[cfg(feature = "std")]
    fn mul_add(self, a: Self, b: Self) -> Self;
    #[cfg(feature = "std")]
    fn div_euclid(self, rhs: Self) -> Self;
    #[cfg(feature = "std")]
    fn rem_euclid(self, rhs: Self) -> Self;
    #[cfg(feature = "std")]
    fn powi(self, n: i32) -> Self;
    #[cfg(feature = "std")]
    fn powf(self, n: Self) -> Self;
    #[cfg(feature = "std")]
    fn sqrt(self) -> Self;
    #[cfg(feature = "std")]
    fn exp(self) -> Self;
    #[cfg(feature = "std")]
    fn exp2(self) -> Self;
    #[cfg(feature = "std")]
    fn ln(self) -> Self;
    #[cfg(feature = "std")]
    fn log(self, base: Self) -> Self;
    #[cfg(feature = "std")]
    fn log2(self) -> Self;
    #[cfg(feature = "std")]
    fn log10(self) -> Self;
    #[cfg(feature = "std")]
    fn cbrt(self) -> Self;
    #[cfg(feature = "std")]
    fn hypot(self, other: Self) -> Self;
    #[cfg(feature = "std")]
    fn sin(self) -> Self;
    #[cfg(feature = "std")]
    fn cos(self) -> Self;
    #[cfg(feature = "std")]
    fn tan(self) -> Self;
    #[cfg(feature = "std")]
    fn asin(self) -> Self;
    #[cfg(feature = "std")]
    fn acos(self) -> Self;
    #[cfg(feature = "std")]
    fn atan(self) -> Self;
    #[cfg(feature = "std")]
    fn atan2(self, other: Self) -> Self;
    #[cfg(feature = "std")]
    fn sin_cos(self) -> (Self, Self);
    #[cfg(feature = "std")]
    fn exp_m1(self) -> Self;
    #[cfg(feature = "std")]
    fn ln_1p(self) -> Self;
    #[cfg(feature = "std")]
    fn sinh(self) -> Self;
    #[cfg(feature = "std")]
    fn cosh(self) -> Self;
    #[cfg(feature = "std")]
    fn tanh(self) -> Self;
    #[cfg(feature = "std")]
    fn asinh(self) -> Self;
    #[cfg(feature = "std")]
    fn acosh(self) -> Self;
    #[cfg(feature = "std")]
    fn atanh(self) -> Self;
    // Classification and bit-level routines are pure and available
    // without "std".
    fn is_nan(self) -> bool;
    fn is_infinite(self) -> bool;
    fn is_finite(self) -> bool;
    fn is_normal(self) -> bool;
    fn classify(self) -> FpCategory;
    fn is_sign_positive(self) -> bool;
    fn is_sign_negative(self) -> bool;
    fn recip(self) -> Self;
    fn to_degrees(self) -> Self;
    fn to_radians(self) -> Self;
    fn max(self, other: Self) -> Self;
    fn min(self, other: Self) -> Self;
    fn to_bits(self) -> Self::Raw;
    fn from_bits(bits: Self::Raw) -> Self;
}
// Exact-width markers: a fundamental implements `IsN` when its bit
// width is exactly N on the compilation target.
pub trait Is8: IsNumber {}
pub trait Is16: IsNumber {}
pub trait Is32: IsNumber {}
pub trait Is64: IsNumber {}
pub trait Is128: IsNumber {}
// Lower-bound markers: width is at least N bits.
pub trait AtLeast8: IsNumber {}
pub trait AtLeast16: IsNumber {}
pub trait AtLeast32: IsNumber {}
pub trait AtLeast64: IsNumber {}
pub trait AtLeast128: IsNumber {}
// Upper-bound markers: width is at most N bits.
pub trait AtMost8: IsNumber {}
pub trait AtMost16: IsNumber {}
pub trait AtMost32: IsNumber {}
pub trait AtMost64: IsNumber {}
pub trait AtMost128: IsNumber {}
/// Generates a trait-method body that forwards to the identically-named
/// inherent method on the implementing type (`<Self>::name(...)`). The
/// four arms cover the receiver forms: `self`, `&self`, `&mut self`,
/// and no receiver (associated function).
macro_rules! func {
    ( $name:ident ( self $(, $arg:ident : $t:ty)* ) $( -> $ret:ty )? ) => {
        fn $name ( self $(, $arg : $t )* ) $( -> $ret )? { <Self>:: $name ( self $(, $arg )* )}
    };
    ( $name:ident ( &self $(, $arg:ident : $t:ty)* ) $( -> $ret:ty )? ) => {
        fn $name ( &self $(, $arg : $t )* ) $( -> $ret )? { <Self>:: $name ( &self $(, $arg )* )}
    };
    ( $name:ident ( &mut self $(, $arg:ident : $t:ty)* ) $( -> $ret:ty )? ) => {
        fn $name ( &mut self $(, $arg : $t )* ) $( -> $ret )? { <Self>:: $name ( &mut self $(, $arg )* )}
    };
    ( $name:ident ( $($arg:ident : $t:ty),* ) $( -> $ret:ty )? ) => {
        fn $name ( $($arg : $t ),* ) $( -> $ret )? { <Self>:: $name ( $( $arg ),* )}
    };
}
/// Same forwarding generator as `func!`, but the emitted method is
/// additionally gated on the "std" feature — used for the float math
/// routines that require the platform math library.
macro_rules! stdfunc {
    ( $name:ident ( self $(, $arg:ident : $t:ty)* ) $( -> $ret:ty )? ) => {
        #[cfg(feature = "std")]
        fn $name ( self $(, $arg : $t )* ) $( -> $ret )? { <Self>:: $name ( self $(, $arg )* )}
    };
    ( $name:ident ( &self $(, $arg:ident : $t:ty)* ) $( -> $ret:ty )? ) => {
        #[cfg(feature = "std")]
        fn $name ( &self $(, $arg : $t )* ) $( -> $ret )? { <Self>:: $name ( &self $(, $arg )* )}
    };
    ( $name:ident ( &mut self $(, $arg:ident : $t:ty)* ) $( -> $ret:ty )? ) => {
        #[cfg(feature = "std")]
        fn $name ( &mut self $(, $arg : $t )* ) $( -> $ret )? { <Self>:: $name ( &mut self $(, $arg )* )}
    };
    ( $name:ident ( $($arg:ident : $t:ty),* ) $( -> $ret:ty )? ) => {
        #[cfg(feature = "std")]
        fn $name ( $($arg : $t ),* ) $( -> $ret )? { <Self>:: $name ( $( $arg ),* )}
    };
}
/// Implements one of the capability traits for a list of fundamentals.
/// The first token selects the trait; the fall-through arm implements
/// any empty marker trait (`Is*`, `AtLeast*`, `AtMost*`).
macro_rules! impl_for {
    // Full `IsNumber` implementation: byte-representation round-trips.
    ( IsNumber => $($t:ty),+ $(,)? ) => { $(
        impl IsNumber for $t {
            type Bytes = [u8; core::mem::size_of::<Self>()];
            func!(to_be_bytes(self) -> Self::Bytes);
            func!(to_le_bytes(self) -> Self::Bytes);
            func!(to_ne_bytes(self) -> Self::Bytes);
            func!(from_be_bytes(bytes: Self::Bytes) -> Self);
            func!(from_le_bytes(bytes: Self::Bytes) -> Self);
            func!(from_ne_bytes(bytes: Self::Bytes) -> Self);
        }
    )+ };
    // Full `IsInteger` implementation: forwards every inherent method.
    ( IsInteger => $($t:ty),+ $(,)? ) => { $(
        impl IsInteger for $t {
            const ZERO: Self = 0;
            const MIN: Self = <Self>::min_value();
            const MAX: Self = <Self>::max_value();
            func!(min_value() -> Self);
            func!(max_value() -> Self);
            func!(from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError>);
            func!(count_ones(self) -> u32);
            func!(count_zeros(self) -> u32);
            func!(leading_zeros(self) -> u32);
            func!(trailing_zeros(self) -> u32);
            func!(rotate_left(self, n: u32) -> Self);
            func!(rotate_right(self, n: u32) -> Self);
            func!(swap_bytes(self) -> Self);
            func!(reverse_bits(self) -> Self);
            func!(from_be(self) -> Self);
            func!(from_le(self) -> Self);
            func!(to_be(self) -> Self);
            func!(to_le(self) -> Self);
            func!(checked_add(self, rhs: Self) -> Option<Self>);
            func!(checked_sub(self, rhs: Self) -> Option<Self>);
            func!(checked_mul(self, rhs: Self) -> Option<Self>);
            func!(checked_div(self, rhs: Self) -> Option<Self>);
            func!(checked_div_euclid(self, rhs: Self) -> Option<Self>);
            func!(checked_rem(self, rhs: Self) -> Option<Self>);
            func!(checked_rem_euclid(self, rhs: Self) -> Option<Self>);
            func!(checked_neg(self) -> Option<Self>);
            func!(checked_shl(self, rhs: u32) -> Option<Self>);
            func!(checked_shr(self, rhs: u32) -> Option<Self>);
            func!(checked_pow(self, rhs: u32) -> Option<Self>);
            func!(saturating_add(self, rhs: Self) -> Self);
            func!(saturating_sub(self, rhs: Self) -> Self);
            func!(saturating_mul(self, rhs: Self) -> Self);
            func!(saturating_pow(self, rhs: u32) -> Self);
            func!(wrapping_add(self, rhs: Self) -> Self);
            func!(wrapping_sub(self, rhs: Self) -> Self);
            func!(wrapping_mul(self, rhs: Self) -> Self);
            func!(wrapping_div(self, rhs: Self) -> Self);
            func!(wrapping_div_euclid(self, rhs: Self) -> Self);
            func!(wrapping_rem(self, rhs: Self) -> Self);
            func!(wrapping_rem_euclid(self, rhs: Self) -> Self);
            func!(wrapping_neg(self) -> Self);
            func!(wrapping_shl(self, rhs: u32) -> Self);
            func!(wrapping_shr(self, rhs: u32) -> Self);
            func!(wrapping_pow(self, rhs: u32) -> Self);
            func!(overflowing_add(self, rhs: Self) -> (Self, bool));
            func!(overflowing_sub(self, rhs: Self) -> (Self, bool));
            func!(overflowing_mul(self, rhs: Self) -> (Self, bool));
            func!(overflowing_div(self, rhs: Self) -> (Self, bool));
            func!(overflowing_div_euclid(self, rhs: Self) -> (Self, bool));
            func!(overflowing_rem(self, rhs: Self) -> (Self, bool));
            func!(overflowing_rem_euclid(self, rhs: Self) -> (Self, bool));
            func!(overflowing_neg(self) -> (Self, bool));
            func!(overflowing_shl(self, rhs: u32) -> (Self, bool));
            func!(overflowing_shr(self, rhs: u32) -> (Self, bool));
            func!(overflowing_pow(self, rhs: u32) -> (Self, bool));
            func!(pow(self, rhs: u32) -> Self);
            func!(div_euclid(self, rhs: Self) -> Self);
            func!(rem_euclid(self, rhs: Self) -> Self);
        }
    )+ };
    // Signed-only methods.
    ( IsSigned => $($t:ty),+ $(,)? ) => { $(
        impl IsSigned for $t {
            func!(checked_abs(self) -> Option<Self>);
            func!(wrapping_abs(self) -> Self);
            func!(overflowing_abs(self) -> (Self, bool));
            func!(abs(self) -> Self);
            func!(signum(self) -> Self);
            func!(is_positive(self) -> bool);
            func!(is_negative(self) -> bool);
        }
    )+ };
    // Unsigned-only methods.
    ( IsUnsigned => $($t:ty),+ $(,)? ) => { $(
        impl IsUnsigned for $t {
            func!(is_power_of_two(self) -> bool);
            func!(next_power_of_two(self) -> Self);
            func!(checked_next_power_of_two(self) -> Option<Self>);
        }
    )+ };
    // Float implementation. `$t` is an *ident* (not a type) so that it
    // can name the `core::$t` constant modules; `$u` is the same-width
    // unsigned integer used as the raw bit representation.
    ( IsFloat => $($t:ident | $u:ty),+ $(,)? ) => { $(
        impl IsFloat for $t {
            type Raw = $u;
            const RADIX: u32 = core::$t::RADIX;
            const MANTISSA_DIGITS: u32 = core::$t::MANTISSA_DIGITS;
            const DIGITS: u32 = core::$t::DIGITS;
            const EPSILON: Self = core::$t::EPSILON;
            const MIN: Self = core::$t::MIN;
            const MIN_POSITIVE: Self = core::$t::MIN_POSITIVE;
            const MAX: Self = core::$t::MAX;
            const MIN_EXP: i32 = core::$t::MIN_EXP;
            const MAX_EXP: i32 = core::$t::MAX_EXP;
            const MIN_10_EXP: i32 = core::$t::MIN_10_EXP;
            const MAX_10_EXP: i32 = core::$t::MAX_10_EXP;
            const NAN: Self = core::$t::NAN;
            const INFINITY: Self = core::$t::INFINITY;
            const NEG_INFINITY: Self = core::$t::NEG_INFINITY;
            const PI: Self = core::$t::consts::PI;
            const FRAC_PI_2: Self = core::$t::consts::FRAC_PI_2;
            const FRAC_PI_3: Self = core::$t::consts::FRAC_PI_3;
            const FRAC_PI_4: Self = core::$t::consts::FRAC_PI_4;
            const FRAC_PI_6: Self = core::$t::consts::FRAC_PI_6;
            const FRAC_PI_8: Self = core::$t::consts::FRAC_PI_8;
            const FRAC_1_PI: Self = core::$t::consts::FRAC_1_PI;
            const FRAC_2_PI: Self = core::$t::consts::FRAC_2_PI;
            const FRAC_2_SQRT_PI: Self = core::$t::consts::FRAC_2_SQRT_PI;
            const SQRT_2: Self = core::$t::consts::SQRT_2;
            const FRAC_1_SQRT_2: Self = core::$t::consts::FRAC_1_SQRT_2;
            const E: Self = core::$t::consts::E;
            const LOG2_E: Self = core::$t::consts::LOG2_E;
            const LOG10_E: Self = core::$t::consts::LOG10_E;
            const LN_2: Self = core::$t::consts::LN_2;
            const LN_10: Self = core::$t::consts::LN_10;
            // Math-library routines: "std" only.
            stdfunc!(floor(self) -> Self);
            stdfunc!(ceil(self) -> Self);
            stdfunc!(round(self) -> Self);
            stdfunc!(trunc(self) -> Self);
            stdfunc!(fract(self) -> Self);
            stdfunc!(abs(self) -> Self);
            stdfunc!(signum(self) -> Self);
            stdfunc!(copysign(self, sign: Self) -> Self);
            stdfunc!(mul_add(self, a: Self, b: Self) -> Self);
            stdfunc!(div_euclid(self, rhs: Self) -> Self);
            stdfunc!(rem_euclid(self, rhs: Self) -> Self);
            stdfunc!(powi(self, n: i32) -> Self);
            stdfunc!(powf(self, n: Self) -> Self);
            stdfunc!(sqrt(self) -> Self);
            stdfunc!(exp(self) -> Self);
            stdfunc!(exp2(self) -> Self);
            stdfunc!(ln(self) -> Self);
            stdfunc!(log(self, base: Self) -> Self);
            stdfunc!(log2(self) -> Self);
            stdfunc!(log10(self) -> Self);
            stdfunc!(cbrt(self) -> Self);
            stdfunc!(hypot(self, other: Self) -> Self);
            stdfunc!(sin(self) -> Self);
            stdfunc!(cos(self) -> Self);
            stdfunc!(tan(self) -> Self);
            stdfunc!(asin(self) -> Self);
            stdfunc!(acos(self) -> Self);
            stdfunc!(atan(self) -> Self);
            stdfunc!(atan2(self, other: Self) -> Self);
            stdfunc!(sin_cos(self) -> (Self, Self));
            stdfunc!(exp_m1(self) -> Self);
            stdfunc!(ln_1p(self) -> Self);
            stdfunc!(sinh(self) -> Self);
            stdfunc!(cosh(self) -> Self);
            stdfunc!(tanh(self) -> Self);
            stdfunc!(asinh(self) -> Self);
            stdfunc!(acosh(self) -> Self);
            stdfunc!(atanh(self) -> Self);
            // Pure classification/bit routines: always available.
            func!(is_nan(self) -> bool);
            func!(is_infinite(self) -> bool);
            func!(is_finite(self) -> bool);
            func!(is_normal(self) -> bool);
            func!(classify(self) -> FpCategory);
            func!(is_sign_positive(self) -> bool);
            func!(is_sign_negative(self) -> bool);
            func!(recip(self) -> Self);
            func!(to_degrees(self) -> Self);
            func!(to_radians(self) -> Self);
            func!(max(self, other: Self) -> Self);
            func!(min(self, other: Self) -> Self);
            func!(to_bits(self) -> Self::Raw);
            func!(from_bits(bits: Self::Raw) -> Self);
        }
    )+ };
    // Fall-through: empty marker-trait implementations.
    ( $which:ty => $($t:ty),+ $(,)? ) => { $(
        impl $which for $t {}
    )+ };
}
// Attach the capability traits to every fundamental numeric type.
impl_for!(IsNumber => i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize, f32, f64);
impl_for!(IsInteger => i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize);
impl_for!(IsSigned => i8, i16, i32, i64, i128, isize);
impl_for!(IsUnsigned => u8, u16, u32, u64, u128, usize);
impl_for!(IsFloat => f32 | u32, f64 | u64);
// Exact-width markers. `isize`/`usize` join the marker that matches the
// target's pointer width.
impl_for!(Is8 => i8, u8);
impl_for!(Is16 => i16, u16);
impl_for!(Is32 => i32, u32, f32);
impl_for!(Is64 => i64, u64, f64);
impl_for!(Is128 => i128, u128);
#[cfg(target_pointer_width = "16")]
impl_for!(Is16 => isize, usize);
#[cfg(target_pointer_width = "32")]
impl_for!(Is32 => isize, usize);
#[cfg(target_pointer_width = "64")]
impl_for!(Is64 => isize, usize);
// Lower-bound markers; pointer-sized types join conditionally.
impl_for!(AtLeast8 => i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize, f32, f64);
impl_for!(AtLeast16 => i16, i32, i64, i128, u16, u32, u64, u128, f32, f64);
impl_for!(AtLeast32 => i32, i64, i128, u32, u64, u128, f32, f64);
impl_for!(AtLeast64 => i64, i128, u64, u128, f64);
impl_for!(AtLeast128 => i128, u128);
#[cfg(any(
    target_pointer_width = "16",
    target_pointer_width = "32",
    target_pointer_width = "64"
))]
impl_for!(AtLeast16 => isize, usize);
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
impl_for!(AtLeast32 => isize, usize);
#[cfg(target_pointer_width = "64")]
impl_for!(AtLeast64 => isize, usize);
// Upper-bound markers. Pointer-sized types are unconditionally at most
// 64 bits wide on all supported targets.
impl_for!(AtMost8 => i8, u8);
impl_for!(AtMost16 => i8, i16, u8, u16);
impl_for!(AtMost32 => i8, i16, i32, u8, u16, u32, f32);
impl_for!(AtMost64 => i8, i16, i32, i64, isize, u8, u16, u32, u64, usize, f32, f64);
impl_for!(AtMost128 => i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize, f32, f64);
#[cfg(target_pointer_width = "16")]
impl_for!(AtMost16 => isize, usize);
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
impl_for!(AtMost32 => isize, usize);
} | |
mod radium { | |
use core::cell::Cell; | |
use core::sync::atomic::Ordering; | |
use core::sync::atomic::{AtomicBool, AtomicI8, AtomicU8}; | |
use core::sync::atomic::{AtomicI16, AtomicU16}; | |
use core::sync::atomic::{AtomicI32, AtomicU32}; | |
use core::sync::atomic::{AtomicI64, AtomicU64}; | |
use core::sync::atomic::{AtomicIsize, AtomicPtr, AtomicUsize}; | |
/// Abstracts over shared-mutability primitives: the atomic types and
/// `Cell<T>`. Mirrors the upstream `radium` crate. The method set
/// matches the atomic API; the `Cell` implementations ignore the
/// `Ordering` arguments.
pub trait Radium {
    /// The underlying value type being wrapped.
    type Item;
    fn new(value: Self::Item) -> Self;
    /// Issues a memory fence for the atomic forms; a no-op for `Cell`.
    fn fence(order: Ordering);
    fn get_mut(&mut self) -> &mut Self::Item;
    fn into_inner(self) -> Self::Item;
    fn load(&self, order: Ordering) -> Self::Item;
    fn store(&self, value: Self::Item, order: Ordering);
    fn swap(&self, value: Self::Item, order: Ordering) -> Self::Item;
    fn compare_and_swap(&self, current: Self::Item, new: Self::Item, order: Ordering)
        -> Self::Item;
    fn compare_exchange(
        &self,
        current: Self::Item,
        new: Self::Item,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Self::Item, Self::Item>;
    fn compare_exchange_weak(
        &self,
        current: Self::Item,
        new: Self::Item,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Self::Item, Self::Item>;
    // The read-modify-write methods are restricted by marker trait so
    // that e.g. `bool` gets the bit ops but not the numeric ops.
    fn fetch_and(&self, value: Self::Item, order: Ordering) -> Self::Item
    where
        Self::Item: marker::BitOps;
    fn fetch_nand(&self, value: Self::Item, order: Ordering) -> Self::Item
    where
        Self::Item: marker::BitOps;
    fn fetch_or(&self, value: Self::Item, order: Ordering) -> Self::Item
    where
        Self::Item: marker::BitOps;
    fn fetch_xor(&self, value: Self::Item, order: Ordering) -> Self::Item
    where
        Self::Item: marker::BitOps;
    fn fetch_add(&self, value: Self::Item, order: Ordering) -> Self::Item
    where
        Self::Item: marker::NumericOps;
    fn fetch_sub(&self, value: Self::Item, order: Ordering) -> Self::Item
    where
        Self::Item: marker::NumericOps;
}
/// Marker traits gating which `Radium` read-modify-write methods a
/// wrapped type supports.
pub mod marker {
    /// Types supporting the bit-wise `fetch_*` operations (`bool` and
    /// all integers).
    pub trait BitOps {}
    /// Types additionally supporting `fetch_add`/`fetch_sub` (integers
    /// only).
    pub trait NumericOps: BitOps {}
}
/// Generates `Radium` implementations. The `atom*` arms forward to the
/// real atomic API; the `cell*` arms emulate it on `Cell` (single-thread
/// only, so every `Ordering` is ignored); the `int` arm stamps out both
/// implementations for a list of integer/atomic pairs.
macro_rules! radium {
    // Core `Radium` surface forwarded to an atomic type's inherent API.
    ( atom $base:ty ) => {
        #[inline]
        fn new(value: $base) -> Self {
            Self::new(value)
        }
        #[inline]
        fn fence(order: Ordering) {
            core::sync::atomic::fence(order);
        }
        #[inline]
        fn get_mut(&mut self) -> &mut $base {
            self.get_mut()
        }
        #[inline]
        fn into_inner(self) -> $base {
            self.into_inner()
        }
        #[inline]
        fn load(&self, order: Ordering) -> $base {
            self.load(order)
        }
        #[inline]
        fn store(&self, value: $base, order: Ordering) {
            self.store(value, order);
        }
        #[inline]
        fn swap(&self, value: $base, order: Ordering) -> $base {
            self.swap(value, order)
        }
        #[inline]
        fn compare_and_swap(
            &self,
            current: $base,
            new: $base,
            order: Ordering,
        ) -> $base {
            self.compare_and_swap(current, new, order)
        }
        #[inline]
        fn compare_exchange(
            &self,
            current: $base,
            new: $base,
            success: Ordering,
            failure: Ordering,
        ) -> Result<$base, $base> {
            self.compare_exchange(current, new, success, failure)
        }
        #[inline]
        fn compare_exchange_weak(
            &self,
            current: $base,
            new: $base,
            success: Ordering,
            failure: Ordering,
        ) -> Result<$base, $base> {
            self.compare_exchange_weak(current, new, success, failure)
        }
    };
    // Bit-wise fetch ops forwarded to the atomic's inherent API.
    ( atom_bit $base:ty ) => {
        #[inline]
        fn fetch_and(&self, value: $base, order: Ordering) -> $base {
            self.fetch_and(value, order)
        }
        #[inline]
        fn fetch_nand(&self, value: $base, order: Ordering) -> $base {
            self.fetch_nand(value, order)
        }
        #[inline]
        fn fetch_or(&self, value: $base, order: Ordering) -> $base {
            self.fetch_or(value, order)
        }
        #[inline]
        fn fetch_xor(&self, value: $base, order: Ordering) -> $base {
            self.fetch_xor(value, order)
        }
    };
    // Numeric fetch ops forwarded to the atomic's inherent API.
    ( atom_int $base:ty ) => {
        #[inline]
        fn fetch_add(&self, value: $base, order: Ordering) -> $base {
            self.fetch_add(value, order)
        }
        #[inline]
        fn fetch_sub(&self, value: $base, order: Ordering) -> $base {
            self.fetch_sub(value, order)
        }
    };
    // Core `Radium` surface emulated on `Cell`: orderings are ignored
    // and `fence` is a no-op, because `Cell` is not `Sync`.
    ( cell $base:ty ) => {
        #[inline]
        fn new(value: $base) -> Self {
            Cell::new(value)
        }
        #[inline]
        fn fence(_: Ordering) {}
        #[inline]
        fn get_mut(&mut self) -> &mut $base {
            self.get_mut()
        }
        #[inline]
        fn into_inner(self) -> $base {
            self.into_inner()
        }
        #[inline]
        fn load(&self, _: Ordering) -> $base {
            self.get()
        }
        #[inline]
        fn store(&self, value: $base, _: Ordering) {
            self.set(value);
        }
        #[inline]
        fn swap(&self, value: $base, _: Ordering) -> $base {
            self.replace(value)
        }
        #[inline]
        fn compare_and_swap(
            &self,
            current: $base,
            new: $base,
            _: Ordering,
        ) -> $base {
            // Matches the atomic contract: returns the previous value,
            // writing `new` only when it equaled `current`.
            if self.get() == current {
                self.replace(new)
            } else {
                self.get()
            }
        }
        #[inline]
        fn compare_exchange(
            &self,
            current: $base,
            new: $base,
            _: Ordering,
            _: Ordering,
        ) -> Result<$base, $base> {
            if self.get() == current {
                Ok(self.replace(new))
            } else {
                Err(self.get())
            }
        }
        #[inline]
        fn compare_exchange_weak(
            &self,
            current: $base,
            new: $base,
            success: Ordering,
            failure: Ordering,
        ) -> Result<$base, $base> {
            // Single-threaded: the "weak" form never fails spuriously,
            // so it is exactly the strong exchange.
            Radium::compare_exchange(self, current, new, success, failure)
        }
    };
    // Bit-wise fetch ops emulated on `Cell` (read, compute, replace);
    // each returns the previous value.
    ( cell_bit $base:ty ) => {
        #[inline]
        fn fetch_and(&self, value: $base, _: Ordering) -> $base {
            self.replace(self.get() & value)
        }
        #[inline]
        fn fetch_nand(&self, value: $base, _: Ordering) -> $base {
            self.replace(!(self.get() & value))
        }
        #[inline]
        fn fetch_or(&self, value: $base, _: Ordering) -> $base {
            self.replace(self.get() | value)
        }
        #[inline]
        fn fetch_xor(&self, value: $base, _: Ordering) -> $base {
            self.replace(self.get() ^ value)
        }
    };
    // Numeric fetch ops emulated on `Cell`; wrapping, to match the
    // atomics' overflow behavior.
    ( cell_int $base:ty ) => {
        #[inline]
        fn fetch_add(&self, value: $base, _: Ordering) -> $base {
            self.replace(self.get().wrapping_add(value))
        }
        #[inline]
        fn fetch_sub(&self, value: $base, _: Ordering) -> $base {
            self.replace(self.get().wrapping_sub(value))
        }
    };
    // Stamps out both implementations for each `$base, $atom;` pair.
    // The atomic impl is gated on `$flag` (targets may lack an atomic
    // of a given width); the `Cell` impl is unconditional.
    ( int $flag:ident $( $base:ty , $atom:ty ; )* ) => { $(
        impl marker::BitOps for $base {}
        impl marker::NumericOps for $base {}
        #[cfg($flag)]
        impl Radium for $atom {
            type Item = $base;
            radium!(atom $base);
            radium!(atom_bit $base);
            radium!(atom_int $base);
        }
        impl Radium for Cell<$base> {
            type Item = $base;
            radium!(cell $base);
            radium!(cell_bit $base);
            radium!(cell_int $base);
        }
    )* };
}
// Instantiate `Radium` (both the atomic and the `Cell` form) for each signed
// and unsigned integer width. The first token is a `cfg` flag gating only the
// *atomic* impl (`#[cfg($flag)]` in the `int` macro arm above), so targets
// that lack a given atomic width still receive the `Cell` implementation.
// NOTE(review): the `radium_atomic_*` flags are presumably emitted by a build
// script probing the target's atomic support — confirm where they are set.
radium![int radium_atomic_8 i8, AtomicI8; u8, AtomicU8;];
radium![int radium_atomic_16 i16, AtomicI16; u16, AtomicU16;];
radium![int radium_atomic_32 i32, AtomicI32; u32, AtomicU32;];
radium![int radium_atomic_64 i64, AtomicI64; u64, AtomicU64;];
radium![int radium_atomic_ptr isize, AtomicIsize; usize, AtomicUsize;];
// `bool` participates in the bitwise (`&`, `|`, `^`) operator set, but not
// the numeric one, so it gets `BitOps` only (ints above get both markers).
impl marker::BitOps for bool {}
/// `Radium` over the always-atomic shared-mutability `bool`.
impl Radium for AtomicBool {
    type Item = bool;
    // Core API (new/load/store/swap/CAS family) forwards to the inherent
    // `AtomicBool` methods.
    radium!(atom bool);
    // Bitwise fetch_and / fetch_nand / fetch_or / fetch_xor forwarders.
    radium!(atom_bit bool);
    // `bool` has no arithmetic, so the numeric hooks are stubbed out.
    // NOTE(review): presumably the `Radium` trait gates these behind a
    // `marker::NumericOps` bound (not implemented for `bool`) so they are
    // uncallable — confirm at the trait definition.
    fn fetch_add(&self, _value: bool, _order: Ordering) -> bool {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_sub(&self, _value: bool, _order: Ordering) -> bool {
        unreachable!("This method statically cannot be called")
    }
}
/// `Radium` over the single-threaded unsynchronized `bool`.
impl Radium for Cell<bool> {
    type Item = bool;
    // Core API implemented via `Cell::get`/`set`/`replace`; the `Ordering`
    // arguments are ignored since no synchronization is needed.
    radium!(cell bool);
    // Bitwise fetch ops implemented as get-modify-replace.
    radium!(cell_bit bool);
    // `bool` has no arithmetic, so the numeric hooks are stubbed out.
    // NOTE(review): presumably uncallable because `bool` lacks the
    // `marker::NumericOps` bound required by the trait — confirm.
    fn fetch_add(&self, _value: bool, _order: Ordering) -> bool {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_sub(&self, _value: bool, _order: Ordering) -> bool {
        unreachable!("This method statically cannot be called")
    }
}
/// `Radium` over atomically-shared raw pointers.
impl<T> Radium for AtomicPtr<T> {
    type Item = *mut T;
    // Only the core load/store/swap/CAS API applies to pointers.
    radium!(atom *mut T);
    // Raw pointers support neither bitwise nor arithmetic fetch operations,
    // so every such hook is stubbed out.
    // NOTE(review): presumably uncallable because `*mut T` implements
    // neither `marker::BitOps` nor `marker::NumericOps` — confirm at the
    // trait definition.
    fn fetch_and(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_nand(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_or(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_xor(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_add(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_sub(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
}
/// `Radium` over single-threaded unsynchronized raw pointers.
impl<T> Radium for Cell<*mut T> {
    type Item = *mut T;
    // Core API implemented via `Cell` accessors; `Ordering` is ignored.
    radium!(cell *mut T);
    // Raw pointers support neither bitwise nor arithmetic fetch operations,
    // so every such hook is stubbed out.
    // NOTE(review): presumably uncallable because `*mut T` implements
    // neither `marker::BitOps` nor `marker::NumericOps` — confirm at the
    // trait definition.
    fn fetch_and(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_nand(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_or(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_xor(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_add(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
    fn fetch_sub(&self, _value: *mut T, _order: Ordering) -> *mut T {
        unreachable!("This method statically cannot be called")
    }
}
} | |
pub mod tap { | |
pub mod pipe { | |
use core::{ | |
borrow::{Borrow, BorrowMut}, | |
ops::{Deref, DerefMut}, | |
}; | |
/// Suffix-position function application: `value.pipe(f)` is `f(value)`.
///
/// Each method hands `self` (or some view of it) to the supplied closure and
/// returns whatever the closure produces, so call chains read left-to-right
/// rather than inside-out.
pub trait Pipe {
    /// Applies `f` to `self` by value.
    #[inline(always)]
    fn pipe<R>(self, f: impl FnOnce(Self) -> R) -> R
    where
        Self: Sized,
        R: Sized,
    {
        f(self)
    }

    /// Applies `f` to a shared borrow of `self`.
    #[inline(always)]
    fn pipe_ref<'a, R>(&'a self, f: impl FnOnce(&'a Self) -> R) -> R
    where
        R: 'a + Sized,
    {
        f(self)
    }

    /// Applies `f` to an exclusive borrow of `self`.
    #[inline(always)]
    fn pipe_ref_mut<'a, R>(
        &'a mut self,
        f: impl FnOnce(&'a mut Self) -> R,
    ) -> R
    where
        R: 'a + Sized,
    {
        f(self)
    }

    /// Borrows `self` as `B` (via `Borrow`) and applies `f` to that view.
    #[inline(always)]
    fn pipe_borrow<'a, B, R>(&'a self, f: impl FnOnce(&'a B) -> R) -> R
    where
        Self: Borrow<B>,
        B: 'a + ?Sized,
        R: 'a + Sized,
    {
        f(Borrow::<B>::borrow(self))
    }

    /// Mutably borrows `self` as `B` (via `BorrowMut`) and applies `f`.
    #[inline(always)]
    fn pipe_borrow_mut<'a, B, R>(
        &'a mut self,
        f: impl FnOnce(&'a mut B) -> R,
    ) -> R
    where
        Self: BorrowMut<B>,
        B: 'a + ?Sized,
        R: 'a + Sized,
    {
        f(BorrowMut::<B>::borrow_mut(self))
    }

    /// Converts `&self` into `&U` (via `AsRef`) and applies `f`.
    #[inline(always)]
    fn pipe_as_ref<'a, U, R>(&'a self, f: impl FnOnce(&'a U) -> R) -> R
    where
        Self: AsRef<U>,
        U: 'a + ?Sized,
        R: 'a + Sized,
    {
        f(AsRef::<U>::as_ref(self))
    }

    /// Converts `&mut self` into `&mut U` (via `AsMut`) and applies `f`.
    #[inline(always)]
    fn pipe_as_mut<'a, U, R>(
        &'a mut self,
        f: impl FnOnce(&'a mut U) -> R,
    ) -> R
    where
        Self: AsMut<U>,
        U: 'a + ?Sized,
        R: 'a + Sized,
    {
        f(AsMut::<U>::as_mut(self))
    }

    /// Dereferences `self` and applies `f` to the `Deref` target.
    #[inline(always)]
    fn pipe_deref<'a, T, R>(&'a self, f: impl FnOnce(&'a T) -> R) -> R
    where
        Self: Deref<Target = T>,
        T: 'a + ?Sized,
        R: 'a + Sized,
    {
        f(&**self)
    }

    /// Mutably dereferences `self` and applies `f` to the target.
    #[inline(always)]
    fn pipe_deref_mut<'a, T, R>(
        &'a mut self,
        f: impl FnOnce(&'a mut T) -> R,
    ) -> R
    where
        Self: DerefMut + Deref<Target = T>,
        T: 'a + ?Sized,
        R: 'a + Sized,
    {
        f(&mut **self)
    }
}

/// Every type, sized or not, can be piped.
impl<T: ?Sized> Pipe for T {
}
} | |
pub mod tap { | |
use core::{ | |
borrow::{Borrow, BorrowMut}, | |
ops::{Deref, DerefMut}, | |
}; | |
/// Inspection in the middle of a call chain: every `tap_*` method runs a
/// side-effecting closure against `self` (or a view of it) and then returns
/// `self`, so chains can log, assert, or mutate in passing.
pub trait Tap
where
    Self: Sized,
{
    /// Runs `f` on a shared borrow of `self`, then yields `self`.
    #[inline(always)]
    fn tap(self, f: impl FnOnce(&Self)) -> Self {
        f(&self);
        self
    }

    /// Runs `f` on an exclusive borrow of `self`, then yields `self`.
    #[inline(always)]
    fn tap_mut(mut self, f: impl FnOnce(&mut Self)) -> Self {
        f(&mut self);
        self
    }

    /// Runs `f` on `self` viewed as `&B` (via `Borrow`), then yields `self`.
    #[inline(always)]
    fn tap_borrow<B>(self, f: impl FnOnce(&B)) -> Self
    where
        Self: Borrow<B>,
        B: ?Sized,
    {
        f(Borrow::<B>::borrow(&self));
        self
    }

    /// Runs `f` on `self` viewed as `&mut B`, then yields `self`.
    #[inline(always)]
    fn tap_borrow_mut<B>(mut self, f: impl FnOnce(&mut B)) -> Self
    where
        Self: BorrowMut<B>,
        B: ?Sized,
    {
        f(BorrowMut::<B>::borrow_mut(&mut self));
        self
    }

    /// Runs `f` on `self` viewed as `&R` (via `AsRef`), then yields `self`.
    #[inline(always)]
    fn tap_ref<R>(self, f: impl FnOnce(&R)) -> Self
    where
        Self: AsRef<R>,
        R: ?Sized,
    {
        f(AsRef::<R>::as_ref(&self));
        self
    }

    /// Runs `f` on `self` viewed as `&mut R` (via `AsMut`), then yields `self`.
    #[inline(always)]
    fn tap_ref_mut<R>(mut self, f: impl FnOnce(&mut R)) -> Self
    where
        Self: AsMut<R>,
        R: ?Sized,
    {
        f(AsMut::<R>::as_mut(&mut self));
        self
    }

    /// Runs `f` on the `Deref` target of `self`, then yields `self`.
    #[inline(always)]
    fn tap_deref<T>(self, f: impl FnOnce(&T)) -> Self
    where
        Self: Deref<Target = T>,
        T: ?Sized,
    {
        f(&*self);
        self
    }

    /// Runs `f` on the mutable `Deref` target of `self`, then yields `self`.
    #[inline(always)]
    fn tap_deref_mut<T>(mut self, f: impl FnOnce(&mut T)) -> Self
    where
        Self: DerefMut + Deref<Target = T>,
        T: ?Sized,
    {
        f(&mut *self);
        self
    }

    // Each `_dbg` variant is a no-op in release builds: it delegates to its
    // unconditional counterpart only when `debug_assertions` is enabled.

    /// Debug-build-only [`Tap::tap`].
    #[inline(always)]
    fn tap_dbg(self, f: impl FnOnce(&Self)) -> Self {
        if cfg!(debug_assertions) {
            self.tap(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_mut`].
    #[inline(always)]
    fn tap_mut_dbg(self, f: impl FnOnce(&mut Self)) -> Self {
        if cfg!(debug_assertions) {
            self.tap_mut(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_borrow`].
    #[inline(always)]
    fn tap_borrow_dbg<B>(self, f: impl FnOnce(&B)) -> Self
    where
        Self: Borrow<B>,
        B: ?Sized,
    {
        if cfg!(debug_assertions) {
            self.tap_borrow(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_borrow_mut`].
    #[inline(always)]
    fn tap_borrow_mut_dbg<B>(self, f: impl FnOnce(&mut B)) -> Self
    where
        Self: BorrowMut<B>,
        B: ?Sized,
    {
        if cfg!(debug_assertions) {
            self.tap_borrow_mut(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_ref`].
    #[inline(always)]
    fn tap_ref_dbg<R>(self, f: impl FnOnce(&R)) -> Self
    where
        Self: AsRef<R>,
        R: ?Sized,
    {
        if cfg!(debug_assertions) {
            self.tap_ref(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_ref_mut`].
    #[inline(always)]
    fn tap_ref_mut_dbg<R>(self, f: impl FnOnce(&mut R)) -> Self
    where
        Self: AsMut<R>,
        R: ?Sized,
    {
        if cfg!(debug_assertions) {
            self.tap_ref_mut(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_deref`].
    #[inline(always)]
    fn tap_deref_dbg<T>(self, f: impl FnOnce(&T)) -> Self
    where
        Self: Deref<Target = T>,
        T: ?Sized,
    {
        if cfg!(debug_assertions) {
            self.tap_deref(f)
        } else {
            self
        }
    }

    /// Debug-build-only [`Tap::tap_deref_mut`].
    #[inline(always)]
    fn tap_deref_mut_dbg<T>(self, f: impl FnOnce(&mut T)) -> Self
    where
        Self: DerefMut + Deref<Target = T>,
        T: ?Sized,
    {
        if cfg!(debug_assertions) {
            self.tap_deref_mut(f)
        } else {
            self
        }
    }
}

/// Unconditionally available on every sized type; the per-method bounds do
/// the real gating.
impl<T> Tap for T {}
/// Tap-style inspection for `Option`-shaped containers.
pub trait TapOptional
where
    Self: Sized,
{
    /// The type wrapped by the container when a value is present.
    type Val: ?Sized;
    /// Runs `func` on the contained value, if any, then yields `self`.
    fn tap_some(self, func: impl FnOnce(&Self::Val)) -> Self;
    /// Runs `func` on a mutable view of the contained value, if any.
    fn tap_some_mut(self, func: impl FnOnce(&mut Self::Val)) -> Self;
    /// Runs `func` only when the container is empty.
    fn tap_none(self, func: impl FnOnce()) -> Self;

    /// Debug-build-only [`TapOptional::tap_some`].
    #[inline(always)]
    fn tap_some_dbg(self, func: impl FnOnce(&Self::Val)) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_some(func),
            false => self,
        }
    }

    /// Debug-build-only [`TapOptional::tap_some_mut`].
    #[inline(always)]
    fn tap_some_mut_dbg(self, func: impl FnOnce(&mut Self::Val)) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_some_mut(func),
            false => self,
        }
    }

    /// Debug-build-only [`TapOptional::tap_none`].
    #[inline(always)]
    fn tap_none_dbg(self, func: impl FnOnce()) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_none(func),
            false => self,
        }
    }
}

impl<T> TapOptional for Option<T> {
    type Val = T;

    #[inline(always)]
    fn tap_some(self, func: impl FnOnce(&T)) -> Self {
        if let Some(val) = &self {
            func(val);
        }
        self
    }

    #[inline(always)]
    fn tap_some_mut(mut self, func: impl FnOnce(&mut T)) -> Self {
        if let Some(val) = &mut self {
            func(val);
        }
        self
    }

    #[inline(always)]
    fn tap_none(self, func: impl FnOnce()) -> Self {
        match &self {
            None => func(),
            Some(_) => {},
        }
        self
    }
}
/// Tap-style inspection for `Result`-shaped containers.
pub trait TapFallible
where
    Self: Sized,
{
    /// The success value type.
    type Ok: ?Sized;
    /// The error value type.
    type Err: ?Sized;
    /// Runs `func` on the success value, if present, then yields `self`.
    fn tap_ok(self, func: impl FnOnce(&Self::Ok)) -> Self;
    /// Runs `func` on a mutable view of the success value, if present.
    fn tap_ok_mut(self, func: impl FnOnce(&mut Self::Ok)) -> Self;
    /// Runs `func` on the error value, if present, then yields `self`.
    fn tap_err(self, func: impl FnOnce(&Self::Err)) -> Self;
    /// Runs `func` on a mutable view of the error value, if present.
    fn tap_err_mut(self, func: impl FnOnce(&mut Self::Err)) -> Self;

    /// Debug-build-only [`TapFallible::tap_ok`].
    #[inline(always)]
    fn tap_ok_dbg(self, func: impl FnOnce(&Self::Ok)) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_ok(func),
            false => self,
        }
    }

    /// Debug-build-only [`TapFallible::tap_ok_mut`].
    #[inline(always)]
    fn tap_ok_mut_dbg(self, func: impl FnOnce(&mut Self::Ok)) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_ok_mut(func),
            false => self,
        }
    }

    /// Debug-build-only [`TapFallible::tap_err`].
    #[inline(always)]
    fn tap_err_dbg(self, func: impl FnOnce(&Self::Err)) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_err(func),
            false => self,
        }
    }

    /// Debug-build-only [`TapFallible::tap_err_mut`].
    #[inline(always)]
    fn tap_err_mut_dbg(self, func: impl FnOnce(&mut Self::Err)) -> Self {
        match cfg!(debug_assertions) {
            true => self.tap_err_mut(func),
            false => self,
        }
    }
}

impl<T, E> TapFallible for Result<T, E> {
    type Ok = T;
    type Err = E;

    #[inline(always)]
    fn tap_ok(self, func: impl FnOnce(&T)) -> Self {
        if let Ok(val) = &self {
            func(val);
        }
        self
    }

    #[inline(always)]
    fn tap_ok_mut(mut self, func: impl FnOnce(&mut T)) -> Self {
        if let Ok(val) = &mut self {
            func(val);
        }
        self
    }

    #[inline(always)]
    fn tap_err(self, func: impl FnOnce(&E)) -> Self {
        if let Err(val) = &self {
            func(val);
        }
        self
    }

    #[inline(always)]
    fn tap_err_mut(mut self, func: impl FnOnce(&mut E)) -> Self {
        if let Err(val) = &mut self {
            func(val);
        }
        self
    }
}
} | |
} | |
pub mod wyz { | |
pub mod fmt { | |
use core::{ | |
fmt::{ | |
self, | |
Binary, | |
Debug, | |
Display, | |
Formatter, | |
LowerExp, | |
LowerHex, | |
Octal, | |
Pointer, | |
UpperExp, | |
UpperHex, | |
}, | |
ops::{ | |
Deref, | |
DerefMut, | |
}, | |
}; | |
/// Routes a value's `Debug` rendering through an alternate formatting trait.
///
/// Wrapping a value with one of these adapters makes `{:?}` print it with the
/// named trait (binary, hex, …), which is useful when debug-printing
/// collections whose elements should render in a non-default radix.
pub trait FmtForward: Sized {
    /// Renders `self` with its `Binary` implementation under `Debug`.
    fn fmt_binary(self) -> FmtBinary<Self>
    where
        Self: Binary,
    {
        FmtBinary(self)
    }

    /// Renders `self` with its `Display` implementation under `Debug`.
    fn fmt_display(self) -> FmtDisplay<Self>
    where
        Self: Display,
    {
        FmtDisplay(self)
    }

    /// Renders `self` with its `LowerExp` implementation under `Debug`.
    fn fmt_lower_exp(self) -> FmtLowerExp<Self>
    where
        Self: LowerExp,
    {
        FmtLowerExp(self)
    }

    /// Renders `self` with its `LowerHex` implementation under `Debug`.
    fn fmt_lower_hex(self) -> FmtLowerHex<Self>
    where
        Self: LowerHex,
    {
        FmtLowerHex(self)
    }

    /// Renders `self` with its `Octal` implementation under `Debug`.
    fn fmt_octal(self) -> FmtOctal<Self>
    where
        Self: Octal,
    {
        FmtOctal(self)
    }

    /// Renders `self` with its `Pointer` implementation under `Debug`.
    fn fmt_pointer(self) -> FmtPointer<Self>
    where
        Self: Pointer,
    {
        FmtPointer(self)
    }

    /// Renders `self` with its `UpperExp` implementation under `Debug`.
    fn fmt_upper_exp(self) -> FmtUpperExp<Self>
    where
        Self: UpperExp,
    {
        FmtUpperExp(self)
    }

    /// Renders `self` with its `UpperHex` implementation under `Debug`.
    fn fmt_upper_hex(self) -> FmtUpperHex<Self>
    where
        Self: UpperHex,
    {
        FmtUpperHex(self)
    }
}

/// Available on every sized type; the per-method bounds do the real gating.
impl<T: Sized> FmtForward for T {
}

/// Forwards `Debug` to `Binary`.
#[repr(transparent)]
pub struct FmtBinary<T: Binary>(pub T);
/// Forwards `Debug` to `Display`.
#[repr(transparent)]
pub struct FmtDisplay<T: Display>(pub T);
/// Forwards `Debug` to `LowerExp`.
#[repr(transparent)]
pub struct FmtLowerExp<T: LowerExp>(pub T);
/// Forwards `Debug` to `LowerHex`.
#[repr(transparent)]
pub struct FmtLowerHex<T: LowerHex>(pub T);
/// Forwards `Debug` to `Octal`.
#[repr(transparent)]
pub struct FmtOctal<T: Octal>(pub T);
/// Forwards `Debug` to `Pointer`.
#[repr(transparent)]
pub struct FmtPointer<T: Pointer>(pub T);
/// Forwards `Debug` to `UpperExp`.
#[repr(transparent)]
pub struct FmtUpperExp<T: UpperExp>(pub T);
/// Forwards `Debug` to `UpperHex`.
#[repr(transparent)]
pub struct FmtUpperHex<T: UpperHex>(pub T);

/// For each `wrapper => trait` pair: `Debug` renders through the named
/// trait, every other formatting trait passes straight through when the
/// wrapped type also implements it, and `Deref`/`DerefMut`/`AsRef`/`AsMut`
/// expose the wrapped value.
macro_rules! fmt {
    ($($wrapper:ty => $tr:ident),* $(,)?) => { $(
        impl<T: $tr> Debug for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                <T as $tr>::fmt(&self.0, f)
            }
        }
        impl<T: $tr + Binary> Binary for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                Binary::fmt(&self.0, f)
            }
        }
        impl<T: $tr + Display> Display for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                Display::fmt(&self.0, f)
            }
        }
        impl<T: $tr + LowerExp> LowerExp for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                LowerExp::fmt(&self.0, f)
            }
        }
        impl<T: $tr + LowerHex> LowerHex for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                LowerHex::fmt(&self.0, f)
            }
        }
        impl<T: $tr + Octal> Octal for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                Octal::fmt(&self.0, f)
            }
        }
        impl<T: $tr + Pointer> Pointer for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                Pointer::fmt(&self.0, f)
            }
        }
        impl<T: $tr + UpperExp> UpperExp for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                UpperExp::fmt(&self.0, f)
            }
        }
        impl<T: $tr + UpperHex> UpperHex for $wrapper {
            fn fmt(&self, f: &mut Formatter) -> fmt::Result {
                UpperHex::fmt(&self.0, f)
            }
        }
        impl<T: $tr> Deref for $wrapper {
            type Target = T;
            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }
        impl<T: $tr> DerefMut for $wrapper {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.0
            }
        }
        impl<T: $tr> AsRef<T> for $wrapper {
            fn as_ref(&self) -> &T {
                &self.0
            }
        }
        impl<T: $tr> AsMut<T> for $wrapper {
            fn as_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }
    )* };
}
fmt!(
    FmtBinary<T> => Binary,
    FmtDisplay<T> => Display,
    FmtLowerExp<T> => LowerExp,
    FmtLowerHex<T> => LowerHex,
    FmtOctal<T> => Octal,
    FmtPointer<T> => Pointer,
    FmtUpperExp<T> => UpperExp,
    FmtUpperHex<T> => UpperHex,
);
} | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment