diff --git a/iblt/Cargo.toml b/iblt/Cargo.toml
index 41232812d..c9cf42ad7 100644
--- a/iblt/Cargo.toml
+++ b/iblt/Cargo.toml
@@ -13,26 +13,3 @@ panic = 'abort'
 
 [dependencies]
 crc32fast = "^1"
-zerocopy = { version = "0.6.1", features = ["alloc"] }
-
-[dev-dependencies]
-rand = ">=0"
-criterion = ">=0"
-
-[lib]
-
-[[bench]]
-name = "to_from_bytes"
-harness = false
-
-[[bench]]
-name = "clone"
-harness = false
-
-[[bench]]
-name = "list"
-harness = false
-
-[[bench]]
-name = "merge"
-harness = false
diff --git a/iblt/benches/clone.rs b/iblt/benches/clone.rs
deleted file mode 100644
index 0199de2e2..000000000
--- a/iblt/benches/clone.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-use iblt::IBLT;
-
-const CAPACITY: usize = 4096;
-type OurIBLT = IBLT<[u8; 32], CAPACITY, 3>;
-
-pub fn criterion_benchmark(c: &mut Criterion) {
-    let mut iblt = OurIBLT::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 32];
-        v.fill_with(rand::random);
-        iblt.insert(&v);
-    }
-
-    c.bench_function("clone", |b| b.iter(|| iblt.clone()));
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
diff --git a/iblt/benches/list.rs b/iblt/benches/list.rs
deleted file mode 100644
index 9987ab3df..000000000
--- a/iblt/benches/list.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-use iblt::IBLT;
-
-const CAPACITY: usize = 4096;
-type IBLT32 = IBLT<[u8; 32], CAPACITY, 3>;
-type IBLT16 = IBLT<[u8; 16], CAPACITY, 3>;
-type IBLT8 = IBLT<[u8; 8], CAPACITY, 3>;
-
-pub fn criterion_benchmark(c: &mut Criterion) {
-    let mut iblt = IBLT32::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 32];
-        v.fill_with(rand::random);
-        iblt.insert(&v);
-    }
-
-    c.bench_function("list 32", |b| b.iter(|| iblt.list(|_, _| {})));
-
-    let mut iblt = IBLT16::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 16];
-        v.fill_with(rand::random);
-        iblt.insert(&v);
-    }
-
-    c.bench_function("list 16", |b| b.iter(|| iblt.list(|_, _| {})));
-
-    let mut iblt = IBLT8::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 8];
-        v.fill_with(rand::random);
-        iblt.insert(&v);
-    }
-
-    c.bench_function("list 8", |b| b.iter(|| iblt.list(|_, _| {})));
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
diff --git a/iblt/benches/merge.rs b/iblt/benches/merge.rs
deleted file mode 100644
index 05d994562..000000000
--- a/iblt/benches/merge.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-use iblt::IBLT;
-
-const CAPACITY: usize = 4096;
-type IBLT32 = IBLT<[u8; 32], CAPACITY, 3>;
-type IBLT16 = IBLT<[u8; 16], CAPACITY, 3>;
-type IBLT8 = IBLT<[u8; 8], CAPACITY, 3>;
-
-pub fn criterion_benchmark(c: &mut Criterion) {
-    let mut orig = IBLT32::new();
-    let mut new = IBLT32::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 32];
-        v.fill_with(rand::random);
-        orig.insert(&v);
-    }
-
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 32];
-        v.fill_with(rand::random);
-        new.insert(&v);
-    }
-
-    c.bench_function("merge 32", |b| {
-        b.iter(|| {
-            let mut new2 = new.clone();
-            orig.subtract(&new);
-            new2.subtract(&orig);
-        })
-    });
-
-    let mut orig = IBLT16::new();
-    let mut new = IBLT16::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 16];
-        v.fill_with(rand::random);
-        orig.insert(&v);
-    }
-
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 16];
-        v.fill_with(rand::random);
-        new.insert(&v);
-    }
-
-    c.bench_function("merge 16", |b| {
-        b.iter(|| {
-            let mut new2 = new.clone();
-            orig.subtract(&new);
-            new2.subtract(&orig);
-        })
-    });
-
-    let mut orig = IBLT8::new();
-    let mut new = IBLT8::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 8];
-        v.fill_with(rand::random);
-        orig.insert(&v);
-    }
-
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 8];
-        v.fill_with(rand::random);
-        new.insert(&v);
-    }
-
-    c.bench_function("merge 8", |b| {
-        b.iter(|| {
-            let mut new2 = new.clone();
-            orig.subtract(&new);
-            new2.subtract(&orig);
-        })
-    });
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
diff --git a/iblt/benches/to_from_bytes.rs b/iblt/benches/to_from_bytes.rs
deleted file mode 100644
index ede76d504..000000000
--- a/iblt/benches/to_from_bytes.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-use iblt::IBLT;
-
-const CAPACITY: usize = 4096;
-type OurIBLT = IBLT<[u8; 32], CAPACITY, 3>;
-
-pub fn criterion_benchmark(c: &mut Criterion) {
-    let mut iblt = OurIBLT::new();
-    for _ in 0..CAPACITY {
-        let mut v = [0u8; 32];
-        v.fill_with(rand::random);
-        iblt.insert(&v);
-    }
-
-    c.bench_function("to_from_bytes", |b| b.iter(|| OurIBLT::from_bytes(iblt.as_bytes())));
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
diff --git a/iblt/src/lib.rs b/iblt/src/lib.rs
index a420fd3b1..2cd0954cb 100644
--- a/iblt/src/lib.rs
+++ b/iblt/src/lib.rs
@@ -6,41 +6,7 @@
  * https://www.zerotier.com/
  */
 
-use zerocopy::{AsBytes, FromBytes};
-
-#[cfg(not(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64", target_arch = "powerpc64")))]
-#[inline(always)]
-fn xor_with<T>(x: &mut T, y: &T)
-where
-    T: FromBytes + AsBytes + Sized,
-{
-    x.as_bytes_mut().iter_mut().zip(y.as_bytes().iter()).for_each(|(a, b)| *a ^= *b);
-}
-
-#[cfg(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64", target_arch = "powerpc64"))]
-#[inline(always)]
-fn xor_with<T>(x: &mut T, y: &T)
-where
-    T: FromBytes + AsBytes + Sized,
-{
-    let size = std::mem::size_of::<T>();
-
-    if size >= 16 {
-        for i in 0..(size / 16) {
-            unsafe { *x.as_bytes_mut().as_mut_ptr().cast::<u128>().add(i) ^= *y.as_bytes().as_ptr().cast::<u128>().add(i) };
-        }
-        for i in (size - (size % 16))..size {
-            unsafe { *x.as_bytes_mut().as_mut_ptr().add(i) ^= *y.as_bytes().as_ptr().add(i) };
-        }
-    } else {
-        for i in 0..(size / 8) {
-            unsafe { *x.as_bytes_mut().as_mut_ptr().cast::<u64>().add(i) ^= *y.as_bytes().as_ptr().cast::<u64>().add(i) };
-        }
-        for i in (size - (size % 8))..size {
-            unsafe { *x.as_bytes_mut().as_mut_ptr().add(i) ^= *y.as_bytes().as_ptr().add(i) };
-        }
-    }
-}
+use std::borrow::Cow;
 
 #[inline(always)]
 fn murmurhash32_mix32(mut x: u32) -> u32 {
@@ -52,6 +18,13 @@ fn murmurhash32_mix32(mut x: u32) -> u32 {
     x
 }
 
+#[inline(always)]
+fn xor_with<const L: usize>(x: &mut [u8; L], y: &[u8; L]) {
+    for i in 0..L {
+        x[i] ^= y[i];
+    }
+}
+
 /// An Invertible Bloom Lookup Table for set reconciliation.
 ///
 /// Usage inspired by this paper:
@@ -69,38 +42,31 @@
 ///
 /// The best value for HASHES seems to be 3 for an optimal fill of 75%.
 #[repr(C)]
-pub struct IBLT<T, const BUCKETS: usize, const HASHES: usize>
-where
-    T: FromBytes + AsBytes + Default + Sized + Clone,
-{
-    check_hash: Box<Vec<u32>>,
-    count: Box<Vec<i8>>,
-    key: Box<Vec<T>>,
+pub struct IBLT<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> {
+    check_hash: [u32; BUCKETS],
+    count: [i8; BUCKETS],
+    key: [[u8; ITEM_BYTES]; BUCKETS],
 }
 
-impl<T, const BUCKETS: usize, const HASHES: usize> Clone for IBLT<T, BUCKETS, HASHES>
-where
-    T: FromBytes + AsBytes + Default + Sized + Clone,
-{
+impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> Clone for IBLT<BUCKETS, ITEM_BYTES, HASHES> {
+    #[inline(always)]
     fn clone(&self) -> Self {
+        // Intentionally designed to work on any platform even if unaligned. The default way
+        // of implementing clone() may not since check_hash[] is u32.
         unsafe {
-            let mut tmp = Self::new();
-            std::ptr::copy_nonoverlapping(self.check_hash.as_ptr(), tmp.check_hash.as_mut_ptr(), BUCKETS);
-            std::ptr::copy_nonoverlapping(self.count.as_ptr(), tmp.count.as_mut_ptr(), BUCKETS);
-            std::ptr::copy_nonoverlapping(self.key.as_ptr(), tmp.key.as_mut_ptr(), BUCKETS);
+            let mut tmp: Self = std::mem::MaybeUninit::uninit().assume_init();
+            std::ptr::copy_nonoverlapping((self as *const Self).cast::<u8>(), (&mut tmp as *mut Self).cast::<u8>(), Self::SIZE_BYTES);
             tmp
         }
     }
 }
 
-impl<T, const BUCKETS: usize, const HASHES: usize> IBLT<T, BUCKETS, HASHES>
-where
-    T: FromBytes + AsBytes + Default + Sized + Clone,
-{
+impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
     /// Number of bytes each bucket consumes (not congituously, but doesn't matter).
-    const BUCKET_SIZE_BYTES: usize = std::mem::size_of::<T>() + 4 + 1;
+    const BUCKET_SIZE_BYTES: usize = ITEM_BYTES + 4 + 1;
 
     /// Number of buckets in this IBLT.
+    #[allow(unused)]
     pub const BUCKETS: usize = BUCKETS;
 
     /// Size of this IBLT in bytes.
@@ -109,34 +75,24 @@ where
     /// Create a new zeroed IBLT.
     #[inline(always)]
     pub fn new() -> Self {
-        assert!(BUCKETS < (i32::MAX as usize));
-
-        let mut s = Self {
-            check_hash: Box::new(Vec::with_capacity(BUCKETS)),
-            count: Box::new(Vec::with_capacity(BUCKETS)),
-            key: Box::new(Vec::with_capacity(BUCKETS)),
-        };
-
-        s.reset();
-        s
+        assert!(Self::SIZE_BYTES <= std::mem::size_of::<Self>());
+        unsafe { std::mem::zeroed() }
     }
 
+    /// Create a new zeroed IBLT on the heap.
+    ///
+    /// This is useful to create and use IBLT instances too large to fit on the stack.
     #[inline(always)]
-    pub fn as_bytes(&self) -> Box<Vec<u8>> {
-        let check_hash_len = BUCKETS * 4;
-        let t_len = BUCKETS * std::mem::size_of::<T>();
-        let len = check_hash_len + BUCKETS + t_len;
+    pub fn new_boxed() -> Box<Self> {
+        assert!(Self::SIZE_BYTES <= std::mem::size_of::<Self>());
+        unsafe { Box::from_raw(std::alloc::alloc_zeroed(std::alloc::Layout::new::<Self>()).cast()) }
+    }
 
-        let mut buf = Box::new(Vec::with_capacity(len));
-        buf.resize(len, 0);
-
-        let byt = buf.as_bytes_mut();
-
-        byt[0..check_hash_len].copy_from_slice(self.check_hash.as_bytes());
-        byt[check_hash_len..BUCKETS + check_hash_len].copy_from_slice(self.count.as_bytes());
-        byt[len - t_len..len].copy_from_slice(self.key.as_bytes());
-
-        buf
+    /// Get this IBLT as a byte slice (free cast operation).
+    /// The returned slice is always SIZE_BYTES in length.
+    #[inline(always)]
+    pub fn as_bytes(&self) -> &[u8] {
+        unsafe { &*std::ptr::slice_from_raw_parts((self as *const Self).cast::<u8>(), Self::SIZE_BYTES) }
     }
 
     /// Obtain an IBLT from bytes in memory.
@@ -145,33 +101,21 @@
     ///
     /// If the architecture supports unaligned memory access or the memory is aligned, this returns a borrowed
    /// Cow to 'b' that is just a cast. If re-alignment is necessary it returns an owned Cow containing a properly
     /// aligned copy. This makes conversion a nearly free cast when alignment adjustment isn't needed.
     #[inline(always)]
-    pub fn from_bytes(b: Box<Vec<u8>>) -> Option<Self> {
+    pub fn from_bytes<'a>(b: &'a [u8]) -> Option<Cow<'a, Self>> {
         if b.len() == Self::SIZE_BYTES {
-            // FIXME I commented this out because I do not have access to the architectures needed.
-            // #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "powerpc64", target_arch = "aarch64")))]
-            // {
-            //     if b.as_ptr().align_offset(8) == 0 {
-            //         Some(Cow::Borrowed(unsafe { &*b.as_ptr().cast() }))
-            //     } else {
-            //         // NOTE: clone() is implemented above using a raw copy so that alignment doesn't matter.
-            //         Some(Cow::Owned(unsafe { &*b.as_ptr().cast::<Self>() }.clone()))
-            //     }
-            // }
             #[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "powerpc64", target_arch = "aarch64"))]
             {
-                let mut tmp = Self::new();
+                Some(Cow::Borrowed(unsafe { &*b.as_ptr().cast() }))
+            }
 
-                let mut i = 0;
-
-                tmp.check_hash.as_bytes_mut().copy_from_slice(&b[0..BUCKETS * 4]);
-                i += BUCKETS * 4;
-
-                tmp.count.as_bytes_mut().copy_from_slice(&b[i..i + BUCKETS]);
-                i += BUCKETS;
-
-                tmp.key.as_bytes_mut().copy_from_slice(&b[i..i + std::mem::size_of::<T>() * BUCKETS]);
-
-                Some(tmp)
+            #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "powerpc64", target_arch = "aarch64")))]
+            {
+                if b.as_ptr().align_offset(4) == 0 {
+                    Some(Cow::Borrowed(unsafe { &*b.as_ptr().cast() }))
+                } else {
+                    // NOTE: clone() is implemented above using a raw copy so that alignment doesn't matter.
+                    Some(Cow::Owned(unsafe { &*b.as_ptr().cast::<Self>() }.clone()))
+                }
             }
         } else {
             None
@@ -181,45 +125,42 @@
     /// Zero this IBLT.
     #[inline(always)]
     pub fn reset(&mut self) {
-        self.check_hash.clear();
-        self.count.clear();
-        self.key.clear();
-        self.check_hash.resize(BUCKETS, 0);
-        self.count.resize(BUCKETS, 0);
-        self.key.resize(BUCKETS, Default::default());
+        unsafe { std::ptr::write_bytes((self as *mut Self).cast::<u8>(), 0, std::mem::size_of::<Self>()) };
     }
 
-    pub(crate) fn ins_rem(&mut self, key: &T, delta: i8) {
-        let check_hash = crc32fast::hash(key.as_bytes());
+    pub(crate) fn ins_rem(&mut self, key: &[u8; ITEM_BYTES], delta: i8) {
+        let check_hash = crc32fast::hash(key);
         let mut iteration_index = u32::from_le(check_hash).wrapping_add(1);
         for _ in 0..(HASHES as u64) {
             iteration_index = murmurhash32_mix32(iteration_index);
             let i = (iteration_index as usize) % BUCKETS;
             self.check_hash[i] ^= check_hash;
             self.count[i] = self.count[i].wrapping_add(delta);
-            xor_with(&mut self.key[i], &key);
+            xor_with(&mut self.key[i], key);
         }
     }
 
     /// Insert a set item into this set.
     /// This will panic if the slice is smaller than ITEM_BYTES.
     #[inline(always)]
-    pub fn insert(&mut self, key: &T) {
-        self.ins_rem(key, 1);
+    pub fn insert(&mut self, key: &[u8]) {
+        assert!(key.len() >= ITEM_BYTES);
+        self.ins_rem(unsafe { &*key.as_ptr().cast() }, 1);
     }
 
     /// Insert a set item into this set.
     /// This will panic if the slice is smaller than ITEM_BYTES.
     #[inline(always)]
-    pub fn remove(&mut self, key: &T) {
-        self.ins_rem(key, -1);
+    pub fn remove(&mut self, key: &[u8]) {
+        assert!(key.len() >= ITEM_BYTES);
+        self.ins_rem(unsafe { &*key.as_ptr().cast() }, -1);
     }
 
     /// Subtract another IBLT from this one to get a set difference.
     pub fn subtract(&mut self, other: &Self) {
         self.check_hash.iter_mut().zip(other.check_hash.iter()).for_each(|(a, b)| *a ^= *b);
         self.count.iter_mut().zip(other.count.iter()).for_each(|(a, b)| *a = a.wrapping_sub(*b));
-        self.key.iter_mut().zip(other.key.iter()).for_each(|(a, b)| xor_with(a, &b));
+        self.key.iter_mut().zip(other.key.iter()).for_each(|(a, b)| xor_with(a, b));
     }
 
     /// List as many entries in this IBLT as can be extracted.
@@ -236,12 +177,13 @@
     /// Due to the small check hash sizes used in this IBLT there is a very small chance this will list
     /// bogus items that were never added. This is not an issue with this protocol as it would just result
     /// in an unsatisfied record request.
-    pub fn list<F: FnMut(&T, bool)>(&mut self, mut f: F) -> bool {
-        let mut queue: Box<Vec<u32>> = Box::new(Vec::with_capacity(BUCKETS));
+    pub fn list<F: FnMut(&[u8; ITEM_BYTES], bool)>(mut self, mut f: F) -> bool {
+        assert!(BUCKETS <= (u32::MAX as usize));
+        let mut queue: Vec<u32> = Vec::with_capacity(BUCKETS);
 
         for i in 0..BUCKETS {
             let count = self.count[i];
-            if (count == 1 || count == -1) && crc32fast::hash(&self.key[i].as_bytes()) == self.check_hash[i] {
+            if (count == 1 || count == -1) && crc32fast::hash(&self.key[i]) == self.check_hash[i] {
                 queue.push(i as u32);
             }
         }
@@ -257,7 +199,7 @@
             let check_hash = self.check_hash[i];
             let count = self.count[i];
             let key = &self.key[i];
-            if (count == 1 || count == -1) && check_hash == crc32fast::hash(key.as_bytes()) {
+            if (count == 1 || count == -1) && check_hash == crc32fast::hash(key) {
                 let key = key.clone();
 
                 let mut iteration_index = u32::from_le(check_hash).wrapping_add(1);
@@ -270,8 +212,8 @@
                     self.check_hash[i2] = check_hash2;
                     self.count[i2] = count2;
                     xor_with(key2, &key);
-                    if (count2 == 1 || count2 == -1) && check_hash2 == crc32fast::hash(key2.as_bytes()) {
-                        if queue.len() > BUCKETS {
+                    if (count2 == 1 || count2 == -1) && check_hash2 == crc32fast::hash(key2) {
+                        if queue.len() >= BUCKETS {
                             // sanity check, should be impossible
                             break 'list_main;
                         }
@@ -287,17 +229,14 @@
     }
 }
 
-impl<T, const BUCKETS: usize, const HASHES: usize> PartialEq for IBLT<T, BUCKETS, HASHES>
-where
-    T: AsBytes + FromBytes + Default + Clone,
-{
+impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> PartialEq for IBLT<BUCKETS, ITEM_BYTES, HASHES> {
     #[inline(always)]
     fn eq(&self, other: &Self) -> bool {
-        self.as_bytes().eq(&other.as_bytes())
+        self.as_bytes().eq(other.as_bytes())
     }
 }
 
-impl<T, const BUCKETS: usize, const HASHES: usize> Eq for IBLT<T, BUCKETS, HASHES> where T: AsBytes + FromBytes + Default + Clone {}
+impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> Eq for IBLT<BUCKETS, ITEM_BYTES, HASHES> {}
 
 #[cfg(test)]
 mod tests {
@@ -317,68 +256,31 @@
 
     const HASHES: usize = 3;
 
-    fn check_xor_with2<const L: usize>() {
-        let with = [17_u8; L];
-        let mut expected = [69_u8; L];
-        let mut actual = [69_u8; L];
-        expected.iter_mut().zip(with.iter()).for_each(|(a, b)| *a ^= *b);
-        xor_with(&mut actual, &with);
-        assert!(actual.eq(&expected));
-    }
-
-    fn typical_iblt() -> IBLT<[u8; 32], 16, 3> {
+    #[test]
+    fn struct_packing() {
         // Typical case
-        let mut tmp = IBLT::<[u8; 32], 16, 3>::new();
+        let mut tmp = IBLT::<64, 16, 3>::new();
         tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
-        tmp
-    }
-
-    #[test]
-    fn check_xor_with() {
-        check_xor_with2::<128>();
-        check_xor_with2::<65>();
-        check_xor_with2::<64>();
-        check_xor_with2::<63>();
-        check_xor_with2::<33>();
-        check_xor_with2::<32>();
-        check_xor_with2::<31>();
-        check_xor_with2::<17>();
-        check_xor_with2::<16>();
-        check_xor_with2::<15>();
-        check_xor_with2::<9>();
-        check_xor_with2::<8>();
-        check_xor_with2::<7>();
-        check_xor_with2::<6>();
-        check_xor_with2::<5>();
-        check_xor_with2::<4>();
-        check_xor_with2::<3>();
-        check_xor_with2::<2>();
-        check_xor_with2::<1>();
-    }
-
-    #[test]
-    fn struct_packing() {
-        let tmp = typical_iblt();
         assert!(tmp.as_bytes().iter().all(|x| *x == 1));
 
         // Pathological alignment case #1
-        let mut tmp = IBLT::<[u8; 17], 13, 3>::new();
+        let mut tmp = IBLT::<17, 13, 3>::new();
         tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
         assert!(tmp.as_bytes().iter().all(|x| *x == 1));
 
         // Pathological alignment case #2
-        let mut tmp = IBLT::<[u8; 17], 8, 3>::new();
+        let mut tmp = IBLT::<17, 8, 3>::new();
        tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
         assert!(tmp.as_bytes().iter().all(|x| *x == 1));
 
         // Pathological alignment case #3
-        let mut tmp = IBLT::<[u8; 16], 7, 3>::new();
+        let mut tmp = IBLT::<16, 7, 3>::new();
         tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
@@ -387,13 +289,12 @@
 
     #[test]
     fn fill_list_performance() {
-        const LENGTH: usize = 16;
         const CAPACITY: usize = 4096;
         let mut rn: u128 = 0xd3b07384d113edec49eaa6238ad5ff00;
-        let mut expected: HashSet<u128> = HashSet::with_capacity(CAPACITY);
-        let mut count = LENGTH;
+        let mut expected: HashSet<u128> = HashSet::with_capacity(4096);
+        let mut count = 64;
         while count <= CAPACITY {
-            let mut test = IBLT::<[u8; LENGTH], CAPACITY, HASHES>::new();
+            let mut test = IBLT::<CAPACITY, 16, HASHES>::new_boxed();
             expected.clear();
 
             for _ in 0..count {
@@ -410,16 +311,15 @@
             });
 
             println!("inserted: {}\tlisted: {}\tcapacity: {}\tscore: {:.4}\tfill: {:.4}", count, list_count, CAPACITY, (list_count as f64) / (count as f64), (count as f64) / (CAPACITY as f64));
-            count += LENGTH;
+            count += 32;
         }
     }
 
     #[test]
     fn merge_sets() {
-        const CAPACITY: usize = 4096; // previously 16384;
+        const CAPACITY: usize = 16384;
         const REMOTE_SIZE: usize = 1024 * 1024 * 2;
         const STEP: usize = 1024;
-        const LENGTH: usize = 16;
         let mut rn: u128 = 0xd3b07384d113edec49eaa6238ad5ff00;
         let mut missing_count = 1024;
         let mut missing: HashSet<u128> = HashSet::with_capacity(CAPACITY * 2);
@@ -427,8 +327,8 @@
         while missing_count <= CAPACITY {
             missing.clear();
             all.clear();
-            let mut local = IBLT::<[u8; LENGTH], CAPACITY, HASHES>::new();
-            let mut remote = IBLT::<[u8; LENGTH], CAPACITY, HASHES>::new();
+            let mut local = IBLT::<CAPACITY, 16, HASHES>::new_boxed();
+            let mut remote = IBLT::<CAPACITY, 16, HASHES>::new_boxed();
 
             let mut k = 0;
             while k < REMOTE_SIZE {
@@ -463,87 +363,4 @@
             missing_count += STEP;
         }
     }
-
-    #[derive(Eq, PartialEq, Clone, AsBytes, FromBytes, Debug)]
-    #[repr(C)]
-    struct TestType {
-        thing: [u8; 256],
-        other_thing: [u8; 32],
-    }
-
-    impl Default for TestType {
-        fn default() -> Self {
-            Self::zeroed()
-        }
-    }
-
-    impl TestType {
-        pub fn zeroed() -> Self {
-            unsafe { std::mem::zeroed() }
-        }
-
-        pub fn new() -> Self {
-            let mut newtype = Self::zeroed();
-            newtype.thing.fill_with(|| rand::random());
-            newtype.other_thing.fill_with(|| rand::random());
-            newtype
-        }
-    }
-
-    #[test]
-    fn test_polymorphism() {
-        const CAPACITY: usize = 4096;
-        let mut full = Box::new(IBLT::<TestType, CAPACITY, HASHES>::new());
-        let mut zero = Box::new(IBLT::<TestType, CAPACITY, HASHES>::new());
-
-        for _ in 0..CAPACITY {
-            zero.insert(&TestType::zeroed());
-            full.insert(&TestType::new());
-        }
-
-        zero.subtract(&full);
-
-        zero.list(|item, new| {
-            if new {
-                assert_eq!(item, TestType::zeroed());
-            } else {
-                assert_ne!(item, TestType::zeroed());
-            }
-        });
-
-        zero.reset();
-        full.reset();
-
-        for _ in 0..CAPACITY {
-            zero.insert(&TestType::zeroed());
-            full.insert(&TestType::new());
-        }
-
-        full.subtract(&zero);
-        full.list(|item, new| {
-            if new {
-                assert_ne!(item, TestType::zeroed());
-            } else {
-                assert_eq!(item, TestType::zeroed());
-            }
-        });
-    }
-
-    #[test]
-    fn test_to_from_bytes() {
-        let tmp = typical_iblt();
-        let mut tmp2 = IBLT::<[u8; 32], 16, 3>::from_bytes(tmp.as_bytes()).unwrap();
-
-        tmp2.subtract(&tmp);
-        tmp2.list(|_, new| assert!(!new));
-    }
-
-    #[test]
-    fn test_clone() {
-        let tmp = typical_iblt();
-        let mut tmp2 = tmp.clone();
-
-        tmp2.subtract(&tmp);
-        tmp2.list(|_, new| assert!(!new));
-    }
 }
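Usage note (not part of the patch): the sketch below shows how the reworked byte-oriented API is intended to be driven for set reconciliation, i.e. build one sketch per side, subtract, then list the symmetric difference. It is a minimal sketch, assuming the post-patch generic order IBLT<const BUCKETS, const ITEM_BYTES, const HASHES> used in the reconstruction above and the new_boxed(), insert(), subtract() and consuming list() methods visible in the diff; the SyncIBLT alias, set_difference function, and the concrete parameters are illustrative only.

// Illustrative sketch only -- not part of the patch.
type SyncIBLT = iblt::IBLT<2048, 16, 3>; // assumed: 2048 buckets, 16-byte items, 3 hash indexes

fn set_difference(local_items: &[[u8; 16]], remote_items: &[[u8; 16]]) {
    // Each side fills its own sketch; memory stays fixed at SIZE_BYTES regardless of set size.
    let mut local = SyncIBLT::new_boxed();
    let mut remote = SyncIBLT::new_boxed();
    for k in local_items {
        local.insert(k);
    }
    for k in remote_items {
        remote.insert(k);
    }

    // In a real protocol `remote` would arrive over the wire via as_bytes()/from_bytes().
    // Subtracting it leaves only buckets where the two sets differ.
    local.subtract(&remote);

    // list() consumes the IBLT and calls the closure for every item it can peel out.
    // The bool is the sign of the bucket count: true here means "only on the local side".
    let fully_decoded = local.list(|item, local_only| {
        println!("{:x?} present only in {}", item, if local_only { "local" } else { "remote" });
    });

    // If the symmetric difference exceeds what the bucket count can decode, listing is partial.
    if !fully_decoded {
        println!("difference larger than the sketch could fully decode");
    }
}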