diff --git a/iblt/Cargo.toml b/iblt/Cargo.toml
index 82cce58ff..41232812d 100644
--- a/iblt/Cargo.toml
+++ b/iblt/Cargo.toml
@@ -13,5 +13,26 @@ panic = 'abort'
 
 [dependencies]
 crc32fast = "^1"
+zerocopy = { version = "0.6.1", features = ["alloc"] }
+
+[dev-dependencies]
+rand = ">=0"
+criterion = ">=0"
 
 [lib]
+
+[[bench]]
+name = "to_from_bytes"
+harness = false
+
+[[bench]]
+name = "clone"
+harness = false
+
+[[bench]]
+name = "list"
+harness = false
+
+[[bench]]
+name = "merge"
+harness = false
diff --git a/iblt/benches/clone.rs b/iblt/benches/clone.rs
new file mode 100644
index 000000000..0199de2e2
--- /dev/null
+++ b/iblt/benches/clone.rs
@@ -0,0 +1,19 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+use iblt::IBLT;
+
+const CAPACITY: usize = 4096;
+type OurIBLT = IBLT<[u8; 32], CAPACITY, 3>;
+
+pub fn criterion_benchmark(c: &mut Criterion) {
+    let mut iblt = OurIBLT::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 32];
+        v.fill_with(rand::random);
+        iblt.insert(&v);
+    }
+
+    c.bench_function("clone", |b| b.iter(|| iblt.clone()));
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/iblt/benches/list.rs b/iblt/benches/list.rs
new file mode 100644
index 000000000..9987ab3df
--- /dev/null
+++ b/iblt/benches/list.rs
@@ -0,0 +1,39 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+use iblt::IBLT;
+
+const CAPACITY: usize = 4096;
+type IBLT32 = IBLT<[u8; 32], CAPACITY, 3>;
+type IBLT16 = IBLT<[u8; 16], CAPACITY, 3>;
+type IBLT8 = IBLT<[u8; 8], CAPACITY, 3>;
+
+pub fn criterion_benchmark(c: &mut Criterion) {
+    let mut iblt = IBLT32::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 32];
+        v.fill_with(rand::random);
+        iblt.insert(&v);
+    }
+
+    c.bench_function("list 32", |b| b.iter(|| iblt.list(|_, _| {})));
+
+    let mut iblt = IBLT16::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 16];
+        v.fill_with(rand::random);
+        iblt.insert(&v);
+    }
+
+    c.bench_function("list 16", |b| b.iter(|| iblt.list(|_, _| {})));
+
+    let mut iblt = IBLT8::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 8];
+        v.fill_with(rand::random);
+        iblt.insert(&v);
+    }
+
+    c.bench_function("list 8", |b| b.iter(|| iblt.list(|_, _| {})));
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/iblt/benches/merge.rs b/iblt/benches/merge.rs
new file mode 100644
index 000000000..05d994562
--- /dev/null
+++ b/iblt/benches/merge.rs
@@ -0,0 +1,78 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+use iblt::IBLT;
+
+const CAPACITY: usize = 4096;
+type IBLT32 = IBLT<[u8; 32], CAPACITY, 3>;
+type IBLT16 = IBLT<[u8; 16], CAPACITY, 3>;
+type IBLT8 = IBLT<[u8; 8], CAPACITY, 3>;
+
+pub fn criterion_benchmark(c: &mut Criterion) {
+    let mut orig = IBLT32::new();
+    let mut new = IBLT32::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 32];
+        v.fill_with(rand::random);
+        orig.insert(&v);
+    }
+
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 32];
+        v.fill_with(rand::random);
+        new.insert(&v);
+    }
+
+    c.bench_function("merge 32", |b| {
+        b.iter(|| {
+            let mut new2 = new.clone();
+            orig.subtract(&new);
+            new2.subtract(&orig);
+        })
+    });
+
+    let mut orig = IBLT16::new();
+    let mut new = IBLT16::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 16];
+        v.fill_with(rand::random);
+        orig.insert(&v);
+    }
+
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 16];
+        v.fill_with(rand::random);
+        new.insert(&v);
+    }
+
+    c.bench_function("merge 16", |b| {
+        b.iter(|| {
+            let mut new2 = new.clone();
+            orig.subtract(&new);
+            new2.subtract(&orig);
+        })
+    });
+
+    let mut orig = IBLT8::new();
+    let mut new = IBLT8::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 8];
+        v.fill_with(rand::random);
+        orig.insert(&v);
+    }
+
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 8];
+        v.fill_with(rand::random);
+        new.insert(&v);
+    }
+
+    c.bench_function("merge 8", |b| {
+        b.iter(|| {
+            let mut new2 = new.clone();
+            orig.subtract(&new);
+            new2.subtract(&orig);
+        })
+    });
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/iblt/benches/to_from_bytes.rs b/iblt/benches/to_from_bytes.rs
new file mode 100644
index 000000000..ede76d504
--- /dev/null
+++ b/iblt/benches/to_from_bytes.rs
@@ -0,0 +1,19 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+use iblt::IBLT;
+
+const CAPACITY: usize = 4096;
+type OurIBLT = IBLT<[u8; 32], CAPACITY, 3>;
+
+pub fn criterion_benchmark(c: &mut Criterion) {
+    let mut iblt = OurIBLT::new();
+    for _ in 0..CAPACITY {
+        let mut v = [0u8; 32];
+        v.fill_with(rand::random);
+        iblt.insert(&v);
+    }
+
+    c.bench_function("to_from_bytes", |b| b.iter(|| OurIBLT::from_bytes(iblt.as_bytes())));
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/iblt/src/lib.rs b/iblt/src/lib.rs
index 67065d115..a420fd3b1 100644
--- a/iblt/src/lib.rs
+++ b/iblt/src/lib.rs
@@ -6,30 +6,38 @@
  * https://www.zerotier.com/
  */
 
-use std::borrow::Cow;
+use zerocopy::{AsBytes, FromBytes};
 
 #[cfg(not(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64", target_arch = "powerpc64")))]
 #[inline(always)]
-fn xor_with<const L: usize>(x: &mut [u8; L], y: &[u8; L]) {
-    x.iter_mut().zip(y.iter()).for_each(|(a, b)| *a ^= *b);
+fn xor_with<T>(x: &mut T, y: &T)
+where
+    T: FromBytes + AsBytes + Sized,
+{
+    x.as_bytes_mut().iter_mut().zip(y.as_bytes().iter()).for_each(|(a, b)| *a ^= *b);
 }
 
 #[cfg(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64", target_arch = "powerpc64"))]
 #[inline(always)]
-fn xor_with<const L: usize>(x: &mut [u8; L], y: &[u8; L]) {
-    if L >= 16 {
-        for i in 0..(L / 16) {
-            unsafe { *x.as_mut_ptr().cast::<u128>().add(i) ^= *y.as_ptr().cast::<u128>().add(i) };
+fn xor_with<T>(x: &mut T, y: &T)
+where
+    T: FromBytes + AsBytes + Sized,
+{
+    let size = std::mem::size_of::<T>();
+
+    if size >= 16 {
+        for i in 0..(size / 16) {
+            unsafe { *x.as_bytes_mut().as_mut_ptr().cast::<u128>().add(i) ^= *y.as_bytes().as_ptr().cast::<u128>().add(i) };
         }
-        for i in (L - (L % 16))..L {
-            unsafe { *x.as_mut_ptr().add(i) ^= *y.as_ptr().add(i) };
+        for i in (size - (size % 16))..size {
+            unsafe { *x.as_bytes_mut().as_mut_ptr().add(i) ^= *y.as_bytes().as_ptr().add(i) };
         }
     } else {
-        for i in 0..(L / 8) {
-            unsafe { *x.as_mut_ptr().cast::<u64>().add(i) ^= *y.as_ptr().cast::<u64>().add(i) };
+        for i in 0..(size / 8) {
+            unsafe { *x.as_bytes_mut().as_mut_ptr().cast::<u64>().add(i) ^= *y.as_bytes().as_ptr().cast::<u64>().add(i) };
         }
-        for i in (L - (L % 8))..L {
-            unsafe { *x.as_mut_ptr().add(i) ^= *y.as_ptr().add(i) };
+        for i in (size - (size % 8))..size {
+            unsafe { *x.as_bytes_mut().as_mut_ptr().add(i) ^= *y.as_bytes().as_ptr().add(i) };
         }
     }
 }
@@ -61,29 +69,38 @@ fn murmurhash32_mix32(mut x: u32) -> u32 {
 ///
 /// The best value for HASHES seems to be 3 for an optimal fill of 75%.
 #[repr(C)]
-pub struct IBLT<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> {
-    check_hash: [u32; BUCKETS],
-    count: [i8; BUCKETS],
-    key: [[u8; ITEM_BYTES]; BUCKETS],
+pub struct IBLT<T, const BUCKETS: usize, const HASHES: usize>
+where
+    T: FromBytes + AsBytes + Default + Sized + Clone,
+{
+    check_hash: Box<Vec<u32>>,
+    count: Box<Vec<i8>>,
+    key: Box<Vec<T>>,
 }
 
-impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> Clone for IBLT<BUCKETS, ITEM_BYTES, HASHES> {
-    #[inline(always)]
+impl<T, const BUCKETS: usize, const HASHES: usize> Clone for IBLT<T, BUCKETS, HASHES>
+where
+    T: FromBytes + AsBytes + Default + Sized + Clone,
+{
     fn clone(&self) -> Self {
         unsafe {
-            let mut tmp: Self = std::mem::MaybeUninit::uninit().assume_init();
-            std::ptr::copy_nonoverlapping((self as *const Self).cast::<u8>(), (&mut tmp as *mut Self).cast::<u8>(), Self::SIZE_BYTES);
+            let mut tmp = Self::new();
+            std::ptr::copy_nonoverlapping(self.check_hash.as_ptr(), tmp.check_hash.as_mut_ptr(), BUCKETS);
+            std::ptr::copy_nonoverlapping(self.count.as_ptr(), tmp.count.as_mut_ptr(), BUCKETS);
+            std::ptr::copy_nonoverlapping(self.key.as_ptr(), tmp.key.as_mut_ptr(), BUCKETS);
             tmp
         }
     }
 }
 
-impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
+impl<T, const BUCKETS: usize, const HASHES: usize> IBLT<T, BUCKETS, HASHES>
+where
+    T: FromBytes + AsBytes + Default + Sized + Clone,
+{
     /// Number of bytes each bucket consumes (not congituously, but doesn't matter).
-    const BUCKET_SIZE_BYTES: usize = ITEM_BYTES + 4 + 1;
+    const BUCKET_SIZE_BYTES: usize = std::mem::size_of::<T>() + 4 + 1;
 
     /// Number of buckets in this IBLT.
-    #[allow(unused)]
     pub const BUCKETS: usize = BUCKETS;
 
     /// Size of this IBLT in bytes.
@@ -92,16 +109,34 @@ impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
 
     #[inline(always)]
     pub fn new() -> Self {
-        assert!(Self::SIZE_BYTES <= std::mem::size_of::<Self>());
         assert!(BUCKETS < (i32::MAX as usize));
-        unsafe { std::mem::zeroed() }
+
+        let mut s = Self {
+            check_hash: Box::new(Vec::with_capacity(BUCKETS)),
+            count: Box::new(Vec::with_capacity(BUCKETS)),
+            key: Box::new(Vec::with_capacity(BUCKETS)),
+        };
+
+        s.reset();
+        s
     }
 
-    /// Get this IBLT as a byte slice (free cast operation).
-    /// The returned slice is always SIZE_BYTES in length.
     #[inline(always)]
-    pub fn as_bytes(&self) -> &[u8] {
-        unsafe { &*std::ptr::slice_from_raw_parts((self as *const Self).cast::<u8>(), Self::SIZE_BYTES) }
+    pub fn as_bytes(&self) -> Box<Vec<u8>> {
+        let check_hash_len = BUCKETS * 4;
+        let t_len = BUCKETS * std::mem::size_of::<T>();
+        let len = check_hash_len + BUCKETS + t_len;
+
+        let mut buf = Box::new(Vec::with_capacity(len));
+        buf.resize(len, 0);
+
+        let byt = buf.as_bytes_mut();
+
+        byt[0..check_hash_len].copy_from_slice(self.check_hash.as_bytes());
+        byt[check_hash_len..BUCKETS + check_hash_len].copy_from_slice(self.count.as_bytes());
+        byt[len - t_len..len].copy_from_slice(self.key.as_bytes());
+
+        buf
     }
 
     /// Obtain an IBLT from bytes in memory.
@@ -110,20 +145,33 @@ impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
     #[inline(always)]
-    pub fn from_bytes<'a>(b: &'a [u8]) -> Option<Cow<'a, Self>> {
+    pub fn from_bytes(b: Box<Vec<u8>>) -> Option<Self> {
         if b.len() == Self::SIZE_BYTES {
-            #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "powerpc64", target_arch = "aarch64")))]
-            {
-                if b.as_ptr().align_offset(8) == 0 {
-                    Some(Cow::Borrowed(unsafe { &*b.as_ptr().cast() }))
-                } else {
-                    // NOTE: clone() is implemented above using a raw copy so that alignment doesn't matter.
-                    Some(Cow::Owned(unsafe { &*b.as_ptr().cast::<Self>() }.clone()))
-                }
-            }
+            // FIXME I commented this out because I do not have access to the architectures needed.
+            // #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "powerpc64", target_arch = "aarch64")))]
+            // {
+            //     if b.as_ptr().align_offset(8) == 0 {
+            //         Some(Cow::Borrowed(unsafe { &*b.as_ptr().cast() }))
+            //     } else {
+            //         // NOTE: clone() is implemented above using a raw copy so that alignment doesn't matter.
+            //         Some(Cow::Owned(unsafe { &*b.as_ptr().cast::<Self>() }.clone()))
+            //     }
+            // }
             #[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "powerpc64", target_arch = "aarch64"))]
             {
-                Some(Cow::Borrowed(unsafe { &*b.as_ptr().cast() }))
+                let mut tmp = Self::new();
+
+                let mut i = 0;
+
+                tmp.check_hash.as_bytes_mut().copy_from_slice(&b[0..BUCKETS * 4]);
+                i += BUCKETS * 4;
+
+                tmp.count.as_bytes_mut().copy_from_slice(&b[i..i + BUCKETS]);
+                i += BUCKETS;
+
+                tmp.key.as_bytes_mut().copy_from_slice(&b[i..i + std::mem::size_of::<T>() * BUCKETS]);
+
+                Some(tmp)
             }
         } else {
             None
@@ -133,42 +181,45 @@ impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
 
     #[inline(always)]
     pub fn reset(&mut self) {
-        unsafe { std::ptr::write_bytes((self as *mut Self).cast::<u8>(), 0, std::mem::size_of::<Self>()) };
+        self.check_hash.clear();
+        self.count.clear();
+        self.key.clear();
+        self.check_hash.resize(BUCKETS, 0);
+        self.count.resize(BUCKETS, 0);
+        self.key.resize(BUCKETS, Default::default());
     }
 
-    pub(crate) fn ins_rem(&mut self, key: &[u8; ITEM_BYTES], delta: i8) {
-        let check_hash = crc32fast::hash(key);
+    pub(crate) fn ins_rem(&mut self, key: &T, delta: i8) {
+        let check_hash = crc32fast::hash(key.as_bytes());
         let mut iteration_index = u32::from_le(check_hash).wrapping_add(1);
         for _ in 0..(HASHES as u64) {
            iteration_index = murmurhash32_mix32(iteration_index);
             let i = (iteration_index as usize) % BUCKETS;
             self.check_hash[i] ^= check_hash;
             self.count[i] = self.count[i].wrapping_add(delta);
-            xor_with(&mut self.key[i], key);
+            xor_with(&mut self.key[i], &key);
         }
     }
 
     /// Insert a set item into this set.
     /// This will panic if the slice is smaller than ITEM_BYTES.
     #[inline(always)]
-    pub fn insert(&mut self, key: &[u8]) {
-        assert!(key.len() >= ITEM_BYTES);
-        self.ins_rem(unsafe { &*key.as_ptr().cast() }, 1);
+    pub fn insert(&mut self, key: &T) {
+        self.ins_rem(key, 1);
     }
 
     /// Insert a set item into this set.
     /// This will panic if the slice is smaller than ITEM_BYTES.
     #[inline(always)]
-    pub fn remove(&mut self, key: &[u8]) {
-        assert!(key.len() >= ITEM_BYTES);
-        self.ins_rem(unsafe { &*key.as_ptr().cast() }, -1);
+    pub fn remove(&mut self, key: &T) {
+        self.ins_rem(key, -1);
     }
 
     /// Subtract another IBLT from this one to get a set difference.
     pub fn subtract(&mut self, other: &Self) {
         self.check_hash.iter_mut().zip(other.check_hash.iter()).for_each(|(a, b)| *a ^= *b);
         self.count.iter_mut().zip(other.count.iter()).for_each(|(a, b)| *a = a.wrapping_sub(*b));
-        self.key.iter_mut().zip(other.key.iter()).for_each(|(a, b)| xor_with(a, b));
+        self.key.iter_mut().zip(other.key.iter()).for_each(|(a, b)| xor_with(a, &b));
     }
 
     /// List as many entries in this IBLT as can be extracted.
@@ -185,12 +236,12 @@ impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
-    pub fn list<F: FnMut(&[u8; ITEM_BYTES], bool)>(mut self, mut f: F) -> bool {
-        let mut queue: Vec<u32> = Vec::with_capacity(BUCKETS);
+    pub fn list<F: FnMut(T, bool)>(&mut self, mut f: F) -> bool {
+        let mut queue: Box<Vec<u32>> = Box::new(Vec::with_capacity(BUCKETS));
         for i in 0..BUCKETS {
             let count = self.count[i];
-            if (count == 1 || count == -1) && crc32fast::hash(&self.key[i]) == self.check_hash[i] {
+            if (count == 1 || count == -1) && crc32fast::hash(&self.key[i].as_bytes()) == self.check_hash[i] {
                 queue.push(i as u32);
             }
         }
@@ -206,7 +257,7 @@ impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
             if i > BUCKETS {
                 // sanity check, should be impossible
                 break 'list_main;
@@ -236,14 +287,17 @@ impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> IBLT<BUCKETS, ITEM_BYTES, HASHES> {
     }
 }
 
-impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> PartialEq for IBLT<BUCKETS, ITEM_BYTES, HASHES> {
+impl<T, const BUCKETS: usize, const HASHES: usize> PartialEq for IBLT<T, BUCKETS, HASHES>
+where
+    T: AsBytes + FromBytes + Default + Clone,
+{
     #[inline(always)]
     fn eq(&self, other: &Self) -> bool {
-        self.as_bytes().eq(other.as_bytes())
+        self.as_bytes().eq(&other.as_bytes())
     }
 }
 
-impl<const BUCKETS: usize, const ITEM_BYTES: usize, const HASHES: usize> Eq for IBLT<BUCKETS, ITEM_BYTES, HASHES> {}
+impl<T, const BUCKETS: usize, const HASHES: usize> Eq for IBLT<T, BUCKETS, HASHES> where T: AsBytes + FromBytes + Default + Clone {}
 
 #[cfg(test)]
 mod tests {
@@ -272,6 +326,15 @@ mod tests {
         assert!(actual.eq(&expected));
     }
 
+    fn typical_iblt() -> IBLT<[u8; 32], 16, 3> {
+        // Typical case
+        let mut tmp = IBLT::<[u8; 32], 16, 3>::new();
+        tmp.check_hash.fill(0x01010101);
+        tmp.count.fill(1);
+        tmp.key.iter_mut().for_each(|x| x.fill(1));
+        tmp
+    }
+
     #[test]
     fn check_xor_with() {
         check_xor_with2::<128>();
@@ -297,29 +360,25 @@ mod tests {
 
     #[test]
     fn struct_packing() {
-        // Typical case
-        let mut tmp = IBLT::<64, 16, 3>::new();
-        tmp.check_hash.fill(0x01010101);
-        tmp.count.fill(1);
-        tmp.key.iter_mut().for_each(|x| x.fill(1));
+        let tmp = typical_iblt();
         assert!(tmp.as_bytes().iter().all(|x| *x == 1));
 
         // Pathological alignment case #1
-        let mut tmp = IBLT::<17, 13, 3>::new();
+        let mut tmp = IBLT::<[u8; 17], 13, 3>::new();
         tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
         assert!(tmp.as_bytes().iter().all(|x| *x == 1));
 
         // Pathological alignment case #2
-        let mut tmp = IBLT::<17, 8, 3>::new();
+        let mut tmp = IBLT::<[u8; 17], 8, 3>::new();
         tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
         assert!(tmp.as_bytes().iter().all(|x| *x == 1));
 
         // Pathological alignment case #3
-        let mut tmp = IBLT::<16, 7, 3>::new();
+        let mut tmp = IBLT::<[u8; 16], 7, 3>::new();
         tmp.check_hash.fill(0x01010101);
         tmp.count.fill(1);
         tmp.key.iter_mut().for_each(|x| x.fill(1));
@@ -328,12 +387,13 @@ mod tests {
 
     #[test]
     fn fill_list_performance() {
+        const LENGTH: usize = 16;
         const CAPACITY: usize = 4096;
         let mut rn: u128 = 0xd3b07384d113edec49eaa6238ad5ff00;
-        let mut expected: HashSet<u128> = HashSet::with_capacity(4096);
-        let mut count = 64;
+        let mut expected: HashSet<u128> = HashSet::with_capacity(CAPACITY);
+        let mut count = LENGTH;
         while count <= CAPACITY {
-            let mut test = IBLT::<CAPACITY, 16, HASHES>::new();
+            let mut test = IBLT::<[u8; LENGTH], CAPACITY, HASHES>::new();
             expected.clear();
 
             for _ in 0..count {
@@ -350,7 +410,7 @@ mod tests {
             });
 
             println!("inserted: {}\tlisted: {}\tcapacity: {}\tscore: {:.4}\tfill: {:.4}", count, list_count, CAPACITY, (list_count as f64) / (count as f64), (count as f64) / (CAPACITY as f64));
-            count += 64;
+            count += LENGTH;
         }
     }
 
@@ -359,6 +419,7 @@ mod tests {
         const CAPACITY: usize = 4096; // previously 16384;
         const REMOTE_SIZE: usize = 1024 * 1024 * 2;
         const STEP: usize = 1024;
+        const LENGTH: usize = 16;
         let mut rn: u128 = 0xd3b07384d113edec49eaa6238ad5ff00;
         let mut missing_count = 1024;
         let mut missing: HashSet<u128> = HashSet::with_capacity(CAPACITY * 2);
@@ -367,8 +428,8 @@ mod tests {
         while missing_count <= CAPACITY {
             missing.clear();
             all.clear();
-            let mut local = IBLT::<CAPACITY, 16, HASHES>::new();
-            let mut remote = IBLT::<CAPACITY, 16, HASHES>::new();
+            let mut local = IBLT::<[u8; LENGTH], CAPACITY, HASHES>::new();
+            let mut remote = IBLT::<[u8; LENGTH], CAPACITY, HASHES>::new();
 
             let mut k = 0;
             while k < REMOTE_SIZE {
@@ -402,4 +463,87 @@ mod tests {
             missing_count += STEP;
         }
     }
+
+    #[derive(Eq, PartialEq, Clone, AsBytes, FromBytes, Debug)]
+    #[repr(C)]
+    struct TestType {
+        thing: [u8; 256],
+        other_thing: [u8; 32],
+    }
+
+    impl Default for TestType {
+        fn default() -> Self {
+            Self::zeroed()
+        }
+    }
+
+    impl TestType {
+        pub fn zeroed() -> Self {
+            unsafe { std::mem::zeroed() }
+        }
+
+        pub fn new() -> Self {
+            let mut newtype = Self::zeroed();
+            newtype.thing.fill_with(|| rand::random());
+            newtype.other_thing.fill_with(|| rand::random());
+            newtype
+        }
+    }
+
+    #[test]
+    fn test_polymorphism() {
+        const CAPACITY: usize = 4096;
+        let mut full = Box::new(IBLT::<TestType, CAPACITY, HASHES>::new());
+        let mut zero = Box::new(IBLT::<TestType, CAPACITY, HASHES>::new());
+
+        for _ in 0..CAPACITY {
+            zero.insert(&TestType::zeroed());
+            full.insert(&TestType::new());
+        }
+
+        zero.subtract(&full);
+
+        zero.list(|item, new| {
+            if new {
+                assert_eq!(item, TestType::zeroed());
+            } else {
+                assert_ne!(item, TestType::zeroed());
+            }
+        });
+
+        zero.reset();
+        full.reset();
+
+        for _ in 0..CAPACITY {
+            zero.insert(&TestType::zeroed());
+            full.insert(&TestType::new());
+        }
+
+        full.subtract(&zero);
+        full.list(|item, new| {
+            if new {
+                assert_ne!(item, TestType::zeroed());
+            } else {
+                assert_eq!(item, TestType::zeroed());
+            }
+        });
+    }
+
+    #[test]
+    fn test_to_from_bytes() {
+        let tmp = typical_iblt();
+        let mut tmp2 = IBLT::<[u8; 32], 16, 3>::from_bytes(tmp.as_bytes()).unwrap();
+
+        tmp2.subtract(&tmp);
+        tmp2.list(|_, new| assert!(!new));
+    }
+
+    #[test]
+    fn test_clone() {
+        let tmp = typical_iblt();
+        let mut tmp2 = tmp.clone();
+
+        tmp2.subtract(&tmp);
+        tmp2.list(|_, new| assert!(!new));
+    }
 }