From b346f3ff07f8c389ca84ecc2efc214109c8488b4 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko <adam.ierymenko@zerotier.com>
Date: Wed, 27 Apr 2022 12:54:58 -0400
Subject: [PATCH] A bunch of nit-picky cleanup.

---
 .../src/util/buffer.rs                        | 119 ++++++++----
 zerotier-network-hypervisor/src/util/gate.rs  |  24 ++--
 zerotier-network-hypervisor/src/util/mod.rs   |  10 +-
 zerotier-network-hypervisor/src/util/pool.rs  |   6 +-
 .../src/vl1/dictionary.rs                     |   1 +
 5 files changed, 70 insertions(+), 90 deletions(-)

diff --git a/zerotier-network-hypervisor/src/util/buffer.rs b/zerotier-network-hypervisor/src/util/buffer.rs
index 25738af19..0ac2f3143 100644
--- a/zerotier-network-hypervisor/src/util/buffer.rs
+++ b/zerotier-network-hypervisor/src/util/buffer.rs
@@ -13,10 +13,9 @@ use crate::util::pool::PoolFactory;
 
 /// Annotates a structure as containing only primitive types.
 ///
-/// This indicates structs that are safe to abuse like raw memory by casting from
-/// byte arrays of the same size, etc. It also generally implies packed representation
-/// and alignment should not be assumed since these can be fetched using struct
-/// extracting methods of Buffer that do not check alignment.
+/// The structure must be safe to copy in raw form and to access without regard for alignment. If
+/// it does contain fields that require alignment, special care must be taken when accessing them,
+/// at least on platforms where alignment matters.
 pub unsafe trait RawObject: Sized {}
 
 /// A safe bounds checked I/O buffer with extensions for convenient appending of RawObject types.
@@ -27,25 +26,27 @@ unsafe impl<const L: usize> RawObject for Buffer<L> {}
 
 impl<const L: usize> Default for Buffer<L> {
     #[inline(always)]
     fn default() -> Self {
-        Self(0, [0_u8; L])
+        Self::new()
     }
 }
 
-const OVERFLOW_ERR_MSG: &'static str = "overflow";
+fn overflow_err() -> std::io::Error {
+    std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "buffer overflow")
+}
 
 impl<const L: usize> Buffer<L> {
     pub const CAPACITY: usize = L;
 
-    pub const fn capacity(&self) -> usize {
-        L
-    }
-
+    /// Create an empty zeroed buffer.
     #[inline(always)]
     pub fn new() -> Self {
         Self(0, [0_u8; L])
     }
 
-    /// Create an empty buffer without zeroing its memory (saving a bit of CPU).
+    /// Create an empty buffer without internally zeroing its memory.
+    ///
+    /// This is technically unsafe because unwritten memory in the buffer will have undefined contents.
+    /// Otherwise it behaves exactly like new().
     #[inline(always)]
     pub unsafe fn new_without_memzero() -> Self {
         Self(0, MaybeUninit::uninit().assume_init())
     }
@@ -60,7 +61,7 @@ impl<const L: usize> Buffer<L> {
             tmp.1[0..l].copy_from_slice(b);
             Ok(tmp)
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -74,11 +75,6 @@ impl<const L: usize> Buffer<L> {
         &mut self.1[0..self.0]
     }
 
-    #[inline(always)]
-    pub fn as_range_fixed<const START: usize, const LEN: usize>(&self) -> &[u8; LEN] {
-        crate::util::byte_array_range::<L, START, LEN>(&self.1)
-    }
-
     #[inline(always)]
     pub fn as_ptr(&self) -> *const u8 {
         self.1.as_ptr()
@@ -89,39 +85,28 @@ impl<const L: usize> Buffer<L> {
         self.1.as_mut_ptr()
     }
 
-    #[inline(always)]
-    pub fn as_mut_range_fixed<const START: usize, const LEN: usize>(&mut self) -> &mut [u8; LEN] {
-        crate::util::byte_array_range_mut::<L, START, LEN>(&mut self.1)
-    }
-
     /// Get all bytes after a given position.
     #[inline(always)]
     pub fn as_bytes_starting_at(&self, start: usize) -> std::io::Result<&[u8]> {
         if start <= self.0 {
             Ok(&self.1[start..])
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
     #[inline(always)]
     pub fn clear(&mut self) {
-        let prev_len = self.0;
+        self.1[0..self.0].fill(0);
         self.0 = 0;
-        self.1[0..prev_len].fill(0);
     }
 
     /// Load array into buffer.
     /// This will panic if the array is larger than L.
-    #[inline(always)]
     pub fn set_to(&mut self, b: &[u8]) {
-        let prev_len = self.0;
         let len = b.len();
         self.0 = len;
         self.1[0..len].copy_from_slice(b);
-        if len < prev_len {
-            self.1[len..prev_len].fill(0);
-        }
     }
 
     #[inline(always)]
@@ -136,24 +121,24 @@ impl<const L: usize> Buffer<L> {
 
     /// Set the size of this buffer's data.
     ///
-    /// If the new size is larger than L (the capacity) it will be limited to L. Any new
-    /// space will be filled with zeroes.
+    /// This will panic if the specified size is larger than L. If the new size is larger
+    /// than the current size, the uninitialized space will be zeroed.
     #[inline(always)]
     pub fn set_size(&mut self, s: usize) {
         let prev_len = self.0;
-        if s < prev_len {
-            self.0 = s;
-            self.1[s..prev_len].fill(0);
-        } else {
-            self.0 = s.min(L);
+        self.0 = s;
+        if s > prev_len {
+            self.1[prev_len..s].fill(0);
         }
     }
 
+    /// Set the size of the data in this buffer without checking bounds or zeroing new space.
     #[inline(always)]
     pub unsafe fn set_size_unchecked(&mut self, s: usize) {
         self.0 = s;
     }
 
+    /// Get a byte from this buffer without checking bounds.
     #[inline(always)]
     pub unsafe fn get_unchecked(&self, i: usize) -> u8 {
         *self.1.get_unchecked(i)
@@ -168,7 +153,7 @@ impl<const L: usize> Buffer<L> {
             self.0 = end;
             Ok(unsafe { &mut *self.1.as_mut_ptr().add(ptr).cast() })
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -181,11 +166,12 @@ impl<const L: usize> Buffer<L> {
             self.0 = end;
             Ok(unsafe { &mut *self.1.as_mut_ptr().add(ptr).cast() })
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
     /// Append a runtime sized array and return a mutable reference to its memory.
+    #[inline(always)]
     pub fn append_bytes_get_mut(&mut self, s: usize) -> std::io::Result<&mut [u8]> {
         let ptr = self.0;
         let end = ptr + s;
@@ -193,11 +179,10 @@ impl<const L: usize> Buffer<L> {
             self.0 = end;
             Ok(&mut self.1[ptr..end])
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
-    #[inline(always)]
     pub fn append_padding(&mut self, b: u8, count: usize) -> std::io::Result<()> {
         let ptr = self.0;
         let end = ptr + count;
         if end <= L {
             self.0 = end;
             self.1[ptr..end].fill(b);
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
-    #[inline(always)]
     pub fn append_bytes(&mut self, buf: &[u8]) -> std::io::Result<()> {
         let ptr = self.0;
         let end = ptr + buf.len();
@@ -219,11 +203,10 @@ impl<const L: usize> Buffer<L> {
             self.1[ptr..end].copy_from_slice(buf);
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
-    #[inline(always)]
     pub fn append_bytes_fixed<const S: usize>(&mut self, buf: &[u8; S]) -> std::io::Result<()> {
         let ptr = self.0;
         let end = ptr + S;
         if end <= L {
             self.0 = end;
             self.1[ptr..end].copy_from_slice(buf);
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -249,7 +232,7 @@ impl<const L: usize> Buffer<L> {
             self.1[ptr] = i;
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -263,7 +246,7 @@ impl<const L: usize> Buffer<L> {
             unsafe { *self.1.as_mut_ptr().add(ptr).cast::<u16>() = i.to_be() };
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -277,7 +260,7 @@ impl<const L: usize> Buffer<L> {
             self.1[ptr..end].copy_from_slice(&i.to_be_bytes());
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -291,7 +274,7 @@ impl<const L: usize> Buffer<L> {
             unsafe { *self.1.as_mut_ptr().add(ptr).cast::<u32>() = i.to_be() };
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -305,7 +288,7 @@ impl<const L: usize> Buffer<L> {
             self.1[ptr..end].copy_from_slice(&i.to_be_bytes());
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -319,7 +302,7 @@ impl<const L: usize> Buffer<L> {
             unsafe { *self.1.as_mut_ptr().add(ptr).cast::<u64>() = i.to_be() };
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -333,7 +316,7 @@ impl<const L: usize> Buffer<L> {
             self.1[ptr..end].copy_from_slice(&i.to_be_bytes());
             Ok(())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -343,7 +326,7 @@ impl<const L: usize> Buffer<L> {
         if (ptr + size_of::<T>()) <= self.0 {
             unsafe { Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()) }
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -353,7 +336,7 @@ impl<const L: usize> Buffer<L> {
         if (ptr + size_of::<T>()) <= self.0 {
             unsafe { Ok(&mut *self.1.as_mut_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()) }
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
        }
     }
 
@@ -362,7 +345,7 @@ impl<const L: usize> Buffer<L> {
         if ptr < self.0 {
             Ok(self.1[ptr])
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -376,7 +359,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             unsafe {
                 Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<T>())
             }
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -389,7 +372,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             unsafe { Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<[u8; S]>()) }
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -402,7 +385,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(&self.1[ptr..end])
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -417,7 +400,7 @@ impl<const L: usize> Buffer<L> {
                 r.0
             })
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
        }
     }
 
@@ -429,7 +412,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = ptr + 1;
             Ok(self.1[ptr])
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -443,7 +426,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(u16::from_be(unsafe { *self.1.as_ptr().add(ptr).cast::<u16>() }))
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -457,7 +440,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(u16::from_be_bytes(self.1[ptr..end].try_into().unwrap()))
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -471,7 +454,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(u32::from_be(unsafe { *self.1.as_ptr().add(ptr).cast::<u32>() }))
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -485,7 +468,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(u32::from_be_bytes(self.1[ptr..end].try_into().unwrap()))
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -499,7 +482,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(u64::from_be(unsafe { *self.1.as_ptr().add(ptr).cast::<u64>() }))
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
@@ -513,7 +496,7 @@ impl<const L: usize> Buffer<L> {
             *cursor = end;
             Ok(u64::from_be_bytes(self.1[ptr..end].try_into().unwrap()))
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 }
 
@@ -537,7 +520,7 @@ impl<const L: usize> Write for Buffer<L> {
             self.1[ptr..end].copy_from_slice(buf);
             Ok(buf.len())
         } else {
-            Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
+            Err(overflow_err())
         }
     }
 
diff --git a/zerotier-network-hypervisor/src/util/gate.rs b/zerotier-network-hypervisor/src/util/gate.rs
index af5f1a1d9..0825068a9 100644
--- a/zerotier-network-hypervisor/src/util/gate.rs
+++ b/zerotier-network-hypervisor/src/util/gate.rs
@@ -8,7 +8,7 @@
 
 use std::sync::atomic::{AtomicI64, Ordering};
 
-/// Boolean rate limiter with normal (non-atomic) semantics.
+/// Boolean rate limiter with normal (non-atomic, thread-unsafe) semantics.
 #[repr(transparent)]
 pub struct IntervalGate<const FREQ: i64>(i64);
 
@@ -25,11 +25,6 @@ impl<const FREQ: i64> IntervalGate<FREQ> {
         Self(initial_ts)
     }
 
-    #[inline(always)]
-    pub fn reset(&mut self) {
-        self.0 = 0;
-    }
-
     #[inline(always)]
     pub fn gate(&mut self, time: i64) -> bool {
         if (time - self.0) >= FREQ {
@@ -41,7 +36,9 @@ impl<const FREQ: i64> IntervalGate<FREQ> {
     }
 }
 
+unsafe impl<const FREQ: i64> Send for IntervalGate<FREQ> {}
+
 /// Boolean rate limiter with atomic (thread safe) semantics.
 #[repr(transparent)]
 pub struct AtomicIntervalGate<const FREQ: i64>(AtomicI64);
 
@@ -58,19 +55,14 @@ impl<const FREQ: i64> AtomicIntervalGate<FREQ> {
         Self(AtomicI64::new(initial_ts))
     }
 
-    #[inline(always)]
-    pub fn reset(&self) {
-        self.0.store(0, Ordering::Relaxed);
-    }
-
     #[inline(always)]
     pub fn gate(&self, mut time: i64) -> bool {
-        let prev_time = self.0.load(Ordering::Relaxed);
+        let prev_time = self.0.load(Ordering::Acquire);
         if (time - prev_time) < FREQ {
             false
         } else {
             loop {
-                let pt = self.0.swap(time, Ordering::Relaxed);
+                let pt = self.0.swap(time, Ordering::AcqRel);
                 if pt <= time {
                     break;
                 } else {
@@ -81,3 +73,7 @@ impl<const FREQ: i64> AtomicIntervalGate<FREQ> {
         }
     }
 }
+
+unsafe impl<const FREQ: i64> Send for AtomicIntervalGate<FREQ> {}
+
+unsafe impl<const FREQ: i64> Sync for AtomicIntervalGate<FREQ> {}
diff --git a/zerotier-network-hypervisor/src/util/mod.rs b/zerotier-network-hypervisor/src/util/mod.rs
index 966f6ab98..502b02063 100644
--- a/zerotier-network-hypervisor/src/util/mod.rs
+++ b/zerotier-network-hypervisor/src/util/mod.rs
@@ -39,9 +39,7 @@ pub(crate) fn hash64_noncrypt(mut x: u64) -> u64 {
     x ^ x.wrapping_shr(31)
 }
 
-/// A hasher for u64 maps that just returns u64 values as-is.
-///
-/// Used with things like ZeroTier addresses and network IDs that are already randomly distributed.
+/// A super-minimal hasher for u64 keys that are already fairly randomly distributed, such as addresses and network IDs.
 #[derive(Copy, Clone)]
 pub(crate) struct U64NoOpHasher(u64);
 
@@ -55,7 +53,7 @@ impl U64NoOpHasher {
 impl std::hash::Hasher for U64NoOpHasher {
     #[inline(always)]
     fn finish(&self) -> u64 {
-        self.0
+        self.0.wrapping_add(self.0.wrapping_shr(32))
     }
 
     #[inline(always)]
@@ -65,12 +63,12 @@ impl std::hash::Hasher for U64NoOpHasher {
     #[inline(always)]
     fn write_u64(&mut self, i: u64) {
-        self.0 += i;
+        self.0 = self.0.wrapping_add(i);
     }
 
     #[inline(always)]
     fn write_i64(&mut self, i: i64) {
-        self.0 += i as u64;
+        self.0 = self.0.wrapping_add(i as u64);
     }
 }
diff --git a/zerotier-network-hypervisor/src/util/pool.rs b/zerotier-network-hypervisor/src/util/pool.rs
index 28729e6ef..1a4196d5d 100644
--- a/zerotier-network-hypervisor/src/util/pool.rs
+++ b/zerotier-network-hypervisor/src/util/pool.rs
@@ -46,7 +46,9 @@ impl<O, F: PoolFactory<O>> Pooled<O, F> {
     /// from_raw() or memory will leak.
     #[inline(always)]
     pub unsafe fn into_raw(self) -> *mut O {
+        // Verify that the structure is not padded before 'obj'.
         debug_assert_eq!((&self.0.as_ref().obj as *const O).cast::<u8>(), (self.0.as_ref() as *const PoolEntry<O, F>).cast::<u8>());
+
         let ptr = self.0.as_ptr().cast::<O>();
         std::mem::forget(self);
         ptr
@@ -100,9 +102,10 @@ impl<O, F: PoolFactory<O>> Drop for Pooled<O, F> {
     #[inline(always)]
     fn drop(&mut self) {
         unsafe {
+            // Return the object to the pool if the pool still exists; deallocate otherwise.
             let p = Weak::upgrade(&self.0.as_ref().return_pool);
             if p.is_some() {
-                let p = p.unwrap();
+                let p = p.unwrap_unchecked();
                 p.factory.reset(&mut self.0.as_mut().obj);
                 p.pool.lock().push(self.0);
             } else {
@@ -128,7 +131,6 @@ impl<O, F: PoolFactory<O>> Pool<O, F> {
 
     /// Get a pooled object, or allocate one if the pool is empty.
     #[inline(always)]
     pub fn get(&self) -> Pooled<O, F> {
-        //let _ = self.0.outstanding_count.fetch_add(1, Ordering::Acquire);
         Pooled::<O, F>(self.0.pool.lock().pop().unwrap_or_else(|| unsafe {
             NonNull::new_unchecked(Box::into_raw(Box::new(PoolEntry::<O, F> {
                 obj: self.0.factory.create(),
diff --git a/zerotier-network-hypervisor/src/vl1/dictionary.rs b/zerotier-network-hypervisor/src/vl1/dictionary.rs
index c10f1bdc7..414b83c07 100644
--- a/zerotier-network-hypervisor/src/vl1/dictionary.rs
+++ b/zerotier-network-hypervisor/src/vl1/dictionary.rs
@@ -12,6 +12,7 @@ use std::io::Write;
 use crate::util::hex::HEX_CHARS;
 
 /// Dictionary is an extremely simple key=value serialization format.
+///
 /// It's designed for extreme parsing simplicity and is human readable if keys and values are strings.
 /// It also supports binary keys and values which will be minimally escaped but render the result not
 /// entirely human readable. Keys are serialized in natural sort order so the result can be consistently
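
For reference, the Buffer calling convention these hunks preserve: appends are bounds-checked and now fail through the single cold overflow_err() path, and reads advance a caller-owned cursor. A minimal usage sketch; the append_u16/read_u16/read_bytes names and signatures are assumptions inferred from the to_be()/from_be() hunks above, not confirmed by this diff:

    // Round-trip a few bytes and a big-endian u16 through a Buffer<64>.
    fn round_trip() -> std::io::Result<()> {
        let mut buf: Buffer<64> = Buffer::new();
        buf.append_bytes(&[1, 2, 3])?; // bounds-checked; overflow -> UnexpectedEof
        buf.append_u16(0xabcd)?;       // assumed name; stored big-endian per i.to_be()

        let mut cursor = 0;
        let head = buf.read_bytes(3, &mut cursor)?; // assumed signature; advances cursor to 3
        assert_eq!(head, &[1u8, 2, 3][..]);
        assert_eq!(buf.read_u16(&mut cursor)?, 0xabcd); // assumed name
        Ok(())
    }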
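
The interval gates limit an action to at most once per FREQ time units; the atomic variant may be shared between threads, which is what the added Send/Sync impls assert. A sketch under the semantics visible in gate() (timestamps are plain i64 values in whatever unit the caller measures):

    fn gate_example() {
        // Allow at most one pass per 1000 time units.
        let gate: AtomicIntervalGate<1000> = AtomicIntervalGate::new(0);
        assert!(gate.gate(1500));  // 1500 - 0 >= 1000: passes and records 1500
        assert!(!gate.gate(1600)); // 1600 - 1500 < 1000: gated
        assert!(gate.gate(2600));  // 2600 - 1500 >= 1000: passes again
    }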
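
U64NoOpHasher exists so maps keyed by already-random 64-bit IDs skip real hashing; finish() now mixes the high 32 bits into the result instead of returning the raw sum. A sketch of wiring it into a HashMap; the builder type and the U64NoOpHasher::new() constructor are assumptions (mod.rs may already provide equivalents in the impl block elided from this diff):

    use std::collections::HashMap;
    use std::hash::BuildHasher;

    // Hypothetical BuildHasher shim for the crate-internal hasher.
    #[derive(Clone, Default)]
    struct U64NoOpHasherBuilder;

    impl BuildHasher for U64NoOpHasherBuilder {
        type Hasher = U64NoOpHasher;
        fn build_hasher(&self) -> U64NoOpHasher {
            U64NoOpHasher::new() // assumed constructor
        }
    }

    fn hasher_example() {
        let mut peers: HashMap<u64, &str, U64NoOpHasherBuilder> = HashMap::default();
        peers.insert(0x1122_3344_5566_7788, "peer A"); // key used nearly as-is, no SipHash cost
    }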
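
Pool/Pooled implement a lock-protected free list: get() pops an entry or allocates one via the factory, and Drop returns the object to the pool if the owning Pool is still alive (the Weak::upgrade above), deallocating otherwise. A sketch with a hypothetical String factory; Pool::new's exact signature and Pooled's DerefMut impl are not shown in this diff and are assumed here:

    struct StringFactory;

    impl PoolFactory<String> for StringFactory {
        fn create(&self) -> String {
            String::new()
        }
        fn reset(&self, s: &mut String) {
            s.clear(); // runs when an object re-enters the pool
        }
    }

    fn pool_example() {
        let pool: Pool<String, StringFactory> = Pool::new(StringFactory); // assumed constructor
        {
            let mut s = pool.get(); // pops a pooled entry or allocates a new one
            s.push_str("scratch");  // assumes Pooled derefs to the inner object
        } // drop(): reset() clears the String and the entry is pushed back onto the pool
    }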