From b8e65b667cbc5f39bf4cfbeca2636e38befb7729 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Wed, 14 Sep 2022 11:03:00 -0400
Subject: [PATCH] Build fix, cruft simplification.

---
 crypto/src/zssp.rs                 |  14 +-
 network-hypervisor/src/vl1/peer.rs |   1 -
 utils/src/memory.rs                | 249 +++++------------------
 3 files changed, 51 insertions(+), 213 deletions(-)

diff --git a/crypto/src/zssp.rs b/crypto/src/zssp.rs
index eb10a11c1..5a2fbc19e 100644
--- a/crypto/src/zssp.rs
+++ b/crypto/src/zssp.rs
@@ -572,14 +572,14 @@ impl ReceiveContext {
             return Err(Error::InvalidPacket);
         }
 
-        let counter = memory::u32_from_le_bytes(incoming_packet);
-        let packet_type_fragment_info = memory::u16_from_le_bytes(&incoming_packet[14..16]);
+        let counter = u32::from_le(memory::load_raw(incoming_packet));
+        let packet_type_fragment_info = u16::from_le(memory::load_raw(&incoming_packet[14..16]));
         let packet_type = (packet_type_fragment_info & 0x0f) as u8;
         let fragment_count = ((packet_type_fragment_info.wrapping_shr(4) + 1) as u8) & 63;
         let fragment_no = packet_type_fragment_info.wrapping_shr(10) as u8;
 
         if let Some(local_session_id) =
-            SessionId::new_from_u64(memory::u64_from_le_bytes(&incoming_packet[8..16]) & SessionId::MAX_BIT_MASK)
+            SessionId::new_from_u64(u64::from_le(memory::load_raw(&incoming_packet[8..16])) & SessionId::MAX_BIT_MASK)
         {
             if let Some(session) = host.session_lookup(local_session_id) {
                 if check_header_mac(incoming_packet, &session.header_check_cipher) {
@@ -1393,9 +1393,9 @@ fn create_packet_header(
     if fragment_count <= MAX_FRAGMENTS {
         // CCCC____IIIIIITF
-        memory::u64_to_le_bytes(counter.to_u32() as u64, header);
-        memory::u64_to_le_bytes(
-            recipient_session_id | (packet_type as u64).wrapping_shl(48) | ((fragment_count - 1) as u64).wrapping_shl(52),
+        memory::store_raw((counter.to_u32() as u64).to_le(), header);
+        memory::store_raw(
+            (recipient_session_id | (packet_type as u64).wrapping_shl(48) | ((fragment_count - 1) as u64).wrapping_shl(52)).to_le(),
             &mut header[8..],
         );
         Ok(())
@@ -1446,7 +1446,7 @@ fn check_header_mac(packet: &[u8], header_check_cipher: &Aes) -> bool {
     debug_assert!(packet.len() >= MIN_PACKET_SIZE);
     let mut header_mac = 0u128.to_ne_bytes();
     header_check_cipher.encrypt_block(&packet[8..24], &mut header_mac);
-    memory::u32_from_ne_bytes(&packet[4..8]) == memory::u32_from_ne_bytes(&header_mac)
+    memory::load_raw::<u32>(&packet[4..8]) == memory::load_raw::<u32>(&header_mac)
 }
 
 /// Parse KEY_OFFER and KEY_COUNTER_OFFER starting after the unencrypted public key part.
diff --git a/network-hypervisor/src/vl1/peer.rs b/network-hypervisor/src/vl1/peer.rs
index 0088a51fd..0895c392b 100644
--- a/network-hypervisor/src/vl1/peer.rs
+++ b/network-hypervisor/src/vl1/peer.rs
@@ -176,7 +176,6 @@ impl Peer {
             last_receive_time_ticks: AtomicI64::new(crate::util::NEVER_HAPPENED_TICKS),
             last_forward_time_ticks: AtomicI64::new(crate::util::NEVER_HAPPENED_TICKS),
             last_hello_reply_time_ticks: AtomicI64::new(crate::util::NEVER_HAPPENED_TICKS),
-            last_incoming_message_id: AtomicU64::new(0),
             create_time_ticks: time_ticks,
             random_ticks_offset: random::xorshift64_random() as u32,
             message_id_counter: AtomicU64::new(random::xorshift64_random()),
diff --git a/utils/src/memory.rs b/utils/src/memory.rs
index 4d2c6dcba..0132ad64c 100644
--- a/utils/src/memory.rs
+++ b/utils/src/memory.rs
@@ -1,216 +1,54 @@
 // (c) 2020-2022 ZeroTier, Inc. -- currently propritery pending actual release and licensing. See LICENSE.md.
-use std::mem::size_of;
+#[allow(unused_imports)]
+use std::mem::{needs_drop, size_of, MaybeUninit};
 
-// Version for architectures that definitely don't care about unaligned memory access.
-#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
-#[allow(unused)]
-mod fast_int_memory_access {
-    #[inline(always)]
-    pub fn u64_to_le_bytes(i: u64, b: &mut [u8]) {
-        assert!(b.len() >= 8);
-        unsafe { *b.as_mut_ptr().cast() = i.to_le() };
-    }
+#[allow(unused_imports)]
+use std::ptr::copy_nonoverlapping;
 
-    #[inline(always)]
-    pub fn u32_to_le_bytes(i: u32, b: &mut [u8]) {
-        assert!(b.len() >= 4);
-        unsafe { *b.as_mut_ptr().cast() = i.to_le() };
-    }
-    #[inline(always)]
-    pub fn u16_to_le_bytes(i: u16, b: &mut [u8]) {
-        assert!(b.len() >= 2);
-        unsafe { *b.as_mut_ptr().cast() = i.to_le() };
-    }
-    #[inline(always)]
-    pub fn u64_from_le_bytes(b: &[u8]) -> u64 {
-        assert!(b.len() >= 8);
-        unsafe { u64::from_le(*b.as_ptr().cast()) }
-    }
-    #[inline(always)]
-    pub fn u32_from_le_bytes(b: &[u8]) -> u32 {
-        assert!(b.len() >= 4);
-        unsafe { u32::from_le(*b.as_ptr().cast()) }
-    }
-    #[inline(always)]
-    pub fn u16_from_le_bytes(b: &[u8]) -> u16 {
-        assert!(b.len() >= 2);
-        unsafe { u16::from_le(*b.as_ptr().cast()) }
-    }
-    #[inline(always)]
-    pub fn u64_to_ne_bytes(i: u64, b: &mut [u8]) {
-        assert!(b.len() >= 8);
-        unsafe { *b.as_mut_ptr().cast() = i };
-    }
-    #[inline(always)]
-    pub fn u32_to_ne_bytes(i: u32, b: &mut [u8]) {
-        assert!(b.len() >= 4);
-        unsafe { *b.as_mut_ptr().cast() = i };
-    }
-    #[inline(always)]
-    pub fn u16_to_ne_bytes(i: u16, b: &mut [u8]) {
-        assert!(b.len() >= 2);
-        unsafe { *b.as_mut_ptr().cast() = i };
-    }
-    #[inline(always)]
-    pub fn u64_from_ne_bytes(b: &[u8]) -> u64 {
-        assert!(b.len() >= 8);
-        unsafe { *b.as_ptr().cast() }
-    }
-    #[inline(always)]
-    pub fn u32_from_ne_bytes(b: &[u8]) -> u32 {
-        assert!(b.len() >= 4);
-        unsafe { *b.as_ptr().cast() }
-    }
-    #[inline(always)]
-    pub fn u16_from_ne_bytes(b: &[u8]) -> u16 {
-        assert!(b.len() >= 2);
-        unsafe { *b.as_ptr().cast() }
-    }
-    #[inline(always)]
-    pub fn u64_to_be_bytes(i: u64, b: &mut [u8]) {
-        assert!(b.len() >= 8);
-        unsafe { *b.as_mut_ptr().cast() = i.to_be() };
-    }
-    #[inline(always)]
-    pub fn u32_to_be_bytes(i: u32, b: &mut [u8]) {
-        assert!(b.len() >= 4);
-        unsafe { *b.as_mut_ptr().cast() = i.to_be() };
-    }
-    #[inline(always)]
-    pub fn u16_to_be_bytes(i: u16, b: &mut [u8]) {
-        assert!(b.len() >= 2);
-        unsafe { *b.as_mut_ptr().cast() = i.to_be() };
-    }
-    #[inline(always)]
-    pub fn u64_from_be_bytes(b: &[u8]) -> u64 {
-        assert!(b.len() >= 8);
-        unsafe { *b.as_ptr().cast::<u64>() }.to_be()
-    }
-    #[inline(always)]
-    pub fn u32_from_be_bytes(b: &[u8]) -> u32 {
-        assert!(b.len() >= 4);
-        unsafe { *b.as_ptr().cast::<u32>() }.to_be()
-    }
-    #[inline(always)]
-    pub fn u16_from_be_bytes(b: &[u8]) -> u16 {
-        assert!(b.len() >= 2);
-        unsafe { *b.as_ptr().cast::<u16>() }.to_be()
-    }
+/// Store a raw object to a byte array (for architectures known not to care about unaligned access).
+/// This will panic if the slice is too small or the object requires drop.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64"))]
+#[inline(always)]
+pub fn store_raw<T: Copy>(o: T, dest: &mut [u8]) {
+    assert!(!std::mem::needs_drop::<T>());
+    assert!(dest.len() >= size_of::<T>());
+    unsafe { *dest.as_mut_ptr().cast() = o };
+}
-}
 
-// Version for architectures that might care about unaligned memory access.
-#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
-#[allow(unused)]
-mod fast_int_memory_access {
-    #[inline(always)]
-    pub fn u64_to_le_bytes(i: u64, b: &mut [u8]) {
-        b[..8].copy_from_slice(&i.to_le_bytes());
-    }
-    #[inline(always)]
-    pub fn u32_to_le_bytes(i: u32, b: &mut [u8]) {
-        b[..4].copy_from_slice(&i.to_le_bytes());
-    }
-    #[inline(always)]
-    pub fn u16_to_le_bytes(i: u16, b: &mut [u8]) {
-        b[..2].copy_from_slice(&i.to_le_bytes());
-    }
-    #[inline(always)]
-    pub fn u64_from_le_bytes(b: &[u8]) -> u64 {
-        u64::from_le_bytes(b[..8].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u32_from_le_bytes(b: &[u8]) -> u32 {
-        u32::from_le_bytes(b[..4].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u16_from_le_bytes(b: &[u8]) -> u16 {
-        u16::from_le_bytes(b[..2].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u64_to_ne_bytes(i: u64, b: &mut [u8]) {
-        b[..8].copy_from_slice(&i.to_ne_bytes());
-    }
-    #[inline(always)]
-    pub fn u32_to_ne_bytes(i: u32, b: &mut [u8]) {
-        b[..4].copy_from_slice(&i.to_ne_bytes());
-    }
-    #[inline(always)]
-    pub fn u16_to_ne_bytes(i: u16, b: &mut [u8]) {
-        b[..2].copy_from_slice(&i.to_ne_bytes());
-    }
-    #[inline(always)]
-    pub fn u64_from_ne_bytes(b: &[u8]) -> u64 {
-        u64::from_ne_bytes(b[..8].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u32_from_ne_bytes(b: &[u8]) -> u32 {
-        u32::from_ne_bytes(b[..4].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u16_from_ne_bytes(b: &[u8]) -> u16 {
-        u16::from_ne_bytes(b[..2].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u64_to_be_bytes(i: u64, b: &mut [u8]) {
-        b[..8].copy_from_slice(&i.to_be_bytes());
-    }
-    #[inline(always)]
-    pub fn u32_to_be_bytes(i: u32, b: &mut [u8]) {
-        b[..4].copy_from_slice(&i.to_be_bytes());
-    }
-    #[inline(always)]
-    pub fn u16_to_be_bytes(i: u16, b: &mut [u8]) {
-        b[..2].copy_from_slice(&i.to_be_bytes());
-    }
-    #[inline(always)]
-    pub fn u64_from_be_bytes(b: &[u8]) -> u64 {
-        u64::from_be_bytes(b[..8].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u32_from_be_bytes(b: &[u8]) -> u32 {
-        u32::from_be_bytes(b[..4].try_into().unwrap())
-    }
-    #[inline(always)]
-    pub fn u16_from_be_bytes(b: &[u8]) -> u16 {
-        u16::from_be_bytes(b[..2].try_into().unwrap())
-    }
+/// Store a raw object to a byte array (portable).
+/// This will panic if the slice is too small or the object requires drop.
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))]
+#[inline(always)]
+pub fn store_raw<T: Copy>(o: T, dest: &mut [u8]) {
+    assert!(!std::mem::needs_drop::<T>());
+    assert!(dest.len() >= size_of::<T>());
+    unsafe { copy_nonoverlapping((&o as *const T).cast(), dest.as_mut_ptr(), size_of::<T>()) };
+}
-}
 
-pub use fast_int_memory_access::*;
+/// Load a raw object from a byte array (for architectures known not to care about unaligned access).
+/// This will panic if the slice is too small or the object requires drop.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64"))]
+#[inline(always)]
+pub fn load_raw<T: Copy>(src: &[u8]) -> T {
+    assert!(!std::mem::needs_drop::<T>());
+    assert!(src.len() >= size_of::<T>());
+    unsafe { *src.as_ptr().cast() }
+}
+
+/// Load a raw object from a byte array (portable).
+/// This will panic if the slice is too small or the object requires drop.
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))]
+#[inline(always)]
+pub fn load_raw<T: Copy>(src: &[u8]) -> T {
+    assert!(!std::mem::needs_drop::<T>());
+    assert!(src.len() >= size_of::<T>());
+    unsafe {
+        let mut tmp: T = MaybeUninit::uninit().assume_init();
+        copy_nonoverlapping(src.as_ptr(), (&mut tmp as *mut T).cast(), size_of::<T>());
+        tmp
+    }
+}
 
 /// Obtain a view into an array cast as another array.
 /// This will panic if the template parameters would result in out of bounds access.
@@ -231,6 +69,7 @@ pub fn as_byte_array<T: Copy, const S: usize>(o: &T) -> &[u8; S] {
 /// Get a byte array as a flat object.
 ///
 /// WARNING: while this is technically safe, care must be taken if the object requires aligned access.
+#[inline(always)]
 pub fn as_flat_object<T: Copy, const S: usize>(b: &[u8; S]) -> &T {
     assert!(std::mem::size_of::<T>() <= S);
     unsafe { &*b.as_ptr().cast() }
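
For illustration only (not part of the patch): a minimal sketch of how the consolidated store_raw()/load_raw() helpers replace the removed per-width endian functions, mirroring the pattern used in the zssp.rs hunks above. It assumes the patched utils/src/memory.rs is in scope as `memory`; the function name and constant below are made up for the example.

// Sketch, not patch content: callers now do the endian conversion themselves
// with the standard to_le()/from_le() methods and let the generic helpers
// move raw bytes.
fn round_trip_u32_le() {
    let mut buf = [0u8; 4];

    // was: memory::u32_to_le_bytes(0x11223344, &mut buf);
    memory::store_raw(0x11223344u32.to_le(), &mut buf);

    // was: let v = memory::u32_from_le_bytes(&buf);
    let v = u32::from_le(memory::load_raw(&buf));

    assert_eq!(v, 0x11223344);
}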
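
Also for illustration (not part of the patch): a sketch of as_flat_object(), which the last hunk only annotates with #[inline(always)]. The ExampleHeader type and field layout here are hypothetical, not taken from the tree; real callers define their own #[repr(C, packed)] types.

// Sketch, not patch content: viewing a byte buffer as a packed plain-old-data struct.
#[derive(Clone, Copy)]
#[repr(C, packed)]
struct ExampleHeader {
    counter: u32, // stored little-endian on the wire in this example
    flags: u16,
}

fn read_example_header(raw: &[u8; 8]) -> (u32, u16) {
    // size_of::<ExampleHeader>() == 6, which is <= 8, so the internal assert passes.
    let h: &ExampleHeader = memory::as_flat_object(raw);
    // Copy packed fields by value; taking references to them would be unaligned.
    (u32::from_le(h.counter), u16::from_le(h.flags))
}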