From 1f79a2a7077903401b7067c93599673689bdb0e4 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 29 Apr 2022 16:19:21 -0400
Subject: [PATCH] A bunch of reorg and other cleanup.

---
 zerotier-core-crypto/src/x25519.rs              | 39 +++++++++------
 .../src/util/buffer.rs                          | 23 +++++----
 zerotier-network-hypervisor/src/util/mod.rs     | 30 +++++++++--
 .../src/vl1/endpoint.rs                         |  4 +-
 .../src/vl1/fragmentedpacket.rs                 | 42 ++++++++++++----
 .../src/vl1/identity.rs                         |  1 -
 .../src/vl1/inetaddress.rs                      | 15 ++++--
 zerotier-network-hypervisor/src/vl1/mod.rs      | 13 +++--
 zerotier-network-hypervisor/src/vl1/path.rs     | 50 ++++++-------------
 zerotier-network-hypervisor/src/vl1/peer.rs     | 19 +++++--
 .../src/vl1/protocol.rs                         | 16 +++---
 11 files changed, 152 insertions(+), 100 deletions(-)

diff --git a/zerotier-core-crypto/src/x25519.rs b/zerotier-core-crypto/src/x25519.rs
index 478f56858..20d3d195b 100644
--- a/zerotier-core-crypto/src/x25519.rs
+++ b/zerotier-core-crypto/src/x25519.rs
@@ -35,21 +35,30 @@ impl C25519KeyPair {
     pub fn from_bytes(public_key: &[u8], secret_key: &[u8]) -> Option<C25519KeyPair> {
         if public_key.len() == 32 && secret_key.len() == 32 {
-            // NOTE: we keep the original secret separately from x25519_dalek's StaticSecret
-            // due to how "clamping" is done in the old C++ code vs x25519_dalek. Clamping
-            // is explained here:
-            //
-            // https://www.jcraige.com/an-explainer-on-ed25519-clamping
-            //
-            // In the old C++ code clamping is done when the secret key is actually used.
-            // In x25519_dalek it's done when the key is loaded into one of the secret
-            // containers. Unfortunately this means that identities' secret keys won't look
-            // the same in the actual identity structure vs. what you would get from the C++
-            // v0 ZeroTier implementation. The cryptographic results are identical but we
-            // still need to have our identity spit out identical bits when exported.
-            //
-            // Newly generated keys will be clamped at generation time, which will also yield
-            // identical results in both cases.
+            /* NOTE: we keep the original secret separately from x25519_dalek's StaticSecret
+             * due to how "clamping" is done in the old C++ code vs x25519_dalek. Clamping
+             * is explained here:
+             *
+             * https://www.jcraige.com/an-explainer-on-ed25519-clamping
+             *
+             * The old code does clamping at the time of use. In other words the code that
+             * performs things like key agreement or signing clamps the secret before doing
+             * the operation. The x25519_dalek code does clamping at generation or when
+             * from() is used to get a key from a raw byte array.
+             *
+             * Unfortunately this introduces issues when interoperating with old code. The
+             * old system generates secrets that are not clamped (since they're clamped at
+             * use!) and assumes that these exact binary keys will be preserved in e.g.
+             * identities. So to preserve this behavior we store the secret separately
+             * so secret_bytes() will return it as-is.
+             *
+             * The new code will still clamp at generation, resulting in secrets that are
+             * pre-clamped, but the old code won't care about this. It's only a problem when
+             * going the other way.
+             *
+             * This has no cryptographic implication since the clamping is done regardless
+             * of where it happens; it's only an API difference.
+             */
            let pk: [u8; 32] = public_key.try_into().unwrap();
            let sk_orig: Secret<32> = Secret(secret_key.try_into().unwrap());
            let pk = x25519_dalek::PublicKey::from(pk);
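For reference, the "clamping" discussed in that comment is the standard X25519 scalar normalization. A minimal sketch of it follows (the bit operations are the standard ones, not code from this patch); because clamping is idempotent, clamping-at-use over a stored raw secret and x25519_dalek's clamping-at-load yield identical cryptographic results, which is exactly why only the exported bytes differ:

```rust
/// The standard X25519 scalar clamp; a reference sketch, not part of this patch.
fn clamp_x25519_scalar(mut sk: [u8; 32]) -> [u8; 32] {
    sk[0] &= 248; // clear the low 3 bits: scalar becomes a multiple of the cofactor 8
    sk[31] &= 127; // clear bit 255
    sk[31] |= 64; // set bit 254: fixes the scalar's effective bit length
    sk
}

fn main() {
    let s = [0xffu8; 32];
    // Idempotent: clamping an already-clamped key changes nothing, so it does not
    // matter whether it happens at generation, at load, or at use.
    assert_eq!(clamp_x25519_scalar(clamp_x25519_scalar(s)), clamp_x25519_scalar(s));
}
```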
diff --git a/zerotier-network-hypervisor/src/util/buffer.rs b/zerotier-network-hypervisor/src/util/buffer.rs
index 0ac2f3143..af9947b25 100644
--- a/zerotier-network-hypervisor/src/util/buffer.rs
+++ b/zerotier-network-hypervisor/src/util/buffer.rs
@@ -13,15 +13,20 @@ use crate::util::pool::PoolFactory;
 
 /// Annotates a structure as containing only primitive types.
 ///
-/// The structure must be safe to copy in raw form and access without concern for alignment, or if
-/// it does contain elements that require alignment special care must be taken when accessing them
-/// at least on platforms where it matters.
-pub unsafe trait RawObject: Sized {}
+/// This means the structure is safe to copy in raw form, does not need to be dropped, and otherwise
+/// contains nothing complex that requires any special handling. It also implies that it is safe to
+/// access without concern for alignment on platforms on which this is an issue, or at least that
+/// the implementer must take care to guard any unaligned access in appropriate ways. FlatBlob
+/// structures are generally repr(C, packed) as well to make them deterministic across systems.
+///
+/// The Buffer has special methods allowing these structs to be read and written in place, which
+/// would be unsafe without these concerns being flagged as not applicable.
+pub unsafe trait FlatBlob: Sized {}
 
 /// A safe bounds-checked I/O buffer with extensions for convenient appending of FlatBlob types.
 pub struct Buffer<const L: usize>(usize, [u8; L]);
 
-unsafe impl RawObject for Buffer {}
+unsafe impl<const L: usize> FlatBlob for Buffer<L> {}
 
 impl<const L: usize> Default for Buffer<L> {
     #[inline(always)]
@@ -146,7 +151,7 @@ impl<const L: usize> Buffer<L> {
 
     /// Append a structure and return a mutable reference to its memory.
     #[inline(always)]
-    pub fn append_struct_get_mut<T: RawObject>(&mut self) -> std::io::Result<&mut T> {
+    pub fn append_struct_get_mut<T: FlatBlob>(&mut self) -> std::io::Result<&mut T> {
         let ptr = self.0;
         let end = ptr + size_of::<T>();
         if end <= L {
@@ -322,7 +327,7 @@ impl<const L: usize> Buffer<L> {
 
     /// Get a structure at a given position in the buffer.
     #[inline(always)]
-    pub fn struct_at<T: RawObject>(&self, ptr: usize) -> std::io::Result<&T> {
+    pub fn struct_at<T: FlatBlob>(&self, ptr: usize) -> std::io::Result<&T> {
         if (ptr + size_of::<T>()) <= self.0 {
             unsafe { Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()) }
         } else {
@@ -332,7 +337,7 @@ impl<const L: usize> Buffer<L> {
 
     /// Get a structure at a given position in the buffer.
     #[inline(always)]
-    pub fn struct_mut_at<T: RawObject>(&mut self, ptr: usize) -> std::io::Result<&mut T> {
+    pub fn struct_mut_at<T: FlatBlob>(&mut self, ptr: usize) -> std::io::Result<&mut T> {
         if (ptr + size_of::<T>()) <= self.0 {
             unsafe { Ok(&mut *self.1.as_mut_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()) }
         } else {
@@ -351,7 +356,7 @@ impl<const L: usize> Buffer<L> {
 
     /// Get a structure at a given position in the buffer and advance the cursor.
     #[inline(always)]
-    pub fn read_struct<T: RawObject>(&self, cursor: &mut usize) -> std::io::Result<&T> {
+    pub fn read_struct<T: FlatBlob>(&self, cursor: &mut usize) -> std::io::Result<&T> {
         let ptr = *cursor;
         let end = ptr + size_of::<T>();
         debug_assert!(end <= L);
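What the FlatBlob marker licenses can be shown in a standalone sketch. DemoHeader and the free-standing struct_at below are hypothetical stand-ins, not types from this patch; they illustrate why the unsafe cast is sound when the marked type is packed, trivially copyable, and valid for every bit pattern:

```rust
use std::mem::size_of;

// Hypothetical packed, trivially-copyable header; a stand-in, not a patch type.
#[repr(C, packed)]
struct DemoHeader {
    verb: u8,
    len_be: [u8; 2], // kept as raw bytes so no unaligned u16 access is ever needed
}

// Bounds-checked in-place access in the spirit of Buffer::struct_at().
fn struct_at(buf: &[u8], at: usize) -> std::io::Result<&DemoHeader> {
    if at + size_of::<DemoHeader>() <= buf.len() {
        // Sound because DemoHeader is packed (alignment 1), has no padding, no Drop,
        // and every bit pattern is a valid value.
        Ok(unsafe { &*buf.as_ptr().add(at).cast::<DemoHeader>() })
    } else {
        Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
    }
}

fn main() {
    let wire = [0x01u8, 0x00, 0x2a];
    let h = struct_at(&wire, 0).unwrap();
    assert_eq!((h.verb, u16::from_be_bytes(h.len_be)), (1, 42));
}
```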
diff --git a/zerotier-network-hypervisor/src/util/mod.rs b/zerotier-network-hypervisor/src/util/mod.rs
index 7023583f5..b7ab5cfcf 100644
--- a/zerotier-network-hypervisor/src/util/mod.rs
+++ b/zerotier-network-hypervisor/src/util/mod.rs
@@ -56,11 +56,6 @@ impl std::hash::Hasher for U64NoOpHasher {
         self.0.wrapping_add(self.0.wrapping_shr(32))
     }
 
-    #[inline(always)]
-    fn write(&mut self, _: &[u8]) {
-        panic!("U64NoOpHasher should only be used with u64 and i64 types");
-    }
-
     #[inline(always)]
     fn write_u64(&mut self, i: u64) {
         self.0 = self.0.wrapping_add(i);
@@ -70,6 +65,31 @@ impl std::hash::Hasher for U64NoOpHasher {
     fn write_i64(&mut self, i: i64) {
         self.0 = self.0.wrapping_add(i as u64);
     }
+
+    #[inline(always)]
+    fn write_usize(&mut self, i: usize) {
+        self.0 = self.0.wrapping_add(i as u64);
+    }
+
+    #[inline(always)]
+    fn write_isize(&mut self, i: isize) {
+        self.0 = self.0.wrapping_add(i as u64);
+    }
+
+    #[inline(always)]
+    fn write_u32(&mut self, i: u32) {
+        self.0 = self.0.wrapping_add(i as u64);
+    }
+
+    #[inline(always)]
+    fn write_i32(&mut self, i: i32) {
+        self.0 = self.0.wrapping_add(i as u64);
+    }
+
+    #[inline(always)]
+    fn write(&mut self, _: &[u8]) {
+        panic!("U64NoOpHasher should only be used with u64, i64, u32, i32, usize, or i64-sized keys");
+    }
 }
 
 impl std::hash::BuildHasher for U64NoOpHasher {
diff --git a/zerotier-network-hypervisor/src/vl1/endpoint.rs b/zerotier-network-hypervisor/src/vl1/endpoint.rs
index 3fb836120..86b18241f 100644
--- a/zerotier-network-hypervisor/src/vl1/endpoint.rs
+++ b/zerotier-network-hypervisor/src/vl1/endpoint.rs
@@ -40,7 +40,7 @@ pub enum Endpoint {
     Nil,
 
     /// Via another node using unencapsulated relaying (e.g. via a root)
-    /// Hash is a full hash of the identity for strong verification.
+    /// This is the address and the full identity fingerprint.
     ZeroTier(Address, [u8; SHA512_HASH_SIZE]),
 
     /// Direct L2 Ethernet
@@ -68,7 +68,7 @@ pub enum Endpoint {
     WebRTC(Vec<u8>),
 
     /// Via another node using inner encapsulation via VERB_ENCAP.
-    /// Hash is a full hash of the identity for strong verification.
+    /// This is the address and the full identity fingerprint.
     ZeroTierEncap(Address, [u8; SHA512_HASH_SIZE]),
 }
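The point of the no-op hasher is that ZeroTier packet IDs are already effectively random 64-bit values, so running them through SipHash again is pure overhead. A usage sketch in crate context (U64NoOpHasher::new() is assumed from this module; the constructor is not shown in the hunk):

```rust
use std::collections::HashMap;

fn demo() {
    // The third type parameter is the BuildHasher; the patch implements BuildHasher
    // for U64NoOpHasher itself, so a map can be keyed by packet ID with no hashing.
    let mut frags: HashMap<u64, &'static str, U64NoOpHasher> =
        HashMap::with_hasher(U64NoOpHasher::new());
    frags.insert(0x243f6a8885a308d3, "reassembly slot for this packet ID");
    assert!(frags.contains_key(&0x243f6a8885a308d3));
}
```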
diff --git a/zerotier-network-hypervisor/src/vl1/fragmentedpacket.rs b/zerotier-network-hypervisor/src/vl1/fragmentedpacket.rs
index 3787eba89..5c179f20d 100644
--- a/zerotier-network-hypervisor/src/vl1/fragmentedpacket.rs
+++ b/zerotier-network-hypervisor/src/vl1/fragmentedpacket.rs
@@ -14,6 +14,9 @@ use crate::PacketBuffer;
 /// Performance note: PacketBuffer is Pooled, which is NonNull<*mut Buffer>.
 /// That means Option<PacketBuffer> is just a pointer, since NonNull permits the
 /// compiler to optimize out any additional state in Option.
+///
+/// This will need to be modified if we ever support more than 8 fragments to increase
+/// the size of frags[] and the number of bits in 'have' and 'expecting'.
 pub(crate) struct FragmentedPacket {
     pub ts_ticks: i64,
     pub frags: [Option<PacketBuffer>; PACKET_FRAGMENT_COUNT_MAX],
@@ -24,6 +27,7 @@ impl FragmentedPacket {
     #[inline(always)]
     pub fn new(ts: i64) -> Self {
+        debug_assert_eq!(PACKET_FRAGMENT_COUNT_MAX, 8);
         Self {
             ts_ticks: ts,
             frags: [None, None, None, None, None, None, None, None],
@@ -36,17 +40,33 @@ impl FragmentedPacket {
     #[inline(always)]
     pub fn add_fragment(&mut self, frag: PacketBuffer, no: u8, expecting: u8) -> bool {
         self.frags.get_mut(no as usize).map_or(false, |entry| {
-            // Note that a duplicate fragment just gets silently replaced. This shouldn't happen
-            // unless a dupe occurred at the network level, in which case this is usually a
-            // no-op event. There is no security implication since the whole packet gets MAC'd
-            // after assembly.
-            if entry.replace(frag).is_none() {
-                self.have += 1;
-                self.expecting |= expecting; // expecting is either 0 or the expected total
-                self.have == self.expecting
-            } else {
-                false
-            }
+            /*
+             * This works by setting bit N in the 'have' bit mask and then setting X bits
+             * in 'expecting' if the 'expecting' field is non-zero. Since the packet head
+             * does not carry the expecting fragment count (it will be provided as zero) and
+             * all subsequent fragments should have the same fragment count, this will yield
+             * a 'have' of 1 and an 'expecting' of 0 after the head arrives. Then 'expecting'
+             * will be set to the right bit pattern by the first fragment and 'true' will get
+             * returned once all fragments have arrived and therefore all flags in 'have' are
+             * set.
+             *
+             * Receipt of a four-fragment packet would look like:
+             *
+             * after head      : have == 0x01, expecting == 0x00 -> false
+             * after fragment 1: have == 0x03, expecting == 0x0f -> false
+             * after fragment 2: have == 0x07, expecting == 0x0f -> false
+             * after fragment 3: have == 0x0f, expecting == 0x0f -> true (done!)
+             *
+             * This is just a few instructions in ASM and also correctly handles duplicated
+             * packet fragments. If the remaining fragments never arrive, the incomplete
+             * packet eventually times out and is discarded.
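The trace in that comment can be checked with a tiny standalone model of the two masks. This mirrors add_fragment() without holding any buffers; the zero guard matches the guard in the code below it, since `0xff_u8.wrapping_shr(8)` would wrap the shift amount to zero and yield 0xff:

```rust
// Standalone re-creation of the have/expecting bookkeeping, for verification only.
fn merge(have: &mut u8, expecting: &mut u8, no: u8, total: u8) -> bool {
    *have |= 1u8.wrapping_shl(no as u32); // set bit N for this fragment
    if total != 0 {
        // set the low 'total' bits: the pattern all fragments must eventually fill
        *expecting |= 0xffu8.wrapping_shr(8 - total as u32);
    }
    *have == *expecting
}

fn main() {
    let (mut have, mut expecting) = (0u8, 0u8);
    assert!(!merge(&mut have, &mut expecting, 0, 0)); // head: have 0x01, expecting 0x00
    assert!(!merge(&mut have, &mut expecting, 1, 4)); // have 0x03, expecting 0x0f
    assert!(!merge(&mut have, &mut expecting, 2, 4)); // have 0x07, expecting 0x0f
    assert!(merge(&mut have, &mut expecting, 3, 4)); // have 0x0f == expecting: done
}
```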
+             */
+
+            let _ = entry.insert(frag);
+
+            self.have |= 1_u8.wrapping_shl(no as u32);
+            // 'expecting' is zero for the packet head; only merge a non-zero fragment
+            // count, since 0xff_u8.wrapping_shr(8) would wrap the shift to 0 and yield 0xff.
+            if expecting != 0 {
+                self.expecting |= 0xff_u8.wrapping_shr(8 - (expecting as u32));
+            }
+            self.have == self.expecting
         })
     }
 }
diff --git a/zerotier-network-hypervisor/src/vl1/identity.rs b/zerotier-network-hypervisor/src/vl1/identity.rs
index 11f189be0..e7360469f 100644
--- a/zerotier-network-hypervisor/src/vl1/identity.rs
+++ b/zerotier-network-hypervisor/src/vl1/identity.rs
@@ -349,7 +349,6 @@ impl Identity {
         }
     }
 
-    #[inline(always)]
     pub fn to_bytes(&self, include_algorithms: u8, include_private: bool) -> Buffer<MAX_MARSHAL_SIZE> {
         let mut b: Buffer<MAX_MARSHAL_SIZE> = Buffer::new();
         assert!(self.marshal(&mut b, include_algorithms, include_private).is_ok());
diff --git a/zerotier-network-hypervisor/src/vl1/inetaddress.rs b/zerotier-network-hypervisor/src/vl1/inetaddress.rs
index fbc27ac8c..c22a2bc01 100644
--- a/zerotier-network-hypervisor/src/vl1/inetaddress.rs
+++ b/zerotier-network-hypervisor/src/vl1/inetaddress.rs
@@ -81,6 +81,7 @@ pub union InetAddress {
 impl TryInto<IpAddr> for InetAddress {
     type Error = crate::error::InvalidParameterError;
 
+    #[inline(always)]
     fn try_into(self) -> Result<IpAddr, Self::Error> {
         match unsafe { self.sa.sa_family } {
             AF_INET => Ok(IpAddr::V4(Ipv4Addr::from(unsafe { self.sin.sin_addr.s_addr.to_ne_bytes() }))),
@@ -93,6 +94,7 @@ impl TryInto<Ipv4Addr> for InetAddress {
     type Error = crate::error::InvalidParameterError;
 
+    #[inline(always)]
     fn try_into(self) -> Result<Ipv4Addr, Self::Error> {
         match unsafe { self.sa.sa_family } {
             AF_INET => Ok(Ipv4Addr::from(unsafe { self.sin.sin_addr.s_addr.to_ne_bytes() })),
@@ -104,6 +106,7 @@ impl TryInto<Ipv6Addr> for InetAddress {
     type Error = crate::error::InvalidParameterError;
 
+    #[inline(always)]
     fn try_into(self) -> Result<Ipv6Addr, Self::Error> {
         match unsafe { self.sa.sa_family } {
             AF_INET6 => Ok(Ipv6Addr::from(unsafe { self.sin6.sin6_addr.s6_addr })),
@@ -115,6 +118,7 @@ impl TryInto<SocketAddr> for InetAddress {
     type Error = crate::error::InvalidParameterError;
 
+    #[inline(always)]
     fn try_into(self) -> Result<SocketAddr, Self::Error> {
         unsafe {
             match self.sa.sa_family {
@@ -129,6 +133,7 @@ impl TryInto<SocketAddrV4> for InetAddress {
     type Error = crate::error::InvalidParameterError;
 
+    #[inline(always)]
     fn try_into(self) -> Result<SocketAddrV4, Self::Error> {
         unsafe {
             match self.sa.sa_family {
@@ -142,6 +147,7 @@ impl TryInto<SocketAddrV6> for InetAddress {
     type Error = crate::error::InvalidParameterError;
 
+    #[inline(always)]
     fn try_into(self) -> Result<SocketAddrV6, Self::Error> {
         unsafe {
             match self.sa.sa_family {
@@ -153,6 +159,7 @@
 }
 
 impl From<&IpAddr> for InetAddress {
+    #[inline(always)]
     fn from(ip: &IpAddr) -> Self {
         match ip {
             IpAddr::V4(ip4) => Self::from(ip4),
@@ -197,6 +204,7 @@
 
 impl From<&SocketAddr> for InetAddress {
+    #[inline(always)]
     fn from(sa: &SocketAddr) -> Self {
         match sa {
             SocketAddr::V4(sa4) => Self::from(sa4),
@@ -255,6 +263,7 @@ impl Default for InetAddress {
 }
 
 impl std::fmt::Debug for InetAddress {
+    #[inline(always)]
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_str(&self.to_string())
     }
@@ -406,6 +415,7 @@ impl InetAddress {
     }
 
     /// Get the address family of this InetAddress: AF_INET, AF_INET6, or 0 if uninitialized.
+    #[inline(always)]
     pub fn family(&self) -> u8 {
         unsafe { self.sa.sa_family as u8 }
     }
@@ -449,7 +459,6 @@ impl InetAddress {
     }
 
     /// Get raw IP bytes, with length dependent on address family (4 or 16).
-    #[inline(always)]
     pub fn ip_bytes(&self) -> &[u8] {
         unsafe {
             match self.sa.sa_family as u8 {
@@ -464,7 +473,7 @@ impl InetAddress {
     /// Bytes are packed in native endian so the resulting u128 may not be the same between systems.
     /// This value is intended for local lookup use only.
     #[inline(always)]
-    pub fn ip_as_native_u128(&self) -> u128 {
+    pub(crate) fn ip_as_native_u128(&self) -> u128 {
         unsafe {
             match self.sa.sa_family as u8 {
                 AF_INET => self.sin.sin_addr.s_addr as u128,
@@ -475,7 +484,6 @@ impl InetAddress {
     }
 
     /// Get the IP port for this InetAddress.
-    #[inline(always)]
     pub fn port(&self) -> u16 {
         unsafe {
             u16::from_be(match self.sa.sa_family as u8 {
@@ -490,7 +498,6 @@ impl InetAddress {
     ///
     /// This does nothing on uninitialized InetAddress objects. An address must first
     /// be initialized with an IP to select the correct address type.
-    #[inline(always)]
     pub fn set_port(&mut self, port: u16) {
         let port = port.to_be();
         unsafe {
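All of these conversions reduce to dispatching on sa_family and respecting byte order: sockaddr_in keeps s_addr and sin_port in network byte order, hence the raw-byte and from_be round-trips in the code above. A toy model (AF_INET's value is platform-specific and the real type is a sockaddr union, so this is only a sketch):

```rust
use std::net::{IpAddr, Ipv4Addr};

const AF_INET: u8 = 2; // platform-specific; 2 on Linux and most Unixes

// Reduced model of the TryInto<IpAddr> path: never interpret s_addr or sin_port as
// host-endian integers; go through raw bytes / from_be instead.
fn to_ip(family: u8, s_addr: u32, sin_port: u16) -> Result<(IpAddr, u16), &'static str> {
    match family {
        AF_INET => Ok((
            IpAddr::V4(Ipv4Addr::from(s_addr.to_ne_bytes())),
            u16::from_be(sin_port),
        )),
        _ => Err("unrecognized address family"),
    }
}

fn main() {
    // 127.0.0.1:9993 as it would sit in a sockaddr_in (both fields big-endian).
    let s_addr = u32::from_ne_bytes([127, 0, 0, 1]);
    let port = 9993u16.to_be();
    assert_eq!(to_ip(AF_INET, s_addr, port).unwrap(), ("127.0.0.1".parse().unwrap(), 9993));
}
```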
diff --git a/zerotier-network-hypervisor/src/vl1/mod.rs b/zerotier-network-hypervisor/src/vl1/mod.rs
index 0538cd007..e4dde6734 100644
--- a/zerotier-network-hypervisor/src/vl1/mod.rs
+++ b/zerotier-network-hypervisor/src/vl1/mod.rs
@@ -10,14 +10,15 @@ pub mod endpoint;
 pub mod identity;
 pub mod inetaddress;
 
-pub(crate) mod address;
-pub(crate) mod dictionary;
+mod address;
+mod dictionary;
+mod mac;
+mod path;
+mod peer;
+
 pub(crate) mod fragmentedpacket;
 pub(crate) mod hybridkey;
-pub(crate) mod mac;
 pub(crate) mod node;
-pub(crate) mod path;
-pub(crate) mod peer;
 #[allow(unused)]
 pub(crate) mod protocol;
 pub(crate) mod symmetricsecret;
@@ -32,5 +33,3 @@ pub use mac::MAC;
 pub use node::{Node, SystemInterface};
 pub use path::Path;
 pub use peer::Peer;
-
-pub use protocol::{PACKET_FRAGMENT_COUNT_MAX, PACKET_SIZE_MAX};
diff --git a/zerotier-network-hypervisor/src/vl1/path.rs b/zerotier-network-hypervisor/src/vl1/path.rs
index bb668fb60..0e1290bd4 100644
--- a/zerotier-network-hypervisor/src/vl1/path.rs
+++ b/zerotier-network-hypervisor/src/vl1/path.rs
@@ -41,8 +41,8 @@ lazy_static! {
 /// for them and uniform application of things like keepalives.
 pub struct Path {
     endpoint: Mutex<Arc<Endpoint>>,
-    local_socket: Option<NonZeroI64>,
-    local_interface: Option<NonZeroI64>,
+    pub(crate) local_socket: Option<NonZeroI64>,
+    pub(crate) local_interface: Option<NonZeroI64>,
     last_send_time_ticks: AtomicI64,
     last_receive_time_ticks: AtomicI64,
     fragmented_packets: Mutex<HashMap<u64, FragmentedPacket, U64NoOpHasher>>,
@@ -98,26 +98,6 @@ impl Path {
         self.endpoint.lock().clone()
     }
 
-    #[inline(always)]
-    pub fn local_socket(&self) -> Option<NonZeroI64> {
-        self.local_socket
-    }
-
-    #[inline(always)]
-    pub fn local_interface(&self) -> Option<NonZeroI64> {
-        self.local_interface
-    }
-
-    #[inline(always)]
-    pub fn last_send_time_ticks(&self) -> i64 {
-        self.last_send_time_ticks.load(Ordering::Relaxed)
-    }
-
-    #[inline(always)]
-    pub fn last_receive_time_ticks(&self) -> i64 {
-        self.last_receive_time_ticks.load(Ordering::Relaxed)
-    }
-
     /// Receive a fragment and return a FragmentedPacket if the entire packet was assembled.
     /// This returns None if more fragments are needed to assemble the packet.
     pub(crate) fn receive_fragment(&self, packet_id: u64, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64) -> Option<FragmentedPacket> {
@@ -145,31 +125,34 @@ impl Path {
         }
     }
 
+    /// Called when any packet is received.
     #[inline(always)]
     pub(crate) fn log_receive_anything(&self, time_ticks: i64) {
         self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);
     }
 
+    /// Called when a real packet is received and passes authentication checks.
     pub(crate) fn log_receive_authenticated_packet(&self, _bytes: usize, source_endpoint: &Endpoint) {
-        let mut replace = false;
         match source_endpoint {
             Endpoint::IpUdp(ip) => {
-                let ep = self.endpoint.lock().clone();
-                match ep.as_ref() {
-                    Endpoint::IpUdp(ip_orig) => {
-                        debug_assert!(ip_orig.ip_bytes().eq(ip.ip_bytes()));
-                        if ip_orig.port() != ip.port() {
-                            replace = true;
+                // If an IPv4 UDP remote IP is the same but the port changes, learn the new port by replacing the
+                // endpoint with the new one. This is because IPv4 NATs will occasionally remap ports at random.
+                if ip.is_ipv4() {
+                    let mut ep = self.endpoint.lock();
+                    match ep.as_ref() {
+                        Endpoint::IpUdp(ip_orig) => {
+                            // These should always be equal because this path would have been looked up by IP, but sanity check in debug.
+                            debug_assert_eq!(ip_orig.ip_bytes(), ip.ip_bytes());
+                            if ip_orig.port() != ip.port() {
+                                (*ep) = Arc::new(source_endpoint.clone());
+                            }
                         }
+                        _ => {}
                     }
-                    _ => {}
                 }
             }
             _ => {}
         }
-        if replace {
-            (*self.endpoint.lock()) = Arc::new(source_endpoint.clone());
-        }
     }
 
     #[inline(always)]
@@ -178,7 +161,6 @@ impl Path {
     }
 
     pub(crate) const CALL_EVERY_INTERVAL_MS: i64 = PATH_KEEPALIVE_INTERVAL;
-
     pub(crate) fn call_every_interval<SI: SystemInterface>(&self, _si: &SI, time_ticks: i64) {
         self.fragmented_packets.lock().retain(|_, frag| (time_ticks - frag.ts_ticks) < PACKET_FRAGMENT_EXPIRATION);
     }
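The port-learning rule above reads compactly as a pure function; here is a miniature model using plain SocketAddrV4 in place of the Endpoint enum. Note that the real code only applies it to authenticated packets, which is what keeps an off-path attacker from redirecting the path:

```rust
use std::net::SocketAddrV4;

// Same IPv4 address, authenticated traffic, different source port => adopt the new
// port, since an IPv4 NAT has likely rebuilt its mapping.
fn learned(current: SocketAddrV4, observed: SocketAddrV4) -> SocketAddrV4 {
    if current.ip() == observed.ip() && current.port() != observed.port() {
        observed
    } else {
        current
    }
}

fn main() {
    let cur: SocketAddrV4 = "203.0.113.7:9993".parse().unwrap();
    let seen: SocketAddrV4 = "203.0.113.7:41640".parse().unwrap();
    assert_eq!(learned(cur, seen), seen); // the NAT remapped the port: learn it
}
```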
diff --git a/zerotier-network-hypervisor/src/vl1/peer.rs b/zerotier-network-hypervisor/src/vl1/peer.rs
index 1b97d01cb..303ae04fa 100644
--- a/zerotier-network-hypervisor/src/vl1/peer.rs
+++ b/zerotier-network-hypervisor/src/vl1/peer.rs
@@ -221,7 +221,18 @@ impl Peer {
     ///
     /// If the packet comes in multiple fragments, the fragments slice should contain all
     /// those fragments after the main packet header and first chunk.
-    pub(crate) fn receive(&self, node: &Node, si: &SI, vi: &VI, time_ticks: i64, source_endpoint: &Endpoint, source_path: &Arc<Path>, header: &PacketHeader, frag0: &Buffer<{ PACKET_SIZE_MAX }>, fragments: &[Option<PacketBuffer>]) {
+    pub(crate) fn receive(
+        &self,
+        node: &Node,
+        si: &SI,
+        vi: &VI,
+        time_ticks: i64,
+        source_endpoint: &Endpoint,
+        source_path: &Arc<Path>,
+        header: &PacketHeader,
+        frag0: &Buffer<{ PACKET_SIZE_MAX }>,
+        fragments: &[Option<PacketBuffer>],
+    ) {
         let _ = frag0.as_bytes_starting_at(PACKET_VERB_INDEX).map(|packet_frag0_payload_bytes| {
             let mut payload: Buffer<PACKET_SIZE_MAX> = unsafe { Buffer::new_without_memzero() };
@@ -375,7 +386,7 @@ impl Peer {
     /// via a root or some other route.
     pub(crate) fn send<SI: SystemInterface>(&self, si: &SI, node: &Node, time_ticks: i64, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
         self.path(node).map_or(false, |path| {
-            if self.send_to_endpoint(si, path.endpoint().as_ref(), path.local_socket(), path.local_interface(), packet) {
+            if self.send_to_endpoint(si, path.endpoint().as_ref(), path.local_socket, path.local_interface, packet) {
                 self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
                 self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
                 true
             } else {
@@ -394,7 +405,7 @@ impl Peer {
     /// Intermediates don't need to adjust fragmentation.
     pub(crate) fn forward<SI: SystemInterface>(&self, si: &SI, time_ticks: i64, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
         self.direct_path().map_or(false, |path| {
-            if si.wire_send(path.endpoint().as_ref(), path.local_socket(), path.local_interface(), &[packet.as_bytes()], 0) {
+            if si.wire_send(path.endpoint().as_ref(), path.local_socket, path.local_interface, &[packet.as_bytes()], 0) {
                 self.last_forward_time_ticks.store(time_ticks, Ordering::Relaxed);
                 self.total_bytes_forwarded.fetch_add(packet.len() as u64, Ordering::Relaxed);
                 true
             } else {
@@ -517,7 +528,7 @@ impl Peer {
         path.map_or_else(
             || self.send_to_endpoint(si, &destination, None, None, &packet),
             |p| {
-                if self.send_to_endpoint(si, &destination, p.local_socket(), p.local_interface(), &packet) {
+                if self.send_to_endpoint(si, &destination, p.local_socket, p.local_interface, &packet) {
                     p.log_send_anything(time_ticks);
                     true
                 } else {
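The send and forward paths update their telemetry without taking a lock; a reduced model of that pattern (the Stats type here is hypothetical, mirroring the atomics used in the hunks above):

```rust
use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};

struct Stats {
    last_send_time_ticks: AtomicI64,
    total_bytes_sent: AtomicU64,
}

impl Stats {
    fn log_send(&self, time_ticks: i64, bytes: usize) {
        // Relaxed is enough: these are telemetry values, not synchronization
        // points that other data depends on.
        self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
        self.total_bytes_sent.fetch_add(bytes as u64, Ordering::Relaxed);
    }
}

fn main() {
    let s = Stats { last_send_time_ticks: AtomicI64::new(0), total_bytes_sent: AtomicU64::new(0) };
    s.log_send(42, 1400);
    assert_eq!(s.total_bytes_sent.load(Ordering::Relaxed), 1400);
}
```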
diff --git a/zerotier-network-hypervisor/src/vl1/protocol.rs b/zerotier-network-hypervisor/src/vl1/protocol.rs
index 917f43391..169e1664e 100644
--- a/zerotier-network-hypervisor/src/vl1/protocol.rs
+++ b/zerotier-network-hypervisor/src/vl1/protocol.rs
@@ -9,7 +9,7 @@ use std::convert::TryFrom;
 use std::mem::MaybeUninit;
 
-use crate::util::buffer::{Buffer, RawObject};
+use crate::util::buffer::{Buffer, FlatBlob};
 use crate::vl1::Address;
 
 pub const VERB_VL1_NOP: u8 = 0x00;
@@ -220,7 +220,7 @@ pub struct PacketHeader {
     pub mac: [u8; 8],
 }
 
-unsafe impl RawObject for PacketHeader {}
+unsafe impl FlatBlob for PacketHeader {}
 
 impl PacketHeader {
     #[inline(always)]
@@ -284,7 +284,7 @@ pub struct FragmentHeader {
     pub reserved_hops: u8, // rrrrrHHH (3 hop bits, rest reserved)
 }
 
-unsafe impl RawObject for FragmentHeader {}
+unsafe impl FlatBlob for FragmentHeader {}
 
 impl FragmentHeader {
     #[inline(always)]
@@ -322,7 +322,7 @@ impl FragmentHeader {
 }
 
 pub(crate) mod message_component_structs {
-    use crate::util::buffer::RawObject;
+    use crate::util::buffer::FlatBlob;
 
     #[repr(C, packed)]
     pub struct OkHeader {
         pub verb: u8,
         pub in_re_message_id: [u8; 8],
     }
 
-    unsafe impl RawObject for OkHeader {}
+    unsafe impl FlatBlob for OkHeader {}
 
     #[repr(C, packed)]
     pub struct ErrorHeader {
         pub verb: u8,
         pub in_re_message_id: [u8; 8],
         pub error_code: u8,
     }
 
-    unsafe impl RawObject for ErrorHeader {}
+    unsafe impl FlatBlob for ErrorHeader {}
 
     #[repr(C, packed)]
     pub struct HelloFixedHeaderFields {
         pub verb: u8,
         pub version_proto: u8,
         pub version_major: u8,
         pub version_minor: u8,
         pub version_revision: [u8; 2], // u16
         pub timestamp: [u8; 8], // u64
     }
 
-    unsafe impl RawObject for HelloFixedHeaderFields {}
+    unsafe impl FlatBlob for HelloFixedHeaderFields {}
 
     #[repr(C, packed)]
     pub struct OkHelloFixedHeaderFields {
         pub timestamp_echo: [u8; 8], // u64
         pub version_proto: u8,
         pub version_major: u8,
         pub version_minor: u8,
         pub version_revision: [u8; 2], // u16
     }
 
-    unsafe impl RawObject for OkHelloFixedHeaderFields {}
+    unsafe impl FlatBlob for OkHelloFixedHeaderFields {}
 }
 
 #[cfg(test)]
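These headers being repr(C, packed) FlatBlobs is what makes the in-place Buffer reads match the wire format byte for byte. A compile-time size guard in that spirit follows; the field list is a hypothetical stand-in, since only reserved_hops of FragmentHeader appears in the hunk above:

```rust
// Hypothetical model of a packed wire header; field names are illustrative only.
#[repr(C, packed)]
struct WireHeaderModel {
    id: [u8; 8],            // packet ID
    dest: [u8; 5],          // destination address
    fragment_indicator: u8, // marks this packet as a fragment
    total_and_no: u8,       // fragment count and fragment number, packed in one byte
    reserved_hops: u8,      // rrrrrHHH: 3 hop bits, rest reserved
}

// With repr(C, packed) there is no padding, so the struct is exactly the sum of its
// field sizes and can be overlaid on wire bytes; default repr(Rust) guarantees neither.
const _: () = assert!(std::mem::size_of::<WireHeaderModel>() == 16);

fn main() {}
```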