Just a whole bucket of Rust.

Adam Ierymenko 2021-12-06 09:40:41 -05:00
parent c99fc2f36e
commit 810a1fb229
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
26 changed files with 536 additions and 585 deletions

View file

@ -32,6 +32,17 @@ impl SHA512 {
h
}
pub fn hmac_multipart(key: &[u8], msg: &[&[u8]]) -> [u8; SHA512_HASH_SIZE] {
let mut m = gcrypt::mac::Mac::new(gcrypt::mac::Algorithm::HmacSha512).unwrap();
m.set_key(key).expect("FATAL: invalid HMAC-SHA512 key");
for msg_part in msg.iter() {
m.update(*msg_part).expect("FATAL: HMAC-SHA512 failed");
}
let mut h = [0_u8; SHA512_HASH_SIZE];
m.get_mac(&mut h).expect("FATAL: HMAC-SHA512 failed");
h
}
#[inline(always)]
pub fn new() -> Self { Self(gcrypt::digest::MessageDigest::new(gcrypt::digest::Algorithm::Sha512).unwrap()) }
@ -84,6 +95,17 @@ impl SHA384 {
h
}
pub fn hmac_multipart(key: &[u8], msg: &[&[u8]]) -> [u8; SHA384_HASH_SIZE] {
let mut m = gcrypt::mac::Mac::new(gcrypt::mac::Algorithm::HmacSha384).unwrap();
m.set_key(key).expect("FATAL: invalid HMAC-SHA384 key");
for msg_part in msg.iter() {
m.update(*msg_part).expect("FATAL: HMAC-SHA384 failed");
}
let mut h = [0_u8; SHA384_HASH_SIZE];
m.get_mac(&mut h).expect("FATAL: HMAC-SHA384 failed");
h
}
#[inline(always)]
pub fn new() -> Self { Self(gcrypt::digest::MessageDigest::new(gcrypt::digest::Algorithm::Sha384).unwrap()) }
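A quick sanity sketch of the new hmac_multipart() helper (key and message values hypothetical; assumes the existing single-shot SHA512::hmac in this crate): HMAC over the parts is identical to HMAC over their concatenation, which is what the multipart extended packet authentication later in this commit relies on.

let key = [7_u8; 64];
assert_eq!(
    SHA512::hmac_multipart(&key, &[&b"hello "[..], &b"world"[..]]),
    SHA512::hmac(&key, b"hello world")
);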

View file

@ -23,7 +23,7 @@ pub struct NetworkHypervisor {
}
impl NetworkHypervisor {
pub fn new<CI: Interface>(ci: &CI, auto_generate_identity_type: Option<crate::vl1::identity::Type>) -> Result<NetworkHypervisor, InvalidParameterError> {
pub fn new<CI: Interface>(ci: &CI, auto_generate_identity_type: Option<crate::vl1::identity::IdentityType>) -> Result<NetworkHypervisor, InvalidParameterError> {
Ok(NetworkHypervisor {
vl1: Node::new(ci, auto_generate_identity_type)?,
vl2: Switch::new(),

View file

@ -7,7 +7,7 @@
*/
use std::io::Write;
use std::mem::size_of;
use std::mem::{MaybeUninit, size_of};
use crate::util::pool::PoolFactory;
@ -39,6 +39,10 @@ impl<const L: usize> Buffer<L> {
#[inline(always)]
pub fn new() -> Self { Self(0, [0_u8; L]) }
/// Create a zero size buffer without zeroing its actual memory.
#[inline(always)]
pub unsafe fn new_nozero() -> Self { Self(0, MaybeUninit::uninit().assume_init()) }
/// Get a Buffer initialized with a copy of a byte slice.
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> std::io::Result<Self> {
@ -82,6 +86,19 @@ impl<const L: usize> Buffer<L> {
self.1[0..prev_len].fill(0);
}
/// Load array into buffer.
/// This will panic if the array is larger than L.
#[inline(always)]
pub fn set_to(&mut self, b: &[u8]) {
let prev_len = self.0;
let len = b.len();
self.0 = len;
self.1[0..len].copy_from_slice(b);
if len < prev_len {
self.1[len..prev_len].fill(0);
}
}
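A usage sketch of set_to() (buffer size and contents hypothetical): a shrinking write zeroes the abandoned tail, so stale bytes from a longer previous payload cannot leak.

let mut buf: Buffer<16> = Buffer::new();
buf.set_to(&[1, 2, 3, 4]);
buf.set_to(&[9, 9]); // shorter: old bytes at 2..4 are zeroed
assert_eq!(buf.as_bytes(), &[9, 9]);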
#[inline(always)]
pub fn len(&self) -> usize { self.0 }
@ -109,48 +126,39 @@ impl<const L: usize> Buffer<L> {
#[inline(always)]
pub unsafe fn get_unchecked(&self, i: usize) -> u8 { *self.1.get_unchecked(i) }
/// Append a packed structure and call a function to initialize it in place.
/// Anything not initialized will be zero.
/// Append a structure and return a mutable reference to its memory.
#[inline(always)]
pub fn append_and_init_struct<T: RawObject, R, F: FnOnce(&mut T) -> R>(&mut self, initializer: F) -> std::io::Result<R> {
pub fn append_struct_get_mut<T: RawObject>(&mut self) -> std::io::Result<&mut T> {
let ptr = self.0;
let end = ptr + size_of::<T>();
if end <= L {
self.0 = end;
unsafe {
Ok(initializer(&mut *self.1.as_mut_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()))
}
Ok(unsafe { &mut *self.1.as_mut_ptr().add(ptr).cast() })
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Append and initialize a byte array with a fixed size set at compile time.
/// This is more efficient than setting a size at runtime as it may allow the compiler to
/// skip some bounds checking. Any bytes not initialized will be zero.
/// Append a fixed size array and return a mutable reference to its memory.
#[inline(always)]
pub fn append_and_init_bytes_fixed<R, F: FnOnce(&mut [u8; N]) -> R, const N: usize>(&mut self, initializer: F) -> std::io::Result<R> {
pub fn append_bytes_fixed_get_mut<const S: usize>(&mut self) -> std::io::Result<&mut [u8; S]> {
let ptr = self.0;
let end = ptr + N;
let end = ptr + S;
if end <= L {
self.0 = end;
unsafe {
Ok(initializer(&mut *self.1.as_mut_ptr().cast::<u8>().offset(ptr as isize).cast::<[u8; N]>()))
}
Ok(unsafe { &mut *self.1.as_mut_ptr().add(ptr).cast() })
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Append and initialize a slice with a size that is set at runtime.
/// Any bytes not initialized will be zero.
#[inline(always)]
pub fn append_and_init_bytes<R, F: FnOnce(&mut [u8]) -> R>(&mut self, l: usize, initializer: F) -> std::io::Result<R> {
/// Append a runtime sized array and return a mutable reference to its memory.
pub fn append_bytes_get_mut(&mut self, s: usize) -> std::io::Result<&mut [u8]> {
let ptr = self.0;
let end = ptr + l;
if end <= L {
self.0 = end;
Ok(initializer(&mut self.1[ptr..end]))
Ok(&mut self.1[ptr..end])
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
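For orientation, a sketch of the append-then-fill style these *_get_mut methods introduce in place of the old closure-based append_and_init_* API (buffer size and value hypothetical):

let mut buf: Buffer<64> = Buffer::new();
let id: &mut [u8; 8] = buf.append_bytes_fixed_get_mut().unwrap();
id.copy_from_slice(&0x0102030405060708_u64.to_be_bytes());
assert_eq!(buf.len(), 8);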
@ -171,7 +179,6 @@ impl<const L: usize> Buffer<L> {
}
/// Append a fixed length byte array (copy into buffer).
/// Use append_and_init_ functions if possible as these avoid extra copies.
#[inline(always)]
pub fn append_bytes_fixed<const S: usize>(&mut self, buf: &[u8; S]) -> std::io::Result<()> {
let ptr = self.0;
@ -364,7 +371,7 @@ impl<const L: usize> Buffer<L> {
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(crate::util::load_u16_be(&self.1[ptr..end]))
Ok(u16::from_be_bytes(unsafe { *self.1.as_ptr().add(ptr).cast() }))
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
@ -378,7 +385,7 @@ impl<const L: usize> Buffer<L> {
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(crate::util::load_u32_be(&self.1[ptr..end]))
Ok(u32::from_be_bytes(unsafe { *self.1.as_ptr().add(ptr).cast() }))
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
@ -392,7 +399,7 @@ impl<const L: usize> Buffer<L> {
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(crate::util::load_u64_be(&self.1[ptr..end]))
Ok(u64::from_be_bytes(unsafe { *self.1.as_ptr().add(ptr).cast() }))
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}

View file

@ -15,140 +15,38 @@ pub use zerotier_core_crypto::varint;
pub(crate) const ZEROES: [u8; 64] = [0_u8; 64];
/// Obtain a reference to a sub-array within an existing array.
/// Attempts to violate array bounds will panic or fail to compile.
#[inline(always)]
pub(crate) unsafe fn equal_ptr(a: *const u8, b: *const u8, l: usize) -> bool {
for i in 0..l {
if *a.offset(i as isize) != *b.offset(i as isize) {
return false;
}
}
true
pub(crate) fn array_range<T, const A: usize, const START: usize, const LEN: usize>(a: &[T; A]) -> &[T; LEN] {
assert!((START + LEN) <= A);
unsafe { &*a.as_ptr().add(START).cast::<[T; LEN]>() }
}
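A sketch of array_range() in use (tag value hypothetical); this is the same pattern the AES-GMAC-SIV receive path below uses to recover the message ID from the first 8 bytes of the 16-byte tag:

let tag = [0xab_u8; 16];
let id_bytes: &[u8; 8] = array_range::<u8, 16, 0, 8>(&tag);
let message_id = u64::from_ne_bytes(*id_bytes);
assert_eq!(message_id, u64::from_ne_bytes([0xab_u8; 8]));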
#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))]
/// Cast a u64 reference to a byte array in place.
/// Going the other direction is not safe on some architectures, but this should be safe everywhere.
#[inline(always)]
pub(crate) fn store_u16_be(i: u16, d: &mut [u8]) {
unsafe { *d.as_mut_ptr().cast::<u16>() = i.to_be() };
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))]
#[inline(always)]
pub(crate) fn store_u16_be(i: u16, d: &mut [u8]) {
d[0] = (i >> 8) as u8;
d[1] = i as u8;
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))]
#[inline(always)]
pub(crate) fn store_u32_be(i: u32, d: &mut [u8]) {
unsafe { *d.as_mut_ptr().cast::<u32>() = i.to_be() };
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))]
#[inline(always)]
pub(crate) fn store_u32_be(i: u32, d: &mut [u8]) {
d[0] = (i >> 24) as u8;
d[1] = (i >> 16) as u8;
d[2] = (i >> 8) as u8;
d[3] = i as u8;
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))]
#[inline(always)]
pub(crate) fn store_u64_be(i: u64, d: &mut [u8]) {
unsafe { *d.as_mut_ptr().cast::<u64>() = i.to_be() };
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))]
#[inline(always)]
pub(crate) fn store_u64_be(i: u64, d: &mut [u8]) {
d[0] = (i >> 56) as u8;
d[1] = (i >> 48) as u8;
d[2] = (i >> 40) as u8;
d[3] = (i >> 32) as u8;
d[4] = (i >> 24) as u8;
d[5] = (i >> 16) as u8;
d[6] = (i >> 8) as u8;
d[7] = i as u8;
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))]
#[inline(always)]
pub(crate) fn load_u16_be(d: &[u8]) -> u16 {
u16::from_be(unsafe { *d.as_ptr().cast::<u16>() })
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))]
#[inline(always)]
pub(crate) fn load_u16_be(d: &[u8]) -> u16 {
(d[0] as u16) << 8 | (d[1] as u16)
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))]
#[inline(always)]
pub(crate) fn load_u32_be(d: &[u8]) -> u32 {
u32::from_be(unsafe { *d.as_ptr().cast::<u32>() })
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))]
#[inline(always)]
pub(crate) fn load_u32_be(d: &[u8]) -> u32 {
(d[0] as u32) << 24 | (d[1] as u32) << 16 | (d[2] as u32) << 8 | (d[3] as u32)
}
#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))]
#[inline(always)]
pub(crate) fn load_u64_be(d: &[u8]) -> u64 {
u64::from_be(unsafe { *d.as_ptr().cast::<u64>() })
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))]
#[inline(always)]
pub(crate) fn load_u64_be(d: &[u8]) -> u64 {
(d[0] as u64) << 56 | (d[1] as u64) << 48 | (d[2] as u64) << 40 | (d[3] as u64) << 32 | (d[4] as u64) << 24 | (d[5] as u64) << 16 | (d[6] as u64) << 8 | (d[7] as u64)
}
/// Mix bits in a 64-bit integer.
/// https://nullprogram.com/blog/2018/07/31/
#[inline(always)]
pub(crate) fn hash64(mut x: u64) -> u64 {
x ^= x.wrapping_shr(30);
x = x.wrapping_mul(0xbf58476d1ce4e5b9);
x ^= x.wrapping_shr(27);
x = x.wrapping_mul(0x94d049bb133111eb);
x ^ x.wrapping_shr(31)
}
/// Mix bits in 32-bit integer.
/// https://nullprogram.com/blog/2018/07/31/
#[inline(always)]
pub(crate) fn hash32(mut x: u32) -> u32 {
x ^= x.wrapping_shr(16);
x = x.wrapping_mul(0x7feb352d);
x ^= x.wrapping_shr(15);
x = x.wrapping_mul(0x846ca68b);
x ^ x.wrapping_shr(16)
}
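One property worth noting (checkable by hand): both mixers fix zero, since every xorshift and multiply step maps 0 to 0. That is harmless here because the hashed keys, addresses and packet IDs, are nonzero.

assert_eq!(hash64(0), 0);
assert_eq!(hash32(0), 0);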
pub(crate) fn u64_as_bytes(i: &u64) -> &[u8; 8] { unsafe { &*(i as *const u64).cast() } }
/// A hasher for maps that just returns u64 values as-is.
///
/// This should be used only for things like ZeroTier addresses that are already random
/// and that aren't vulnerable to malicious crafting of identifiers.
#[derive(Copy, Clone)]
pub(crate) struct U64PassThroughHasher(u64);
pub struct U64NoOpHasher(u64);
impl U64PassThroughHasher {
impl U64NoOpHasher {
#[inline(always)]
pub fn new() -> Self { Self(0) }
}
impl std::hash::Hasher for U64PassThroughHasher {
impl std::hash::Hasher for U64NoOpHasher {
#[inline(always)]
fn finish(&self) -> u64 { self.0 }
#[inline(always)]
fn write(&mut self, _: &[u8]) {
panic!("U64PassThroughHasher can only be used with u64 and i64");
panic!("U64NoOpHasher should only be used with u64 and i64 types");
}
#[inline(always)]
@ -158,7 +56,7 @@ impl std::hash::Hasher for U64PassThroughHasher {
fn write_i64(&mut self, i: i64) { self.0 += i as u64; }
}
impl std::hash::BuildHasher for U64PassThroughHasher {
impl std::hash::BuildHasher for U64NoOpHasher {
type Hasher = Self;
#[inline(always)]
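A usage sketch (key value hypothetical), matching how Path keys its fragmented-packet map later in this commit:

use std::collections::HashMap;
let mut m: HashMap<u64, &str, U64NoOpHasher> =
    HashMap::with_capacity_and_hasher(4, U64NoOpHasher::new());
m.insert(0x1122334455667788, "in-flight fragment set");
assert!(m.contains_key(&0x1122334455667788));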

View file

@ -24,22 +24,19 @@ impl Address {
#[inline(always)]
pub fn from_u64(mut i: u64) -> Option<Address> {
i &= 0xffffffffff;
if i != 0 && (i >> 32) != ADDRESS_RESERVED_PREFIX as u64 {
Some(Address(unsafe { NonZeroU64::new_unchecked(i) }))
} else {
None
}
NonZeroU64::new(i).and_then(|ii| {
if (i >> 32) != ADDRESS_RESERVED_PREFIX as u64 {
Some(Address(ii))
} else {
None
}
})
}
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> Option<Address> {
if b.len() >= ADDRESS_SIZE {
let i = (b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64;
if i != 0 && (i >> 32) != ADDRESS_RESERVED_PREFIX as u64 {
Some(Address(unsafe { NonZeroU64::new_unchecked(i) }))
} else {
None
}
Self::from_u64((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64)
} else {
None
}
@ -47,12 +44,7 @@ impl Address {
#[inline(always)]
pub fn from_bytes_fixed(b: &[u8; ADDRESS_SIZE]) -> Option<Address> {
let i = (b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64;
if i != 0 && (i >> 32) != ADDRESS_RESERVED_PREFIX as u64 {
Some(Address(unsafe { NonZeroU64::new_unchecked(i) }))
} else {
None
}
Self::from_u64((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64)
}
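A behavior sketch of the consolidated validation (assuming ADDRESS_RESERVED_PREFIX is 0xff as in the C++ implementation; sample values hypothetical):

assert!(Address::from_u64(0).is_none());              // zero is invalid
assert!(Address::from_u64(0xff_0000_0001).is_none()); // reserved prefix
assert!(Address::from_u64(0x01_2345_6789).is_some()); // ordinary address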
#[inline(always)]

View file

@ -147,7 +147,7 @@ impl Endpoint {
let b: &[u8; 18] = buf.read_bytes_fixed(cursor)?;
Ok(Endpoint::IpUdp(InetAddress::from_ip_port(&b[0..16], crate::util::load_u16_be(&b[16..18]))))
} else {
Ok(Endpoint::Nil)
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "unrecognized endpoint type in stream"))
}
} else {
let read_mac = |buf: &Buffer<BL>, cursor: &mut usize| {
@ -158,6 +158,7 @@ impl Endpoint {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid MAC address"))
}
};
match type_byte - 16 {
TYPE_NIL => Ok(Endpoint::Nil),
TYPE_ZEROTIER => {

View file

@ -247,7 +247,7 @@ impl EphemeralKeyPairSet {
/// Symmetric secret representing a step in the ephemeral keying ratchet.
pub struct EphemeralSymmetricSecret {
secret: SymmetricSecret,
pub secret: SymmetricSecret,
ratchet_state: [u8; 16],
rekey_time: i64,
expire_time: i64,

View file

@ -16,7 +16,7 @@ use crate::vl1::protocol::*;
/// compiler to optimize out any additional state in Option.
pub(crate) struct FragmentedPacket {
pub ts_ticks: i64,
pub frags: [Option<PacketBuffer>; FRAGMENT_COUNT_MAX],
pub frags: [Option<PacketBuffer>; PACKET_FRAGMENT_COUNT_MAX],
pub have: u8,
pub expecting: u8,
}
@ -33,14 +33,13 @@ impl FragmentedPacket {
}
/// Add a fragment to this fragment set and return true if the packet appears complete.
/// Fragments with an out-of-range fragment number are ignored.
#[inline(always)]
pub fn add_fragment(&mut self, frag: PacketBuffer, no: u8, expecting: u8) -> bool {
if no < FRAGMENT_COUNT_MAX as u8 {
if no < PACKET_FRAGMENT_COUNT_MAX as u8 {
if self.frags[no as usize].replace(frag).is_none() {
self.have = self.have.wrapping_add(1);
self.expecting |= expecting; // in valid streams expecting is either 0 or the (same) total
return self.have == self.expecting && self.have < FRAGMENT_COUNT_MAX as u8;
return self.have == self.expecting && self.have < PACKET_FRAGMENT_COUNT_MAX as u8;
}
}
false
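An illustrative two-fragment flow (head_buf and frag1_buf are hypothetical pooled buffers): the head carries no total count, every true fragment carries the same total, so OR-ing expecting is idempotent and the set completes once have equals that total.

let mut fp = FragmentedPacket::new(now_ticks); // now_ticks hypothetical
assert!(!fp.add_fragment(head_buf, 0, 0));     // head: 1 received, total unknown
assert!(fp.add_fragment(frag1_buf, 1, 2));     // fragment declares 2 of 2 -> complete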

View file

@ -85,7 +85,7 @@ fn concat_v1_secret_keys(c25519: &[u8], ed25519: &[u8], p521_ecdh: &[u8], p521_e
#[derive(Copy, Clone)]
#[repr(u8)]
pub enum Type {
pub enum IdentityType {
/// Curve25519 / Ed25519 identity (type 0)
C25519 = 0,
/// Dual NIST P-521 ECDH / ECDSA + Curve25519 / Ed25519 (type 1)
@ -220,10 +220,10 @@ impl Identity {
/// take tens to hundreds of milliseconds on a typical 2020 system, while V1 identities
/// take about 500ms. Generation can take a lot longer on low power devices, but only
/// has to be done once.
pub fn generate(id_type: Type) -> Identity {
pub fn generate(id_type: IdentityType) -> Identity {
match id_type {
Type::C25519 => Self::generate_c25519(),
Type::P521 => Self::generate_p521()
IdentityType::C25519 => Self::generate_c25519(),
IdentityType::P521 => Self::generate_p521()
}
}
@ -330,7 +330,7 @@ impl Identity {
/// Get this identity's type.
#[inline(always)]
pub fn id_type(&self) -> Type { if self.v1.is_some() { Type::P521 } else { Type::C25519 } }
pub fn id_type(&self) -> IdentityType { if self.v1.is_some() { IdentityType::P521 } else { IdentityType::C25519 } }
/// Returns true if this identity also holds its secret keys.
#[inline(always)]
@ -390,7 +390,7 @@ impl Identity {
let addr = addr.unwrap();
let id_type = buf.read_u8(cursor)?;
if id_type == Type::C25519 as u8 {
if id_type == IdentityType::C25519 as u8 {
let c25519_public_bytes = buf.read_bytes_fixed::<{ C25519_PUBLIC_KEY_SIZE }>(cursor)?;
let ed25519_public_bytes = buf.read_bytes_fixed::<{ ED25519_PUBLIC_KEY_SIZE }>(cursor)?;
@ -421,7 +421,7 @@ impl Identity {
std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "unrecognized secret key length (type 0)"))
}
} else if id_type == Type::P521 as u8 {
} else if id_type == IdentityType::P521 as u8 {
let c25519_public_bytes = buf.read_bytes_fixed::<{ C25519_PUBLIC_KEY_SIZE }>(cursor)?;
let ed25519_public_bytes = buf.read_bytes_fixed::<{ ED25519_PUBLIC_KEY_SIZE }>(cursor)?;
@ -638,11 +638,11 @@ impl Hash for Identity {
mod tests {
use std::str::FromStr;
use crate::vl1::identity::{Identity, Type};
use crate::vl1::identity::{Identity, IdentityType};
#[test]
fn type0() {
let id = Identity::generate(Type::C25519);
let id = Identity::generate(IdentityType::C25519);
//println!("V0: {}", id.to_string());
if !id.locally_validate() {
panic!("new V0 identity validation failed");
@ -674,7 +674,7 @@ mod tests {
#[test]
fn type1() {
let start = std::time::SystemTime::now();
let id = Identity::generate(Type::P521);
let id = Identity::generate(IdentityType::P521);
let end = std::time::SystemTime::now();
println!("V1 generate: {}ms {}", end.duration_since(start).unwrap().as_millis(), id.to_string());
if !id.locally_validate() {

View file

@ -17,7 +17,6 @@ use std::str::FromStr;
use winapi::um::winsock2 as winsock2;
use crate::error::InvalidFormatError;
use crate::util::equal_ptr;
use crate::util::buffer::Buffer;
#[allow(non_camel_case_types)]
@ -463,7 +462,7 @@ impl PartialEq for InetAddress {
AF_INET => { self.sin.sin_port == other.sin.sin_port && self.sin.sin_addr.s_addr == other.sin.sin_addr.s_addr }
AF_INET6 => {
if self.sin6.sin6_port == other.sin6.sin6_port {
equal_ptr((&(self.sin6.sin6_addr) as *const in6_addr).cast(), (&(other.sin6.sin6_addr) as *const in6_addr).cast(), 16)
(*(&(self.sin6.sin6_addr) as *const in6_addr).cast::<[u8; 16]>()).eq(&*(&(other.sin6.sin6_addr) as *const in6_addr).cast::<[u8; 16]>())
} else {
false
}

View file

@ -19,7 +19,7 @@ pub struct MAC(NonZeroU64);
impl MAC {
#[inline(always)]
pub fn from_u64(i: u64) -> Option<MAC> { NonZeroU64::new(i & 0xffffffffffff).map_or(None, |i| Some(MAC(i))) }
pub fn from_u64(i: u64) -> Option<MAC> { NonZeroU64::new(i & 0xffffffffffff).map(|i| MAC(i)) }
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> Option<MAC> {

View file

@ -6,12 +6,12 @@
* https://www.zerotier.com/
*/
pub mod identity;
pub mod inetaddress;
pub mod endpoint;
pub mod rootset;
#[allow(unused)]
pub(crate) mod identity;
pub(crate) mod protocol;
pub(crate) mod node;
pub(crate) mod path;
@ -26,10 +26,12 @@ pub(crate) mod symmetricsecret;
pub use address::Address;
pub use mac::MAC;
pub use identity::Identity;
pub use identity::{Identity, IdentityType, IDENTITY_TYPE_0_SIGNATURE_SIZE, IDENTITY_TYPE_1_SIGNATURE_SIZE};
pub use endpoint::Endpoint;
pub use dictionary::Dictionary;
pub use inetaddress::InetAddress;
pub use peer::Peer;
pub use path::Path;
pub use node::{Node, NodeInterface};
pub use protocol::{PACKET_SIZE_MAX, PACKET_FRAGMENT_COUNT_MAX};

View file

@ -21,7 +21,7 @@ use crate::error::InvalidParameterError;
use crate::util::gate::IntervalGate;
use crate::util::pool::{Pool, Pooled};
use crate::util::buffer::Buffer;
use crate::vl1::{Address, Endpoint, Identity};
use crate::vl1::{Address, Endpoint, Identity, IdentityType};
use crate::vl1::path::Path;
use crate::vl1::peer::Peer;
use crate::vl1::protocol::*;
@ -46,7 +46,7 @@ pub trait NodeInterface {
fn event_user_message(&self, source: &Identity, message_type: u64, message: &[u8]);
/// Load this node's identity from the data store.
fn load_node_identity(&self) -> Option<&[u8]>;
fn load_node_identity(&self) -> Option<Vec<u8>>;
/// Save this node's identity.
/// Note that this is only called on first startup (after up) and after identity_changed.
@ -115,7 +115,7 @@ pub trait VL1PacketHandler {
#[derive(Default)]
struct BackgroundTaskIntervals {
whois: IntervalGate<{ WhoisQueue::INTERVAL }>,
paths: IntervalGate<{ Path::INTERVAL }>,
paths: IntervalGate<{ Path::CALL_EVERY_INTERVAL_INTERVAL }>,
peers: IntervalGate<{ Peer::INTERVAL }>,
}
@ -137,7 +137,7 @@ impl Node {
///
/// If the auto-generate identity type is not None, a new identity will be generated if
/// no identity is currently stored in the data store.
pub fn new<I: NodeInterface>(ci: &I, auto_generate_identity_type: Option<crate::vl1::identity::Type>) -> Result<Self, InvalidParameterError> {
pub fn new<I: NodeInterface>(ci: &I, auto_generate_identity_type: Option<IdentityType>) -> Result<Self, InvalidParameterError> {
let id = {
let id_str = ci.load_node_identity();
if id_str.is_none() {
@ -149,7 +149,7 @@ impl Node {
id
}
} else {
let id_str = String::from_utf8_lossy(id_str.unwrap());
let id_str = String::from_utf8_lossy(id_str.as_ref().unwrap().as_slice());
let id = Identity::from_str(id_str.as_ref());
if id.is_err() {
return Err(InvalidParameterError("invalid identity"));

View file

@ -13,7 +13,7 @@ use std::sync::atomic::{AtomicI64, Ordering};
use parking_lot::Mutex;
use crate::PacketBuffer;
use crate::util::U64PassThroughHasher;
use crate::util::U64NoOpHasher;
use crate::vl1::Endpoint;
use crate::vl1::fragmentedpacket::FragmentedPacket;
use crate::vl1::node::NodeInterface;
@ -32,12 +32,10 @@ pub struct Path {
local_interface: Option<NonZeroI64>,
last_send_time_ticks: AtomicI64,
last_receive_time_ticks: AtomicI64,
fragmented_packets: Mutex<HashMap<u64, FragmentedPacket, U64PassThroughHasher>>,
fragmented_packets: Mutex<HashMap<u64, FragmentedPacket, U64NoOpHasher>>,
}
impl Path {
pub(crate) const INTERVAL: i64 = PATH_KEEPALIVE_INTERVAL;
#[inline(always)]
pub fn new(endpoint: Endpoint, local_socket: Option<NonZeroI64>, local_interface: Option<NonZeroI64>) -> Self {
Self {
@ -46,7 +44,7 @@ impl Path {
local_interface,
last_send_time_ticks: AtomicI64::new(0),
last_receive_time_ticks: AtomicI64::new(0),
fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(4, U64PassThroughHasher::new())),
fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(4, U64NoOpHasher::new())),
}
}
@ -71,10 +69,10 @@ impl Path {
pub(crate) fn receive_fragment(&self, packet_id: PacketID, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64) -> Option<FragmentedPacket> {
let mut fp = self.fragmented_packets.lock();
// This is mostly a defense against denial of service attacks or broken peers. It will
// trim off about 1/3 of waiting packets if the total is over the limit.
// Discard some old waiting packets if the total incoming fragments for a path exceeds a
// sanity limit. This is to prevent memory exhaustion DOS attacks.
let fps = fp.len();
if fps > FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH {
if fps > PACKET_FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH {
let mut entries: Vec<(i64, u64)> = Vec::new();
entries.reserve(fps);
for f in fp.iter() {
@ -82,10 +80,11 @@ impl Path {
}
entries.sort_unstable_by(|a, b| (*a).0.cmp(&(*b).0));
for i in 0..(fps / 3) {
let _ = fp.remove(&(*unsafe { entries.get_unchecked(i) }).1);
let _ = fp.remove(&(*entries.get(i).unwrap()).1);
}
}
// This is optimized for the fragmented case because that's the most common when transferring data.
if fp.entry(packet_id).or_insert_with(|| FragmentedPacket::new(time_ticks)).add_fragment(packet, fragment_no, fragment_expecting_count) {
fp.remove(&packet_id)
} else {
@ -103,9 +102,12 @@ impl Path {
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
}
/// Desired period between calls to call_every_interval().
pub(crate) const CALL_EVERY_INTERVAL_INTERVAL: i64 = PATH_KEEPALIVE_INTERVAL;
/// Called every INTERVAL during background tasks.
#[inline(always)]
pub(crate) fn call_every_interval<CI: NodeInterface>(&self, ct: &CI, time_ticks: i64) {
self.fragmented_packets.lock().retain(|packet_id, frag| (time_ticks - frag.ts_ticks) < FRAGMENT_EXPIRATION);
self.fragmented_packets.lock().retain(|packet_id, frag| (time_ticks - frag.ts_ticks) < PACKET_FRAGMENT_EXPIRATION);
}
}

View file

@ -7,6 +7,8 @@
*/
use std::convert::TryInto;
use std::mem::MaybeUninit;
use std::num::NonZeroI64;
use std::ptr::copy_nonoverlapping;
use std::sync::Arc;
@ -28,9 +30,12 @@ use crate::{VERSION_MAJOR, VERSION_MINOR, VERSION_PROTO, VERSION_REVISION, Packe
use crate::defaults::UDP_DEFAULT_MTU;
use crate::util::pool::{Pool, PoolFactory};
use crate::util::buffer::Buffer;
use crate::util::{array_range, u64_as_bytes};
use crate::vl1::{Dictionary, Endpoint, Identity, InetAddress, Path};
use crate::vl1::ephemeral::EphemeralSymmetricSecret;
use crate::vl1::node::*;
use crate::vl1::protocol::*;
use crate::vl1::symmetricsecret::SymmetricSecret;
/// Interval for servicing and background operations on peers.
pub(crate) const PEER_SERVICE_INTERVAL: i64 = 30000;
@ -45,6 +50,7 @@ impl PoolFactory<AesGmacSiv> for AesGmacSivPoolFactory {
fn reset(&self, obj: &mut AesGmacSiv) { obj.reset(); }
}
/// A secret key with all its derived forms and initialized ciphers.
struct PeerSecret {
// Time secret was created in ticks for ephemeral secrets, or -1 for static secrets.
create_time_ticks: i64,
@ -60,14 +66,6 @@ struct PeerSecret {
aes: Pool<AesGmacSiv, AesGmacSivPoolFactory>,
}
struct EphemeralKeyPair {
// Time ephemeral key pair was created.
create_time_ticks: i64,
// SHA384(c25519 public | p521 public)
public_keys_hash: [u8; 48],
}
/// A remote peer known to this node.
/// Sending-related and receiving-related fields are locked separately since concurrent
/// send/receive is not uncommon.
@ -76,7 +74,7 @@ pub struct Peer {
identity: Identity,
// Static shared secret computed from agreement with identity.
static_secret: PeerSecret,
static_secret: SymmetricSecret,
// Derived static secret (in initialized cipher) used to encrypt the dictionary part of HELLO.
static_secret_hello_dictionary: Mutex<AesCtr>,
@ -85,10 +83,7 @@ pub struct Peer {
static_secret_packet_hmac: Secret<48>,
// Latest ephemeral secret acknowledged with OK(HELLO).
ephemeral_secret: Mutex<Option<Arc<PeerSecret>>>,
// Either None or the current ephemeral key pair whose public keys are on offer.
ephemeral_pair: Mutex<Option<EphemeralKeyPair>>,
ephemeral_secret: Mutex<Option<Arc<EphemeralSymmetricSecret>>>,
// Paths sorted in descending order of quality / preference.
paths: Mutex<Vec<Arc<Path>>>,
@ -106,8 +101,8 @@ pub struct Peer {
total_bytes_received_indirect: AtomicU64,
total_bytes_forwarded: AtomicU64,
// Counter for assigning packet IV's a.k.a. PacketIDs.
packet_id_counter: AtomicU64,
// Counter for assigning sequential message IDs.
message_id_counter: AtomicU64,
// Remote peer version information.
remote_version: AtomicU64,
@ -137,14 +132,84 @@ fn salsa_derive_per_packet_key(key: &Secret<48>, header: &PacketHeader, packet_s
/// Create initialized instances of Salsa20/12 and Poly1305 for a packet.
#[inline(always)]
fn salsa_poly_create(secret: &PeerSecret, header: &PacketHeader, packet_size: usize) -> (Salsa, Poly1305) {
let key = salsa_derive_per_packet_key(&secret.secret, header, packet_size);
fn salsa_poly_create(secret: &SymmetricSecret, header: &PacketHeader, packet_size: usize) -> (Salsa, Poly1305) {
let key = salsa_derive_per_packet_key(&secret.key, header, packet_size);
let mut salsa = Salsa::new(&key.0[0..32], header.id_bytes(), true).unwrap();
let mut poly1305_key = [0_u8; 32];
salsa.crypt_in_place(&mut poly1305_key);
(salsa, Poly1305::new(&poly1305_key).unwrap())
}
/// Attempt AEAD packet encryption and MAC validation.
fn try_aead_decrypt(secret: &SymmetricSecret, packet_frag0_payload_bytes: &[u8], header: &PacketHeader, fragments: &[Option<PacketBuffer>], payload: &mut Buffer<PACKET_SIZE_MAX>, message_id: &mut u64) -> bool {
packet_frag0_payload_bytes.get(0).map_or(false, |verb| {
match header.cipher() {
CIPHER_NOCRYPT_POLY1305 => {
if (verb & VERB_MASK) == VERB_VL1_HELLO {
let _ = payload.append_bytes(packet_frag0_payload_bytes);
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| payload.append_bytes(f)));
}
let (_, mut poly) = salsa_poly_create(secret, header, packet_frag0_payload_bytes.len() + PACKET_HEADER_SIZE); // frag0 total size; the whole packet buffer is not in scope here
poly.update(payload.as_bytes());
if poly.finish()[0..8].eq(&header.mac) {
*message_id = u64::from_ne_bytes(header.id);
true
} else {
false
}
} else {
// Only HELLO is permitted without payload encryption. Drop other packet types if sent this way.
false
}
}
CIPHER_SALSA2012_POLY1305 => {
let (mut salsa, mut poly) = salsa_poly_create(secret, header, packet_frag0_payload_bytes.len() + PACKET_HEADER_SIZE);
poly.update(packet_frag0_payload_bytes);
let _ = payload.append_bytes_get_mut(packet_frag0_payload_bytes.len()).map(|b| salsa.crypt(packet_frag0_payload_bytes, b));
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| {
poly.update(f);
let _ = payload.append_bytes_get_mut(f.len()).map(|b| salsa.crypt(f, b));
}));
}
if poly.finish()[0..8].eq(&header.mac) {
*message_id = u64::from_ne_bytes(header.id);
true
} else {
false
}
}
CIPHER_AES_GMAC_SIV => {
let mut aes = secret.aes_gmac_siv.get();
aes.decrypt_init(&header.aes_gmac_siv_tag());
aes.decrypt_set_aad(&header.aad_bytes());
// NOTE: if there are somehow missing fragments this part will silently fail,
// but the packet will fail MAC check in decrypt_finish() so meh.
let _ = payload.append_bytes_get_mut(packet_frag0_payload_bytes.len()).map(|b| aes.decrypt(packet_frag0_payload_bytes, b));
for f in fragments.iter() {
f.as_ref().map(|f| {
f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| {
let _ = payload.append_bytes_get_mut(f.len()).map(|b| aes.decrypt(f, b));
})
});
}
aes.decrypt_finish().map_or(false, |tag| {
// AES-GMAC-SIV encrypts the packet ID too as part of its computation of a single
// opaque 128-bit tag, so to get the original packet ID we have to grab it from the
// decrypted tag.
*message_id = u64::from_ne_bytes(*array_range::<u8, 16, 0, 8>(tag));
true
})
}
_ => false,
}
})
}
impl Peer {
pub(crate) const INTERVAL: i64 = PEER_SERVICE_INTERVAL;
@ -153,23 +218,12 @@ impl Peer {
/// fatal error occurs performing key agreement between the two identities.
pub(crate) fn new(this_node_identity: &Identity, id: Identity) -> Option<Peer> {
this_node_identity.agree(&id).map(|static_secret| {
let aes_factory = AesGmacSivPoolFactory(
zt_kbkdf_hmac_sha384(&static_secret.0, KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K0, 0, 0),
zt_kbkdf_hmac_sha384(&static_secret.0, KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K1, 0, 0));
let static_secret_hello_dictionary = zt_kbkdf_hmac_sha384(&static_secret.0, KBKDF_KEY_USAGE_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0);
let static_secret_packet_hmac = zt_kbkdf_hmac_sha384(&static_secret.0, KBKDF_KEY_USAGE_LABEL_PACKET_HMAC, 0, 0);
Peer {
identity: id,
static_secret: PeerSecret {
create_time_ticks: -1,
encrypt_count: AtomicU64::new(0),
secret: static_secret,
aes: Pool::new(4, aes_factory),
},
static_secret: SymmetricSecret::new(static_secret),
static_secret_hello_dictionary: Mutex::new(AesCtr::new(&static_secret_hello_dictionary.0[0..32])),
static_secret_packet_hmac,
ephemeral_secret: Mutex::new(None),
ephemeral_pair: Mutex::new(None),
paths: Mutex::new(Vec::new()),
reported_local_ip: Mutex::new(None),
last_send_time_ticks: AtomicI64::new(0),
@ -180,207 +234,146 @@ impl Peer {
total_bytes_received: AtomicU64::new(0),
total_bytes_received_indirect: AtomicU64::new(0),
total_bytes_forwarded: AtomicU64::new(0),
packet_id_counter: AtomicU64::new(next_u64_secure()),
message_id_counter: AtomicU64::new(next_u64_secure()),
remote_version: AtomicU64::new(0),
remote_protocol_version: AtomicU8::new(0),
}
})
}
/// Get the next packet ID / IV.
/// Get the next message ID.
#[inline(always)]
pub(crate) fn next_packet_id(&self) -> PacketID { self.packet_id_counter.fetch_add(1, Ordering::Relaxed) }
pub(crate) fn next_message_id(&self) -> u64 { self.message_id_counter.fetch_add(1, Ordering::Relaxed) }
/// Receive, decrypt, authenticate, and process an incoming packet from this peer.
/// If the packet comes in multiple fragments, the fragments slice should contain all
/// those fragments after the main packet header and first chunk.
pub(crate) fn receive<CI: NodeInterface, PH: VL1PacketHandler>(&self, node: &Node, ci: &CI, ph: &PH, time_ticks: i64, source_path: &Arc<Path>, header: &PacketHeader, packet: &Buffer<{ PACKET_SIZE_MAX }>, fragments: &[Option<PacketBuffer>]) {
let _ = packet.as_bytes_starting_at(PACKET_VERB_INDEX).map(|packet_frag0_payload_bytes| {
let mut payload = node.get_packet_buffer();
let mut forward_secrecy = true; // set to false below if ephemeral fails
let mut packet_id = header.id as u64;
let cipher = header.cipher();
let ephemeral_secret = self.ephemeral_secret.lock().clone();
for secret in [ephemeral_secret.as_ref().map_or(&self.static_secret, |s| s.as_ref()), &self.static_secret] {
match cipher {
CIPHER_NOCRYPT_POLY1305 => {
if (packet_frag0_payload_bytes[0] & VERB_MASK) == VERB_VL1_HELLO {
let _ = payload.append_bytes(packet_frag0_payload_bytes);
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| payload.append_bytes(f)));
}
// FIPS note: for FIPS purposes the HMAC-SHA384 tag at the end of V2 HELLOs
// will be considered the "real" handshake authentication. This authentication
// is technically deprecated in V2.
let (_, mut poly) = salsa_poly_create(secret, header, packet.len());
poly.update(payload.as_bytes());
if poly.finish()[0..8].eq(&header.message_auth) {
break;
}
} else {
// Only HELLO is permitted without payload encryption. Drop other packet types if sent this way.
return;
}
}
CIPHER_SALSA2012_POLY1305 => {
let (mut salsa, mut poly) = salsa_poly_create(secret, header, packet.len());
poly.update(packet_frag0_payload_bytes);
let _ = payload.append_and_init_bytes(packet_frag0_payload_bytes.len(), |b| salsa.crypt(packet_frag0_payload_bytes, b));
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| {
poly.update(f);
let _ = payload.append_and_init_bytes(f.len(), |b| salsa.crypt(f, b));
}));
}
if poly.finish()[0..8].eq(&header.message_auth) {
break;
}
}
CIPHER_AES_GMAC_SIV => {
let mut aes = secret.aes.get();
aes.decrypt_init(&header.aes_gmac_siv_tag());
aes.decrypt_set_aad(&header.aad_bytes());
let _ = payload.append_and_init_bytes(packet_frag0_payload_bytes.len(), |b| aes.decrypt(packet_frag0_payload_bytes, b));
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| payload.append_and_init_bytes(f.len(), |b| aes.decrypt(f, b))));
}
let tag = aes.decrypt_finish();
if tag.is_some() {
// For AES-GMAC-SIV we need to grab the original packet ID from the decrypted tag.
let tag = tag.unwrap();
unsafe { copy_nonoverlapping(tag.as_ptr(), (&mut packet_id as *mut u64).cast(), 8) };
break;
}
}
_ => {
return;
}
}
if (secret as *const PeerSecret) == (&self.static_secret as *const PeerSecret) {
// If the static secret failed to authenticate it means we either didn't have an
// ephemeral key or the ephemeral also failed (as it's tried first).
let mut payload: Buffer<PACKET_SIZE_MAX> = unsafe { Buffer::new_nozero() };
let mut message_id = 0_u64;
let mut forward_secrecy = true;
let ephemeral_secret: Option<Arc<EphemeralSymmetricSecret>> = self.ephemeral_secret.lock().clone();
if !ephemeral_secret.as_ref().map_or(false, |ephemeral_secret| try_aead_decrypt(&ephemeral_secret.secret, packet_frag0_payload_bytes, header, fragments, &mut payload, &mut message_id)) {
unsafe { payload.set_size_unchecked(0); }
if !try_aead_decrypt(&self.static_secret, packet_frag0_payload_bytes, header, fragments, &mut payload, &mut message_id) {
return;
} else {
// If ephemeral failed, static secret will be tried. Set forward secrecy to false.
forward_secrecy = false;
payload.clear();
}
forward_secrecy = false;
}
drop(ephemeral_secret);
// If decryption and authentication succeeded, the code above will break out of the
// for loop and end up here. Otherwise it returns from the whole function.
self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_received.fetch_add((payload.len() + PACKET_HEADER_SIZE) as u64, Ordering::Relaxed);
let _ = payload.u8_at(0).map(|mut verb| {
let extended_authentication = (verb & VERB_FLAG_EXTENDED_AUTHENTICATION) != 0;
if extended_authentication {
let auth_bytes = payload.as_bytes();
if auth_bytes.len() >= (1 + SHA384_HASH_SIZE) {
let packet_hmac_start = auth_bytes.len() - SHA384_HASH_SIZE;
if !SHA384::hmac(self.static_secret_packet_hmac.as_ref(), &auth_bytes[1..packet_hmac_start]).eq(&auth_bytes[packet_hmac_start..]) {
return;
}
let new_len = payload.len() - SHA384_HASH_SIZE;
payload.set_size(new_len);
} else {
debug_assert!(!payload.is_empty()); // should be impossible since an empty payload fails in try_aead_decrypt()
let mut verb = payload.as_bytes()[0];
// If this flag is set, the end of the payload is a full HMAC-SHA384 authentication
// tag for much stronger authentication.
let extended_authentication = (verb & VERB_FLAG_EXTENDED_AUTHENTICATION) != 0;
if extended_authentication {
if payload.len() >= (1 + SHA384_HASH_SIZE) {
let actual_end_of_payload = payload.len() - SHA384_HASH_SIZE;
let hmac = SHA384::hmac_multipart(self.static_secret_packet_hmac.as_ref(), &[u64_as_bytes(&message_id), payload.as_bytes()]);
if !hmac.eq(&(payload.as_bytes()[actual_end_of_payload..])) {
return;
}
payload.set_size(actual_end_of_payload);
} else {
return;
}
}
if (verb & VERB_FLAG_COMPRESSED) != 0 {
let mut decompressed_payload = node.get_packet_buffer();
let _ = decompressed_payload.append_u8(verb);
let dlen = lz4_flex::block::decompress_into(&payload.as_bytes()[1..], &mut decompressed_payload.as_bytes_mut());
if dlen.is_ok() {
decompressed_payload.set_size(dlen.unwrap());
payload = decompressed_payload;
} else {
return;
}
if (verb & VERB_FLAG_COMPRESSED) != 0 {
let mut decompressed_payload: [u8; PACKET_SIZE_MAX] = unsafe { MaybeUninit::uninit().assume_init() };
decompressed_payload[0] = verb;
let dlen = lz4_flex::block::decompress_into(&payload.as_bytes()[1..], &mut decompressed_payload[1..]);
if dlen.is_ok() {
payload.set_to(&decompressed_payload[0..(dlen.unwrap() + 1)]);
} else {
return;
}
}
// For performance reasons we let VL2 handle packets first. It returns false
// if it didn't handle the packet, in which case it's handled at VL1. This is
// because the most performance critical path is the handling of the ???_FRAME
// verbs, which are in VL2.
verb &= VERB_MASK;
if !ph.handle_packet(self, source_path, forward_secrecy, extended_authentication, verb, payload.as_ref()) {
match verb {
//VERB_VL1_NOP => {}
VERB_VL1_HELLO => self.receive_hello(ci, node, time_ticks, source_path, payload.as_ref()),
VERB_VL1_ERROR => self.receive_error(ci, ph, node, time_ticks, source_path, forward_secrecy, extended_authentication, payload.as_ref()),
VERB_VL1_OK => self.receive_ok(ci, ph, node, time_ticks, source_path, forward_secrecy, extended_authentication, payload.as_ref()),
VERB_VL1_WHOIS => self.receive_whois(ci, node, time_ticks, source_path, payload.as_ref()),
VERB_VL1_RENDEZVOUS => self.receive_rendezvous(ci, node, time_ticks, source_path, payload.as_ref()),
VERB_VL1_ECHO => self.receive_echo(ci, node, time_ticks, source_path, payload.as_ref()),
VERB_VL1_PUSH_DIRECT_PATHS => self.receive_push_direct_paths(ci, node, time_ticks, source_path, payload.as_ref()),
VERB_VL1_USER_MESSAGE => self.receive_user_message(ci, node, time_ticks, source_path, payload.as_ref()),
_ => {}
}
// For performance reasons we let VL2 handle packets first. It returns false
// if it didn't handle the packet, in which case it's handled at VL1. This is
// because the most performance critical path is the handling of the ???_FRAME
// verbs, which are in VL2.
verb &= VERB_MASK;
if !ph.handle_packet(self, source_path, forward_secrecy, extended_authentication, verb, &payload) {
match verb {
//VERB_VL1_NOP => {}
VERB_VL1_HELLO => self.receive_hello(ci, node, time_ticks, source_path, &payload),
VERB_VL1_ERROR => self.receive_error(ci, ph, node, time_ticks, source_path, forward_secrecy, extended_authentication, &payload),
VERB_VL1_OK => self.receive_ok(ci, ph, node, time_ticks, source_path, forward_secrecy, extended_authentication, &payload),
VERB_VL1_WHOIS => self.receive_whois(ci, node, time_ticks, source_path, &payload),
VERB_VL1_RENDEZVOUS => self.receive_rendezvous(ci, node, time_ticks, source_path, &payload),
VERB_VL1_ECHO => self.receive_echo(ci, node, time_ticks, source_path, &payload),
VERB_VL1_PUSH_DIRECT_PATHS => self.receive_push_direct_paths(ci, node, time_ticks, source_path, &payload),
VERB_VL1_USER_MESSAGE => self.receive_user_message(ci, node, time_ticks, source_path, &payload),
_ => {}
}
});
}
});
}
fn send_to_endpoint<CI: NodeInterface>(&self, ci: &CI, endpoint: &Endpoint, local_socket: Option<NonZeroI64>, local_interface: Option<NonZeroI64>, packet_id: PacketID, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
fn send_to_endpoint<CI: NodeInterface>(&self, ci: &CI, endpoint: &Endpoint, local_socket: Option<NonZeroI64>, local_interface: Option<NonZeroI64>, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
debug_assert!(packet.len() <= PACKET_SIZE_MAX);
if matches!(endpoint, Endpoint::IpUdp(_)) {
let packet_size = packet.len();
if packet_size > UDP_DEFAULT_MTU {
let bytes = packet.as_bytes();
if !ci.wire_send(endpoint, local_socket, local_interface, &[&bytes[0..UDP_DEFAULT_MTU]], 0) {
return false;
}
let mut pos = UDP_DEFAULT_MTU;
let fragment_count = (((packet_size - UDP_DEFAULT_MTU) as u32) / ((UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32)) + ((((packet_size - UDP_DEFAULT_MTU) as u32) % ((UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32)) != 0) as u32;
debug_assert!(fragment_count <= FRAGMENT_COUNT_MAX as u32);
let mut header = FragmentHeader {
id: packet_id,
dest: bytes[PACKET_DESTINATION_INDEX..PACKET_DESTINATION_INDEX + ADDRESS_SIZE].try_into().unwrap(),
fragment_indicator: FRAGMENT_INDICATOR,
total_and_fragment_no: ((fragment_count + 1) << 4) as u8,
reserved_hops: 0,
};
let mut chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
loop {
header.total_and_fragment_no += 1;
let next_pos = pos + chunk_size;
if !ci.wire_send(endpoint, local_socket, local_interface, &[header.as_bytes(), &bytes[pos..next_pos]], 0) {
debug_assert!(packet.len() >= PACKET_SIZE_MIN);
match endpoint {
Endpoint::Ip(_) | Endpoint::IpUdp(_) | Endpoint::Ethernet(_) | Endpoint::Bluetooth(_) | Endpoint::WifiDirect(_) => {
let packet_size = packet.len();
if packet_size > UDP_DEFAULT_MTU {
let bytes = packet.as_bytes();
if !ci.wire_send(endpoint, local_socket, local_interface, &[&bytes[0..UDP_DEFAULT_MTU]], 0) {
return false;
}
pos = next_pos;
if pos < packet_size {
chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
} else {
return true;
let mut pos = UDP_DEFAULT_MTU;
let overrun_size = (packet_size - UDP_DEFAULT_MTU) as u32;
let fragment_count = (overrun_size / (UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32) + (((overrun_size % (UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32) != 0) as u32);
debug_assert!(fragment_count <= PACKET_FRAGMENT_COUNT_MAX as u32);
let mut header = FragmentHeader {
id: unsafe { *packet.as_bytes().as_ptr().cast::<[u8; 8]>() },
dest: bytes[PACKET_DESTINATION_INDEX..PACKET_DESTINATION_INDEX + ADDRESS_SIZE].try_into().unwrap(),
fragment_indicator: PACKET_FRAGMENT_INDICATOR,
total_and_fragment_no: ((fragment_count + 1) << 4) as u8,
reserved_hops: 0,
};
let mut chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
loop {
header.total_and_fragment_no += 1;
let next_pos = pos + chunk_size;
if !ci.wire_send(endpoint, local_socket, local_interface, &[header.as_bytes(), &bytes[pos..next_pos]], 0) {
return false;
}
pos = next_pos;
if pos < packet_size {
chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
} else {
return true;
}
}
} else {
return ci.wire_send(endpoint, local_socket, local_interface, &[packet.as_bytes()], 0);
}
}
_ => {
return ci.wire_send(endpoint, local_socket, local_interface, &[packet.as_bytes()], 0);
}
}
return ci.wire_send(endpoint, local_socket, local_interface, &[packet.as_bytes()], 0);
}
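For intuition, the fragment-count arithmetic above worked for a hypothetical 3000-byte packet, assuming UDP_DEFAULT_MTU is 1432 (FRAGMENT_HEADER_SIZE = 16 comes from this commit's protocol constants):

let (mtu, fhs, packet_size) = (1432_usize, 16_usize, 3000_usize);
let overrun = (packet_size - mtu) as u32; // 1568 bytes beyond the head packet
let per_frag = (mtu - fhs) as u32;        // 1416 payload bytes per fragment
let fragment_count = overrun / per_frag + ((overrun % per_frag != 0) as u32);
assert_eq!(fragment_count, 2);            // head + 2 fragments on the wire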
/// Send a packet to this peer.
///
/// This will go directly if there is an active path, or otherwise indirectly
/// via a root or some other route.
pub(crate) fn send<CI: NodeInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, packet_id: PacketID, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
pub(crate) fn send<CI: NodeInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
self.path(node).map_or(false, |path| {
if self.send_to_endpoint(ci, path.endpoint(), path.local_socket(), path.local_interface(), packet_id, packet) {
if self.send_to_endpoint(ci, path.endpoint(), path.local_socket(), path.local_interface(), packet) {
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
true
@ -422,84 +415,70 @@ impl Peer {
let mut packet: Buffer<{ PACKET_SIZE_MAX }> = Buffer::new();
let time_ticks = ci.time_ticks();
let packet_id = self.next_packet_id();
debug_assert!(packet.append_and_init_struct(|header: &mut PacketHeader| {
header.id = packet_id;
header.dest = self.identity.address().to_bytes();
header.src = node.address().to_bytes();
header.flags_cipher_hops = CIPHER_NOCRYPT_POLY1305;
}).is_ok());
debug_assert!(packet.append_and_init_struct(|header: &mut message_component_structs::HelloFixedHeaderFields| {
header.verb = VERB_VL1_HELLO | VERB_FLAG_EXTENDED_AUTHENTICATION;
header.version_proto = VERSION_PROTO;
header.version_major = VERSION_MAJOR;
header.version_minor = VERSION_MINOR;
header.version_revision = (VERSION_REVISION as u16).to_be();
header.timestamp = (time_ticks as u64).to_be();
}).is_ok());
let message_id = self.next_message_id();
let packet_header: &mut PacketHeader = packet.append_struct_get_mut().unwrap();
let hello_fixed_headers: &mut message_component_structs::HelloFixedHeaderFields = packet.append_struct_get_mut().unwrap();
packet_header.id = message_id.to_ne_bytes(); // packet ID and message ID are the same when Poly1305 MAC is used
packet_header.dest = self.identity.address().to_bytes();
packet_header.src = node.address().to_bytes();
packet_header.flags_cipher_hops = CIPHER_NOCRYPT_POLY1305;
hello_fixed_headers.verb = VERB_VL1_HELLO | VERB_FLAG_EXTENDED_AUTHENTICATION;
hello_fixed_headers.version_proto = VERSION_PROTO;
hello_fixed_headers.version_major = VERSION_MAJOR;
hello_fixed_headers.version_minor = VERSION_MINOR;
hello_fixed_headers.version_revision = (VERSION_REVISION as u16).to_be();
hello_fixed_headers.timestamp = (time_ticks as u64).to_be();
debug_assert!(self.identity.marshal(&mut packet, false).is_ok());
debug_assert!(endpoint.marshal(&mut packet).is_ok());
// Write an IV for AES-CTR encryption of the dictionary and allocate two more
// bytes for reserved legacy use below.
let aes_ctr_iv_position = packet.len();
debug_assert!(packet.append_and_init_bytes_fixed(|iv: &mut [u8; 18]| {
zerotier_core_crypto::random::fill_bytes_secure(&mut iv[0..16]);
iv[12] &= 0x7f; // mask off MSB of counter in iv to play nice with some AES-CTR implementations
let aes_ctr_iv: &mut [u8; 18] = packet.append_bytes_fixed_get_mut().unwrap();
zerotier_core_crypto::random::fill_bytes_secure(&mut aes_ctr_iv[0..16]);
aes_ctr_iv[12] &= 0x7f; // mask off MSB of counter in iv to play nice with some AES-CTR implementations
// LEGACY: create a 16-bit encrypted field that specifies zero moons. This is ignored by v2
// but causes v1 nodes to be able to parse this packet properly. This is not significant in
// terms of encryption or authentication.
let mut salsa_iv = packet_id.to_ne_bytes();
salsa_iv[7] &= 0xf8;
Salsa::new(&self.static_secret.secret.0[0..32], &salsa_iv, true).unwrap().crypt(&[0_u8, 0_u8], &mut iv[16..18]);
}).is_ok());
// LEGACY: create a 16-bit encrypted field that specifies zero "moons." This is ignored now
// but causes old nodes to be able to parse this packet properly. This is not significant in
// terms of encryption or authentication and can disappear once old versions are dead. Newer
// versions ignore these bytes.
let mut salsa_iv = message_id.to_ne_bytes();
salsa_iv[7] &= 0xf8;
Salsa::new(&self.static_secret.secret.0[0..32], &salsa_iv, true).unwrap().crypt(&[0_u8, 0_u8], &mut aes_ctr_iv[16..18]);
// Create dictionary that contains extended HELLO fields.
let dict_start_position = packet.len();
let mut dict = Dictionary::new();
dict.set_u64(HELLO_DICT_KEY_INSTANCE_ID, node.instance_id);
dict.set_u64(HELLO_DICT_KEY_CLOCK, ci.time_clock() as u64);
if node.is_peer_root(self) {
// If the peer is a root we include some extra information for diagnostic and statistics
// purposes such as the CPU type, bits, and OS info. This is not sent to other peers.
dict.set_str(HELLO_DICT_KEY_SYS_ARCH, std::env::consts::ARCH);
#[cfg(target_pointer_width = "32")] {
dict.set_u64(HELLO_DICT_KEY_SYS_BITS, 32);
}
#[cfg(target_pointer_width = "64")] {
dict.set_u64(HELLO_DICT_KEY_SYS_BITS, 64);
}
dict.set_str(HELLO_DICT_KEY_OS_NAME, std::env::consts::OS);
}
let mut flags = String::new();
// TODO
//if node.fips_mode() {
// flags.push('F');
//}
dict.set_str(HELLO_DICT_KEY_FLAGS, flags.as_str());
debug_assert!(dict.write_to(&mut packet).is_ok());
// Encrypt extended fields with AES-CTR.
let mut dict_aes = self.static_secret_hello_dictionary.lock();
dict_aes.init(&packet.as_bytes()[aes_ctr_iv_position..aes_ctr_iv_position + 16]);
dict_aes.crypt_in_place(&mut packet.as_bytes_mut()[dict_start_position..]);
drop(dict_aes);
debug_assert!(packet.append_u16(0).is_ok());
debug_assert!(packet.append_bytes_fixed(&SHA384::hmac(self.static_secret_packet_hmac.as_ref(), packet.as_bytes_starting_at(PACKET_HEADER_SIZE).unwrap())).is_ok());
// Append extended authentication HMAC.
debug_assert!(packet.append_bytes_fixed(&SHA384::hmac_multipart(self.static_secret_packet_hmac.as_ref(), &[u64_as_bytes(&message_id), &packet.as_bytes()[PACKET_HEADER_SIZE..]])).is_ok());
// Set outer packet MAC. We use legacy poly1305 for HELLO for backward
// compatibility, but note that newer nodes and roots will check the full
// HMAC-SHA384 above.
let (_, mut poly) = salsa_poly_create(&self.static_secret, packet.struct_at::<PacketHeader>(0).unwrap(), packet.len());
poly.update(packet.as_bytes_starting_at(PACKET_HEADER_SIZE).unwrap());
packet.as_bytes_mut()[HEADER_MAC_FIELD_INDEX..HEADER_MAC_FIELD_INDEX + 8].copy_from_slice(&poly.finish()[0..8]);
packet_header.mac.copy_from_slice(&poly.finish()[0..8]);
self.static_secret.encrypt_count.fetch_add(1, Ordering::Relaxed);
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
path.as_ref().map_or_else(|| {
self.send_to_endpoint(ci, endpoint, None, None, packet_id, &packet)
self.send_to_endpoint(ci, endpoint, None, None, &packet)
}, |path| {
path.log_send(time_ticks);
self.send_to_endpoint(ci, endpoint, path.local_socket(), path.local_interface(), packet_id, &packet)
self.send_to_endpoint(ci, endpoint, path.local_socket(), path.local_interface(), &packet)
})
})
}
@ -516,7 +495,7 @@ impl Peer {
let mut cursor: usize = 0;
let _ = payload.read_struct::<message_component_structs::ErrorHeader>(&mut cursor).map(|error_header| {
let in_re_packet_id = error_header.in_re_packet_id;
let current_packet_id_counter = self.packet_id_counter.load(Ordering::Relaxed);
let current_packet_id_counter = self.message_id_counter.load(Ordering::Relaxed);
if current_packet_id_counter.checked_sub(in_re_packet_id).map_or_else(|| {
(!in_re_packet_id).wrapping_add(current_packet_id_counter) < PACKET_RESPONSE_COUNTER_DELTA_MAX
}, |packets_ago| {
@ -536,7 +515,7 @@ impl Peer {
let mut cursor: usize = 0;
let _ = payload.read_struct::<message_component_structs::OkHeader>(&mut cursor).map(|ok_header| {
let in_re_packet_id = ok_header.in_re_packet_id;
let current_packet_id_counter = self.packet_id_counter.load(Ordering::Relaxed);
let current_packet_id_counter = self.message_id_counter.load(Ordering::Relaxed);
if current_packet_id_counter.checked_sub(in_re_packet_id).map_or_else(|| {
(!in_re_packet_id).wrapping_add(current_packet_id_counter) < PACKET_RESPONSE_COUNTER_DELTA_MAX
}, |packets_ago| {
@ -571,7 +550,10 @@ impl Peer {
fn receive_user_message<CI: NodeInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
/// Get current best path or None if there are no direct paths to this peer.
pub fn direct_path(&self) -> Option<Arc<Path>> { self.paths.lock().first().map(|p| p.clone()) }
#[inline(always)]
pub fn direct_path(&self) -> Option<Arc<Path>> {
self.paths.lock().first().map(|p| p.clone())
}
/// Get either the current best direct path or an indirect path.
pub fn path(&self, node: &Node) -> Option<Arc<Path>> {

View file

@ -24,16 +24,6 @@ pub const VERB_VL1_USER_MESSAGE: u8 = 0x14;
pub const HELLO_DICT_KEY_INSTANCE_ID: &'static str = "I";
pub const HELLO_DICT_KEY_CLOCK: &'static str = "C";
pub const HELLO_DICT_KEY_EPHEMERAL_PUBLIC: &'static str = "E";
pub const HELLO_DICT_KEY_EPHEMERAL_ACK: &'static str = "e";
pub const HELLO_DICT_KEY_HELLO_ORIGIN: &'static str = "@";
pub const HELLO_DICT_KEY_SYS_ARCH: &'static str = "Sa";
pub const HELLO_DICT_KEY_SYS_BITS: &'static str = "Sb";
pub const HELLO_DICT_KEY_OS_NAME: &'static str = "So";
pub const HELLO_DICT_KEY_OS_VERSION: &'static str = "Sv";
pub const HELLO_DICT_KEY_OS_VARIANT: &'static str = "St";
pub const HELLO_DICT_KEY_VENDOR: &'static str = "V";
pub const HELLO_DICT_KEY_FLAGS: &'static str = "+";
/// KBKDF usage label indicating a key used to encrypt the dictionary inside HELLO.
pub const KBKDF_KEY_USAGE_LABEL_HELLO_DICTIONARY_ENCRYPT: u8 = b'H';
@ -126,26 +116,26 @@ pub const CIPHER_AES_GMAC_SIV: u8 = 0x30;
pub const HEADER_FLAG_FRAGMENTED: u8 = 0x40;
/// Minimum size of a fragment.
pub const FRAGMENT_SIZE_MIN: usize = 16;
pub const PACKET_FRAGMENT_SIZE_MIN: usize = 16;
/// Size of fragment header after which data begins.
pub const FRAGMENT_HEADER_SIZE: usize = 16;
/// Maximum allowed number of fragments.
pub const FRAGMENT_COUNT_MAX: usize = 8;
pub const PACKET_FRAGMENT_COUNT_MAX: usize = 8;
/// Time after which an incomplete fragmented packet expires.
pub const FRAGMENT_EXPIRATION: i64 = 1500;
pub const PACKET_FRAGMENT_EXPIRATION: i64 = 1500;
/// Maximum number of inbound fragmented packets to handle at once per path.
/// This is a sanity limit to prevent memory exhaustion due to DOS attacks or broken peers.
pub const FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH: usize = 256;
pub const PACKET_FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH: usize = 256;
/// Index of packet fragment indicator byte to detect fragments.
pub const FRAGMENT_INDICATOR_INDEX: usize = 13;
pub const PACKET_FRAGMENT_INDICATOR_INDEX: usize = 13;
/// Byte found at PACKET_FRAGMENT_INDICATOR_INDEX to indicate a fragment.
pub const FRAGMENT_INDICATOR: u8 = 0xff;
pub const PACKET_FRAGMENT_INDICATOR: u8 = 0xff;
/// Verb (inner) flag indicating that the packet's payload (after the verb) is LZ4 compressed.
pub const VERB_FLAG_COMPRESSED: u8 = 0x80;
@ -232,33 +222,17 @@ pub fn compress_packet(src: &[u8], dest: &mut Buffer<{ PACKET_SIZE_MAX }>) -> bo
return false;
}
/// Add HMAC-SHA384 to the end of a packet and set verb flag.
#[inline(always)]
pub fn add_extended_auth(pkt: &mut Buffer<{ PACKET_SIZE_MAX }>, hmac_secret_key: &[u8]) -> std::io::Result<()> {
pkt.append_bytes_fixed(&zerotier_core_crypto::hash::SHA384::hmac(hmac_secret_key, pkt.as_bytes_starting_at(PACKET_VERB_INDEX + 1)?))?;
pkt.as_bytes_mut()[PACKET_VERB_INDEX] |= VERB_FLAG_EXTENDED_AUTHENTICATION;
Ok(())
}
/// A unique packet identifier, also the cryptographic nonce.
///
/// Packet IDs are stored as u64s for efficiency but they should be treated as
/// [u8; 8] fields in that their endianness is "wire" endian. If for some reason
/// packet IDs need to be portably compared or shared across systems they should
/// be treated as bytes not integers.
pub type PacketID = u64;
/// ZeroTier unencrypted outer packet header
///
/// This is the header for a complete packet. If the fragmented flag is set, it will
/// arrive with one or more fragments that must be assembled to complete it.
#[repr(packed)]
pub struct PacketHeader {
pub id: PacketID,
pub id: [u8; 8],
pub dest: [u8; 5],
pub src: [u8; 5],
pub flags_cipher_hops: u8,
pub message_auth: [u8; 8],
pub mac: [u8; 8],
}
unsafe impl RawObject for PacketHeader {}
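Because both wire headers are read and written by casting raw buffer memory, their packed layouts must be byte-exact. A sanity sketch (27 = 8 + 5 + 5 + 1 + 8 matches PACKET_HEADER_SIZE; 16 = 8 + 5 + 1 + 1 + 1 matches FRAGMENT_HEADER_SIZE for the FragmentHeader defined below):

assert_eq!(std::mem::size_of::<PacketHeader>(), 27);
assert_eq!(std::mem::size_of::<FragmentHeader>(), 16);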
@ -281,9 +255,6 @@ impl PacketHeader {
#[inline(always)]
pub fn is_fragmented(&self) -> bool { (self.flags_cipher_hops & HEADER_FLAG_FRAGMENTED) != 0 }
#[inline(always)]
pub fn id_bytes(&self) -> &[u8; 8] { unsafe { &*(self as *const Self).cast::<[u8; 8]>() } }
#[inline(always)]
pub fn as_bytes(&self) -> &[u8; PACKET_HEADER_SIZE] { unsafe { &*(self as *const Self).cast::<[u8; PACKET_HEADER_SIZE]>() } }
@ -300,7 +271,7 @@ impl PacketHeader {
pub fn aes_gmac_siv_tag(&self) -> [u8; 16] {
let mut id = unsafe { MaybeUninit::<[u8; 16]>::uninit().assume_init() };
id[0..8].copy_from_slice(self.id_bytes());
id[8..16].copy_from_slice(&self.message_auth);
id[8..16].copy_from_slice(&self.mac);
id
}
}
@ -313,7 +284,7 @@ impl PacketHeader {
/// bit set and remaining fragments being these.
#[repr(packed)]
pub struct FragmentHeader {
pub id: PacketID, // packet ID
pub id: [u8; 8], // (outer) packet ID
pub dest: [u8; 5], // destination address
pub fragment_indicator: u8, // always 0xff in fragments
pub total_and_fragment_no: u8, // TTTTNNNN (fragment number, total fragments)
@ -324,7 +295,7 @@ unsafe impl RawObject for FragmentHeader {}
impl FragmentHeader {
#[inline(always)]
pub fn is_fragment(&self) -> bool { self.fragment_indicator == FRAGMENT_INDICATOR }
pub fn is_fragment(&self) -> bool { self.fragment_indicator == PACKET_FRAGMENT_INDICATOR }
#[inline(always)]
pub fn total_fragments(&self) -> u8 { self.total_and_fragment_no >> 4 }
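// Illustrative counterpart to total_fragments() above, assuming the low nibble holds the
// fragment number per the TTTTNNNN layout noted on total_and_fragment_no (hypothetical
// helper, not part of this commit):
#[inline(always)]
pub fn fragment_no(&self) -> u8 { self.total_and_fragment_no & 0x0f }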
@ -412,11 +383,11 @@ mod tests {
}
let bar = PacketHeader{
id: 0x0102030405060708_u64.to_be(),
id: [1_u8, 2, 3, 4, 5, 6, 7, 8],
dest: [0_u8; 5],
src: [0_u8; 5],
flags_cipher_hops: 0,
message_auth: [0_u8; 8],
mac: [0_u8; 8],
};
assert_eq!(bar.id_bytes().clone(), [1_u8, 2, 3, 4, 5, 6, 7, 8]);
}

View file

@ -2236,6 +2236,7 @@ dependencies = [
"digest_auth",
"libc",
"mach",
"num-traits",
"num_cpus",
"parking_lot",
"serde",

View file

@ -12,6 +12,7 @@ codegen-units = 1
panic = 'abort'
[dependencies]
num-traits = "^0"
zerotier-network-hypervisor = { path = "../zerotier-network-hypervisor" }
zerotier-core-crypto = { path = "../zerotier-core-crypto" }
serde = { version = "^1", features = ["derive"], default-features = false }

View file

@ -6,7 +6,16 @@
* https://www.zerotier.com/
*/
/*
* This is a threaded UDP socket listener for high performance. The fastest way to receive UDP
* (without heroic efforts like kernel bypass) on most platforms is to create a separate socket
* for each thread using options like SO_REUSEPORT, and to read from all of them concurrently.
*/
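// A minimal sketch of the per-thread setup described above, not part of this commit: each
// worker thread creates its own socket with SO_REUSEPORT set (availability varies by
// platform) and binds it to the same local address, letting the kernel spread inbound
// datagrams across threads. Error handling elided; `example_reuseport_socket` is hypothetical.
#[cfg(unix)]
#[allow(dead_code)]
fn example_reuseport_socket() -> c_int {
    unsafe {
        let s = libc::socket(libc::AF_INET, libc::SOCK_DGRAM, 0);
        let one: c_int = 1;
        libc::setsockopt(s, libc::SOL_SOCKET, libc::SO_REUSEPORT, (&one as *const c_int).cast(), std::mem::size_of::<c_int>() as libc::socklen_t);
        // ...then bind() to the shared address and loop on recvfrom() in this thread.
        s
    }
}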
use std::mem::{MaybeUninit, transmute, zeroed};
use std::num::NonZeroI64;
use std::os::raw::c_int;
use std::ptr::null_mut;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
@ -14,18 +23,11 @@ use num_traits::cast::AsPrimitive;
use zerotier_network_hypervisor::vl1::InetAddress;
use zerotier_network_hypervisor::{PacketBuffer, PacketBufferPool};
use crate::debug;
/*
* This is a threaded UDP socket listener for high performance. The fastest way to receive UDP
* (without heroic efforts like kernel bypass) on most platforms is to create a separate socket
* for each thread using options like SO_REUSEPORT and concurrent packet listening.
*/
const FAST_UDP_SOCKET_MAX_THREADS: usize = 4;
#[cfg(windows)]
pub(crate) type FastUDPRawOsSocket = winsock2::SOCKET;
#[cfg(unix)]
pub(crate) type FastUDPRawOsSocket = c_int;
pub(crate) type FastUDPRawOsSocket = NonZeroI64;
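// NonZeroI64 is used so Option<FastUDPRawOsSocket> is the same size as the raw value
// (niche optimization) and a zero/invalid descriptor is unrepresentable.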
#[cfg(unix)]
fn bind_udp_socket(_device_name: &str, address: &InetAddress) -> Result<FastUDPRawOsSocket, &'static str> {
@ -39,7 +41,7 @@ fn bind_udp_socket(_device_name: &str, address: &InetAddress) -> Result<FastUDPR
};
let s = libc::socket(af.as_(), libc::SOCK_DGRAM, 0);
if s < 0 {
if s <= 0 {
return Err("unable to create socket");
}
@ -118,7 +120,7 @@ fn bind_udp_socket(_device_name: &str, address: &InetAddress) -> Result<FastUDPR
return Err("bind to address failed");
}
Ok(s)
Ok(NonZeroI64::new(s as i64).unwrap())
}
}
@ -133,36 +135,47 @@ pub(crate) struct FastUDPSocket {
#[cfg(unix)]
#[inline(always)]
fn fast_udp_socket_close(socket: &FastUDPRawOsSocket) {
unsafe { libc::close(*socket); }
}
#[inline(always)]
pub(crate) fn fast_udp_socket_to_i64(socket: &FastUDPRawOsSocket) -> i64 { (*socket) as i64 }
#[inline(always)]
pub(crate) fn fast_udp_socket_from_i64(socket: i64) -> Option<FastUDPRawOsSocket> {
if socket >= 0 {
Some(socket as FastUDPRawOsSocket)
} else {
None
}
unsafe { libc::close(socket.get().as_()); }
}
/// Send to a raw UDP socket with optional packet TTL.
/// If packet_ttl is zero the packet is sent with the socket's default TTL. The TTL setting
/// is currently only used by ZeroTier to send escalating TTL probes for IPv4 NAT traversal.
#[cfg(unix)]
#[inline(always)]
pub(crate) fn fast_udp_socket_sendto(socket: &FastUDPRawOsSocket, to_address: &InetAddress, data: &[u8], packet_ttl: i32) {
pub(crate) fn fast_udp_socket_sendto(socket: &FastUDPRawOsSocket, to_address: &InetAddress, data: &[&[u8]], packet_ttl: u8) {
unsafe {
if packet_ttl <= 0 {
libc::sendto(*socket, data.as_ptr().cast(), data.len().as_(), 0, (to_address as *const InetAddress).cast(), std::mem::size_of::<InetAddress>().as_());
debug_assert!(zerotier_network_hypervisor::vl1::PACKET_FRAGMENT_COUNT_MAX < 16);
debug_assert!(data.len() <= 16);
let mut iov: [libc::iovec; 16] = MaybeUninit::uninit().assume_init();
let data_len = data.len().min(16); // clamp to the iovec array's capacity
let mut data_ptr = 0;
while data_ptr < data_len {
let v = iov.get_unchecked_mut(data_ptr);
let d = *data.get_unchecked(data_ptr);
data_ptr += 1;
v.iov_base = transmute(d.as_ptr()); // iov_base is mut even though data is not changed
debug_assert!(d.len() > 0);
v.iov_len = d.len();
}
let mhdr = libc::msghdr {
msg_name: transmute(to_address as *const InetAddress), // also mut even though it's not modified
msg_namelen: std::mem::size_of::<InetAddress>().as_(),
msg_iov: iov.as_mut_ptr(),
msg_iovlen: data_len.as_(),
msg_control: null_mut(),
msg_controllen: 0,
msg_flags: 0
};
if packet_ttl == 0 || to_address.is_ipv6() {
let _ = libc::sendmsg(socket.get().as_(), transmute(&mhdr as *const libc::msghdr), 0);
} else {
let mut ttl = packet_ttl as c_int;
libc::setsockopt(*socket, libc::IPPROTO_IP.as_(), libc::IP_TTL.as_(), (&mut ttl as *mut c_int).cast(), std::mem::size_of::<c_int>().as_());
libc::sendto(*socket, data.as_ptr().cast(), data.len().as_(), 0, (to_address as *const InetAddress).cast(), std::mem::size_of::<InetAddress>().as_());
libc::setsockopt(socket.get().as_(), libc::IPPROTO_IP.as_(), libc::IP_TTL.as_(), (&mut ttl as *mut c_int).cast(), std::mem::size_of::<c_int>().as_());
let _ = libc::sendmsg(socket.get().as_(), transmute(&mhdr as *const libc::msghdr), 0);
ttl = 255;
libc::setsockopt(*socket, libc::IPPROTO_IP.as_(), libc::IP_TTL.as_(), (&mut ttl as *mut c_int).cast(), std::mem::size_of::<c_int>().as_());
libc::setsockopt(socket.get().as_(), libc::IPPROTO_IP.as_(), libc::IP_TTL.as_(), (&mut ttl as *mut c_int).cast(), std::mem::size_of::<c_int>().as_());
}
}
}
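// Illustrative use of the TTL parameter for the NAT traversal technique mentioned in the
// doc comment above, not part of this commit: early low-TTL probes die inside intermediate
// routers but still open outbound NAT mappings. `example_escalating_ttl_probes` is hypothetical.
#[cfg(unix)]
#[allow(dead_code)]
fn example_escalating_ttl_probes(socket: &FastUDPRawOsSocket, target: &InetAddress, probe: &[u8]) {
    for ttl in [2_u8, 4, 8] {
        fast_udp_socket_sendto(socket, target, &[probe], ttl);
    }
    fast_udp_socket_sendto(socket, target, &[probe], 0); // final send with the default TTL
}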
@ -172,7 +185,7 @@ pub(crate) fn fast_udp_socket_sendto(socket: &FastUDPRawOsSocket, to_address: &I
fn fast_udp_socket_recvfrom(socket: &FastUDPRawOsSocket, buf: &mut PacketBuffer, from_address: &mut InetAddress) -> isize {
unsafe {
let mut addrlen = std::mem::size_of::<InetAddress>() as libc::socklen_t;
let s = libc::recvfrom(*socket, buf.as_mut_ptr().cast(), buf.capacity().as_(), 0, (from_address as *mut InetAddress).cast(), &mut addrlen) as isize;
let s = libc::recvfrom(socket.get().as_(), buf.as_mut_ptr().cast(), buf.capacity().as_(), 0, (from_address as *mut InetAddress).cast(), &mut addrlen) as isize;
if s > 0 {
buf.set_size_unchecked(s as usize);
}
@ -182,7 +195,7 @@ fn fast_udp_socket_recvfrom(socket: &FastUDPRawOsSocket, buf: &mut PacketBuffer,
impl FastUDPSocket {
pub fn new<F: Fn(&FastUDPRawOsSocket, &InetAddress, PacketBuffer) + Send + Sync + Clone + 'static>(device_name: &str, address: &InetAddress, packet_buffer_pool: &Arc<PacketBufferPool>, handler: F) -> Result<Self, String> {
let thread_count = num_cpus::get_physical().clamp(1, 4);
let thread_count = num_cpus::get_physical().clamp(1, FAST_UDP_SOCKET_MAX_THREADS);
let mut s = Self {
thread_run: Arc::new(AtomicBool::new(true)),
@ -230,19 +243,16 @@ impl FastUDPSocket {
Ok(s)
}
#[inline(always)]
pub fn all_sockets(&self) -> &[FastUDPRawOsSocket] {
self.sockets.as_slice()
}
#[inline(always)]
pub fn send(&self, to_address: &InetAddress, data: &[&[u8]], packet_ttl: u8) {
fast_udp_socket_sendto(self.sockets.get(0).unwrap(), to_address, data, packet_ttl);
debug_assert!(!self.sockets.is_empty());
fast_udp_socket_sendto(unsafe { self.sockets.get_unchecked(0) }, to_address, data, packet_ttl);
}
#[inline(always)]
pub fn raw_socket(&self) -> FastUDPRawOsSocket {
*self.sockets.get(0).unwrap()
pub fn raw_socket(&self) -> &FastUDPRawOsSocket {
debug_assert!(!self.sockets.is_empty());
unsafe { self.sockets.get_unchecked(0) }
}
}
@ -258,17 +268,17 @@ impl Drop for FastUDPSocket {
self.thread_run.store(false, Ordering::Relaxed);
for s in self.sockets.iter() {
unsafe {
libc::sendto(*s, tmp.as_ptr().cast(), 0, 0, (&self.bind_address as *const InetAddress).cast(), std::mem::size_of::<InetAddress>() as osdep::socklen_t);
libc::sendto(s.get().as_(), tmp.as_ptr().cast(), 0, 0, (&self.bind_address as *const InetAddress).cast(), std::mem::size_of::<InetAddress>() as osdep::socklen_t);
}
}
for s in self.sockets.iter() {
unsafe {
libc::shutdown(*s, libc::SHUT_RDWR.as_());
libc::shutdown(s.get().as_(), libc::SHUT_RDWR.as_());
}
}
for s in self.sockets.iter() {
unsafe {
libc::close(*s);
libc::close(s.get().as_());
}
}
while !self.threads.is_empty() {

View file

@ -151,10 +151,6 @@ impl Default for LocalConfigLogSettings {
pub struct LocalConfigSettings {
#[serde(rename = "primaryPort")]
pub primary_port: u16,
#[serde(rename = "secondaryPort")]
pub secondary_port: Option<u16>,
#[serde(rename = "autoPortSearch")]
pub auto_port_search: bool,
#[serde(rename = "portMapping")]
pub port_mapping: bool,
#[serde(rename = "log")]
@ -175,8 +171,6 @@ impl Default for LocalConfigSettings {
LocalConfigSettings {
primary_port: zerotier_core::DEFAULT_PORT,
secondary_port: Some(zerotier_core::DEFAULT_SECONDARY_PORT),
auto_port_search: true,
port_mapping: true,
log: LocalConfigLogSettings::default(),
interface_prefix_blacklist: bl,
@ -187,7 +181,7 @@ impl Default for LocalConfigSettings {
impl LocalConfigSettings {
#[cfg(target_os = "macos")]
const DEFAULT_PREFIX_BLACKLIST: [&'static str; 8] = ["lo", "utun", "gif", "stf", "iptap", "pktap", "feth", "zt"];
const DEFAULT_PREFIX_BLACKLIST: [&'static str; 9] = ["lo", "utun", "gif", "stf", "iptap", "pktap", "feth", "zt", "llw"];
#[cfg(target_os = "linux")]
const DEFAULT_PREFIX_BLACKLIST: [&'static str; 5] = ["lo", "tun", "tap", "ipsec", "zt"];

View file

@ -132,16 +132,24 @@ impl Log {
}
#[macro_export]
macro_rules! l(
macro_rules! log(
($logger:expr, $($arg:tt)*) => {
$logger.lock().log(format!($($arg)*))
$logger.lock().log(format!($($arg)*));
}
);
#[macro_export]
macro_rules! d(
macro_rules! debug(
($logger:expr, $($arg:tt)*) => {
$logger.lock().debug(format!($($arg)*))
$logger.lock().debug(format!($($arg)*));
}
);
#[macro_export]
macro_rules! fatal(
($logger:expr, $($arg:tt)*) => {
$logger.lock().fatal(format!($($arg)*));
std::process::exit(-1);
}
);
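// Example usage of the macros above, assuming `logger` is the Arc<Mutex<Log>> used
// elsewhere in this service:
//
//   log!(logger, "listening on port {}", port);
//   debug!(logger, "bound {} sockets", n);
//   fatal!(logger, "cannot open {}: {}", path, err); // logs the message, then exits with -1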

View file

@ -22,6 +22,7 @@ use std::str::FromStr;
use clap::{App, Arg, ArgMatches, ErrorKind};
use zerotier_network_hypervisor::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
use crate::store::platform_default_home_path;
pub const HTTP_API_OBJECT_SIZE_LIMIT: usize = 131072;
@ -113,6 +114,9 @@ pub fn print_help(long_help: bool) {
pub struct GlobalCommandLineFlags {
pub json_output: bool,
pub base_path: String,
pub auth_token_path_override: Option<String>,
pub auth_token_override: Option<String>
}
fn main() {
@ -207,7 +211,10 @@ fn main() {
});
let global_cli_flags = GlobalCommandLineFlags {
json_output: cli_args.is_present("json")
json_output: cli_args.is_present("json"),
base_path: cli_args.value_of("path").map_or_else(|| platform_default_home_path(), |p| p.to_string()),
auth_token_path_override: cli_args.value_of("token_path").map(|p| p.to_string()),
auth_token_override: cli_args.value_of("token").map(|t| t.to_string())
};
std::process::exit({
@ -233,7 +240,7 @@ fn main() {
("leave", Some(sub_cli_args)) => todo!(),
("service", None) => {
drop(cli_args); // free no longer needed memory before entering service
service::run()
service::run(&global_cli_flags)
}
("controller", Some(sub_cli_args)) => todo!(),
("identity", Some(sub_cli_args)) => todo!(),

View file

@ -8,20 +8,27 @@
use std::num::NonZeroI64;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use parking_lot::Mutex;
use zerotier_network_hypervisor::{Interface, NetworkHypervisor};
use zerotier_network_hypervisor::vl1::{Endpoint, Identity, NodeInterface};
use zerotier_network_hypervisor::vl1::{Endpoint, Identity, IdentityType, Node, NodeInterface};
use zerotier_network_hypervisor::vl2::SwitchInterface;
use crate::fastudpsocket::{fast_udp_socket_sendto, FastUDPRawOsSocket};
use crate::GlobalCommandLineFlags;
use crate::log::Log;
use crate::utils::{ms_monotonic, ms_since_epoch};
use crate::localconfig::LocalConfig;
use crate::store::{platform_default_home_path, StateObjectType, Store};
struct ServiceInterface {
pub log: Arc<Mutex<Log>>,
pub config: Mutex<LocalConfig>
pub store: Store,
pub config: Arc<Mutex<LocalConfig>>,
pub online: AtomicBool,
pub all_sockets: Mutex<Vec<FastUDPRawOsSocket>>,
pub all_sockets_spin_ptr: AtomicUsize,
}
impl NodeInterface for ServiceInterface {
@ -31,27 +38,55 @@ impl NodeInterface for ServiceInterface {
fn event_identity_collision(&self) {}
fn event_online_status_change(&self, online: bool) {}
#[inline(always)]
fn event_online_status_change(&self, online: bool) {
self.online.store(online, Ordering::Relaxed);
}
fn event_user_message(&self, source: &Identity, message_type: u64, message: &[u8]) {}
fn load_node_identity(&self) -> Option<&[u8]> {
todo!()
#[inline(always)]
fn load_node_identity(&self) -> Option<Vec<u8>> {
self.store.load_object(StateObjectType::IdentitySecret, &[]).ok()
}
fn save_node_identity(&self, id: &Identity, public: &[u8], secret: &[u8]) {}
fn save_node_identity(&self, _: &Identity, public: &[u8], secret: &[u8]) {
let _ = self.store.store_object(StateObjectType::IdentityPublic, &[], public);
let _ = self.store.store_object(StateObjectType::IdentitySecret, &[], secret);
}
#[inline(always)]
fn wire_send(&self, endpoint: &Endpoint, local_socket: Option<NonZeroI64>, local_interface: Option<NonZeroI64>, data: &[&[u8]], packet_ttl: u8) -> bool {
todo!()
match endpoint {
Endpoint::IpUdp(ip) => {
local_socket.map_or_else(|| {
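// No specific local socket was given: rotate through all bound sockets using an atomic
// counter so outbound sends are spread round-robin across them.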
let ptr = self.all_sockets_spin_ptr.fetch_add(1, Ordering::Relaxed);
let all_sockets = self.all_sockets.lock();
if !all_sockets.is_empty() {
let s = unsafe { all_sockets.get_unchecked(ptr % all_sockets.len()) }.clone();
drop(all_sockets); // release mutex
fast_udp_socket_sendto(&s, ip, data, packet_ttl);
true
} else {
false
}
}, |local_socket| {
fast_udp_socket_sendto(&local_socket, ip, data, packet_ttl);
true
})
}
_ => false
}
}
fn check_path(&self, id: &Identity, endpoint: &Endpoint, local_socket: Option<NonZeroI64>, local_interface: Option<NonZeroI64>) -> bool {
// TODO
true
}
fn get_path_hints(&self, id: &Identity) -> Option<&[(&Endpoint, Option<NonZeroI64>, Option<NonZeroI64>)]> {
todo!()
// TODO
None
}
#[inline(always)]
@ -65,7 +100,21 @@ impl SwitchInterface for ServiceInterface {}
impl Interface for ServiceInterface {}
pub fn run() -> i32 {
pub fn run(global_cli_flags: &GlobalCommandLineFlags) -> i32 {
let store = Store::new(global_cli_flags.base_path.as_str(), &global_cli_flags.auth_token_path_override, &global_cli_flags.auth_token_override);
if store.is_err() {
    eprintln!("FATAL: unable to open data store: {}", store.as_ref().err().unwrap().to_string());
    return 1;
}
let si = ServiceInterface {
store: store.unwrap(),
config: Arc::new(Mutex::new(LocalConfig::default())),
online: AtomicBool::new(false),
all_sockets: Mutex::new(Vec::new()),
all_sockets_spin_ptr: AtomicUsize::new(0),
};
let node = Node::new(&si, Some(IdentityType::C25519));
0
}

View file

@ -22,11 +22,24 @@ const LOCAL_CONF: &'static str = "local.conf";
const AUTHTOKEN_SECRET: &'static str = "authtoken.secret";
const SERVICE_LOG: &'static str = "service.log";
#[derive(Clone, Copy)]
pub enum StateObjectType {
IdentityPublic,
IdentitySecret,
NetworkConfig,
Peer
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub fn platform_default_home_path() -> String {
"/Library/Application Support/ZeroTier".into_string()
}
/// In-filesystem data store for configuration and objects.
pub(crate) struct Store {
pub base_path: Box<Path>,
pub default_log_path: Box<Path>,
prev_local_config: Mutex<String>,
previous_local_config_on_disk: Mutex<String>,
peers_path: Box<Path>,
controller_path: Box<Path>,
networks_path: Box<Path>,
@ -36,19 +49,13 @@ pub(crate) struct Store {
/// Restrict file permissions using OS-specific code in osdep/OSUtils.cpp.
pub fn lock_down_file(path: &str) {
let p = CString::new(path.as_bytes());
if p.is_ok() {
let p = p.unwrap();
unsafe {
crate::osdep::lockDownFile(p.as_ptr(), 0);
}
}
// TODO: need both Windows and Unix implementations
}
impl Store {
const MAX_OBJECT_SIZE: usize = 262144; // sanity limit
pub fn new(base_path: &str, auth_token_path_override: Option<String>, auth_token_override: Option<String>) -> std::io::Result<Store> {
pub fn new(base_path: &str, auth_token_path_override: &Option<String>, auth_token_override: &Option<String>) -> std::io::Result<Store> {
let bp = Path::new(base_path);
let _ = std::fs::create_dir_all(bp);
let md = bp.metadata()?;
@ -59,7 +66,7 @@ impl Store {
let s = Store {
base_path: bp.to_path_buf().into_boxed_path(),
default_log_path: bp.join(SERVICE_LOG).into_boxed_path(),
prev_local_config: Mutex::new(String::new()),
previous_local_config_on_disk: Mutex::new(String::new()),
peers_path: bp.join("peers.d").into_boxed_path(),
controller_path: bp.join("controller.d").into_boxed_path(),
networks_path: bp.join("networks.d").into_boxed_path(),
@ -82,12 +89,10 @@ impl Store {
Ok(s)
}
fn make_obj_path_internal(&self, obj_type: &StateObjectType, obj_id: &[u64]) -> Option<PathBuf> {
fn make_obj_path_internal(&self, obj_type: StateObjectType, obj_id: &[u64]) -> Option<PathBuf> {
match obj_type {
StateObjectType::IdentityPublic => Some(self.base_path.join("identity.public")),
StateObjectType::IdentitySecret => Some(self.base_path.join("identity.secret")),
StateObjectType::TrustStore => Some(self.base_path.join("truststore")),
StateObjectType::Locator => Some(self.base_path.join("locator")),
StateObjectType::NetworkConfig => {
if obj_id.len() < 1 {
None
@ -204,13 +209,13 @@ impl Store {
}
let data = data.unwrap();
if skip_if_unchanged {
let mut prev = self.prev_local_config.lock().unwrap();
let mut prev = self.previous_local_config_on_disk.lock().unwrap();
if prev.eq(&data) {
return None;
}
*prev = data.clone();
} else {
*(self.prev_local_config.lock().unwrap()) = data.clone();
*(self.previous_local_config_on_disk.lock().unwrap()) = data.clone();
}
let lc = serde_json::from_str::<LocalConfig>(data.as_str());
if lc.is_err() {
@ -244,40 +249,22 @@ impl Store {
let _ = std::fs::remove_file(self.base_path.join(ZEROTIER_PID));
}
pub fn write_uri(&self, uri: &str) -> std::io::Result<()> {
self.write_file(ZEROTIER_URI, uri.as_bytes())
}
pub fn load_uri(&self) -> std::io::Result<hyper::Uri> {
let uri = String::from_utf8(self.read_file(ZEROTIER_URI)?);
uri.map_or_else(|e| {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))
}, |uri| {
let uri = hyper::Uri::from_str(uri.trim());
uri.map_or_else(|e| {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))
}, |uri| {
Ok(uri)
})
})
}
pub fn load_object(&self, obj_type: &StateObjectType, obj_id: &[u64]) -> std::io::Result<Vec<u8>> {
let obj_path = self.make_obj_path_internal(&obj_type, obj_id);
pub fn load_object(&self, obj_type: StateObjectType, obj_id: &[u64]) -> std::io::Result<Vec<u8>> {
let obj_path = self.make_obj_path_internal(obj_type, obj_id);
if obj_path.is_some() {
return self.read_internal(obj_path.unwrap());
}
Err(std::io::Error::new(std::io::ErrorKind::NotFound, "does not exist or is not readable"))
}
pub fn erase_object(&self, obj_type: &StateObjectType, obj_id: &[u64]) {
pub fn erase_object(&self, obj_type: StateObjectType, obj_id: &[u64]) {
let obj_path = self.make_obj_path_internal(obj_type, obj_id);
if obj_path.is_some() {
let _ = std::fs::remove_file(obj_path.unwrap());
}
}
pub fn store_object(&self, obj_type: &StateObjectType, obj_id: &[u64], obj_data: &[u8]) -> std::io::Result<()> {
pub fn store_object(&self, obj_type: StateObjectType, obj_id: &[u64], obj_data: &[u8]) -> std::io::Result<()> {
let obj_path = self.make_obj_path_internal(obj_type, obj_id);
if obj_path.is_some() {
let obj_path = obj_path.unwrap();

View file

@ -16,6 +16,7 @@ use serde::de::DeserializeOwned;
use serde::Serialize;
use zerotier_network_hypervisor::vl1::Identity;
use zerotier_core_crypto::hex;
use crate::osdep;
@ -29,13 +30,18 @@ pub fn ms_monotonic() -> i64 {
let mut tb: mach::mach_time::mach_timebase_info_data_t = std::mem::zeroed();
if mach::mach_time::mach_timebase_info(&mut tb) == 0 {
let mt = mach::mach_time::mach_continuous_approximate_time();
(((mt as u128) * tb.numer as u128 * 1000000_u128) / (tb.denom as u128)) as i64
(((mt as u128) * tb.numer as u128) / (tb.denom as u128 * 1000000_u128)) as i64 // ticks * (numer / denom) = nanoseconds, then / 1e6 = milliseconds since an arbitrary epoch
} else {
panic!("FATAL: mach_timebase_info() failed");
}
}
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
pub fn ms_monotonic() -> i64 {
    // Measure from the first call; a bare Instant::now().elapsed() would always be ~0.
    use std::sync::OnceLock;
    static START: OnceLock<std::time::Instant> = OnceLock::new();
    START.get_or_init(std::time::Instant::now).elapsed().as_millis() as i64
}
pub fn parse_bool(v: &str) -> Result<bool, String> {
if !v.is_empty() {
match v.chars().next().unwrap() {
@ -180,3 +186,18 @@ pub fn json_patch_object<O: Serialize + DeserializeOwned + Eq>(obj: O, patch: &s
})
})
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::utils::ms_monotonic;
#[test]
fn monotonic_clock_sanity_check() {
let start = ms_monotonic();
std::thread::sleep(Duration::from_millis(500));
let end = ms_monotonic();
assert!((end - start).abs() > 450);
assert!((end - start).abs() < 550);
}
}

View file

@ -7,13 +7,11 @@
*/
use std::collections::HashSet;
#[allow(unused_imports)]
use num_traits::AsPrimitive;
#[allow(unused_imports)]
use std::os::raw::c_int;
#[allow(unused_imports)]
use num_traits::AsPrimitive;
#[allow(unused_imports)]
use zerotier_network_hypervisor::vl1::MAC;
@ -35,7 +33,7 @@ extern "C" {
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd", target_os = "openbsd", target_os = "dragonfly", target_os = "freebsd", target_os = "darwin"))]
pub fn get_l2_multicast_subscriptions(dev: &str) -> HashSet<MAC> {
let mut groups: HashSet<MulticastGroup> = HashSet::new();
let mut groups: HashSet<MAC> = HashSet::new();
let dev = dev.as_bytes();
unsafe {
let mut maddrs: *mut ifmaddrs = std::ptr::null_mut();
@ -47,7 +45,7 @@ pub fn get_l2_multicast_subscriptions(dev: &str) -> HashSet<MAC> {
let la: &libc::sockaddr_dl = &*((*i).ifma_addr.cast());
if la.sdl_alen == 6 && in_.sdl_nlen <= dev.len().as_() && crate::libc::memcmp(dev.as_ptr().cast(), in_.sdl_data.as_ptr().cast(), in_.sdl_nlen.as_()) == 0 {
let mi = la.sdl_nlen as usize;
groups.insert(MAC::from_u64((la.sdl_data[mi] as u64) << 40 | (la.sdl_data[mi+1] as u64) << 32 | (la.sdl_data[mi+2] as u64) << 24 | (la.sdl_data[mi+3] as u64) << 16 | (la.sdl_data[mi+4] as u64) << 8 | la.sdl_data[mi+5] as u64));
MAC::from_u64((la.sdl_data[mi] as u64) << 40 | (la.sdl_data[mi+1] as u64) << 32 | (la.sdl_data[mi+2] as u64) << 24 | (la.sdl_data[mi+3] as u64) << 16 | (la.sdl_data[mi+4] as u64) << 8 | la.sdl_data[mi+5] as u64).map(|mac| groups.insert(mac));
}
}
i = (*i).ifma_next;