Implement identity V1 so as to make it backward compatible with old versions, and tons of build fixes.

This commit is contained in:
Adam Ierymenko 2021-12-10 21:57:50 -05:00
parent 018889d3b4
commit 4a9938dfd3
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
29 changed files with 960 additions and 970 deletions

View file

@ -1,3 +1,11 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2021 ZeroTier, Inc.
* https://www.zerotier.com/
*/
// AES-GMAC-SIV implemented using libgcrypt.
use std::io::Write;

View file

@ -1,3 +1,11 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2021 ZeroTier, Inc.
* https://www.zerotier.com/
*/
// AES-GMAC-SIV implemented using MacOS/iOS CommonCrypto (MacOS 10.13 or newer required).
use std::os::raw::{c_void, c_int};

View file

@ -1,3 +1,11 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2021 ZeroTier, Inc.
* https://www.zerotier.com/
*/
// AES-GMAC-SIV implemented using OpenSSL.
use openssl::symm::{Crypter, Cipher, Mode};

View file

@ -1,3 +1,11 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2021 ZeroTier, Inc.
* https://www.zerotier.com/
*/
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod impl_macos;

View file

@ -1,72 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2021 ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::mem::MaybeUninit;
/// Concatenate two byte slices into a fixed-size array of exactly S bytes.
/// Panics (via copy_from_slice) if s0.len() != S0 or s1.len() != S1.
#[inline(always)]
pub fn concat_2_slices<const S0: usize, const S1: usize, const S: usize>(s0: &[u8], s1: &[u8]) -> [u8; S] {
    debug_assert_eq!(S0 + S1, S);
    // Zero-initialize instead of MaybeUninit::uninit().assume_init(): producing an
    // uninitialized [u8; S] is undefined behavior even for plain integer arrays.
    let mut tmp = [0_u8; S];
    tmp[..S0].copy_from_slice(s0);
    tmp[S0..].copy_from_slice(s1);
    tmp
}
/// Concatenate two fixed-size byte arrays into one array of S0 + S1 bytes.
/// The typed array lengths guarantee the per-slice length panics in
/// concat_2_slices cannot fire; S0 + S1 == S is still only debug-asserted.
#[inline(always)]
pub fn concat_2_arrays<const S0: usize, const S1: usize, const S: usize>(s0: &[u8; S0], s1: &[u8; S1]) -> [u8; S] {
concat_2_slices::<S0, S1, S>(s0, s1)
}
/// Concatenate three byte slices into a fixed-size array of exactly S bytes.
/// Panics (via copy_from_slice) if any slice length differs from its const parameter.
#[inline(always)]
pub fn concat_3_slices<const S0: usize, const S1: usize, const S2: usize, const S: usize>(s0: &[u8], s1: &[u8], s2: &[u8]) -> [u8; S] {
    debug_assert_eq!(S0 + S1 + S2, S);
    // Zero-initialize instead of MaybeUninit::uninit().assume_init(), which is UB
    // even for integer arrays.
    let mut tmp = [0_u8; S];
    tmp[..S0].copy_from_slice(s0);
    // Bug fix: the middle destination window is S0..(S0 + S1); the original wrote
    // tmp[S0..S1], which is the wrong range and panics whenever its length != s1.len().
    tmp[S0..(S0 + S1)].copy_from_slice(s1);
    tmp[(S0 + S1)..].copy_from_slice(s2);
    tmp
}
/// Concatenate three fixed-size byte arrays into one array of S0 + S1 + S2 bytes.
/// Thin typed wrapper around concat_3_slices.
#[inline(always)]
pub fn concat_3_arrays<const S0: usize, const S1: usize, const S2: usize, const S: usize>(s0: &[u8; S0], s1: &[u8; S1], s2: &[u8; S2]) -> [u8; S] {
concat_3_slices::<S0, S1, S2, S>(s0, s1, s2)
}
/// Concatenate four byte slices into a fixed-size array of exactly S bytes.
/// Panics (via copy_from_slice) if any slice length differs from its const parameter.
#[inline(always)]
pub fn concat_4_slices<const S0: usize, const S1: usize, const S2: usize, const S3: usize, const S: usize>(s0: &[u8], s1: &[u8], s2: &[u8], s3: &[u8]) -> [u8; S] {
    debug_assert_eq!(S0 + S1 + S2 + S3, S);
    // Zero-initialize instead of MaybeUninit::uninit().assume_init(), which is UB
    // even for integer arrays.
    let mut tmp = [0_u8; S];
    tmp[..S0].copy_from_slice(s0);
    // Bug fix: second destination window is S0..(S0 + S1), not S0..S1.
    tmp[S0..(S0 + S1)].copy_from_slice(s1);
    tmp[(S0 + S1)..(S0 + S1 + S2)].copy_from_slice(s2);
    tmp[(S0 + S1 + S2)..].copy_from_slice(s3);
    tmp
}
/// Concatenate four fixed-size byte arrays into one array of S0 + S1 + S2 + S3 bytes.
/// Thin typed wrapper around concat_4_slices.
#[inline(always)]
pub fn concat_4_arrays<const S0: usize, const S1: usize, const S2: usize, const S3: usize, const S: usize>(s0: &[u8; S0], s1: &[u8; S1], s2: &[u8; S2], s3: &[u8; S3]) -> [u8; S] {
concat_4_slices::<S0, S1, S2, S3, S>(s0, s1, s2, s3)
}
/// Concatenate five byte slices into a fixed-size array of exactly S bytes.
/// Panics (via copy_from_slice) if any slice length differs from its const parameter.
#[inline(always)]
pub fn concat_5_slices<const S0: usize, const S1: usize, const S2: usize, const S3: usize, const S4: usize, const S: usize>(s0: &[u8], s1: &[u8], s2: &[u8], s3: &[u8], s4: &[u8]) -> [u8; S] {
    debug_assert_eq!(S0 + S1 + S2 + S3 + S4, S);
    // Zero-initialize instead of MaybeUninit::uninit().assume_init(), which is UB
    // even for integer arrays.
    let mut tmp = [0_u8; S];
    tmp[..S0].copy_from_slice(s0);
    // Bug fix: second destination window is S0..(S0 + S1), not S0..S1.
    tmp[S0..(S0 + S1)].copy_from_slice(s1);
    tmp[(S0 + S1)..(S0 + S1 + S2)].copy_from_slice(s2);
    tmp[(S0 + S1 + S2)..(S0 + S1 + S2 + S3)].copy_from_slice(s3);
    tmp[(S0 + S1 + S2 + S3)..].copy_from_slice(s4);
    tmp
}
/// Concatenate five fixed-size byte arrays into one array of S0 + S1 + S2 + S3 + S4 bytes.
/// Thin typed wrapper around concat_5_slices.
#[inline(always)]
pub fn concat_5_arrays<const S0: usize, const S1: usize, const S2: usize, const S3: usize, const S4: usize, const S: usize>(s0: &[u8; S0], s1: &[u8; S1], s2: &[u8; S2], s3: &[u8; S3], s4: &[u8; S4]) -> [u8; S] {
concat_5_slices::<S0, S1, S2, S3, S4, S>(s0, s1, s2, s3, s4)
}

View file

@ -59,6 +59,10 @@ impl C25519KeyPair {
}
}
impl Clone for C25519KeyPair {
// Manual Clone: both halves are rebuilt from their byte encodings, presumably
// because x25519_dalek::StaticSecret does not implement Clone in the crate
// version in use -- TODO confirm against Cargo.toml.
fn clone(&self) -> Self { Self(x25519_dalek::StaticSecret::from(self.0.to_bytes()), x25519_dalek::PublicKey::from(self.1.to_bytes())) }
}
/// Ed25519 key pair for EDDSA signatures.
pub struct Ed25519KeyPair(ed25519_dalek::Keypair);
@ -113,6 +117,10 @@ impl Ed25519KeyPair {
}
}
impl Clone for Ed25519KeyPair {
// Manual Clone: round-trips through the byte encoding, presumably because
// ed25519_dalek::Keypair is not Clone -- TODO confirm. The unwrap() cannot fail
// here since the bytes come from an existing valid Keypair.
fn clone(&self) -> Self { Self(ed25519_dalek::Keypair::from_bytes(&self.0.to_bytes()).unwrap()) }
}
pub fn ed25519_verify(public_key: &[u8], signature: &[u8], msg: &[u8]) -> bool {
if public_key.len() == 32 && signature.len() >= 64 {
ed25519_dalek::PublicKey::from_bytes(public_key).map_or(false, |pk| {

View file

@ -18,7 +18,7 @@ pub mod secret;
pub mod hex;
pub mod varint;
pub mod sidhp751;
pub mod array_concat;
pub use aes_gmac_siv;
pub use rand_core;
pub use subtle;

View file

@ -70,6 +70,55 @@ pub struct P521PublicKey {
public_key_bytes: [u8; P521_PUBLIC_KEY_SIZE],
}
impl P521PublicKey {
/// Construct a public key from a byte serialized representation.
/// None is returned if the input is not valid. No advanced checking such as
/// determining if this is a point on the curve is performed.
pub fn from_bytes(b: &[u8]) -> Option<P521PublicKey> {
if b.len() == P521_PUBLIC_KEY_SIZE {
Some(P521PublicKey {
// unwrap(): the s-expression is built from hex generated here and is assumed
// always parseable -- NOTE(review): confirm gcrypt cannot reject it at parse time.
public_key: SExpression::from_str(format!("(public-key(ecc(curve nistp521)(q #04{}#)))", crate::hex::to_string(b)).as_str()).unwrap(),
// Length was checked above, so this fixed-size conversion cannot fail.
public_key_bytes: b.try_into().unwrap(),
})
} else {
None
}
}
/// Verify a signature.
/// Message data does not need to be pre-hashed.
pub fn verify(&self, msg: &[u8], signature: &[u8]) -> bool {
if signature.len() == P521_ECDSA_SIGNATURE_SIZE {
// SAFETY: hash_to_data_sexp is presumed to emit only ASCII s-expression text,
// making the unchecked UTF-8 conversion sound -- TODO confirm against its impl.
let data = SExpression::from_str(unsafe { std::str::from_utf8_unchecked(&hash_to_data_sexp(msg)) }).unwrap();
// The 132-byte signature is split into its 66-byte r and s halves.
let sig = SExpression::from_str(format!("(sig-val(ecdsa(r #{}#)(s #{}#)))", crate::hex::to_string(&signature[0..66]), crate::hex::to_string(&signature[66..132])).as_str()).unwrap();
gcrypt::pkey::verify(&self.public_key, &data, &sig).is_ok()
} else {
false
}
}
/// Get the raw byte serialized form of this public key.
#[inline(always)]
pub fn public_key_bytes(&self) -> &[u8; P521_PUBLIC_KEY_SIZE] {
&self.public_key_bytes
}
}
// Equality considers only the canonical byte form; the cached SExpression handle
// is derived from those same bytes.
impl PartialEq for P521PublicKey {
#[inline(always)]
fn eq(&self, other: &Self) -> bool { self.public_key_bytes.eq(&other.public_key_bytes) }
}
impl Eq for P521PublicKey {}
impl Clone for P521PublicKey {
// Manual Clone: the gcrypt SExpression handle is duplicated by re-parsing its
// serialized bytes, presumably because SExpression is not Clone -- TODO confirm.
fn clone(&self) -> Self {
Self {
public_key: SExpression::from_bytes(self.public_key.get_bytes(0).unwrap()).unwrap(),
public_key_bytes: self.public_key_bytes.clone()
}
}
}
/// NIST P-521 elliptic curve key pair.
/// This supports both ECDSA signing and ECDH key agreement. In practice the same key pair
/// is not used for both functions as this is considered bad practice.
@ -181,48 +230,21 @@ impl P521KeyPair {
}
}
impl P521PublicKey {
/// Construct a public key from a byte serialized representation.
/// None is returned if the input is not valid. No advanced checking such as
/// determining if this is a point on the curve is performed.
pub fn from_bytes(b: &[u8]) -> Option<P521PublicKey> {
if b.len() == P521_PUBLIC_KEY_SIZE {
Some(P521PublicKey {
public_key: SExpression::from_str(format!("(public-key(ecc(curve nistp521)(q #04{}#)))", crate::hex::to_string(b)).as_str()).unwrap(),
public_key_bytes: b.try_into().unwrap(),
})
} else {
None
}
}
/// Verify a signature.
/// Message data does not need to be pre-hashed.
pub fn verify(&self, msg: &[u8], signature: &[u8]) -> bool {
if signature.len() == P521_ECDSA_SIGNATURE_SIZE {
let data = SExpression::from_str(unsafe { std::str::from_utf8_unchecked(&hash_to_data_sexp(msg)) }).unwrap();
let sig = SExpression::from_str(format!("(sig-val(ecdsa(r #{}#)(s #{}#)))", crate::hex::to_string(&signature[0..66]), crate::hex::to_string(&signature[66..132])).as_str()).unwrap();
gcrypt::pkey::verify(&self.public_key, &data, &sig).is_ok()
} else {
false
}
}
#[inline(always)]
pub fn public_key_bytes(&self) -> &[u8; P521_PUBLIC_KEY_SIZE] {
&self.public_key_bytes
}
impl PartialEq for P521KeyPair {
fn eq(&self, other: &Self) -> bool { self.secret_key_bytes.0.eq(&other.secret_key_bytes.0) }
}
impl PartialEq for P521PublicKey {
#[inline(always)]
fn eq(&self, other: &Self) -> bool { self.public_key_bytes.eq(&other.public_key_bytes) }
}
impl Eq for P521KeyPair {}
impl Eq for P521PublicKey {}
impl Clone for P521PublicKey {
fn clone(&self) -> Self { P521PublicKey::from_bytes(&self.public_key_bytes).unwrap() }
impl Clone for P521KeyPair {
fn clone(&self) -> Self {
Self {
public_key: self.public_key.clone(),
secret_key_for_ecdsa: SExpression::from_bytes(self.secret_key_for_ecdsa.get_bytes(0).unwrap()).unwrap(),
secret_key_for_ecdh: SExpression::from_bytes(self.secret_key_for_ecdh.get_bytes(0).unwrap()).unwrap(),
secret_key_bytes: self.secret_key_bytes.clone(),
}
}
}
#[cfg(test)]

View file

@ -17,6 +17,7 @@ base64 = "^0"
lz4_flex = { version = "^0", features = ["safe-encode", "safe-decode", "checked-decode"] }
dashmap = "^4"
parking_lot = "^0"
lazy_static = "^1"
[target."cfg(not(windows))".dependencies]
libc = "^0"

View file

@ -6,6 +6,9 @@
* https://www.zerotier.com/
*/
#[macro_use]
extern crate lazy_static;
pub mod util;
pub mod error;
pub mod vl1;

View file

@ -23,9 +23,9 @@ pub struct NetworkHypervisor {
}
impl NetworkHypervisor {
pub fn new<CI: Interface>(ci: &CI, auto_generate_identity_type: Option<crate::vl1::identity::IdentityType>) -> Result<NetworkHypervisor, InvalidParameterError> {
pub fn new<CI: Interface>(ci: &CI, auto_generate_identity: bool) -> Result<NetworkHypervisor, InvalidParameterError> {
Ok(NetworkHypervisor {
vl1: Node::new(ci, auto_generate_identity_type)?,
vl1: Node::new(ci, auto_generate_identity)?,
vl2: Switch::new(),
})
}

View file

@ -39,11 +39,10 @@ impl<const L: usize> Buffer<L> {
#[inline(always)]
pub fn new() -> Self { Self(0, [0_u8; L]) }
/// Create a zero size buffer without zeroing its actual memory.
/// Create an empty buffer without zeroing its memory (saving a bit of CPU).
#[inline(always)]
pub unsafe fn new_nozero() -> Self { Self(0, MaybeUninit::uninit().assume_init()) }
pub unsafe fn new_without_memzero() -> Self { Self(0, MaybeUninit::uninit().assume_init()) }
/// Get a Buffer initialized with a copy of a byte slice.
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> std::io::Result<Self> {
let l = b.len();
@ -155,7 +154,7 @@ impl<const L: usize> Buffer<L> {
/// Append a runtime sized array and return a mutable reference to its memory.
pub fn append_bytes_get_mut(&mut self, s: usize) -> std::io::Result<&mut [u8]> {
let ptr = self.0;
let end = ptr + l;
let end = ptr + s;
if end <= L {
self.0 = end;
Ok(&mut self.1[ptr..end])
@ -164,7 +163,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Append a dynamic byte slice (copy into buffer).
#[inline(always)]
pub fn append_bytes(&mut self, buf: &[u8]) -> std::io::Result<()> {
let ptr = self.0;
@ -178,7 +176,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Append a fixed length byte array (copy into buffer).
#[inline(always)]
pub fn append_bytes_fixed<const S: usize>(&mut self, buf: &[u8; S]) -> std::io::Result<()> {
let ptr = self.0;
@ -192,13 +189,11 @@ impl<const L: usize> Buffer<L> {
}
}
/// Append a variable length integer to this buffer.
#[inline(always)]
pub fn append_varint(&mut self, mut i: u64) -> std::io::Result<()> {
crate::util::varint::write(self, i)
}
/// Append a byte
#[inline(always)]
pub fn append_u8(&mut self, i: u8) -> std::io::Result<()> {
let ptr = self.0;
@ -211,42 +206,84 @@ impl<const L: usize> Buffer<L> {
}
}
/// Append a 16-bit integer (in big-endian form)
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub fn append_u16(&mut self, i: u16) -> std::io::Result<()> {
let ptr = self.0;
let end = ptr + 2;
if end <= L {
self.0 = end;
crate::util::store_u16_be(i, &mut self.1[ptr..end]);
unsafe { *self.1.as_mut_ptr().add(ptr).cast::<u16>() = i };
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Append a 32-bit integer (in big-endian form)
/// Append a 16-bit integer (in big-endian form).
/// Portable fallback for architectures where unaligned stores may fault; copies
/// the big-endian byte representation instead of storing through a cast pointer.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
#[inline(always)]
pub fn append_u16(&mut self, i: u16) -> std::io::Result<()> {
let ptr = self.0;
let end = ptr + 2;
if end <= L {
self.0 = end;
self.1[ptr..end].copy_from_slice(&i.to_be_bytes());
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub fn append_u32(&mut self, i: u32) -> std::io::Result<()> {
let ptr = self.0;
let end = ptr + 4;
if end <= L {
self.0 = end;
crate::util::store_u32_be(i, &mut self.1[ptr..end]);
unsafe { *self.1.as_mut_ptr().add(ptr).cast::<u32>() = i };
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Append a 64-bit integer (in big-endian form)
/// Append a 32-bit integer (in big-endian form).
/// Portable fallback for architectures where unaligned stores may fault; copies
/// the big-endian byte representation instead of storing through a cast pointer.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
#[inline(always)]
pub fn append_u32(&mut self, i: u32) -> std::io::Result<()> {
let ptr = self.0;
let end = ptr + 4;
if end <= L {
self.0 = end;
self.1[ptr..end].copy_from_slice(&i.to_be_bytes());
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub fn append_u64(&mut self, i: u64) -> std::io::Result<()> {
let ptr = self.0;
let end = ptr + 8;
if end <= L {
self.0 = end;
crate::util::store_u64_be(i, &mut self.1[ptr..end]);
unsafe { *self.1.as_mut_ptr().add(ptr).cast::<u64>() = i };
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
#[inline(always)]
pub fn append_u64(&mut self, i: u64) -> std::io::Result<()> {
let ptr = self.0;
let end = ptr + 8;
if end <= L {
self.0 = end;
self.1[ptr..end].copy_from_slice(&i.to_be_bytes());
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
@ -277,7 +314,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Get a byte at a fixed position.
#[inline(always)]
pub fn u8_at(&self, ptr: usize) -> std::io::Result<u8> {
if ptr < self.0 {
@ -303,8 +339,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Get a fixed length byte array and advance the cursor.
/// This is slightly more efficient than reading a runtime sized byte slice.
#[inline(always)]
pub fn read_bytes_fixed<const S: usize>(&self, cursor: &mut usize) -> std::io::Result<&[u8; S]> {
let ptr = *cursor;
@ -320,7 +354,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Get a runtime specified length byte slice and advance the cursor.
#[inline(always)]
pub fn read_bytes(&self, l: usize, cursor: &mut usize) -> std::io::Result<&[u8]> {
let ptr = *cursor;
@ -334,7 +367,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Get the next variable length integer and advance the cursor by its length in bytes.
#[inline(always)]
pub fn read_varint(&self, cursor: &mut usize) -> std::io::Result<u64> {
let c = *cursor;
@ -350,7 +382,6 @@ impl<const L: usize> Buffer<L> {
}
}
/// Get the next u8 and advance the cursor.
#[inline(always)]
pub fn read_u8(&self, cursor: &mut usize) -> std::io::Result<u8> {
let ptr = *cursor;
@ -363,7 +394,7 @@ impl<const L: usize> Buffer<L> {
}
}
/// Get the next u16 and advance the cursor.
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub fn read_u16(&self, cursor: &mut usize) -> std::io::Result<u16> {
let ptr = *cursor;
@ -371,13 +402,27 @@ impl<const L: usize> Buffer<L> {
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(u16::from_be_bytes(unsafe { *self.1.as_ptr().add(ptr).cast() }))
Ok((unsafe { *self.1.as_ptr().add(ptr).cast::<u16>() }).to_be())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Get the next u32 and advance the cursor.
/// Get the next u16 (big-endian) and advance the cursor.
/// Portable fallback for architectures where unaligned loads may fault.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
#[inline(always)]
pub fn read_u16(&self, cursor: &mut usize) -> std::io::Result<u16> {
    let ptr = *cursor;
    let end = ptr + 2;
    debug_assert!(end <= L);
    if end <= self.0 {
        *cursor = end;
        // Bug fix: the original `u16::from_be_bytes(*self.1[ptr..end])` cannot compile
        // because a slice cannot be dereferenced into the [u8; 2] from_be_bytes needs.
        Ok(u16::from_be_bytes([self.1[ptr], self.1[ptr + 1]]))
    } else {
        Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
    }
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub fn read_u32(&self, cursor: &mut usize) -> std::io::Result<u32> {
let ptr = *cursor;
@ -385,13 +430,27 @@ impl<const L: usize> Buffer<L> {
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(u32::from_be_bytes(unsafe { *self.1.as_ptr().add(ptr).cast() }))
Ok((unsafe { *self.1.as_ptr().add(ptr).cast::<u32>() }).to_be())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Get the next u64 and advance the cursor.
/// Get the next u32 (big-endian) and advance the cursor.
/// Portable fallback for architectures where unaligned loads may fault.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
#[inline(always)]
pub fn read_u32(&self, cursor: &mut usize) -> std::io::Result<u32> {
    // Bug fix: the original declared -> std::io::Result<u16> while returning a u32,
    // which cannot compile; the return type now matches the accelerated variant.
    let ptr = *cursor;
    let end = ptr + 4;
    debug_assert!(end <= L);
    if end <= self.0 {
        *cursor = end;
        // Bug fix: `u32::from_be_bytes(*self.1[ptr..end])` cannot compile because a
        // slice cannot be dereferenced into the [u8; 4] from_be_bytes needs.
        Ok(u32::from_be_bytes([self.1[ptr], self.1[ptr + 1], self.1[ptr + 2], self.1[ptr + 3]]))
    } else {
        Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
    }
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
#[inline(always)]
pub fn read_u64(&self, cursor: &mut usize) -> std::io::Result<u64> {
let ptr = *cursor;
@ -399,7 +458,21 @@ impl<const L: usize> Buffer<L> {
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(u64::from_be_bytes(unsafe { *self.1.as_ptr().add(ptr).cast() }))
Ok((unsafe { *self.1.as_ptr().add(ptr).cast::<u64>() }).to_be())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
#[inline(always)]
pub fn read_u64(&self, cursor: &mut usize) -> std::io::Result<u16> {
let ptr = *cursor;
let end = ptr + 8;
debug_assert!(end <= L);
if end <= self.0 {
*cursor = end;
Ok(u64::from_be_bytes(*self.1[ptr..end]))
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}

View file

@ -16,15 +16,12 @@ pub use zerotier_core_crypto::varint;
pub(crate) const ZEROES: [u8; 64] = [0_u8; 64];
/// Obtain a reference to a sub-array within an existing array.
/// Attempts to violate array bounds will panic or fail to compile.
#[inline(always)]
pub(crate) fn array_range<T, const A: usize, const START: usize, const LEN: usize>(a: &[T; A]) -> &[T; LEN] {
    assert!((START + LEN) <= A);
    // SAFETY: the assert above guarantees [START, START + LEN) lies entirely within
    // the A-element array, and pointer add() already scales by size_of::<T>().
    // (A stale duplicate line referencing an undefined `start_index` and manually
    // multiplying by size_of::<T>() -- a double-scaled byte offset -- was removed.)
    unsafe { &*a.as_ptr().add(START).cast::<[T; LEN]>() }
}
/// View a u64 in place as its native-endian byte array.
/// The reverse cast (bytes to u64) is not safe on some architectures due to
/// alignment, but this direction is safe everywhere.
#[inline(always)]
pub(crate) fn u64_as_bytes(i: &u64) -> &[u8; 8] {
    // SAFETY: u64 and [u8; 8] have identical size, and u64 has the stricter alignment.
    unsafe { &*(i as *const u64 as *const [u8; 8]) }
}

View file

@ -8,6 +8,7 @@
use std::ptr::NonNull;
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicIsize, Ordering};
use parking_lot::Mutex;
@ -22,7 +23,11 @@ struct PoolEntry<O, F: PoolFactory<O>> {
return_pool: Weak<PoolInner<O, F>>,
}
struct PoolInner<O, F: PoolFactory<O>>(F, Mutex<Vec<NonNull<PoolEntry<O, F>>>>);
struct PoolInner<O, F: PoolFactory<O>> {
// Factory used to create new pooled objects and reset ones returned to the pool.
factory: F,
// Stack of idle entries; each is a raw Box pointer pushed back when a Pooled is dropped.
pool: Mutex<Vec<NonNull<PoolEntry<O, F>>>>,
//outstanding_count: AtomicIsize
}
/// Container for pooled objects that have been checked out of the pool.
///
@ -100,9 +105,9 @@ impl<O, F: PoolFactory<O>> Drop for Pooled<O, F> {
let p = Weak::upgrade(&self.0.as_ref().return_pool);
if p.is_some() {
let p = p.unwrap();
p.0.reset(&mut self.0.as_mut().obj);
let mut q = p.1.lock();
q.push(self.0.clone())
p.factory.reset(&mut self.0.as_mut().obj);
p.pool.lock().push(self.0);
//let _ = p.outstanding_count.fetch_sub(1, Ordering::Release);
} else {
drop(Box::from_raw(self.0.as_ptr()))
}
@ -117,21 +122,52 @@ pub struct Pool<O, F: PoolFactory<O>>(Arc<PoolInner<O, F>>);
impl<O, F: PoolFactory<O>> Pool<O, F> {
pub fn new(initial_stack_capacity: usize, factory: F) -> Self {
Self(Arc::new(PoolInner::<O, F>(factory, Mutex::new(Vec::with_capacity(initial_stack_capacity)))))
Self(Arc::new(PoolInner::<O, F> {
factory,
pool: Mutex::new(Vec::with_capacity(initial_stack_capacity)),
//outstanding_count: AtomicIsize::new(0)
}))
}
/// Get a pooled object, or allocate one if the pool is empty.
#[inline(always)]
pub fn get(&self) -> Pooled<O, F> {
unsafe {
Pooled::<O, F>(self.0.1.lock().pop().unwrap_or_else(|| {
//let _ = self.0.outstanding_count.fetch_add(1, Ordering::Acquire);
Pooled::<O, F>(self.0.pool.lock().pop().unwrap_or_else(|| {
unsafe {
NonNull::new_unchecked(Box::into_raw(Box::new(PoolEntry::<O, F> {
obj: self.0.0.create(),
obj: self.0.factory.create(),
return_pool: Arc::downgrade(&self.0),
})))
}))
}
}))
}
/*
/// Get a pooled object, or allocate one if the pool is empty.
/// This will return None if there are more outstanding pooled objects than the limit.
/// The limit is exclusive, so a value of 0 will mean that only one outstanding
/// object will be permitted as in this case there were zero outstanding at time
/// of checkout.
#[inline(always)]
pub fn try_get(&self, outstanding_pooled_object_limit: usize) -> Option<Pooled<O, F>> {
let outstanding = self.0.outstanding_count.fetch_add(1, Ordering::Acquire);
debug_assert!(outstanding >= 0);
if outstanding as usize > outstanding_pooled_object_limit {
let _ = self.0.outstanding_count.fetch_sub(1, Ordering::Release);
None
} else {
Some(Pooled::<O, F>(self.0.pool.lock().pop().unwrap_or_else(|| {
unsafe {
NonNull::new_unchecked(Box::into_raw(Box::new(PoolEntry::<O, F> {
obj: self.0.pool.create(),
return_pool: Arc::downgrade(&self.0),
})))
}
})))
}
}
*/
/// Dispose of all pooled objects, freeing any memory they use.
///
@ -139,7 +175,7 @@ impl<O, F: PoolFactory<O>> Pool<O, F> {
/// objects will still be returned on drop unless the pool itself is dropped. This can
/// be done to free some memory if there has been a spike in memory use.
pub fn purge(&self) {
let mut p = self.0.1.lock();
let mut p = self.0.pool.lock();
loop {
let o = p.pop();
if o.is_some() {

View file

@ -58,14 +58,14 @@ impl Address {
#[inline(always)]
pub(crate) fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
buf.append_and_init_bytes_fixed(|b: &mut [u8; ADDRESS_SIZE]| {
let i = self.0.get();
b[0] = (i >> 32) as u8;
b[1] = (i >> 24) as u8;
b[2] = (i >> 16) as u8;
b[3] = (i >> 8) as u8;
b[4] = i as u8;
})
let b = buf.append_bytes_fixed_get_mut::<ADDRESS_SIZE>()?;
let i = self.0.get();
(*b)[0] = (i >> 32) as u8;
(*b)[1] = (i >> 24) as u8;
(*b)[2] = (i >> 16) as u8;
(*b)[3] = (i >> 8) as u8;
(*b)[4] = i as u8;
Ok(())
}
#[inline(always)]

View file

@ -142,10 +142,10 @@ impl Endpoint {
if type_byte < 16 {
if type_byte == 4 {
let b: &[u8; 6] = buf.read_bytes_fixed(cursor)?;
Ok(Endpoint::IpUdp(InetAddress::from_ip_port(&b[0..4], crate::util::load_u16_be(&b[4..6]))))
Ok(Endpoint::IpUdp(InetAddress::from_ip_port(&b[0..4], u16::from_be_bytes(b[4..6].try_into().unwrap()))))
} else if type_byte == 6 {
let b: &[u8; 18] = buf.read_bytes_fixed(cursor)?;
Ok(Endpoint::IpUdp(InetAddress::from_ip_port(&b[0..16], crate::util::load_u16_be(&b[16..18]))))
Ok(Endpoint::IpUdp(InetAddress::from_ip_port(&b[0..16], u16::from_be_bytes(b[16..18].try_into().unwrap()))))
} else {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "unrecognized endpoint type in stream"))
}

View file

@ -36,8 +36,10 @@ impl EphemeralKeyPairSet {
/// This contains key pairs for the asymmetric key agreement algorithms used and a
/// timestamp used to enforce TTL.
///
/// SIDH is only used the first time and then 1/255 of remaining ratchet clicks because
/// it's slower than the others.
/// SIDH is only used once per ratchet sequence because it's much more CPU intensive
/// than ECDH. The threat model for SIDH is forward secrecy on the order of 5-15 years
/// from now when a quantum computer capable of attacking elliptic curve may exist,
/// it's incredibly unlikely that a p2p link would ever persist that long.
pub fn new(local_address: Address, remote_address: Address, previous_ephemeral_secret: Option<&EphemeralSymmetricSecret>) -> Self {
let (sidhp751, previous_ratchet_state) = previous_ephemeral_secret.map_or_else(|| {
(
@ -46,7 +48,7 @@ impl EphemeralKeyPairSet {
)
}, |previous_ephemeral_secret| {
(
if previous_ephemeral_secret.ratchet_state[0] == 0 { Some(SIDHEphemeralKeyPair::generate(local_address, remote_address)) } else { None },
None,
Some(previous_ephemeral_secret.ratchet_state.clone())
)
});

File diff suppressed because it is too large Load diff

View file

@ -365,20 +365,20 @@ impl InetAddress {
unsafe {
match self.sa.sa_family as u8 {
AF_INET => {
buf.append_and_init_bytes_fixed(|b: &mut [u8; 7]| {
b[0] = 4;
copy_nonoverlapping((&self.sin.sin_addr.s_addr as *const u32).cast::<u8>(), b.as_mut_ptr().offset(1), 4);
b[5] = *(&self.sin.sin_port as *const u16).cast::<u8>();
b[6] = *(&self.sin.sin_port as *const u16).cast::<u8>().offset(1);
})
let b = buf.append_bytes_fixed_get_mut::<7>()?;
b[0] = 4;
copy_nonoverlapping((&self.sin.sin_addr.s_addr as *const u32).cast::<u8>(), b.as_mut_ptr().offset(1), 4);
b[5] = *(&self.sin.sin_port as *const u16).cast::<u8>();
b[6] = *(&self.sin.sin_port as *const u16).cast::<u8>().offset(1);
Ok(())
}
AF_INET6 => {
buf.append_and_init_bytes_fixed(|b: &mut [u8; 19]| {
b[0] = 6;
copy_nonoverlapping((&(self.sin6.sin6_addr) as *const in6_addr).cast::<u8>(), b.as_mut_ptr().offset(1), 16);
b[17] = *(&self.sin6.sin6_port as *const u16).cast::<u8>();
b[18] = *(&self.sin6.sin6_port as *const u16).cast::<u8>().offset(1);
})
let b = buf.append_bytes_fixed_get_mut::<19>()?;
b[0] = 6;
copy_nonoverlapping((&(self.sin6.sin6_addr) as *const in6_addr).cast::<u8>(), b.as_mut_ptr().offset(1), 16);
b[17] = *(&self.sin6.sin6_port as *const u16).cast::<u8>();
b[18] = *(&self.sin6.sin6_port as *const u16).cast::<u8>().offset(1);
Ok(())
}
_ => buf.append_u8(0)
}
@ -389,10 +389,10 @@ impl InetAddress {
let t = buf.read_u8(cursor)?;
if t == 4 {
let b: &[u8; 6] = buf.read_bytes_fixed(cursor)?;
Ok(InetAddress::from_ip_port(&b[0..4], crate::util::load_u16_be(&b[4..6])))
Ok(InetAddress::from_ip_port(&b[0..4], u16::from_be_bytes(b[4..6].try_into().unwrap())))
} else if t == 6 {
let b: &[u8; 18] = buf.read_bytes_fixed(cursor)?;
Ok(InetAddress::from_ip_port(&b[0..16], crate::util::load_u16_be(&b[16..18])))
Ok(InetAddress::from_ip_port(&b[0..16], u16::from_be_bytes(b[16..18].try_into().unwrap())))
} else {
Ok(InetAddress::new())
}

View file

@ -46,15 +46,15 @@ impl MAC {
#[inline(always)]
pub(crate) fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
buf.append_and_init_bytes_fixed(|b: &mut [u8; 6]| {
let i = self.0.get();
b[0] = (i >> 40) as u8;
b[1] = (i >> 32) as u8;
b[2] = (i >> 24) as u8;
b[3] = (i >> 16) as u8;
b[4] = (i >> 8) as u8;
b[5] = i as u8;
})
let b = buf.append_bytes_fixed_get_mut::<6>()?;
let i = self.0.get();
(*b)[0] = (i >> 40) as u8;
(*b)[1] = (i >> 32) as u8;
(*b)[2] = (i >> 24) as u8;
(*b)[3] = (i >> 16) as u8;
(*b)[4] = (i >> 8) as u8;
(*b)[5] = i as u8;
Ok(())
}
#[inline(always)]

View file

@ -9,9 +9,9 @@
pub mod inetaddress;
pub mod endpoint;
pub mod rootset;
pub mod identity;
#[allow(unused)]
pub(crate) mod identity;
pub(crate) mod protocol;
pub(crate) mod node;
pub(crate) mod path;
@ -26,7 +26,7 @@ pub(crate) mod symmetricsecret;
pub use address::Address;
pub use mac::MAC;
pub use identity::{Identity, IdentityType, IDENTITY_TYPE_0_SIGNATURE_SIZE, IDENTITY_TYPE_1_SIGNATURE_SIZE};
pub use identity::Identity;
pub use endpoint::Endpoint;
pub use dictionary::Dictionary;
pub use inetaddress::InetAddress;

View file

@ -21,7 +21,7 @@ use crate::error::InvalidParameterError;
use crate::util::gate::IntervalGate;
use crate::util::pool::{Pool, Pooled};
use crate::util::buffer::Buffer;
use crate::vl1::{Address, Endpoint, Identity, IdentityType};
use crate::vl1::{Address, Endpoint, Identity};
use crate::vl1::path::Path;
use crate::vl1::peer::Peer;
use crate::vl1::protocol::*;
@ -106,10 +106,10 @@ pub trait VL1PacketHandler {
fn handle_packet(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, verb: u8, payload: &Buffer<{ PACKET_SIZE_MAX }>) -> bool;
/// Handle errors, returning true if the error was recognized.
fn handle_error(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_packet_id: PacketID, error_code: u8, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool;
fn handle_error(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_message_id: u64, error_code: u8, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool;
/// Handle an OK, returning true if the OK was recognized.
fn handle_ok(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_packet_id: PacketID, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool;
fn handle_ok(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_message_id: u64, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool;
}
#[derive(Default)]
@ -134,17 +134,14 @@ pub struct Node {
impl Node {
/// Create a new Node.
///
/// If the auto-generate identity type is not None, a new identity will be generated if
/// no identity is currently stored in the data store.
pub fn new<I: NodeInterface>(ci: &I, auto_generate_identity_type: Option<IdentityType>) -> Result<Self, InvalidParameterError> {
pub fn new<I: NodeInterface>(ci: &I, auto_generate_identity: bool) -> Result<Self, InvalidParameterError> {
let id = {
let id_str = ci.load_node_identity();
if id_str.is_none() {
if auto_generate_identity_type.is_none() {
if !auto_generate_identity {
return Err(InvalidParameterError("no identity found and auto-generate not enabled"));
} else {
let id = Identity::generate(auto_generate_identity_type.unwrap());
let id = Identity::generate();
ci.save_node_identity(&id, id.to_string().as_bytes(), id.to_secret_string().as_bytes());
id
}
@ -180,7 +177,7 @@ impl Node {
pub fn packet_buffer_pool(&self) -> &Arc<PacketBufferPool> { &self.buffer_pool }
#[inline(always)]
pub fn address(&self) -> Address { self.identity.address() }
pub fn address(&self) -> Address { self.identity.address }
#[inline(always)]
pub fn identity(&self) -> &Identity { &self.identity }
@ -235,7 +232,7 @@ impl Node {
if dest.is_some() {
let time_ticks = ci.time_ticks();
let dest = dest.unwrap();
if dest == self.identity.address() {
if dest == self.identity.address {
// Handle packets addressed to this node.
let path = self.path(source_endpoint, source_local_socket, source_local_interface);
@ -243,7 +240,7 @@ impl Node {
if fragment_header.is_fragment() {
let _ = path.receive_fragment(fragment_header.id, fragment_header.fragment_no(), fragment_header.total_fragments(), data, time_ticks).map(|assembled_packet| {
let _ = path.receive_fragment(u64::from_ne_bytes(fragment_header.id), fragment_header.fragment_no(), fragment_header.total_fragments(), data, time_ticks).map(|assembled_packet| {
if assembled_packet.frags[0].is_some() {
let frag0 = assembled_packet.frags[0].as_ref().unwrap();
let packet_header = frag0.struct_at::<PacketHeader>(0);

View file

@ -66,7 +66,7 @@ impl Path {
/// Receive a fragment and return a FragmentedPacket if the entire packet was assembled.
/// This returns None if more fragments are needed to assemble the packet.
#[inline(always)]
pub(crate) fn receive_fragment(&self, packet_id: PacketID, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64) -> Option<FragmentedPacket> {
pub(crate) fn receive_fragment(&self, packet_id: u64, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64) -> Option<FragmentedPacket> {
let mut fp = self.fragmented_packets.lock();
// Discard some old waiting packets if the total incoming fragments for a path exceeds a

View file

@ -7,12 +7,12 @@
*/
use std::convert::TryInto;
use std::intrinsics::try;
use std::mem::MaybeUninit;
use std::num::NonZeroI64;
use std::ptr::copy_nonoverlapping;
use std::sync::Arc;
use std::sync::atomic::{AtomicI64, AtomicU64, AtomicU8, Ordering};
use libc::uname;
use parking_lot::Mutex;
@ -32,6 +32,7 @@ use crate::util::buffer::Buffer;
use crate::util::pool::{Pool, PoolFactory};
use crate::vl1::{Dictionary, Endpoint, Identity, InetAddress, Path};
use crate::vl1::ephemeral::EphemeralSymmetricSecret;
use crate::vl1::identity::{IDENTITY_CIPHER_SUITE_INCLUDE_ALL, IDENTITY_CIPHER_SUITE_X25519};
use crate::vl1::node::*;
use crate::vl1::protocol::*;
use crate::vl1::symmetricsecret::SymmetricSecret;
@ -72,12 +73,6 @@ pub struct Peer {
// Static shared secret computed from agreement with identity.
static_secret: SymmetricSecret,
// Derived static secret (in initialized cipher) used to encrypt the dictionary part of HELLO.
static_secret_hello_dictionary: Mutex<AesCtr>,
// Derived static secret used to add full HMAC-SHA384 to packets, currently just HELLO.
static_secret_packet_hmac: Secret<48>,
// Latest ephemeral secret acknowledged with OK(HELLO).
ephemeral_secret: Mutex<Option<Arc<EphemeralSymmetricSecret>>>,
@ -130,7 +125,7 @@ fn salsa_derive_per_packet_key(key: &Secret<48>, header: &PacketHeader, packet_s
#[inline(always)]
fn salsa_poly_create(secret: &SymmetricSecret, header: &PacketHeader, packet_size: usize) -> (Salsa, Poly1305) {
let key = salsa_derive_per_packet_key(&secret.key, header, packet_size);
let mut salsa = Salsa::new(&key.0[0..32], header.id_bytes(), true).unwrap();
let mut salsa = Salsa::new(&key.0[0..32], &header.id, true).unwrap();
let mut poly1305_key = [0_u8; 32];
salsa.crypt_in_place(&mut poly1305_key);
(salsa, Poly1305::new(&poly1305_key).unwrap())
@ -142,11 +137,15 @@ fn try_aead_decrypt(secret: &SymmetricSecret, packet_frag0_payload_bytes: &[u8],
match header.cipher() {
CIPHER_NOCRYPT_POLY1305 => {
if (verb & VERB_MASK) == VERB_VL1_HELLO {
let mut total_packet_len = packet_frag0_payload_bytes.len() + PACKET_HEADER_SIZE;
for f in fragments.iter() {
total_packet_len += f.as_ref().map_or(0, |f| f.len());
}
let _ = payload.append_bytes(packet_frag0_payload_bytes);
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| payload.append_bytes(f)));
}
let (_, mut poly) = salsa_poly_create(secret, header, packet.len());
let (_, mut poly) = salsa_poly_create(secret, header, total_packet_len);
poly.update(payload.as_bytes());
if poly.finish()[0..8].eq(&header.mac) {
*message_id = u64::from_ne_bytes(header.id);
@ -161,7 +160,11 @@ fn try_aead_decrypt(secret: &SymmetricSecret, packet_frag0_payload_bytes: &[u8],
}
CIPHER_SALSA2012_POLY1305 => {
let (mut salsa, mut poly) = salsa_poly_create(secret, header, packet.len());
let mut total_packet_len = packet_frag0_payload_bytes.len() + PACKET_HEADER_SIZE;
for f in fragments.iter() {
total_packet_len += f.as_ref().map_or(0, |f| f.len());
}
let (mut salsa, mut poly) = salsa_poly_create(secret, header, total_packet_len);
poly.update(packet_frag0_payload_bytes);
let _ = payload.append_bytes_get_mut(packet_frag0_payload_bytes.len()).map(|b| salsa.crypt(packet_frag0_payload_bytes, b));
for f in fragments.iter() {
@ -196,7 +199,7 @@ fn try_aead_decrypt(secret: &SymmetricSecret, packet_frag0_payload_bytes: &[u8],
// AES-GMAC-SIV encrypts the packet ID too as part of its computation of a single
// opaque 128-bit tag, so to get the original packet ID we have to grab it from the
// decrypted tag.
*mesasge_id = u64::from_ne_bytes(*array_range::<u8, 16, 0, 8>(tag));
*message_id = u64::from_ne_bytes(*array_range::<u8, 16, 0, 8>(tag));
true
})
}
@ -215,8 +218,6 @@ impl Peer {
Peer {
identity: id,
static_secret: SymmetricSecret::new(static_secret),
static_secret_hello_dictionary: Mutex::new(AesCtr::new(&static_secret_hello_dictionary.0[0..32])),
static_secret_packet_hmac,
ephemeral_secret: Mutex::new(None),
paths: Mutex::new(Vec::new()),
reported_local_ip: Mutex::new(None),
@ -244,7 +245,7 @@ impl Peer {
/// those fragments after the main packet header and first chunk.
pub(crate) fn receive<CI: NodeInterface, PH: VL1PacketHandler>(&self, node: &Node, ci: &CI, ph: &PH, time_ticks: i64, source_path: &Arc<Path>, header: &PacketHeader, packet: &Buffer<{ PACKET_SIZE_MAX }>, fragments: &[Option<PacketBuffer>]) {
let _ = packet.as_bytes_starting_at(PACKET_VERB_INDEX).map(|packet_frag0_payload_bytes| {
let mut payload: Buffer<PACKET_SIZE_MAX> = unsafe { Buffer::new_nozero() };
let mut payload: Buffer<PACKET_SIZE_MAX> = unsafe { Buffer::new_without_memzero() };
let mut message_id = 0_u64;
let ephemeral_secret: Option<Arc<EphemeralSymmetricSecret>> = self.ephemeral_secret.lock().clone();
let forward_secrecy = if !ephemeral_secret.map_or(false, |ephemeral_secret| try_aead_decrypt(&ephemeral_secret.secret, packet_frag0_payload_bytes, header, fragments, &mut payload, &mut message_id)) {
@ -269,7 +270,7 @@ impl Peer {
if extended_authentication {
if payload.len() >= (1 + SHA384_HASH_SIZE) {
let actual_end_of_payload = payload.len() - SHA384_HASH_SIZE;
let hmac = SHA384::hmac_multipart(self.static_secret_packet_hmac.as_ref(), &[u64_as_bytes(&message_id), payload.as_bytes()]);
let hmac = SHA384::hmac_multipart(self.static_secret.packet_hmac_key.as_ref(), &[u64_as_bytes(&message_id), payload.as_bytes()]);
if !hmac.eq(&(payload.as_bytes()[actual_end_of_payload..])) {
return;
}
@ -399,82 +400,78 @@ impl Peer {
/// Send a HELLO to this peer.
///
/// If try_new_endpoint is not None the packet will be sent directly to this endpoint.
/// Otherwise it will be sent via the best direct or indirect path.
///
/// This has its own send logic so it can handle either an explicit endpoint or a
/// known one.
pub(crate) fn send_hello<CI: NodeInterface>(&self, ci: &CI, node: &Node, explicit_endpoint: Option<Endpoint>) -> bool {
let path = if explicit_endpoint.is_none() { self.path(node) } else { None };
explicit_endpoint.as_ref().map_or_else(|| Some(path.as_ref().unwrap().endpoint()), |ep| Some(ep)).map_or(false, |endpoint| {
let mut packet: Buffer<{ PACKET_SIZE_MAX }> = Buffer::new();
let time_ticks = ci.time_ticks();
/// If explicit_endpoint is not None the packet will be sent directly to this endpoint.
/// Otherwise it will be sent via the best direct or indirect path known.
pub(crate) fn send_hello<CI: NodeInterface>(&self, ci: &CI, node: &Node, explicit_endpoint: Option<&Endpoint>) -> bool {
let (path, endpoint) = if explicit_endpoint.is_some() {
(None, explicit_endpoint.unwrap())
} else {
let p = self.path(node);
if p.is_none() {
return false;
}
(p, p.as_ref().unwrap().endpoint())
};
let message_id = self.next_message_id();
let packet_header: &mut PacketHeader = packet.append_struct_get_mut().unwrap();
let hello_fixed_headers: &mut message_component_structs::HelloFixedHeaderFields = packet.append_struct_get_mut().unwrap();
packet_header.id = message_id.to_ne_bytes(); // packet ID and message ID are the same when Poly1305 MAC is used
packet_header.dest = self.identity.address().to_bytes();
packet_header.src = node.address().to_bytes();
packet_header.flags_cipher_hops = CIPHER_NOCRYPT_POLY1305;
hello_fixed_headers.verb = VERB_VL1_HELLO | VERB_FLAG_EXTENDED_AUTHENTICATION;
hello_fixed_headers.version_proto = VERSION_PROTO;
hello_fixed_headers.version_major = VERSION_MAJOR;
hello_fixed_headers.version_minor = VERSION_MINOR;
hello_fixed_headers.version_revision = (VERSION_REVISION as u16).to_be();
hello_fixed_headers.timestamp = (time_ticks as u64).to_be();
let mut packet: Buffer<{ PACKET_SIZE_MAX }> = Buffer::new();
let time_ticks = ci.time_ticks();
debug_assert!(self.identity.marshal(&mut packet, false).is_ok());
debug_assert!(endpoint.marshal(&mut packet).is_ok());
let message_id = self.next_message_id();
let packet_header: &mut PacketHeader = packet.append_struct_get_mut().unwrap();
let hello_fixed_headers: &mut message_component_structs::HelloFixedHeaderFields = packet.append_struct_get_mut().unwrap();
// Write an IV for AES-CTR encryption of the dictionary and allocate two more
// bytes for reserved legacy use below.
let aes_ctr_iv_position = packet.len();
let aes_ctr_iv: &mut [u8; 18] = packet.append_bytes_fixed_get_mut().unwrap();
zerotier_core_crypto::random::fill_bytes_secure(&mut aes_ctr_iv[0..16]);
aes_ctr_iv[12] &= 0x7f; // mask off MSB of counter in iv to play nice with some AES-CTR implementations
packet_header.id = message_id.to_ne_bytes(); // packet ID and message ID are the same when Poly1305 MAC is used
packet_header.dest = self.identity.address.to_bytes();
packet_header.src = node.address().to_bytes();
packet_header.flags_cipher_hops = CIPHER_NOCRYPT_POLY1305;
// LEGACY: create a 16-bit encrypted field that specifies zero "moons." This is ignored now
// but causes old nodes to be able to parse this packet properly. This is not significant in
// terms of encryption or authentication and can disappear once old versions are dead. Newer
// versions ignore these bytes.
let mut salsa_iv = message_id.to_ne_bytes();
salsa_iv[7] &= 0xf8;
Salsa::new(&self.static_secret.secret.0[0..32], &salsa_iv, true).unwrap().crypt(&[0_u8, 0_u8], &mut aes_ctr_iv[16..18]);
hello_fixed_headers.verb = VERB_VL1_HELLO | VERB_FLAG_EXTENDED_AUTHENTICATION;
hello_fixed_headers.version_proto = VERSION_PROTO;
hello_fixed_headers.version_major = VERSION_MAJOR;
hello_fixed_headers.version_minor = VERSION_MINOR;
hello_fixed_headers.version_revision = (VERSION_REVISION as u16).to_be_bytes();
hello_fixed_headers.timestamp = (time_ticks as u64).to_be_bytes();
// Create dictionary that contains extended HELLO fields.
let dict_start_position = packet.len();
let mut dict = Dictionary::new();
dict.set_u64(HELLO_DICT_KEY_INSTANCE_ID, node.instance_id);
dict.set_u64(HELLO_DICT_KEY_CLOCK, ci.time_clock() as u64);
debug_assert!(dict.write_to(&mut packet).is_ok());
assert!(self.identity.marshal(&mut packet, IDENTITY_CIPHER_SUITE_INCLUDE_ALL, false).is_ok());
if self.identity.cipher_suites() == IDENTITY_CIPHER_SUITE_X25519 {
// LEGACY: append an extra zero when marshaling identities containing only
// x25519 keys. This is interpreted as an empty InetAddress by old nodes.
// This isn't needed if a NIST P-521 key or other new key types are present.
// See comments before IDENTITY_CIPHER_SUITE_EC_NIST_P521 in identity.rs.
assert!(packet.append_u8(0).is_ok());
}
// Encrypt extended fields with AES-CTR.
let mut dict_aes = self.static_secret_hello_dictionary.lock();
dict_aes.init(&packet.as_bytes()[aes_ctr_iv_position..aes_ctr_iv_position + 16]);
dict_aes.crypt_in_place(&mut packet.as_bytes_mut()[dict_start_position..]);
drop(dict_aes);
assert!(packet.append_u64(0).is_ok()); // reserved, must be zero for legacy compatibility
assert!(packet.append_u64(node.instance_id).is_ok());
// Append extended authentication HMAC.
debug_assert!(packet.append_bytes_fixed(&SHA384::hmac_multipart(self.static_secret_packet_hmac.as_ref(), &[u64_as_bytes(&message_id), &packet.as_bytes()[PACKET_HEADER_SIZE..]])).is_ok());
// LEGACY: create a 16-bit encrypted field that specifies zero "moons." This is ignored now
// but causes old nodes to be able to parse this packet properly. This is not significant in
// terms of encryption or authentication and can disappear once old versions are dead. Newer
// versions ignore these bytes.
let zero_moon_count = packet.append_bytes_fixed_get_mut::<2>().unwrap();
let mut salsa_iv = message_id.to_ne_bytes();
salsa_iv[7] &= 0xf8;
Salsa::new(&self.static_secret.key.0[0..32], &salsa_iv, true).unwrap().crypt(&[0_u8, 0_u8], zero_moon_count);
// Set outer packet MAC. We use legacy poly1305 for HELLO for backward
// compatibility, but note that newer nodes and roots will check the full
// HMAC-SHA384 above.
let (_, mut poly) = salsa_poly_create(&self.static_secret, packet.struct_at::<PacketHeader>(0).unwrap(), packet.len());
poly.update(packet.as_bytes_starting_at(PACKET_HEADER_SIZE).unwrap());
packet_header.mac.copy_from_slice(&poly.finish()[0..8]);
// Size of dictionary with optional fields, currently none. For future use.
assert!(packet.append_u16(0).is_ok());
self.static_secret.encrypt_count.fetch_add(1, Ordering::Relaxed);
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
// Add full HMAC for strong authentication with newer nodes.
assert!(packet.append_bytes_fixed(&SHA384::hmac_multipart(&self.static_secret.packet_hmac_key.0, &[u64_as_bytes(&message_id), &packet.as_bytes()[PACKET_HEADER_SIZE..]])).is_ok());
path.as_ref().map_or_else(|| {
self.send_to_endpoint(ci, endpoint, None, None, &packet)
}, |path| {
path.log_send(time_ticks);
self.send_to_endpoint(ci, endpoint, path.local_socket(), path.local_interface(), &packet)
})
// LEGACY: set MAC field in header to poly1305 for older nodes.
let (_, mut poly) = salsa_poly_create(&self.static_secret, packet.struct_at::<PacketHeader>(0).unwrap(), packet.len());
poly.update(packet.as_bytes_starting_at(PACKET_HEADER_SIZE).unwrap());
packet_header.mac.copy_from_slice(&poly.finish()[0..8]);
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
path.as_ref().map_or_else(|| {
self.send_to_endpoint(ci, endpoint, None, None, &packet)
}, |path| {
path.log_send(time_ticks);
self.send_to_endpoint(ci, endpoint, path.local_socket(), path.local_interface(), &packet)
})
}
@ -491,16 +488,12 @@ impl Peer {
fn receive_error<CI: NodeInterface, PH: VL1PacketHandler>(&self, ci: &CI, ph: &PH, node: &Node, time_ticks: i64, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
let mut cursor: usize = 0;
let _ = payload.read_struct::<message_component_structs::ErrorHeader>(&mut cursor).map(|error_header| {
let in_re_packet_id = error_header.in_re_packet_id;
let in_re_message_id = u64::from_ne_bytes(error_header.in_re_message_id);
let current_packet_id_counter = self.message_id_counter.load(Ordering::Relaxed);
if current_packet_id_counter.checked_sub(in_re_packet_id).map_or_else(|| {
(!in_re_packet_id).wrapping_add(current_packet_id_counter) < PACKET_RESPONSE_COUNTER_DELTA_MAX
}, |packets_ago| {
packets_ago <= PACKET_RESPONSE_COUNTER_DELTA_MAX
}) {
if current_packet_id_counter.wrapping_sub(in_re_message_id) <= PACKET_RESPONSE_COUNTER_DELTA_MAX {
match error_header.in_re_verb {
_ => {
ph.handle_error(self, source_path, forward_secrecy, extended_authentication, error_header.in_re_verb, in_re_packet_id, error_header.error_code, payload, &mut cursor);
ph.handle_error(self, source_path, forward_secrecy, extended_authentication, error_header.in_re_verb, in_re_message_id, error_header.error_code, payload, &mut cursor);
}
}
}
@ -511,20 +504,16 @@ impl Peer {
fn receive_ok<CI: NodeInterface, PH: VL1PacketHandler>(&self, ci: &CI, ph: &PH, node: &Node, time_ticks: i64, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
let mut cursor: usize = 0;
let _ = payload.read_struct::<message_component_structs::OkHeader>(&mut cursor).map(|ok_header| {
let in_re_packet_id = ok_header.in_re_packet_id;
let in_re_message_id = u64::from_ne_bytes(ok_header.in_re_message_id);
let current_packet_id_counter = self.message_id_counter.load(Ordering::Relaxed);
if current_packet_id_counter.checked_sub(in_re_packet_id).map_or_else(|| {
(!in_re_packet_id).wrapping_add(current_packet_id_counter) < PACKET_RESPONSE_COUNTER_DELTA_MAX
}, |packets_ago| {
packets_ago <= PACKET_RESPONSE_COUNTER_DELTA_MAX
}) {
if current_packet_id_counter.wrapping_sub(in_re_message_id) <= PACKET_RESPONSE_COUNTER_DELTA_MAX {
match ok_header.in_re_verb {
VERB_VL1_HELLO => {
}
VERB_VL1_WHOIS => {
}
_ => {
ph.handle_ok(self, source_path, forward_secrecy, extended_authentication, ok_header.in_re_verb, in_re_packet_id, payload, &mut cursor);
ph.handle_ok(self, source_path, forward_secrecy, extended_authentication, ok_header.in_re_verb, in_re_message_id, payload, &mut cursor);
}
}
}

View file

@ -22,9 +22,6 @@ pub const VERB_VL1_ECHO: u8 = 0x08;
pub const VERB_VL1_PUSH_DIRECT_PATHS: u8 = 0x10;
pub const VERB_VL1_USER_MESSAGE: u8 = 0x14;
pub const HELLO_DICT_KEY_INSTANCE_ID: &'static str = "I";
pub const HELLO_DICT_KEY_CLOCK: &'static str = "C";
/// Default maximum payload size for UDP transport.
///
/// This is small enough to traverse numerous weird networks including PPPoE and Google Cloud's
@ -32,10 +29,7 @@ pub const HELLO_DICT_KEY_CLOCK: &'static str = "C";
/// two fragments.
pub const UDP_DEFAULT_MTU: usize = 1432;
/// KBKDF usage label indicating a key used to encrypt the dictionary inside HELLO.
pub const KBKDF_KEY_USAGE_LABEL_HELLO_DICTIONARY_ENCRYPT: u8 = b'H';
/// KBKDF usage label indicating a key used to HMAC packets, which is currently only used for HELLO.
/// KBKDF usage label indicating a key used to HMAC packets for extended authentication.
pub const KBKDF_KEY_USAGE_LABEL_PACKET_HMAC: u8 = b'M';
/// KBKDF usage label for the first AES-GMAC-SIV key.
@ -62,6 +56,9 @@ pub const EPHEMERAL_SECRET_REJECT_AFTER_USES: u32 = 2147483648; // NIST/FIPS sec
/// Length of an address in bytes.
pub const ADDRESS_SIZE: usize = 5;
/// Length of an address in string format.
pub const ADDRESS_SIZE_STRING: usize = 10;
/// Prefix indicating reserved addresses (that can't actually be addresses).
pub const ADDRESS_RESERVED_PREFIX: u8 = 0xff;
@ -174,6 +171,14 @@ pub const WHOIS_RETRY_MAX: u16 = 3;
/// Maximum number of packets to queue up behind a WHOIS.
pub const WHOIS_MAX_WAITING_PACKETS: usize = 64;
/// Proof of work difficulty (threshold) for old v0 identities.
pub const IDENTITY_V0_POW_THRESHOLD: u8 = 17;
/// Proof of work difficulty (threshold) for new v1 identities.
/// This is lower than the V0 threshold, causing the V0 part of V1 identities to
/// verify on old nodes.
pub const IDENTITY_V1_POW_THRESHOLD: u8 = 5;
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum EphemeralKeyAgreementAlgorithm {
@ -277,7 +282,7 @@ impl PacketHeader {
#[inline(always)]
pub fn aes_gmac_siv_tag(&self) -> [u8; 16] {
let mut id = unsafe { MaybeUninit::<[u8; 16]>::uninit().assume_init() };
id[0..8].copy_from_slice(self.id_bytes());
id[0..8].copy_from_slice(&self.id);
id[8..16].copy_from_slice(&self.mac);
id
}
@ -327,12 +332,11 @@ impl FragmentHeader {
pub(crate) mod message_component_structs {
use crate::util::buffer::RawObject;
use crate::vl1::protocol::PacketID;
#[repr(packed)]
pub struct OkHeader {
pub in_re_verb: u8,
pub in_re_packet_id: PacketID,
pub in_re_message_id: [u8; 8],
}
unsafe impl RawObject for OkHeader {}
@ -340,7 +344,7 @@ pub(crate) mod message_component_structs {
#[repr(packed)]
pub struct ErrorHeader {
pub in_re_verb: u8,
pub in_re_packet_id: PacketID,
pub in_re_message_id: [u8; 8],
pub error_code: u8,
}
@ -352,19 +356,19 @@ pub(crate) mod message_component_structs {
pub version_proto: u8,
pub version_major: u8,
pub version_minor: u8,
pub version_revision: u16,
pub timestamp: u64,
pub version_revision: [u8; 2], // u16
pub timestamp: [u8; 8], // u64
}
unsafe impl RawObject for HelloFixedHeaderFields {}
#[repr(packed)]
pub struct OkHelloFixedHeaderFields {
pub timestamp_echo: u64,
pub timestamp_echo: [u8; 8], // u64
pub version_proto: u8,
pub version_major: u8,
pub version_minor: u8,
pub version_revision: u16,
pub version_revision: [u8; 2], // u16
}
unsafe impl RawObject for OkHelloFixedHeaderFields {}
@ -388,14 +392,5 @@ mod tests {
(*foo.as_mut_ptr().cast::<PacketHeader>()).src[0] = 0xff;
assert_eq!((*foo.as_ptr().cast::<FragmentHeader>()).fragment_indicator, 0xff);
}
let bar = PacketHeader{
id: [1_u8, 2, 3, 4, 5, 6, 7, 8],
dest: [0_u8; 5],
src: [0_u8; 5],
flags_cipher_hops: 0,
mac: [0_u8; 8],
};
assert_eq!(bar.id_bytes().clone(), [1_u8, 2, 3, 4, 5, 6, 7, 8]);
}
}

View file

@ -17,6 +17,7 @@ use zerotier_core_crypto::secret::Secret;
use zerotier_core_crypto::hash::SHA384;
use std::cmp::Ordering;
use crate::vl1::identity::IDENTITY_CIPHER_SUITE_INCLUDE_ALL;
/// Old "planet" type with Ed25519 authenticated updates from ZeroTier v1.
const ROOT_SET_TYPE_LEGACY_PLANET: u8 = 1;
@ -174,7 +175,7 @@ impl RootSet {
buf.append_varint(self.roots.len() as u64)?;
for root in self.roots.iter() {
root.identity.marshal(buf, false)?;
root.identity.marshal(buf, IDENTITY_CIPHER_SUITE_INCLUDE_ALL, false)?;
if (self.type_ == ROOT_SET_TYPE_LEGACY_PLANET || self.type_ == ROOT_SET_TYPE_LEGACY_MOON) && root.endpoints.len() > 127 {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid legacy type root set"));
}

View file

@ -39,9 +39,6 @@ pub struct SymmetricSecret {
/// A key used as input to the ephemeral key ratcheting mechanism.
pub next_ephemeral_ratchet_key: Secret<SHA384_HASH_SIZE>,
/// A key used to encrypt the secret portion of a HELLO packet.
pub hello_dictionary_keyed_cipher: Mutex<AesCtr>,
/// A pool of reusable keyed and initialized AES-GMAC-SIV ciphers.
pub aes_gmac_siv: Pool<AesGmacSiv, AesGmacSivPoolFactory>,
}
@ -58,7 +55,6 @@ impl SymmetricSecret {
pub fn new(base_key: Secret<SHA384_HASH_SIZE>) -> SymmetricSecret {
let usage_packet_hmac = zt_kbkdf_hmac_sha384(&base_key.0, KBKDF_KEY_USAGE_LABEL_PACKET_HMAC, 0, 0);
let usage_ephemeral_ratchet = zt_kbkdf_hmac_sha384(&base_key.0, KBKDF_KEY_USAGE_LABEL_EPHEMERAL_RATCHET, 0, 0);
let usage_hello_dictionary_key = zt_kbkdf_hmac_sha384(&base_key.0, KBKDF_KEY_USAGE_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0);
let aes_factory = AesGmacSivPoolFactory(
zt_kbkdf_hmac_sha384(&base_key.0, KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K0, 0, 0),
zt_kbkdf_hmac_sha384(&base_key.0, KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K1, 0, 0));
@ -66,7 +62,6 @@ impl SymmetricSecret {
key: base_key,
packet_hmac_key: usage_packet_hmac,
next_ephemeral_ratchet_key: usage_ephemeral_ratchet,
hello_dictionary_keyed_cipher: Mutex::new(AesCtr::new(&usage_hello_dictionary_key.0[0..32])),
aes_gmac_siv: Pool::new(2, aes_factory),
}
}

View file

@ -11,7 +11,7 @@ use std::sync::Arc;
use crate::util::buffer::Buffer;
use crate::vl1::node::VL1PacketHandler;
use crate::vl1::{Peer, Path};
use crate::vl1::protocol::{PACKET_SIZE_MAX, PacketID};
use crate::vl1::protocol::*;
pub trait SwitchInterface {
}
@ -24,11 +24,11 @@ impl VL1PacketHandler for Switch {
false
}
fn handle_error(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_packet_id: PacketID, error_code: u8, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool {
fn handle_error(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_message_id: u64, error_code: u8, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool {
false
}
fn handle_ok(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_packet_id: PacketID, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool {
fn handle_ok(&self, peer: &Peer, source_path: &Arc<Path>, forward_secrecy: bool, extended_authentication: bool, in_re_verb: u8, in_re_message_id: u64, payload: &Buffer<{ PACKET_SIZE_MAX }>, cursor: &mut usize) -> bool {
false
}
}

View file

@ -2219,6 +2219,7 @@ version = "2.0.0"
dependencies = [
"base64",
"dashmap",
"lazy_static",
"libc",
"lz4_flex",
"parking_lot",