diff --git a/network-hypervisor/Cargo.lock b/network-hypervisor/Cargo.lock
index ff63a0c1a..764dd23c3 100644
--- a/network-hypervisor/Cargo.lock
+++ b/network-hypervisor/Cargo.lock
@@ -9,6 +9,12 @@ dependencies = [
"gcrypt",
]
+[[package]]
+name = "base64"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
+
[[package]]
name = "bitflags"
version = "1.2.1"
@@ -413,6 +419,7 @@ name = "zerotier-network-hypervisor"
version = "2.0.0"
dependencies = [
"aes-gmac-siv",
+ "base64",
"ed25519-dalek",
"gcrypt",
"rand_core",
diff --git a/network-hypervisor/Cargo.toml b/network-hypervisor/Cargo.toml
index 44939c6d2..154ebf4a3 100644
--- a/network-hypervisor/Cargo.toml
+++ b/network-hypervisor/Cargo.toml
@@ -9,3 +9,4 @@ aes-gmac-siv = { path = "../aes-gmac-siv" }
x25519-dalek = "^1"
ed25519-dalek = "^1"
gcrypt = "^0"
+base64 = "^0"
diff --git a/network-hypervisor/src/crypto/c25519.rs b/network-hypervisor/src/crypto/c25519.rs
index 18a531386..8ae85c288 100644
--- a/network-hypervisor/src/crypto/c25519.rs
+++ b/network-hypervisor/src/crypto/c25519.rs
@@ -1,7 +1,8 @@
use std::convert::TryInto;
-use ed25519_dalek::Digest;
use std::io::Write;
+use ed25519_dalek::Digest;
+
pub const C25519_PUBLIC_KEY_SIZE: usize = 32;
pub const C25519_SECRET_KEY_SIZE: usize = 32;
pub const C25519_SHARED_SECRET_SIZE: usize = 32;
diff --git a/network-hypervisor/src/crypto/hash.rs b/network-hypervisor/src/crypto/hash.rs
index 2d7de9f96..9b53be1c8 100644
--- a/network-hypervisor/src/crypto/hash.rs
+++ b/network-hypervisor/src/crypto/hash.rs
@@ -3,7 +3,7 @@ use std::convert::TryInto;
use std::io::Write;
pub const SHA512_HASH_SIZE: usize = 64;
-pub const SHA384_HASH_SIZE: usize = 64;
+pub const SHA384_HASH_SIZE: usize = 48;
pub struct SHA512(gcrypt::digest::MessageDigest);
diff --git a/network-hypervisor/src/crypto/p521.rs b/network-hypervisor/src/crypto/p521.rs
index c96438431..72ead04a4 100644
--- a/network-hypervisor/src/crypto/p521.rs
+++ b/network-hypervisor/src/crypto/p521.rs
@@ -131,6 +131,7 @@ impl P521KeyPair {
}
/// Create an ECDSA signature of the input message.
+ /// Message data does not need to be pre-hashed.
pub fn sign(&self, msg: &[u8]) -> Option<[u8; P521_ECDSA_SIGNATURE_SIZE]> {
let data = SExpression::from_str(unsafe { std::str::from_utf8_unchecked(&hash_to_data_sexp(msg)) }).unwrap();
gcrypt::pkey::sign(&self.secret_key_for_ecdsa, &data).map_or(None, |sig| {
@@ -171,6 +172,8 @@ impl P521PublicKey {
}
}
+ /// Verify a signature.
+ /// Message data does not need to be pre-hashed.
pub fn verify(&self, msg: &[u8], signature: &[u8]) -> bool {
if signature.len() == P521_ECDSA_SIGNATURE_SIZE {
let data = SExpression::from_str(unsafe { std::str::from_utf8_unchecked(&hash_to_data_sexp(msg)) }).unwrap();
diff --git a/network-hypervisor/src/lib.rs b/network-hypervisor/src/lib.rs
index 5e5d45548..86b0e1063 100644
--- a/network-hypervisor/src/lib.rs
+++ b/network-hypervisor/src/lib.rs
@@ -2,3 +2,4 @@ pub mod crypto;
pub mod vl1;
pub mod util;
pub mod error;
+pub mod vl2;
diff --git a/network-hypervisor/src/util/mod.rs b/network-hypervisor/src/util/mod.rs
index ce02e677d..05eadb521 100644
--- a/network-hypervisor/src/util/mod.rs
+++ b/network-hypervisor/src/util/mod.rs
@@ -1 +1,42 @@
pub mod hex;
+
+#[inline(always)]
+pub(crate) fn integer_store_be_u16(i: u16, d: &mut [u8]) {
+ d[0] = (i >> 8) as u8;
+ d[1] = i as u8;
+}
+
+#[inline(always)]
+pub(crate) fn integer_store_be_u32(i: u32, d: &mut [u8]) {
+ d[0] = (i >> 24) as u8;
+ d[1] = (i >> 16) as u8;
+ d[2] = (i >> 8) as u8;
+ d[3] = i as u8;
+}
+
+#[inline(always)]
+pub(crate) fn integer_store_be_u64(i: u64, d: &mut [u8]) {
+ d[0] = (i >> 56) as u8;
+ d[1] = (i >> 48) as u8;
+ d[2] = (i >> 40) as u8;
+ d[3] = (i >> 32) as u8;
+ d[4] = (i >> 24) as u8;
+ d[5] = (i >> 16) as u8;
+ d[6] = (i >> 8) as u8;
+ d[7] = i as u8;
+}
+
+#[inline(always)]
+pub(crate) fn integer_load_be_u16(d: &[u8]) -> u16 {
+ (d[0] as u16) << 8 | (d[1] as u16)
+}
+
+#[inline(always)]
+pub(crate) fn integer_load_be_u32(d: &[u8]) -> u32 {
+ (d[0] as u32) << 24 | (d[1] as u32) << 16 | (d[2] as u32) << 8 | (d[3] as u32)
+}
+
+#[inline(always)]
+pub(crate) fn integer_load_be_u64(d: &[u8]) -> u64 {
+ (d[0] as u64) << 56 | (d[1] as u64) << 48 | (d[2] as u64) << 40 | (d[3] as u64) << 32 | (d[4] as u64) << 24 | (d[5] as u64) << 16 | (d[6] as u64) << 8 | (d[7] as u64)
+}
diff --git a/network-hypervisor/src/vl1/address.rs b/network-hypervisor/src/vl1/address.rs
index 2370466ac..0c985e988 100644
--- a/network-hypervisor/src/vl1/address.rs
+++ b/network-hypervisor/src/vl1/address.rs
@@ -13,7 +13,7 @@ impl Address {
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> Result
{
if b.len() >= 5 {
- Ok(Address((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 as u64 | b[4] as u64))
+ Ok(Address((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64))
} else {
Err(InvalidFormatError("invalid ZeroTier address"))
}
@@ -25,13 +25,13 @@ impl Address {
}
#[inline(always)]
- pub fn is_valid(&self) -> bool {
- self.0 != 0 && !self.is_reserved()
+ pub fn is_nil(&self) -> bool {
+ self.0 == 0
}
#[inline(always)]
- pub fn is_nil(&self) -> bool {
- self.0 == 0
+ pub fn is_valid(&self) -> bool {
+ !self.is_nil() && !self.is_reserved()
}
#[inline(always)]
@@ -46,7 +46,6 @@ impl Address {
}
impl ToString for Address {
- #[inline(always)]
fn to_string(&self) -> String {
let mut v = self.0 << 24;
let mut s = String::new();
@@ -62,7 +61,6 @@ impl ToString for Address {
impl FromStr for Address {
type Err = InvalidFormatError;
- #[inline(always)]
fn from_str(s: &str) -> Result {
Address::from_bytes(crate::util::hex::from_string(s).as_slice())
}
@@ -85,7 +83,7 @@ impl Hash for Address {
impl From<&[u8; 5]> for Address {
#[inline(always)]
fn from(b: &[u8; 5]) -> Address {
- Address((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 as u64 | b[4] as u64)
+ Address((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64)
}
}
diff --git a/network-hypervisor/src/vl1/buffer.rs b/network-hypervisor/src/vl1/buffer.rs
index f0a246cc2..3dd4619d3 100644
--- a/network-hypervisor/src/vl1/buffer.rs
+++ b/network-hypervisor/src/vl1/buffer.rs
@@ -1,8 +1,6 @@
use std::mem::size_of;
use std::marker::PhantomData;
-
-const FAULT_BIT: usize = 1_usize << ((size_of::() * 8) - 1);
-const FAULT_CLEAR_MASK: usize = !FAULT_BIT;
+use std::io::Write;
/// Annotates a type as containing only primitive types like integers and arrays.
/// This means it's safe to abuse with raw copy, raw zero, or "type punning."
@@ -19,13 +17,6 @@ unsafe impl RawObject for NoHeader {}
/// This also supports a generic header that must be a RawObject and will always be
/// placed at the beginning of the buffer. When you construct or clear() a buffer
/// space will be maintained for the header. Use NoHeader if you don't want a header.
-///
-/// If a write overflow occurs during append operations, the operations fail silently
-/// without increasing the buffer's size and an internal fault bit is set. The
-/// check_overflow() method must be used before the buffer is actually complete to
-/// ensure that no write overflows occurred. If this check isn't performed a buffer
-/// could be used with incomplete or corrupt data, but no crash or memory errors will
-/// occur.
#[derive(Clone)]
pub struct Buffer(usize, [u8; L], PhantomData);
@@ -34,7 +25,6 @@ unsafe impl RawObject for Buffer {}
impl Default for Buffer {
#[inline(always)]
fn default() -> Self {
- assert!(size_of::() <= L);
Buffer(size_of::(), [0_u8; L], PhantomData::default())
}
}
@@ -45,125 +35,285 @@ impl Buffer {
Self::default()
}
- /// Returns true if there has been a write overflow.
- #[inline(always)]
- pub fn check_overflow(&self) -> bool {
- (self.0 & FAULT_BIT) != 0
- }
-
/// Get a slice containing the entire buffer in raw form including the header.
#[inline(always)]
pub fn as_bytes(&self) -> &[u8] {
- &self.1[0..(self.0 & FAULT_CLEAR_MASK)]
+ &self.1[0..self.0]
}
/// Erase contents and reset size to the size of the header.
#[inline(always)]
pub fn clear(&mut self) {
+ self.1[0..self.0].fill(0);
self.0 = size_of::();
- self.1.fill(0);
}
/// Get the length of this buffer (including header, if any).
#[inline(always)]
pub fn len(&self) -> usize {
- self.0 & FAULT_CLEAR_MASK
+ self.0
}
/// Get a reference to the header (in place).
#[inline(always)]
pub fn header(&self) -> &H {
+ debug_assert!(size_of::() <= L);
unsafe { &*self.1.as_ptr().cast::() }
}
/// Get a mutable reference to the header (in place).
#[inline(always)]
pub fn header_mut(&mut self) -> &mut H {
+ debug_assert!(size_of::() <= L);
unsafe { &mut *self.1.as_mut_ptr().cast::() }
}
- /// Append a packed structure and initializing it in place via the supplied function.
- ///
- /// If an overflow occurs the overflow fault bit is set internally (see check_overflow())
- /// and the supplied function will never be called.
+ /// Append a packed structure and call a function to initialize it in place.
+ /// Anything not initialized will be zero.
#[inline(always)]
- pub fn append_and_init_struct(&mut self, initializer: F) {
- let bl = self.0;
- let s = bl + size_of::();
- if s <= L {
+ pub fn append_and_init_struct R>(&mut self, initializer: F) -> std::io::Result {
+ let ptr = self.0;
+ let end = ptr + size_of::();
+ if end <= L {
+ self.0 = end;
unsafe {
- self.0 = s;
- initializer(&mut *self.1.as_mut_ptr().cast::().offset(bl as isize).cast::());
+ Ok(initializer(&mut *self.1.as_mut_ptr().cast::().offset(ptr as isize).cast::()))
}
} else {
- self.0 = bl | FAULT_BIT;
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
}
}
/// Append and initialize a byte array with a fixed size set at compile time.
- ///
/// This is more efficient than setting a size at runtime as it may allow the compiler to
- /// skip some bounds checking.
- ///
- /// If an overflow occurs the overflow fault bit is set internally (see check_overflow())
- /// and the supplied function will never be called.
+ /// skip some bounds checking. Any bytes not initialized will be zero.
#[inline(always)]
- pub fn append_and_init_bytes_fixed(&mut self, initializer: F) {
- let bl = self.0;
- let s = bl + N;
- if s <= L {
+ pub fn append_and_init_bytes_fixed R, const N: usize>(&mut self, initializer: F) -> std::io::Result {
+ let ptr = self.0;
+ let end = ptr + N;
+ if end <= L {
+ self.0 = end;
unsafe {
- let ptr = self.1.as_mut_ptr().cast::().offset(bl as isize);
- self.0 = s;
- initializer(&mut *ptr.cast::<[u8; N]>());
+ Ok(initializer(&mut *self.1.as_mut_ptr().cast::().offset(ptr as isize).cast::<[u8; N]>()))
}
} else {
- self.0 = bl | FAULT_BIT;
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
}
}
/// Append and initialize a slice with a size that is set at runtime.
- ///
- /// If an overflow occurs the overflow fault bit is set internally (see check_overflow())
- /// and the supplied function will never be called.
+ /// Any bytes not initialized will be zero.
#[inline(always)]
- pub fn append_and_init_bytes(&mut self, l: usize, initializer: F) {
- let bl = self.0;
- let s = bl + l;
- if s <= L {
- self.0 = s;
- initializer(&mut self.1[bl..s]);
+ pub fn append_and_init_bytes R>(&mut self, l: usize, initializer: F) -> std::io::Result {
+ let ptr = self.0;
+ let end = ptr + l;
+ if end <= L {
+ self.0 = end;
+ Ok(initializer(&mut self.1[ptr..end]))
} else {
- self.0 = bl | FAULT_BIT;
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
}
}
- pub fn read_payload(&self) -> Reader {
- Reader {
- buffer: self,
- ptr: size_of::(),
+ /// Append a dynamic byte slice (copy into buffer).
+ /// Use append_and_init_ functions if possible as these avoid extra copies.
+ #[inline(always)]
+ fn append_bytes(&mut self, buf: &[u8]) -> std::io::Result<()> {
+ let ptr = self.0;
+ let end = ptr + buf.len();
+ if end <= L {
+ self.0 = end;
+ self.1[ptr..end].copy_from_slice(buf);
+ Ok(())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
}
}
-}
-pub struct Reader<'a, H: RawObject, const L: usize> {
- ptr: usize,
- buffer: &'a Buffer,
-}
+ /// Append a fixed length byte array (copy into buffer).
+ /// Use append_and_init_ functions if possible as these avoid extra copies.
+ #[inline(always)]
+ fn append_bytes_fixed(&mut self, buf: &[u8; S]) -> std::io::Result<()> {
+ let ptr = self.0;
+ let end = ptr + S;
+ if end <= L {
+ self.0 = end;
+ self.1[ptr..end].copy_from_slice(buf);
+ Ok(())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
-impl<'a, H: RawObject, const L: usize> Reader<'a, H, L> {
- pub fn read_struct bool>(&mut self, visitor: F) -> bool {
- let rl = self.ptr;
- let s = rl + size_of::();
- if s <= L {
+ /// Append a byte
+ #[inline(always)]
+ fn append_u8(&mut self, i: u8) -> std::io::Result<()> {
+ let ptr = self.0;
+ if ptr < L {
+ self.0 = ptr + 1;
+ self.1[ptr] = i;
+ Ok(())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Append a 16-bit integer (in big-endian form)
+ #[inline(always)]
+ fn append_u16(&mut self, i: u16) -> std::io::Result<()> {
+ let ptr = self.0;
+ let end = ptr + 2;
+ if end <= L {
+ self.0 = end;
+ crate::util::integer_store_be_u16(i, &mut self.1[ptr..end]);
+ Ok(())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Append a 32-bit integer (in big-endian form)
+ #[inline(always)]
+ fn append_u32(&mut self, i: u32) -> std::io::Result<()> {
+ let ptr = self.0;
+ let end = ptr + 4;
+ if end <= L {
+ self.0 = end;
+ crate::util::integer_store_be_u32(i, &mut self.1[ptr..end]);
+ Ok(())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Append a 64-bit integer (in big-endian form)
+ #[inline(always)]
+ fn append_u64(&mut self, i: u64) -> std::io::Result<()> {
+ let ptr = self.0;
+ let end = ptr + 8;
+ if end <= L {
+ self.0 = end;
+ crate::util::integer_store_be_u64(i, &mut self.1[ptr..end]);
+ Ok(())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Get a structure at a given position in the buffer and advance the cursor.
+ #[inline(always)]
+ pub fn get_struct(&self, cursor: &mut usize) -> std::io::Result<&T> {
+ let ptr = *cursor;
+ let end = ptr + size_of::();
+ if end <= self.0 {
+ *cursor = end;
unsafe {
- self.ptr = s;
- visitor(&*self.buffer.1.as_ptr().cast::().offset(rl as isize).cast::(), self)
+ Ok(&*self.1.as_ptr().cast::().offset(ptr as isize).cast::())
}
} else {
- false
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
}
}
+
+ /// Get a fixed length byte array and advance the cursor.
+ /// This is slightly more efficient than reading a runtime sized byte slice.
+ #[inline(always)]
+ pub fn get_bytes_fixed(&self, cursor: &mut usize) -> std::io::Result<&[u8; S]> {
+ let ptr = *cursor;
+ let end = ptr + S;
+ if end <= self.0 {
+ *cursor = end;
+ unsafe {
+ Ok(&*self.1.as_ptr().cast::().offset(ptr as isize).cast::<[u8; S]>())
+ }
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Get a runtime specified length byte slice and advance the cursor.
+ #[inline(always)]
+ pub fn get_bytes(&self, l: usize, cursor: &mut usize) -> std::io::Result<&[u8]> {
+ let ptr = *cursor;
+ let end = ptr + l;
+ if end <= self.0 {
+ *cursor = end;
+ Ok(&self.1[ptr..end])
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Get the next u8 and advance the cursor.
+ #[inline(always)]
+ pub fn get_u8(&self, cursor: &mut usize) -> std::io::Result {
+ let ptr = *cursor;
+ if ptr < self.0 {
+ *cursor = ptr + 1;
+ Ok(self.1[ptr])
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Get the next u16 and advance the cursor.
+ #[inline(always)]
+ pub fn get_u16(&self, cursor: &mut usize) -> std::io::Result {
+ let ptr = *cursor;
+ let end = ptr + 2;
+ if end <= self.0 {
+ *cursor = end;
+ Ok(crate::util::integer_load_be_u16(&self.1[ptr..end]))
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Get the next u32 and advance the cursor.
+ #[inline(always)]
+ pub fn get_u32(&self, cursor: &mut usize) -> std::io::Result {
+ let ptr = *cursor;
+ let end = ptr + 4;
+ if end <= self.0 {
+ *cursor = end;
+ Ok(crate::util::integer_load_be_u32(&self.1[ptr..end]))
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ /// Get the next u64 and advance the cursor.
+ #[inline(always)]
+ pub fn get_u64(&self, cursor: &mut usize) -> std::io::Result {
+ let ptr = *cursor;
+ let end = ptr + 8;
+ if end <= self.0 {
+ *cursor = end;
+ Ok(crate::util::integer_load_be_u64(&self.1[ptr..end]))
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+}
+
+impl Write for Buffer {
+ #[inline(always)]
+ fn write(&mut self, buf: &[u8]) -> std::io::Result {
+ let ptr = self.0;
+ let end = ptr + buf.len();
+ if end <= L {
+ self.0 = end;
+ self.1[ptr..end].copy_from_slice(buf);
+ Ok(buf.len())
+ } else {
+ std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "overflow"))
+ }
+ }
+
+ #[inline(always)]
+ fn flush(&mut self) -> std::io::Result<()> {
+ Ok(())
+ }
}
#[cfg(test)]
diff --git a/network-hypervisor/src/vl1/identity.rs b/network-hypervisor/src/vl1/identity.rs
new file mode 100644
index 000000000..c23e189d0
--- /dev/null
+++ b/network-hypervisor/src/vl1/identity.rs
@@ -0,0 +1,133 @@
+use std::io::Write;
+
+use crate::crypto::c25519::{C25519_PUBLIC_KEY_SIZE, ED25519_PUBLIC_KEY_SIZE, C25519_SECRET_KEY_SIZE, ED25519_SECRET_KEY_SIZE, C25519KeyPair, Ed25519KeyPair};
+use crate::crypto::p521::{P521KeyPair, P521PublicKey, P521_ECDSA_SIGNATURE_SIZE, P521_PUBLIC_KEY_SIZE, P521_SECRET_KEY_SIZE};
+use crate::vl1::Address;
+use crate::crypto::hash::SHA384;
+
+#[derive(Copy, Clone)]
+#[repr(u8)]
+pub enum Type {
+ /// Curve25519 / Ed25519 identity (type 0)
+ C25519 = 0,
+    /// NIST P-521 ECDH / ECDSA identity (also has c25519/ed25519 keys for backward compatibility) (type 1)
+ P521 = 1
+}
+
+struct IdentitySecrets {
+ c25519: C25519KeyPair,
+ ed25519: Ed25519KeyPair,
+ p521: Option<(P521KeyPair, P521KeyPair)>, // ecdh key, ecdsa key
+}
+
+pub struct Identity {
+ address: Address,
+ c25519_public: [u8; C25519_PUBLIC_KEY_SIZE],
+ ed25519_public: [u8; ED25519_PUBLIC_KEY_SIZE],
+ p521_public: Option<(P521PublicKey, P521PublicKey, [u8; P521_ECDSA_SIGNATURE_SIZE])>, // ecdh key, ecdsa key, ecdsa signature of all keys
+ secrets: Option,
+}
+
+impl Identity {
+ fn generate_c25519() {
+ }
+
+ fn generate_p521() {
+ }
+
+ /// Generate a new identity.
+ /// This is time consuming due to the one-time anti-collision proof of work required
+ /// to generate an address corresponding with a set of identity keys. V0 identities
+    /// take tens to hundreds of milliseconds on a typical 2020 system, while V1 identities
+ /// take about 500ms. Generation can take a lot longer on low power devices, but only
+ /// has to be done once.
+ pub fn generate(id_type: Type) {
+ match id_type {
+ Type::C25519 => Self::generate_c25519(),
+ Type::P521 => Self::generate_p521()
+ }
+ }
+
+ /// Execute ECDH key agreement and return SHA384(shared secret).
+ /// If both keys are type 1, key agreement is done with NIST P-521. Otherwise it's done
+ /// with Curve25519. None is returned if there is an error such as this identity missing
+ /// its secrets or a key being invalid.
+ pub fn agree(&self, other_identity: &Identity) -> Option<[u8; 48]> {
+ self.secrets.as_ref().map_or(None, |secrets| {
+ secrets.p521.as_ref().map_or_else(|| {
+ Some(SHA384::hash(&secrets.c25519.agree(&other_identity.c25519_public)))
+ }, |p521_secret| {
+ other_identity.p521_public.as_ref().map_or_else(|| {
+ Some(SHA384::hash(&secrets.c25519.agree(&other_identity.c25519_public)))
+ }, |other_p521_public| {
+ p521_secret.0.agree(&other_p521_public.0).map_or(None, |secret| Some(SHA384::hash(&secret)))
+ })
+ })
+ })
+ }
+
+ /// Sign this message with this identity.
+ /// Signature is performed using ed25519 EDDSA or NIST P-521 ECDSA depending on the identity
+ /// type. None is returned if this identity lacks secret keys or another error occurs.
+ pub fn sign(&self, msg: &[u8]) -> Option> {
+ self.secrets.as_ref().map_or(None, |secrets| {
+ secrets.p521.as_ref().map_or_else(|| {
+ Some(secrets.ed25519.sign(msg).to_vec())
+ }, |p521_secret| {
+ p521_secret.1.sign(msg).map_or(None, |sig| Some(sig.to_vec()))
+ })
+ })
+ }
+
+ /// Get this identity's type.
+ #[inline(always)]
+ pub fn id_type(&self) -> Type {
+ if self.p521_public.is_some() {
+ Type::P521
+ } else {
+ Type::C25519
+ }
+ }
+
+ /// Returns true if this identity also holds its secret keys.
+ #[inline(always)]
+ pub fn has_secrets(&self) -> bool {
+ self.secrets.is_some()
+ }
+
+ /// Get this identity in string format, including its secret keys.
+ pub fn to_secret_string(&self) -> String {
+ self.secrets.as_ref().map_or_else(|| {
+ self.to_string()
+ }, |secrets| {
+ secrets.p521.as_ref().map_or_else(|| {
+ format!("{}:{}{}", self.to_string(), crate::util::hex::to_string(&secrets.c25519.secret_bytes()), crate::util::hex::to_string(&secrets.ed25519.secret_bytes()))
+ }, |p521_secret| {
+ let mut secret_key_blob: Vec = Vec::new();
+ secret_key_blob.reserve(C25519_SECRET_KEY_SIZE + ED25519_SECRET_KEY_SIZE + P521_SECRET_KEY_SIZE + P521_SECRET_KEY_SIZE);
+ let _ = secret_key_blob.write_all(&secrets.c25519.secret_bytes());
+ let _ = secret_key_blob.write_all(&secrets.ed25519.secret_bytes());
+ let _ = secret_key_blob.write_all(p521_secret.0.secret_key_bytes());
+ let _ = secret_key_blob.write_all(p521_secret.1.secret_key_bytes());
+ format!("{}:{}", self.to_string(), base64::encode_config(secret_key_blob.as_slice(), base64::URL_SAFE_NO_PAD))
+ })
+ })
+ }
+}
+
+impl ToString for Identity {
+ fn to_string(&self) -> String {
+ self.p521_public.as_ref().map_or_else(|| {
+ format!("{:0>10x}:0:{}{}", self.address.to_u64(), crate::util::hex::to_string(&self.c25519_public), crate::util::hex::to_string(&self.ed25519_public))
+ }, |p521_public| {
+ let mut public_key_blob: Vec = Vec::new();
+ public_key_blob.reserve(C25519_PUBLIC_KEY_SIZE + ED25519_PUBLIC_KEY_SIZE + P521_PUBLIC_KEY_SIZE + P521_PUBLIC_KEY_SIZE + P521_ECDSA_SIGNATURE_SIZE);
+ let _ = public_key_blob.write_all(&self.c25519_public);
+ let _ = public_key_blob.write_all(&self.ed25519_public);
+ let _ = public_key_blob.write_all(p521_public.0.public_key_bytes());
+ let _ = public_key_blob.write_all(p521_public.1.public_key_bytes());
+ let _ = public_key_blob.write_all(&p521_public.2);
+ format!("{:0>10x}:1:{}", self.address.to_u64(), base64::encode_config(public_key_blob.as_slice(), base64::URL_SAFE_NO_PAD))
+ })
+ }
+}
diff --git a/network-hypervisor/src/vl1/mod.rs b/network-hypervisor/src/vl1/mod.rs
index 7ae531cc2..4ca557fe1 100644
--- a/network-hypervisor/src/vl1/mod.rs
+++ b/network-hypervisor/src/vl1/mod.rs
@@ -1,8 +1,8 @@
pub mod protocol;
pub mod packet;
pub mod buffer;
+pub mod node;
mod address;
-mod mac;
+mod identity;
pub use address::Address;
-pub use mac::MAC;
diff --git a/network-hypervisor/src/vl1/node.rs b/network-hypervisor/src/vl1/node.rs
new file mode 100644
index 000000000..b404175b9
--- /dev/null
+++ b/network-hypervisor/src/vl1/node.rs
@@ -0,0 +1,59 @@
+//use crate::vl1::Address;
+
+/*
+/// Handler for events generated by the node that pertain to VL1.
+pub trait VL1NodeEventHandler: Sync + Send {
+ /// Called when a core ZeroTier event occurs.
+ fn event(&self, event: Event, event_data: &[u8]);
+
+ /// Called to store an object into the object store.
+ fn state_put(&self, obj_type: StateObjectType, obj_id: &[u64], obj_data: &[u8]) -> std::io::Result<()>;
+
+ /// Called to retrieve an object from the object store.
+ fn state_get(&self, obj_type: StateObjectType, obj_id: &[u64]) -> std::io::Result>;
+
+ /// Called to send a packet over the physical network (virtual -> physical).
+ fn wire_packet_send(&self, local_socket: i64, sock_addr: &InetAddress, data: &[u8], packet_ttl: u32) -> i32;
+
+ /// Called to check and see if a physical address should be used for ZeroTier traffic.
+ fn path_check(&self, address: Address, id: &Identity, local_socket: i64, sock_addr: &InetAddress) -> bool;
+
+ /// Called to look up a path to a known node, allowing out of band lookup methods for physical paths to nodes.
+ fn path_lookup(&self, address: Address, id: &Identity, desired_family: InetAddressFamily) -> Option;
+}
+
+pub struct Node {
+ handler: H,
+}
+
+impl Node {
+ pub fn new(handler: H) -> Self {
+ Self {
+ handler,
+ }
+ }
+
+ pub fn handler(&self) -> &H {
+ &self.handler
+ }
+
+ pub fn handler_mut(&mut self) -> &mut H {
+ &mut self.handler
+ }
+
+ /// Perform periodic background tasks.
+ /// The first call should happen no more than NODE_BACKGROUND_TASKS_MAX_INTERVAL milliseconds
+ /// since the node was created, and after this runs it returns the amount of time the caller
+ /// should wait before calling it again.
+ #[inline(always)]
+ pub fn process_background_tasks(&self, clock: i64, ticks: i64) -> i64 {
+ 0
+ }
+
+ /// Get the address of this node.
+ #[inline(always)]
+ pub fn address(&self) -> Address {
+ Address::default()
+ }
+}
+*/
\ No newline at end of file
diff --git a/network-hypervisor/src/vl1/packet.rs b/network-hypervisor/src/vl1/packet.rs
index a5923ec13..5746d189f 100644
--- a/network-hypervisor/src/vl1/packet.rs
+++ b/network-hypervisor/src/vl1/packet.rs
@@ -51,7 +51,6 @@ impl Header {
}
}
-/// Packet is a Buffer with the packet Header and the packet max payload size.
pub type Packet = Buffer;
#[derive(Clone)]
@@ -83,6 +82,8 @@ impl FragmentHeader {
}
}
+type Fragment = Buffer;
+
#[cfg(test)]
mod tests {
use std::mem::size_of;
diff --git a/network-hypervisor/src/vl1/mac.rs b/network-hypervisor/src/vl2/mac.rs
similarity index 100%
rename from network-hypervisor/src/vl1/mac.rs
rename to network-hypervisor/src/vl2/mac.rs
diff --git a/network-hypervisor/src/vl2/mod.rs b/network-hypervisor/src/vl2/mod.rs
new file mode 100644
index 000000000..9ac77518a
--- /dev/null
+++ b/network-hypervisor/src/vl2/mod.rs
@@ -0,0 +1,3 @@
+mod mac;
+
+pub use mac::MAC;
diff --git a/vli/Cargo.lock b/vli/Cargo.lock
new file mode 100644
index 000000000..a38e846a1
--- /dev/null
+++ b/vli/Cargo.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "vli"
+version = "0.1.0"
diff --git a/vli/Cargo.toml b/vli/Cargo.toml
new file mode 100644
index 000000000..021a7a1c9
--- /dev/null
+++ b/vli/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "vli"
+version = "0.1.0"
+edition = "2018"
+
+[profile.test]
+opt-level = 3
+lto = true
+codegen-units = 1
+
+[dependencies]
diff --git a/vli/src/lib.rs b/vli/src/lib.rs
new file mode 100644
index 000000000..d4ef3302d
--- /dev/null
+++ b/vli/src/lib.rs
@@ -0,0 +1,411 @@
+use std::cmp::Ordering;
+use std::ops::{Add, AddAssign, Sub, Shl, Shr, SubAssign, ShlAssign, ShrAssign};
+use std::io::Write;
+use std::mem::MaybeUninit;
+
+const HEX_CHARS: [char; 16] = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'];
+
+/// Arbitrarily large unsigned integer supporting basic and modular arithmetic.
+///
+/// LIMBS is the number of 64-bit "limbs" (large digits) in this VLI. The number of bits is
+/// 64 times LIMBS, so for 1024 bits use 16 LIMBS. We don't have a generic BITS instead
+/// because of current limitations on Rust const generics and what can be done with them.
+/// This also means the size of a VLI must be a multiple of 64 bits. Note that the actual
+/// integer in it need not be exactly that length, just the capacity of the container.
+#[derive(Clone, PartialEq, Eq)]
+pub struct VLI<const LIMBS: usize> {
+ n: [u64; LIMBS]
+}
+
+impl<const LIMBS: usize> Default for VLI<LIMBS> {
+ #[inline(always)]
+ fn default() -> Self {
+ Self { n: [0_u64; LIMBS ]}
+ }
+}
+
+impl<const LIMBS: usize> VLI<LIMBS> {
+ /// Create a new zero VLI.
+ #[inline(always)]
+ pub fn new() -> Self {
+ Self { n: [0_u64; LIMBS ]}
+ }
+
+ /// Set to zero.
+ #[inline(always)]
+ pub fn zero(&mut self) {
+ self.n.fill(0)
+ }
+
+ /// Test whether bit is set (numbered from right to left).
+ /// This will panic if bit is out of range.
+ #[inline(always)]
+ pub fn test_bit(&self, bit: usize) -> bool {
+ (self.n[bit >> 6] & (1_u64 << (bit & 63))) != 0
+ }
+
+ /// Count the number of non-zero bits.
+ pub fn count_ones(&self) -> u32 {
+ let mut ones = 0_u32;
+ for n in self.n {
+ ones += n.count_ones();
+ }
+ ones
+ }
+
+ /// Count the number of zero bits.
+ #[inline(always)]
+ pub fn count_zeros(&self) -> u32 {
+ let mut zeros = 0_u32;
+ for n in self.n {
+ zeros += n.count_zeros();
+ }
+ zeros
+ }
+
+ /// Returns true if this integer is zero.
+ #[inline(always)]
+ pub fn is_zero(&self) -> bool {
+ for n in self.n {
+ if n != 0 {
+ return false;
+ }
+ }
+ true
+ }
+
+ /// Returns true if this is an odd number.
+ #[inline(always)]
+ pub fn is_odd(&self) -> bool {
+ (self.n[0] & 1) != 0
+ }
+
+ /// Returns true if this is an even number.
+ #[inline(always)]
+ pub fn is_even(&self) -> bool {
+ (self.n[0] & 1) == 0
+ }
+
+ /// Add to this integer and return any overflow carry bits.
+ pub fn add_assign_carry(&mut self, rhs: &Self) -> u64 {
+ let mut carry = 0_u64;
+ for i in 0..LIMBS {
+ let left_ptr = unsafe { self.n.get_unchecked_mut(i) };
+ let left = *left_ptr;
+ let sum = left + *unsafe { rhs.n.get_unchecked(i) } + carry;
+ carry = (sum < left) as u64;
+ *left_ptr = sum;
+ }
+ carry
+ }
+
+ /// Multiply two inputs half the size of this integer to yield a full size result in this integer..
+ /// The multiplicand sizes MULT_LIMBS must be one half the LIMBS size of this integer.
+ /// This is checked with an assertion. This isn't computed with the type system due
+ /// to current limitations in const generics.
+ pub fn mul_extend_assign<const MULT_LIMBS: usize>(&mut self, lhs: &VLI<{ MULT_LIMBS }>, rhs: &VLI<{ MULT_LIMBS }>) {
+ assert_eq!(MULT_LIMBS, LIMBS / 2);
+ let mut r01 = 0_u128;
+ let mut r2 = 0_u64;
+ let mut k = 0_usize;
+ while k < MULT_LIMBS {
+ for i in 0..k {
+ let l_product = (*unsafe { lhs.n.get_unchecked(i) } as u128) * (*unsafe { rhs.n.get_unchecked(k - i) } as u128);
+ r01 += l_product;
+ r2 += (r01 < l_product) as u64;
+ }
+ *unsafe { self.n.get_unchecked_mut(k) } = r01 as u64;
+ r01 = (r01 >> 64) | ((r2 as u128) << 64);
+ r2 = 0;
+ k += 1;
+ }
+ while k < (LIMBS - 1) {
+ let mut i = (k + 1) - MULT_LIMBS;
+ while i < k && i < MULT_LIMBS {
+ let l_product = (*unsafe { lhs.n.get_unchecked(i) } as u128) * (*unsafe { rhs.n.get_unchecked(k - i) } as u128);
+ r01 += l_product;
+ r2 += (r01 < l_product) as u64;
+ i += 1;
+ }
+ *unsafe { self.n.get_unchecked_mut(k) } = r01 as u64;
+ r01 = (r01 >> 64) | ((r2 as u128) << 64);
+ r2 = 0;
+ k += 1;
+ }
+ *unsafe { self.n.get_unchecked_mut(LIMBS - 1) } = r01 as u64;
+ }
+
+ /// Get this integer as a big-endian byte array.
+ /// If skip_leading_zeroes is true the returned byte vector will be the minimum size
+ /// needed to hold the integer, or empty if it is zero. Otherwise it will always be
+ /// LIMBS * 8 bytes in length.
+ pub fn to_vec(&self, skip_leading_zeroes: bool) -> Vec<u8> {
+ let mut bytes: Vec<u8> = Vec::new();
+ bytes.reserve(LIMBS * 8);
+
+ let mut i = LIMBS as isize - 1;
+ if skip_leading_zeroes {
+ while i >= 0 {
+ let x: u64 = *unsafe { self.n.get_unchecked(i as usize) };
+ if x != 0 {
+ let x = x.to_be_bytes();
+ for j in 0..8 {
+ if x[j] != 0 {
+ let _ = bytes.write_all(&x[j..8]);
+ break;
+ }
+ }
+ break;
+ }
+ i -= 1;
+ }
+ }
+ while i >= 0 {
+ let _ = bytes.write_all(&(unsafe { self.n.get_unchecked(i as usize) }.to_be_bytes()));
+ i -= 1;
+ }
+
+ bytes
+ }
+
+ /// Get this integer as a hex string.
+ /// If skip_leading_zeroes is true, the returned string will not be left padded with zeroes
+ /// to the size it would be if the integer's bits were saturated.
+ pub fn to_hex_string(&self, skip_leading_zeroes: bool) -> String {
+ let mut s = String::new();
+ s.reserve(LIMBS * 16);
+
+ let mut i = LIMBS as isize - 1;
+ if skip_leading_zeroes {
+ while i >= 0 {
+ let mut x: u64 = *unsafe { self.n.get_unchecked(i as usize) };
+ if x != 0 {
+ let mut j = 0;
+ while j < 16 {
+ if (x >> 60) != 0 {
+ break;
+ }
+ x <<= 4;
+ j += 1;
+ }
+ while j < 16 {
+ s.push(HEX_CHARS[(x >> 60) as usize]);
+ x <<= 4;
+ j += 1;
+ }
+ break;
+ }
+ i -= 1;
+ }
+ }
+ while i >= 0 {
+ let mut x: u64 = *unsafe { self.n.get_unchecked(i as usize) };
+ for _ in 0..16 {
+ s.push(HEX_CHARS[(x >> 60) as usize]);
+ x <<= 4;
+ }
+ i -= 1;
+ }
+
+ if s.is_empty() {
+ s.push('0');
+ }
+
+ s
+ }
+}
+
+impl<const LIMBS: usize> Add<&Self> for VLI<LIMBS> {
+ type Output = Self;
+
+ #[inline(always)]
+ fn add(mut self, rhs: &Self) -> Self::Output {
+ self.add_assign(rhs);
+ self
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<&Self> for VLI<LIMBS> {
+ fn add_assign(&mut self, rhs: &Self) {
+ let mut carry = 0_u64;
+ for i in 0..LIMBS {
+ let left_ptr = unsafe { self.n.get_unchecked_mut(i) };
+ let left = *left_ptr;
+ let sum = left + *unsafe { rhs.n.get_unchecked(i) } + carry;
+ carry = (sum < left) as u64;
+ *left_ptr = sum;
+ }
+ }
+}
+
+impl<const LIMBS: usize> Sub<&Self> for VLI<LIMBS> {
+ type Output = Self;
+
+ #[inline(always)]
+ fn sub(mut self, rhs: &Self) -> Self::Output {
+ self.sub_assign(rhs);
+ self
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<&Self> for VLI<LIMBS> {
+ fn sub_assign(&mut self, rhs: &Self) {
+ let mut borrow = 0_u64;
+ for i in 0..LIMBS {
+ let left_ptr = unsafe { self.n.get_unchecked_mut(i) };
+ let left = *left_ptr;
+ let diff = left - *unsafe { rhs.n.get_unchecked(i) } - borrow;
+ borrow = (diff > left) as u64;
+ *left_ptr = diff;
+ }
+ }
+}
+
+impl<const LIMBS: usize> Shl<usize> for VLI<LIMBS> {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shl(mut self, rhs: usize) -> Self::Output {
+ self.shl_assign(rhs);
+ self
+ }
+}
+
+impl<const LIMBS: usize> ShlAssign<usize> for VLI<LIMBS> {
+ fn shl_assign(&mut self, rhs: usize) {
+ if rhs != 0 {
+ if rhs < 64 {
+ let mut carry = 0_u64;
+ for i in 0..LIMBS {
+ let x_ptr = unsafe { self.n.get_unchecked_mut(i) };
+ let x = *x_ptr;
+ *x_ptr = (x << rhs) | carry;
+ carry = x >> (64 - rhs);
+ }
+ } else {
+ self.zero();
+ }
+ }
+ }
+}
+
+impl<const LIMBS: usize> Shr<usize> for VLI<LIMBS> {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shr(mut self, rhs: usize) -> Self::Output {
+ self.shr_assign(rhs);
+ self
+ }
+}
+
+impl<const LIMBS: usize> ShrAssign<usize> for VLI<LIMBS> {
+ fn shr_assign(&mut self, rhs: usize) {
+ if rhs != 0 {
+ if rhs < 64 {
+ let mut carry = 0_u64;
+ let mut i = LIMBS as isize - 1;
+ while i >= 0 {
+ let x_ptr = unsafe { self.n.get_unchecked_mut(i as usize) };
+ let x = *x_ptr;
+ *x_ptr = (x >> rhs) | carry;
+ carry = x << (64 - rhs);
+ i -= 1;
+ }
+ } else {
+ self.zero();
+ }
+ }
+ }
+}
+
+impl<const LIMBS: usize> PartialOrd for VLI<LIMBS> {
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+
+ fn lt(&self, other: &Self) -> bool {
+ let mut i = LIMBS as isize - 1;
+ while i >= 0 {
+ let a = *unsafe { self.n.get_unchecked(i as usize) };
+ let b = *unsafe { other.n.get_unchecked(i as usize) };
+ if a > b {
+ return false;
+ } else if a < b {
+ return true;
+ }
+ i -= 1;
+ }
+ false
+ }
+
+ fn le(&self, other: &Self) -> bool {
+ let mut i = LIMBS as isize - 1;
+ while i >= 0 {
+ let a = *unsafe { self.n.get_unchecked(i as usize) };
+ let b = *unsafe { other.n.get_unchecked(i as usize) };
+ if a > b {
+ return false;
+ } else if a < b {
+ return true;
+ }
+ i -= 1;
+ }
+ true
+ }
+
+ fn gt(&self, other: &Self) -> bool {
+ let mut i = LIMBS as isize - 1;
+ while i >= 0 {
+ let a = *unsafe { self.n.get_unchecked(i as usize) };
+ let b = *unsafe { other.n.get_unchecked(i as usize) };
+ if a > b {
+ return true;
+ } else if a < b {
+ return false;
+ }
+ i -= 1;
+ }
+ false
+ }
+
+ fn ge(&self, other: &Self) -> bool {
+ let mut i = LIMBS as isize - 1;
+ while i >= 0 {
+ let a = *unsafe { self.n.get_unchecked(i as usize) };
+ let b = *unsafe { other.n.get_unchecked(i as usize) };
+ if a > b {
+ return true;
+ } else if a < b {
+ return false;
+ }
+ i -= 1;
+ }
+ true
+ }
+}
+
+impl<const LIMBS: usize> Ord for VLI<LIMBS> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let mut i = LIMBS as isize - 1;
+ while i >= 0 {
+ let a = *unsafe { self.n.get_unchecked(i as usize) };
+ let b = *unsafe { other.n.get_unchecked(i as usize) };
+ if a > b {
+ return Ordering::Greater;
+ } else if a < b {
+ return Ordering::Less;
+ }
+ i -= 1;
+ }
+ Ordering::Equal
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn arithmetic() {
+ }
+}