Loads more V2 work on address encoding, full vs partial addresses, WHOIS.

Adam Ierymenko 2023-03-23 20:22:53 -04:00
parent 773531f6e7
commit 86652ec969
16 changed files with 551 additions and 447 deletions

View file

@ -306,8 +306,8 @@ pub mod v1 {
#[inline(always)]
pub fn get_packet_aad_bytes(destination: &Address, source: &Address, flags_cipher_hops: u8) -> [u8; 11] {
let mut id = [0u8; 11];
id[0..5].copy_from_slice(destination.legacy_address().as_bytes());
id[5..10].copy_from_slice(source.legacy_address().as_bytes());
id[0..5].copy_from_slice(destination.legacy_bytes());
id[5..10].copy_from_slice(source.legacy_bytes());
id[10] = flags_cipher_hops & FLAGS_FIELD_MASK_HIDE_HOPS;
id
}
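The AAD built here is the 11-byte v1 packet ID: five bytes of destination, five of source, and the flags/cipher byte with the hop count masked off, since relays increment hops in flight and mutable bits cannot be covered by authentication. A standalone sketch of that property follows; the mask value and bit layout are assumptions for illustration, not taken from this diff.

// Sketch: why hop bits are excluded from the authenticated associated data.
// Assumes the low 3 bits of flags_cipher_hops are the hop counter (mask value illustrative).
const FLAGS_FIELD_MASK_HIDE_HOPS: u8 = 0xf8;

fn aad(dest: &[u8; 5], src: &[u8; 5], flags_cipher_hops: u8) -> [u8; 11] {
    let mut id = [0u8; 11];
    id[0..5].copy_from_slice(dest);
    id[5..10].copy_from_slice(src);
    id[10] = flags_cipher_hops & FLAGS_FIELD_MASK_HIDE_HOPS; // hop count zeroed
    id
}

fn main() {
    let (d, s) = (&[1u8, 2, 3, 4, 5], &[6u8, 7, 8, 9, 10]);
    // A relay incrementing the hop counter must not change the AAD, or the MAC would fail.
    assert_eq!(aad(d, s, 0b0100_0001), aad(d, s, 0b0100_0011));
}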

View file

@ -1,151 +1,92 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::borrow::Borrow;
use std::fmt::Debug;
use std::hash::Hash;
use std::num::NonZeroU64;
use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::base24;
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::hex;
use zerotier_utils::memory;
const BASE62_ALPHABET: &'static [u8; 62] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
const BASE62_ALPHABET_REVERSE: [u8; 256] = [0; 256];
#[derive(Clone, PartialEq, Eq)]
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct Address([u128; 3]);
pub struct Address(pub(super) [u8; Self::SIZE_BYTES]);
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct PartialAddress(pub(super) Address, pub(super) u16);
impl Address {
/// Size of a full length address in bytes.
pub const SIZE_BYTES: usize = 48;
/// Addresses may not begin with 0xff; reserved for special signaling or future use.
/// The first byte of an address cannot be 0xff.
pub const RESERVED_PREFIX: u8 = 0xff;
#[inline(always)]
pub(crate) fn new_uninitialized() -> Self {
Self([0, 0, 0])
pub(super) fn new_uninitialized() -> Self {
Self([0u8; Self::SIZE_BYTES])
}
#[inline(always)]
pub(crate) fn as_bytes_mut(&mut self) -> &mut [u8; 48] {
memory::as_byte_array_mut(&mut self.0)
}
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidFormatError> {
if b.len() >= Self::SIZE_BYTES {
let a = Self(memory::load_raw(b));
if b[0] != Self::RESERVED_PREFIX && memory::load_raw::<u64>(b) != 0 {
Ok(a)
} else {
Err(InvalidFormatError)
}
#[inline]
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidParameterError> {
if b.len() == Self::SIZE_BYTES && b[0] != Address::RESERVED_PREFIX && b[..PartialAddress::LEGACY_SIZE_BYTES].iter().any(|i| *i != 0) {
Ok(Self(b.try_into().unwrap()))
} else {
Err(InvalidFormatError)
Err(InvalidParameterError("invalid address"))
}
}
/// Get the first 40 bits of this address (for legacy use)
#[inline(always)]
pub fn from_bytes_exact(b: &[u8; Self::SIZE_BYTES]) -> Result<Self, InvalidFormatError> {
let a = Self(memory::load_raw(b));
if b[0] != Self::RESERVED_PREFIX && memory::load_raw::<u64>(b) != 0 {
Ok(a)
} else {
Err(InvalidFormatError)
}
pub(crate) fn legacy_bytes(&self) -> &[u8; 5] {
memory::array_range::<u8, { Address::SIZE_BYTES }, 0, { PartialAddress::LEGACY_SIZE_BYTES }>(&self.0)
}
/// Get a partial address object (with full specificity) for this address
#[inline(always)]
pub fn legacy_address(&self) -> LegacyAddress {
LegacyAddress(NonZeroU64::new(memory::load_raw::<u64>(self.as_bytes())).unwrap())
pub fn to_partial(&self) -> PartialAddress {
PartialAddress(Address(self.0), Self::SIZE_BYTES as u16)
}
/// Get all bits in this address (last 344 will be zero if this is only a V1 address).
#[inline(always)]
pub fn as_bytes(&self) -> &[u8; Self::SIZE_BYTES] {
memory::as_byte_array(&self.0)
&self.0
}
}
impl Borrow<[u8; Self::SIZE_BYTES]> for Address {
#[inline(always)]
fn borrow(&self) -> &[u8; Self::SIZE_BYTES] {
&self.0
}
}
impl ToString for Address {
fn to_string(&self) -> String {
let mut s = String::with_capacity(66);
let mut remainders = 0u16;
for qq in self.0.iter() {
let mut q = u128::from_be(*qq);
for _ in 0..21 {
let (x, y) = (q % 62, q / 62);
q = y;
s.push(BASE62_ALPHABET[x as usize] as char);
}
debug_assert!(q <= 7);
remainders = remainders.wrapping_shl(3);
remainders |= q as u16;
}
debug_assert!(remainders <= 511);
s.push(BASE62_ALPHABET[(remainders % 62) as usize] as char);
s.push(BASE62_ALPHABET[(remainders / 62) as usize] as char);
s
let mut tmp = String::with_capacity(Self::SIZE_BYTES * 2);
base24::encode_into(&self.0, &mut tmp);
tmp
}
}
impl FromStr for Address {
type Err = InvalidFormatError;
type Err = InvalidParameterError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut s = s.as_bytes();
let mut a = Self([0, 0, 0]);
for qi in 0..3 {
let mut q = 0u128;
for _ in 0..21 {
let r = BASE62_ALPHABET_REVERSE[s[0] as usize];
s = &s[1..];
if r == 255 {
return Err(InvalidFormatError);
}
q *= 62;
q += r as u128;
}
a.0[qi] = q;
}
let mut remainders = 0u16;
for _ in 0..2 {
let r = BASE62_ALPHABET_REVERSE[s[0] as usize];
s = &s[1..];
if r == 255 {
return Err(InvalidFormatError);
}
remainders *= 62;
remainders += r as u16;
}
if remainders > 511 {
return Err(InvalidFormatError);
}
a.0[0] += (remainders.wrapping_shr(6) & 7) as u128;
a.0[1] += (remainders.wrapping_shr(3) & 7) as u128;
a.0[2] += (remainders & 7) as u128;
return Ok(a);
let mut tmp = Vec::with_capacity(Self::SIZE_BYTES);
base24::decode_into(s, &mut tmp)?;
Self::from_bytes(tmp.as_slice())
}
}
impl PartialOrd for Address {
impl Hash for Address {
#[inline(always)]
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Address {
#[inline]
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
u128::from_be(self.0[0])
.cmp(&u128::from_be(other.0[0]))
.then(u128::from_be(self.0[1]).cmp(&u128::from_be(other.0[1])))
.then(u128::from_be(self.0[2]).cmp(&u128::from_be(other.0[2])))
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// Since this contains a random hash, the first 64 bits should be enough for a local HashMap etc.
state.write_u64(memory::load_raw(&self.0))
}
}
@ -156,13 +97,6 @@ impl Debug for Address {
}
}
impl Hash for Address {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
state.write_u128(self.0[0])
}
}
impl Serialize for Address {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -177,9 +111,9 @@ impl Serialize for Address {
}
}
struct AddressVisitor;
struct AddressDeserializeVisitor;
impl<'de> serde::de::Visitor<'de> for AddressVisitor {
impl<'de> serde::de::Visitor<'de> for AddressDeserializeVisitor {
type Value = Address;
#[inline]
@ -192,13 +126,10 @@ impl<'de> serde::de::Visitor<'de> for AddressVisitor {
where
E: serde::de::Error,
{
if let Ok(v) = Address::from_bytes(v) {
Ok(v)
} else {
Err(E::custom("invalid address"))
}
Address::from_bytes(v).map_err(|_| E::custom("invalid address"))
}
#[inline]
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
@ -209,91 +140,173 @@ impl<'de> serde::de::Visitor<'de> for AddressVisitor {
impl<'de> Deserialize<'de> for Address {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Address, D::Error>
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
if deserializer.is_human_readable() {
deserializer.deserialize_str(AddressVisitor)
deserializer.deserialize_str(AddressDeserializeVisitor)
} else {
deserializer.deserialize_bytes(AddressVisitor)
deserializer.deserialize_bytes(AddressDeserializeVisitor)
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct LegacyAddress(NonZeroU64);
impl PartialAddress {
/// Minimum number of specified bits in an address.
pub const MIN_SPECIFICITY: usize = Self::MIN_SIZE_BYTES * 8;
impl LegacyAddress {
pub const SIZE_BYTES: usize = 5;
pub const SIZE_HEX_STRING: usize = 10;
/// Maximum number of specified bits in an address.
pub const MAX_SPECIFICITY: usize = Self::MAX_SIZE_BYTES * 8;
pub const LEGACY_SIZE_BYTES: usize = 5;
pub const MIN_SIZE_BYTES: usize = Self::LEGACY_SIZE_BYTES;
pub const MAX_SIZE_BYTES: usize = Address::SIZE_BYTES;
/// Create an invalid uninitialized address (used when generating Identity)
pub(super) fn new_uninitialized() -> Self {
Self(Address([0u8; Self::MAX_SIZE_BYTES]), 0)
}
/// Construct an address from a byte slice with its length determining specificity.
#[inline]
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidParameterError> {
if b.len() >= Self::MIN_SIZE_BYTES
&& b.len() <= Self::MAX_SIZE_BYTES
&& b[0] != Address::RESERVED_PREFIX
&& b[..Self::LEGACY_SIZE_BYTES].iter().any(|i| *i != 0)
{
let mut a = Self(Address([0u8; Address::SIZE_BYTES]), b.len() as u16);
a.0 .0[..b.len()].copy_from_slice(b);
Ok(a)
} else {
Err(InvalidParameterError("invalid address"))
}
}
#[inline]
pub(crate) fn from_legacy_address_bytes(b: &[u8; 5]) -> Result<Self, InvalidParameterError> {
if b[0] != Address::RESERVED_PREFIX && b.iter().any(|i| *i != 0) {
Ok(Self(
Address({
let mut tmp = [0u8; Self::MAX_SIZE_BYTES];
tmp[..5].copy_from_slice(b);
tmp
}),
Self::LEGACY_SIZE_BYTES as u16,
))
} else {
Err(InvalidParameterError("invalid address"))
}
}
#[inline]
pub(crate) fn from_legacy_address_u64(mut b: u64) -> Result<Self, InvalidParameterError> {
b &= 0xffffffffff;
if b.wrapping_shr(32) != (Address::RESERVED_PREFIX as u64) && b != 0 {
Ok(Self(
Address({
let mut tmp = [0u8; Self::MAX_SIZE_BYTES];
tmp[..5].copy_from_slice(&b.to_be_bytes()[3..8]); // low 40 bits hold the address, so take the last 5 big-endian bytes
tmp
}),
Self::LEGACY_SIZE_BYTES as u16,
))
} else {
Err(InvalidParameterError("invalid address"))
}
}
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> Option<Self> {
if b.len() >= Self::SIZE_BYTES && b[0] != Address::RESERVED_PREFIX {
let mut tmp = 0u64.to_ne_bytes();
tmp[..Address::SIZE_BYTES].copy_from_slice(b);
NonZeroU64::new(u64::from_ne_bytes(tmp)).map(|i| Self(i))
pub fn as_bytes(&self) -> &[u8] {
debug_assert!(self.1 >= Self::MIN_SIZE_BYTES as u16);
&self.0 .0[..self.1 as usize]
}
#[inline(always)]
pub(crate) fn legacy_bytes(&self) -> &[u8; 5] {
debug_assert!(self.1 >= Self::MIN_SIZE_BYTES as u16);
memory::array_range::<u8, { Address::SIZE_BYTES }, 0, { PartialAddress::LEGACY_SIZE_BYTES }>(&self.0 .0)
}
#[inline(always)]
pub(super) fn matches(&self, k: &Address) -> bool {
debug_assert!(self.1 >= Self::MIN_SIZE_BYTES as u16);
let l = self.1 as usize;
self.0 .0[..l].eq(&k.0[..l])
}
/// Get the number of bits of specificity in this address
#[inline(always)]
pub fn specificity(&self) -> usize {
(self.1 * 8) as usize
}
/// Returns true if this address has legacy 40 bit specificity (V1 ZeroTier address)
#[inline(always)]
pub fn is_legacy(&self) -> bool {
self.1 == Self::LEGACY_SIZE_BYTES as u16
}
/// Get a full length address if this partial address is actually complete (384 bits of specificity)
#[inline(always)]
pub fn as_address(&self) -> Option<&Address> {
if self.1 == Self::MAX_SIZE_BYTES as u16 {
Some(&self.0)
} else {
None
}
}
/// Returns true if specificity is at the maximum value (384 bits)
#[inline(always)]
pub fn from_bytes_exact(b: &[u8; Self::SIZE_BYTES]) -> Option<Self> {
if b[0] != Address::RESERVED_PREFIX {
let mut tmp = 0u64.to_ne_bytes();
tmp[..Address::SIZE_BYTES].copy_from_slice(b);
NonZeroU64::new(u64::from_ne_bytes(tmp)).map(|i| Self(i))
} else {
None
}
}
#[inline(always)]
pub(crate) fn from_u64(i: u64) -> Option<Self> {
NonZeroU64::new(i.wrapping_shl(24).to_be()).map(|i| Self(i))
}
#[inline(always)]
pub(crate) fn to_u64(&self) -> u64 {
u64::from_be(self.0.get()).wrapping_shr(24)
}
#[inline(always)]
pub fn as_bytes(&self) -> &[u8; Self::SIZE_BYTES] {
debug_assert_eq!(std::mem::size_of::<NonZeroU64>(), 8);
memory::array_range::<u8, 8, 0, 5>(memory::as_byte_array::<NonZeroU64, 8>(&self.0))
pub fn is_complete(&self) -> bool {
self.1 == Self::MAX_SIZE_BYTES as u16
}
}
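A minimal standalone model of the full-vs-partial relationship above, with simplified names (this is a sketch, not the crate's API): a partial address is a zero-padded 48-byte array plus a count of specified bytes, and matching is a prefix comparison.

const FULL: usize = 48; // Address::SIZE_BYTES
const LEGACY: usize = 5; // PartialAddress::LEGACY_SIZE_BYTES

struct Addr([u8; FULL]);

struct Partial {
    bytes: [u8; FULL], // zero-padded past the specified length
    len: u16,          // number of specified bytes
}

impl Partial {
    fn matches(&self, k: &Addr) -> bool {
        let l = self.len as usize;
        self.bytes[..l] == k.0[..l]
    }
    fn specificity(&self) -> usize {
        self.len as usize * 8
    }
}

fn main() {
    let mut full = [7u8; FULL];
    full[0] = 0x12; // first byte must not be the reserved 0xff prefix
    let a = Addr(full);

    // A V1-style 40-bit partial: only the first five bytes are specified.
    let mut p = Partial { bytes: [0u8; FULL], len: LEGACY as u16 };
    p.bytes[..LEGACY].copy_from_slice(&a.0[..LEGACY]);

    assert!(p.matches(&a));
    assert_eq!(p.specificity(), 40);
}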
impl ToString for LegacyAddress {
impl ToString for PartialAddress {
fn to_string(&self) -> String {
hex::to_string(&memory::as_byte_array::<NonZeroU64, 8>(&self.0)[..Self::SIZE_BYTES])
if self.is_legacy() {
hex::to_string(&self.0 .0[..Self::LEGACY_SIZE_BYTES])
} else {
let mut tmp = String::with_capacity(Self::MAX_SIZE_BYTES * 2);
base24::encode_into(&self.0 .0[..self.1 as usize], &mut tmp);
tmp
}
}
}
impl FromStr for LegacyAddress {
type Err = InvalidFormatError;
impl FromStr for PartialAddress {
type Err = InvalidParameterError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.len() == Self::SIZE_HEX_STRING {
Self::from_bytes(hex::from_string(s).as_slice()).ok_or(InvalidFormatError)
if s.len() == 10 {
return Self::from_bytes(hex::from_string(s).as_slice());
} else {
Err(InvalidFormatError)
let mut tmp = Vec::with_capacity(Self::MAX_SIZE_BYTES);
base24::decode_into(s, &mut tmp)?;
return Self::from_bytes(tmp.as_slice());
}
}
}
impl Debug for LegacyAddress {
impl Hash for PartialAddress {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// Since this contains a random hash, the first 64 bits should be enough for a local HashMap etc.
state.write_u64(memory::load_raw(&self.0 .0))
}
}
impl Debug for PartialAddress {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.to_string().as_str())
}
}
impl Serialize for LegacyAddress {
impl Serialize for PartialAddress {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -307,10 +320,10 @@ impl Serialize for LegacyAddress {
}
}
struct LegacyAddressVisitor;
struct PartialAddressDeserializeVisitor;
impl<'de> serde::de::Visitor<'de> for LegacyAddressVisitor {
type Value = LegacyAddress;
impl<'de> serde::de::Visitor<'de> for PartialAddressDeserializeVisitor {
type Value = PartialAddress;
#[inline]
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
@ -322,31 +335,28 @@ impl<'de> serde::de::Visitor<'de> for LegacyAddressVisitor {
where
E: serde::de::Error,
{
if let Some(v) = LegacyAddress::from_bytes(v) {
Ok(v)
} else {
Err(E::custom("invalid address"))
}
PartialAddress::from_bytes(v).map_err(|_| E::custom("invalid address"))
}
#[inline]
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
LegacyAddress::from_str(v).map_err(|e| E::custom(e.to_string()))
PartialAddress::from_str(v).map_err(|e| E::custom(e.to_string()))
}
}
impl<'de> Deserialize<'de> for LegacyAddress {
impl<'de> Deserialize<'de> for PartialAddress {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
if deserializer.is_human_readable() {
deserializer.deserialize_str(LegacyAddressVisitor)
deserializer.deserialize_str(PartialAddressDeserializeVisitor)
} else {
deserializer.deserialize_bytes(LegacyAddressVisitor)
deserializer.deserialize_bytes(PartialAddressDeserializeVisitor)
}
}
}

View file

@ -6,8 +6,8 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::vl1::inetaddress::InetAddress;
use crate::vl1::{Address, MAC};
use super::inetaddress::InetAddress;
use super::{Address, MAC};
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::error::InvalidFormatError;
@ -343,10 +343,11 @@ impl FromStr for Endpoint {
let (endpoint_type, endpoint_data) = ss.unwrap();
match endpoint_type {
"zt" | "zte" => {
let a = Address::from_str(endpoint_data).map_err(|_| InvalidFormatError)?;
if endpoint_type == "zt" {
return Ok(Endpoint::ZeroTier(Address::from_str(endpoint_data)?));
return Ok(Endpoint::ZeroTier(a));
} else {
return Ok(Endpoint::ZeroTierEncap(Address::from_str(endpoint_data)?));
return Ok(Endpoint::ZeroTierEncap(a));
}
}
"eth" => return Ok(Endpoint::Ethernet(MAC::from_str(endpoint_data)?)),

View file

@ -1,6 +1,6 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use crate::vl1::identity::Identity;
use super::identity::Identity;
#[derive(Clone)]
pub enum Event {

View file

@ -5,7 +5,7 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use super::{Address, LegacyAddress};
use super::address::{Address, PartialAddress};
use zerotier_crypto::hash::{SHA384, SHA512};
use zerotier_crypto::p384::*;
@ -92,7 +92,7 @@ impl Identity {
let mut legacy_address_derivation_hash = legacy_address_derivation_hash.finish();
legacy_address_derivation_work_function(&mut legacy_address_derivation_hash);
if legacy_address_derivation_hash[0] < Self::V0_IDENTITY_POW_THRESHOLD && legacy_address_derivation_hash[59] != Address::RESERVED_PREFIX {
secret.public.address.as_bytes_mut()[..5].copy_from_slice(&legacy_address_derivation_hash[59..64]);
secret.public.address.0[..PartialAddress::LEGACY_SIZE_BYTES].copy_from_slice(&legacy_address_derivation_hash[59..64]);
break;
} else {
// Regenerate one of the two keys until we meet the legacy address work function criteria.
@ -150,7 +150,7 @@ impl Identity {
/// Populate bits 40-384 of the address with a hash of everything else.
fn populate_extended_address_bits(&mut self) {
let mut sha = SHA384::new();
sha.update(self.address.legacy_address().as_bytes()); // including the short address means we can elide the expensive legacy hash in the future
sha.update(&self.address.0[..PartialAddress::LEGACY_SIZE_BYTES]); // include short address in full hash
sha.update(&[Self::ALGORITHM_X25519
| if self.p384.is_some() {
Self::ALGORITHM_P384
@ -164,15 +164,11 @@ impl Identity {
sha.update(p384.ecdsa.as_bytes());
}
let sha = sha.finish();
self.address.as_bytes_mut()[LegacyAddress::SIZE_BYTES..].copy_from_slice(&sha[..Address::SIZE_BYTES - LegacyAddress::SIZE_BYTES]);
self.address.0[PartialAddress::LEGACY_SIZE_BYTES..].copy_from_slice(&sha[..Address::SIZE_BYTES - PartialAddress::LEGACY_SIZE_BYTES]);
}
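The resulting V2 address layout: bytes 0..5 keep the PoW-derived V1 short address, and bytes 5..48 are a truncated SHA-384 over the short address and the public keys. A quick check of that arithmetic (a sketch, not crate code):

fn main() {
    const ADDRESS_SIZE_BYTES: usize = 48; // 384-bit full address
    const LEGACY_SIZE_BYTES: usize = 5;   // 40-bit V1 short address
    const SHA384_SIZE_BYTES: usize = 48;
    // Bits 40..384 are filled from a SHA-384 digest, which is long enough after truncation.
    assert_eq!(ADDRESS_SIZE_BYTES - LEGACY_SIZE_BYTES, 43);
    assert!(SHA384_SIZE_BYTES >= ADDRESS_SIZE_BYTES - LEGACY_SIZE_BYTES);
}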
/// Encode for self-signing, used only with p384 keys enabled and panics otherwise.
fn encode_for_self_signing(
&self,
buf: &mut [u8; Address::SIZE_BYTES + 1 + C25519_PUBLIC_KEY_SIZE + ED25519_PUBLIC_KEY_SIZE + P384_PUBLIC_KEY_SIZE + P384_PUBLIC_KEY_SIZE],
) {
let mut buf = &mut buf[Address::SIZE_BYTES + 1..];
fn encode_for_self_signing(&self, mut buf: &mut [u8]) {
let _ = buf.write_all(self.address.as_bytes());
let _ = buf.write_all(&[Self::ALGORITHM_X25519 | Self::ALGORITHM_P384]);
let _ = buf.write_all(&self.x25519.ecdh);
@ -183,7 +179,7 @@ impl Identity {
}
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidFormatError> {
if b.len() == packed::V2_PUBLIC_SIZE && b[LegacyAddress::SIZE_BYTES] == (Self::ALGORITHM_X25519 | Self::ALGORITHM_P384) {
if b.len() == packed::V2_PUBLIC_SIZE && b[PartialAddress::LEGACY_SIZE_BYTES] == (Self::ALGORITHM_X25519 | Self::ALGORITHM_P384) {
let p: &packed::V2Public = memory::cast_to_struct(b);
let mut id = Self {
address: Address::new_uninitialized(),
@ -195,27 +191,28 @@ impl Identity {
p384_self_signature: p.p384_self_signature,
}),
};
id.address.as_bytes_mut()[..LegacyAddress::SIZE_BYTES].copy_from_slice(&p.short_address);
id.address.0[..PartialAddress::LEGACY_SIZE_BYTES].copy_from_slice(&p.short_address);
id.populate_extended_address_bits();
return Ok(id);
} else if b.len() == packed::V1_PUBLIC_SIZE && b[LegacyAddress::SIZE_BYTES] == Self::ALGORITHM_X25519 {
} else if b.len() == packed::V1_PUBLIC_SIZE && b[PartialAddress::LEGACY_SIZE_BYTES] == Self::ALGORITHM_X25519 {
let p: &packed::V1Public = memory::cast_to_struct(b);
let mut id = Self {
address: Address::new_uninitialized(),
x25519: X25519 { ecdh: p.c25519, eddsa: p.ed25519 },
p384: None,
};
id.address.as_bytes_mut()[..LegacyAddress::SIZE_BYTES].copy_from_slice(&p.short_address);
id.address.0[..PartialAddress::LEGACY_SIZE_BYTES].copy_from_slice(&p.short_address);
id.populate_extended_address_bits();
return Ok(id);
} else {
return Err(InvalidFormatError);
}
return Err(InvalidFormatError);
}
pub fn write_bytes<W: Write>(&self, w: &mut W, x25519_only: bool) -> Result<(), std::io::Error> {
if let (false, Some(p384)) = (x25519_only, self.p384.as_ref()) {
w.write_all(memory::as_byte_array::<packed::V2Public, { packed::V2_PUBLIC_SIZE }>(&packed::V2Public {
short_address: *self.address.legacy_address().as_bytes(),
short_address: *self.address.legacy_bytes(),
algorithms: Self::ALGORITHM_X25519 | Self::ALGORITHM_P384,
c25519: self.x25519.ecdh,
ed25519: self.x25519.eddsa,
@ -226,7 +223,7 @@ impl Identity {
}))
} else {
w.write_all(memory::as_byte_array::<packed::V1Public, { packed::V1_PUBLIC_SIZE }>(&packed::V1Public {
short_address: *self.address.legacy_address().as_bytes(),
short_address: *self.address.legacy_bytes(),
algorithms: Self::ALGORITHM_X25519,
c25519: self.x25519.ecdh,
ed25519: self.x25519.eddsa,
@ -252,7 +249,7 @@ impl ToString for Identity {
} else {
format!(
"{}:0:{}:{}",
self.address.legacy_address().to_string(),
hex::to_string(self.address.legacy_bytes()),
hex::to_string(&self.x25519.ecdh),
hex::to_string(&self.x25519.eddsa)
)
@ -300,7 +297,7 @@ impl Marshalable for Identity {
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Self, UnmarshalError> {
const V1_ALG: u8 = Identity::ALGORITHM_X25519;
const V2_ALG: u8 = Identity::ALGORITHM_X25519 | Identity::ALGORITHM_P384;
match buf.u8_at(*cursor + LegacyAddress::SIZE_BYTES)? {
match buf.u8_at(*cursor + PartialAddress::LEGACY_SIZE_BYTES)? {
V1_ALG => Identity::from_bytes(buf.read_bytes_fixed::<{ packed::V1_PUBLIC_SIZE }>(cursor)?).map_err(|_| UnmarshalError::InvalidData),
V2_ALG => Identity::from_bytes(buf.read_bytes_fixed::<{ packed::V2_PUBLIC_SIZE }>(cursor)?).map_err(|_| UnmarshalError::InvalidData),
_ => Err(UnmarshalError::UnsupportedVersion),

View file

@ -9,11 +9,12 @@ mod path;
mod peer;
mod peermap;
mod rootset;
mod whois;
pub mod identity;
pub mod inetaddress;
pub use address::{Address, LegacyAddress};
pub use address::Address;
pub use endpoint::Endpoint;
pub use event::Event;
pub use inetaddress::InetAddress;

View file

@ -1,14 +1,13 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::collections::HashMap;
use std::convert::Infallible;
use std::hash::Hash;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex, RwLock, Weak};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use crate::protocol::*;
use crate::vl1::address::{Address, LegacyAddress};
use crate::vl1::address::{Address, PartialAddress};
use crate::vl1::debug_event;
use crate::vl1::endpoint::Endpoint;
use crate::vl1::event::Event;
@ -22,7 +21,6 @@ use zerotier_crypto::typestate::{Valid, Verified};
use zerotier_utils::gate::IntervalGate;
use zerotier_utils::hex;
use zerotier_utils::marshalable::Marshalable;
use zerotier_utils::ringbuffer::RingBuffer;
use zerotier_utils::tokio::io::AsyncWriteExt;
/// Interface trait to be implemented by code that's using the ZeroTier network hypervisor.
@ -49,7 +47,12 @@ pub trait ApplicationLayer: Sync + Send + 'static {
fn local_socket_is_valid(&self, socket: &Self::LocalSocket) -> bool;
/// Check if this node should respond to messages from a given peer at all.
fn should_respond_to(&self, id: &Valid<Identity>) -> bool;
///
/// The default implementation always returns true. Typically this is what you want for a
/// controller or a root but not a regular node (unless required for backward compatibility).
fn should_respond_to(&self, id: &Valid<Identity>) -> bool {
true
}
/// Called to send a packet over the physical network (virtual -> physical).
///
@ -107,7 +110,7 @@ pub trait ApplicationLayer: Sync + Send + 'static {
fn time_clock(&self) -> i64;
}
/// Result of a packet handler.
/// Result of a packet handler in the InnerProtocolLayer trait.
pub enum PacketHandlerResult {
/// Packet was handled successfully.
Ok,
@ -201,7 +204,7 @@ struct RootInfo<Application: ApplicationLayer + ?Sized> {
}
/// How often to check the root cluster definitions against the root list and update.
const ROOT_SYNC_INTERVAL_MS: i64 = 1000;
const ROOT_SYNC_INTERVAL_MS: i64 = 2000;
#[derive(Default)]
struct BackgroundTaskIntervals {
@ -213,20 +216,13 @@ struct BackgroundTaskIntervals {
whois_queue_retry: IntervalGate<{ WHOIS_RETRY_INTERVAL }>,
}
struct WhoisQueueItem<Application: ApplicationLayer + ?Sized> {
v1_proto_waiting_packets: RingBuffer<(Weak<Path<Application>>, PooledPacketBuffer), WHOIS_MAX_WAITING_PACKETS>,
last_retry_time: i64,
retry_count: u16,
}
pub struct Node<Application: ApplicationLayer + ?Sized> {
identity_secret: IdentitySecret,
pub(super) identity_secret: IdentitySecret,
intervals: Mutex<BackgroundTaskIntervals>,
paths: RwLock<HashMap<PathKey<'static, 'static, Application::LocalSocket>, Arc<Path<Application>>>>,
peers: PeerMap<Application>,
pub(super) peers: PeerMap<Application>,
roots: RwLock<RootInfo<Application>>,
best_root: RwLock<Option<Arc<Peer<Application>>>>,
whois_queue: Mutex<HashMap<LegacyAddress, WhoisQueueItem<Application>>>,
}
impl<Application: ApplicationLayer + ?Sized> Node<Application> {
@ -244,7 +240,6 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
online: false,
}),
best_root: RwLock::new(None),
whois_queue: Mutex::new(HashMap::new()),
}
}
@ -255,12 +250,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
#[inline(always)]
pub fn peer(&self, a: &Address) -> Option<Arc<Peer<Application>>> {
self.peers.get(a)
}
#[inline(always)]
pub(crate) fn peer_legacy(&self, a: &LegacyAddress) -> Option<Arc<Peer<Application>>> {
self.peers.get_legacy(a)
self.peers.get_exact(a)
}
#[inline]
@ -373,14 +363,11 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
m.identity.address.to_string(),
m.endpoints.as_ref().map_or(0, |e| e.len())
);
if let Some(peer) = self.peers.get(&m.identity.address) {
if let Some(peer) = self.peers.get_exact(&m.identity.address) {
new_roots.insert(peer.clone(), m.endpoints.as_ref().unwrap().iter().cloned().collect());
} else {
if let Some(peer) = Peer::new(&self.identity_secret, Valid::mark_valid(m.identity.clone()), time_ticks) {
new_roots.insert(
self.peers.insert_if_unique(Arc::new(peer)).0,
m.endpoints.as_ref().unwrap().iter().cloned().collect(),
);
new_roots.insert(self.peers.add(Arc::new(peer)).0, m.endpoints.as_ref().unwrap().iter().cloned().collect());
} else {
bad_identities.push(m.identity.clone());
}
@ -544,6 +531,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
if whois_queue_retry {
/*
let need_whois = {
let mut need_whois = Vec::new();
let mut whois_queue = self.whois_queue.lock().unwrap();
@ -560,6 +548,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
if !need_whois.is_empty() {
self.send_whois(app, need_whois.as_slice(), time_ticks);
}
*/
}
INTERVAL
@ -591,10 +580,10 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
// Legacy ZeroTier V1 packet handling
if let Ok(fragment_header) = packet.struct_mut_at::<v1::FragmentHeader>(0) {
if let Some(dest) = LegacyAddress::from_bytes_exact(&fragment_header.dest) {
if let Ok(dest) = PartialAddress::from_legacy_address_bytes(&fragment_header.dest) {
// Packet is addressed to this node.
if dest == self.identity_secret.public.address.legacy_address() {
if dest.matches(&self.identity_secret.public.address) {
let fragment_header = &*fragment_header; // discard mut
let path = self.canonical_path(source_endpoint, source_local_socket, source_local_interface, time_ticks);
path.log_receive_anything(time_ticks);
@ -622,8 +611,8 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
debug_event!(app, "[vl1] [v1] #{:0>16x} packet fully assembled!", fragment_header_id);
if let Ok(packet_header) = frag0.struct_at::<v1::PacketHeader>(0) {
if let Some(source) = LegacyAddress::from_bytes_exact(&packet_header.src) {
if let Some(peer) = self.peers.get_legacy(&source) {
if let Ok(source) = PartialAddress::from_legacy_address_bytes(&packet_header.src) {
if let Some(peer) = self.peers.get_unambiguous(&source) {
peer.v1_proto_receive(
self,
app,
@ -647,7 +636,8 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
}
if ok {
self.whois(app, source.clone(), Some((Arc::downgrade(&path), combined_packet)), time_ticks);
// TODO
//self.whois(app, source.clone(), Some((Arc::downgrade(&path), combined_packet)), time_ticks);
}
}
} // else source address invalid
@ -657,11 +647,12 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
} else if let Ok(packet_header) = packet.struct_at::<v1::PacketHeader>(0) {
debug_event!(app, "[vl1] [v1] #{:0>16x} is unfragmented", u64::from_be_bytes(packet_header.id));
if let Some(source) = LegacyAddress::from_bytes_exact(&packet_header.src) {
if let Some(peer) = self.peers.get_legacy(&source) {
if let Ok(source) = PartialAddress::from_legacy_address_bytes(&packet_header.src) {
if let Some(peer) = self.peers.get_unambiguous(&source) {
peer.v1_proto_receive(self, app, inner, time_ticks, &path, packet_header, packet.as_ref(), &[]);
} else {
self.whois(app, source, Some((Arc::downgrade(&path), packet)), time_ticks);
// TODO
//self.whois(app, source, Some((Arc::downgrade(&path), packet)), time_ticks);
}
}
} // else not fragment and header incomplete
@ -707,7 +698,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
return;
}
if let Some(peer) = self.peers.get_legacy(&dest) {
if let Some(peer) = self.peers.get_unambiguous(&dest) {
if let Some(forward_path) = peer.direct_path() {
app.wire_send(
&forward_path.endpoint,
@ -728,110 +719,12 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
}
/// Enqueue and send a WHOIS query for a given address, adding the supplied packet (if any) to the list to be processed on reply.
fn whois(
&self,
app: &Application,
address: LegacyAddress,
waiting_packet: Option<(Weak<Path<Application>>, PooledPacketBuffer)>,
time_ticks: i64,
) {
{
let mut whois_queue = self.whois_queue.lock().unwrap();
let qi = whois_queue.entry(address).or_insert_with(|| WhoisQueueItem {
v1_proto_waiting_packets: RingBuffer::new(),
last_retry_time: 0,
retry_count: 0,
});
if let Some(p) = waiting_packet {
qi.v1_proto_waiting_packets.add(p);
}
if qi.retry_count > 0 {
return;
} else {
qi.last_retry_time = time_ticks;
qi.retry_count += 1;
}
}
self.send_whois(app, &[address], time_ticks);
}
/// Send a WHOIS query to the current best root.
fn send_whois(&self, app: &Application, mut addresses: &[LegacyAddress], time_ticks: i64) {
debug_assert!(!addresses.is_empty());
debug_event!(app, "[vl1] [v1] sending WHOIS for {}", {
let mut tmp = String::new();
for a in addresses.iter() {
if !tmp.is_empty() {
tmp.push(',');
}
tmp.push_str(a.to_string().as_str());
}
tmp
});
if let Some(root) = self.best_root() {
while !addresses.is_empty() {
if !root
.send(app, self, None, time_ticks, |packet| -> Result<(), Infallible> {
assert!(packet.append_u8(message_type::VL1_WHOIS).is_ok());
while !addresses.is_empty() && (packet.len() + ADDRESS_SIZE) <= UDP_DEFAULT_MTU {
assert!(packet.append_bytes_fixed(addresses[0].as_bytes()).is_ok());
addresses = &addresses[1..];
}
Ok(())
})
.is_some()
{
break;
}
}
}
}
/// Called by Peer when an identity is received from another node, e.g. via OK(WHOIS).
pub(crate) fn handle_incoming_identity<Inner: InnerProtocolLayer + ?Sized>(
&self,
app: &Application,
inner: &Inner,
received_identity: Identity,
time_ticks: i64,
authoritative: bool,
) {
if authoritative {
if let Some(received_identity) = received_identity.validate() {
let mut whois_queue = self.whois_queue.lock().unwrap();
if let Some(qi) = whois_queue.get_mut(&received_identity.address.legacy_address()) {
let address = received_identity.address.legacy_address();
/*
if app.should_respond_to(&received_identity) {
if let Some(peer) = peers.get(&address).cloned().or_else(|| {
Peer::new(&self.identity_secret, received_identity, time_ticks)
.map(|p| Arc::new(p))
.and_then(|peer| Some(peers.entry(address.clone()).or_insert(peer).clone()))
}) {
drop(peers);
for p in qi.v1_proto_waiting_packets.iter() {
if let Some(path) = p.0.upgrade() {
if let Ok(packet_header) = p.1.struct_at::<v1::PacketHeader>(0) {
peer.v1_proto_receive(self, app, inner, time_ticks, &path, packet_header, &p.1, &[]);
}
}
}
}
}
*/
whois_queue.remove(&address);
}
}
}
}
/// Called when a remote node sends us a root set update, applying the update if it is valid and applicable.
///
/// This will only replace an existing root set with a newer one. It won't add a new root set, which must be
/// done by an authorized user or administrator not just by a root.
#[allow(unused)]
pub(crate) fn on_remote_update_root_set(&self, received_from: &Identity, rs: Verified<RootSet>) {
pub(super) fn on_remote_update_root_set(&self, received_from: &Identity, rs: Verified<RootSet>) {
let mut roots = self.roots.write().unwrap();
if let Some(entry) = roots.sets.get_mut(&rs.name) {
if entry.members.iter().any(|m| m.identity.eq(received_from)) && rs.should_replace(entry) {
@ -842,7 +735,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
/// Get the canonical Path object corresponding to an endpoint.
pub(crate) fn canonical_path(
pub(super) fn canonical_path(
&self,
ep: &Endpoint,
local_socket: &Application::LocalSocket,

View file

@ -5,14 +5,13 @@ use std::hash::{BuildHasher, Hasher};
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Mutex;
use super::endpoint::Endpoint;
use super::ApplicationLayer;
use crate::protocol;
use crate::vl1::endpoint::Endpoint;
use zerotier_crypto::random;
use zerotier_utils::NEVER_HAPPENED_TICKS;
use super::ApplicationLayer;
pub(crate) const SERVICE_INTERVAL_MS: i64 = protocol::PATH_KEEPALIVE_INTERVAL;
pub(crate) enum PathServiceResult {
@ -33,7 +32,7 @@ pub struct Path<Application: ApplicationLayer + ?Sized> {
last_send_time_ticks: AtomicI64,
last_receive_time_ticks: AtomicI64,
create_time_ticks: i64,
fragmented_packets: Mutex<HashMap<u64, protocol::v1::FragmentedPacket, PacketIdHasher>>,
v1_fragmented_packets: Mutex<HashMap<u64, protocol::v1::FragmentedPacket, PacketIdHasher>>,
}
impl<Application: ApplicationLayer + ?Sized> Path<Application> {
@ -50,7 +49,7 @@ impl<Application: ApplicationLayer + ?Sized> Path<Application> {
last_send_time_ticks: AtomicI64::new(NEVER_HAPPENED_TICKS),
last_receive_time_ticks: AtomicI64::new(NEVER_HAPPENED_TICKS),
create_time_ticks: time_ticks,
fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(4, PacketIdHasher(random::xorshift64_random()))),
v1_fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(4, PacketIdHasher(random::xorshift64_random()))),
}
}
@ -64,7 +63,7 @@ impl<Application: ApplicationLayer + ?Sized> Path<Application> {
packet: protocol::PooledPacketBuffer,
time_ticks: i64,
) -> Option<protocol::v1::FragmentedPacket> {
let mut fp = self.fragmented_packets.lock().unwrap();
let mut fp = self.v1_fragmented_packets.lock().unwrap();
// Discard some old waiting packets if the total incoming fragments for a path exceeds a
// sanity limit. This is to prevent memory exhaustion DOS attacks.
@ -103,7 +102,7 @@ impl<Application: ApplicationLayer + ?Sized> Path<Application> {
}
pub(crate) fn service(&self, time_ticks: i64) -> PathServiceResult {
self.fragmented_packets
self.v1_fragmented_packets
.lock()
.unwrap()
.retain(|_, frag| (time_ticks - frag.ts_ticks) < protocol::v1::FRAGMENT_EXPIRATION);

View file

@ -19,11 +19,9 @@ use crate::vl1::debug_event;
use crate::vl1::identity::{Identity, IdentitySecret};
use crate::vl1::node::*;
use crate::vl1::Valid;
use crate::vl1::{Endpoint, Path};
use super::address::PartialAddress;
use crate::vl1::{Address, Endpoint, Path};
use crate::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
use super::LegacyAddress;
pub(crate) const SERVICE_INTERVAL_MS: i64 = 10000;
pub struct Peer<Application: ApplicationLayer + ?Sized> {
@ -339,8 +337,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
let header = packet.struct_mut_at::<v1::PacketHeader>(0).unwrap();
header.id.copy_from_slice(&tag[0..8]);
header.dest = *self.identity.address.legacy_address().as_bytes();
header.src = *node.identity().address.legacy_address().as_bytes();
header.dest = *self.identity.address.legacy_bytes();
header.src = *node.identity().address.legacy_bytes();
header.flags_cipher_hops = flags_cipher_hops;
header.mac.copy_from_slice(&tag[8..16]);
} else {
@ -356,8 +354,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
{
let header = packet.struct_mut_at::<v1::PacketHeader>(0).unwrap();
header.id = self.v1_proto_next_message_id().to_be_bytes();
header.dest = *self.identity.address.legacy_address().as_bytes();
header.src = *node.identity().address.legacy_address().as_bytes();
header.dest = *self.identity.address.legacy_bytes();
header.src = *node.identity().address.legacy_bytes();
header.flags_cipher_hops = flags_cipher_hops;
header
},
@ -414,8 +412,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
{
let f: &mut (v1::PacketHeader, v1::message_component_structs::HelloFixedHeaderFields) = packet.append_struct_get_mut().unwrap();
f.0.id = message_id.to_ne_bytes();
f.0.dest = *self.identity.address.legacy_address().as_bytes();
f.0.src = *node.identity().address.legacy_address().as_bytes();
f.0.dest = *self.identity.address.legacy_bytes();
f.0.src = *node.identity().address.legacy_bytes();
f.0.flags_cipher_hops = v1::CIPHER_NOCRYPT_POLY1305;
f.1.verb = message_type::VL1_HELLO;
f.1.version_proto = PROTOCOL_VERSION;
@ -722,7 +720,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
self.identity.address.to_string(),
received_identity.to_string()
);
node.handle_incoming_identity(app, inner, received_identity, time_ticks, true);
// TODO
//node.handle_incoming_identity(app, inner, received_identity, time_ticks, true);
} else {
debug_event!(
app,
@ -771,8 +770,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
if !self
.send(app, node, None, time_ticks, |packet| {
while addresses.len() >= ADDRESS_SIZE && (packet.len() + Identity::MAX_MARSHAL_SIZE) <= UDP_DEFAULT_MTU {
if let Some(zt_address) = LegacyAddress::from_bytes(&addresses[..ADDRESS_SIZE]) {
if let Some(peer) = node.peer_legacy(&zt_address) {
if let Ok(zt_address) = PartialAddress::from_bytes(&addresses[..ADDRESS_SIZE]) {
if let Some(peer) = node.peers.get_unambiguous(&zt_address) {
peer.identity.write_bytes(packet, !self.is_v2())?;
}
}

View file

@ -1,95 +1,83 @@
use std::collections::HashMap;
use std::collections::BTreeMap;
use std::ops::Bound;
use std::sync::{Arc, RwLock};
use crate::vl1::address::{Address, LegacyAddress};
use crate::vl1::node::ApplicationLayer;
use crate::vl1::Peer;
use super::address::{Address, PartialAddress};
use super::identity::{Identity, IdentitySecret};
use super::node::ApplicationLayer;
use super::peer::Peer;
use zerotier_utils::oneormore::OneOrMore;
use zerotier_crypto::typestate::Valid;
/// Mapping of addresses (and short legacy addresses) to peers.
///
/// Collisions in the legacy 40-bit address space are very rare, so the OneOrMore<> optimization is
/// used to allow lookups to almost always happen by way of a simple u64 key.
pub struct PeerMap<Application: ApplicationLayer + ?Sized> {
peers: RwLock<HashMap<LegacyAddress, OneOrMore<Arc<Peer<Application>>>>>,
maps: [RwLock<BTreeMap<Address, Arc<Peer<Application>>>>; 256],
}
impl<Application: ApplicationLayer + ?Sized> PeerMap<Application> {
pub fn new() -> Self {
Self { peers: RwLock::new(HashMap::new()) }
Self { maps: std::array::from_fn(|_| RwLock::new(BTreeMap::new())) }
}
pub fn each<F: FnMut(&Arc<Peer<Application>>)>(&self, mut f: F) {
let peers = self.peers.read().unwrap();
for (_, pl) in peers.iter() {
for p in pl.iter() {
for m in self.maps.iter() {
let mm = m.read().unwrap();
for (_, p) in mm.iter() {
f(p);
}
}
}
pub fn remove(&self, address: &Address) {
let peers = self.peers.write().unwrap();
if let Some(list) = peers.get_mut(&address.legacy_address()) {
list.remove_if(|peer| peer.identity.address.eq(address));
if list.is_empty() {
peers.remove(&address.legacy_address());
pub fn remove(&self, address: &Address) -> Option<Arc<Peer<Application>>> {
self.maps[address.0[0] as usize].write().unwrap().remove(address)
}
/// Get an exact match for a full specificity address.
/// An Address always carries full 384-bit specificity, so this is a plain keyed lookup.
pub fn get_exact(&self, address: &Address) -> Option<Arc<Peer<Application>>> {
self.maps[address.0[0] as usize].read().unwrap().get(address).cloned()
}
/// Get a matching peer for a partial address of any specificity, but return None if the match is ambiguous.
pub fn get_unambiguous(&self, address: &PartialAddress) -> Option<Arc<Peer<Application>>> {
let mm = self.maps[address.0 .0[0] as usize].read().unwrap();
let matches = mm.range::<[u8; 48], (Bound<&[u8; 48]>, Bound<&[u8; 48]>)>((Bound::Included(&address.0 .0), Bound::Unbounded));
let mut r = None;
for m in matches {
if address.matches(m.0) {
if r.is_none() {
r.insert(m.1);
} else {
return None;
}
} else {
break;
}
}
return r.cloned();
}
pub fn get(&self, address: &Address) -> Option<Arc<Peer<Application>>> {
self.peers.read().unwrap().get(&address.legacy_address()).and_then(|list| {
for p in list.iter() {
if p.identity.address.eq(address) {
return Some(p.clone());
}
}
return None;
})
}
/// Get a peer by only its short 40-bit address.
///
/// This is only used in V1 compatibility mode to look up peers by V1 address. The rule here
/// is that only one V1 peer can map to one V1 address.
pub(crate) fn get_legacy(&self, legacy_address: &LegacyAddress) -> Option<Arc<Peer<Application>>> {
self.peers.read().unwrap().get(legacy_address).and_then(|list| {
// First, get the matching peer whose identity is of the legacy x25519-only type.
for p in list.iter() {
if p.identity.p384.is_none() {
return Some(p.clone());
}
}
// Then, if that doesn't exist, get the first matching peer with the same short address.
return list.front().cloned();
})
}
/// Insert the supplied peer if it is in fact unique.
///
/// This returns either the new peer or the existing one if the new peer is a duplicate. True is returned
/// for the second return value if the new peer is new or false if it was a duplicate.
///
/// Short 40-bit addresses are unique within the domain of peers with V1 identities, meaning identities
/// that lack P-384 keys. Otherwise the full 384-bit key space is used.
pub fn insert_if_unique(&self, peer: Arc<Peer<Application>>) -> (Arc<Peer<Application>>, bool) {
let peers = self.peers.write().unwrap();
if let Some(list) = peers.get(&peer.identity.address.legacy_address()) {
for p in list.iter() {
if (p.identity.p384.is_none()
&& peer.identity.p384.is_none()
&& p.identity.address.legacy_address() == peer.identity.address.legacy_address())
|| p.identity.address.eq(&peer.identity.address)
{
return (p.clone(), false);
}
}
list.push_front(peer.clone());
/// Insert the supplied peer if it is in fact new, otherwise return the existing peer with the same address.
pub fn add(&self, peer: Arc<Peer<Application>>) -> (Arc<Peer<Application>>, bool) {
let mut mm = self.maps[peer.identity.address.0[0] as usize].write().unwrap();
let p = mm.entry(peer.identity.address.clone()).or_insert(peer.clone());
if Arc::ptr_eq(p, &peer) {
(peer, true)
} else {
peers.insert(peer.identity.address.legacy_address(), OneOrMore::new_one(peer.clone()));
(p.clone(), false)
}
return (peer, true);
}
/// Get a peer or create one if not found.
/// This should be used when the peer will almost always be new, such as on OK(WHOIS).
pub fn get_or_add(&self, this_node_identity: &IdentitySecret, peer_identity: Valid<Identity>, time_ticks: i64) -> Option<Arc<Peer<Application>>> {
let peer = Arc::new(Peer::new(this_node_identity, peer_identity, time_ticks)?);
Some(
self.maps[peer.identity.address.0[0] as usize]
.write()
.unwrap()
.entry(peer.identity.address.clone())
.or_insert(peer)
.clone(),
)
}
}
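get_unambiguous leans on BTreeMap's sort order: every full address sharing a prefix sorts contiguously at or after the zero-padded prefix itself, so the scan can stop at the first non-match. A standalone sketch of the same idea with plain Vec<u8> keys (not the crate's types):

use std::collections::BTreeMap;
use std::ops::Bound;

fn get_unambiguous(m: &BTreeMap<Vec<u8>, &'static str>, prefix: &[u8]) -> Option<&'static str> {
    let mut r = None;
    for (k, v) in m.range((Bound::Included(prefix.to_vec()), Bound::Unbounded)) {
        if k.starts_with(prefix) {
            if r.is_none() {
                r = Some(*v); // first match: remember it
            } else {
                return None; // second match: the prefix is ambiguous
            }
        } else {
            break; // keys are sorted, so nothing past here can match
        }
    }
    r
}

fn main() {
    let mut m = BTreeMap::new();
    m.insert(vec![1, 2, 3, 4], "alice");
    m.insert(vec![1, 2, 9, 9], "bob");
    assert_eq!(get_unambiguous(&m, &[1, 2, 3]), Some("alice"));
    assert_eq!(get_unambiguous(&m, &[1, 2]), None); // matches both peers
}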

View file

@ -3,8 +3,8 @@
use std::collections::BTreeSet;
use std::io::Write;
use crate::vl1::identity::Identity;
use crate::vl1::Endpoint;
use super::endpoint::Endpoint;
use super::identity::{Identity, IdentitySecret};
use zerotier_crypto::typestate::Verified;
use zerotier_utils::arrayvec::ArrayVec;
@ -13,8 +13,6 @@ use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use serde::{Deserialize, Serialize};
use super::identity::IdentitySecret;
/// Description of a member of a root cluster.
///
/// Natural sort order is in order of identity address.

View file

@ -0,0 +1,115 @@
use std::collections::BTreeMap;
use std::convert::Infallible;
use std::ops::Bound;
use std::sync::{Mutex, Weak};
use super::address::PartialAddress;
use super::identity::Identity;
use super::node::{ApplicationLayer, InnerProtocolLayer, Node};
use super::path::Path;
use crate::debug_event;
use crate::protocol;
use zerotier_crypto::typestate::Valid;
use zerotier_utils::ringbuffer::RingBuffer;
pub struct Whois<Application: ApplicationLayer + ?Sized> {
whois_queue: Mutex<BTreeMap<PartialAddress, WhoisQueueItem<Application>>>,
}
struct WhoisQueueItem<Application: ApplicationLayer + ?Sized> {
pending_v1_packets: RingBuffer<(Weak<Path<Application>>, protocol::PooledPacketBuffer), { protocol::WHOIS_MAX_WAITING_PACKETS }>,
last_retry_time: i64,
retry_count: u16,
}
impl<Application: ApplicationLayer + ?Sized> Whois<Application> {
pub fn new() -> Self {
Self { whois_queue: Mutex::new(BTreeMap::new()) }
}
pub fn query(
&self,
app: &Application,
address: &PartialAddress,
waiting_packet: Option<(Weak<Path<Application>>, protocol::PooledPacketBuffer)>,
time_ticks: i64,
) {
}
pub fn handle_incoming_identity<Inner: InnerProtocolLayer + ?Sized>(
&self,
app: &Application,
node: &Node<Application>,
inner: &Inner,
time_ticks: i64,
identity: Valid<Identity>,
) {
let mut queued_items = Vec::with_capacity(2);
{
// Iterate "up" the sorted list of pending requests since less specific addresses will be sorted
// before more specific addresses. We keep going up until we find a non-matching address, matching
// all partials that this full identity matches.
let mut q = self.whois_queue.lock().unwrap();
let mut to_delete = Vec::with_capacity(2);
for qi in q.range((Bound::Unbounded, Bound::Included(identity.address.to_partial()))).rev() {
if qi.0.matches(&identity.address) {
to_delete.push(qi.0.clone()); // clone so the borrow of the queue ends before removal below
// TODO
} else {
break;
}
}
for a in to_delete {
queued_items.push(q.remove(&a).unwrap());
}
}
if let Some(peer) = node.peers.get_or_add(&node.identity_secret, identity, time_ticks) {
for qi in queued_items.iter() {
for pkt in qi.pending_v1_packets.iter() {
if let Some(source_path) = pkt.0.upgrade() {
if let Ok(packet_header) = pkt.1.struct_at::<protocol::v1::PacketHeader>(0) {
peer.v1_proto_receive(node, app, inner, time_ticks, &source_path, packet_header, &pkt.1, &[]);
}
}
}
}
}
}
pub fn retry_queued(&self) {}
fn send_whois(&self, app: &Application, node: &Node<Application>, mut addresses: &[PartialAddress], time_ticks: i64) {
debug_assert!(!addresses.is_empty());
debug_event!(app, "[vl1] [v1] sending WHOIS for {}", {
let mut tmp = String::new();
for a in addresses.iter() {
if !tmp.is_empty() {
tmp.push(',');
}
tmp.push_str(a.to_string().as_str());
}
tmp
});
if let Some(root) = node.best_root() {
while !addresses.is_empty() {
if !root
.send(app, node, None, time_ticks, |packet| -> Result<(), Infallible> {
assert!(packet.append_u8(protocol::message_type::VL1_WHOIS).is_ok());
while !addresses.is_empty() && (packet.len() + addresses[0].as_bytes().len()) <= protocol::UDP_DEFAULT_MTU {
debug_assert_eq!(addresses[0].as_bytes().len(), PartialAddress::LEGACY_SIZE_BYTES); // will need protocol work to support different partial sizes
assert!(packet.append_bytes(addresses[0].as_bytes()).is_ok());
addresses = &addresses[1..];
}
Ok(())
})
.is_some()
{
break;
}
}
}
}
}
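On the wire, the V1 WHOIS built above is just the verb byte followed by packed 5-byte addresses, as many as fit under the MTU. A sketch of that payload layout; the verb value here is an assumption for illustration, not taken from this diff.

// Illustrative only: the verb id is assumed.
const VL1_WHOIS: u8 = 0x04;

fn main() {
    let addresses: &[[u8; 5]] = &[[1, 2, 3, 4, 5], [9, 8, 7, 6, 5]];
    let mut packet = vec![VL1_WHOIS];
    for a in addresses {
        packet.extend_from_slice(a); // 5 bytes per queried address
    }
    assert_eq!(packet.len(), 1 + addresses.len() * 5);
}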

View file

@ -1,18 +1,10 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::num::NonZeroU64;
use std::str::FromStr;
pub type NetworkId = u64;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::hex;
use zerotier_utils::hex::HEX_CHARS;
use crate::vl1::Address;
//pub struct NetworkId;
/*
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct NetworkId(NonZeroU64);
@ -172,3 +164,4 @@ impl<'de> Deserialize<'de> for NetworkId {
}
}
}
*/

View file

@ -10,7 +10,7 @@ use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use crate::protocol;
use crate::vl1::{Address, InetAddress, LegacyAddress, MAC};
use crate::vl1::{Address, InetAddress, MAC};
#[allow(unused)]
pub const RULES_ENGINE_REVISION: u8 = 1;
@ -174,16 +174,16 @@ impl Default for RuleValue {
pub trait RuleVisitor {
fn action_drop(&mut self) -> bool;
fn action_accept(&mut self) -> bool;
fn action_tee(&mut self, address: LegacyAddress, flags: u32, length: u16) -> bool;
fn action_watch(&mut self, address: LegacyAddress, flags: u32, length: u16) -> bool;
fn action_redirect(&mut self, address: LegacyAddress, flags: u32, length: u16) -> bool;
fn action_tee(&mut self, address: Address, flags: u32, length: u16) -> bool;
fn action_watch(&mut self, address: Address, flags: u32, length: u16) -> bool;
fn action_redirect(&mut self, address: Address, flags: u32, length: u16) -> bool;
fn action_break(&mut self) -> bool;
fn action_priority(&mut self, qos_bucket: u8) -> bool;
fn invalid_rule(&mut self) -> bool;
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: LegacyAddress);
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: LegacyAddress);
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: Address);
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: Address);
fn match_vlan_id(&mut self, not: bool, or: bool, id: u16);
fn match_vlan_pcp(&mut self, not: bool, or: bool, pcp: u8);
fn match_vlan_dei(&mut self, not: bool, or: bool, dei: u8);

utils/src/base24.rs (new file, 110 lines)
View file

@ -0,0 +1,110 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::io::Write;
use crate::error::InvalidParameterError;
// All unambiguous letters, thus easy to type on the alphabetic keyboards on phones without extra shift taps.
const BASE24_ALPHABET: [u8; 24] = *(b"abcdefghjkmnopqrstuvwxyz"); // avoids 'i' and 'l'
const BASE24_ALPHABET_INV: [u8; 26] = [
0, 1, 2, 3, 4, 5, 6, 7, 255, 8, 9, 255, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
];
/// Encode a byte slice into base24 ASCII format (no padding)
pub fn encode_into(mut b: &[u8], s: &mut String) {
while b.len() >= 4 {
let mut n = u32::from_le_bytes(b[..4].try_into().unwrap());
for _ in 0..6 {
s.push(BASE24_ALPHABET[(n % 24) as usize] as char);
n /= 24;
}
s.push(BASE24_ALPHABET[n as usize] as char);
b = &b[4..];
}
if !b.is_empty() {
let mut n = 0u32;
for i in 0..b.len() {
n |= (b[i] as u32).wrapping_shl((i as u32) * 8);
}
for _ in 0..(b.len() * 2) {
s.push(BASE24_ALPHABET[(n % 24) as usize] as char);
n /= 24;
}
}
}
fn decode_up_to_u32(s: &[u8]) -> Result<u32, InvalidParameterError> {
let mut n = 0u32;
for c in s.iter().rev() {
let mut c = *c;
if c >= 97 && c <= 122 {
c -= 97;
} else if c >= 65 && c <= 90 {
c -= 65;
} else {
return Err(InvalidParameterError("invalid base24 character"));
}
let i = BASE24_ALPHABET_INV[c as usize];
if i == 255 {
return Err(InvalidParameterError("invalid base24 character"));
}
n *= 24;
n = n.wrapping_add(i as u32);
}
return Ok(n);
}
/// Decode a base24 ASCII slice into bytes (no padding, length determines output length)
pub fn decode_into(s: &str, b: &mut Vec<u8>) -> Result<(), InvalidParameterError> {
let mut s = s.as_bytes();
while s.len() >= 7 {
let _ = b.write_all(&decode_up_to_u32(&s[..7])?.to_le_bytes());
s = &s[7..];
}
if !s.is_empty() {
let _ = b.write_all(
&decode_up_to_u32(s)?.to_le_bytes()[..match s.len() {
2 => 1,
4 => 2,
6 => 3,
_ => return Err(InvalidParameterError("invalid base24 length")),
}],
);
}
return Ok(());
}
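The group sizes work because 24^7 = 4,586,471,424 >= 2^32, so seven digits always cover a full four-byte group, while 24^2 = 576 >= 256 covers each byte of a trailing partial group with two digits. A quick check of that arithmetic (a sketch, not part of the file):

fn main() {
    assert!(24u64.pow(7) >= 1u64 << 32); // 7 digits per full 4-byte group
    assert!(24u64.pow(6) < 1u64 << 32);  // ...and 6 would not be enough
    assert!(24u32.pow(2) >= 256);        // 2 digits per byte in a partial group
    assert!(24u64.pow(4) >= 1 << 16 && 24u64.pow(6) >= 1 << 24); // 2- and 3-byte tails
}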
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn base24_encode_decode() {
let mut tmp = [0xffu8; 256];
for _ in 0..3 {
let mut s = String::with_capacity(1024);
let mut v: Vec<u8> = Vec::with_capacity(256);
for i in 1..256 {
s.clear();
encode_into(&tmp[..i], &mut s);
//println!("{}", s);
v.clear();
decode_into(s.as_str(), &mut v).expect("decode error");
assert!(v.as_slice().eq(&tmp[..i]));
}
for b in tmp.iter_mut() {
*b -= 3;
}
}
}
}

View file

@ -7,6 +7,7 @@
*/
pub mod arrayvec;
pub mod base24;
pub mod blob;
pub mod buffer;
pub mod cast;
@ -22,7 +23,6 @@ pub mod io;
pub mod json;
pub mod marshalable;
pub mod memory;
pub mod oneormore;
pub mod pool;
pub mod proquint;
#[cfg(feature = "tokio")]