A ton more reorg work and controller work.

This commit is contained in:
Adam Ierymenko 2022-09-20 12:10:05 -04:00
parent 7724092551
commit 7ec46540fa
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
44 changed files with 1072 additions and 642 deletions

View file

@ -1,5 +1,4 @@
[workspace]
members = [
"crypto",
"network-hypervisor",
@ -11,6 +10,7 @@ members = [
[profile.release]
opt-level = 3
lto = true
strip = true
debug = false
codegen-units = 1
panic = 'abort'
lto = true

View file

@ -14,3 +14,4 @@ zerotier-network-hypervisor = { path = "../network-hypervisor" }
zerotier-vl1-service = { path = "../vl1-service" }
serde = { version = "^1", features = ["derive"], default-features = false }
serde_json = { version = "^1", features = ["std"], default-features = false }
async-trait = "^0"

View file

@ -0,0 +1,105 @@
use std::sync::Arc;
use crate::database::Database;
use async_trait::async_trait;
use zerotier_network_hypervisor::protocol::{verbs, PacketBuffer};
use zerotier_network_hypervisor::util::dictionary::Dictionary;
use zerotier_network_hypervisor::util::marshalable::MarshalUnmarshalError;
use zerotier_network_hypervisor::vl1::{HostSystem, Identity, InnerProtocol, Path, Peer};
use zerotier_network_hypervisor::vl2::NetworkId;
/// A VL2 network controller that answers network config requests from peers,
/// backed by a pluggable Database implementation.
// NOTE(review): the `Database` bound could live only on the impl blocks rather
// than the struct definition (common Rust convention) — confirm before changing.
pub struct Controller<DatabaseImpl: Database> {
    // Backing store for network and member records.
    pub database: Arc<DatabaseImpl>,
}
impl<DatabaseImpl: Database> Controller<DatabaseImpl> {
    /// Create a new controller wrapping the given database handle.
    pub async fn new(database: Arc<DatabaseImpl>) -> Arc<Self> {
        Arc::new(Self { database })
    }

    /// Parse a VL2 network config request packet.
    ///
    /// The payload layout is: network ID (u64), then optionally a length-prefixed
    /// meta-data dictionary, then optionally the requester's current revision and
    /// timestamp (two u64s). Returns Err on malformed input.
    // NOTE(review): this is work in progress — the network lookup result is not
    // yet used and no response is sent.
    async fn handle_network_config_request<HostSystemImpl: HostSystem>(
        &self,
        source: &Peer<HostSystemImpl>,
        source_path: &Path<HostSystemImpl>,
        payload: &PacketBuffer,
    ) -> Result<(), MarshalUnmarshalError> {
        let mut cursor = 0;

        // A network ID that fails validation (e.g. zero) is rejected outright.
        let network_id = match NetworkId::from_u64(payload.read_u64(&mut cursor)?) {
            Some(id) => id,
            None => return Err(MarshalUnmarshalError::InvalidData),
        };

        // Optional meta-data dictionary, present only if more bytes remain.
        let meta_data = if cursor < payload.len() {
            let meta_data_len = payload.read_u16(&mut cursor)?;
            match Dictionary::from_bytes(payload.read_bytes(meta_data_len as usize, &mut cursor)?) {
                Some(d) => d,
                None => return Err(MarshalUnmarshalError::InvalidData),
            }
        } else {
            Dictionary::new()
        };

        // Optional revision/timestamp the requester already holds (read in that order).
        let (have_revision, have_timestamp) = if cursor < payload.len() {
            let revision = payload.read_u64(&mut cursor)?;
            let timestamp = payload.read_u64(&mut cursor)?;
            (Some(revision), Some(timestamp))
        } else {
            (None, None)
        };

        // TODO: generate and send the network config; lookup result currently unused.
        if let Ok(Some(network)) = self.database.get_network(network_id).await {}

        Ok(())
    }
}
#[async_trait]
impl<DatabaseImpl: Database> InnerProtocol for Controller<DatabaseImpl> {
    /// Dispatch an incoming VL1 packet; returns true if the verb was handled here.
    async fn handle_packet<HostSystemImpl: HostSystem>(
        &self,
        source: &Peer<HostSystemImpl>,
        source_path: &Path<HostSystemImpl>,
        verb: u8,
        payload: &PacketBuffer,
    ) -> bool {
        match verb {
            verbs::VL2_VERB_NETWORK_CONFIG_REQUEST => {
                let _ = self.handle_network_config_request(source, source_path, payload).await;
                // TODO: display/log errors
                true
            }
            _ => false,
        }
    }

    /// ERROR responses are not handled yet; parameters are underscore-prefixed
    /// to suppress unused-variable warnings until an implementation lands.
    async fn handle_error<HostSystemImpl: HostSystem>(
        &self,
        _source: &Peer<HostSystemImpl>,
        _source_path: &Path<HostSystemImpl>,
        _in_re_verb: u8,
        _in_re_message_id: u64,
        _error_code: u8,
        _payload: &PacketBuffer,
        _cursor: &mut usize,
    ) -> bool {
        false
    }

    /// OK responses are not handled yet.
    async fn handle_ok<HostSystemImpl: HostSystem>(
        &self,
        _source: &Peer<HostSystemImpl>,
        _source_path: &Path<HostSystemImpl>,
        _in_re_verb: u8,
        _in_re_message_id: u64,
        _payload: &PacketBuffer,
        _cursor: &mut usize,
    ) -> bool {
        false
    }

    /// Accept communication from any identity.
    fn should_communicate_with(&self, _: &Identity) -> bool {
        true
    }
}

View file

@ -0,0 +1,19 @@
use async_trait::async_trait;
use zerotier_network_hypervisor::vl1::{Address, NodeStorage};
use zerotier_network_hypervisor::vl2::NetworkId;
use crate::model::*;
#[async_trait]
/// Storage backend for controller state: networks and their members.
/// Extends NodeStorage, so implementors also store this node's identity.
pub trait Database: NodeStorage + Sync + Send + 'static {
    // Error type returned by all database operations.
    // NOTE(review): no bounds on Error — callers may want at least Debug/Send; confirm.
    type Error;
    /// Look up a network by ID; Ok(None) if it does not exist.
    async fn get_network(&self, id: NetworkId) -> Result<Option<Network>, Self::Error>;
    /// Persist (create or overwrite) a network record.
    async fn save_network(&self, obj: Network) -> Result<(), Self::Error>;
    /// List the node addresses of all members of a network.
    async fn get_network_members(&self, id: NetworkId) -> Result<Vec<Address>, Self::Error>;
    /// Look up one member of a network; Ok(None) if not a member.
    async fn get_member(&self, network_id: NetworkId, node_id: Address) -> Result<Option<Member>, Self::Error>;
    // NOTE(review): save_member takes only IDs and returns Option<Member>, mirroring
    // get_member instead of save_network (which takes the object and returns ()).
    // Looks like a copy/paste slip — confirm the intended signature with the author.
    async fn save_member(&self, network_id: NetworkId, node_id: Address) -> Result<Option<Member>, Self::Error>;
}

View file

@ -1,3 +1,5 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
pub mod controller;
pub mod database;
pub mod model;

View file

@ -8,6 +8,7 @@ pub mod poly1305;
pub mod random;
pub mod salsa;
pub mod secret;
pub mod verified;
pub mod x25519;
pub mod zssp;

35
crypto/src/verified.rs Normal file
View file

@ -0,0 +1,35 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::ops::{Deref, DerefMut};
/// A zero-overhead wrapper that signals that a credential is verified.
///
/// Functions that must only receive already-verified objects can take
/// `Verified<T>` to state that requirement in the type system, making it
/// semantically harder to pass in an untrusted value by accident. The wrapper
/// is `repr(transparent)` and adds no runtime cost.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct Verified<T>(pub T);

impl<T> Verified<T> {
    /// Consume the wrapper, yielding the inner (verified) value.
    #[inline(always)]
    pub fn unwrap(self) -> T {
        self.0
    }
}

impl<T> Deref for Verified<T> {
    type Target = T;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        let Self(inner) = self;
        inner
    }
}

impl<T> DerefMut for Verified<T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        let Self(inner) = self;
        inner
    }
}

View file

@ -5,8 +5,8 @@ pub const VERSION_MINOR: u8 = 99;
pub const VERSION_REVISION: u16 = 1;
pub mod error;
#[allow(unused)]
pub mod protocol;
pub mod util;
pub mod vl1;
pub mod vl2;
pub use vl1::protocol::{PacketBuffer, PooledPacketBuffer};

View file

@ -3,9 +3,11 @@
use std::convert::TryFrom;
use std::mem::MaybeUninit;
use crate::util::buffer::Buffer;
use crate::vl1::Address;
use zerotier_utils::buffer::{Buffer, PooledBufferFactory};
use zerotier_utils::pool::{Pool, Pooled};
/*
* Protocol versions
*
@ -36,13 +38,6 @@ use crate::vl1::Address;
* + Contained early pre-alpha versions of multipath, which are deprecated
* 11 - 1.6.0 ... 2.0.0
* + Supports and prefers AES-GMAC-SIV symmetric crypto, backported.
*
* 20 - 2.0.0 ... CURRENT
* + Forward secrecy with cryptographic ratchet! Finally!!!
* + New identity format including both x25519 and NIST P-521 keys.
* + AES-GMAC-SIV, a FIPS-compliant SIV construction using AES.
* + HELLO and OK(HELLO) include an extra HMAC to harden authentication
* + HELLO and OK(HELLO) use a dictionary for better extensibility.
*/
pub const PROTOCOL_VERSION: u8 = 20;
@ -55,13 +50,13 @@ pub const PROTOCOL_VERSION_MIN: u8 = 11;
pub type PacketBuffer = Buffer<{ v1::SIZE_MAX }>;
/// Factory type to supply to a new PacketBufferPool, used in PooledPacketBuffer and PacketBufferPool types.
pub type PacketBufferFactory = crate::util::buffer::PooledBufferFactory<{ crate::vl1::protocol::v1::SIZE_MAX }>;
pub type PacketBufferFactory = PooledBufferFactory<{ crate::protocol::v1::SIZE_MAX }>;
/// Packet buffer checked out of pool, automatically returns on drop.
pub type PooledPacketBuffer = zerotier_utils::pool::Pooled<PacketBuffer, PacketBufferFactory>;
pub type PooledPacketBuffer = Pooled<PacketBuffer, PacketBufferFactory>;
/// Source for instances of PacketBuffer
pub type PacketBufferPool = zerotier_utils::pool::Pool<PacketBuffer, PacketBufferFactory>;
pub type PacketBufferPool = Pool<PacketBuffer, PacketBufferFactory>;
/// 64-bit packet (outer) ID.
pub type PacketId = u64;
@ -124,7 +119,7 @@ pub const ADDRESS_RESERVED_PREFIX: u8 = 0xff;
/// Size of an identity fingerprint (SHA384)
pub const IDENTITY_FINGERPRINT_SIZE: usize = 48;
pub mod v1 {
pub(crate) mod v1 {
use super::*;
/// Size of packet header that lies outside the encryption envelope.
@ -466,7 +461,7 @@ pub const IDENTITY_POW_THRESHOLD: u8 = 17;
mod tests {
use std::mem::size_of;
use crate::vl1::protocol::*;
use crate::protocol::*;
#[test]
fn representation() {

View file

@ -1,30 +1,74 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use crate::util::buffer::Buffer;
use std::error::Error;
use std::fmt::{Debug, Display};
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
/// Must be larger than any object we want to use with to_bytes() or from_bytes().
/// This hack can go away once Rust allows us to reference trait consts as generics.
const TEMP_BUF_SIZE: usize = 8192;
/// Error returned by marshal/unmarshal operations.
pub enum MarshalUnmarshalError {
    /// A read or write would exceed the bounds of the buffer.
    OutOfBounds,
    /// The data was malformed or failed validation.
    InvalidData,
    /// The data was encoded with an unsupported version.
    UnsupportedVersion,
    /// An underlying I/O error occurred.
    IoError(std::io::Error),
}

impl Display for MarshalUnmarshalError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::OutOfBounds => f.write_str("out of bounds"),
            Self::InvalidData => f.write_str("invalid data"),
            Self::UnsupportedVersion => f.write_str("unsupported version"),
            // Delegate to the inner error's Display directly instead of
            // allocating an intermediate String via to_string().
            Self::IoError(e) => Display::fmt(e, f),
        }
    }
}

impl Debug for MarshalUnmarshalError {
    #[inline(always)]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Debug intentionally mirrors Display for concise log output.
        Display::fmt(self, f)
    }
}

impl Error for MarshalUnmarshalError {}

impl From<OutOfBoundsError> for MarshalUnmarshalError {
    #[inline(always)]
    fn from(_: OutOfBoundsError) -> Self {
        Self::OutOfBounds
    }
}

impl From<std::io::Error> for MarshalUnmarshalError {
    #[inline(always)]
    fn from(e: std::io::Error) -> Self {
        Self::IoError(e)
    }
}
/// A super-lightweight zero-allocation serialization interface.
pub trait Marshalable: Sized {
const MAX_MARSHAL_SIZE: usize;
/// Write this object into a buffer.
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()>;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError>;
/// Read this object from a buffer.
///
/// The supplied cursor is advanced by the number of bytes read. If an Err is returned
/// the value of the cursor is undefined but likely points to about where the error
/// occurred. It may also point beyond the buffer, which would indicate an overrun error.
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Self>;
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Self, MarshalUnmarshalError>;
/// Write this marshalable entity into a buffer of the given size.
///
/// This will return an Err if the buffer is too small or some other error occurs. It's just
/// a shortcut to creating a buffer and marshaling into it.
fn to_buffer<const BL: usize>(&self) -> std::io::Result<Buffer<BL>> {
fn to_buffer<const BL: usize>(&self) -> Result<Buffer<BL>, MarshalUnmarshalError> {
let mut tmp = Buffer::new();
self.marshal(&mut tmp)?;
Ok(tmp)
@ -33,7 +77,7 @@ pub trait Marshalable: Sized {
/// Unmarshal this object from a buffer.
///
/// This is just a shortcut to calling unmarshal() with a zero cursor and then discarding the cursor.
fn from_buffer<const BL: usize>(buf: &Buffer<BL>) -> std::io::Result<Self> {
fn from_buffer<const BL: usize>(buf: &Buffer<BL>) -> Result<Self, MarshalUnmarshalError> {
let mut tmp = 0;
Self::unmarshal(buf, &mut tmp)
}
@ -46,14 +90,14 @@ pub trait Marshalable: Sized {
}
/// Unmarshal from a raw slice.
fn from_bytes(b: &[u8]) -> std::io::Result<Self> {
fn from_bytes(b: &[u8]) -> Result<Self, MarshalUnmarshalError> {
if b.len() <= TEMP_BUF_SIZE {
let mut tmp = Buffer::<TEMP_BUF_SIZE>::new_boxed();
assert!(tmp.append_bytes(b).is_ok());
let mut cursor = 0;
Self::unmarshal(&tmp, &mut cursor)
} else {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "object too large"))
Err(MarshalUnmarshalError::OutOfBounds)
}
}
}

View file

@ -1,26 +1,8 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
pub mod buffer;
pub mod dictionary;
pub(crate) mod gate;
pub mod marshalable;
/// A value for ticks that indicates that something never happened, and is thus very long before zero ticks.
pub(crate) const NEVER_HAPPENED_TICKS: i64 = -2147483648;
#[cfg(feature = "debug_events")]
#[allow(unused_macros)]
macro_rules! debug_event {
($si:expr, $fmt:expr $(, $($arg:tt)*)?) => {
$si.event(crate::vl1::Event::Debug(file!(), line!(), format!($fmt, $($($arg)*)?)));
}
}
#[cfg(not(feature = "debug_events"))]
#[allow(unused_macros)]
macro_rules! debug_event {
($si:expr, $fmt:expr $(, $($arg:tt)*)?) => {};
}
#[allow(unused_imports)]
pub(crate) use debug_event;

View file

@ -7,10 +7,10 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::error::InvalidFormatError;
use crate::util::buffer::Buffer;
use crate::util::marshalable::Marshalable;
use crate::vl1::protocol::{ADDRESS_RESERVED_PREFIX, ADDRESS_SIZE};
use crate::protocol::{ADDRESS_RESERVED_PREFIX, ADDRESS_SIZE};
use crate::util::marshalable::*;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::hex;
use zerotier_utils::hex::HEX_CHARS;
@ -72,16 +72,14 @@ impl Marshalable for Address {
const MAX_MARSHAL_SIZE: usize = ADDRESS_SIZE;
#[inline(always)]
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError> {
buf.append_bytes(&self.0.get().to_be_bytes()[8 - ADDRESS_SIZE..])
.map_err(|_| MarshalUnmarshalError::OutOfBounds)
}
#[inline(always)]
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Self> {
Self::from_bytes_fixed(buf.read_bytes_fixed(cursor)?).map_or_else(
|| Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "cannot be zero")),
|a| Ok(a),
)
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Self, MarshalUnmarshalError> {
Self::from_bytes_fixed(buf.read_bytes_fixed(cursor)?).ok_or(MarshalUnmarshalError::InvalidData)
}
}
@ -194,13 +192,13 @@ mod tests {
rawaddr = 0;
assert!(super::Address::from_u64(rawaddr).is_none());
rawaddr = (crate::vl1::protocol::ADDRESS_RESERVED_PREFIX as u64) << 32;
rawaddr = (crate::protocol::ADDRESS_RESERVED_PREFIX as u64) << 32;
assert!(super::Address::from_u64(rawaddr).is_none());
}
#[test]
fn address_marshal_bytes() {
use crate::vl1::protocol::ADDRESS_SIZE;
use crate::protocol::ADDRESS_SIZE;
let mut v: Vec<u8> = Vec::with_capacity(ADDRESS_SIZE);
let mut i = 0;
while i < ADDRESS_SIZE {

View file

@ -7,12 +7,13 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::error::InvalidFormatError;
use crate::util::buffer::Buffer;
use crate::util::marshalable::Marshalable;
use crate::protocol::IDENTITY_FINGERPRINT_SIZE;
use crate::util::marshalable::*;
use crate::vl1::inetaddress::InetAddress;
use crate::vl1::protocol::IDENTITY_FINGERPRINT_SIZE;
use crate::vl1::{Address, MAC};
use zerotier_utils::buffer::Buffer;
pub const TYPE_NIL: u8 = 0;
pub const TYPE_ZEROTIER: u8 = 1;
pub const TYPE_ETHERNET: u8 = 2;
@ -129,29 +130,31 @@ impl Endpoint {
impl Marshalable for Endpoint {
const MAX_MARSHAL_SIZE: usize = MAX_MARSHAL_SIZE;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError> {
match self {
Endpoint::Nil => buf.append_u8(16 + TYPE_NIL),
Endpoint::Nil => {
buf.append_u8(16 + TYPE_NIL)?;
}
Endpoint::ZeroTier(a, h) => {
buf.append_u8(16 + TYPE_ZEROTIER)?;
buf.append_bytes_fixed(&a.to_bytes())?;
buf.append_bytes_fixed(h)
buf.append_bytes_fixed(h)?;
}
Endpoint::Ethernet(m) => {
buf.append_u8(16 + TYPE_ETHERNET)?;
buf.append_bytes_fixed(&m.to_bytes())
buf.append_bytes_fixed(&m.to_bytes())?;
}
Endpoint::WifiDirect(m) => {
buf.append_u8(16 + TYPE_WIFIDIRECT)?;
buf.append_bytes_fixed(&m.to_bytes())
buf.append_bytes_fixed(&m.to_bytes())?;
}
Endpoint::Bluetooth(m) => {
buf.append_u8(16 + TYPE_BLUETOOTH)?;
buf.append_bytes_fixed(&m.to_bytes())
buf.append_bytes_fixed(&m.to_bytes())?;
}
Endpoint::Icmp(ip) => {
buf.append_u8(16 + TYPE_ICMP)?;
ip.marshal(buf)
ip.marshal(buf)?;
}
Endpoint::IpUdp(ip) => {
// Wire encoding of IP/UDP type endpoints is the same as naked InetAddress
@ -159,33 +162,34 @@ impl Marshalable for Endpoint {
// here as an IP/UDP Endpoint and vice versa. Supporting this is why 16 is added
// to all Endpoint type IDs for wire encoding so that values of 4 or 6 can be
// interpreted as IP/UDP InetAddress.
ip.marshal(buf)
ip.marshal(buf)?;
}
Endpoint::IpTcp(ip) => {
buf.append_u8(16 + TYPE_IPTCP)?;
ip.marshal(buf)
ip.marshal(buf)?;
}
Endpoint::Http(url) => {
buf.append_u8(16 + TYPE_HTTP)?;
let b = url.as_bytes();
buf.append_varint(b.len() as u64)?;
buf.append_bytes(b)
buf.append_bytes(b)?;
}
Endpoint::WebRTC(offer) => {
buf.append_u8(16 + TYPE_WEBRTC)?;
let b = offer.as_slice();
buf.append_varint(b.len() as u64)?;
buf.append_bytes(b)
buf.append_bytes(b)?;
}
Endpoint::ZeroTierEncap(a, h) => {
buf.append_u8(16 + TYPE_ZEROTIER_ENCAP)?;
buf.append_bytes_fixed(&a.to_bytes())?;
buf.append_bytes_fixed(h)
buf.append_bytes_fixed(h)?;
}
}
Ok(())
}
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Endpoint> {
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Endpoint, MarshalUnmarshalError> {
let type_byte = buf.read_u8(cursor)?;
if type_byte < 16 {
if type_byte == 4 {
@ -201,10 +205,7 @@ impl Marshalable for Endpoint {
u16::from_be_bytes(b[16..18].try_into().unwrap()),
)))
} else {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"unrecognized endpoint type in stream",
))
Err(MarshalUnmarshalError::InvalidData)
}
} else {
match type_byte - 16 {
@ -232,10 +233,7 @@ impl Marshalable for Endpoint {
let zt = Address::unmarshal(buf, cursor)?;
Ok(Endpoint::ZeroTierEncap(zt, buf.read_bytes_fixed(cursor)?.clone()))
}
_ => Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"unrecognized endpoint type in stream",
)),
_ => Err(MarshalUnmarshalError::InvalidData),
}
}
}
@ -451,13 +449,10 @@ impl<'de> Deserialize<'de> for Endpoint {
#[cfg(test)]
mod tests {
use super::{Endpoint, MAX_MARSHAL_SIZE};
use crate::{
util::marshalable::Marshalable,
vl1::{
protocol::{ADDRESS_RESERVED_PREFIX, ADDRESS_SIZE, IDENTITY_FINGERPRINT_SIZE},
Address,
},
};
use crate::protocol::*;
use crate::util::marshalable::*;
use crate::vl1::address::Address;
use zerotier_utils::buffer::*;
fn randstring(len: u8) -> String {
(0..len)
@ -488,8 +483,6 @@ mod tests {
#[test]
fn endpoint_marshal_nil() {
use crate::util::buffer::Buffer;
let n = Endpoint::Nil;
let mut buf = Buffer::<1>::new();
@ -506,8 +499,6 @@ mod tests {
#[test]
fn endpoint_marshal_zerotier() {
use crate::util::buffer::Buffer;
for _ in 0..1000 {
let mut hash = [0u8; IDENTITY_FINGERPRINT_SIZE];
hash.fill_with(|| rand::random());
@ -538,8 +529,6 @@ mod tests {
#[test]
fn endpoint_marshal_zerotier_encap() {
use crate::util::buffer::Buffer;
for _ in 0..1000 {
let mut hash = [0u8; IDENTITY_FINGERPRINT_SIZE];
hash.fill_with(|| rand::random());
@ -570,8 +559,6 @@ mod tests {
#[test]
fn endpoint_marshal_mac() {
use crate::util::buffer::Buffer;
for _ in 0..1000 {
let mac = crate::vl1::MAC::from_u64(rand::random()).unwrap();
@ -596,8 +583,6 @@ mod tests {
#[test]
fn endpoint_marshal_inetaddress() {
use crate::util::buffer::Buffer;
for _ in 0..1000 {
let mut v = [0u8; 16];
v.fill_with(|| rand::random());
@ -625,8 +610,6 @@ mod tests {
#[test]
fn endpoint_marshal_http() {
use crate::util::buffer::Buffer;
for _ in 0..1000 {
let http = Endpoint::Http(randstring(30));
let mut buf = Buffer::<33>::new();
@ -643,8 +626,6 @@ mod tests {
#[test]
fn endpoint_marshal_webrtc() {
use crate::util::buffer::Buffer;
for _ in 0..1000 {
let mut v = Vec::with_capacity(100);
v.fill_with(|| rand::random());

View file

@ -1,6 +1,6 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use crate::vl1::protocol::*;
use crate::protocol::*;
/// Packet fragment re-assembler and container.
///

View file

@ -14,11 +14,12 @@ use zerotier_crypto::salsa::Salsa;
use zerotier_crypto::secret::Secret;
use zerotier_crypto::x25519::*;
use zerotier_utils::arrayvec::ArrayVec;
use zerotier_utils::hex;
use zerotier_utils::memory::{as_byte_array, as_flat_object};
use crate::error::{InvalidFormatError, InvalidParameterError};
use crate::vl1::protocol::{ADDRESS_SIZE, ADDRESS_SIZE_STRING, IDENTITY_FINGERPRINT_SIZE, IDENTITY_POW_THRESHOLD};
use crate::protocol::{ADDRESS_SIZE, ADDRESS_SIZE_STRING, IDENTITY_FINGERPRINT_SIZE, IDENTITY_POW_THRESHOLD};
use crate::vl1::Address;
/// Current maximum size for an identity signature.
@ -357,17 +358,17 @@ impl Identity {
/// set the old 96-byte signature plus hash format used in ZeroTier v1 is used.
///
/// A return of None happens if we don't have our secret key(s) or some other error occurs.
pub fn sign(&self, msg: &[u8], legacy_ed25519_only: bool) -> Option<Vec<u8>> {
pub fn sign(&self, msg: &[u8], legacy_ed25519_only: bool) -> Option<ArrayVec<u8, MAX_SIGNATURE_SIZE>> {
if let Some(secret) = self.secret.as_ref() {
if legacy_ed25519_only {
Some(secret.ed25519.sign_zt(msg).to_vec())
Some(secret.ed25519.sign_zt(msg).into())
} else if let Some(p384s) = secret.p384.as_ref() {
let mut tmp: Vec<u8> = Vec::with_capacity(1 + P384_ECDSA_SIGNATURE_SIZE);
let mut tmp = ArrayVec::new();
tmp.push(Self::ALGORITHM_EC_NIST_P384);
let _ = tmp.write_all(&p384s.ecdsa.sign(msg));
Some(tmp)
} else {
let mut tmp: Vec<u8> = Vec::with_capacity(1 + ED25519_SIGNATURE_SIZE);
let mut tmp = ArrayVec::new();
tmp.push(Self::ALGORITHM_X25519);
let _ = tmp.write_all(&secret.ed25519.sign(msg));
Some(tmp)

View file

@ -9,14 +9,15 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::util::marshalable::Marshalable;
use crate::util::marshalable::*;
use crate::error::InvalidFormatError;
use zerotier_utils::buffer::Buffer;
#[cfg(windows)]
use winapi::um::winsock2;
use crate::error::InvalidFormatError;
use crate::util::buffer::Buffer;
#[allow(non_camel_case_types)]
#[cfg(not(windows))]
type sockaddr = libc::sockaddr;
@ -782,7 +783,7 @@ impl InetAddress {
impl Marshalable for InetAddress {
const MAX_MARSHAL_SIZE: usize = 19;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError> {
unsafe {
match self.sa.sa_family as AddressFamilyType {
AF_INET => {
@ -791,7 +792,6 @@ impl Marshalable for InetAddress {
copy_nonoverlapping((&self.sin.sin_addr.s_addr as *const u32).cast::<u8>(), b.as_mut_ptr().offset(1), 4);
b[5] = *(&self.sin.sin_port as *const u16).cast::<u8>();
b[6] = *(&self.sin.sin_port as *const u16).cast::<u8>().offset(1);
Ok(())
}
AF_INET6 => {
let b = buf.append_bytes_fixed_get_mut::<19>()?;
@ -803,14 +803,14 @@ impl Marshalable for InetAddress {
);
b[17] = *(&self.sin6.sin6_port as *const u16).cast::<u8>();
b[18] = *(&self.sin6.sin6_port as *const u16).cast::<u8>().offset(1);
Ok(())
}
_ => buf.append_u8(0),
_ => buf.append_u8(0)?,
}
Ok(())
}
}
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<InetAddress> {
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<InetAddress, MarshalUnmarshalError> {
let t = buf.read_u8(cursor)?;
if t == 4 {
let b: &[u8; 6] = buf.read_bytes_fixed(cursor)?;

View file

@ -8,9 +8,9 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::error::InvalidFormatError;
use crate::util::buffer::Buffer;
use crate::util::marshalable::Marshalable;
use crate::util::marshalable::*;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::hex;
/// An Ethernet MAC address.
@ -87,16 +87,14 @@ impl Marshalable for MAC {
const MAX_MARSHAL_SIZE: usize = 6;
#[inline(always)]
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError> {
buf.append_bytes(&self.0.get().to_be_bytes()[2..])
.map_err(|_| MarshalUnmarshalError::OutOfBounds)
}
#[inline(always)]
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Self> {
Self::from_bytes_fixed(buf.read_bytes_fixed(cursor)?).map_or_else(
|| Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "cannot be zero")),
|a| Ok(a),
)
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Self, MarshalUnmarshalError> {
Self::from_bytes_fixed(buf.read_bytes_fixed(cursor)?).ok_or(MarshalUnmarshalError::InvalidData)
}
}

View file

@ -12,8 +12,6 @@ mod symmetricsecret;
mod whoisqueue;
pub(crate) mod node;
#[allow(unused)]
pub(crate) mod protocol;
pub mod identity;
pub mod inetaddress;
@ -24,7 +22,24 @@ pub use event::Event;
pub use identity::Identity;
pub use inetaddress::InetAddress;
pub use mac::MAC;
pub use node::{DummyInnerProtocol, DummyPathFilter, HostSystem, InnerProtocol, Node, PathFilter, Storage};
pub use node::{DummyInnerProtocol, DummyPathFilter, HostSystem, InnerProtocol, Node, NodeStorage, PathFilter};
pub use path::Path;
pub use peer::Peer;
pub use rootset::{Root, RootSet};
#[cfg(feature = "debug_events")]
#[allow(unused_macros)]
macro_rules! debug_event {
($si:expr, $fmt:expr $(, $($arg:tt)*)?) => {
$si.event(crate::vl1::Event::Debug(file!(), line!(), format!($fmt, $($($arg)*)?)));
}
}
#[cfg(not(feature = "debug_events"))]
#[allow(unused_macros)]
macro_rules! debug_event {
($si:expr, $fmt:expr $(, $($arg:tt)*)?) => {};
}
#[allow(unused_imports)]
pub(crate) use debug_event;

View file

@ -8,23 +8,23 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use parking_lot::{Mutex, RwLock};
use crate::error::InvalidParameterError;
use crate::util::debug_event;
use crate::protocol::*;
use crate::util::gate::IntervalGate;
use crate::util::marshalable::Marshalable;
use crate::vl1::address::Address;
use crate::vl1::debug_event;
use crate::vl1::endpoint::Endpoint;
use crate::vl1::event::Event;
use crate::vl1::identity::Identity;
use crate::vl1::path::{Path, PathServiceResult};
use crate::vl1::peer::Peer;
use crate::vl1::protocol::*;
use crate::vl1::rootset::RootSet;
use crate::vl1::whoisqueue::{QueuedPacket, WhoisQueue};
use zerotier_crypto::random;
use zerotier_crypto::verified::Verified;
use zerotier_utils::hex;
/// Trait implemented by external code to handle events and provide an interface to the system or application.
@ -87,7 +87,7 @@ pub trait HostSystem: Sync + Send + 'static {
/// Trait to be implemented by outside code to provide object storage to VL1
#[async_trait]
pub trait Storage: Sync + Send + 'static {
pub trait NodeStorage: Sync + Send + 'static {
/// Load this node's identity from the data store.
async fn load_node_identity(&self) -> Option<Identity>;
@ -177,14 +177,27 @@ struct BackgroundTaskIntervals {
whois_service: IntervalGate<{ crate::vl1::whoisqueue::SERVICE_INTERVAL_MS }>,
}
/// Mutable fields related to roots and root sets.
struct RootInfo<HostSystemImpl: HostSystem> {
sets: HashMap<String, RootSet>,
/// Root sets to which we are a member.
sets: HashMap<String, Verified<RootSet>>,
/// Root peers and their statically defined endpoints (from root sets).
roots: HashMap<Arc<Peer<HostSystemImpl>>, Vec<Endpoint>>,
/// If this node is a root, these are the root sets to which it's a member in binary serialized form.
/// Set to None if this node is not a root, meaning it doesn't appear in any of its root sets.
this_root_sets: Option<Vec<u8>>,
/// True if sets have been modified and things like 'roots' need to be rebuilt.
sets_modified: bool,
/// True if this node is online, which means it can talk to at least one of its roots.
online: bool,
}
/// Key used to look up paths in a hash map
/// This supports copied keys for storing and refs for fast lookup without having to copy anything.
enum PathKey<'a, HostSystemImpl: HostSystem> {
Copied(Endpoint, HostSystemImpl::LocalSocket),
Ref(&'a Endpoint, &'a HostSystemImpl::LocalSocket),
@ -227,7 +240,6 @@ impl<'a, HostSystemImpl: HostSystem> PathKey<'a, HostSystemImpl> {
}
}
#[inline(always)]
fn to_copied(&self) -> PathKey<'static, HostSystemImpl> {
match self {
Self::Copied(ep, ls) => PathKey::<'static, HostSystemImpl>::Copied(ep.clone(), ls.clone()),
@ -236,16 +248,19 @@ impl<'a, HostSystemImpl: HostSystem> PathKey<'a, HostSystemImpl> {
}
}
/// A VL1 global P2P network node.
/// A ZeroTier VL1 node that can communicate securely with the ZeroTier peer-to-peer network.
pub struct Node<HostSystemImpl: HostSystem> {
/// A random ID generated to identify this particular running instance.
///
/// This can be used to implement multi-homing by allowing remote nodes to distinguish instances
/// that share an identity.
pub instance_id: [u8; 16],
/// This node's identity and permanent keys.
pub identity: Identity,
/// Interval latches for periodic background tasks.
intervals: Mutex<BackgroundTaskIntervals>,
intervals: parking_lot::Mutex<BackgroundTaskIntervals>,
/// Canonicalized network paths, held as Weak<> to be automatically cleaned when no longer in use.
paths: parking_lot::RwLock<HashMap<PathKey<'static, HostSystemImpl>, Arc<Path<HostSystemImpl>>>>,
@ -254,22 +269,19 @@ pub struct Node<HostSystemImpl: HostSystem> {
peers: parking_lot::RwLock<HashMap<Address, Arc<Peer<HostSystemImpl>>>>,
/// This node's trusted roots, sorted in ascending order of quality/preference, and cluster definitions.
roots: RwLock<RootInfo<HostSystemImpl>>,
roots: parking_lot::RwLock<RootInfo<HostSystemImpl>>,
/// Current best root.
best_root: RwLock<Option<Arc<Peer<HostSystemImpl>>>>,
best_root: parking_lot::RwLock<Option<Arc<Peer<HostSystemImpl>>>>,
/// Identity lookup queue, also holds packets waiting on a lookup.
whois: WhoisQueue,
/// Reusable network buffer pool.
buffer_pool: PacketBufferPool,
}
impl<HostSystemImpl: HostSystem> Node<HostSystemImpl> {
pub async fn new<StorageImpl: Storage>(
pub async fn new<NodeStorageImpl: NodeStorage>(
host_system: &HostSystemImpl,
storage: &StorageImpl,
storage: &NodeStorageImpl,
auto_generate_identity: bool,
auto_upgrade_identity: bool,
) -> Result<Self, InvalidParameterError> {
@ -302,27 +314,21 @@ impl<HostSystemImpl: HostSystem> Node<HostSystemImpl> {
Ok(Self {
instance_id: random::get_bytes_secure(),
identity: id,
intervals: Mutex::new(BackgroundTaskIntervals::default()),
intervals: parking_lot::Mutex::new(BackgroundTaskIntervals::default()),
paths: parking_lot::RwLock::new(HashMap::new()),
peers: parking_lot::RwLock::new(HashMap::new()),
roots: RwLock::new(RootInfo {
roots: parking_lot::RwLock::new(RootInfo {
sets: HashMap::new(),
roots: HashMap::new(),
this_root_sets: None,
sets_modified: false,
online: false,
}),
best_root: RwLock::new(None),
best_root: parking_lot::RwLock::new(None),
whois: WhoisQueue::new(),
buffer_pool: PacketBufferPool::new(64, PacketBufferFactory::new()),
})
}
#[inline(always)]
pub fn get_packet_buffer(&self) -> PooledPacketBuffer {
self.buffer_pool.get()
}
pub fn peer(&self, a: Address) -> Option<Arc<Peer<HostSystemImpl>>> {
self.peers.read().get(&a).cloned()
}
@ -404,7 +410,7 @@ impl<HostSystemImpl: HostSystem> Node<HostSystemImpl> {
)
};
// We only "spam" if we are offline.
// We only "spam" (try to contact roots more often) if we are offline.
if root_spam_hello {
root_spam_hello = !self.is_online();
}
@ -778,18 +784,21 @@ impl<HostSystemImpl: HostSystem> Node<HostSystemImpl> {
self.best_root.read().clone()
}
/// Check whether this peer is a root according to any root set trusted by this node.
/// Check whether a peer is a root according to any root set trusted by this node.
pub fn is_peer_root(&self, peer: &Peer<HostSystemImpl>) -> bool {
self.roots.read().roots.keys().any(|p| p.identity.eq(&peer.identity))
}
/// Returns true if this node is a member of a root set (that it knows about).
pub fn this_node_is_root(&self) -> bool {
self.roots.read().this_root_sets.is_some()
}
/// Called when a remote node sends us a root set update, applying the update if it is valid and applicable.
///
/// This will only replace an existing root set with a newer one. It won't add a new root set, which must be
/// done by an authorized user or administrator not just by a root.
///
/// SECURITY NOTE: this DOES NOT validate certificates in the supplied root set! Caller must do that first!
pub(crate) fn remote_update_root_set(&self, received_from: &Identity, rs: RootSet) {
pub(crate) fn remote_update_root_set(&self, received_from: &Identity, rs: Verified<RootSet>) {
let mut roots = self.roots.write();
if let Some(entry) = roots.sets.get_mut(&rs.name) {
if entry.members.iter().any(|m| m.identity.eq(received_from)) && rs.should_replace(entry) {
@ -799,20 +808,22 @@ impl<HostSystemImpl: HostSystem> Node<HostSystemImpl> {
}
}
pub fn add_update_root_set(&self, rs: RootSet) -> bool {
/// Add a new root set or update the existing root set if the new root set is newer and otherwise matches.
pub fn add_update_root_set(&self, rs: Verified<RootSet>) -> bool {
let mut roots = self.roots.write();
if let Some(entry) = roots.sets.get_mut(&rs.name) {
if rs.should_replace(entry) {
*entry = rs;
roots.sets_modified = true;
return true;
true
} else {
false
}
} else if rs.verify() {
roots.sets.insert(rs.name.clone(), rs);
} else {
let _ = roots.sets.insert(rs.name.clone(), rs);
roots.sets_modified = true;
return true;
true
}
return false;
}
/// Returns whether or not this node has any root sets defined.
@ -831,12 +842,7 @@ impl<HostSystemImpl: HostSystem> Node<HostSystemImpl> {
/// Get the root sets that this node trusts.
pub fn root_sets(&self) -> Vec<RootSet> {
self.roots.read().sets.values().cloned().collect()
}
/// Returns true if this node is a member of a root set (that it knows about).
pub fn this_node_is_root(&self) -> bool {
self.roots.read().this_root_sets.is_some()
self.roots.read().sets.values().cloned().map(|s| s.unwrap()).collect()
}
/// Get the canonical Path object corresponding to an endpoint.

View file

@ -6,10 +6,10 @@ use std::sync::atomic::{AtomicI64, Ordering};
use parking_lot::Mutex;
use crate::protocol::*;
use crate::vl1::endpoint::Endpoint;
use crate::vl1::fragmentedpacket::FragmentedPacket;
use crate::vl1::node::*;
use crate::vl1::protocol::*;
use zerotier_crypto::random;

View file

@ -11,14 +11,14 @@ use zerotier_crypto::poly1305;
use zerotier_crypto::random;
use zerotier_crypto::salsa::Salsa;
use zerotier_crypto::secret::Secret;
use zerotier_utils::buffer::BufferReader;
use zerotier_utils::memory::array_range;
use crate::util::buffer::BufferReader;
use crate::util::debug_event;
use crate::protocol::*;
use crate::util::marshalable::Marshalable;
use crate::vl1::address::Address;
use crate::vl1::debug_event;
use crate::vl1::node::*;
use crate::vl1::protocol::*;
use crate::vl1::symmetricsecret::SymmetricSecret;
use crate::vl1::{Endpoint, Identity, Path};
use crate::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};

View file

@ -3,11 +3,15 @@
use std::collections::BTreeSet;
use std::io::Write;
use crate::util::buffer::{Buffer, BufferReader};
use crate::util::marshalable::Marshalable;
use crate::vl1::identity::*;
use crate::util::marshalable::*;
use crate::vl1::identity::{Identity, MAX_SIGNATURE_SIZE};
use crate::vl1::Endpoint;
use zerotier_utils::arrayvec::ArrayVec;
use zerotier_utils::buffer::{Buffer, BufferReader};
use zerotier_crypto::verified::Verified;
use serde::{Deserialize, Serialize};
/// Description of a member of a root cluster.
@ -29,7 +33,7 @@ pub struct Root {
/// This is populated by the sign() method when the completed root set is signed by each member.
/// All member roots must sign.
#[serde(default)]
pub signature: Vec<u8>,
pub signature: ArrayVec<u8, MAX_SIGNATURE_SIZE>,
/// Priority (higher number is lower priority, 0 is default).
///
@ -88,16 +92,15 @@ impl RootSet {
}
/// Get the ZeroTier default root set, which contains roots run by ZeroTier Inc.
pub fn zerotier_default() -> Self {
pub fn zerotier_default() -> Verified<Self> {
let mut cursor = 0;
//let rs = include_bytes!("../../default-rootset/root.zerotier.com.bin");
let rs = include_bytes!("../../default-rootset/test-root.bin");
let rs = Self::unmarshal(&Buffer::from(rs), &mut cursor).unwrap();
assert!(rs.verify());
rs
rs.verify().unwrap()
}
fn marshal_internal<const BL: usize>(&self, buf: &mut Buffer<BL>, include_signatures: bool) -> std::io::Result<()> {
fn marshal_internal<const BL: usize>(&self, buf: &mut Buffer<BL>, include_signatures: bool) -> Result<(), MarshalUnmarshalError> {
buf.append_u8(0)?; // version byte for future use
buf.append_varint(self.name.as_bytes().len() as u64)?;
buf.append_bytes(self.name.as_bytes())?;
@ -123,7 +126,7 @@ impl RootSet {
}
if include_signatures {
buf.append_varint(m.signature.len() as u64)?;
buf.append_bytes(m.signature.as_slice())?;
buf.append_bytes(m.signature.as_ref())?;
}
buf.append_varint(0)?; // flags, currently always 0
buf.append_u8(m.priority)?;
@ -142,19 +145,19 @@ impl RootSet {
}
/// Verify signatures present in this root cluster definition.
pub fn verify(&self) -> bool {
pub fn verify(self) -> Option<Verified<Self>> {
if self.members.is_empty() {
return false;
return None;
}
let tmp = self.marshal_for_signing();
for m in self.members.iter() {
if m.signature.is_empty() || !m.identity.verify(tmp.as_bytes(), m.signature.as_slice()) {
return false;
if m.signature.is_empty() || !m.identity.verify(tmp.as_bytes(), m.signature.as_ref()) {
return None;
}
}
return true;
return Some(Verified(self));
}
/// Add a member to this definition, replacing any current entry with this address.
@ -175,7 +178,7 @@ impl RootSet {
}
tmp
}),
signature: Vec::new(),
signature: ArrayVec::new(),
priority,
protocol_version,
});
@ -228,13 +231,10 @@ impl RootSet {
/// member must sign the next update. N-1 is not permitted to be less than one. If that was
/// not the case it would be possible for anyone to update a one-member definition!
///
/// This DOES call verify() on itself prior to checking to avoid the disastrous error
/// of forgetting to verify signatures on a new definition.
///
/// Be sure the semantics are right and this method is being called with 'self' being the
/// new root cluster definition and 'previous' being the current/old one.
pub fn should_replace(&self, previous: &Self) -> bool {
if self.name.eq(&previous.name) && self.revision > previous.revision && self.verify() {
if self.name.eq(&previous.name) && self.revision > previous.revision {
let mut my_signers = BTreeSet::new();
for m in self.members.iter() {
my_signers.insert(m.identity.fingerprint.clone());
@ -257,29 +257,25 @@ impl RootSet {
}
impl Marshalable for RootSet {
const MAX_MARSHAL_SIZE: usize = crate::vl1::protocol::v1::SIZE_MAX;
const MAX_MARSHAL_SIZE: usize = crate::protocol::v1::SIZE_MAX;
#[inline(always)]
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError> {
self.marshal_internal(buf, true)
}
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Self> {
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Self, MarshalUnmarshalError> {
let mut rc = Self::new(String::new(), None, 0);
if buf.read_u8(cursor)? != 0 {
return std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "unsupported version"));
return Err(MarshalUnmarshalError::UnsupportedVersion);
}
let name_len = buf.read_varint(cursor)?;
rc.name = String::from_utf8(buf.read_bytes(name_len as usize, cursor)?.to_vec())
.map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid UTF8"))?;
rc.name = String::from_utf8_lossy(buf.read_bytes(name_len as usize, cursor)?).to_string();
let url_len = buf.read_varint(cursor)?;
if url_len > 0 {
rc.url = Some(
String::from_utf8(buf.read_bytes(url_len as usize, cursor)?.to_vec())
.map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid UTF8"))?,
);
rc.url = Some(String::from_utf8_lossy(buf.read_bytes(url_len as usize, cursor)?).to_string());
}
rc.revision = buf.read_varint(cursor)?;
@ -287,9 +283,9 @@ impl Marshalable for RootSet {
let member_count = buf.read_varint(cursor)?;
for _ in 0..member_count {
let mut m = Root {
identity: Identity::read_bytes(&mut BufferReader::new(buf, cursor))?,
identity: Identity::read_bytes(&mut BufferReader::new(buf, cursor)).map_err(|e| MarshalUnmarshalError::IoError(e))?,
endpoints: None,
signature: Vec::new(),
signature: ArrayVec::new(),
priority: 0,
protocol_version: 0,
};
@ -317,7 +313,7 @@ impl Marshalable for RootSet {
*cursor += buf.read_varint(cursor)? as usize;
if *cursor > buf.len() {
return std::io::Result::Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid length"));
return Err(MarshalUnmarshalError::OutOfBounds);
}
rc.members.sort();

View file

@ -4,7 +4,7 @@ use zerotier_crypto::aes_gmac_siv::AesGmacSiv;
use zerotier_crypto::hash::hmac_sha384;
use zerotier_crypto::secret::Secret;
use crate::vl1::protocol::*;
use crate::protocol::*;
use zerotier_utils::pool::{Pool, PoolFactory};

View file

@ -4,10 +4,10 @@ use std::collections::{HashMap, LinkedList};
use parking_lot::Mutex;
use crate::protocol::{PooledPacketBuffer, WHOIS_MAX_WAITING_PACKETS, WHOIS_RETRY_INTERVAL, WHOIS_RETRY_MAX};
use crate::util::gate::IntervalGate;
use crate::vl1::fragmentedpacket::FragmentedPacket;
use crate::vl1::node::{HostSystem, Node};
use crate::vl1::protocol::{PooledPacketBuffer, WHOIS_MAX_WAITING_PACKETS, WHOIS_RETRY_INTERVAL, WHOIS_RETRY_MAX};
use crate::vl1::Address;
pub(crate) const SERVICE_INTERVAL_MS: i64 = WHOIS_RETRY_INTERVAL;

View file

@ -5,13 +5,14 @@ use crate::vl2::NetworkId;
use serde::{Deserialize, Serialize};
use zerotier_utils::arrayvec::ArrayVec;
use zerotier_utils::blob::Blob;
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CertificateOfMembership {
pub issued_to: Address,
//pub issued_to_fingerprint: [u8; 48],
pub issued_to_fingerprint: Blob<48>,
pub network_id: NetworkId,
pub timestamp: i64,
pub max_delta: i64,
//pub signature: ArrayVec<u8, { identity::MAX_SIGNATURE_SIZE }>,
pub signature: ArrayVec<u8, { identity::MAX_SIGNATURE_SIZE }>,
}

View file

@ -7,9 +7,9 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::error::InvalidFormatError;
use crate::util::buffer::Buffer;
use crate::util::marshalable::Marshalable;
use crate::util::marshalable::*;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::hex;
use zerotier_utils::hex::HEX_CHARS;
@ -61,16 +61,13 @@ impl Marshalable for NetworkId {
const MAX_MARSHAL_SIZE: usize = 8;
#[inline(always)]
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
buf.append_u64(self.0.get())
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), MarshalUnmarshalError> {
buf.append_u64(self.0.get()).map_err(|_| MarshalUnmarshalError::OutOfBounds)
}
#[inline(always)]
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Self> {
Self::from_u64(buf.read_u64(cursor)?).map_or_else(
|| Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "cannot be zero")),
|a| Ok(a),
)
fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> Result<Self, MarshalUnmarshalError> {
Self::from_u64(buf.read_u64(cursor)?).ok_or(MarshalUnmarshalError::InvalidData)
}
}

View file

@ -2,8 +2,8 @@
use async_trait::async_trait;
use crate::protocol::PacketBuffer;
use crate::vl1::node::{HostSystem, InnerProtocol};
use crate::vl1::protocol::*;
use crate::vl1::{Identity, Path, Peer};
pub trait SwitchInterface: Sync + Send {}

View file

@ -14,5 +14,5 @@ pub struct Tag {
pub timestamp: i64,
pub issued_to: Address,
pub signed_by: Address,
//pub signature: ArrayVec<u8, { identity::MAX_SIGNATURE_SIZE }>,
pub signature: ArrayVec<u8, { identity::MAX_SIGNATURE_SIZE }>,
}

View file

@ -8,6 +8,7 @@ use crate::{exitcode, Flags};
use zerotier_network_hypervisor::util::marshalable::Marshalable;
use zerotier_network_hypervisor::vl1::RootSet;
use zerotier_utils::json::to_json_pretty;
pub async fn cmd(_: Flags, cmd_args: &ArgMatches) -> i32 {
match cmd_args.subcommand() {
@ -49,7 +50,7 @@ pub async fn cmd(_: Flags, cmd_args: &ArgMatches) -> i32 {
eprintln!("ERROR: root set signing failed, invalid identity?");
return exitcode::ERR_INTERNAL;
}
println!("{}", crate::utils::to_json_pretty(&root_set));
println!("{}", to_json_pretty(&root_set));
} else {
eprintln!("ERROR: 'rootset sign' requires a path to a root set in JSON format and a secret identity.");
return exitcode::ERR_IOERR;
@ -107,7 +108,7 @@ pub async fn cmd(_: Flags, cmd_args: &ArgMatches) -> i32 {
}
Some(("restoredefault", _)) => {
let _ = std::io::stdout().write_all(crate::utils::to_json_pretty(&RootSet::zerotier_default()).as_bytes());
let _ = std::io::stdout().write_all(to_json_pretty(&RootSet::zerotier_default()).as_bytes());
}
_ => panic!(),

View file

@ -13,6 +13,7 @@ use parking_lot::{Mutex, RwLock};
use zerotier_crypto::random::next_u32_secure;
use zerotier_network_hypervisor::vl1::{Identity, Storage};
use zerotier_utils::json::to_json_pretty;
const AUTH_TOKEN_DEFAULT_LENGTH: usize = 48;
const AUTH_TOKEN_POSSIBLE_CHARS: &'static str = "0123456789abcdefghijklmnopqrstuvwxyz";
@ -119,7 +120,7 @@ impl DataDir {
/// Save a modified copy of the configuration and replace the internal copy in this structure (if it's actually changed).
pub async fn save_config(&self, modified_config: Config) -> std::io::Result<()> {
if !modified_config.eq(&self.config.read()) {
let config_data = crate::utils::to_json_pretty(&modified_config);
let config_data = to_json_pretty(&modified_config);
tokio::fs::write(self.base_path.join(CONFIG_FILENAME), config_data.as_bytes()).await?;
*self.config.write() = Arc::new(modified_config);
}

View file

@ -1,133 +0,0 @@
/* This is a forked and hacked version of PrettyFormatter from:
*
* https://github.com/serde-rs/json/blob/master/src/ser.rs
*
* It is therefore under the same Apache license.
*/
use serde_json::ser::Formatter;
pub struct JsonFormatter<'a> {
current_indent: usize,
has_value: bool,
indent: &'a [u8],
}
fn indent<W>(wr: &mut W, n: usize, s: &[u8]) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
for _ in 0..n {
wr.write_all(s)?;
}
Ok(())
}
impl<'a> JsonFormatter<'a> {
pub fn new() -> Self {
JsonFormatter::with_indent(b" ")
}
pub fn with_indent(indent: &'a [u8]) -> Self {
JsonFormatter { current_indent: 0, has_value: false, indent }
}
}
impl<'a> Default for JsonFormatter<'a> {
fn default() -> Self {
JsonFormatter::new()
}
}
impl<'a> Formatter for JsonFormatter<'a> {
fn begin_array<W>(&mut self, writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
self.current_indent += 1;
self.has_value = false;
writer.write_all(b"[")
}
fn end_array<W>(&mut self, writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
self.current_indent -= 1;
if self.has_value {
writer.write_all(b" ]")
} else {
writer.write_all(b"]")
}
}
fn begin_array_value<W>(&mut self, writer: &mut W, first: bool) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
if first {
writer.write_all(b" ")?;
} else {
writer.write_all(b", ")?;
}
Ok(())
}
fn end_array_value<W>(&mut self, _writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
self.has_value = true;
Ok(())
}
fn begin_object<W>(&mut self, writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
self.current_indent += 1;
self.has_value = false;
writer.write_all(b"{")
}
fn end_object<W>(&mut self, writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
self.current_indent -= 1;
if self.has_value {
writer.write_all(b"\n")?;
indent(writer, self.current_indent, self.indent)?;
}
writer.write_all(b"}")
}
fn begin_object_key<W>(&mut self, writer: &mut W, first: bool) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
if first {
writer.write_all(b"\n")?;
} else {
writer.write_all(b",\n")?;
}
indent(writer, self.current_indent, self.indent)
}
fn begin_object_value<W>(&mut self, writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
writer.write_all(b": ")
}
fn end_object_value<W>(&mut self, _writer: &mut W) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
self.has_value = true;
Ok(())
}
}

View file

@ -6,10 +6,7 @@ use serde::{Deserialize, Serialize};
use zerotier_network_hypervisor::vl1::{Address, Endpoint};
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_vl1_service::Settings;
/// Default primary ZeroTier port.
pub const DEFAULT_PORT: u16 = 9993;
use zerotier_vl1_service::VL1Settings;
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(default)]
@ -70,7 +67,7 @@ pub struct Config {
#[serde(rename = "virtual")]
pub virtual_: BTreeMap<Address, VirtualPathSettings>,
pub network: BTreeMap<NetworkId, NetworkSettings>,
pub settings: Settings,
pub settings: VL1Settings,
}
impl Default for Config {
@ -79,7 +76,7 @@ impl Default for Config {
physical: BTreeMap::new(),
virtual_: BTreeMap::new(),
network: BTreeMap::new(),
settings: Settings::default(),
settings: VL1Settings::default(),
}
}
}

View file

@ -3,8 +3,6 @@
pub mod cli;
pub mod cmdline_help;
pub mod datadir;
pub mod exitcode;
pub mod jsonformatter;
pub mod localconfig;
pub mod utils;
pub mod vnic;
@ -16,6 +14,7 @@ use clap::error::{ContextKind, ContextValue};
use clap::{Arg, ArgMatches, Command};
use zerotier_network_hypervisor::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
use zerotier_utils::exitcode;
use zerotier_vl1_service::VL1Service;
use crate::datadir::DataDir;
@ -78,7 +77,7 @@ async fn async_main(flags: Flags, global_args: Box<ArgMatches>) -> i32 {
let test_inner = Arc::new(zerotier_network_hypervisor::vl1::DummyInnerProtocol::default());
let test_path_filter = Arc::new(zerotier_network_hypervisor::vl1::DummyPathFilter::default());
let datadir = open_datadir(&flags).await;
let svc = VL1Service::new(datadir, test_inner, test_path_filter, zerotier_vl1_service::Settings::default()).await;
let svc = VL1Service::new(datadir, test_inner, test_path_filter, zerotier_vl1_service::VL1Settings::default()).await;
if svc.is_ok() {
let svc = svc.unwrap();
svc.node().init_default_roots();

View file

@ -3,14 +3,9 @@
use std::path::Path;
use std::str::FromStr;
use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use crate::jsonformatter::JsonFormatter;
use zerotier_network_hypervisor::vl1::Identity;
/// Default sanity limit parameter for read_limit() used throughout the service.
@ -74,84 +69,6 @@ pub fn is_valid_port(v: &str) -> Result<(), String> {
Err(format!("invalid TCP/IP port number: {}", v))
}
/// Recursively patch a JSON object.
///
/// This is slightly different from a usual JSON merge. For objects in the target their fields
/// are updated by recursively calling json_patch if the same field is present in the source.
/// If the source tries to set an object to something other than another object, this is ignored.
/// Other fields are replaced. This is used for RESTful config object updates. The depth limit
/// field is to prevent stack overflows via the API.
pub fn json_patch(target: &mut serde_json::value::Value, source: &serde_json::value::Value, depth_limit: usize) {
if target.is_object() {
if source.is_object() {
let target = target.as_object_mut().unwrap();
let source = source.as_object().unwrap();
for kv in target.iter_mut() {
let _ = source.get(kv.0).map(|new_value| {
if depth_limit > 0 {
json_patch(kv.1, new_value, depth_limit - 1)
}
});
}
for kv in source.iter() {
if !target.contains_key(kv.0) && !kv.1.is_null() {
target.insert(kv.0.clone(), kv.1.clone());
}
}
}
} else if *target != *source {
*target = source.clone();
}
}
/// Patch a serializable object with the fields present in a JSON object.
///
/// If there are no changes, None is returned. The depth limit is passed through to json_patch and
/// should be set to a sanity check value to prevent overflows.
pub fn json_patch_object<O: Serialize + DeserializeOwned + Eq>(
obj: O,
patch: &str,
depth_limit: usize,
) -> Result<Option<O>, serde_json::Error> {
serde_json::from_str::<serde_json::value::Value>(patch).map_or_else(
|e| Err(e),
|patch| {
serde_json::value::to_value(&obj).map_or_else(
|e| Err(e),
|mut obj_value| {
json_patch(&mut obj_value, &patch, depth_limit);
serde_json::value::from_value::<O>(obj_value).map_or_else(
|e| Err(e),
|obj_merged| {
if obj == obj_merged {
Ok(None)
} else {
Ok(Some(obj_merged))
}
},
)
},
)
},
)
}
/// Shortcut to use serde_json to serialize an object, returns "null" on error.
pub fn to_json<O: serde::Serialize>(o: &O) -> String {
serde_json::to_string(o).unwrap_or("null".into())
}
/// Shortcut to use serde_json to serialize an object, returns "null" on error.
pub fn to_json_pretty<O: serde::Serialize>(o: &O) -> String {
let mut buf = Vec::new();
let mut ser = serde_json::Serializer::with_formatter(&mut buf, JsonFormatter::new());
if o.serialize(&mut ser).is_ok() {
String::from_utf8(buf).unwrap_or_else(|_| "null".into())
} else {
"null".into()
}
}
/// Read an identity as either a literal or from a file.
pub async fn parse_cli_identity(input: &str, validate: bool) -> Result<Identity, String> {
let parse_func = |s: &str| {

View file

@ -7,3 +7,5 @@ version = "0.1.0"
[dependencies]
parking_lot = { version = "^0", features = [], default-features = false }
serde = { version = "^1", features = ["derive"], default-features = false }
serde_json = { version = "^1", features = ["std"], default-features = false }

View file

@ -1,19 +1,143 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently propritery pending actual release and licensing. See LICENSE.md.
use std::io::Write;
use std::mem::{size_of, MaybeUninit};
use std::ptr::{slice_from_raw_parts, slice_from_raw_parts_mut};
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Clone, Copy, Debug)]
pub struct OutOfCapacityError<T>(pub T);
impl<T> std::fmt::Display for OutOfCapacityError<T> {
fn fmt(self: &Self, stream: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt("ArrayVec out of space", stream)
}
}
impl<T: std::fmt::Debug> ::std::error::Error for OutOfCapacityError<T> {
#[inline(always)]
fn description(self: &Self) -> &str {
"ArrayVec out of space"
}
}
/// A simple vector backed by a static sized array with no memory allocations and no overhead construction.
pub struct ArrayVec<T, const C: usize> {
pub(crate) a: [MaybeUninit<T>; C],
pub(crate) s: usize,
pub(crate) a: [MaybeUninit<T>; C],
}
impl<T, const C: usize> Default for ArrayVec<T, C> {
#[inline(always)]
fn default() -> Self {
Self::new()
}
}
impl<T: PartialEq, const C: usize> PartialEq for ArrayVec<T, C> {
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
let tmp: &[T] = self.as_ref();
tmp.eq(other.as_ref())
}
}
impl<T: Eq, const C: usize> Eq for ArrayVec<T, C> {}
impl<T: Clone, const C: usize> Clone for ArrayVec<T, C> {
fn clone(&self) -> Self {
debug_assert!(self.s <= C);
Self {
s: self.s,
a: unsafe {
let mut tmp: [MaybeUninit<T>; C] = MaybeUninit::uninit().assume_init();
for i in 0..self.s {
tmp.get_unchecked_mut(i).write(self.a[i].assume_init_ref().clone());
}
tmp
},
}
}
}
impl<T: Clone, const C: usize, const S: usize> From<[T; S]> for ArrayVec<T, C> {
#[inline(always)]
fn from(v: [T; S]) -> Self {
if S <= C {
let mut tmp = Self::new();
for i in 0..S {
tmp.push(v[i].clone());
}
tmp
} else {
panic!();
}
}
}
impl<const C: usize> Write for ArrayVec<u8, C> {
#[inline(always)]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
for i in buf.iter() {
if self.try_push(*i).is_err() {
return Err(std::io::Error::new(std::io::ErrorKind::Other, "ArrayVec out of space"));
}
}
Ok(buf.len())
}
#[inline(always)]
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
}
impl<T, const C: usize> TryFrom<Vec<T>> for ArrayVec<T, C> {
type Error = OutOfCapacityError<T>;
#[inline(always)]
fn try_from(mut value: Vec<T>) -> Result<Self, Self::Error> {
let mut tmp = Self::new();
for x in value.drain(..) {
tmp.try_push(x)?;
}
Ok(tmp)
}
}
impl<T: Clone, const C: usize> TryFrom<&Vec<T>> for ArrayVec<T, C> {
type Error = OutOfCapacityError<T>;
#[inline(always)]
fn try_from(value: &Vec<T>) -> Result<Self, Self::Error> {
let mut tmp = Self::new();
for x in value.iter() {
tmp.try_push(x.clone())?;
}
Ok(tmp)
}
}
impl<T: Clone, const C: usize> TryFrom<&[T]> for ArrayVec<T, C> {
type Error = OutOfCapacityError<T>;
#[inline(always)]
fn try_from(value: &[T]) -> Result<Self, Self::Error> {
let mut tmp = Self::new();
for x in value.iter() {
tmp.try_push(x.clone())?;
}
Ok(tmp)
}
}
impl<T, const C: usize> ArrayVec<T, C> {
#[inline(always)]
pub fn new() -> Self {
assert_eq!(size_of::<[T; C]>(), size_of::<[MaybeUninit<T>; C]>());
Self { a: unsafe { MaybeUninit::uninit().assume_init() }, s: 0 }
Self { s: 0, a: unsafe { MaybeUninit::uninit().assume_init() } }
}
#[inline(always)]
@ -28,17 +152,22 @@ impl<T, const C: usize> ArrayVec<T, C> {
}
#[inline(always)]
pub fn try_push(&mut self, v: T) -> bool {
pub fn try_push(&mut self, v: T) -> Result<(), OutOfCapacityError<T>> {
if self.s < C {
let i = self.s;
unsafe { self.a.get_unchecked_mut(i).write(v) };
self.s = i + 1;
true
Ok(())
} else {
false
Err(OutOfCapacityError(v))
}
}
#[inline(always)]
pub fn as_bytes(&self) -> &[T] {
unsafe { &*slice_from_raw_parts(self.a.as_ptr().cast(), self.s) }
}
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.s == 0
@ -85,6 +214,50 @@ impl<T, const C: usize> AsMut<[T]> for ArrayVec<T, C> {
}
}
impl<T: Serialize, const L: usize> Serialize for ArrayVec<T, L> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
let sl: &[T] = self.as_ref();
for i in 0..self.s {
seq.serialize_element(&sl[i])?;
}
seq.end()
}
}
struct ArrayVecVisitor<'de, T: Deserialize<'de>, const L: usize>(std::marker::PhantomData<&'de T>);
impl<'de, T: Deserialize<'de>, const L: usize> serde::de::Visitor<'de> for ArrayVecVisitor<'de, T, L> {
type Value = ArrayVec<T, L>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str(format!("array of up to {} elements", L).as_str())
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
let mut a = ArrayVec::<T, L>::new();
while let Some(x) = seq.next_element()? {
a.push(x);
}
Ok(a)
}
}
impl<'de, T: Deserialize<'de> + 'de, const L: usize> Deserialize<'de> for ArrayVec<T, L> {
fn deserialize<D>(deserializer: D) -> Result<ArrayVec<T, L>, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_seq(ArrayVecVisitor(std::marker::PhantomData::default()))
}
}
#[cfg(test)]
mod tests {
use super::ArrayVec;
@ -96,7 +269,7 @@ mod tests {
v.push(i);
}
assert_eq!(v.len(), 128);
assert!(!v.try_push(1000));
assert!(!v.try_push(1000).is_ok());
assert_eq!(v.len(), 128);
for _ in 0..128 {
assert!(v.pop().is_some());

108
utils/src/blob.rs Normal file
View file

@ -0,0 +1,108 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently propritery pending actual release and licensing. See LICENSE.md.
use serde::ser::SerializeTuple;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::hex;
/// Fixed size byte array with Serde serializer/deserializer for sizes over 32 elements and hex to_string().
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct Blob<const L: usize>([u8; L]);
impl<const L: usize> Blob<L> {
#[inline(always)]
pub fn as_bytes(&self) -> &[u8; L] {
&self.0
}
#[inline(always)]
pub const fn len(&self) -> usize {
L
}
}
impl<const L: usize> From<[u8; L]> for Blob<L> {
#[inline(always)]
fn from(a: [u8; L]) -> Self {
Self(a)
}
}
impl<const L: usize> From<&[u8; L]> for Blob<L> {
#[inline(always)]
fn from(a: &[u8; L]) -> Self {
Self(a.clone())
}
}
impl<const L: usize> Default for Blob<L> {
#[inline(always)]
fn default() -> Self {
unsafe { std::mem::zeroed() }
}
}
impl<const L: usize> AsRef<[u8; L]> for Blob<L> {
#[inline(always)]
fn as_ref(&self) -> &[u8; L] {
&self.0
}
}
impl<const L: usize> AsMut<[u8; L]> for Blob<L> {
#[inline(always)]
fn as_mut(&mut self) -> &mut [u8; L] {
&mut self.0
}
}
impl<const L: usize> ToString for Blob<L> {
#[inline(always)]
fn to_string(&self) -> String {
hex::to_string(&self.0)
}
}
impl<const L: usize> Serialize for Blob<L> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut t = serializer.serialize_tuple(L)?;
for i in self.0.iter() {
t.serialize_element(i)?;
}
t.end()
}
}
struct BlobVisitor<const L: usize>;
impl<'de, const L: usize> serde::de::Visitor<'de> for BlobVisitor<L> {
type Value = Blob<L>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str(format!("array of {} bytes", L).as_str())
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
let mut blob = Blob::<L>::default();
for i in 0..L {
blob.0[i] = seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(i, &self))?;
}
Ok(blob)
}
}
impl<'de, const L: usize> Deserialize<'de> for Blob<L> {
fn deserialize<D>(deserializer: D) -> Result<Blob<L>, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple(L, BlobVisitor::<L>)
}
}

View file

@ -1,11 +1,39 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently propritery pending actual release and licensing. See LICENSE.md.
use std::error::Error;
use std::fmt::{Debug, Display};
use std::io::{Read, Write};
use std::mem::{size_of, MaybeUninit};
use zerotier_utils::memory;
use zerotier_utils::pool::PoolFactory;
use zerotier_utils::varint;
use crate::memory;
use crate::pool::PoolFactory;
use crate::unlikely_branch;
use crate::varint;
const OUT_OF_BOUNDS_MSG: &'static str = "Buffer access out of bounds";
pub struct OutOfBoundsError;
impl Display for OutOfBoundsError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(OUT_OF_BOUNDS_MSG)
}
}
impl Debug for OutOfBoundsError {
#[inline(always)]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(self, f)
}
}
impl Error for OutOfBoundsError {}
impl From<OutOfBoundsError> for std::io::Error {
fn from(_: OutOfBoundsError) -> Self {
std::io::Error::new(std::io::ErrorKind::Other, OUT_OF_BOUNDS_MSG)
}
}
/// An I/O buffer with extensions for efficiently reading and writing various objects.
///
@ -24,24 +52,17 @@ pub struct Buffer<const L: usize>(usize, [u8; L]);
impl<const L: usize> Default for Buffer<L> {
#[inline(always)]
fn default() -> Self {
Self::new()
unsafe { std::mem::zeroed() }
}
}
// Setting attributes this way causes the 'overflow' branches to be treated as unlikely by LLVM.
#[inline(never)]
#[cold]
fn overflow_err() -> std::io::Error {
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "buffer overflow")
}
impl<const L: usize> Buffer<L> {
pub const CAPACITY: usize = L;
/// Create an empty zeroed buffer.
#[inline(always)]
pub fn new() -> Self {
Self(0, [0_u8; L])
unsafe { std::mem::zeroed() }
}
/// Create an empty zeroed buffer on the heap without intermediate stack allocation.
@ -65,7 +86,7 @@ impl<const L: usize> Buffer<L> {
Self::CAPACITY
}
pub fn from_bytes(b: &[u8]) -> std::io::Result<Self> {
pub fn from_bytes(b: &[u8]) -> Result<Self, OutOfBoundsError> {
let l = b.len();
if l <= L {
let mut tmp = Self::new();
@ -73,7 +94,8 @@ impl<const L: usize> Buffer<L> {
tmp.1[0..l].copy_from_slice(b);
Ok(tmp)
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
@ -98,32 +120,36 @@ impl<const L: usize> Buffer<L> {
}
#[inline(always)]
pub fn as_bytes_starting_at(&self, start: usize) -> std::io::Result<&[u8]> {
pub fn as_bytes_starting_at(&self, start: usize) -> Result<&[u8], OutOfBoundsError> {
if start <= self.0 {
Ok(&self.1[start..self.0])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn as_bytes_starting_at_mut(&mut self, start: usize) -> std::io::Result<&mut [u8]> {
pub fn as_bytes_starting_at_mut(&mut self, start: usize) -> Result<&mut [u8], OutOfBoundsError> {
if start <= self.0 {
Ok(&mut self.1[start..self.0])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn as_byte_range(&self, start: usize, end: usize) -> std::io::Result<&[u8]> {
pub fn as_byte_range(&self, start: usize, end: usize) -> Result<&[u8], OutOfBoundsError> {
if end <= self.0 {
Ok(&self.1[start..end])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn clear(&mut self) {
self.1[0..self.0].fill(0);
self.0 = 0;
@ -131,6 +157,7 @@ impl<const L: usize> Buffer<L> {
/// Load array into buffer.
/// This will panic if the array is larger than L.
#[inline(always)]
pub fn set_to(&mut self, b: &[u8]) {
let len = b.len();
self.0 = len;
@ -151,7 +178,6 @@ impl<const L: usize> Buffer<L> {
///
/// This will panic if the specified size is larger than L. If the size is larger
/// than the current size uninitialized space will be zeroed.
#[inline(always)]
pub fn set_size(&mut self, s: usize) {
let prev_len = self.0;
self.0 = s;
@ -180,45 +206,48 @@ impl<const L: usize> Buffer<L> {
/// Append a structure and return a mutable reference to its memory.
#[inline(always)]
pub fn append_struct_get_mut<T: Copy>(&mut self) -> std::io::Result<&mut T> {
pub fn append_struct_get_mut<T: Copy>(&mut self) -> Result<&mut T, OutOfBoundsError> {
let ptr = self.0;
let end = ptr + size_of::<T>();
if end <= L {
self.0 = end;
Ok(unsafe { &mut *self.1.as_mut_ptr().add(ptr).cast() })
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
/// Append a fixed size array and return a mutable reference to its memory.
#[inline(always)]
pub fn append_bytes_fixed_get_mut<const S: usize>(&mut self) -> std::io::Result<&mut [u8; S]> {
pub fn append_bytes_fixed_get_mut<const S: usize>(&mut self) -> Result<&mut [u8; S], OutOfBoundsError> {
let ptr = self.0;
let end = ptr + S;
if end <= L {
self.0 = end;
Ok(unsafe { &mut *self.1.as_mut_ptr().add(ptr).cast() })
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
/// Append a runtime sized array and return a mutable reference to its memory.
#[inline(always)]
pub fn append_bytes_get_mut(&mut self, s: usize) -> std::io::Result<&mut [u8]> {
pub fn append_bytes_get_mut(&mut self, s: usize) -> Result<&mut [u8], OutOfBoundsError> {
let ptr = self.0;
let end = ptr + s;
if end <= L {
self.0 = end;
Ok(&mut self.1[ptr..end])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_padding(&mut self, b: u8, count: usize) -> std::io::Result<()> {
pub fn append_padding(&mut self, b: u8, count: usize) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + count;
if end <= L {
@ -226,12 +255,13 @@ impl<const L: usize> Buffer<L> {
self.1[ptr..end].fill(b);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_bytes(&mut self, buf: &[u8]) -> std::io::Result<()> {
pub fn append_bytes(&mut self, buf: &[u8]) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + buf.len();
if end <= L {
@ -239,12 +269,13 @@ impl<const L: usize> Buffer<L> {
self.1[ptr..end].copy_from_slice(buf);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_bytes_fixed<const S: usize>(&mut self, buf: &[u8; S]) -> std::io::Result<()> {
pub fn append_bytes_fixed<const S: usize>(&mut self, buf: &[u8; S]) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + S;
if end <= L {
@ -252,29 +283,26 @@ impl<const L: usize> Buffer<L> {
self.1[ptr..end].copy_from_slice(buf);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_varint(&mut self, i: u64) -> std::io::Result<()> {
varint::write(self, i)
}
#[inline(always)]
pub fn append_u8(&mut self, i: u8) -> std::io::Result<()> {
pub fn append_u8(&mut self, i: u8) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
if ptr < L {
self.0 = ptr + 1;
self.1[ptr] = i;
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_u16(&mut self, i: u16) -> std::io::Result<()> {
pub fn append_u16(&mut self, i: u16) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + 2;
if end <= L {
@ -282,12 +310,13 @@ impl<const L: usize> Buffer<L> {
memory::store_raw(i.to_be(), &mut self.1[ptr..]);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_u32(&mut self, i: u32) -> std::io::Result<()> {
pub fn append_u32(&mut self, i: u32) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + 4;
if end <= L {
@ -295,12 +324,13 @@ impl<const L: usize> Buffer<L> {
memory::store_raw(i.to_be(), &mut self.1[ptr..]);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_u64(&mut self, i: u64) -> std::io::Result<()> {
pub fn append_u64(&mut self, i: u64) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + 8;
if end <= L {
@ -308,12 +338,13 @@ impl<const L: usize> Buffer<L> {
memory::store_raw(i.to_be(), &mut self.1[ptr..]);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn append_u64_le(&mut self, i: u64) -> std::io::Result<()> {
pub fn append_u64_le(&mut self, i: u64) -> Result<(), OutOfBoundsError> {
let ptr = self.0;
let end = ptr + 8;
if end <= L {
@ -321,90 +352,107 @@ impl<const L: usize> Buffer<L> {
memory::store_raw(i.to_be(), &mut self.1[ptr..]);
Ok(())
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
pub fn append_varint(&mut self, i: u64) -> Result<(), OutOfBoundsError> {
if varint::write(self, i).is_ok() {
Ok(())
} else {
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn bytes_fixed_at<const S: usize>(&self, ptr: usize) -> std::io::Result<&[u8; S]> {
pub fn bytes_fixed_at<const S: usize>(&self, ptr: usize) -> Result<&[u8; S], OutOfBoundsError> {
if (ptr + S) <= self.0 {
unsafe { Ok(&*self.1.as_ptr().cast::<u8>().add(ptr).cast::<[u8; S]>()) }
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn bytes_fixed_mut_at<const S: usize>(&mut self, ptr: usize) -> std::io::Result<&mut [u8; S]> {
pub fn bytes_fixed_mut_at<const S: usize>(&mut self, ptr: usize) -> Result<&mut [u8; S], OutOfBoundsError> {
if (ptr + S) <= self.0 {
unsafe { Ok(&mut *self.1.as_mut_ptr().cast::<u8>().add(ptr).cast::<[u8; S]>()) }
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn struct_at<T: Copy>(&self, ptr: usize) -> std::io::Result<&T> {
pub fn struct_at<T: Copy>(&self, ptr: usize) -> Result<&T, OutOfBoundsError> {
if (ptr + size_of::<T>()) <= self.0 {
unsafe { Ok(&*self.1.as_ptr().cast::<u8>().add(ptr).cast::<T>()) }
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn struct_mut_at<T: Copy>(&mut self, ptr: usize) -> std::io::Result<&mut T> {
pub fn struct_mut_at<T: Copy>(&mut self, ptr: usize) -> Result<&mut T, OutOfBoundsError> {
if (ptr + size_of::<T>()) <= self.0 {
unsafe { Ok(&mut *self.1.as_mut_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()) }
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn u8_at(&self, ptr: usize) -> std::io::Result<u8> {
pub fn u8_at(&self, ptr: usize) -> Result<u8, OutOfBoundsError> {
if ptr < self.0 {
Ok(self.1[ptr])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn u16_at(&self, ptr: usize) -> std::io::Result<u16> {
pub fn u16_at(&self, ptr: usize) -> Result<u16, OutOfBoundsError> {
let end = ptr + 2;
debug_assert!(end <= L);
if end <= self.0 {
Ok(u16::from_be(memory::load_raw(&self.1[ptr..])))
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn u32_at(&self, ptr: usize) -> std::io::Result<u32> {
pub fn u32_at(&self, ptr: usize) -> Result<u32, OutOfBoundsError> {
let end = ptr + 4;
debug_assert!(end <= L);
if end <= self.0 {
Ok(u32::from_be(memory::load_raw(&self.1[ptr..])))
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn u64_at(&self, ptr: usize) -> std::io::Result<u64> {
pub fn u64_at(&self, ptr: usize) -> Result<u64, OutOfBoundsError> {
let end = ptr + 8;
debug_assert!(end <= L);
if end <= self.0 {
Ok(u64::from_be(memory::load_raw(&self.1[ptr..])))
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_struct<T: Copy>(&self, cursor: &mut usize) -> std::io::Result<&T> {
pub fn read_struct<T: Copy>(&self, cursor: &mut usize) -> Result<&T, OutOfBoundsError> {
let ptr = *cursor;
let end = ptr + size_of::<T>();
debug_assert!(end <= L);
@ -412,12 +460,13 @@ impl<const L: usize> Buffer<L> {
*cursor = end;
unsafe { Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<T>()) }
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_bytes_fixed<const S: usize>(&self, cursor: &mut usize) -> std::io::Result<&[u8; S]> {
pub fn read_bytes_fixed<const S: usize>(&self, cursor: &mut usize) -> Result<&[u8; S], OutOfBoundsError> {
let ptr = *cursor;
let end = ptr + S;
debug_assert!(end <= L);
@ -425,12 +474,13 @@ impl<const L: usize> Buffer<L> {
*cursor = end;
unsafe { Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<[u8; S]>()) }
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_bytes(&self, l: usize, cursor: &mut usize) -> std::io::Result<&[u8]> {
pub fn read_bytes(&self, l: usize, cursor: &mut usize) -> Result<&[u8], OutOfBoundsError> {
let ptr = *cursor;
let end = ptr + l;
debug_assert!(end <= L);
@ -438,39 +488,43 @@ impl<const L: usize> Buffer<L> {
*cursor = end;
Ok(&self.1[ptr..end])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_varint(&self, cursor: &mut usize) -> std::io::Result<u64> {
pub fn read_varint(&self, cursor: &mut usize) -> Result<u64, OutOfBoundsError> {
let c = *cursor;
if c < self.0 {
let mut a = &self.1[c..];
varint::read(&mut a).map(|r| {
*cursor = c + r.1;
debug_assert!(*cursor <= self.0);
r.0
})
varint::read(&mut a)
.map(|r| {
*cursor = c + r.1;
debug_assert!(*cursor <= self.0);
r.0
})
.map_err(|_| OutOfBoundsError)
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_u8(&self, cursor: &mut usize) -> std::io::Result<u8> {
pub fn read_u8(&self, cursor: &mut usize) -> Result<u8, OutOfBoundsError> {
let ptr = *cursor;
debug_assert!(ptr < L);
if ptr < self.0 {
*cursor = ptr + 1;
Ok(self.1[ptr])
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_u16(&self, cursor: &mut usize) -> std::io::Result<u16> {
pub fn read_u16(&self, cursor: &mut usize) -> Result<u16, OutOfBoundsError> {
let ptr = *cursor;
let end = ptr + 2;
debug_assert!(end <= L);
@ -478,12 +532,13 @@ impl<const L: usize> Buffer<L> {
*cursor = end;
Ok(u16::from_be(memory::load_raw(&self.1[ptr..])))
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_u32(&self, cursor: &mut usize) -> std::io::Result<u32> {
pub fn read_u32(&self, cursor: &mut usize) -> Result<u32, OutOfBoundsError> {
let ptr = *cursor;
let end = ptr + 4;
debug_assert!(end <= L);
@ -491,12 +546,13 @@ impl<const L: usize> Buffer<L> {
*cursor = end;
Ok(u32::from_be(memory::load_raw(&self.1[ptr..])))
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
#[inline(always)]
pub fn read_u64(&self, cursor: &mut usize) -> std::io::Result<u64> {
pub fn read_u64(&self, cursor: &mut usize) -> Result<u64, OutOfBoundsError> {
let ptr = *cursor;
let end = ptr + 8;
debug_assert!(end <= L);
@ -504,7 +560,8 @@ impl<const L: usize> Buffer<L> {
*cursor = end;
Ok(u64::from_be(memory::load_raw(&self.1[ptr..])))
} else {
Err(overflow_err())
unlikely_branch();
Err(OutOfBoundsError)
}
}
}
@ -519,7 +576,8 @@ impl<const L: usize> Write for Buffer<L> {
self.1[ptr..end].copy_from_slice(buf);
Ok(buf.len())
} else {
Err(overflow_err())
unlikely_branch();
Err(std::io::Error::new(std::io::ErrorKind::Other, OUT_OF_BOUNDS_MSG))
}
}
@ -656,92 +714,6 @@ mod tests {
assert!(b.is_empty());
}
#[test]
fn buffer_bytes() {
    const SIZE: usize = 100;
    for _ in 0..1000 {
        // BUG FIX: Vec::with_capacity() yields a zero-length vector, so the old
        // fill_with() call was a no-op and the variable-length branch only ever
        // exercised an empty slice. Start from a zeroed full-length vector so
        // random data is actually appended and read back.
        let mut v: Vec<u8> = vec![0u8; SIZE];
        v.fill_with(|| rand::random());
        let mut b = Buffer::<SIZE>::new();
        assert!(b.append_bytes(&v).is_ok());
        assert_eq!(b.read_bytes(v.len(), &mut 0).unwrap(), &v);

        let mut v: [u8; SIZE] = [0u8; SIZE];
        v.fill_with(|| rand::random());
        let mut b = Buffer::<SIZE>::new();
        assert!(b.append_bytes_fixed(&v).is_ok());
        assert_eq!(b.read_bytes_fixed(&mut 0).unwrap(), &v);

        // Resolves the old FIXME: the *_get_mut appenders take no input; they
        // reserve space and hand back a mutable view. Write the test data
        // through that view and confirm it round-trips.
        let mut b = Buffer::<SIZE>::new();
        let reserved = b.append_bytes_fixed_get_mut::<SIZE>();
        assert!(reserved.is_ok());
        reserved.unwrap().copy_from_slice(&v);
        assert_eq!(b.read_bytes_fixed::<SIZE>(&mut 0).unwrap(), &v);
    }
}
/// Verify the random-access (non-cursor) accessors: bytes_fixed_at,
/// bytes_fixed_mut_at, and the big-endian uN_at readers.
#[test]
fn buffer_at() {
    const SIZE: usize = 100;
    for _ in 0..1000 {
        // Plant a single 1 byte at a random offset and check the fixed-width
        // byte accessors can see it there.
        let mut v = [0u8; SIZE];
        let mut idx: usize = rand::random::<usize>() % SIZE;
        v[idx] = 1;
        let mut b = Buffer::<SIZE>::new();
        assert!(b.append_bytes(&v).is_ok());
        let res = b.bytes_fixed_at::<1>(idx);
        assert!(res.is_ok());
        assert_eq!(res.unwrap()[0], 1);
        let res = b.bytes_fixed_mut_at::<1>(idx);
        assert!(res.is_ok());
        assert_eq!(res.unwrap()[0], 1);
        // The uX integer tests need an 8-byte window fully inside the buffer,
        // so clamp idx: snap small indices to 0 and pull indices near the end
        // back so idx..idx+8 stays in bounds. Then fill that window with 0x01
        // bytes so every read width yields a predictable repeating pattern.
        v[idx] = 0;
        if idx < 8 {
            idx = 0;
        } else if (idx + 7) >= SIZE {
            idx -= 7;
        }
        for i in idx..(idx + 8) {
            v[i] = 1;
        }
        let mut b = Buffer::<SIZE>::new();
        assert!(b.append_bytes(&v).is_ok());
        let res = b.u8_at(idx);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 1);
        // 257 == 0x0101 — two 0x01 bytes read big-endian.
        let res = b.u16_at(idx);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 257);
        // 16843009 == 0x01010101.
        let res = b.u32_at(idx);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 16843009);
        // 72340172838076673 == 0x0101010101010101.
        let res = b.u64_at(idx);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 72340172838076673);
    }
}
#[test]
fn buffer_sizing() {
const SIZE: usize = 100;

View file

@ -1,6 +1,6 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
// These were taken from BSD sysexits.h to provide some standard.
// These were taken from BSD sysexits.h to provide some standard for process exit codes.
pub const OK: i32 = 0;

207
utils/src/json.rs Normal file
View file

@ -0,0 +1,207 @@
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::ser::Formatter;
/// Recursively patch a JSON object.
///
/// This is slightly different from a usual JSON merge. For objects in the target their fields
/// are updated by recursively calling json_patch if the same field is present in the source.
/// If the source tries to set an object to something other than another object, this is ignored.
/// Other fields are replaced. This is used for RESTful config object updates. The depth limit
/// field is to prevent stack overflows via the API.
pub fn json_patch(target: &mut serde_json::value::Value, source: &serde_json::value::Value, depth_limit: usize) {
    // Non-object targets are simply replaced (when the value actually differs).
    if !target.is_object() {
        if *target != *source {
            *target = source.clone();
        }
        return;
    }
    // Attempting to overwrite an object with a non-object is deliberately ignored.
    let source_map = match source.as_object() {
        Some(m) => m,
        None => return,
    };
    let target_map = target.as_object_mut().unwrap();
    // Recurse into fields both sides share, but only while depth remains.
    if depth_limit > 0 {
        for (key, target_value) in target_map.iter_mut() {
            if let Some(new_value) = source_map.get(key) {
                json_patch(target_value, new_value, depth_limit - 1);
            }
        }
    }
    // Insert source-only fields, skipping explicit nulls.
    for (key, new_value) in source_map.iter() {
        if !target_map.contains_key(key) && !new_value.is_null() {
            target_map.insert(key.clone(), new_value.clone());
        }
    }
}
/// Patch a serializable object with the fields present in a JSON object.
///
/// If there are no changes, None is returned. The depth limit is passed through to json_patch and
/// should be set to a sanity check value to prevent overflows.
pub fn json_patch_object<O: Serialize + DeserializeOwned + Eq>(
    obj: O,
    patch: &str,
    depth_limit: usize,
) -> Result<Option<O>, serde_json::Error> {
    // Parse the patch, round-trip the object through a JSON value, merge, then
    // deserialize back. The `?` chain replaces the original triple-nested
    // map_or_else(|e| Err(e), ...) pyramid; error propagation is identical.
    let patch = serde_json::from_str::<serde_json::value::Value>(patch)?;
    let mut obj_value = serde_json::value::to_value(&obj)?;
    json_patch(&mut obj_value, &patch, depth_limit);
    let obj_merged = serde_json::value::from_value::<O>(obj_value)?;
    Ok(if obj == obj_merged { None } else { Some(obj_merged) })
}
/// Shortcut to use serde_json to serialize an object, returns "null" on error.
pub fn to_json<O: serde::Serialize>(o: &O) -> String {
    // unwrap_or_else avoids allocating the fallback String on the success path
    // (clippy: or_fun_call).
    serde_json::to_string(o).unwrap_or_else(|_| "null".into())
}
/// Shortcut to use serde_json to serialize an object, returns "null" on error.
pub fn to_json_pretty<O: serde::Serialize>(o: &O) -> String {
    let mut out = Vec::new();
    let mut serializer = serde_json::Serializer::with_formatter(&mut out, PrettyFormatter::new());
    match o.serialize(&mut serializer) {
        // Fall back to "null" if the serialized bytes are somehow not UTF-8.
        Ok(_) => String::from_utf8(out).unwrap_or_else(|_| "null".into()),
        Err(_) => "null".into(),
    }
}
/// JSON formatter that looks a bit better than the Serde default.
pub struct PrettyFormatter<'a> {
    current_indent: usize, // current nesting depth, in indent units
    has_value: bool,       // true once the current array/object has emitted at least one value
    indent: &'a [u8],      // byte string written once per nesting level
}
/// Write the indent byte string `s` to `wr` a total of `n` times.
fn indent<W>(wr: &mut W, n: usize, s: &[u8]) -> std::io::Result<()>
where
    W: ?Sized + std::io::Write,
{
    // Short-circuits on the first write error, like the original loop.
    (0..n).try_for_each(|_| wr.write_all(s))
}
impl<'a> PrettyFormatter<'a> {
    /// Create a formatter using the default indent byte string.
    pub fn new() -> Self {
        Self::with_indent(b" ")
    }

    /// Create a formatter that writes `indent` once per nesting level.
    /// Depth starts at zero with no value yet emitted.
    pub fn with_indent(indent: &'a [u8]) -> Self {
        Self { current_indent: 0, has_value: false, indent }
    }
}
/// Default is the formatter produced by PrettyFormatter::new().
impl<'a> Default for PrettyFormatter<'a> {
    fn default() -> Self {
        Self::new()
    }
}
/// serde_json Formatter backing to_json_pretty(). Arrays are kept on one
/// line with padded brackets; each object key starts a new, indented line.
impl<'a> Formatter for PrettyFormatter<'a> {
    // Open an array: bump depth and reset has_value so an empty array closes as "[]".
    fn begin_array<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        self.current_indent += 1;
        self.has_value = false;
        writer.write_all(b"[")
    }

    // Close an array: pad the bracket only if at least one element was written.
    fn end_array<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        self.current_indent -= 1;
        if self.has_value {
            writer.write_all(b" ]")
        } else {
            writer.write_all(b"]")
        }
    }

    // Elements are separated by ", "; the first is padded from the "[".
    fn begin_array_value<W>(&mut self, writer: &mut W, first: bool) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        if first {
            writer.write_all(b" ")?;
        } else {
            writer.write_all(b", ")?;
        }
        Ok(())
    }

    fn end_array_value<W>(&mut self, _writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        self.has_value = true;
        Ok(())
    }

    // Open an object: bump depth; keys will each start a fresh indented line.
    fn begin_object<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        self.current_indent += 1;
        self.has_value = false;
        writer.write_all(b"{")
    }

    // Close an object: if it had members, drop to a new line at the enclosing
    // indent level before the "}" — empty objects close as "{}".
    fn end_object<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        self.current_indent -= 1;
        if self.has_value {
            writer.write_all(b"\n")?;
            indent(writer, self.current_indent, self.indent)?;
        }
        writer.write_all(b"}")
    }

    // Every key begins on its own line, indented to the current depth;
    // non-first keys are preceded by a comma on the previous line.
    fn begin_object_key<W>(&mut self, writer: &mut W, first: bool) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        if first {
            writer.write_all(b"\n")?;
        } else {
            writer.write_all(b",\n")?;
        }
        indent(writer, self.current_indent, self.indent)
    }

    fn begin_object_value<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        writer.write_all(b": ")
    }

    fn end_object_value<W>(&mut self, _writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        self.has_value = true;
        Ok(())
    }
}

View file

@ -1,8 +1,13 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
pub mod arrayvec;
pub mod blob;
pub mod buffer;
#[allow(unused)]
pub mod exitcode;
pub mod gatherarray;
pub mod hex;
pub mod json;
pub mod memory;
pub mod pool;
pub mod ringbuffermap;
@ -28,6 +33,10 @@ pub fn ms_monotonic() -> i64 {
std::time::Instant::now().duration_since(instant_zero).as_millis() as i64
}
/// Empty, never-inlined #[cold] function.
///
/// Calling this at the top of a branch (e.g. an error path) hints to LLVM
/// that the branch is unlikely, without relying on unstable intrinsics.
#[cold]
#[inline(never)]
pub extern "C" fn unlikely_branch() {}
#[cfg(test)]
mod tests {
use super::ms_monotonic;

View file

@ -10,5 +10,5 @@ pub mod sys;
pub use localinterface::LocalInterface;
pub use localsocket::LocalSocket;
pub use settings::Settings;
pub use settings::VL1Settings;
pub use vl1service::*;

View file

@ -5,7 +5,7 @@ use zerotier_network_hypervisor::vl1::InetAddress;
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(default)]
pub struct Settings {
pub struct VL1Settings {
/// Primary ZeroTier port that is always bound, default is 9993.
pub fixed_ports: Vec<u16>,
@ -22,7 +22,7 @@ pub struct Settings {
pub cidr_blacklist: Vec<InetAddress>,
}
impl Settings {
impl VL1Settings {
#[cfg(target_os = "macos")]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 10] = ["lo", "utun", "gif", "stf", "iptap", "pktap", "feth", "zt", "llw", "anpi"];
@ -33,7 +33,7 @@ impl Settings {
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 0] = [];
}
impl Default for Settings {
impl Default for VL1Settings {
fn default() -> Self {
Self {
fixed_ports: vec![9993],

View file

@ -7,11 +7,11 @@ use std::sync::Arc;
use async_trait::async_trait;
use zerotier_crypto::random;
use zerotier_network_hypervisor::vl1::{Endpoint, Event, HostSystem, Identity, InetAddress, InnerProtocol, Node, PathFilter, Storage};
use zerotier_network_hypervisor::vl1::{Endpoint, Event, HostSystem, Identity, InetAddress, InnerProtocol, Node, NodeStorage, PathFilter};
use zerotier_utils::{ms_monotonic, ms_since_epoch};
use crate::constants::UNASSIGNED_PRIVILEGED_PORTS;
use crate::settings::Settings;
use crate::settings::VL1Settings;
use crate::sys::udp::{udp_test_bind, BoundUdpPort};
use crate::LocalSocket;
@ -24,9 +24,9 @@ use tokio::time::Duration;
/// talks to the physical network, manages the vl1 node, and presents a templated interface for
/// whatever inner protocol implementation is using it. This would typically be VL2 but could be
/// a test harness or just the controller for a controller that runs stand-alone.
pub struct VL1Service<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol> {
pub struct VL1Service<NodeStorageImpl: NodeStorage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol> {
state: tokio::sync::RwLock<VL1ServiceMutableState>,
storage: Arc<StorageImpl>,
storage: Arc<NodeStorageImpl>,
inner: Arc<InnerProtocolImpl>,
path_filter: Arc<PathFilterImpl>,
node_container: Option<Node<Self>>,
@ -35,17 +35,17 @@ pub struct VL1Service<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerPro
struct VL1ServiceMutableState {
daemons: Vec<JoinHandle<()>>,
udp_sockets: HashMap<u16, parking_lot::RwLock<BoundUdpPort>>,
settings: Settings,
settings: VL1Settings,
}
impl<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol>
VL1Service<StorageImpl, PathFilterImpl, InnerProtocolImpl>
impl<NodeStorageImpl: NodeStorage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol>
VL1Service<NodeStorageImpl, PathFilterImpl, InnerProtocolImpl>
{
pub async fn new(
storage: Arc<StorageImpl>,
storage: Arc<NodeStorageImpl>,
inner: Arc<InnerProtocolImpl>,
path_filter: Arc<PathFilterImpl>,
settings: Settings,
settings: VL1Settings,
) -> Result<Arc<Self>, Box<dyn Error>> {
let mut service = VL1Service {
state: tokio::sync::RwLock::new(VL1ServiceMutableState {
@ -203,8 +203,8 @@ impl<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerP
}
#[async_trait]
impl<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol> HostSystem
for VL1Service<StorageImpl, PathFilterImpl, InnerProtocolImpl>
impl<NodeStorageImpl: NodeStorage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol> HostSystem
for VL1Service<NodeStorageImpl, PathFilterImpl, InnerProtocolImpl>
{
type LocalSocket = crate::LocalSocket;
type LocalInterface = crate::LocalInterface;
@ -301,8 +301,8 @@ impl<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerP
}
}
impl<StorageImpl: Storage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol> Drop
for VL1Service<StorageImpl, PathFilterImpl, InnerProtocolImpl>
impl<NodeStorageImpl: NodeStorage, PathFilterImpl: PathFilter, InnerProtocolImpl: InnerProtocol> Drop
for VL1Service<NodeStorageImpl, PathFilterImpl, InnerProtocolImpl>
{
fn drop(&mut self) {
loop {