Merge branch 'tetanus' into tetanus-monica

Adam Ierymenko 2023-03-30 14:48:13 -04:00 committed by GitHub
commit e6af9bbc6e
99 changed files with 3422 additions and 5542 deletions


@@ -5,7 +5,6 @@ members = [
"network-hypervisor",
"controller",
"service",
"vl1-service",
"utils",
]

attic/cache.rs (new file, 157 lines)

@@ -0,0 +1,157 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::collections::{BTreeMap, HashMap};
use std::error::Error;
use std::mem::replace;
use std::ops::Bound;
use std::sync::{Mutex, RwLock};
use crate::database::Database;
use crate::model::{Member, Network};
use zerotier_network_hypervisor::vl1::{Address, PartialAddress};
use zerotier_network_hypervisor::vl2::NetworkId;
/// Network and member cache used by database implementations to implement change detection.
///
/// Note: the database must ensure that calls to on_X_updated() methods are only performed
/// when a potentially newer version is committed. No-op calls when nothing has changed are
/// okay but calls out of order will result in extra updated events being generated for
/// movements forward and backward in time. Calls must be temporally ordered.
pub struct Cache {
by_nwid: RwLock<HashMap<NetworkId, (Network, Mutex<BTreeMap<PartialAddress, Member>>)>>,
}
impl Cache {
pub fn new() -> Self {
Self { by_nwid: RwLock::new(HashMap::new()) }
}
/// Load (or reload) the entire cache from a database.
pub async fn load_all<DatabaseImpl: Database>(&self, db: &DatabaseImpl) -> Result<(), Box<dyn Error + Send + Sync>> {
let mut by_nwid = self.by_nwid.write().unwrap();
by_nwid.clear();
let networks = db.list_networks().await?;
for network_id in networks {
if let Some(network) = db.get_network(&network_id).await? {
let network_entry = by_nwid
.entry(network_id.clone())
.or_insert_with(|| (network, Mutex::new(BTreeMap::new())));
let mut by_node_id = network_entry.1.lock().unwrap();
let members = db.list_members(&network_id).await?;
for node_id in members {
if let Some(member) = db.get_member(&network_id, &node_id).await? {
let _ = by_node_id.insert(node_id, member);
}
}
}
}
Ok(())
}
/// Update a network if changed, returning whether or not any update was made and the old version if any.
///
/// A value of (true, None) indicates that there was no network with that ID, in which case it is added.
pub fn on_network_updated(&self, network: Network) -> (bool, Option<Network>) {
let mut by_nwid = self.by_nwid.write().unwrap();
if let Some(prev_network) = by_nwid.get_mut(&network.id) {
if !prev_network.0.eq(&network) {
(true, Some(replace(&mut prev_network.0, network)))
} else {
(false, None)
}
} else {
assert!(by_nwid
.insert(network.id.clone(), (network.clone(), Mutex::new(BTreeMap::new())))
.is_none());
(true, None)
}
}
/// Update a member if changed, returning whether or not any update was made and the old version if any.
///
/// A value of (true, None) indicates that there was no member with that ID and that it was added. If
/// there is no network with the member's network ID, (false, None) is returned and no action is taken.
pub fn on_member_updated(&self, member: Member) -> (bool, Option<Member>) {
let by_nwid = self.by_nwid.read().unwrap();
if let Some(network) = by_nwid.get(&member.network_id) {
let mut by_node_id = network.1.lock().unwrap();
if let Some(exact_address_match) = by_node_id.get_mut(&member.node_id) {
if !member.eq(exact_address_match) {
return (true, Some(std::mem::replace(exact_address_match, member)));
}
} else {
let mut partial_address_match = None;
for m in by_node_id.range_mut::<PartialAddress, (Bound<&PartialAddress>, Bound<&PartialAddress>)>((
Bound::Included(&member.node_id),
Bound::Unbounded,
)) {
if m.0.matches_partial(&member.node_id) {
if partial_address_match.is_some() {
return (false, None);
}
let _ = partial_address_match.insert(m.1);
} else {
break;
}
}
if let Some(partial_address_match) = partial_address_match {
if !member.eq(partial_address_match) {
return (true, Some(std::mem::replace(partial_address_match, member)));
} else {
return (false, None);
}
}
let mut partial_address_match = None;
for m in by_node_id
.range_mut::<PartialAddress, (Bound<&PartialAddress>, Bound<&PartialAddress>)>((
Bound::Unbounded,
Bound::Included(&member.node_id),
))
.rev()
{
if m.0.matches_partial(&member.node_id) {
if partial_address_match.is_some() {
return (false, None);
}
let _ = partial_address_match.insert(m.1);
} else {
break;
}
}
if let Some(partial_address_match) = partial_address_match {
if !member.eq(partial_address_match) {
return (true, Some(std::mem::replace(partial_address_match, member)));
} else {
return (false, None);
}
}
assert!(by_node_id.insert(member.node_id.clone(), member).is_none());
return (true, None);
}
}
return (false, None);
}
/// Delete a network, returning it if it existed.
pub fn on_network_deleted(&self, network_id: NetworkId) -> Option<(Network, Vec<Member>)> {
let mut by_nwid = self.by_nwid.write().unwrap();
let network = by_nwid.remove(&network_id)?;
let mut members = network.1.lock().unwrap();
Some((network.0, members.values().cloned().collect()))
}
/// Delete a member, returning it if it existed.
pub fn on_member_deleted(&self, network_id: NetworkId, node_id: Address) -> Option<Member> {
let by_nwid = self.by_nwid.read().unwrap();
let network = by_nwid.get(&network_id)?;
let mut members = network.1.lock().unwrap();
members.remove(&node_id.to_partial())
}
}
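The two range scans in on_member_updated() above implement prefix matching over sorted keys: in a BTreeMap ordered by PartialAddress, entries that extend a given prefix sort immediately after it and a shorter prefix of it sorts immediately before it, so the code walks adjacent keys in each direction and stops at the first non-match. Below is a minimal, self-contained sketch of the same technique using plain strings in place of PartialAddress; matches_partial() here is an illustrative stand-in (mutual prefix matching), and the ambiguity handling of the original is omitted.

use std::collections::BTreeMap;
use std::ops::Bound;

// Stand-in for PartialAddress::matches_partial(): true if either key is a prefix of the other.
fn matches_partial(a: &str, b: &str) -> bool {
    a.starts_with(b) || b.starts_with(a)
}

// Find an entry whose key prefix-matches `key` by scanning adjacent keys in both directions.
fn find_prefix_match<'a>(map: &'a BTreeMap<String, u32>, key: &str) -> Option<(&'a String, &'a u32)> {
    // Forward: exact matches and longer keys sharing the prefix sort at or after `key`.
    for (k, v) in map.range::<str, _>((Bound::Included(key), Bound::Unbounded)) {
        if matches_partial(k, key) {
            return Some((k, v));
        }
        break; // keys are sorted, so the first non-match ends the search in this direction
    }
    // Backward: a shorter key that `key` extends sorts immediately before it.
    for (k, v) in map.range::<str, _>((Bound::Unbounded, Bound::Included(key))).rev() {
        if matches_partial(k, key) {
            return Some((k, v));
        }
        break;
    }
    None
}

fn main() {
    let mut members = BTreeMap::new();
    members.insert("89e92ceee5".to_string(), 1u32); // a full node address (illustrative value)
    assert!(find_prefix_match(&members, "89e92c").is_some()); // a shorter query matches it
    assert!(find_prefix_match(&members, "deadbeef00").is_none());
}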


@@ -10,9 +10,8 @@ use tokio_postgres::types::Type;
use tokio_postgres::{Client, Statement};
use zerotier_crypto::secure_eq;
use zerotier_crypto::typestate::Valid;
use zerotier_network_hypervisor::vl1::{Address, Identity, InetAddress};
use zerotier_network_hypervisor::vl1::{Address, InetAddress};
use zerotier_network_hypervisor::vl2::rule::Rule;
use zerotier_network_hypervisor::vl2::{IpRoute, NetworkId};
@@ -21,7 +20,6 @@ use zerotier_utils::tokio;
use zerotier_utils::tokio::runtime::Handle;
use zerotier_utils::tokio::sync::broadcast::{channel, Receiver, Sender};
use zerotier_utils::tokio::task::JoinHandle;
use zerotier_vl1_service::VL1DataStorage;
use crate::database::*;
use crate::model::{IpAssignmentPool, Member, Network, RequestLogItem};
@@ -130,15 +128,14 @@ impl<'a> Drop for ConnectionHolder<'a> {
}
pub struct PostgresDatabase {
local_controller_id_str: String,
local_identity: Valid<Identity>,
local_controller: Address,
connections: Mutex<(Vec<Box<PostgresConnection>>, Sender<()>)>,
postgres_path: String,
runtime: Handle,
}
impl PostgresDatabase {
pub async fn new(runtime: Handle, postgres_path: String, num_connections: usize, local_identity: Valid<Identity>) -> Result<Arc<Self>, Error> {
pub async fn new(runtime: Handle, postgres_path: String, num_connections: usize) -> Result<Arc<Self>, Error> {
assert!(num_connections > 0);
let (sender, _) = channel(4096);
let mut connections = Vec::with_capacity(num_connections);
@@ -147,7 +144,6 @@ impl PostgresDatabase {
}
Ok(Arc::new(Self {
local_controller_id_str: local_identity.address.to_string(),
local_identity,
connections: Mutex::new((connections, sender)),
postgres_path,
runtime,
@@ -177,16 +173,6 @@ impl PostgresDatabase {
}
}
impl VL1DataStorage for PostgresDatabase {
fn load_node_identity(&self) -> Option<Valid<Identity>> {
Some(self.local_identity.clone())
}
fn save_node_identity(&self, _id: &Valid<Identity>) -> bool {
panic!("local identity saving not supported by PostgresDatabase")
}
}
#[async_trait]
impl Database for PostgresDatabase {
async fn list_networks(&self) -> Result<Vec<NetworkId>, Error> {

attic/proquint.rs (new file, 98 lines)

@@ -0,0 +1,98 @@
// This is a trimmed down version of: https://github.com/christian-blades-cb/proquint-rs
// BSD license
const UINT2CONSONANT: [char; 16] = ['b', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't', 'v', 'z'];
const UINT2VOWEL: &'static [char] = &['a', 'i', 'o', 'u'];
const MASK_FIRST4_U16: u16 = 0xF000;
const MASK_FIRST2_U16: u16 = 0xC000;
macro_rules! decons {
($res:ident, $bitcounter:ident, $x:expr) => {{
$bitcounter += 4;
$res = $res.wrapping_shl(4);
$res += $x;
}};
}
macro_rules! devowel {
($res:ident, $bitcounter:ident, $x:expr) => {{
$bitcounter += 2;
$res = $res.wrapping_shl(2);
$res += $x;
}};
}
macro_rules! cons_u16 {
($i:ident, $out:ident) => {
let j: u16 = ($i & MASK_FIRST4_U16).wrapping_shr(12);
$i = $i.wrapping_shl(4);
$out.push(UINT2CONSONANT[j as usize]);
};
}
macro_rules! vowel_u16 {
($i:ident, $out:ident) => {
let j: u16 = ($i & MASK_FIRST2_U16).wrapping_shr(14);
$i = $i.wrapping_shl(2);
$out.push(UINT2VOWEL[j as usize]);
};
}
pub fn u16_from_quint(quint: &str) -> Option<u16> {
let mut bitcounter = 0usize;
let mut res = 0u16;
for c in quint.chars() {
match c {
'b' => decons!(res, bitcounter, 0u16),
'd' => decons!(res, bitcounter, 1u16),
'f' => decons!(res, bitcounter, 2u16),
'g' => decons!(res, bitcounter, 3u16),
'h' => decons!(res, bitcounter, 4u16),
'j' => decons!(res, bitcounter, 5u16),
'k' => decons!(res, bitcounter, 6u16),
'l' => decons!(res, bitcounter, 7u16),
'm' => decons!(res, bitcounter, 8u16),
'n' => decons!(res, bitcounter, 9u16),
'p' => decons!(res, bitcounter, 10u16),
'r' => decons!(res, bitcounter, 11u16),
's' => decons!(res, bitcounter, 12u16),
't' => decons!(res, bitcounter, 13u16),
'v' => decons!(res, bitcounter, 14u16),
'z' => decons!(res, bitcounter, 15u16),
'a' => devowel!(res, bitcounter, 0u16),
'i' => devowel!(res, bitcounter, 1u16),
'o' => devowel!(res, bitcounter, 2u16),
'u' => devowel!(res, bitcounter, 3u16),
_ => {}
}
}
if bitcounter == 16 {
Some(res)
} else {
None
}
}
pub fn u16_to_quint(mut i: u16, out: &mut String) {
cons_u16!(i, out);
vowel_u16!(i, out);
cons_u16!(i, out);
vowel_u16!(i, out);
out.push(UINT2CONSONANT[(i & MASK_FIRST4_U16).wrapping_shr(12) as usize]);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn quint_u16() {
let mut s = String::with_capacity(16);
for i in u16::MIN..=u16::MAX {
s.clear();
u16_to_quint(i, &mut s);
assert_eq!(s.len(), 5);
assert_eq!(u16_from_quint(s.as_str()).unwrap(), i);
}
}
}
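As a concrete check of the encoding above (not part of the original file): feeding the two 16-bit halves of 127.0.0.1 through u16_to_quint() reproduces "lusab-babad", the canonical example from the proquint specification.

fn proquint_ipv4_example() {
    let mut s = String::with_capacity(11);
    u16_to_quint(0x7f00, &mut s); // high half of 127.0.0.1 -> "lusab"
    s.push('-');
    u16_to_quint(0x0001, &mut s); // low half -> "babad"
    assert_eq!(s, "lusab-babad");
}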


@@ -1,37 +0,0 @@
[package]
name = "syncwhole"
version = "0.1.0"
edition = "2021"
license = "MPL-2.0"
authors = ["Adam Ierymenko <adam.ierymenko@zerotier.com>"]
[profile.release]
opt-level = 3
lto = true
codegen-units = 1
panic = 'abort'
[lib]
name = "syncwhole"
path = "src/lib.rs"
doc = true
[[bin]]
name = "syncwhole_local_testnet"
path = "src/main.rs"
doc = false
required-features = ["include_sha2_lib"]
[dependencies]
tokio = { version = "^1", features = ["net", "rt", "parking_lot", "time", "io-std", "io-util", "sync", "rt-multi-thread"], default-features = false }
serde = { version = "^1", features = ["derive"], default-features = false }
serde_bytes = "^0"
rmp = "^0"
rmp-serde = "^1"
sha2 = { version = "^0", optional = true }
async-trait = "^0"
futures-core = "^0"
iblt = { version = "^0", path = "../iblt" }
[features]
include_sha2_lib = ["sha2"]


@@ -1 +0,0 @@
../rustfmt.toml


@@ -1,105 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
use async_trait::async_trait;
/// Size of keys, which is the size of a 512-bit hash. This is a protocol constant.
pub const KEY_SIZE: usize = 64;
/// Result returned by DataStore::store().
pub enum StoreResult {
/// Entry was accepted.
Ok,
/// Entry was a duplicate of one we already have but was otherwise valid.
Duplicate,
/// Entry was valid but was ignored for an unspecified reason.
Ignored,
/// Entry was rejected as malformed or otherwise invalid (e.g. failed signature check).
Rejected,
}
/// API to be implemented by the data set we want to replicate.
///
/// Keys as used in this API are SHA512 hashes of values.
///
/// Range queries take an optional subset parameter. The format and interpretation of
/// this is entirely up to the implementer of DataStore. It could contain a time, a SQL
/// query, a set of certificates, anything. Its purpose is to select which items we want
/// from remote nodes so that we can replicate only a subset of a larger set of data.
/// Other nodes can also supply a subset to this one, so it's important that remote subset
/// values supplied to the local data store be handled correctly.
#[async_trait]
pub trait DataStore: Sync + Send {
/// Container for values returned by load().
///
/// Making this a trait defined type lets you use Arc<[u8]>, etc. as well as obvious
/// ones like Box<[u8]> and Vec<u8>.
type ValueRef: AsRef<[u8]> + Sync + Send + Clone;
/// Key hash size, always 64 for SHA512.
const KEY_SIZE: usize = KEY_SIZE;
/// Maximum size of a value in bytes.
const MAX_VALUE_SIZE: usize;
/// Get the subset that should be sent to remote nodes in queries.
async fn local_subset(&self) -> Option<Self::ValueRef>;
/// Get an item by identity hash key if it exists.
async fn load(&self, key: &[u8; KEY_SIZE]) -> Option<Self::ValueRef>;
/// Store an item in the data store and return its status.
///
/// Note that no time is supplied here. The data store must determine this in an implementation
/// dependent manner if this is a temporally subjective data store. It could be determined by
/// the wall clock, from the object itself, etc.
///
/// The key supplied here will always be the SHA512 hash of the value. There is no need to
/// re-compute and check the key, but the value must be validated.
///
/// Validation of the value and returning the appropriate StoreResult is important to the
/// operation of the synchronization algorithm:
///
/// StoreResult::Ok - Value was valid and was accepted and saved.
///
/// StoreResult::Duplicate - Value was valid but is a duplicate of one we already have.
///
/// StoreResult::Ignored - Value was valid but for some other reason was not saved.
///
/// StoreResult::Rejected - Value was not valid, causes link to peer to be dropped.
///
/// Rejected should only be returned if the value actually fails a validity check, signature
/// verification, proof of work check, or some other required criteria. Ignored must be
/// returned if the value is valid but is too old or was rejected for some other normal reason.
async fn store(&self, key: &[u8; KEY_SIZE], value: &[u8]) -> StoreResult;
/// Iterate through keys in a range.
///
/// Keys MUST be output in ascending binary sort order.
async fn keys<F: Send + FnMut(&[u8]) -> bool>(
&self,
subset: Option<&[u8]>,
range_start: &[u8; KEY_SIZE],
range_end: &[u8; KEY_SIZE],
f: F,
);
/// Iterate through values in a range.
///
/// Entries MUST be output in ascending binary sort order.
async fn values<F: Send + FnMut(&[u8], &[u8]) -> bool>(
&self,
subset: Option<&[u8]>,
range_start: &[u8; KEY_SIZE],
range_end: &[u8; KEY_SIZE],
f: F,
);
}
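To make the contract above concrete, here is a hedged sketch of a trivial in-memory implementation. MemStore and its fields are illustrative (not from the original crate), and the value validation that store() is documented to require is reduced to a comment; the file's existing async_trait import is assumed to be in scope.

use std::collections::BTreeMap;
use tokio::sync::RwLock;

struct MemStore {
    records: RwLock<BTreeMap<[u8; KEY_SIZE], Box<[u8]>>>,
}

#[async_trait]
impl DataStore for MemStore {
    type ValueRef = Box<[u8]>;
    const MAX_VALUE_SIZE: usize = 1024;

    async fn local_subset(&self) -> Option<Self::ValueRef> {
        None // this store does not restrict queries to a subset
    }

    async fn load(&self, key: &[u8; KEY_SIZE]) -> Option<Self::ValueRef> {
        self.records.read().await.get(key).cloned()
    }

    async fn store(&self, key: &[u8; KEY_SIZE], value: &[u8]) -> StoreResult {
        // A real implementation must validate `value` here and return Rejected or Ignored as documented.
        if self.records.write().await.insert(*key, value.into()).is_none() {
            StoreResult::Ok
        } else {
            StoreResult::Duplicate
        }
    }

    async fn keys<F: Send + FnMut(&[u8]) -> bool>(&self, _subset: Option<&[u8]>, range_start: &[u8; KEY_SIZE], range_end: &[u8; KEY_SIZE], mut f: F) {
        // Keys come out of the BTreeMap already in ascending binary order, as the trait requires.
        for (k, _) in self.records.read().await.range(*range_start..=*range_end) {
            if !f(&k[..]) {
                break;
            }
        }
    }

    async fn values<F: Send + FnMut(&[u8], &[u8]) -> bool>(&self, _subset: Option<&[u8]>, range_start: &[u8; KEY_SIZE], range_end: &[u8; KEY_SIZE], mut f: F) {
        for (k, v) in self.records.read().await.range(*range_start..=*range_end) {
            if !f(&k[..], &v[..]) {
                break;
            }
        }
    }
}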


@@ -1,156 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::net::SocketAddr;
use std::sync::Arc;
#[cfg(feature = "include_sha2_lib")]
use sha2::digest::{Digest, FixedOutput};
use serde::{Deserialize, Serialize};
use crate::node::RemoteNodeInfo;
/// Configuration settings for a syncwhole node.
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq)]
pub struct Config {
/// A list of peer addresses to which we always want to stay connected.
/// The library will try to maintain connectivity to these regardless of connection limits.
pub anchors: Vec<SocketAddr>,
/// A list of other peer addresses that we can try in order to achieve desired_connection_count.
/// If this includes the anchors too there will be no effect since the anchors are tried first anyway.
pub peers: Vec<SocketAddr>,
/// The maximum number of TCP connections we should allow.
pub max_connection_count: usize,
/// The desired number of peering links.
pub desired_connection_count: usize,
/// Synchronization interval in milliseconds.
pub sync_interval: u64,
/// Connection inactivity timeout in milliseconds.
pub connection_timeout: u64,
/// An arbitrary name for this data set to avoid connecting to irrelevant nodes.
pub domain: String,
/// An optional name for this node to advertise to other nodes.
pub name: String,
/// An optional contact string for this node to advertise to other nodes.
/// Example: bighead@stanford.edu or https://www.piedpiper.com/
pub contact: String,
}
impl Default for Config {
fn default() -> Self {
Self {
anchors: Vec::new(),
peers: Vec::new(),
max_connection_count: 128,
desired_connection_count: 64,
sync_interval: 500,
connection_timeout: 500 * 10,
domain: String::new(),
name: String::new(),
contact: String::new(),
}
}
}
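A hedged sketch of filling in this Config for a small fixed mesh; the addresses, names, and domain string are placeholders rather than values from the original project.

fn example_config() -> Config {
    let mut cfg = Config::default();
    // Anchors are retried regardless of the connection limits.
    cfg.anchors.push("127.0.0.1:21384".parse().unwrap());
    // Additional candidates used to reach desired_connection_count.
    cfg.peers.push("127.0.0.1:21385".parse().unwrap());
    cfg.domain = "example-data-set".to_string();
    cfg.name = "node-1".to_string();
    cfg
}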
/// A trait that users of syncwhole implement to provide configuration information and listen for events.
pub trait Host: Sync + Send {
/// Get the current configuration for this node.
fn node_config(&self) -> Arc<Config>;
/// Test whether an inbound connection should be allowed from an address.
///
/// This is called on first incoming connection before any init is received. The authenticate()
/// method is called once init has been received and is another decision point. The default
/// implementation of this always returns true.
///
/// This is not called for outbound connections.
#[allow(unused_variables)]
fn allow(&self, remote_address: &SocketAddr) -> bool {
true
}
/// Compute HMAC-SHA512(secret, challenge).
///
/// A return of None indicates that the connection should be dropped. If authentication is
/// not enabled, the response should be computed using an all-zero secret key. This is
/// what the default implementation does, so if you don't want authentication there is no
/// need to override and implement this.
///
/// This actually gets called twice per link: once when Init is received to compute the
/// response, and once when InitResponse is received to verify the response to our challenge.
///
/// The default implementation authenticates with an all-zero key. Leave it this way if
/// you don't want authentication.
#[allow(unused_variables)]
fn authenticate(&self, info: &RemoteNodeInfo, challenge: &[u8]) -> Option<[u8; 64]> {
Some(Self::hmac_sha512(&[0_u8; 64], challenge))
}
/// Called when an attempt is made to connect to a remote address.
fn on_connect_attempt(&self, address: &SocketAddr);
/// Called when a connection has been successfully established.
///
/// Hosts are encouraged to learn endpoints when a successful outbound connection is made. Check the
/// inbound flag in the remote node info structure.
fn on_connect(&self, info: &RemoteNodeInfo);
/// Called when an open connection is closed for any reason.
fn on_connection_closed(&self, info: &RemoteNodeInfo, reason: String);
/// Fill a buffer with secure random bytes.
///
/// The implementer must call a secure random number generator or source to implement this.
fn get_secure_random(&self, buf: &mut [u8]);
/// Compute a SHA512 digest of the input.
///
/// Input can consist of one or more slices that will be processed in order.
///
/// If the feature "include_sha2_lib" is enabled a default implementation in terms of the
/// Rust sha2 crate is generated. Otherwise the implementer must supply their own
/// SHA512 function.
#[cfg(not(feature = "include_sha2_lib"))]
fn sha512(msg: &[&[u8]]) -> [u8; 64];
#[cfg(feature = "include_sha2_lib")]
fn sha512(msg: &[&[u8]]) -> [u8; 64] {
let mut h = sha2::Sha512::new();
for b in msg.iter() {
h.update(*b);
}
h.finalize_fixed().try_into().unwrap()
}
/// Compute HMAC-SHA512 using key and input.
///
/// Supplied key will always be 64 bytes in length.
///
/// The default implementation is HMAC implemented in terms of sha512() above. Specialize
/// to provide your own implementation.
fn hmac_sha512(key: &[u8], msg: &[u8]) -> [u8; 64] {
let mut opad = [0x5c_u8; 128];
let mut ipad = [0x36_u8; 128];
assert!(key.len() >= 64);
for i in 0..64 {
opad[i] ^= key[i];
}
for i in 0..64 {
ipad[i] ^= key[i];
}
Self::sha512(&[&opad, &Self::sha512(&[&ipad, msg])])
}
}


@@ -1,17 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
pub(crate) mod protocol;
pub(crate) mod utils;
pub(crate) mod varint;
pub mod datastore;
pub mod host;
pub mod node;
pub use async_trait;


@@ -1,196 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::collections::BTreeMap;
use std::io::{stdout, Write};
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::ops::Bound::Included;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use async_trait::async_trait;
use sha2::digest::Digest;
use sha2::Sha512;
use syncwhole::datastore::*;
use syncwhole::host::*;
use syncwhole::node::*;
use syncwhole::utils::*;
const TEST_NODE_COUNT: usize = 8;
const TEST_PORT_RANGE_START: u16 = 21384;
const TEST_STARTING_RECORDS_PER_NODE: usize = 16;
static mut RANDOM_CTR: u128 = 0;
fn get_random_bytes(mut buf: &mut [u8]) {
// This is only for testing and is not really secure.
let mut ctr = unsafe { RANDOM_CTR };
if ctr == 0 {
ctr = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos() * (1 + Instant::now().elapsed().as_nanos());
}
while !buf.is_empty() {
let l = buf.len().min(64);
ctr = ctr.wrapping_add(1);
buf[..l].copy_from_slice(&Sha512::digest(&ctr.to_ne_bytes()).as_slice()[..l]);
buf = &mut buf[l..];
}
unsafe { RANDOM_CTR = ctr };
}
pub struct TestNodeHost {
pub name: String,
pub config: Config,
pub records: tokio::sync::RwLock<BTreeMap<[u8; 64], [u8; 64]>>,
}
impl TestNodeHost {
pub fn new_random(test_no: usize) -> Self {
let mut s = BTreeMap::new();
for _ in 0..TEST_STARTING_RECORDS_PER_NODE {
let mut v = [0_u8; 64];
get_random_bytes(&mut v);
let k = Self::sha512(&[&v]);
s.insert(k, v);
}
Self {
name: test_no.to_string(),
config: Config::default(),
records: tokio::sync::RwLock::new(s),
}
}
}
impl Host for TestNodeHost {
fn node_config(&self) -> Config {
self.config.clone()
}
fn on_connect_attempt(&self, _address: &SocketAddr) {
//println!("{:5}: connecting to {}", self.name, _address.to_string());
}
fn on_connect(&self, info: &RemoteNodeInfo) {
//println!("{:5}: connected to {} ({}, {})", self.name, info.remote_address.to_string(), info.node_name.as_ref().map_or("null", |s| s.as_str()), if info.inbound { "inbound" } else { "outbound" });
}
fn on_connection_closed(&self, info: &RemoteNodeInfo, reason: String) {
//println!("{:5}: closed connection to {}: {} ({}, {})", self.name, info.remote_address.to_string(), reason, if info.inbound { "inbound" } else { "outbound" }, if info.initialized { "initialized" } else { "not initialized" });
}
fn get_secure_random(&self, buf: &mut [u8]) {
// This is only for testing and is not really secure.
get_random_bytes(buf);
}
}
#[async_trait]
impl DataStore for TestNodeHost {
type ValueRef = [u8; 64];
const MAX_VALUE_SIZE: usize = 1024;
fn clock(&self) -> i64 {
ms_since_epoch()
}
fn domain(&self) -> &str {
"test"
}
async fn load(&self, key: &[u8; 64]) -> Option<Self::ValueRef> {
let records = self.records.read().await;
let value = records.get(key);
if value.is_some() {
Some(value.unwrap().clone())
} else {
None
}
}
async fn store(&self, key: &[u8; 64], value: &[u8]) -> StoreResult {
let value: Result<[u8; 64], _> = value.try_into();
if let Ok(value) = value {
if self.records.write().await.insert(key.clone(), value).is_none() {
StoreResult::Ok
} else {
StoreResult::Duplicate
}
} else {
StoreResult::Rejected
}
}
async fn keys_under<F: Send + FnMut(&[u8]) -> bool>(&self, reference_time: i64, prefix: u64, prefix_bits: u32, f: F) {
let (start, end) = prefix_to_range(prefix, prefix_bits);
let records = self.records.read().await;
for (k, v) in records.range((Included(start), Included(end))) {
if !f(k) {
break;
}
}
}
}
fn main() {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
println!(
"Running syncwhole local self-test network with {} nodes starting at 127.0.0.1:{}",
TEST_NODE_COUNT, TEST_PORT_RANGE_START
);
println!();
println!("Starting nodes on 127.0.0.1...");
let mut nodes: Vec<Node<TestNodeHost, TestNodeHost>> = Vec::with_capacity(TEST_NODE_COUNT);
for port in TEST_PORT_RANGE_START..(TEST_PORT_RANGE_START + (TEST_NODE_COUNT as u16)) {
let mut peers: Vec<SocketAddr> = Vec::with_capacity(TEST_NODE_COUNT);
for port2 in TEST_PORT_RANGE_START..(TEST_PORT_RANGE_START + (TEST_NODE_COUNT as u16)) {
if port != port2 {
peers.push(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port2)));
}
}
let mut th = TestNodeHost::new_random(port as usize);
th.config.anchors = peers;
th.config.name = port.to_string();
let nh = Arc::new(th);
//println!("Starting node on 127.0.0.1:{}...", port, nh.db.lock().unwrap().len());
nodes.push(
Node::new(nh.clone(), nh.clone(), SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port)))
.await
.unwrap(),
);
}
print!("Waiting for all connections to be established...");
let _ = stdout().flush();
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let mut count = 0;
for n in nodes.iter() {
count += n.connection_count().await;
}
if count == (TEST_NODE_COUNT * (TEST_NODE_COUNT - 1)) {
println!(" {} connections up.", count);
break;
} else {
print!(".");
let _ = stdout().flush();
}
}
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
}
});
}


@@ -1,701 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::collections::{HashMap, HashSet};
use std::io::IoSlice;
use std::mem::MaybeUninit;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::ops::Add;
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering};
use std::sync::Arc;
use iblt::IBLT;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
use tokio::net::{TcpListener, TcpSocket, TcpStream};
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};
use crate::datastore::*;
use crate::host::Host;
use crate::protocol::*;
use crate::utils::*;
use crate::varint;
// Interval for announcing queued HaveRecords items, in milliseconds.
const ANNOUNCE_PERIOD: i64 = 100;
/// Information about a remote node to which we are connected.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct RemoteNodeInfo {
/// Optional name advertised by remote node (arbitrary).
pub name: String,
/// Optional contact information advertised by remote node (arbitrary).
pub contact: String,
/// Actual remote endpoint address.
pub remote_address: SocketAddr,
/// Explicitly advertised remote addresses supplied by remote node (not necessarily verified).
pub explicit_addresses: Vec<SocketAddr>,
/// Time TCP connection was established (ms since epoch).
pub connect_time: i64,
/// Time TCP connection was established (ms, monotonic).
pub connect_instant: i64,
/// True if this is an inbound TCP connection.
pub inbound: bool,
/// True if this connection has exchanged init messages successfully.
pub initialized: bool,
}
/// An instance of the syncwhole data set synchronization engine.
///
/// This holds a number of async tasks that are terminated or aborted if this object
/// is dropped. In other words this implements structured concurrency.
pub struct Node<D: DataStore + 'static, H: Host + 'static> {
internal: Arc<NodeInternal<D, H>>,
housekeeping_task: JoinHandle<()>,
announce_task: JoinHandle<()>,
listener_task: JoinHandle<()>,
}
impl<D: DataStore + 'static, H: Host + 'static> Node<D, H> {
pub async fn new(db: Arc<D>, host: Arc<H>, bind_address: SocketAddr) -> std::io::Result<Self> {
let listener = if bind_address.is_ipv4() {
TcpSocket::new_v4()
} else {
TcpSocket::new_v6()
}?;
configure_tcp_socket(&listener)?;
listener.bind(bind_address.clone())?;
let listener = listener.listen(1024)?;
let internal = Arc::new(NodeInternal::<D, H> {
anti_loopback_secret: {
let mut tmp = [0_u8; 64];
host.get_secure_random(&mut tmp);
tmp
},
datastore: db.clone(),
host: host.clone(),
connections: Mutex::new(HashMap::new()),
connecting_to: Mutex::new(HashSet::new()),
announce_queue: Mutex::new(HashMap::new()),
bind_address,
starting_instant: Instant::now(),
});
Ok(Self {
internal: internal.clone(),
housekeeping_task: tokio::spawn(internal.clone().housekeeping_task_main()),
announce_task: tokio::spawn(internal.clone().announce_task_main()),
listener_task: tokio::spawn(internal.listener_task_main(listener)),
})
}
#[inline(always)]
pub fn datastore(&self) -> &Arc<D> {
&self.internal.datastore
}
#[inline(always)]
pub fn host(&self) -> &Arc<H> {
&self.internal.host
}
/// Attempt to connect to an explicitly specified TCP endpoint.
///
/// Ok(true) is returned if a new connection was made. Ok(false) means there is already a connection
/// to the endpoint. An error is returned if the connection fails.
pub async fn connect(&self, address: &SocketAddr) -> std::io::Result<bool> {
if self.internal.connecting_to.lock().await.insert(address.clone()) {
self.internal
.connect(
address,
Instant::now().add(Duration::from_millis(self.internal.host.node_config().connection_timeout)),
)
.await
} else {
Ok(false)
}
}
/// Get a list of all open peer to peer connections.
pub async fn list_connections(&self) -> Vec<RemoteNodeInfo> {
let connections = self.internal.connections.lock().await;
let mut cl: Vec<RemoteNodeInfo> = Vec::with_capacity(connections.len());
for (_, c) in connections.iter() {
cl.push(c.info.lock().unwrap().clone());
}
cl
}
/// Get the number of open peer to peer connections.
pub async fn connection_count(&self) -> usize {
self.internal.connections.lock().await.len()
}
}
impl<D: DataStore + 'static, H: Host + 'static> Drop for Node<D, H> {
fn drop(&mut self) {
self.housekeeping_task.abort();
self.announce_task.abort();
self.listener_task.abort();
}
}
/********************************************************************************************************************/
fn configure_tcp_socket(socket: &TcpSocket) -> std::io::Result<()> {
let _ = socket.set_linger(None);
if socket.set_reuseport(true).is_ok() {
Ok(())
} else {
socket.set_reuseaddr(true)
}
}
fn decode_msgpack<'a, T: Deserialize<'a>>(b: &'a [u8]) -> std::io::Result<T> {
rmp_serde::from_slice(b).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("invalid msgpack object: {}", e.to_string()),
)
})
}
pub struct NodeInternal<D: DataStore + 'static, H: Host + 'static> {
// Secret used to perform HMAC to detect and drop loopback connections to self.
anti_loopback_secret: [u8; 64],
// Outside code implementations of DataStore and Host traits.
datastore: Arc<D>,
host: Arc<H>,
// Connections and their task join handles, by remote endpoint address.
connections: Mutex<HashMap<SocketAddr, Arc<Connection>>>,
// Outgoing connections in progress.
connecting_to: Mutex<HashSet<SocketAddr>>,
// Records received since last announce and the endpoints that we know already have them.
announce_queue: Mutex<HashMap<[u8; KEY_SIZE], Vec<SocketAddr>>>,
// Local address to which this node is bound
bind_address: SocketAddr,
// Instant this node started.
starting_instant: Instant,
}
impl<D: DataStore + 'static, H: Host + 'static> NodeInternal<D, H> {
fn ms_monotonic(&self) -> i64 {
Instant::now().duration_since(self.starting_instant).as_millis() as i64
}
async fn housekeeping_task_main(self: Arc<Self>) {
let tasks = AsyncTaskReaper::new();
let mut sleep_for = Duration::from_millis(500);
loop {
tokio::time::sleep(sleep_for).await;
let config = self.host.node_config();
let mut connections = self.connections.lock().await;
let mut connecting_to = self.connecting_to.lock().await;
let now = self.ms_monotonic();
// Drop dead or timed out connections, and for live connections handle sending sync requests.
connections.retain(|_, c| {
if !c.closed.load(Ordering::Relaxed) {
let cc = c.clone();
if (now - c.last_receive_time.load(Ordering::Relaxed)) < (config.connection_timeout as i64) {
// TODO: sync init if not waiting for a sync response
true // keep connection
} else {
let _ = c.read_task.lock().unwrap().take().map(|j| j.abort());
let host = self.host.clone();
tasks.spawn(async move {
host.on_connection_closed(&*cc.info.lock().unwrap(), "timeout".to_string());
});
false // discard connection
}
} else {
let host = self.host.clone();
let cc = c.clone();
let j = c.read_task.lock().unwrap().take();
tasks.spawn(async move {
if j.is_some() {
let e = j.unwrap().await;
if e.is_ok() {
let e = e.unwrap();
host.on_connection_closed(
&*cc.info.lock().unwrap(),
e.map_or_else(|e| e.to_string(), |_| "unknown error".to_string()),
);
} else {
host.on_connection_closed(&*cc.info.lock().unwrap(), "remote host closed connection".to_string());
}
} else {
host.on_connection_closed(&*cc.info.lock().unwrap(), "remote host closed connection".to_string());
}
});
false // discard connection
}
});
let connect_timeout_at = Instant::now().add(Duration::from_millis(config.connection_timeout));
// Always try to connect to anchor peers.
for sa in config.anchors.iter() {
if !connections.contains_key(sa) && connecting_to.insert(sa.clone()) {
let self2 = self.clone();
let sa = sa.clone();
tasks.spawn(async move {
let _ = self2.connect(&sa, connect_timeout_at).await;
});
}
}
// Try to connect to more peers until desired connection count is reached.
let desired_connection_count = config.desired_connection_count.min(config.max_connection_count);
for sa in config.peers.iter() {
if (connections.len() + connecting_to.len()) >= desired_connection_count {
break;
}
if !connections.contains_key(sa) && connecting_to.insert(sa.clone()) {
let self2 = self.clone();
let sa = sa.clone();
tasks.spawn(async move {
let _ = self2.connect(&sa, connect_timeout_at).await;
});
}
}
sleep_for = Duration::from_millis(config.sync_interval.min(config.connection_timeout));
}
}
async fn announce_task_main(self: Arc<Self>) {
let sleep_for = Duration::from_millis(ANNOUNCE_PERIOD as u64);
let mut to_announce: Vec<([u8; KEY_SIZE], Vec<SocketAddr>)> = Vec::new();
let background_tasks = AsyncTaskReaper::new();
let announce_timeout = Duration::from_millis(self.host.node_config().connection_timeout);
loop {
tokio::time::sleep(sleep_for).await;
for (key, already_has) in self.announce_queue.lock().await.drain() {
to_announce.push((key, already_has));
}
let now = self.ms_monotonic();
let have_records_est_size = (to_announce.len() * KEY_SIZE) + 2;
let mut have_records: Vec<u8> = Vec::with_capacity(have_records_est_size);
for c in self.connections.lock().await.iter() {
if c.1.announce_new_records.load(Ordering::Relaxed) {
for (key, already_has) in to_announce.iter() {
if !already_has.contains(c.0) {
let _ = std::io::Write::write_all(&mut have_records, key);
}
}
if !have_records.is_empty() {
let c2 = c.1.clone();
background_tasks.spawn(async move {
// If the connection dies this will either fail or time out in 1s. Usually these execute instantly due to
// write buffering but a short timeout prevents them from building up too much.
let _ = tokio::time::timeout(
announce_timeout,
c2.send_msg(MessageType::HaveRecords, have_records.as_slice(), now),
).await;
});
have_records = Vec::with_capacity(have_records_est_size);
}
}
}
to_announce.clear();
}
}
async fn listener_task_main(self: Arc<Self>, listener: TcpListener) {
loop {
let socket = listener.accept().await;
if socket.is_ok() {
let (stream, address) = socket.unwrap();
if self.host.allow(&address) {
let config = self.host.node_config();
if self.connections.lock().await.len() < config.max_connection_count || config.anchors.contains(&address) {
Self::connection_start(&self, address, stream, true).await;
}
}
}
}
}
/// Internal connection method.
///
/// Note that this does not add the address to connecting_to. Instead it's done by the caller
/// to avoid races and similar things. It is removed from connecting_to once the connection
/// either succeeds or fails.
async fn connect(self: &Arc<Self>, address: &SocketAddr, deadline: Instant) -> std::io::Result<bool> {
let f = async {
let stream = if address.is_ipv4() {
TcpSocket::new_v4()
} else {
TcpSocket::new_v6()
}?;
configure_tcp_socket(&stream)?;
stream.bind(self.bind_address.clone())?;
let stream = tokio::time::timeout_at(deadline, stream.connect(address.clone()));
self.host.on_connect_attempt(address);
let stream = stream.await;
if stream.is_ok() {
Ok(self.connection_start(address.clone(), stream.unwrap()?, false).await)
} else {
Err(std::io::Error::new(std::io::ErrorKind::TimedOut, "connect timed out"))
}
};
let r = f.await;
let _ = self.connecting_to.lock().await.remove(address);
r
}
/// Initialize and start a connection whether incoming or outgoing.
async fn connection_start(self: &Arc<Self>, address: SocketAddr, stream: TcpStream, inbound: bool) -> bool {
let mut ok = false;
let _ = self.connections.lock().await.entry(address.clone()).or_insert_with(|| {
ok = true;
let _ = stream.set_nodelay(false);
let (reader, writer) = stream.into_split();
let now = self.ms_monotonic();
let connection = Arc::new(Connection {
writer: Mutex::new(writer),
last_send_time: AtomicI64::new(now),
last_receive_time: AtomicI64::new(now),
info: std::sync::Mutex::new(RemoteNodeInfo {
name: String::new(),
contact: String::new(),
remote_address: address.clone(),
explicit_addresses: Vec::new(),
connect_time: ms_since_epoch(),
connect_instant: now,
inbound,
initialized: false,
}),
read_task: std::sync::Mutex::new(None),
announce_new_records: AtomicBool::new(false),
closed: AtomicBool::new(false),
});
let self2 = self.clone();
let c2 = connection.clone();
connection.read_task.lock().unwrap().replace(tokio::spawn(async move {
let result = self2.connection_io_task_main(&c2, address, reader).await;
c2.closed.store(true, Ordering::Relaxed);
result
}));
connection
});
ok
}
async fn connection_io_task_main(
self: Arc<Self>,
connection: &Arc<Connection>,
remote_address: SocketAddr,
mut reader: OwnedReadHalf,
) -> std::io::Result<()> {
const BUF_CHUNK_SIZE: usize = 4096;
const READ_BUF_INITIAL_SIZE: usize = 65536; // should be a multiple of BUF_CHUNK_SIZE
let mut write_buffer: Vec<u8> = Vec::with_capacity(BUF_CHUNK_SIZE);
let mut read_buffer: Vec<u8> = Vec::new();
read_buffer.resize(READ_BUF_INITIAL_SIZE, 0);
let config = self.host.node_config();
let mut anti_loopback_challenge_sent = [0_u8; 64];
let mut domain_challenge_sent = [0_u8; 64];
let mut auth_challenge_sent = [0_u8; 64];
self.host.get_secure_random(&mut anti_loopback_challenge_sent);
self.host.get_secure_random(&mut domain_challenge_sent);
self.host.get_secure_random(&mut auth_challenge_sent);
connection
.send_obj(
&mut write_buffer,
MessageType::Init,
&msg::Init {
anti_loopback_challenge: &anti_loopback_challenge_sent,
domain_challenge: &domain_challenge_sent,
auth_challenge: &auth_challenge_sent,
node_name: config.name.as_str(),
node_contact: config.contact.as_str(),
locally_bound_port: self.bind_address.port(),
explicit_ipv4: None,
explicit_ipv6: None,
},
self.ms_monotonic(),
)
.await?;
drop(config);
let max_message_size = ((D::MAX_VALUE_SIZE * 8) + (D::KEY_SIZE * 1024) + 65536) as u64; // sanity limit
let mut initialized = false;
let mut init_received = false;
let mut buffer_fill = 0_usize;
loop {
let message_type: MessageType;
let message_size: usize;
let header_size: usize;
let total_size: usize;
loop {
buffer_fill += reader.read(&mut read_buffer.as_mut_slice()[buffer_fill..]).await?;
if buffer_fill >= 2 {
// type and at least one byte of varint
let ms = varint::decode(&read_buffer.as_slice()[1..]);
if ms.1 > 0 {
// varint is all there and parsed correctly
if ms.0 > max_message_size {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "message too large"));
}
message_type = MessageType::from(*read_buffer.get(0).unwrap());
message_size = ms.0 as usize;
header_size = 1 + ms.1;
total_size = header_size + message_size;
if read_buffer.len() < total_size {
read_buffer.resize(((total_size / BUF_CHUNK_SIZE) + 1) * BUF_CHUNK_SIZE, 0);
}
while buffer_fill < total_size {
buffer_fill += reader.read(&mut read_buffer.as_mut_slice()[buffer_fill..]).await?;
}
break;
}
}
}
let message = &read_buffer.as_slice()[header_size..total_size];
let now = self.ms_monotonic();
connection.last_receive_time.store(now, Ordering::Relaxed);
match message_type {
MessageType::Nop => {}
MessageType::Init => {
if init_received {
return Err(std::io::Error::new(std::io::ErrorKind::Other, "duplicate init"));
}
init_received = true;
let msg: msg::Init = decode_msgpack(message)?;
let (anti_loopback_response, domain_challenge_response, auth_challenge_response) = {
let mut info = connection.info.lock().unwrap();
info.name = msg.node_name.to_string();
info.contact = msg.node_contact.to_string();
let _ = msg.explicit_ipv4.map(|pv4| {
info.explicit_addresses
.push(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from(pv4.ip), pv4.port)));
});
let _ = msg.explicit_ipv6.map(|pv6| {
info.explicit_addresses
.push(SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::from(pv6.ip), pv6.port, 0, 0)));
});
let info = info.clone();
let auth_challenge_response = self.host.authenticate(&info, msg.auth_challenge);
if auth_challenge_response.is_none() {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"authenticate() returned None, connection dropped",
));
}
let auth_challenge_response = auth_challenge_response.unwrap();
(
H::hmac_sha512(&self.anti_loopback_secret, msg.anti_loopback_challenge),
H::hmac_sha512(&H::sha512(&[self.host.node_config().domain.as_bytes()]), msg.domain_challenge),
auth_challenge_response,
)
};
connection
.send_obj(
&mut write_buffer,
MessageType::InitResponse,
&msg::InitResponse {
anti_loopback_response: &anti_loopback_response,
domain_response: &domain_challenge_response,
auth_response: &auth_challenge_response,
},
now,
)
.await?;
}
MessageType::InitResponse => {
let msg: msg::InitResponse = decode_msgpack(message)?;
let mut info = connection.info.lock().unwrap();
if info.initialized {
return Err(std::io::Error::new(std::io::ErrorKind::Other, "duplicate init response"));
}
if msg
.anti_loopback_response
.eq(&H::hmac_sha512(&self.anti_loopback_secret, &anti_loopback_challenge_sent))
{
return Err(std::io::Error::new(std::io::ErrorKind::Other, "rejected connection to self"));
}
if !msg.domain_response.eq(&H::hmac_sha512(
&H::sha512(&[self.host.node_config().domain.as_bytes()]),
&domain_challenge_sent,
)) {
return Err(std::io::Error::new(std::io::ErrorKind::Other, "domain mismatch"));
}
if !self
.host
.authenticate(&info, &auth_challenge_sent)
.map_or(false, |cr| msg.auth_response.eq(&cr))
{
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"challenge/response authentication failed",
));
}
initialized = true;
info.initialized = true;
let info = info.clone(); // also releases lock since info is replaced/destroyed
self.host.on_connect(&info);
}
// Handle messages other than INIT and INIT_RESPONSE after checking 'initialized' flag.
_ => {
if !initialized {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"init exchange must be completed before other messages are sent",
));
}
match message_type {
MessageType::HaveRecords => {}
MessageType::GetRecords => {}
MessageType::Record => {
let key = H::sha512(&[message]);
match self.datastore.store(&key, message).await {
StoreResult::Ok => {
let mut q = self.announce_queue.lock().await;
let ql = q.entry(key).or_insert_with(|| Vec::with_capacity(2));
if !ql.contains(&remote_address) {
ql.push(remote_address.clone());
}
}
StoreResult::Rejected => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("record rejected by data store: {}", to_hex_string(&key)),
));
}
_ => {}
}
}
MessageType::SyncRequest => {
let msg: msg::SyncRequest = decode_msgpack(message)?;
}
MessageType::Sync => {
let msg: msg::Sync = decode_msgpack(message)?;
}
_ => {}
}
}
}
read_buffer.copy_within(total_size..buffer_fill, 0);
buffer_fill -= total_size;
}
}
}
impl<D: DataStore + 'static, H: Host + 'static> Drop for NodeInternal<D, H> {
fn drop(&mut self) {
let _ = tokio::runtime::Handle::try_current().map_or_else(
|_| {
for (_, c) in self.connections.blocking_lock().drain() {
c.read_task.lock().unwrap().as_mut().map(|c| c.abort());
}
},
|h| {
let _ = h.block_on(async {
for (_, c) in self.connections.lock().await.drain() {
c.read_task.lock().unwrap().as_mut().map(|c| c.abort());
}
});
},
);
}
}
struct Connection {
writer: Mutex<OwnedWriteHalf>,
last_send_time: AtomicI64,
last_receive_time: AtomicI64,
info: std::sync::Mutex<RemoteNodeInfo>,
read_task: std::sync::Mutex<Option<JoinHandle<std::io::Result<()>>>>,
announce_new_records: AtomicBool,
closed: AtomicBool,
}
impl Connection {
async fn send_msg(&self, message_type: MessageType, data: &[u8], now: i64) -> std::io::Result<()> {
let mut header: [u8; 16] = unsafe { MaybeUninit::uninit().assume_init() };
header[0] = message_type as u8;
let header_size = 1 + varint::encode(&mut header[1..], data.len() as u64);
if self
.writer
.lock()
.await
.write_vectored(&[IoSlice::new(&header[0..header_size]), IoSlice::new(data)])
.await?
== (data.len() + header_size)
{
self.last_send_time.store(now, Ordering::Relaxed);
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "write error"))
}
}
async fn send_obj<O: Serialize>(&self, write_buf: &mut Vec<u8>, message_type: MessageType, obj: &O, now: i64) -> std::io::Result<()> {
write_buf.clear();
if rmp_serde::encode::write_named(write_buf, obj).is_ok() {
self.send_msg(message_type, write_buf.as_slice(), now).await
} else {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"serialize failure (internal error)",
))
}
}
}
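For reference, a hedged sketch of the framing that send_msg() and the read loop in connection_io_task_main() above agree on: one message type byte, a varint payload length, then the payload. frame_example() is illustrative and not part of the original module.

fn frame_example(message_type: MessageType, payload: &[u8]) -> Vec<u8> {
    // 1 byte for the type plus up to VARINT_MAX_SIZE_BYTES for the length prefix.
    let mut header = [0u8; 1 + varint::VARINT_MAX_SIZE_BYTES];
    header[0] = message_type as u8;
    let header_size = 1 + varint::encode(&mut header[1..], payload.len() as u64);
    let mut frame = Vec::with_capacity(header_size + payload.len());
    frame.extend_from_slice(&header[..header_size]);
    frame.extend_from_slice(payload);
    frame
}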


@@ -1,180 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
#[derive(Clone, Copy, Eq, PartialEq)]
#[repr(u8)]
pub enum MessageType {
/// No operation, payload ignored.
Nop = 0_u8,
/// msg::Init (msgpack)
Init = 1_u8,
/// msg::InitResponse (msgpack)
InitResponse = 2_u8,
/// <full record key>[<full record key>...]
HaveRecords = 3_u8,
/// <u8 length of each key prefix in bytes>[<key>...]
GetRecords = 4_u8,
/// <record>
Record = 5_u8,
/// msg::SyncRequest (msgpack)
SyncRequest = 6_u8,
/// msg::Sync (msgpack)
Sync = 7_u8,
}
const MESSAGE_TYPE_MAX: u8 = 7;
impl From<u8> for MessageType {
/// Get a type from a byte, returning the Nop type if the byte is out of range.
#[inline(always)]
fn from(b: u8) -> Self {
if b <= MESSAGE_TYPE_MAX {
unsafe { std::mem::transmute(b) }
} else {
Self::Nop
}
}
}
impl MessageType {
#[allow(unused)]
pub fn name(&self) -> &'static str {
match *self {
Self::Nop => "NOP",
Self::Init => "INIT",
Self::InitResponse => "INIT_RESPONSE",
Self::HaveRecords => "HAVE_RECORDS",
Self::GetRecords => "GET_RECORDS",
Self::Record => "RECORD",
Self::SyncRequest => "SYNC_REQUEST",
Self::Sync => "SYNC",
}
}
}
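A small hedged illustration of the fallback described above; message_type_fallback_example() is not part of the original file.

fn message_type_fallback_example() {
    assert_eq!(MessageType::from(5_u8).name(), "RECORD");
    // Bytes outside the defined range decay to Nop instead of failing.
    assert_eq!(MessageType::from(0xff_u8).name(), "NOP");
}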
/// Msgpack serializable message types.
/// Some that are frequently transferred use shortened names to save bandwidth.
pub mod msg {
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize)]
pub struct IPv4 {
pub ip: [u8; 4],
pub port: u16,
}
#[derive(Serialize, Deserialize)]
pub struct IPv6 {
pub ip: [u8; 16],
pub port: u16,
}
#[derive(Serialize, Deserialize)]
pub struct Init<'a> {
/// A random challenge to be hashed with a secret to detect and drop connections to self.
#[serde(with = "serde_bytes")]
pub anti_loopback_challenge: &'a [u8],
/// A random challenge for checking the data set domain.
#[serde(with = "serde_bytes")]
pub domain_challenge: &'a [u8],
/// A random challenge for login/authentication.
#[serde(with = "serde_bytes")]
pub auth_challenge: &'a [u8],
/// Optional name to advertise for this node.
pub node_name: &'a str,
/// Optional contact information for this node, such as a URL or an e-mail address.
pub node_contact: &'a str,
/// Port to which this node has locally bound.
/// This is used to try to auto-detect whether a NAT is in the way.
pub locally_bound_port: u16,
/// An IPv4 address where this node can be reached.
/// If both explicit_ipv4 and explicit_ipv6 are omitted the physical source IP:port may be used.
pub explicit_ipv4: Option<IPv4>,
/// An IPv6 address where this node can be reached.
/// If both explicit_ipv4 and explicit_ipv6 are omitted the physical source IP:port may be used.
pub explicit_ipv6: Option<IPv6>,
}
#[derive(Serialize, Deserialize)]
pub struct InitResponse<'a> {
/// HMAC-SHA512(local secret, anti_loopback_challenge) to detect and drop loops.
#[serde(with = "serde_bytes")]
pub anti_loopback_response: &'a [u8],
/// HMAC-SHA512(SHA512(domain), domain_challenge) to check that the data set domain matches.
#[serde(with = "serde_bytes")]
pub domain_response: &'a [u8],
/// HMAC-SHA512(secret, challenge) for authentication. (If auth is not enabled, an all-zero secret is used.)
#[serde(with = "serde_bytes")]
pub auth_response: &'a [u8],
}
#[derive(Serialize, Deserialize)]
pub struct SyncRequest<'a> {
/// Starting range to query, padded with zeroes if shorter than KEY_SIZE.
#[serde(with = "serde_bytes")]
#[serde(rename = "s")]
pub range_start: &'a [u8],
/// Ending range to query, padded with 0xff if shorter than KEY_SIZE.
#[serde(with = "serde_bytes")]
#[serde(rename = "e")]
pub range_end: &'a [u8],
/// Data-store-specific subset selector indicating what subset of items is desired.
#[serde(with = "serde_bytes")]
#[serde(rename = "q")]
pub subset: Option<&'a [u8]>,
}
#[derive(Serialize, Deserialize)]
pub struct Sync<'a> {
/// Starting range summarized, padded with zeroes if shorter than KEY_SIZE.
#[serde(with = "serde_bytes")]
#[serde(rename = "s")]
pub range_start: &'a [u8],
/// Ending range summarized, padded with 0xff if shorter than KEY_SIZE.
#[serde(with = "serde_bytes")]
#[serde(rename = "e")]
pub range_end: &'a [u8],
/// Data-store-specific subset selector indicating what subset of items was included.
#[serde(with = "serde_bytes")]
#[serde(rename = "q")]
pub subset: Option<&'a [u8]>,
/// Number of buckets in IBLT
#[serde(rename = "b")]
pub iblt_buckets: usize,
/// Number of bytes in each IBLT item (key prefix)
#[serde(rename = "l")]
pub iblt_item_bytes: usize,
/// Set summary for keys under prefix within subset
#[serde(with = "serde_bytes")]
#[serde(rename = "i")]
pub iblt: &'a [u8],
}
}
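A hedged sketch of how one of these structs becomes bytes, mirroring the rmp_serde::encode::write_named() call used by Connection::send_obj() in node.rs; the all-zero buffers are placeholders.

fn encode_init_response_example() -> Vec<u8> {
    let zeros = [0u8; 64];
    let reply = msg::InitResponse {
        anti_loopback_response: &zeros,
        domain_response: &zeros,
        auth_response: &zeros,
    };
    let mut buf = Vec::new();
    // Named encoding produces msgpack maps keyed by the (possibly renamed) field names,
    // which rmp_serde::from_slice() on the receiving side decodes back into the struct.
    rmp_serde::encode::write_named(&mut buf, &reply).expect("msgpack encoding should not fail");
    buf
}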


@@ -1,129 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::collections::HashMap;
use std::future::Future;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::SystemTime;
use tokio::task::JoinHandle;
/// Get the real time clock in milliseconds since Unix epoch.
pub fn ms_since_epoch() -> i64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_millis() as i64
}
/// Encode a byte slice to a hexadecimal string.
pub fn to_hex_string(b: &[u8]) -> String {
const HEX_CHARS: [u8; 16] = [
b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'a', b'b', b'c', b'd', b'e', b'f',
];
let mut s = String::new();
s.reserve(b.len() * 2);
for c in b {
let x = *c as usize;
s.push(HEX_CHARS[x >> 4] as char);
s.push(HEX_CHARS[x & 0xf] as char);
}
s
}
#[inline(always)]
pub fn xorshift64(mut x: u64) -> u64 {
x ^= x.wrapping_shl(13);
x ^= x.wrapping_shr(7);
x ^= x.wrapping_shl(17);
x
}
#[inline(always)]
pub fn splitmix64(mut x: u64) -> u64 {
x ^= x.wrapping_shr(30);
x = x.wrapping_mul(0xbf58476d1ce4e5b9);
x ^= x.wrapping_shr(27);
x = x.wrapping_mul(0x94d049bb133111eb);
x ^= x.wrapping_shr(31);
x
}
/*
#[inline(always)]
pub fn splitmix64_inverse(mut x: u64) -> u64 {
x ^= x.wrapping_shr(31) ^ x.wrapping_shr(62);
x = x.wrapping_mul(0x319642b2d24d8ec3);
x ^= x.wrapping_shr(27) ^ x.wrapping_shr(54);
x = x.wrapping_mul(0x96de1b173f119089);
x ^= x.wrapping_shr(30) ^ x.wrapping_shr(60);
x
}
*/
static mut RANDOM_STATE_0: u64 = 0;
static mut RANDOM_STATE_1: u64 = 0;
/// Get a non-cryptographic pseudorandom number.
pub fn random() -> u64 {
let (mut s0, mut s1) = unsafe { (RANDOM_STATE_0, RANDOM_STATE_1) };
if s0 == 0 {
s0 = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos() as u64;
}
if s1 == 0 {
s1 = splitmix64(std::process::id() as u64);
}
let s1_new = xorshift64(s1);
s0 = splitmix64(s0.wrapping_add(s1));
s1 = s1_new;
unsafe {
RANDOM_STATE_0 = s0;
RANDOM_STATE_1 = s1;
};
s0
}
/// Wrapper for tokio::spawn() that aborts tasks not yet completed when it is dropped.
pub struct AsyncTaskReaper {
ctr: AtomicUsize,
handles: Arc<std::sync::Mutex<HashMap<usize, JoinHandle<()>>>>,
}
impl AsyncTaskReaper {
pub fn new() -> Self {
Self {
ctr: AtomicUsize::new(0),
handles: Arc::new(std::sync::Mutex::new(HashMap::new())),
}
}
/// Spawn a new task.
///
/// Note that currently any task output is ignored. This is for fire and forget
/// background tasks that you want to be collected on loss of scope.
pub fn spawn<F: Future + Send + 'static>(&self, future: F) {
let id = self.ctr.fetch_add(1, Ordering::Relaxed);
let handles = self.handles.clone();
self.handles.lock().unwrap().insert(
id,
tokio::spawn(async move {
let _ = future.await;
let _ = handles.lock().unwrap().remove(&id);
}),
);
}
}
impl Drop for AsyncTaskReaper {
fn drop(&mut self) {
for (_, h) in self.handles.lock().unwrap().iter() {
h.abort();
}
}
}
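A hedged usage sketch of AsyncTaskReaper; the sleep duration is arbitrary. This scoping is how the node's housekeeping and announce loops keep their helper tasks from outliving them.

async fn reaper_scope_example() {
    let reaper = AsyncTaskReaper::new();
    reaper.spawn(async {
        // A fire-and-forget background job; its output is ignored.
        tokio::time::sleep(std::time::Duration::from_secs(3600)).await;
    });
    // ... other work ...
} // `reaper` is dropped here and any task still running is aborted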


@@ -1,45 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c)2022 ZeroTier, Inc.
* https://www.zerotier.com/
*/
#[allow(unused)]
pub const VARINT_MAX_SIZE_BYTES: usize = 10;
pub fn encode(b: &mut [u8], mut v: u64) -> usize {
let mut i = 0;
loop {
if v > 0x7f {
b[i] = (v as u8) & 0x7f;
i += 1;
v = v.wrapping_shr(7);
} else {
b[i] = (v as u8) | 0x80;
i += 1;
break;
}
}
i
}
pub fn decode(b: &[u8]) -> (u64, usize) {
let mut v = 0_u64;
let mut pos = 0;
let mut l = 0;
let bl = b.len();
while l < bl {
let x = b[l];
l += 1;
if x <= 0x7f {
v |= (x as u64).wrapping_shl(pos);
pos += 7;
} else {
v |= ((x & 0x7f) as u64).wrapping_shl(pos);
return (v, l);
}
}
return (0, 0);
}
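A hedged worked example of this scheme; note that the high bit marks the final byte, the opposite of LEB128's continuation-bit convention.

fn varint_example() {
    let mut buf = [0u8; VARINT_MAX_SIZE_BYTES];
    let n = encode(&mut buf, 300);
    // 300 = 0b1_0010_1100: the low seven bits (0x2c) come first, then the remaining 0x02 with the 0x80 end marker.
    assert_eq!(&buf[..n], &[0x2c_u8, 0x82][..]);
    assert_eq!(decode(&buf[..n]), (300, 2));
}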


@@ -11,11 +11,10 @@ path = "src/main.rs"
zerotier-crypto = { path = "../crypto" }
zerotier-utils = { path = "../utils", features = ["tokio"] }
zerotier-network-hypervisor = { path = "../network-hypervisor" }
zerotier-vl1-service = { path = "../vl1-service" }
zerotier-service = { path = "../service" }
async-trait = "^0"
serde = { version = "^1", features = ["derive"], default-features = false }
serde_json = { version = "^1", features = ["std"], default-features = false }
serde_yaml = "^0"
clap = { version = "^3", features = ["std", "suggestions"], default-features = false }
notify = { version = "^5", features = ["macos_fsevent"], default-features = false }
tokio-postgres = "^0"


@@ -1,104 +0,0 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::collections::HashMap;
use std::error::Error;
use std::mem::replace;
use std::sync::{Mutex, RwLock};
use crate::database::Database;
use crate::model::{Member, Network};
use zerotier_network_hypervisor::vl1::Address;
use zerotier_network_hypervisor::vl2::NetworkId;
/// Network and member cache used by database implementations to implement change detection.
///
/// Note: the database must ensure that calls to on_X_updated() methods are only performed
/// when a potentially newer version is committed. No-op calls when nothing has changed are
/// okay but calls out of order will result in extra updated events being generated for
/// movements forward and backward in time. Calls must be temporally ordered.
pub struct Cache {
by_nwid: RwLock<HashMap<NetworkId, (Network, Mutex<HashMap<Address, Member>>)>>,
}
impl Cache {
pub fn new() -> Self {
Self { by_nwid: RwLock::new(HashMap::new()) }
}
/// Load (or reload) the entire cache from a database.
pub async fn load_all<DatabaseImpl: Database>(&self, db: &DatabaseImpl) -> Result<(), Box<dyn Error + Send + Sync>> {
let mut by_nwid = self.by_nwid.write().unwrap();
by_nwid.clear();
let networks = db.list_networks().await?;
for network_id in networks {
if let Some(network) = db.get_network(network_id).await? {
let network_entry = by_nwid.entry(network_id).or_insert_with(|| (network, Mutex::new(HashMap::new())));
let mut by_node_id = network_entry.1.lock().unwrap();
let members = db.list_members(network_id).await?;
for node_id in members {
if let Some(member) = db.get_member(network_id, node_id).await? {
let _ = by_node_id.insert(node_id, member);
}
}
}
}
Ok(())
}
/// Update a network if changed, returning whether or not any update was made and the old version if any.
/// A value of (true, None) indicates that there was no network by that ID in which case it is added.
pub fn on_network_updated(&self, network: Network) -> (bool, Option<Network>) {
let mut by_nwid = self.by_nwid.write().unwrap();
if let Some(prev_network) = by_nwid.get_mut(&network.id) {
if !prev_network.0.eq(&network) {
(true, Some(replace(&mut prev_network.0, network)))
} else {
(false, None)
}
} else {
let _ = by_nwid.insert(network.id, (network.clone(), Mutex::new(HashMap::new())));
(true, None)
}
}
/// Update a member if changed, returning whether or not any update was made and the old version if any.
/// A value of (true, None) indicates that there was no member with that ID. If there is no network with
/// the member's network ID (false, None) is returned and no action is taken.
pub fn on_member_updated(&self, member: Member) -> (bool, Option<Member>) {
let by_nwid = self.by_nwid.read().unwrap();
if let Some(network) = by_nwid.get(&member.network_id) {
let mut by_node_id = network.1.lock().unwrap();
if let Some(prev_member) = by_node_id.get_mut(&member.node_id) {
if !member.eq(prev_member) {
(true, Some(replace(prev_member, member)))
} else {
(false, None)
}
} else {
let _ = by_node_id.insert(member.node_id, member);
(true, None)
}
} else {
(false, None)
}
}
/// Delete a network, returning it if it existed.
pub fn on_network_deleted(&self, network_id: NetworkId) -> Option<(Network, Vec<Member>)> {
let mut by_nwid = self.by_nwid.write().unwrap();
let network = by_nwid.remove(&network_id)?;
let mut members = network.1.lock().unwrap();
Some((network.0, members.drain().map(|(_, v)| v).collect()))
}
/// Delete a member, returning it if it existed.
pub fn on_member_deleted(&self, network_id: NetworkId, node_id: Address) -> Option<Member> {
let by_nwid = self.by_nwid.read().unwrap();
let network = by_nwid.get(&network_id)?;
let mut members = network.1.lock().unwrap();
members.remove(&node_id)
}
}
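
A short sketch of how a database implementation would drive the change-detection methods above after committing a newer member record; the function name and change_sender are illustrative, standing in for whatever broadcast channel the implementation uses:

use crate::cache::Cache;
use crate::database::Change;
use crate::model::Member;
use zerotier_utils::tokio::sync::broadcast::Sender;

fn publish_member_update(cache: &Cache, change_sender: &Sender<Change>, member: Member) {
    match cache.on_member_updated(member.clone()) {
        // The member existed and differs from the cached copy: emit a change event.
        (true, Some(old)) => { let _ = change_sender.send(Change::MemberChanged(old, member)); }
        // The member was not cached yet: emit a creation event.
        (true, None) => { let _ = change_sender.send(Change::MemberCreated(member)); }
        // No-op call: nothing actually changed, so nothing is broadcast.
        (false, _) => {}
    }
}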

View file

@ -6,23 +6,21 @@ use std::sync::{Arc, Mutex, RwLock, Weak};
use tokio::time::{Duration, Instant};
use zerotier_crypto::secure_eq;
use zerotier_network_hypervisor::protocol;
use zerotier_network_hypervisor::protocol::{PacketBuffer, DEFAULT_MULTICAST_LIMIT, ZEROTIER_VIRTUAL_NETWORK_DEFAULT_MTU};
use zerotier_network_hypervisor::protocol::PacketBuffer;
use zerotier_network_hypervisor::vl1::identity::{Identity, IdentitySecret};
use zerotier_network_hypervisor::vl1::*;
use zerotier_network_hypervisor::vl2;
use zerotier_network_hypervisor::vl2::multicastauthority::MulticastAuthority;
use zerotier_network_hypervisor::vl2::v1::networkconfig::*;
use zerotier_network_hypervisor::vl2::v1::Revocation;
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_utils::blob::Blob;
use zerotier_service::vl1::VL1Service;
use zerotier_utils::buffer::OutOfBoundsError;
use zerotier_utils::cast::cast_ref;
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::reaper::Reaper;
use zerotier_utils::tokio;
use zerotier_utils::{ms_monotonic, ms_since_epoch};
use zerotier_vl1_service::VL1Service;
use crate::database::*;
use crate::model::{AuthenticationResult, Member, RequestLogItem, CREDENTIAL_WINDOW_SIZE_DEFAULT};
@ -33,11 +31,10 @@ const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
/// ZeroTier VL2 network controller packet handler, answers VL2 netconf queries.
pub struct Controller {
self_ref: Weak<Self>,
service: RwLock<Weak<VL1Service<Self>>>,
reaper: Reaper,
runtime: tokio::runtime::Handle,
database: Arc<dyn Database>,
local_identity: Valid<Identity>,
local_identity: IdentitySecret,
/// Handler for MULTICAST_LIKE and MULTICAST_GATHER messages.
multicast_authority: MulticastAuthority,
@ -47,49 +44,40 @@ pub struct Controller {
/// Recently authorized network members and when that authorization expires (in monotonic ticks).
    /// Note that this is not and should not be used for real authentication; it is only used to gate access to multicast info.
recently_authorized: RwLock<HashMap<[u8; Identity::FINGERPRINT_SIZE], HashMap<NetworkId, i64>>>,
recently_authorized: RwLock<HashMap<Address, HashMap<NetworkId, i64>>>,
}
impl Controller {
    /// Start an inner protocol handler that answers ZeroTier VL2 network controller queries.
    ///
    /// The start() method must be called once the service this will run within has been created.
pub async fn new(database: Arc<dyn Database>, runtime: tokio::runtime::Handle) -> Result<Arc<Self>, Box<dyn Error>> {
if let Some(local_identity) = database.load_node_identity() {
assert!(local_identity.secret.is_some());
Ok(Arc::new_cyclic(|r| Self {
self_ref: r.clone(),
service: RwLock::new(Weak::default()),
reaper: Reaper::new(&runtime),
runtime,
database: database.clone(),
local_identity: local_identity,
multicast_authority: MulticastAuthority::new(),
daemons: Mutex::new(Vec::with_capacity(2)),
recently_authorized: RwLock::new(HashMap::new()),
}))
} else {
Err(Box::new(InvalidParameterError("local controller's identity not readable by database")))
}
pub async fn new(
runtime: tokio::runtime::Handle,
local_identity: IdentitySecret,
database: Arc<dyn Database>,
) -> Result<Arc<Self>, Box<dyn Error>> {
Ok(Arc::new_cyclic(|self_ref| Self {
self_ref: self_ref.clone(),
reaper: Reaper::new(&runtime),
runtime,
database: database.clone(),
local_identity,
multicast_authority: MulticastAuthority::new(),
daemons: Mutex::new(Vec::with_capacity(2)),
recently_authorized: RwLock::new(HashMap::new()),
}))
}
/// Set the service and HostSystem implementation for this controller and start daemons.
/// Start this controller's background tasks.
///
/// This must be called once the service that uses this handler is up or the controller
/// won't actually do anything. The controller holds a weak reference to VL1Service so
/// be sure it's not dropped.
pub async fn start(&self, service: &Arc<VL1Service<Self>>) {
*self.service.write().unwrap() = Arc::downgrade(service);
// Create database change listener.
/// Note that the controller only holds a Weak<VL1Service<Self>> to avoid circular references.
pub async fn start(&self, app: &Arc<VL1Service<Self>>) {
if let Some(cw) = self.database.changes().await.map(|mut ch| {
let self2 = self.self_ref.clone();
let controller_weak = self.self_ref.clone();
let app_weak = Arc::downgrade(app);
self.runtime.spawn(async move {
loop {
if let Ok(change) = ch.recv().await {
if let Some(self2) = self2.upgrade() {
self2.reaper.add(
self2.runtime.spawn(self2.clone().handle_change_notification(change)),
if let (Some(controller), Some(app)) = (controller_weak.upgrade(), app_weak.upgrade()) {
controller.reaper.add(
controller.runtime.spawn(controller.clone().handle_change_notification(app, change)),
Instant::now().checked_add(REQUEST_TIMEOUT).unwrap(),
);
} else {
@ -102,17 +90,16 @@ impl Controller {
self.daemons.lock().unwrap().push(cw);
}
// Create background task to expire multicast subscriptions and recent authorizations.
let self2 = self.self_ref.clone();
let controller_weak = self.self_ref.clone();
self.daemons.lock().unwrap().push(self.runtime.spawn(async move {
let sleep_duration = Duration::from_millis((protocol::VL2_DEFAULT_MULTICAST_LIKE_EXPIRE / 2).min(2500) as u64);
loop {
tokio::time::sleep(sleep_duration).await;
if let Some(self2) = self2.upgrade() {
if let Some(controller) = controller_weak.upgrade() {
let time_ticks = ms_monotonic();
self2.multicast_authority.clean(time_ticks);
self2.recently_authorized.write().unwrap().retain(|_, by_network| {
controller.multicast_authority.clean(time_ticks);
controller.recently_authorized.write().unwrap().retain(|_, by_network| {
by_network.retain(|_, timeout| *timeout > time_ticks);
!by_network.is_empty()
});
@ -124,7 +111,7 @@ impl Controller {
}
/// Launched as a task when the DB informs us of a change.
async fn handle_change_notification(self: Arc<Self>, change: Change) {
async fn handle_change_notification(self: Arc<Self>, app: Arc<VL1Service<Self>>, change: Change) {
match change {
Change::NetworkCreated(_) => {}
Change::NetworkChanged(_, _) => {}
@ -132,15 +119,15 @@ impl Controller {
Change::MemberCreated(_) => {}
Change::MemberChanged(old_member, new_member) => {
if !new_member.authorized() && old_member.authorized() {
self.deauthorize_member(&new_member).await;
self.deauthorize_member(&app, &new_member).await;
}
}
Change::MemberDeleted(member) => self.deauthorize_member(&member).await,
Change::MemberDeleted(member) => self.deauthorize_member(&app, &member).await,
}
}
/// Compose and send network configuration packet (either V1 or V2)
fn send_network_config<Application: ApplicationLayer + ?Sized>(
fn send_network_config<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
@ -163,7 +150,7 @@ impl Controller {
if peer.is_v2() {
todo!()
} else {
let config_data = if let Some(config_dict) = config.v1_proto_to_dictionary(&self.local_identity) {
let config_data = if let Some(config_dict) = config.v1_proto_to_dictionary(&self.local_identity.public) {
config_dict.to_bytes()
} else {
eprintln!("WARNING: unexpected error serializing network config into V1 format dictionary");
@ -174,11 +161,9 @@ impl Controller {
return Err(OutOfBoundsError); // abort
}
packet.append_u64(config.network_id.into())?;
packet.append_u64(config.network_id.to_legacy_u64())?;
packet.append_u16(config_data.len() as u16)?;
packet.append_bytes(config_data.as_slice())?;
// TODO: for V1 we may need to introduce use of the chunking mechanism for large configs.
}
let new_payload_len = protocol::compress(&mut packet.as_bytes_mut()[payload_start..]);
@ -189,51 +174,51 @@ impl Controller {
}
/// Send one or more revocation object(s) to a peer. The provided vector is drained.
fn send_revocations(&self, peer: &Peer<VL1Service<Self>>, revocations: &mut Vec<Revocation>) {
if let Some(host_system) = self.service.read().unwrap().upgrade() {
let time_ticks = ms_monotonic();
while !revocations.is_empty() {
let send_count = revocations.len().min(protocol::UDP_DEFAULT_MTU / 256);
debug_assert!(send_count <= (u16::MAX as usize));
peer.send(
host_system.as_ref(),
host_system.node(),
None,
time_ticks,
|packet| -> Result<(), OutOfBoundsError> {
let payload_start = packet.len();
fn send_revocations(&self, app: &Arc<VL1Service<Self>>, peer: &Peer<VL1Service<Self>>, revocations: &mut Vec<Revocation>) {
let time_ticks = ms_monotonic();
while !revocations.is_empty() {
let send_count = revocations.len().min(protocol::UDP_DEFAULT_MTU / 256);
debug_assert!(send_count <= (u16::MAX as usize));
peer.send(app.as_ref(), &app.node, None, time_ticks, |packet| -> Result<(), OutOfBoundsError> {
let payload_start = packet.len();
packet.append_u8(protocol::message_type::VL2_NETWORK_CREDENTIALS)?;
packet.append_u8(0)?;
packet.append_u16(0)?;
packet.append_u16(0)?;
packet.append_u16(send_count as u16)?;
for _ in 0..send_count {
let r = revocations.pop().unwrap();
packet.append_bytes(r.v1_proto_to_bytes(self.local_identity.address).as_bytes())?;
}
packet.append_u16(0)?;
packet.append_u8(protocol::message_type::VL2_NETWORK_CREDENTIALS)?;
packet.append_u8(0)?;
packet.append_u16(0)?;
packet.append_u16(0)?;
packet.append_u16(send_count as u16)?;
for _ in 0..send_count {
let r = revocations.pop().unwrap();
packet.append_bytes(r.v1_proto_to_bytes(&self.local_identity.public.address).as_bytes())?;
}
packet.append_u16(0)?;
let new_payload_len = protocol::compress(&mut packet.as_bytes_mut()[payload_start..]);
packet.set_size(payload_start + new_payload_len);
let new_payload_len = protocol::compress(&mut packet.as_bytes_mut()[payload_start..]);
packet.set_size(payload_start + new_payload_len);
Ok(())
},
);
}
Ok(())
});
}
}
async fn deauthorize_member(&self, member: &Member) {
async fn deauthorize_member(&self, app: &Arc<VL1Service<Self>>, member: &Member) {
let time_clock = ms_since_epoch();
let mut revocations = Vec::with_capacity(1);
if let Ok(all_network_members) = self.database.list_members(member.network_id).await {
for m in all_network_members.iter() {
if member.node_id != *m {
if let Some(peer) = self.service.read().unwrap().upgrade().and_then(|s| s.node().peer(*m)) {
if let Ok(all_network_members) = self.database.list_members(&member.network_id).await {
for other_member in all_network_members.iter() {
if member.node_id != *other_member && member.node_id.is_complete() && other_member.is_complete() {
let node_id = member.node_id.as_complete().unwrap();
if let Some(peer) = app.node.peer(node_id) {
revocations.clear();
Revocation::new(member.network_id, time_clock, member.node_id, *m, &self.local_identity, false).map(|r| revocations.push(r));
self.send_revocations(&peer, &mut revocations);
revocations.push(Revocation::new(
&member.network_id,
time_clock,
node_id,
other_member.as_complete().unwrap(),
&self.local_identity,
false,
));
self.send_revocations(&app, &peer, &mut revocations);
}
}
}
@ -250,56 +235,18 @@ impl Controller {
async fn authorize(
self: &Arc<Self>,
source_identity: &Valid<Identity>,
network_id: NetworkId,
network_id: &NetworkId,
time_clock: i64,
) -> Result<(AuthenticationResult, Option<NetworkConfig>), Box<dyn Error + Send + Sync>> {
let network = self.database.get_network(network_id).await?;
) -> Result<(AuthenticationResult, Option<Box<NetworkConfig>>), Box<dyn Error + Send + Sync>> {
let network = self.database.get_network(&network_id).await?;
if network.is_none() {
return Ok((AuthenticationResult::Rejected, None));
}
let network = network.unwrap();
let mut member = self.database.get_member(network_id, source_identity.address).await?;
let mut member = self.database.get_member(&network_id, &source_identity.address.to_partial()).await?;
let mut member_changed = false;
// SECURITY WARNING: this is a critical code path where members of networks are authorized.
// Read and modify with extreme care.
// If we have a member object and a pinned identity, check to make sure it matches. Also accept
// upgraded identities to replace old versions if they are properly formed and their signatures
// all check out (see Identity::is_upgraded_from()). Note that we do not pin the identity here
// if it is unspecified. That's not done until we fully authorize this member, since we don't
// want to have a way to somehow pin the wrong person's identity (if someone manages to somehow
// create a colliding identity and get it to us).
if let Some(member) = member.as_mut() {
if let Some(pinned_identity) = member.identity.as_ref() {
if !pinned_identity.eq(&source_identity) {
if source_identity.is_upgraded_from(pinned_identity) {
// Upgrade identity types if we have a V2 identity upgraded from a V1 identity.
let _ = member.identity.replace(source_identity.clone_without_secret());
let _ = member.identity_fingerprint.replace(Blob::from(source_identity.fingerprint));
member_changed = true;
} else {
return Ok((AuthenticationResult::RejectedIdentityMismatch, None));
}
}
}
if let Some(pinned_fingerprint) = member.identity_fingerprint.as_ref() {
if secure_eq(pinned_fingerprint.as_bytes(), &source_identity.fingerprint) {
if member.identity.is_none() {
// Learn the FULL identity if the fingerprint is pinned and they match. This
// lets us add members by address/fingerprint with full SHA384 identity
// verification instead of just by short address.
let _ = member.identity.replace(source_identity.clone_without_secret());
member_changed = true;
}
} else {
return Ok((AuthenticationResult::RejectedIdentityMismatch, None));
}
}
}
let mut authentication_result = AuthenticationResult::Rejected;
// This is the main "authorized" state of the member record. If it is true then the member is allowed,
@ -311,7 +258,7 @@ impl Controller {
if !member_authorized {
if member.is_none() {
if network.learn_members.unwrap_or(true) {
let _ = member.insert(Member::new_with_identity(source_identity.as_ref().clone(), network_id));
let _ = member.insert(Member::new(source_identity.clone(), network_id.clone()));
member_changed = true;
} else {
return Ok((AuthenticationResult::Rejected, None));
@ -347,45 +294,35 @@ impl Controller {
let member_authorized = member_authorized;
let authentication_result = authentication_result;
// Pin full address and full identity if these aren't pinned already.
if !member.node_id.is_complete() {
member.node_id = source_identity.address.to_partial();
member_changed = true;
}
if member.identity.is_none() {
let _ = member.identity.insert(source_identity.clone().remove_typestate());
member_changed = true;
}
// Generate network configuration if the member is authorized.
let network_config = if authentication_result.approved() {
// We should not be able to make it here if this is still false.
assert!(member_authorized);
// Pin member identity if not pinned already. This is analogous to SSH "trust on first use" except
// that the ZeroTier address is akin to the host name. Once we've seen the full identity once then
// it becomes truly "impossible" to collide the address. (Unless you can break ECC and SHA384.)
if member.identity.is_none() {
let _ = member.identity.replace(source_identity.clone_without_secret());
debug_assert!(member.identity_fingerprint.is_none());
let _ = member.identity_fingerprint.replace(Blob::from(source_identity.fingerprint));
member_changed = true;
}
// Make sure these agree. It should be impossible to end up with a member that's authorized and
// whose identity and identity fingerprint don't match.
if !secure_eq(
&member.identity.as_ref().unwrap().fingerprint,
member.identity_fingerprint.as_ref().unwrap().as_bytes(),
) {
debug_assert!(false);
return Ok((AuthenticationResult::RejectedDueToError, None));
}
// Figure out TTL for credentials (time window in V1).
let credential_ttl = network.credential_ttl.unwrap_or(CREDENTIAL_WINDOW_SIZE_DEFAULT);
// Check and if necessary auto-assign static IPs for this member.
member_changed |= network.assign_ip_addresses(self.database.as_ref(), &mut member).await;
let mut nc = NetworkConfig::new(network_id, source_identity.address);
let mut nc = Box::new(NetworkConfig::new(network_id.clone(), source_identity.address.clone()));
nc.name = network.name.clone();
nc.private = network.private;
nc.timestamp = time_clock;
nc.multicast_limit = network.multicast_limit.unwrap_or(DEFAULT_MULTICAST_LIMIT as u32);
nc.multicast_limit = network.multicast_limit.unwrap_or(protocol::v1::DEFAULT_MULTICAST_LIMIT as u32);
nc.multicast_like_expire = Some(protocol::VL2_DEFAULT_MULTICAST_LIKE_EXPIRE as u32);
nc.mtu = network.mtu.unwrap_or(ZEROTIER_VIRTUAL_NETWORK_DEFAULT_MTU as u16);
nc.mtu = network.mtu.unwrap_or(protocol::ZEROTIER_VIRTUAL_NETWORK_DEFAULT_MTU as u16);
nc.routes = network.ip_routes.iter().cloned().collect();
nc.static_ips = member.ip_assignments.iter().cloned().collect();
@ -395,7 +332,7 @@ impl Controller {
// connectivity between valid members.
if let Ok(mut deauthed_members_still_in_window) = self
.database
.list_members_deauthorized_after(network.id, time_clock - (credential_ttl as i64))
.list_members_deauthorized_after(&network.id, time_clock - (credential_ttl as i64))
.await
{
if !deauthed_members_still_in_window.is_empty() {
@ -403,7 +340,7 @@ impl Controller {
nc.rules.reserve(deauthed_members_still_in_window.len() + 1);
let mut or = false;
for dead in deauthed_members_still_in_window.iter() {
nc.rules.push(vl2::rule::Rule::match_source_zerotier_address(false, or, *dead));
nc.rules.push(vl2::rule::Rule::match_source_zerotier_address(false, or, dead.clone()));
or = true;
}
nc.rules.push(vl2::rule::Rule::action_drop());
@ -426,40 +363,34 @@ impl Controller {
// If this network supports V1 nodes we have to include V1 credentials. Otherwise we can skip
// the overhead (bandwidth and CPU) of generating these.
if let Some(com) =
vl2::v1::CertificateOfMembership::new(&self.local_identity, network_id, &source_identity, time_clock, credential_ttl)
{
let mut v1cred = V1Credentials {
revision: time_clock as u64,
max_delta: credential_ttl,
certificate_of_membership: com,
certificates_of_ownership: Vec::new(),
tags: HashMap::new(),
};
let com = vl2::v1::CertificateOfMembership::new(&self.local_identity, &network_id, &source_identity, time_clock, credential_ttl);
let mut v1cred = V1Credentials {
revision: time_clock as u64,
max_delta: credential_ttl,
certificate_of_membership: com,
certificates_of_ownership: Vec::new(),
tags: HashMap::new(),
};
if !nc.static_ips.is_empty() {
let mut coo = vl2::v1::CertificateOfOwnership::new(network_id, time_clock, source_identity.address);
for ip in nc.static_ips.iter() {
coo.add_ip(ip);
}
if !coo.sign(&self.local_identity, &source_identity) {
return Ok((AuthenticationResult::RejectedDueToError, None));
}
v1cred.certificates_of_ownership.push(coo);
if !nc.static_ips.is_empty() {
let mut coo = vl2::v1::CertificateOfOwnership::new(&network_id, time_clock, &source_identity.address);
for ip in nc.static_ips.iter() {
coo.add_ip(ip);
}
for (id, value) in member.tags.iter() {
let tag = vl2::v1::Tag::new(*id, *value, &self.local_identity, network_id, &source_identity, time_clock);
if tag.is_none() {
return Ok((AuthenticationResult::RejectedDueToError, None));
}
let _ = v1cred.tags.insert(*id, tag.unwrap());
if !coo.sign(&self.local_identity, &source_identity) {
return Ok((AuthenticationResult::RejectedDueToError, None));
}
nc.v1_credentials = Some(v1cred);
} else {
return Ok((AuthenticationResult::RejectedDueToError, None));
v1cred.certificates_of_ownership.push(coo);
}
for (id, value) in member.tags.iter() {
let _ = v1cred.tags.insert(
*id,
vl2::v1::Tag::new(*id, *value, &self.local_identity, &network_id, &source_identity, time_clock),
);
}
nc.v1_credentials = Some(v1cred);
}
if source_identity.p384.is_some() {
@ -472,9 +403,9 @@ impl Controller {
.recently_authorized
.write()
.unwrap()
.entry(source_identity.fingerprint)
.entry(source_identity.address.clone())
.or_default()
.insert(network_id, ms_monotonic() + (credential_ttl as i64));
.insert(network_id.clone(), ms_monotonic() + (credential_ttl as i64));
Some(nc)
} else {
@ -491,12 +422,12 @@ impl Controller {
}
impl InnerProtocolLayer for Controller {
fn handle_packet<Application: ApplicationLayer + ?Sized>(
fn handle_packet<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
verb: u8,
@ -509,7 +440,7 @@ impl InnerProtocolLayer for Controller {
if network_id.is_err() {
return PacketHandlerResult::Error;
}
let network_id = NetworkId::from_u64(network_id.unwrap());
let network_id = NetworkId::from_legacy_u64(network_id.unwrap()).ok();
if network_id.is_none() {
return PacketHandlerResult::Error;
}
@ -517,10 +448,10 @@ impl InnerProtocolLayer for Controller {
debug_event!(
app,
"[vl2] NETWORK_CONFIG_REQUEST from {}({}) for {:0>16x}",
"[vl2] NETWORK_CONFIG_REQUEST from {}({}) for {}",
source.identity.address.to_string(),
source_path.endpoint.to_string(),
u64::from(network_id)
network_id.to_string()
);
let metadata = if (cursor + 2) < payload.len() {
@ -538,36 +469,39 @@ impl InnerProtocolLayer for Controller {
};
// Launch handler as an async background task.
let (self2, source, source_remote_endpoint) = (self.self_ref.upgrade().unwrap(), source.clone(), source_path.endpoint.clone());
let app = app.concrete_self::<VL1Service<Self>>().unwrap().get_self_arc(); // can't be a dead pointer since we're in a handler being called by it
let (controller, source, source_remote_endpoint) = (self.self_ref.upgrade().unwrap(), source.clone(), source_path.endpoint.clone());
self.reaper.add(
self.runtime.spawn(async move {
let node_id = source.identity.address;
let node_fingerprint = Blob::from(source.identity.fingerprint);
let node_id = source.identity.address.clone();
let now = ms_since_epoch();
let (result, config) = match self2.authorize(&source.identity, network_id, now).await {
let result = match controller.authorize(&source.identity, &network_id, now).await {
Result::Ok((result, Some(config))) => {
//println!("{}", serde_yaml::to_string(&config).unwrap());
let app = self2.service.read().unwrap().upgrade().unwrap();
self2.send_network_config(app.as_ref(), app.node(), cast_ref(source.as_ref()).unwrap(), &config, Some(message_id));
(result, Some(config))
controller.send_network_config(
app.as_ref(),
&app.node,
cast_ref(source.as_ref()).unwrap(),
&config,
Some(message_id),
);
result
}
Result::Ok((result, None)) => (result, None),
Result::Ok((result, None)) => result,
Result::Err(e) => {
#[cfg(debug_assertions)]
let host = self2.service.read().unwrap().clone().upgrade().unwrap();
debug_event!(host, "[vl2] ERROR getting network config: {}", e.to_string());
debug_event!(app, "[vl2] ERROR getting network config: {}", e.to_string());
return;
}
};
let _ = self2
let _ = controller
.database
.log_request(RequestLogItem {
network_id,
node_id,
node_fingerprint,
controller_node_id: self2.local_identity.address,
controller_node_id: controller.local_identity.public.address.clone(),
metadata,
peer_version: source.version(),
peer_protocol_version: source.protocol_version(),
@ -575,7 +509,6 @@ impl InnerProtocolLayer for Controller {
source_remote_endpoint,
source_hops,
result,
config,
})
.await;
}),
@ -590,7 +523,7 @@ impl InnerProtocolLayer for Controller {
let time_ticks = ms_monotonic();
self.multicast_authority.handle_vl2_multicast_like(
|network_id, identity| {
auth.get(&identity.fingerprint)
auth.get(&identity.address)
.map_or(false, |t| t.get(&network_id).map_or(false, |t| *t > time_ticks))
},
time_ticks,
@ -606,7 +539,7 @@ impl InnerProtocolLayer for Controller {
let time_ticks = ms_monotonic();
self.multicast_authority.handle_vl2_multicast_gather(
|network_id, identity| {
auth.get(&identity.fingerprint)
auth.get(&identity.address)
.map_or(false, |t| t.get(&network_id).map_or(false, |t| *t > time_ticks))
},
time_ticks,

View file

@ -1,10 +1,9 @@
use async_trait::async_trait;
use zerotier_crypto::secure_eq;
use zerotier_network_hypervisor::vl1::{Address, InetAddress};
use zerotier_network_hypervisor::vl1::{InetAddress, PartialAddress};
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_utils::tokio::sync::broadcast::Receiver;
use zerotier_vl1_service::VL1DataStorage;
use crate::model::*;
@ -22,23 +21,51 @@ pub enum Change {
}
#[async_trait]
pub trait Database: Sync + Send + VL1DataStorage + 'static {
pub trait Database: Sync + Send + 'static {
/// List networks on this controller.
async fn list_networks(&self) -> Result<Vec<NetworkId>, Error>;
async fn get_network(&self, id: NetworkId) -> Result<Option<Network>, Error>;
/// Get a network by network ID.
async fn get_network(&self, id: &NetworkId) -> Result<Option<Network>, Error>;
/// Save a network.
///
/// Note that unlike members the network ID is not automatically promoted from legacy to full
/// ID format.
async fn save_network(&self, obj: Network, generate_change_notification: bool) -> Result<(), Error>;
async fn list_members(&self, network_id: NetworkId) -> Result<Vec<Address>, Error>;
async fn get_member(&self, network_id: NetworkId, node_id: Address) -> Result<Option<Member>, Error>;
/// List members of a network.
async fn list_members(&self, network_id: &NetworkId) -> Result<Vec<PartialAddress>, Error>;
    /// Get a member of a network.
    ///
    /// If node_id is not a complete address, the best unique match should be returned. None should
    /// be returned both if the member is not found and if node_id is ambiguous (would match more
    /// than one member).
async fn get_member(&self, network_id: &NetworkId, node_id: &PartialAddress) -> Result<Option<Member>, Error>;
/// Save a modified member to a network.
///
/// Note that member modifications can include the automatic replacement of a less specific address
/// in node_id with a fully specific address. This happens the first time a member added with an
/// incomplete address is actually seen. In that case the implementation must correctly find the
/// best matching existing member and replace it with a member identified by the fully specified
/// address, removing and re-adding if needed.
///
/// This must also handle the (rare) case when someone may try to save a member with a less
/// specific address than the one currently in the database. In that case the "old" more specific
/// address should replace the less specific address in the node_id field. This can only happen if
/// an external user manually does this. The controller won't do this automatically.
async fn save_member(&self, obj: Member, generate_change_notification: bool) -> Result<(), Error>;
/// Save a log entry for a request this controller has handled.
async fn log_request(&self, obj: RequestLogItem) -> Result<(), Error>;
/// Get a receiver that can be used to receive changes made to networks and members, if supported.
///
/// The receiver returned is a broadcast receiver. This can be called more than once if there are
/// multiple parts of the controller that listen.
///
/// Changes should NOT be broadcast on call to save_network() or save_member(). They should only
/// be broadcast when externally generated changes occur.
///
/// The default implementation returns None indicating that change following is not supported.
/// Change following is required for instant deauthorization with revocations and other instant
/// changes in response to modifications to network and member configuration.
@ -50,11 +77,11 @@ pub trait Database: Sync + Send + VL1DataStorage + 'static {
///
/// The default trait implementation uses a brute force method. This should be reimplemented if a
/// more efficient way is available.
async fn list_members_deauthorized_after(&self, network_id: NetworkId, cutoff: i64) -> Result<Vec<Address>, Error> {
async fn list_members_deauthorized_after(&self, network_id: &NetworkId, cutoff: i64) -> Result<Vec<PartialAddress>, Error> {
let mut v = Vec::new();
let members = self.list_members(network_id).await?;
for a in members.iter() {
if let Some(m) = self.get_member(network_id, *a).await? {
if let Some(m) = self.get_member(network_id, a).await? {
if m.last_deauthorized_time.unwrap_or(i64::MIN) >= cutoff {
v.push(m.node_id);
}
@ -67,10 +94,10 @@ pub trait Database: Sync + Send + VL1DataStorage + 'static {
///
/// The default trait implementation uses a brute force method. This should be reimplemented if a
/// more efficient way is available.
async fn is_ip_assigned(&self, network_id: NetworkId, ip: &InetAddress) -> Result<bool, Error> {
async fn is_ip_assigned(&self, network_id: &NetworkId, ip: &InetAddress) -> Result<bool, Error> {
let members = self.list_members(network_id).await?;
for a in members.iter() {
if let Some(m) = self.get_member(network_id, *a).await? {
if let Some(m) = self.get_member(network_id, a).await? {
if m.ip_assignments.iter().any(|ip2| secure_eq(ip2.ip_bytes(), ip.ip_bytes())) {
return Ok(true);
}
@ -78,6 +105,4 @@ pub trait Database: Sync + Send + VL1DataStorage + 'static {
}
return Ok(false);
}
async fn log_request(&self, obj: RequestLogItem) -> Result<(), Error>;
}
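
To illustrate the change-stream contract described above, a minimal consumer might look like the following; the function name is illustrative, everything else is the trait API shown in this file:

use std::sync::Arc;
use crate::database::{Change, Database};

async fn watch_changes(db: Arc<dyn Database>) {
    // changes() returns None if the database does not support change following.
    if let Some(mut rx) = db.changes().await {
        while let Ok(change) = rx.recv().await {
            if let Change::MemberDeleted(member) = change {
                // A real controller reacts here, e.g. by sending revocations.
                eprintln!("member deleted: {:?}", member);
            }
        }
    }
}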

View file

@ -1,418 +1,201 @@
use std::collections::BTreeMap;
use std::mem::replace;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, Weak};
use std::sync::{Arc, Weak};
use serde::{Deserialize, Serialize};
use async_trait::async_trait;
use notify::{RecursiveMode, Watcher};
use serde::de::DeserializeOwned;
use zerotier_utils::tokio::io::AsyncWriteExt;
use zerotier_network_hypervisor::vl1::{Address, Identity, Valid};
use crate::database;
use crate::database::Change;
use crate::model::{Member, Network, RequestLogItem};
use zerotier_network_hypervisor::vl1::PartialAddress;
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_utils::reaper::Reaper;
use zerotier_utils::tokio::fs;
use zerotier_utils::tokio::runtime::Handle;
use zerotier_utils::tokio::sync::broadcast::{channel, Receiver, Sender};
use zerotier_utils::tokio::task::JoinHandle;
use zerotier_utils::tokio::time::{sleep, Duration, Instant};
use zerotier_vl1_service::datadir::{load_node_identity, save_node_identity};
use zerotier_vl1_service::VL1DataStorage;
use zerotier_utils::tokio;
use zerotier_utils::tokio::sync::{broadcast, mpsc};
use crate::cache::Cache;
use crate::database::{Change, Database, Error};
use crate::model::*;
const EVENT_HANDLER_TASK_TIMEOUT: Duration = Duration::from_secs(10);
/// An in-filesystem database that permits live editing.
///
/// A cache is maintained that contains the actual objects. When an object is live edited it is
/// re-read; once it loads successfully it is merged with the cached object and saved back to
/// the cache. The cache will also contain any ephemeral data, generated data, etc.
///
/// The file format is YAML instead of JSON for better human friendliness and the layout
/// is different from V1 so it'll need a converter to use with V1 FileDb controller data.
pub struct FileDatabase {
base_path: PathBuf,
local_identity: Valid<Identity>,
change_sender: Sender<Change>,
tasks: Reaper,
cache: Cache,
daemon: JoinHandle<()>,
db_path: PathBuf,
log: Option<tokio::sync::Mutex<tokio::fs::File>>,
data: tokio::sync::Mutex<(BTreeMap<NetworkId, FileDbNetwork>, bool)>,
change_sender: broadcast::Sender<Change>,
file_write_notify_sender: mpsc::Sender<()>,
file_writer: tokio::task::JoinHandle<()>,
}
// TODO: should cache at least hashes and detect changes in the filesystem live.
#[derive(Serialize, Deserialize)]
struct FileDbNetwork {
pub config: Network,
pub members: BTreeMap<PartialAddress, Member>,
}
impl FileDatabase {
pub async fn new<P: AsRef<Path>>(runtime: Handle, base_path: P) -> Result<Arc<Self>, Error> {
let base_path: PathBuf = base_path.as_ref().into();
pub async fn new(db_path: &Path, log_path: Option<&Path>) -> Result<Arc<Self>, Box<dyn std::error::Error + Send + Sync>> {
let data_bytes = tokio::fs::read(db_path).await;
let mut data: BTreeMap<NetworkId, FileDbNetwork> = BTreeMap::new();
if let Err(e) = data_bytes {
if !matches!(e.kind(), tokio::io::ErrorKind::NotFound) {
return Err(Box::new(e));
}
} else {
data = serde_json::from_slice(data_bytes.as_ref().unwrap().as_slice())?;
}
let (change_sender, _) = channel(256);
let db_weak_tmp: Arc<Mutex<Weak<Self>>> = Arc::new(Mutex::new(Weak::default()));
let db_weak = db_weak_tmp.clone();
let runtime2 = runtime.clone();
let log = if let Some(log_path) = log_path {
Some(tokio::sync::Mutex::new(
tokio::fs::OpenOptions::new().append(true).create(true).mode(0o600).open(log_path).await?,
))
} else {
None
};
let local_identity =
load_node_identity(base_path.as_path()).ok_or(std::io::Error::new(std::io::ErrorKind::NotFound, "identity.secret not found"))?;
let controller_address = local_identity.address;
let db = Arc::new(Self {
base_path: base_path.clone(),
local_identity,
change_sender,
tasks: Reaper::new(&runtime2),
cache: Cache::new(),
daemon: runtime2.spawn(async move {
let mut watcher = notify::recommended_watcher(move |event: notify::Result<notify::event::Event>| {
if let Ok(event) = event {
match event.kind {
notify::EventKind::Create(_) | notify::EventKind::Modify(_) | notify::EventKind::Remove(_) => {
if let Some(db) = db_weak.lock().unwrap().upgrade() {
db.clone().tasks.add(
runtime.spawn(async move {
if let Some(path0) = event.paths.first() {
if let Some((record_type, network_id, node_id)) =
Self::record_type_from_path(controller_address, path0.as_path())
{
// Paths to objects that were deleted or changed. Changed includes adding new objects.
let mut deleted = None;
let mut changed = None;
match event.kind {
notify::EventKind::Create(create_kind) => match create_kind {
notify::event::CreateKind::File => {
changed = Some(path0.as_path());
}
_ => {}
},
notify::EventKind::Modify(modify_kind) => match modify_kind {
notify::event::ModifyKind::Data(_) => {
changed = Some(path0.as_path());
}
notify::event::ModifyKind::Name(rename_mode) => match rename_mode {
notify::event::RenameMode::Both => {
if event.paths.len() >= 2 {
if let Some(path1) = event.paths.last() {
deleted = Some(path0.as_path());
changed = Some(path1.as_path());
}
}
}
notify::event::RenameMode::From => {
deleted = Some(path0.as_path());
}
notify::event::RenameMode::To => {
changed = Some(path0.as_path());
}
_ => {}
},
_ => {}
},
notify::EventKind::Remove(remove_kind) => match remove_kind {
notify::event::RemoveKind::File => {
deleted = Some(path0.as_path());
}
_ => {}
},
_ => {}
}
if deleted.is_some() {
match record_type {
RecordType::Network => {
if let Some((network, members)) = db.cache.on_network_deleted(network_id) {
let _ = db.change_sender.send(Change::NetworkDeleted(network, members));
}
}
RecordType::Member => {
if let Some(node_id) = node_id {
if let Some(member) = db.cache.on_member_deleted(network_id, node_id) {
let _ = db.change_sender.send(Change::MemberDeleted(member));
}
}
}
_ => {}
}
}
if let Some(changed) = changed {
match record_type {
RecordType::Network => {
if let Ok(Some(new_network)) = Self::load_object::<Network>(changed).await {
match db.cache.on_network_updated(new_network.clone()) {
(true, Some(old_network)) => {
let _ = db
.change_sender
.send(Change::NetworkChanged(old_network, new_network));
}
(true, None) => {
let _ = db.change_sender.send(Change::NetworkCreated(new_network));
}
_ => {}
}
}
}
RecordType::Member => {
if let Ok(Some(new_member)) = Self::load_object::<Member>(changed).await {
match db.cache.on_member_updated(new_member.clone()) {
(true, Some(old_member)) => {
let _ =
db.change_sender.send(Change::MemberChanged(old_member, new_member));
}
(true, None) => {
let _ = db.change_sender.send(Change::MemberCreated(new_member));
}
_ => {}
}
}
}
_ => {}
}
}
}
}
}),
Instant::now().checked_add(EVENT_HANDLER_TASK_TIMEOUT).unwrap(),
let (file_write_notify_sender, mut file_write_notify_receiver) = mpsc::channel(16);
let db = Arc::new_cyclic(|self_weak: &Weak<FileDatabase>| {
let self_weak = self_weak.clone();
Self {
db_path: db_path.to_path_buf(),
log,
data: tokio::sync::Mutex::new((data, false)),
change_sender: broadcast::channel(16).0,
file_write_notify_sender,
file_writer: tokio::task::spawn(async move {
loop {
file_write_notify_receiver.recv().await;
if let Some(db) = self_weak.upgrade() {
let mut data = db.data.lock().await;
if data.1 {
let json = zerotier_utils::json::to_json_pretty(&data.0);
if let Err(e) = tokio::fs::write(db.db_path.as_path(), json.as_bytes()).await {
eprintln!(
"WARNING: controller changes not persisted! unable to write file database to '{}': {}",
db.db_path.to_string_lossy(),
e.to_string()
);
} else {
data.1 = false;
}
}
_ => {}
} else {
break;
}
}
})
.expect("FATAL: unable to start filesystem change listener");
let _ = watcher.configure(
notify::Config::default()
.with_compare_contents(true)
.with_poll_interval(std::time::Duration::from_secs(2)),
);
watcher
.watch(&base_path, RecursiveMode::Recursive)
.expect("FATAL: unable to watch base path");
loop {
// Any periodic background stuff can be put here. Adjust timing as needed.
sleep(Duration::from_secs(10)).await;
}
}),
}),
}
});
db.cache.load_all(db.as_ref()).await?;
*db_weak_tmp.lock().unwrap() = Arc::downgrade(&db); // this starts the daemon tasks and starts watching for file changes
Ok(db)
}
fn network_path(&self, network_id: NetworkId) -> PathBuf {
self.base_path.join(format!("N{:06x}", network_id.network_no())).join("config.yaml")
}
fn member_path(&self, network_id: NetworkId, member_id: Address) -> PathBuf {
self.base_path
.join(format!("N{:06x}", network_id.network_no()))
.join(format!("M{}.yaml", member_id.to_string()))
}
async fn load_object<O: DeserializeOwned>(path: &Path) -> Result<Option<O>, Error> {
if let Ok(raw) = fs::read(path).await {
return Ok(Some(serde_yaml::from_slice::<O>(raw.as_slice())?));
} else {
return Ok(None);
}
}
/// Get record type and also the number after it: network number or address.
fn record_type_from_path(controller_address: Address, p: &Path) -> Option<(RecordType, NetworkId, Option<Address>)> {
let parent = p.parent()?.file_name()?.to_string_lossy();
if parent.len() == 7 && (parent.starts_with("N") || parent.starts_with('n')) {
let network_id = NetworkId::from_controller_and_network_no(controller_address, u64::from_str_radix(&parent[1..], 16).ok()?)?;
if let Some(file_name) = p.file_name().map(|p| p.to_string_lossy().to_lowercase()) {
if file_name.eq("config.yaml") {
return Some((RecordType::Network, network_id, None));
} else if file_name.len() == 16 && file_name.starts_with("m") && file_name.ends_with(".yaml") {
return Some((
RecordType::Member,
network_id,
Some(Address::from_u64(u64::from_str_radix(&file_name.as_str()[1..11], 16).unwrap_or(0))?),
));
}
}
}
return None;
}
}
impl Drop for FileDatabase {
fn drop(&mut self) {
self.daemon.abort();
}
}
impl VL1DataStorage for FileDatabase {
fn load_node_identity(&self) -> Option<Valid<Identity>> {
load_node_identity(self.base_path.as_path())
}
fn save_node_identity(&self, id: &Valid<Identity>) -> bool {
save_node_identity(self.base_path.as_path(), id)
self.file_writer.abort();
}
}
#[async_trait]
impl Database for FileDatabase {
async fn list_networks(&self) -> Result<Vec<NetworkId>, Error> {
let mut networks = Vec::new();
let controller_address_shift24 = u64::from(self.local_identity.address).wrapping_shl(24);
let mut dir = fs::read_dir(&self.base_path).await?;
while let Ok(Some(ent)) = dir.next_entry().await {
if ent.file_type().await.map_or(false, |t| t.is_dir()) {
let osname = ent.file_name();
let name = osname.to_string_lossy();
if name.len() == 7 && name.starts_with("N") {
if fs::metadata(ent.path().join("config.yaml")).await.is_ok() {
if let Ok(nwid_last24bits) = u64::from_str_radix(&name[1..], 16) {
if let Some(nwid) = NetworkId::from_u64(controller_address_shift24 | nwid_last24bits) {
networks.push(nwid);
}
}
}
impl database::Database for FileDatabase {
async fn list_networks(&self) -> Result<Vec<NetworkId>, database::Error> {
Ok(self.data.lock().await.0.keys().cloned().collect())
}
async fn get_network(&self, id: &NetworkId) -> Result<Option<Network>, database::Error> {
Ok(self.data.lock().await.0.get(id).map(|x| x.config.clone()))
}
async fn save_network(&self, obj: Network, generate_change_notification: bool) -> Result<(), database::Error> {
let mut data = self.data.lock().await;
if let Some(nw) = data.0.get_mut(&obj.id) {
if !nw.config.eq(&obj) {
let old = replace(&mut nw.config, obj);
if generate_change_notification {
let _ = self.change_sender.send(Change::NetworkChanged(old, nw.config.clone()));
}
let _ = self.file_write_notify_sender.send(()).await;
}
}
Ok(networks)
}
async fn get_network(&self, id: NetworkId) -> Result<Option<Network>, Error> {
let mut network = Self::load_object::<Network>(self.network_path(id).as_path()).await?;
if let Some(network) = network.as_mut() {
// FileDatabase stores networks by their "network number" and automatically adapts their IDs
// if the controller's identity changes. This is done to make it easy to just clone networks,
// including storing them in "git."
let network_id_should_be = network.id.change_network_controller(self.local_identity.address);
if network.id != network_id_should_be {
network.id = network_id_should_be;
let _ = self.save_network(network.clone(), false).await?;
} else {
data.0
.insert(obj.id.clone(), FileDbNetwork { config: obj.clone(), members: BTreeMap::new() });
if generate_change_notification {
let _ = self.change_sender.send(Change::NetworkCreated(obj));
}
let _ = self.file_write_notify_sender.send(()).await;
}
Ok(network)
}
async fn save_network(&self, obj: Network, generate_change_notification: bool) -> Result<(), Error> {
if !generate_change_notification {
let _ = self.cache.on_network_updated(obj.clone());
}
let base_network_path = self.network_path(obj.id);
let _ = fs::create_dir_all(base_network_path.parent().unwrap()).await;
let _ = fs::write(base_network_path, serde_yaml::to_string(&obj)?.as_bytes()).await?;
return Ok(());
}
async fn list_members(&self, network_id: NetworkId) -> Result<Vec<Address>, Error> {
let mut members = Vec::new();
let mut dir = fs::read_dir(self.base_path.join(format!("N{:06x}", network_id.network_no()))).await?;
while let Ok(Some(ent)) = dir.next_entry().await {
if ent.file_type().await.map_or(false, |t| t.is_file() || t.is_symlink()) {
let osname = ent.file_name();
let name = osname.to_string_lossy();
if name.len() == (zerotier_network_hypervisor::protocol::ADDRESS_SIZE_STRING + 6) && name.starts_with("M") && name.ends_with(".yaml")
{
if let Ok(member_address) = u64::from_str_radix(&name[1..11], 16) {
if let Some(member_address) = Address::from_u64(member_address) {
members.push(member_address);
async fn list_members(&self, network_id: &NetworkId) -> Result<Vec<PartialAddress>, database::Error> {
Ok(self
.data
.lock()
.await
.0
.get(network_id)
.map_or_else(|| Vec::new(), |x| x.members.keys().cloned().collect()))
}
async fn get_member(&self, network_id: &NetworkId, node_id: &PartialAddress) -> Result<Option<Member>, database::Error> {
Ok(self
.data
.lock()
.await
.0
.get_mut(network_id)
.and_then(|x| node_id.find_unique_match(&x.members).cloned()))
}
async fn save_member(&self, mut obj: Member, generate_change_notification: bool) -> Result<(), database::Error> {
let mut data = self.data.lock().await;
if let Some(nw) = data.0.get_mut(&obj.network_id) {
if let Some(member) = obj.node_id.find_unique_match_mut(&mut nw.members) {
if !obj.eq(member) {
if member.node_id.specificity_bytes() != obj.node_id.specificity_bytes() {
// If the specificity of the node_id has changed we have to delete and re-add the entry.
let old_node_id = member.node_id.clone();
let old = nw.members.remove(&old_node_id);
if old_node_id.specificity_bytes() > obj.node_id.specificity_bytes() {
obj.node_id = old_node_id;
}
nw.members.insert(obj.node_id.clone(), obj.clone());
if generate_change_notification {
let _ = self.change_sender.send(Change::MemberChanged(old.unwrap(), obj));
}
} else {
let old = replace(member, obj);
if generate_change_notification {
let _ = self.change_sender.send(Change::MemberChanged(old, member.clone()));
}
}
let _ = self.file_write_notify_sender.send(()).await;
}
} else {
let _ = nw.members.insert(obj.node_id.clone(), obj.clone());
if generate_change_notification {
let _ = self.change_sender.send(Change::MemberCreated(obj));
}
let _ = self.file_write_notify_sender.send(()).await;
}
}
Ok(members)
return Ok(());
}
async fn get_member(&self, network_id: NetworkId, node_id: Address) -> Result<Option<Member>, Error> {
let mut member = Self::load_object::<Member>(self.member_path(network_id, node_id).as_path()).await?;
if let Some(member) = member.as_mut() {
if member.network_id != network_id {
// Also auto-update member network IDs, see get_network().
member.network_id = network_id;
self.save_member(member.clone(), false).await?;
}
async fn log_request(&self, obj: RequestLogItem) -> Result<(), database::Error> {
if let Some(log) = self.log.as_ref() {
let mut json_line = zerotier_utils::json::to_json(&obj);
json_line.push('\n');
let _ = log.lock().await.write_all(json_line.as_bytes()).await;
}
Ok(member)
}
async fn save_member(&self, obj: Member, generate_change_notification: bool) -> Result<(), Error> {
if !generate_change_notification {
let _ = self.cache.on_member_updated(obj.clone());
}
let base_member_path = self.member_path(obj.network_id, obj.node_id);
let _ = fs::create_dir_all(base_member_path.parent().unwrap()).await;
let _ = fs::write(base_member_path, serde_yaml::to_string(&obj)?.as_bytes()).await?;
Ok(())
}
async fn changes(&self) -> Option<Receiver<Change>> {
async fn changes(&self) -> Option<broadcast::Receiver<Change>> {
Some(self.change_sender.subscribe())
}
async fn log_request(&self, obj: RequestLogItem) -> Result<(), Error> {
println!("{}", obj.to_string());
Ok(())
}
}
#[cfg(test)]
mod tests {
#[allow(unused_imports)]
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering};
#[allow(unused)]
#[test]
fn test_db() {
if let Ok(tokio_runtime) = zerotier_utils::tokio::runtime::Builder::new_current_thread().enable_all().build() {
let _ = tokio_runtime.block_on(async {
let node_id = Address::from_u64(0xdeadbeefu64).unwrap();
let network_id = NetworkId::from_u64(0xfeedbeefcafebabeu64).unwrap();
let test_dir = std::env::temp_dir().join("zt_filedatabase_test");
println!("test filedatabase is in: {}", test_dir.as_os_str().to_str().unwrap());
let _ = std::fs::remove_dir_all(&test_dir);
let controller_id = Identity::generate();
assert!(fs::create_dir_all(&test_dir).await.is_ok());
assert!(save_node_identity(test_dir.as_path(), &controller_id));
let db = Arc::new(FileDatabase::new(tokio_runtime.handle().clone(), test_dir).await.expect("new db"));
let change_count = Arc::new(AtomicUsize::new(0));
let db2 = db.clone();
let change_count2 = change_count.clone();
tokio_runtime.spawn(async move {
let mut change_receiver = db2.changes().await.unwrap();
loop {
if let Ok(change) = change_receiver.recv().await {
change_count2.fetch_add(1, Ordering::SeqCst);
//println!("[FileDatabase] {:#?}", change);
} else {
break;
}
}
});
let mut test_network = Network::new(network_id);
db.save_network(test_network.clone(), true).await.expect("network save error");
let mut test_member = Member::new_without_identity(node_id, network_id);
for x in 0..3 {
test_member.name = x.to_string();
db.save_member(test_member.clone(), true).await.expect("member save error");
zerotier_utils::tokio::task::yield_now().await;
sleep(Duration::from_millis(100)).await;
zerotier_utils::tokio::task::yield_now().await;
let test_member2 = db.get_member(network_id, node_id).await.unwrap().unwrap();
assert!(test_member == test_member2);
}
});
}
}
}
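
A brief usage sketch for the new single-file database; the function name is illustrative, the path argument is a placeholder, and error handling is minimal:

use std::path::Path;
use std::sync::Arc;
use zerotier_network_controller::database::Database;
use zerotier_network_controller::filedatabase::FileDatabase;

async fn open_and_dump(path: &Path) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let db: Arc<FileDatabase> = FileDatabase::new(path, None).await?;
    for network_id in db.list_networks().await? {
        for node_id in db.list_members(&network_id).await? {
            // node_id may be a partial address; get_member() resolves the unique match.
            let _member = db.get_member(&network_id, &node_id).await?;
        }
    }
    Ok(())
}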

View file

@ -2,11 +2,8 @@
mod controller;
pub(crate) mod cache;
pub mod database;
pub mod filedatabase;
pub mod model;
pub mod postgresdatabase;
pub use controller::*;

View file

@ -1,28 +1,32 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use zerotier_network_controller::database::Database;
use zerotier_network_controller::filedatabase::FileDatabase;
use zerotier_network_controller::Controller;
use zerotier_network_hypervisor::vl1::identity::IdentitySecret;
use zerotier_network_hypervisor::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
use zerotier_service::vl1::{VL1Service, VL1Settings};
use zerotier_utils::exitcode;
use zerotier_utils::tokio;
use zerotier_utils::tokio::runtime::Runtime;
use zerotier_vl1_service::VL1Service;
async fn run(database: Arc<impl Database>, runtime: &Runtime) -> i32 {
match Controller::new(database.clone(), runtime.handle().clone()).await {
async fn run(database: Arc<dyn Database>, identity: IdentitySecret, runtime: &Runtime) -> i32 {
match Controller::new(runtime.handle().clone(), identity.clone(), database.clone()).await {
Err(err) => {
eprintln!("FATAL: error initializing handler: {}", err.to_string());
exitcode::ERR_CONFIG
}
Ok(handler) => match VL1Service::new(database.clone(), handler.clone(), zerotier_vl1_service::VL1Settings::default()) {
Ok(handler) => match VL1Service::new(identity, handler.clone(), VL1Settings::default()) {
Err(err) => {
eprintln!("FATAL: error launching service: {}", err.to_string());
exitcode::ERR_IOERR
}
Ok(svc) => {
svc.node().init_default_roots();
svc.node.init_default_roots();
handler.start(&svc).await;
zerotier_utils::wait_for_process_abort();
println!("Terminate signal received, shutting down...");
@ -35,13 +39,33 @@ async fn run(database: Arc<impl Database>, runtime: &Runtime) -> i32 {
fn main() {
const REQUIRE_ONE_OF_ARGS: [&'static str; 2] = ["postgres", "filedb"];
let global_args = clap::Command::new("zerotier-controller")
.arg(
clap::Arg::new("identity")
.short('i')
.long("identity")
.takes_value(true)
.forbid_empty_values(true)
.value_name("identity")
.help(Some("Path to secret ZeroTier identity"))
.required(true),
)
.arg(
clap::Arg::new("logfile")
.short('l')
.long("logfile")
.takes_value(true)
.forbid_empty_values(true)
.value_name("logfile")
.help(Some("Path to log file"))
.required(false),
)
.arg(
clap::Arg::new("filedb")
.short('f')
.long("filedb")
.takes_value(true)
.forbid_empty_values(true)
.value_name("path")
.value_name("filedb")
.help(Some("Use filesystem database at path"))
.required_unless_present_any(&REQUIRE_ONE_OF_ARGS),
)
@ -51,8 +75,8 @@ fn main() {
.long("postgres")
.takes_value(true)
.forbid_empty_values(true)
.value_name("path")
.help(Some("Connect to postgres with parameters in YAML file"))
.value_name("postgres")
.help(Some("Connect to postgres with supplied URL"))
.required_unless_present_any(&REQUIRE_ONE_OF_ARGS),
)
.version(format!("{}.{}.{}", VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION).as_str())
@ -63,23 +87,39 @@ fn main() {
std::process::exit(exitcode::ERR_USAGE);
});
if let Ok(tokio_runtime) = zerotier_utils::tokio::runtime::Builder::new_multi_thread().enable_all().build() {
if let Ok(tokio_runtime) = tokio::runtime::Builder::new_multi_thread().enable_all().build() {
tokio_runtime.block_on(async {
if let Some(filedb_base_path) = global_args.value_of("filedb") {
let file_db = FileDatabase::new(tokio_runtime.handle().clone(), filedb_base_path).await;
let identity = if let Ok(identity_data) = tokio::fs::read(global_args.value_of("identity").unwrap()).await {
if let Ok(identity) = IdentitySecret::from_str(String::from_utf8_lossy(identity_data.as_slice()).as_ref()) {
identity
} else {
eprintln!("FATAL: invalid secret identity");
std::process::exit(exitcode::ERR_CONFIG);
}
} else {
eprintln!("FATAL: unable to read secret identity");
std::process::exit(exitcode::ERR_IOERR);
};
let db: Arc<dyn Database> = if let Some(filedb_path) = global_args.value_of("filedb") {
let file_db = FileDatabase::new(Path::new(filedb_path), global_args.value_of("logfile").map(|l| Path::new(l))).await;
if file_db.is_err() {
eprintln!(
"FATAL: unable to open filesystem database at {}: {}",
filedb_base_path,
filedb_path,
file_db.as_ref().err().unwrap().to_string()
);
std::process::exit(exitcode::ERR_IOERR)
}
std::process::exit(run(file_db.unwrap(), &tokio_runtime).await);
file_db.unwrap()
} else if let Some(_postgres_url) = global_args.value_of("postgres") {
panic!("not implemented yet");
} else {
eprintln!("FATAL: no database type selected.");
std::process::exit(exitcode::ERR_USAGE);
};
std::process::exit(run(db, identity, &tokio_runtime).await);
});
} else {
eprintln!("FATAL: can't start async runtime");

View file

@ -5,25 +5,25 @@ use std::hash::Hash;
use serde::{Deserialize, Serialize};
use zerotier_network_hypervisor::vl1::{Address, Identity, InetAddress};
use zerotier_crypto::typestate::Valid;
use zerotier_network_hypervisor::vl1::identity::Identity;
use zerotier_network_hypervisor::vl1::{InetAddress, PartialAddress};
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_utils::blob::Blob;
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]
pub struct Member {
/// Member node ID
///
/// This can be a partial address if it was manually added as such by a user. As soon as a node matching
/// this partial is seen, this will be replaced by a full specificity PartialAddress from the querying
/// node's full identity. The 'identity' field will also be populated in this case.
#[serde(rename = "address")]
pub node_id: Address,
pub node_id: PartialAddress,
#[serde(rename = "networkId")]
pub network_id: NetworkId,
/// Pinned full member identity fingerprint, if known.
/// If this is set but 'identity' is not, the 'identity' field will be set on first request
/// but an identity not matching this fingerprint will not be accepted. This allows a member
/// to be created with an address and a fingerprint for full SHA384 identity specification.
#[serde(skip_serializing_if = "Option::is_none")]
pub identity_fingerprint: Option<Blob<{ Identity::FINGERPRINT_SIZE }>>,
/// Pinned full member identity, if known.
/// Full identity of this node, if known.
#[serde(skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
@ -79,13 +79,11 @@ pub struct Member {
}
impl Member {
/// Create a new network member without specifying a "pinned" identity.
pub fn new_without_identity(node_id: Address, network_id: NetworkId) -> Self {
pub fn new(node_identity: Valid<Identity>, network_id: NetworkId) -> Self {
Self {
node_id,
node_id: node_identity.address.to_partial(),
network_id,
identity: None,
identity_fingerprint: None,
identity: Some(node_identity.remove_typestate()),
name: String::new(),
last_authorized_time: None,
last_deauthorized_time: None,
@ -98,14 +96,8 @@ impl Member {
}
}
pub fn new_with_identity(identity: Identity, network_id: NetworkId) -> Self {
let mut tmp = Self::new_without_identity(identity.address, network_id);
tmp.identity_fingerprint = Some(Blob::from(identity.fingerprint));
tmp.identity = Some(identity);
tmp
}
/// Check whether this member is authorized, which is true if the last authorized time is after last deauthorized time.
/// Check whether this member is authorized.
/// This is true if the last authorized time is after last deauthorized time.
pub fn authorized(&self) -> bool {
self.last_authorized_time
.map_or(false, |la| self.last_deauthorized_time.map_or(true, |ld| la > ld))
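For illustration, a minimal standalone restatement of this rule (a hypothetical helper, not part of the controller API):

// Authorized iff the member has ever been authorized and was not deauthorized afterwards.
fn is_authorized(last_authorized: Option<i64>, last_deauthorized: Option<i64>) -> bool {
    last_authorized.map_or(false, |la| last_deauthorized.map_or(true, |ld| la > ld))
}

fn main() {
    assert!(is_authorized(Some(200), Some(100)));  // re-authorized after a deauthorization
    assert!(!is_authorized(Some(100), Some(200))); // deauthorized more recently
    assert!(!is_authorized(None, None));           // never authorized
}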

View file

@ -6,14 +6,10 @@ mod network;
pub use member::*;
pub use network::*;
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use zerotier_network_hypervisor::vl1::{Address, Endpoint};
use zerotier_network_hypervisor::vl2::v1::networkconfig::NetworkConfig;
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_utils::blob::Blob;
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum RecordType {
@ -22,13 +18,6 @@ pub enum RecordType {
RequestLogItem,
}
/// A complete network with all member configuration information for import/export or blob storage.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct NetworkExport {
pub network: Network,
pub members: HashMap<Address, Member>,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[repr(u8)]
pub enum AuthenticationResult {
@ -92,8 +81,6 @@ pub struct RequestLogItem {
pub network_id: NetworkId,
#[serde(rename = "n")]
pub node_id: Address,
#[serde(rename = "nf")]
pub node_fingerprint: Blob<48>,
#[serde(rename = "c")]
pub controller_node_id: Address,
@ -121,9 +108,6 @@ pub struct RequestLogItem {
#[serde(rename = "r")]
pub result: AuthenticationResult,
#[serde(rename = "nc")]
pub config: Option<NetworkConfig>,
}
impl ToString for RequestLogItem {

View file

@ -173,7 +173,7 @@ impl Network {
for route in self.ip_routes.iter() {
let ip = InetAddress::from_ip_port(&ip_ptr.to_be_bytes(), route.target.port()); // IP/bits
if ip.is_within(&route.target) {
if let Ok(is_ip_assigned) = database.is_ip_assigned(self.id, &ip).await {
if let Ok(is_ip_assigned) = database.is_ip_assigned(&self.id, &ip).await {
if !is_ip_assigned {
modified = true;
let _ = member.ip_assignments.insert(ip);
@ -201,7 +201,7 @@ impl Network {
for route in self.ip_routes.iter() {
let ip = InetAddress::from_ip_port(&ip_ptr.to_be_bytes(), route.target.port()); // IP/bits
if ip.is_within(&route.target) {
if let Ok(is_ip_assigned) = database.is_ip_assigned(self.id, &ip).await {
if let Ok(is_ip_assigned) = database.is_ip_assigned(&self.id, &ip).await {
if !is_ip_assigned {
modified = true;
let _ = member.ip_assignments.insert(ip);

View file

@ -1,4 +1,5 @@
mod bn;
#[allow(unused)]
mod cipher_ctx;
mod ec;
mod error;

258
debian/changelog vendored
View file

@ -1,258 +0,0 @@
zerotier-one (1.10.2) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 13 Oct 2022 01:00:00 -0700
zerotier-one (1.10.1) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 27 Jun 2022 01:00:00 -0700
zerotier-one (1.10.0) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 03 Jun 2022 01:00:00 -0700
zerotier-one (1.8.10) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 10 May 2022 01:00:00 -0700
zerotier-one (1.8.9) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 25 Apr 2022 01:00:00 -0700
zerotier-one (1.8.8) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 11 Apr 2022 01:00:00 -0700
zerotier-one (1.8.7) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 21 Mar 2022 01:00:00 -0700
zerotier-one (1.8.6) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 07 Mar 2022 01:00:00 -0700
zerotier-one (1.8.5) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 17 Dec 2021 01:00:00 -0700
zerotier-one (1.8.4) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 23 Nov 2021 01:00:00 -0700
zerotier-one (1.8.3) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 15 Nov 2021 01:00:00 -0700
zerotier-one (1.8.2) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 08 Nov 2021 01:00:00 -0700
zerotier-one (1.8.1) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Wed, 20 Oct 2021 01:00:00 -0700
zerotier-one (1.8.0) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Wed, 15 Sep 2021 01:00:00 -0700
zerotier-one (1.6.6) unstable; urgency=medium
* Backport endpoint mitigation against address collision attack.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 21 Sep 2021 01:00:00 -0700
zerotier-one (1.6.5) unstable; urgency=medium
* Fix path filtering bug that could cause "software laser" effect.
* Fix printf overflow in CLI (not exploitable or security related)
* Fix Windows device enumeration issue.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 13 Apr 2021 01:00:00 -0700
zerotier-one (1.6.4) unstable; urgency=medium
* REALLY fix a problem causing nodes to go into a "coma" with some network configurations.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 15 Feb 2021 01:00:00 -0700
zerotier-one (1.6.3-1) unstable; urgency=medium
* Fix a problem causing nodes to go into a "coma" with some network configurations.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 02 Feb 2021 01:00:00 -0700
zerotier-one (1.6.2-2) unstable; urgency=medium
* This is a minor update to the 1.6.2 package to address issues with
running on ARMv6 CPUs like the Raspberry Pi Zero and original v1 Pi.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 31 Nov 2020 01:00:00 -0700
zerotier-one (1.6.2) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 30 Nov 2020 01:00:00 -0700
zerotier-one (1.6.1) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 24 Nov 2020 01:00:00 -0700
zerotier-one (1.6.0) unstable; urgency=medium
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 19 Nov 2020 01:00:00 -0700
zerotier-one (1.5.0) unstable; urgency=medium
* Version 1.5.0 is actually 1.6.0-beta1
* See RELEASE-NOTES.md for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 05 Aug 2020 01:00:00 -0700
zerotier-one (1.4.6) unstable; urgency=medium
* Update default root server list
* Fix build flags on "armhf" (32-bit ARM) platforms for better
compatibility with Pi Zero and other devices.
* Fix license text in one.cpp.
* Add a clarification to LICENSE.txt.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 30 Aug 2019 01:00:00 -0700
zerotier-one (1.4.4) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
* License changed to BSL 1.1
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 23 Aug 2019 01:00:00 -0700
zerotier-one (1.4.2-2) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
* This is a new build that fixes a binary build issue with containers and SELinux
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 04 Aug 2019 01:00:00 -0700
zerotier-one (1.4.2) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 04 Aug 2019 01:00:00 -0700
zerotier-one (1.4.0) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Thu, 29 Jul 2019 01:00:00 -0700
zerotier-one (1.2.12) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 25 Jul 2018 01:00:00 -0700
zerotier-one (1.2.10) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 08 May 2018 01:00:00 -0700
zerotier-one (1.2.8) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 27 Apr 2018 01:00:00 -0700
zerotier-one (1.2.6) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 17 Apr 2018 01:00:00 -0700
zerotier-one (1.2.4) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Mon, 24 Mar 2017 01:00:00 -0700
zerotier-one (1.2.2) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 17 Mar 2017 01:00:00 -0700
zerotier-one (1.2.0) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 14 Mar 2017 09:08:00 -0700
zerotier-one (1.1.14) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 21 Jul 2016 07:14:12 -0700
zerotier-one (1.1.12) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 12 Jul 2016 03:02:22 -0700
zerotier-one (1.1.10) unstable; urgency=medium
* See https://github.com/zerotier/ZeroTierOne for release notes.
* ZeroTier Debian packages no longer depend on http-parser since its ABI is too unstable.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Tue, 12 Jul 2016 12:29:00 -0700
zerotier-one (1.1.8) unstable; urgency=low
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 08 Jul 2016 01:56:00 -0700
zerotier-one (1.1.6) unstable; urgency=medium
* First Debian release on ZeroTier, Inc. private apt repository.
* See https://github.com/zerotier/ZeroTierOne for release notes.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Fri, 24 Jun 2016 10:00:00 -0700
zerotier-one (1.1.5) UNRELEASED; urgency=medium
* Development package -- first clean Debian packaging test.
-- Adam Ierymenko <adam.ierymenko@zerotier.com> Wed, 08 Jun 2016 10:05:01 -0700

1
debian/compat vendored
View file

@ -1 +0,0 @@
8

19
debian/control vendored
View file

@ -1,19 +0,0 @@
Source: zerotier-one
Maintainer: Adam Ierymenko <adam.ierymenko@zerotier.com>
Section: net
Priority: optional
Standards-Version: 3.9.6
Build-Depends: debhelper (>= 9)
Vcs-Git: git://github.com/zerotier/ZeroTierOne
Vcs-Browser: https://github.com/zerotier/ZeroTierOne
Homepage: https://www.zerotier.com/
Package: zerotier-one
Architecture: any
Depends: iproute2, adduser, libstdc++6 (>= 5), openssl
Homepage: https://www.zerotier.com/
Description: ZeroTier network virtualization service
ZeroTier One lets you join ZeroTier virtual networks and
have them appear as tun/tap ports on your system. See
https://www.zerotier.com/ for instructions and
documentation.

19
debian/control.wheezy vendored
View file

@ -1,19 +0,0 @@
Source: zerotier-one
Maintainer: Adam Ierymenko <adam.ierymenko@zerotier.com>
Section: net
Priority: optional
Standards-Version: 3.9.4
Build-Depends: debhelper
Vcs-Git: git://github.com/zerotier/ZeroTierOne
Vcs-Browser: https://github.com/zerotier/ZeroTierOne
Homepage: https://www.zerotier.com/
Package: zerotier-one
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}, iproute, libstdc++6
Homepage: https://www.zerotier.com/
Description: ZeroTier network virtualization service
ZeroTier One lets you join ZeroTier virtual networks and
have them appear as tun/tap ports on your system. See
https://www.zerotier.com/ for instructions and
documentation.

18
debian/copyright vendored
View file

@ -1,18 +0,0 @@
Format: http://dep.debian.net/deps/dep5
Upstream-Name: zerotier-one
Source: https://github.com/zerotier/ZeroTierOne
Files: *
Copyright: 2011-2016 ZeroTier, Inc.
License: ZeroTier BSL 1.1
License: ZeroTier BSL 1.1
Copyright (c)2019 ZeroTier, Inc.
Use of this software is governed by the Business Source License included
in the LICENSE.TXT file in the project's root directory.
Change Date: 2025-01-01
On the date above, in accordance with the Business Source License, use
of this software will be governed by version 2.0 of the Apache License.

11
debian/postinst vendored
View file

@ -1,11 +0,0 @@
#!/bin/sh -e
case "$1" in
configure)
if ! id zerotier-one >>/dev/null 2>&1; then
useradd --system --user-group --home-dir /var/lib/zerotier-one --shell /usr/sbin/nologin --no-create-home zerotier-one
fi
;;
esac
#DEBHELPER#

16
debian/rules vendored
View file

@ -1,16 +0,0 @@
#!/usr/bin/make -f
CFLAGS=-O3 -fstack-protector-strong
CXXFLAGS=-O3 -fstack-protector-strong
%:
dh $@ --with systemd
override_dh_auto_build:
make
override_dh_systemd_start:
dh_systemd_start --restart-after-upgrade
override_dh_installinit:
dh_installinit --name=zerotier-one -- defaults

16
debian/rules.static vendored
View file

@ -1,16 +0,0 @@
#!/usr/bin/make -f
CFLAGS=-O3 -fstack-protector-strong
CXXFLAGS=-O3 -fstack-protector-strong
%:
dh $@ --with systemd
override_dh_auto_build:
# make -j 2
override_dh_systemd_start:
dh_systemd_start --restart-after-upgrade
override_dh_installinit:
dh_installinit --name=zerotier-one -- defaults

11
debian/rules.wheezy vendored
View file

@ -1,11 +0,0 @@
#!/usr/bin/make -f
CFLAGS=-O3 -fstack-protector
CXXFLAGS=-O3 -fstack-protector
%:
dh $@
override_dh_auto_build:
make -j 2

View file

@ -1,11 +0,0 @@
#!/usr/bin/make -f
CFLAGS=-O3 -fstack-protector
CXXFLAGS=-O3 -fstack-protector
%:
dh $@
override_dh_auto_build:
# make -j 2

View file

@ -1 +0,0 @@
3.0 (quilt)

View file

@ -1,4 +0,0 @@
[zerotier-one]
title=ZeroTier One
description=A planetary Ethernet switch
ports=9993/udp

View file

@ -1,49 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: zerotier-one
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: ZeroTier One network virtualization service
### END INIT INFO
PATH=/bin:/usr/bin:/sbin:/usr/sbin
DESC="zerotier-one daemon"
NAME=zerotier-one
DAEMON=/usr/sbin/zerotier-one
PIDFILE=/var/lib/zerotier-one/zerotier-one.pid
SCRIPTNAME=/etc/init.d/"$NAME"
EXTRA_OPTS=-d
test -f $DAEMON || exit 0
. /lib/lsb/init-functions
case "$1" in
start) log_daemon_msg "Starting ZeroTier One" "zerotier-one"
start_daemon -p $PIDFILE $DAEMON $EXTRA_OPTS
log_end_msg $?
;;
stop) log_daemon_msg "Stopping ZeroTier One" "zerotier-one"
killproc -p $PIDFILE $DAEMON
RETVAL=$?
[ $RETVAL -eq 0 ] && [ -e "$PIDFILE" ] && rm -f $PIDFILE
log_end_msg $RETVAL
;;
restart) log_daemon_msg "Restarting ZeroTier One" "zerotier-one"
$0 stop
$0 start
;;
reload|force-reload) log_daemon_msg "Reloading ZeroTier One" "zerotier-one"
log_end_msg 0
;;
status)
status_of_proc -p $PIDFILE $DAEMON $NAME && exit 0 || exit $?
;;
*) log_action_msg "Usage: /etc/init.d/cron {start|stop|status|restart|reload|force-reload}"
exit 2
;;
esac
exit 0

View file

@ -1,12 +0,0 @@
[Unit]
Description=ZeroTier One
After=network-online.target network.target
Wants=network-online.target
[Service]
ExecStart=/usr/sbin/zerotier-one
Restart=always
KillMode=process
[Install]
WantedBy=multi-user.target

View file

@ -1,14 +0,0 @@
description "ZeroTier One upstart startup script"
author "Adam Ierymenko <adam.ierymenko@zerotier.com>"
start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [!2345]
respawn
respawn limit 2 300
#pre-start script
#end script
exec /usr/sbin/zerotier-one

View file

@ -16,8 +16,8 @@ lz4_flex = { version = "^0", features = ["safe-encode", "safe-decode", "checked-
serde = { version = "^1", features = ["derive"], default-features = false }
phf = { version = "^0", features = ["macros", "std"], default-features = false }
num-traits = "^0"
rmp-serde = "^1"
fastcdc = "^3"
serde_cbor = "^0"
[dev-dependencies]
rand = "*"

View file

@ -1,11 +1,11 @@
use criterion::{criterion_group, criterion_main, Criterion};
use std::time::Duration;
use zerotier_network_hypervisor::vl1::Identity;
use zerotier_network_hypervisor::vl1::identity::Identity;
pub fn criterion_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("basic");
group.measurement_time(Duration::new(30, 0));
group.bench_function("identity generation", |b| b.iter(|| Identity::generate()));
group.bench_function("identity generation", |b| b.iter(|| Identity::generate(false)));
group.finish();
}

View file

@ -129,24 +129,12 @@ pub const UDP_DEFAULT_MTU: usize = 1432;
/// Default MTU inside VL2 virtual networks.
pub const ZEROTIER_VIRTUAL_NETWORK_DEFAULT_MTU: usize = 2800;
/// Default multicast limit if not set in the network.
pub const DEFAULT_MULTICAST_LIMIT: usize = 32;
/// Length of an address in bytes.
pub const ADDRESS_SIZE: usize = 5;
/// Length of an address in string format.
pub const ADDRESS_SIZE_STRING: usize = 10;
/// Prefix indicating reserved addresses (that can't actually be addresses).
pub const ADDRESS_RESERVED_PREFIX: u8 = 0xff;
/// Bit mask for address bits in a u64.
pub const ADDRESS_MASK: u64 = 0xffffffffff;
pub mod v1 {
use super::*;
/// Default multicast limit if not set in the network.
pub const DEFAULT_MULTICAST_LIMIT: usize = 32;
/// Size of packet header that lies outside the encryption envelope.
pub const HEADER_SIZE: usize = 27;
@ -307,10 +295,10 @@ pub mod v1 {
}
#[inline(always)]
pub fn get_packet_aad_bytes(destination: Address, source: Address, flags_cipher_hops: u8) -> [u8; 11] {
pub fn get_packet_aad_bytes(destination: &Address, source: &Address, flags_cipher_hops: u8) -> [u8; 11] {
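// AAD layout: 5 legacy destination address bytes, 5 legacy source address bytes, then the flags/cipher/hops
// byte with the hops bits masked out so that in-flight hop count changes do not break authentication.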
let mut id = [0u8; 11];
id[0..5].copy_from_slice(&destination.to_bytes());
id[5..10].copy_from_slice(&source.to_bytes());
id[0..5].copy_from_slice(destination.legacy_bytes());
id[5..10].copy_from_slice(source.legacy_bytes());
id[10] = flags_cipher_hops & FLAGS_FIELD_MASK_HIDE_HOPS;
id
}
@ -559,9 +547,6 @@ pub(crate) const PEER_HELLO_INTERVAL_MAX: i64 = 300000;
/// Timeout for path association with peers and for peers themselves.
pub(crate) const PEER_EXPIRATION_TIME: i64 = (PEER_HELLO_INTERVAL_MAX * 2) + 10000;
/// Proof of work difficulty (threshold) for identity generation.
pub(crate) const IDENTITY_POW_THRESHOLD: u8 = 17;
// Multicast LIKE expire time in milliseconds.
pub const VL2_DEFAULT_MULTICAST_LIKE_EXPIRE: i64 = 600000;

View file

@ -1,95 +1,144 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::num::NonZeroU64;
use std::hash::Hash;
use std::ops::Bound;
use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::protocol::{ADDRESS_RESERVED_PREFIX, ADDRESS_SIZE};
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::{base24, hex, memory};
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::hex;
/// A unique address on the global ZeroTier VL1 network.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
/// A full (V2) ZeroTier address.
///
/// The first 40 bits (5 bytes) of the address are the legacy 40-bit short ZeroTier address computed from
/// a hash of the identity's X25519 keys. The remaining bits are a SHA384 hash of that short address and
/// all key types and key material. See identity.rs for details.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct Address(NonZeroU64);
pub struct Address(pub(super) [u8; Self::SIZE_BYTES]);
/// A partial address, which is bytes and the number of bytes of specificity (similar to a CIDR IP address).
///
/// Partial addresses are looked up to get full addresses (and identities) via roots using WHOIS messages.
#[derive(Clone, PartialEq, Eq)]
pub struct PartialAddress {
pub(super) address: Address,
pub(super) specificity: u16,
}
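As a rough usage sketch (assuming these types are imported from this module and the 48 bytes come from a real identity; not part of the crate itself):

use zerotier_network_hypervisor::vl1::{Address, PartialAddress};

fn address_example(full: &[u8; 48]) {
    // from_bytes() requires a first byte != 0xff and a nonzero 40-bit prefix.
    let addr = Address::from_bytes(full).expect("valid 48-byte address");

    // A 40-bit legacy partial covers only the first five bytes.
    let legacy: PartialAddress = addr.to_legacy_partial();
    assert!(legacy.is_legacy());
    assert!(legacy.matches(&addr));

    // A fully specific partial identifies exactly one address.
    let exact = addr.to_partial();
    assert!(exact.is_complete());
    assert!(exact.matches(&addr));
}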
impl Address {
/// Get an address from a 64-bit integer or return None if it is zero or reserved.
#[inline(always)]
pub fn from_u64(mut i: u64) -> Option<Address> {
i &= 0xffffffffff;
NonZeroU64::new(i).and_then(|ii| {
if (i >> 32) != ADDRESS_RESERVED_PREFIX as u64 {
Some(Address(ii))
} else {
None
}
})
}
pub const SIZE_BYTES: usize = 48;
/// The first byte of an address cannot be 0xff.
pub const RESERVED_PREFIX: u8 = 0xff;
#[inline(always)]
pub fn from_bytes(b: &[u8]) -> Option<Address> {
if b.len() >= ADDRESS_SIZE {
Self::from_u64((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64)
pub(super) fn new_uninitialized() -> Self {
Self([0u8; Self::SIZE_BYTES])
}
#[inline]
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidParameterError> {
if b.len() == Self::SIZE_BYTES && b[0] != Address::RESERVED_PREFIX && b[..PartialAddress::LEGACY_SIZE_BYTES].iter().any(|i| *i != 0) {
Ok(Self(b.try_into().unwrap()))
} else {
None
Err(InvalidParameterError("invalid address"))
}
}
/// Get the first 40 bits of this address (a legacy V1 ZeroTier address)
#[inline(always)]
pub fn legacy_bytes(&self) -> &[u8; 5] {
memory::array_range::<u8, { Address::SIZE_BYTES }, 0, { PartialAddress::LEGACY_SIZE_BYTES }>(&self.0)
}
/// Get the legacy address in the least significant bits of a u64.
#[inline(always)]
pub(crate) fn legacy_u64(&self) -> u64 {
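// Load the first eight bytes as a big-endian u64 and shift right by 24 bits, leaving the 40-bit
// legacy prefix in the low bits.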
u64::from_be(memory::load_raw(&self.0)).wrapping_shr(24)
}
/// Get a partial address object (with full specificity) for this address
#[inline]
pub fn to_partial(&self) -> PartialAddress {
PartialAddress {
address: Address(self.0),
specificity: Self::SIZE_BYTES as u16,
}
}
/// Get a partial address covering the 40-bit legacy address.
#[inline]
pub fn to_legacy_partial(&self) -> PartialAddress {
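// Only the first five bytes are meaningful; the rest of the backing array stays zero and is
// ignored at legacy (40-bit) specificity.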
PartialAddress {
address: Address({
let mut tmp = [0u8; PartialAddress::MAX_SIZE_BYTES];
tmp[..PartialAddress::LEGACY_SIZE_BYTES].copy_from_slice(&self.0[..PartialAddress::LEGACY_SIZE_BYTES]);
tmp
}),
specificity: PartialAddress::LEGACY_SIZE_BYTES as u16,
}
}
#[inline(always)]
pub fn from_bytes_fixed(b: &[u8; ADDRESS_SIZE]) -> Option<Address> {
Self::from_u64((b[0] as u64) << 32 | (b[1] as u64) << 24 | (b[2] as u64) << 16 | (b[3] as u64) << 8 | b[4] as u64)
}
#[inline(always)]
pub fn to_bytes(&self) -> [u8; ADDRESS_SIZE] {
let i = self.0.get();
[(i >> 32) as u8, (i >> 24) as u8, (i >> 16) as u8, (i >> 8) as u8, i as u8]
pub fn as_bytes(&self) -> &[u8; Self::SIZE_BYTES] {
&self.0
}
}
impl From<Address> for u64 {
impl Borrow<[u8; Self::SIZE_BYTES]> for Address {
#[inline(always)]
fn from(a: Address) -> Self {
a.0.get()
}
}
impl From<&Address> for u64 {
#[inline(always)]
fn from(a: &Address) -> Self {
a.0.get()
fn borrow(&self) -> &[u8; Self::SIZE_BYTES] {
&self.0
}
}
impl ToString for Address {
#[inline(always)]
fn to_string(&self) -> String {
let mut v = self.0.get() << 24;
let mut s = String::with_capacity(ADDRESS_SIZE * 2);
for _ in 0..(ADDRESS_SIZE * 2) {
s.push(hex::HEX_CHARS[(v >> 60) as usize] as char);
v <<= 4;
let mut s = String::with_capacity(96);
let mut x = 0;
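// Encode the 48 bytes as twelve 4-byte base24 groups; every fourth separator is '.' and the
// others are '-', producing the dotted form parsed by from_str() below.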
for c in self.0.chunks(4) {
if !s.is_empty() {
if (x & 3) == 0 {
s.push('.');
} else {
s.push('-');
}
}
x += 1;
base24::encode_into(c, &mut s);
}
s
}
}
impl FromStr for Address {
type Err = InvalidFormatError;
type Err = InvalidParameterError;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> {
Address::from_bytes(hex::from_string(s).as_slice()).map_or_else(|| Err(InvalidFormatError), |a| Ok(a))
let mut a = Self([0u8; Self::SIZE_BYTES]);
let mut f = 0;
for ss in s.split(&['-', '.']) {
if ss.len() > 0 {
base24::decode_into_slice(ss.as_bytes(), &mut a.0[f * 4..(f + 1) * 4])?;
f += 1;
}
}
return Ok(a);
}
}
impl Hash for Address {
#[inline(always)]
fn hash<H: Hasher>(&self, state: &mut H) {
state.write_u64(self.0.get());
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// Since this contains a random hash, the first 64 bits should be enough for a local HashMap etc.
state.write_u64(memory::load_raw(&self.0))
}
}
@ -101,6 +150,7 @@ impl Debug for Address {
}
impl Serialize for Address {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
@ -108,31 +158,30 @@ impl Serialize for Address {
if serializer.is_human_readable() {
serializer.serialize_str(self.to_string().as_str())
} else {
serializer.serialize_bytes(&self.to_bytes())
serializer.serialize_bytes(self.as_bytes())
}
}
}
struct AddressVisitor;
struct AddressDeserializeVisitor;
impl<'de> serde::de::Visitor<'de> for AddressVisitor {
impl<'de> serde::de::Visitor<'de> for AddressDeserializeVisitor {
type Value = Address;
#[inline]
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a ZeroTier address")
formatter.write_str("address")
}
#[inline]
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if v.len() == ADDRESS_SIZE {
Address::from_bytes(v).map_or_else(|| Err(E::custom("object too large")), |a| Ok(a))
} else {
Err(E::custom("object size incorrect"))
}
Address::from_bytes(v).map_err(|_| E::custom("invalid address"))
}
#[inline]
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
@ -142,138 +191,384 @@ impl<'de> serde::de::Visitor<'de> for AddressVisitor {
}
impl<'de> Deserialize<'de> for Address {
fn deserialize<D>(deserializer: D) -> Result<Address, D::Error>
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
if deserializer.is_human_readable() {
deserializer.deserialize_str(AddressVisitor)
deserializer.deserialize_str(AddressDeserializeVisitor)
} else {
deserializer.deserialize_bytes(AddressVisitor)
deserializer.deserialize_bytes(AddressDeserializeVisitor)
}
}
}
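A minimal serde round-trip sketch of the human-readable path (assumes a JSON backend such as serde_json is available, as in this crate's tests):

fn serde_roundtrip(addr: &Address) {
    // Human-readable formats serialize the address as its base24 string form.
    let json = serde_json::to_string(addr).unwrap();
    let back: Address = serde_json::from_str(&json).unwrap();
    assert!(back == *addr);
}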
impl PartialAddress {
/// Minimum number of specified bits in an address.
pub const MIN_SPECIFICITY: usize = Self::MIN_SIZE_BYTES * 8;
/// Maximum number of specified bits in an address.
pub const MAX_SPECIFICITY: usize = Self::MAX_SIZE_BYTES * 8;
pub const LEGACY_SIZE_BYTES: usize = 5;
pub const MIN_SIZE_BYTES: usize = Self::LEGACY_SIZE_BYTES;
pub const MAX_SIZE_BYTES: usize = Address::SIZE_BYTES;
const fn is_valid_specificity(s: u16) -> bool {
match s {
5 | 16 | 32 | 48 => true,
_ => false,
}
}
/// Construct an address from a byte slice with its length determining specificity.
#[inline]
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidParameterError> {
if b.len() >= Self::MIN_SIZE_BYTES
&& b.len() <= Self::MAX_SIZE_BYTES
&& Self::is_valid_specificity(b.len() as u16)
&& b[0] != Address::RESERVED_PREFIX
&& b[..Self::LEGACY_SIZE_BYTES].iter().any(|i| *i != 0)
{
let mut a = Self {
address: Address([0u8; Address::SIZE_BYTES]),
specificity: b.len() as u16,
};
a.address.0[..b.len()].copy_from_slice(b);
Ok(a)
} else {
Err(InvalidParameterError("invalid address"))
}
}
#[inline]
pub(crate) fn from_legacy_address_bytes(b: &[u8; 5]) -> Result<Self, InvalidParameterError> {
if b[0] != Address::RESERVED_PREFIX && b.iter().any(|i| *i != 0) {
Ok(Self {
address: Address({
let mut tmp = [0u8; Self::MAX_SIZE_BYTES];
tmp[..5].copy_from_slice(b);
tmp
}),
specificity: Self::LEGACY_SIZE_BYTES as u16,
})
} else {
Err(InvalidParameterError("invalid address"))
}
}
#[inline]
pub(crate) fn from_legacy_address_u64(mut b: u64) -> Result<Self, InvalidParameterError> {
b &= 0xffffffffff;
if b.wrapping_shr(32) != (Address::RESERVED_PREFIX as u64) && b != 0 {
Ok(Self {
address: Address({
let mut tmp = [0u8; Self::MAX_SIZE_BYTES];
tmp[..5].copy_from_slice(&b.to_be_bytes()[..5]);
tmp
}),
specificity: Self::LEGACY_SIZE_BYTES as u16,
})
} else {
Err(InvalidParameterError("invalid address"))
}
}
#[inline(always)]
pub fn as_bytes(&self) -> &[u8] {
debug_assert!(Self::is_valid_specificity(self.specificity));
&self.address.0[..self.specificity as usize]
}
#[inline(always)]
pub(crate) fn legacy_bytes(&self) -> &[u8; 5] {
debug_assert!(Self::is_valid_specificity(self.specificity));
memory::array_range::<u8, { Address::SIZE_BYTES }, 0, { PartialAddress::LEGACY_SIZE_BYTES }>(&self.address.0)
}
#[inline(always)]
pub(crate) fn legacy_u64(&self) -> u64 {
u64::from_be(memory::load_raw(&self.address.0)).wrapping_shr(24)
}
/// Returns true if this partial address matches a full length address up to this partial's specificity.
#[inline(always)]
pub fn matches(&self, k: &Address) -> bool {
debug_assert!(Self::is_valid_specificity(self.specificity));
let l = self.specificity as usize;
self.address.0[..l].eq(&k.0[..l])
}
/// Returns true if this partial address matches another up to the lower of the two addresses' specificities.
#[inline(always)]
pub fn matches_partial(&self, k: &PartialAddress) -> bool {
debug_assert!(Self::is_valid_specificity(self.specificity));
let l = self.specificity.min(k.specificity) as usize;
self.address.0[..l].eq(&k.address.0[..l])
}
/// Get the number of bits of specificity in this address
#[inline(always)]
pub fn specificity_bits(&self) -> usize {
(self.specificity * 8) as usize
}
/// Get the number of bytes of specificity in this address (only 8 bit increments in specificity are allowed)
#[inline(always)]
pub fn specificity_bytes(&self) -> usize {
self.specificity as usize
}
/// Returns true if this address has legacy 40 bit specificity (V1 ZeroTier address)
#[inline(always)]
pub fn is_legacy(&self) -> bool {
self.specificity == Self::LEGACY_SIZE_BYTES as u16
}
/// Get a complete address from this partial if it is in fact complete.
#[inline]
pub fn as_complete(&self) -> Option<&Address> {
if self.specificity == Self::MAX_SIZE_BYTES as u16 {
Some(&self.address)
} else {
None
}
}
/// Returns true if specificity is at the maximum value (384 bits)
#[inline(always)]
pub fn is_complete(&self) -> bool {
self.specificity == Self::MAX_SIZE_BYTES as u16
}
/// Efficiently find an entry in a BTreeMap of partial addresses that uniquely matches this partial.
///
/// This returns None if there is no match or if this partial matches more than one entry, in which
/// case it's ambiguous and may be unsafe to use. This should be prohibited at other levels of the
/// system but is checked for here as well.
#[inline]
pub fn find_unique_match<'a, T>(&self, map: &'a BTreeMap<PartialAddress, T>) -> Option<&'a T> {
// Search for an exact or more specific match.
let mut m = None;
// First search for exact or more specific matches, which would appear later in the sorted key list.
let mut pos = map.range((Bound::Included(self), Bound::Unbounded));
while let Some(e) = pos.next() {
if self.matches_partial(e.0) {
if m.is_some() {
// Ambiguous!
return None;
}
let _ = m.insert(e.1);
} else {
break;
}
}
// Then search for less specific matches or verify that the match we found above is not ambiguous.
let mut pos = map.range((Bound::Unbounded, Bound::Excluded(self)));
while let Some(e) = pos.next_back() {
if self.matches_partial(e.0) {
if m.is_some() {
return None;
}
let _ = m.insert(e.1);
} else {
break;
}
}
return m;
}
/// Efficiently find an entry in a BTreeMap of partial addresses that uniquely matches this partial.
///
/// This returns None if there is no match or if this partial matches more than one entry, in which
/// case it's ambiguous and may be unsafe to use. This should be prohibited at other levels of the
/// system but is checked for here as well.
#[inline(always)]
pub fn find_unique_match_mut<'a, T>(&self, map: &'a mut BTreeMap<PartialAddress, T>) -> Option<&'a mut T> {
// This not only saves some repetition but is in fact the only way to easily do this. The same code as
// find_unique_match() but with range_mut() doesn't compile because the second range_mut() would
// borrow 'map' a second time (since 'm' may have it borrowed). This is primarily due to the too-limited
// API of BTreeMap which is missing a good way to find the nearest match. This should be safe since
// we do not mutate the map and the signature of find_unique_match_mut() should properly guarantee
// that the semantics of mutable references are obeyed in the calling context.
unsafe { std::mem::transmute(self.find_unique_match::<T>(map)) }
}
}
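A rough usage sketch of the unique-match lookup above (hypothetical peer table; assumes Address and PartialAddress from this module):

use std::collections::BTreeMap;

fn lookup_example(addr: &Address) {
    let mut peers: BTreeMap<PartialAddress, &str> = BTreeMap::new();
    peers.insert(addr.to_partial(), "peer-1");

    // A 40-bit legacy query resolves to the entry as long as no other key shares the same
    // five-byte prefix; an ambiguous match would return None instead.
    let query = addr.to_legacy_partial();
    assert_eq!(query.find_unique_match(&peers).copied(), Some("peer-1"));
}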
impl Ord for PartialAddress {
#[inline(always)]
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.address.cmp(&other.address).then(self.specificity.cmp(&other.specificity))
}
}
impl PartialOrd for PartialAddress {
#[inline(always)]
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl ToString for PartialAddress {
fn to_string(&self) -> String {
debug_assert!(Self::is_valid_specificity(self.specificity));
if self.is_legacy() {
hex::to_string(&self.address.0[..Self::LEGACY_SIZE_BYTES])
} else {
let mut s = String::with_capacity(96);
let mut i = 0;
while i < self.specificity {
let ii = i + 4;
if !s.is_empty() {
if (i & 15) == 0 {
s.push('.');
} else {
s.push('-');
}
}
base24::encode_into(&self.address.0[i as usize..ii as usize], &mut s);
i = ii;
}
s
}
}
}
impl FromStr for PartialAddress {
type Err = InvalidParameterError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.len() == 10 {
return Self::from_bytes(hex::from_string(s).as_slice());
} else {
let mut a = Address([0u8; Address::SIZE_BYTES]);
let mut f = 0;
let mut specificity = 0;
for ss in s.split(&['-', '.']) {
if ss.len() > 0 {
base24::decode_into_slice(ss.as_bytes(), &mut a.0[f * 4..(f + 1) * 4])?;
f += 1;
specificity += 4;
}
}
if Self::is_valid_specificity(specificity) {
return Ok(Self { address: a, specificity });
} else {
return Err(InvalidParameterError("illegal specificity"));
}
}
}
}
impl Hash for PartialAddress {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// Since this contains a random hash, the first 64 bits should be enough for a local HashMap etc.
state.write_u64(memory::load_raw(&self.address.0))
}
}
impl Debug for PartialAddress {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.to_string().as_str())
}
}
impl Serialize for PartialAddress {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
if serializer.is_human_readable() {
serializer.serialize_str(self.to_string().as_str())
} else {
serializer.serialize_bytes(self.as_bytes())
}
}
}
struct PartialAddressDeserializeVisitor;
impl<'de> serde::de::Visitor<'de> for PartialAddressDeserializeVisitor {
type Value = PartialAddress;
#[inline]
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("address")
}
#[inline]
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
PartialAddress::from_bytes(v).map_err(|_| E::custom("invalid address"))
}
#[inline]
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
PartialAddress::from_str(v).map_err(|e| E::custom(e.to_string()))
}
}
impl<'de> Deserialize<'de> for PartialAddress {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
if deserializer.is_human_readable() {
deserializer.deserialize_str(PartialAddressDeserializeVisitor)
} else {
deserializer.deserialize_bytes(PartialAddressDeserializeVisitor)
}
}
}
#[cfg(test)]
mod tests {
fn safe_address() -> super::Address {
let mut addr: Option<super::Address>;
use super::*;
use zerotier_crypto::random;
'retry: loop {
let rawaddr: u64 = rand::random();
addr = super::Address::from_u64(rawaddr);
#[test]
fn to_from_string() {
for _ in 0..64 {
let mut tmp = Address::new_uninitialized();
random::fill_bytes_secure(&mut tmp.0);
let s = tmp.to_string();
//println!("{}", s);
let tmp2 = Address::from_str(s.as_str()).unwrap();
assert!(tmp == tmp2);
}
}
if addr.is_some() {
break 'retry;
#[test]
fn to_from_string_partial() {
let mut tmp = [0u8; Address::SIZE_BYTES];
for _ in 0..64 {
for s in [5, 16, 32, 48] {
random::fill_bytes_secure(&mut tmp);
if tmp[0] == Address::RESERVED_PREFIX {
tmp[0] = 1;
}
if tmp[1] == 0 {
tmp[1] = 1;
}
let partial = PartialAddress::from_bytes(&tmp[..s]).unwrap();
let s = partial.to_string();
//println!("{}", s);
let partial2 = PartialAddress::from_str(s.as_str()).unwrap();
assert!(partial == partial2);
}
}
addr.unwrap()
}
#[test]
fn address_marshal_u64() {
let mut rawaddr: u64 = rand::random();
let addr = super::Address::from_u64(rawaddr);
assert!(addr.is_some());
let addr: u64 = addr.unwrap().into();
assert_eq!(addr, rawaddr & 0xffffffffff);
rawaddr = 0;
assert!(super::Address::from_u64(rawaddr).is_none());
rawaddr = (crate::protocol::ADDRESS_RESERVED_PREFIX as u64) << 32;
assert!(super::Address::from_u64(rawaddr).is_none());
}
#[test]
fn address_marshal_bytes() {
use crate::protocol::ADDRESS_SIZE;
let mut v: Vec<u8> = Vec::with_capacity(ADDRESS_SIZE);
let mut i = 0;
while i < ADDRESS_SIZE {
v.push(rand::random());
i += 1;
}
let addr = super::Address::from_bytes(v.as_slice());
assert!(addr.is_some());
assert_eq!(addr.unwrap().to_bytes(), v.as_slice());
let empty: Vec<u8> = Vec::new();
let emptyaddr = super::Address::from_bytes(empty.as_slice());
assert!(emptyaddr.is_none());
let mut v2: [u8; ADDRESS_SIZE] = [0u8; ADDRESS_SIZE];
let mut i = 0;
while i < ADDRESS_SIZE {
v2[i] = v[i];
i += 1;
}
let addr2 = super::Address::from_bytes_fixed(&v2);
assert!(addr2.is_some());
assert_eq!(addr2.unwrap().to_bytes(), v2);
assert_eq!(addr.unwrap(), addr2.unwrap());
}
#[test]
fn address_to_from_string() {
use std::str::FromStr;
for _ in 0..1000 {
let rawaddr: u64 = rand::random();
let addr = super::Address::from_u64(rawaddr);
// NOTE: a regression here is covered by other tests and should not break this test
// accidentally.
if addr.is_none() {
continue;
}
let addr = addr.unwrap();
assert_ne!(addr.to_string(), "");
assert_eq!(addr.to_string().len(), 10);
assert_eq!(super::Address::from_str(&addr.to_string()).unwrap(), addr);
}
}
#[test]
fn address_hash() {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
let addr = safe_address();
addr.hash(&mut hasher);
let result1 = hasher.finish();
// this loop is mostly to ensure that hash returns a consistent result every time.
for _ in 0..1000 {
let mut hasher = std::collections::hash_map::DefaultHasher::new();
addr.hash(&mut hasher);
let result2 = hasher.finish();
assert_ne!(result2.to_string(), "");
assert_eq!(result1.to_string(), result2.to_string());
}
}
#[test]
fn address_serialize() {
let addr = safe_address();
for _ in 0..1000 {
assert_eq!(
serde_json::from_str::<super::Address>(&serde_json::to_string(&addr).unwrap()).unwrap(),
addr
);
assert_eq!(
serde_cbor::from_slice::<super::Address>(&serde_cbor::to_vec(&addr).unwrap()).unwrap(),
addr
);
}
}
}

View file

@ -0,0 +1,186 @@
use std::hash::Hash;
use std::sync::Arc;
use super::endpoint::Endpoint;
use super::event::Event;
use super::identity::Identity;
use super::node::Node;
use super::path::Path;
use super::peer::Peer;
use crate::protocol::{PacketBuffer, PooledPacketBuffer};
use zerotier_crypto::typestate::Valid;
/// Interface trait to be implemented by code that's using the ZeroTier network hypervisor.
///
/// This is analogous to a C struct full of function pointers to callbacks along with some
/// associated type definitions.
pub trait ApplicationLayer: Sync + Send + 'static {
/// Type for local system sockets.
type LocalSocket: Sync + Send + Hash + PartialEq + Eq + Clone + ToString + Sized + 'static;
/// Type for local system interfaces.
type LocalInterface: Sync + Send + Hash + PartialEq + Eq + Clone + ToString + Sized + 'static;
/// A VL1 level event occurred.
fn event(&self, event: Event);
/// Get a pooled packet buffer for internal use.
fn get_buffer(&self) -> PooledPacketBuffer;
/// Check a local socket for validity.
///
/// This could return false if the socket's interface no longer exists, its port has been
/// unbound, etc.
fn local_socket_is_valid(&self, socket: &Self::LocalSocket) -> bool;
/// Check if this node should respond to messages from a given peer at all.
///
/// The default implementation always returns true. Typically this is what you want for a
/// controller or a root but not a regular node (unless required for backward compatibility).
#[allow(unused)]
fn should_respond_to(&self, id: &Valid<Identity>) -> bool {
true
}
/// Called to send a packet over the physical network (virtual -> physical).
///
/// This sends with UDP-like semantics: the implementation should make a best effort to send and then return.
///
/// If a local socket is specified the implementation should send from that socket or not
/// at all (returning false). If a local interface is specified the implementation should
/// send from all sockets on that interface. If neither is specified the packet may be
/// sent on all sockets or a random subset.
///
/// For endpoint types that support a packet TTL, the implementation may set the TTL
/// if the 'ttl' parameter is not zero. If the parameter is zero or TTL setting is not
/// supported, the default TTL should be used. This parameter is ignored for types that
/// don't support it.
fn wire_send(
&self,
endpoint: &Endpoint,
local_socket: Option<&Self::LocalSocket>,
local_interface: Option<&Self::LocalInterface>,
data: &[u8],
packet_ttl: u8,
);
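For illustration only, a standalone sketch of the dispatch policy described above, using plain std::net::UdpSocket and a hypothetical BoundSocket type in place of the LocalSocket/LocalInterface generics; this is not the service's actual implementation:

use std::net::{SocketAddr, UdpSocket};

// Hypothetical stand-in for a bound local socket plus the interface it lives on.
struct BoundSocket {
    socket: UdpSocket,
    interface: String,
}

// Best effort send: a specific socket wins, then all sockets on a named interface,
// otherwise every socket. Errors are ignored, matching the UDP-like semantics above.
fn wire_send_sketch(
    sockets: &[BoundSocket],
    local_socket: Option<usize>, // index into `sockets`
    local_interface: Option<&str>,
    dest: SocketAddr,
    data: &[u8],
) {
    match (local_socket, local_interface) {
        (Some(i), _) => {
            if let Some(s) = sockets.get(i) {
                let _ = s.socket.send_to(data, dest);
            }
        }
        (None, Some(iface)) => {
            for s in sockets.iter().filter(|s| s.interface == iface) {
                let _ = s.socket.send_to(data, dest);
            }
        }
        (None, None) => {
            for s in sockets {
                let _ = s.socket.send_to(data, dest);
            }
        }
    }
}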
/// Called to check and see if a physical address should be used for ZeroTier traffic to a node.
///
/// The default implementation always returns true.
#[allow(unused_variables)]
fn should_use_physical_path<Application: ApplicationLayer>(
&self,
id: &Valid<Identity>,
endpoint: &Endpoint,
local_socket: Option<&Application::LocalSocket>,
local_interface: Option<&Application::LocalInterface>,
) -> bool {
true
}
/// Called to look up any statically defined or memorized paths to known nodes.
///
/// The default implementation always returns None.
#[allow(unused_variables)]
fn get_path_hints<Application: ApplicationLayer>(
&self,
id: &Valid<Identity>,
) -> Option<Vec<(Endpoint, Option<Application::LocalSocket>, Option<Application::LocalInterface>)>> {
None
}
/// Called to get the current time in milliseconds from the system monotonically increasing clock.
/// This needs to be accurate to about 250 milliseconds resolution or better.
fn time_ticks(&self) -> i64;
/// Called to get the current time in milliseconds since epoch from the real-time clock.
/// This needs to be accurate to about one second resolution or better.
fn time_clock(&self) -> i64;
/// Get this application implementation cast to its concrete type.
///
/// The default implementation just returns None, but this can be implemented using the cast_ref()
/// function in zerotier_utils::cast to return the concrete implementation of this type. It's exposed
/// in this interface for convenience since it's common for inner protocol or other handlers to want
/// to get 'app' as its concrete type to access internal fields and methods. Implement it if possible
/// and convenient.
fn concrete_self<T: ApplicationLayer>(&self) -> Option<&T> {
None
}
}
/// Result of a packet handler in the InnerProtocolLayer trait.
pub enum PacketHandlerResult {
/// Packet was handled successfully.
Ok,
/// Packet was handled and an error occurred (malformed, authentication failure, etc.)
Error,
/// Packet was not handled by this handler.
NotHandled,
}
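A tiny sketch of how a caller might interpret these results (a hypothetical loop, not the actual VL1 dispatch code):

// Stop at the first handler that takes the packet; fall through otherwise.
fn dispatch(handlers: impl IntoIterator<Item = PacketHandlerResult>) -> bool {
    for result in handlers {
        match result {
            PacketHandlerResult::Ok => return true,      // handled successfully
            PacketHandlerResult::Error => return true,   // handled, but rejected or malformed
            PacketHandlerResult::NotHandled => continue, // ask the next handler
        }
    }
    false
}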
/// Interface between VL1 and higher/inner protocol layers.
///
/// This is implemented by Switch in VL2. It's usually not used outside of VL2 in the core but
/// it could also be implemented for testing or "off label" use of VL1 to carry different protocols.
#[allow(unused)]
pub trait InnerProtocolLayer: Sync + Send {
/// Handle a packet, returning a result indicating whether it was handled by this layer.
///
/// Do not attempt to handle OK or ERROR. Instead implement handle_ok() and handle_error().
/// The default version returns NotHandled.
fn handle_packet<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
verb: u8,
payload: &PacketBuffer,
cursor: usize,
) -> PacketHandlerResult {
PacketHandlerResult::NotHandled
}
/// Handle an ERROR, returning a result indicating whether it was recognized.
/// The default version returns NotHandled.
fn handle_error<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
in_re_message_id: u64,
error_code: u8,
payload: &PacketBuffer,
cursor: usize,
) -> PacketHandlerResult {
PacketHandlerResult::NotHandled
}
/// Handle an OK, returning a result indicating whether it was recognized.
/// The default version returns NotHandled.
fn handle_ok<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
in_re_message_id: u64,
payload: &PacketBuffer,
cursor: usize,
) -> PacketHandlerResult {
PacketHandlerResult::NotHandled
}
}

View file

@ -6,14 +6,13 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::vl1::identity::IDENTITY_FINGERPRINT_SIZE;
use crate::vl1::inetaddress::InetAddress;
use crate::vl1::{Address, MAC};
use super::inetaddress::InetAddress;
use super::{Address, MAC};
use zerotier_utils::base64;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use zerotier_utils::str::{escape, unescape};
pub const TYPE_NIL: u8 = 0;
pub const TYPE_ZEROTIER: u8 = 1;
@ -38,8 +37,7 @@ pub enum Endpoint {
Nil,
/// Via another node using unencapsulated relaying (e.g. via a root)
/// This is the address and the full identity fingerprint.
ZeroTier(Address, [u8; IDENTITY_FINGERPRINT_SIZE]),
ZeroTier(Address),
/// Direct L2 Ethernet
Ethernet(MAC),
@ -66,8 +64,7 @@ pub enum Endpoint {
WebRTC(Vec<u8>),
/// Via another node using inner encapsulation via VERB_ENCAP.
/// This is the address and the full identity fingerprint.
ZeroTierEncap(Address, [u8; IDENTITY_FINGERPRINT_SIZE]),
ZeroTierEncap(Address),
}
impl Default for Endpoint {
@ -92,7 +89,7 @@ impl Endpoint {
pub fn type_id(&self) -> u8 {
match self {
Endpoint::Nil => TYPE_NIL,
Endpoint::ZeroTier(_, _) => TYPE_ZEROTIER,
Endpoint::ZeroTier(_) => TYPE_ZEROTIER,
Endpoint::Ethernet(_) => TYPE_ETHERNET,
Endpoint::WifiDirect(_) => TYPE_WIFIDIRECT,
Endpoint::Bluetooth(_) => TYPE_BLUETOOTH,
@ -101,7 +98,7 @@ impl Endpoint {
Endpoint::IpTcp(_) => TYPE_IPTCP,
Endpoint::Http(_) => TYPE_HTTP,
Endpoint::WebRTC(_) => TYPE_WEBRTC,
Endpoint::ZeroTierEncap(_, _) => TYPE_ZEROTIER_ENCAP,
Endpoint::ZeroTierEncap(_) => TYPE_ZEROTIER_ENCAP,
}
}
@ -134,15 +131,14 @@ impl Endpoint {
impl Marshalable for Endpoint {
const MAX_MARSHAL_SIZE: usize = MAX_MARSHAL_SIZE;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), UnmarshalError> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), OutOfBoundsError> {
match self {
Endpoint::Nil => {
buf.append_u8(16 + TYPE_NIL)?;
}
Endpoint::ZeroTier(a, h) => {
Endpoint::ZeroTier(a) => {
buf.append_u8(16 + TYPE_ZEROTIER)?;
buf.append_bytes_fixed(&a.to_bytes())?;
buf.append_bytes_fixed(h)?;
buf.append_bytes_fixed(a.as_bytes())?;
}
Endpoint::Ethernet(m) => {
buf.append_u8(16 + TYPE_ETHERNET)?;
@ -184,10 +180,9 @@ impl Marshalable for Endpoint {
buf.append_varint(b.len() as u64)?;
buf.append_bytes(b)?;
}
Endpoint::ZeroTierEncap(a, h) => {
Endpoint::ZeroTierEncap(a) => {
buf.append_u8(16 + TYPE_ZEROTIER_ENCAP)?;
buf.append_bytes_fixed(&a.to_bytes())?;
buf.append_bytes_fixed(h)?;
buf.append_bytes_fixed(a.as_bytes())?;
}
}
Ok(())
@ -214,10 +209,9 @@ impl Marshalable for Endpoint {
} else {
match type_byte - 16 {
TYPE_NIL => Ok(Endpoint::Nil),
TYPE_ZEROTIER => {
let zt = Address::from_bytes_fixed(buf.read_bytes_fixed(cursor)?).ok_or(UnmarshalError::InvalidData)?;
Ok(Endpoint::ZeroTier(zt, buf.read_bytes_fixed::<IDENTITY_FINGERPRINT_SIZE>(cursor)?.clone()))
}
TYPE_ZEROTIER => Ok(Endpoint::ZeroTier(
Address::from_bytes(buf.read_bytes_fixed::<{ Address::SIZE_BYTES }>(cursor)?).map_err(|_| UnmarshalError::InvalidData)?,
)),
TYPE_ETHERNET => Ok(Endpoint::Ethernet(MAC::unmarshal(buf, cursor)?)),
TYPE_WIFIDIRECT => Ok(Endpoint::WifiDirect(MAC::unmarshal(buf, cursor)?)),
TYPE_BLUETOOTH => Ok(Endpoint::Bluetooth(MAC::unmarshal(buf, cursor)?)),
@ -228,10 +222,9 @@ impl Marshalable for Endpoint {
String::from_utf8_lossy(buf.read_bytes(buf.read_varint(cursor)? as usize, cursor)?).to_string(),
)),
TYPE_WEBRTC => Ok(Endpoint::WebRTC(buf.read_bytes(buf.read_varint(cursor)? as usize, cursor)?.to_vec())),
TYPE_ZEROTIER_ENCAP => {
let zt = Address::from_bytes_fixed(buf.read_bytes_fixed(cursor)?).ok_or(UnmarshalError::InvalidData)?;
Ok(Endpoint::ZeroTierEncap(zt, buf.read_bytes_fixed(cursor)?.clone()))
}
TYPE_ZEROTIER_ENCAP => Ok(Endpoint::ZeroTierEncap(
Address::from_bytes(buf.read_bytes_fixed::<{ Address::SIZE_BYTES }>(cursor)?).map_err(|_| UnmarshalError::InvalidData)?,
)),
_ => Err(UnmarshalError::InvalidData),
}
}
@ -244,9 +237,9 @@ impl Hash for Endpoint {
Endpoint::Nil => {
state.write_u8(TYPE_NIL);
}
Endpoint::ZeroTier(a, _) => {
Endpoint::ZeroTier(a) => {
state.write_u8(TYPE_ZEROTIER);
state.write_u64(a.into())
a.hash(state);
}
Endpoint::Ethernet(m) => {
state.write_u8(TYPE_ETHERNET);
@ -280,9 +273,9 @@ impl Hash for Endpoint {
state.write_u8(TYPE_WEBRTC);
offer.hash(state);
}
Endpoint::ZeroTierEncap(a, _) => {
Endpoint::ZeroTierEncap(a) => {
state.write_u8(TYPE_ZEROTIER_ENCAP);
state.write_u64(a.into())
a.hash(state);
}
}
}
@ -293,7 +286,7 @@ impl Ord for Endpoint {
// Manually implement Ord to ensure that sort order is known and consistent.
match (self, other) {
(Endpoint::Nil, Endpoint::Nil) => Ordering::Equal,
(Endpoint::ZeroTier(a, ah), Endpoint::ZeroTier(b, bh)) => a.cmp(b).then_with(|| ah.cmp(bh)),
(Endpoint::ZeroTier(a), Endpoint::ZeroTier(b)) => a.cmp(b),
(Endpoint::Ethernet(a), Endpoint::Ethernet(b)) => a.cmp(b),
(Endpoint::WifiDirect(a), Endpoint::WifiDirect(b)) => a.cmp(b),
(Endpoint::Bluetooth(a), Endpoint::Bluetooth(b)) => a.cmp(b),
@ -302,7 +295,7 @@ impl Ord for Endpoint {
(Endpoint::IpTcp(a), Endpoint::IpTcp(b)) => a.cmp(b),
(Endpoint::Http(a), Endpoint::Http(b)) => a.cmp(b),
(Endpoint::WebRTC(a), Endpoint::WebRTC(b)) => a.cmp(b),
(Endpoint::ZeroTierEncap(a, ah), Endpoint::ZeroTierEncap(b, bh)) => a.cmp(b).then_with(|| ah.cmp(bh)),
(Endpoint::ZeroTierEncap(a), Endpoint::ZeroTierEncap(b)) => a.cmp(b),
_ => self.type_id().cmp(&other.type_id()),
}
}
@ -319,7 +312,7 @@ impl ToString for Endpoint {
fn to_string(&self) -> String {
match self {
Endpoint::Nil => format!("nil"),
Endpoint::ZeroTier(a, ah) => format!("zt:{}-{}", a.to_string(), base64::encode_url_nopad(ah)),
Endpoint::ZeroTier(a) => format!("zt:{}", a.to_string()),
Endpoint::Ethernet(m) => format!("eth:{}", m.to_string()),
Endpoint::WifiDirect(m) => format!("wifip2p:{}", m.to_string()),
Endpoint::Bluetooth(m) => format!("bt:{}", m.to_string()),
@ -327,8 +320,8 @@ impl ToString for Endpoint {
Endpoint::IpUdp(ip) => format!("udp:{}", ip.to_string()),
Endpoint::IpTcp(ip) => format!("tcp:{}", ip.to_string()),
Endpoint::Http(url) => format!("url:{}", url.clone()), // http or https
Endpoint::WebRTC(offer) => format!("webrtc:{}", base64::encode_url_nopad(offer.as_slice())),
Endpoint::ZeroTierEncap(a, ah) => format!("zte:{}-{}", a.to_string(), base64::encode_url_nopad(ah)),
Endpoint::WebRTC(offer) => format!("webrtc:{}", escape(offer.as_slice())),
Endpoint::ZeroTierEncap(a) => format!("zte:{}", a.to_string()),
}
}
}
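A small round-trip sketch (assuming Endpoint and its PartialEq impl are in scope, as exercised by this crate's tests):

use std::str::FromStr;

fn endpoint_roundtrip() {
    let e = Endpoint::Http("https://example.com/".to_string());
    let s = e.to_string(); // "url:https://example.com/"
    assert!(Endpoint::from_str(&s).unwrap() == e);
}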
@ -348,18 +341,11 @@ impl FromStr for Endpoint {
let (endpoint_type, endpoint_data) = ss.unwrap();
match endpoint_type {
"zt" | "zte" => {
let address_and_hash = endpoint_data.split_once("-");
if address_and_hash.is_some() {
let (address, hash) = address_and_hash.unwrap();
if let Some(hash) = base64::decode_url_nopad(hash) {
if hash.len() == IDENTITY_FINGERPRINT_SIZE {
if endpoint_type == "zt" {
return Ok(Endpoint::ZeroTier(Address::from_str(address)?, hash.as_slice().try_into().unwrap()));
} else {
return Ok(Endpoint::ZeroTierEncap(Address::from_str(address)?, hash.as_slice().try_into().unwrap()));
}
}
}
let a = Address::from_str(endpoint_data).map_err(|_| InvalidFormatError)?;
if endpoint_type == "zt" {
return Ok(Endpoint::ZeroTier(a));
} else {
return Ok(Endpoint::ZeroTierEncap(a));
}
}
"eth" => return Ok(Endpoint::Ethernet(MAC::from_str(endpoint_data)?)),
@ -369,11 +355,7 @@ impl FromStr for Endpoint {
"udp" => return Ok(Endpoint::IpUdp(InetAddress::from_str(endpoint_data)?)),
"tcp" => return Ok(Endpoint::IpTcp(InetAddress::from_str(endpoint_data)?)),
"url" => return Ok(Endpoint::Http(endpoint_data.into())),
"webrtc" => {
if let Some(offer) = base64::decode_url_nopad(endpoint_data) {
return Ok(Endpoint::WebRTC(offer));
}
}
"webrtc" => return Ok(Endpoint::WebRTC(unescape(endpoint_data))),
_ => {}
}
return Err(InvalidFormatError);
@ -438,278 +420,3 @@ impl<'de> Deserialize<'de> for Endpoint {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::protocol::*;
fn randstring(len: u8) -> String {
(0..len)
.map(|_| (rand::random::<u8>() % 26) + 'a' as u8)
.map(|c| {
if rand::random::<bool>() {
(c as char).to_ascii_uppercase()
} else {
c as char
}
})
.map(|c| c.to_string())
.collect::<Vec<String>>()
.join("")
}
#[test]
fn endpoint_default() {
let e: Endpoint = Default::default();
assert!(matches!(e, Endpoint::Nil))
}
#[test]
fn endpoint_from_bytes() {
let v = [0u8; MAX_MARSHAL_SIZE];
assert!(Endpoint::from_bytes(&v).is_none());
}
#[test]
fn endpoint_marshal_nil() {
let n = Endpoint::Nil;
let mut buf = Buffer::<1>::new();
let res = n.marshal(&mut buf);
assert!(res.is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let n2 = res.unwrap();
assert_eq!(n, n2);
}
#[test]
fn endpoint_marshal_zerotier() {
for _ in 0..1000 {
let mut hash = [0u8; IDENTITY_FINGERPRINT_SIZE];
hash.fill_with(|| rand::random());
let mut v = [0u8; ADDRESS_SIZE];
v.fill_with(|| rand::random());
// correct for situations where RNG generates a prefix which generates a None value.
while v[0] == ADDRESS_RESERVED_PREFIX {
v[0] = rand::random()
}
let zte = Endpoint::ZeroTier(Address::from_bytes(&v).unwrap(), hash);
const TMP: usize = IDENTITY_FINGERPRINT_SIZE + 8;
let mut buf = Buffer::<TMP>::new();
let res = zte.marshal(&mut buf);
assert!(res.is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let zte2 = res.unwrap();
assert_eq!(zte, zte2);
}
}
#[test]
fn endpoint_marshal_zerotier_encap() {
for _ in 0..1000 {
let mut hash = [0u8; IDENTITY_FINGERPRINT_SIZE];
hash.fill_with(|| rand::random());
let mut v = [0u8; ADDRESS_SIZE];
v.fill_with(|| rand::random());
// correct for situations where RNG generates a prefix which generates a None value.
while v[0] == ADDRESS_RESERVED_PREFIX {
v[0] = rand::random()
}
let zte = Endpoint::ZeroTierEncap(Address::from_bytes(&v).unwrap(), hash);
const TMP: usize = IDENTITY_FINGERPRINT_SIZE + 8;
let mut buf = Buffer::<TMP>::new();
let res = zte.marshal(&mut buf);
assert!(res.is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let zte2 = res.unwrap();
assert_eq!(zte, zte2);
}
}
#[test]
fn endpoint_marshal_mac() {
for _ in 0..1000 {
let mac = crate::vl1::MAC::from_u64(rand::random()).unwrap();
for e in [
Endpoint::Ethernet(mac.clone()),
Endpoint::WifiDirect(mac.clone()),
Endpoint::Bluetooth(mac.clone()),
] {
let mut buf = Buffer::<7>::new();
let res = e.marshal(&mut buf);
assert!(res.is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let e2 = res.unwrap();
assert_eq!(e, e2);
}
}
}
#[test]
fn endpoint_marshal_inetaddress() {
for _ in 0..1000 {
let mut v = [0u8; 16];
v.fill_with(|| rand::random());
let inet = crate::vl1::InetAddress::from_ip_port(&v, 1234);
for e in [Endpoint::Icmp(inet.clone()), Endpoint::IpTcp(inet.clone()), Endpoint::IpUdp(inet.clone())] {
let mut buf = Buffer::<20>::new();
let res = e.marshal(&mut buf);
assert!(res.is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let e2 = res.unwrap();
assert_eq!(e, e2);
}
}
}
#[test]
fn endpoint_marshal_http() {
for _ in 0..1000 {
let http = Endpoint::Http(randstring(30));
let mut buf = Buffer::<33>::new();
assert!(http.marshal(&mut buf).is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let http2 = res.unwrap();
assert_eq!(http, http2);
}
}
#[test]
fn endpoint_marshal_webrtc() {
for _ in 0..1000 {
let mut v = vec![0u8; 100]; // Vec::with_capacity() alone leaves the Vec empty, so fill_with() would have nothing to fill.
v.fill_with(|| rand::random());
let rtc = Endpoint::WebRTC(v);
let mut buf = Buffer::<102>::new();
assert!(rtc.marshal(&mut buf).is_ok());
let res = Endpoint::unmarshal(&buf, &mut 0);
assert!(res.is_ok());
let rtc2 = res.unwrap();
assert_eq!(rtc, rtc2);
}
}
#[test]
fn endpoint_to_from_string() {
use std::str::FromStr;
for _ in 0..1000 {
let mut v = vec![0u8; 100]; // Vec::with_capacity() alone leaves the Vec empty, so fill_with() would have nothing to fill.
v.fill_with(|| rand::random());
let rtc = Endpoint::WebRTC(v);
assert_ne!(rtc.to_string().len(), 0);
assert!(rtc.to_string().starts_with("webrtc"));
let rtc2 = Endpoint::from_str(&rtc.to_string()).unwrap();
assert_eq!(rtc, rtc2);
let http = Endpoint::Http(randstring(30));
assert_ne!(http.to_string().len(), 0);
assert!(http.to_string().starts_with("url"));
let http2 = Endpoint::from_str(&http.to_string()).unwrap();
assert_eq!(http, http2);
let mut v = [0u8; 16];
v.fill_with(|| rand::random());
let inet = crate::vl1::InetAddress::from_ip_port(&v, 0);
let ip = Endpoint::Icmp(inet.clone());
assert_ne!(ip.to_string().len(), 0);
assert!(ip.to_string().starts_with("icmp"));
let ip2 = Endpoint::from_str(&ip.to_string()).unwrap();
assert_eq!(ip, ip2);
let inet = crate::vl1::InetAddress::from_ip_port(&v, 1234);
for e in [(Endpoint::IpTcp(inet.clone()), "tcp"), (Endpoint::IpUdp(inet.clone()), "udp")] {
assert_ne!(e.0.to_string().len(), 0);
assert!(e.0.to_string().starts_with(e.1));
let e2 = Endpoint::from_str(&e.0.to_string()).unwrap();
assert_eq!(e.0, e2);
}
let mac = crate::vl1::MAC::from_u64(rand::random()).unwrap();
for e in [
(Endpoint::Ethernet(mac.clone()), "eth"),
(Endpoint::WifiDirect(mac.clone()), "wifip2p"),
(Endpoint::Bluetooth(mac.clone()), "bt"),
] {
assert_ne!(e.0.to_string().len(), 0);
assert!(e.0.to_string().starts_with(e.1));
let e2 = Endpoint::from_str(&e.0.to_string()).unwrap();
assert_eq!(e.0, e2);
}
let mut hash = [0u8; IDENTITY_FINGERPRINT_SIZE];
hash.fill_with(|| rand::random());
let mut v = [0u8; ADDRESS_SIZE];
v.fill_with(|| rand::random());
// Re-roll if the RNG produced the reserved prefix, which would make Address::from_bytes() return None.
while v[0] == ADDRESS_RESERVED_PREFIX {
v[0] = rand::random()
}
for e in [
(Endpoint::ZeroTier(Address::from_bytes(&v).unwrap(), hash), "zt"),
(Endpoint::ZeroTierEncap(Address::from_bytes(&v).unwrap(), hash), "zte"),
] {
assert_ne!(e.0.to_string().len(), 0);
assert!(e.0.to_string().starts_with(e.1));
let e2 = Endpoint::from_str(&e.0.to_string()).unwrap();
assert_eq!(e.0, e2);
}
assert_eq!(Endpoint::Nil.to_string(), "nil");
}
}
}
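The marshal tests above all repeat the same pattern: marshal into a fixed-capacity Buffer, unmarshal, and compare. A possible generic helper that captures that pattern under the same Buffer/Endpoint APIs used above (the helper and its name round_trip are ours, not in the source):

fn round_trip<const CAP: usize>(e: &Endpoint) -> Endpoint {
    // CAP doubles as an upper bound on the marshaled size of this endpoint type.
    let mut buf = Buffer::<CAP>::new();
    assert!(e.marshal(&mut buf).is_ok());
    let out = Endpoint::unmarshal(&buf, &mut 0).expect("unmarshal failed");
    assert_eq!(&out, e);
    out
}

For example, round_trip::<7>(&Endpoint::Ethernet(mac)) would mirror endpoint_marshal_mac above.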

View file

@ -1,6 +1,6 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use crate::vl1::*;
use super::identity::Identity;
#[derive(Clone)]
pub enum Event {

File diff suppressed because it is too large

View file

@ -12,7 +12,7 @@ use num_traits::AsPrimitive;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use zerotier_utils::buffer::Buffer;
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::error::{InvalidFormatError, InvalidParameterError};
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
@ -77,6 +77,7 @@ pub const AF_INET: AddressFamilyType = libc::AF_INET as AddressFamilyType;
#[cfg(not(windows))]
pub const AF_INET6: AddressFamilyType = libc::AF_INET6 as AddressFamilyType;
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum IpScope {
None = 0,
@ -879,7 +880,7 @@ impl InetAddress {
impl Marshalable for InetAddress {
const MAX_MARSHAL_SIZE: usize = 19;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), UnmarshalError> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), OutOfBoundsError> {
unsafe {
match self.sa.sa_family as AddressFamilyType {
AF_INET => {

View file

@ -7,7 +7,7 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use zerotier_utils::buffer::Buffer;
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::hex;
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
@ -75,9 +75,8 @@ impl Marshalable for MAC {
const MAX_MARSHAL_SIZE: usize = 6;
#[inline(always)]
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), UnmarshalError> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), OutOfBoundsError> {
buf.append_bytes(&self.0.get().to_be_bytes()[2..])
.map_err(|_| UnmarshalError::OutOfBounds)
}
#[inline(always)]

View file

@ -1,24 +1,27 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
mod address;
mod api;
mod endpoint;
mod event;
mod mac;
mod node;
mod path;
mod peer;
mod peermap;
mod rootset;
mod whois;
pub mod identity;
pub mod inetaddress;
pub use address::Address;
pub use address::{Address, PartialAddress};
pub use api::{ApplicationLayer, InnerProtocolLayer, PacketHandlerResult};
pub use endpoint::Endpoint;
pub use event::Event;
pub use identity::Identity;
pub use inetaddress::InetAddress;
pub use mac::MAC;
pub use node::{ApplicationLayer, InnerProtocolLayer, Node, PacketHandlerResult};
pub use node::Node;
pub use path::Path;
pub use peer::Peer;
pub use rootset::{Root, RootSet};

View file

@ -1,195 +1,45 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::collections::HashMap;
use std::convert::Infallible;
use std::hash::Hash;
use std::io::Write;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex, RwLock, Weak};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use super::address::{Address, PartialAddress};
use super::api::{ApplicationLayer, InnerProtocolLayer};
use super::debug_event;
use super::endpoint::Endpoint;
use super::event::Event;
use super::identity::{Identity, IdentitySecret};
use super::path::{Path, PathServiceResult};
use super::peer::Peer;
use super::peermap::PeerMap;
use super::rootset::RootSet;
use crate::protocol::*;
use crate::vl1::address::Address;
use crate::vl1::debug_event;
use crate::vl1::endpoint::Endpoint;
use crate::vl1::event::Event;
use crate::vl1::identity::Identity;
use crate::vl1::path::{Path, PathServiceResult};
use crate::vl1::peer::Peer;
use crate::vl1::rootset::RootSet;
use zerotier_crypto::random;
use zerotier_crypto::typestate::{Valid, Verified};
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::gate::IntervalGate;
use zerotier_utils::hex;
use zerotier_utils::marshalable::Marshalable;
use zerotier_utils::ringbuffer::RingBuffer;
/// Interface trait to be implemented by code that's using the ZeroTier network hypervisor.
/// A VL1 node on the ZeroTier global peer to peer network.
///
/// This is analogous to a C struct full of function pointers to callbacks along with some
/// associated type definitions.
pub trait ApplicationLayer: Sync + Send + 'static {
/// Type for local system sockets.
type LocalSocket: Sync + Send + Hash + PartialEq + Eq + Clone + ToString + Sized + 'static;
/// Type for local system interfaces.
type LocalInterface: Sync + Send + Hash + PartialEq + Eq + Clone + ToString + Sized + 'static;
/// A VL1 level event occurred.
fn event(&self, event: Event);
/// Load this node's identity from the data store.
fn load_node_identity(&self) -> Option<Valid<Identity>>;
/// Save this node's identity to the data store, returning true on success.
fn save_node_identity(&self, id: &Valid<Identity>) -> bool;
/// Get a pooled packet buffer for internal use.
fn get_buffer(&self) -> PooledPacketBuffer;
/// Check a local socket for validity.
///
/// This could return false if the socket's interface no longer exists, its port has been
/// unbound, etc.
fn local_socket_is_valid(&self, socket: &Self::LocalSocket) -> bool;
/// Check if this node should respond to messages from a given peer at all.
fn should_respond_to(&self, id: &Valid<Identity>) -> bool;
/// Called to send a packet over the physical network (virtual -> physical).
///
/// This sends with UDP-like semantics. It should do whatever best effort it can and return.
///
/// If a local socket is specified the implementation should send from that socket or not
/// at all (returning false). If a local interface is specified the implementation should
/// send from all sockets on that interface. If neither is specified the packet may be
/// sent on all sockets or a random subset.
///
/// For endpoint types that support a packet TTL, the implementation may set the TTL
/// if the 'ttl' parameter is not zero. If the parameter is zero or TTL setting is not
/// supported, the default TTL should be used. This parameter is ignored for types that
/// don't support it.
fn wire_send(
&self,
endpoint: &Endpoint,
local_socket: Option<&Self::LocalSocket>,
local_interface: Option<&Self::LocalInterface>,
data: &[u8],
packet_ttl: u8,
);
/// Called to check and see if a physical address should be used for ZeroTier traffic to a node.
///
/// The default implementation always returns true.
#[allow(unused_variables)]
fn should_use_physical_path<Application: ApplicationLayer + ?Sized>(
&self,
id: &Valid<Identity>,
endpoint: &Endpoint,
local_socket: Option<&Application::LocalSocket>,
local_interface: Option<&Application::LocalInterface>,
) -> bool {
true
}
/// Called to look up any statically defined or memorized paths to known nodes.
///
/// The default implementation always returns None.
#[allow(unused_variables)]
fn get_path_hints<Application: ApplicationLayer + ?Sized>(
&self,
id: &Valid<Identity>,
) -> Option<Vec<(Endpoint, Option<Application::LocalSocket>, Option<Application::LocalInterface>)>> {
None
}
/// Called to get the current time in milliseconds from the system monotonically increasing clock.
/// This needs to be accurate to about 250 milliseconds resolution or better.
fn time_ticks(&self) -> i64;
/// Called to get the current time in milliseconds since epoch from the real-time clock.
/// This needs to be accurate to about one second resolution or better.
fn time_clock(&self) -> i64;
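The time_ticks()/time_clock() contract above pairs a monotonic millisecond counter with a real-time epoch clock, and implementors must not mix the two. A minimal sketch of how an application might back both callbacks with std::time; the Clocks type and its constructor are hypothetical, not part of this crate:

use std::time::{Instant, SystemTime, UNIX_EPOCH};

struct Clocks {
    started: Instant,
}

impl Clocks {
    fn new() -> Self {
        Self { started: Instant::now() }
    }

    // Monotonic milliseconds since startup, suitable for time_ticks().
    fn time_ticks(&self) -> i64 {
        self.started.elapsed().as_millis() as i64
    }

    // Wall-clock milliseconds since the Unix epoch, suitable for time_clock().
    fn time_clock(&self) -> i64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map_or(0, |d| d.as_millis() as i64)
    }
}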
/// VL1 nodes communicate to/from both the outside world and the inner protocol layer via the two
/// supplied API traits that must be implemented by the application. ApplicationLayer provides a
/// means of interacting with the application/OS and InnerProtocolLayer provides the interface for
/// implementing the protocol (e.g. ZeroTier VL2) that will be carried by VL1.
pub struct Node<Application: ApplicationLayer> {
pub identity: IdentitySecret,
intervals: Mutex<BackgroundTaskIntervals>,
paths: RwLock<HashMap<PathKey<'static, 'static, Application::LocalSocket>, Arc<Path<Application>>>>,
pub(super) peers: PeerMap<Application>,
roots: RwLock<RootInfo<Application>>,
best_root: RwLock<Option<Arc<Peer<Application>>>>,
}
/// Result of a packet handler.
pub enum PacketHandlerResult {
/// Packet was handled successfully.
Ok,
/// Packet was handled and an error occurred (malformed, authentication failure, etc.)
Error,
/// Packet was not handled by this handler.
NotHandled,
}
/// Interface between VL1 and higher/inner protocol layers.
///
/// This is implemented by Switch in VL2. It's usually not used outside of VL2 in the core but
/// it could also be implemented for testing or "off label" use of VL1 to carry different protocols.
#[allow(unused)]
pub trait InnerProtocolLayer: Sync + Send {
/// Handle a packet, returning true if it was handled by the next layer.
///
/// Do not attempt to handle OK or ERROR. Instead implement handle_ok() and handle_error().
/// The default version returns NotHandled.
fn handle_packet<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
verb: u8,
payload: &PacketBuffer,
cursor: usize,
) -> PacketHandlerResult {
PacketHandlerResult::NotHandled
}
/// Handle errors, returning true if the error was recognized.
/// The default version returns NotHandled.
fn handle_error<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
in_re_message_id: u64,
error_code: u8,
payload: &PacketBuffer,
cursor: usize,
) -> PacketHandlerResult {
PacketHandlerResult::NotHandled
}
/// Handle an OK, returning true if the OK was recognized.
/// The default version returns NotHandled.
fn handle_ok<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
in_re_message_id: u64,
payload: &PacketBuffer,
cursor: usize,
) -> PacketHandlerResult {
PacketHandlerResult::NotHandled
}
}
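Since every method of InnerProtocolLayer shown above has a default body returning PacketHandlerResult::NotHandled, a VL1-only harness or test can plug in an empty implementation. A minimal sketch, assuming the trait keeps those defaults after the move to api.rs; the type name is ours:

/// Inner protocol layer that handles nothing, for tests or bare VL1 use.
struct NullInnerProtocol;

impl InnerProtocolLayer for NullInnerProtocol {}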
struct RootInfo<Application: ApplicationLayer + ?Sized> {
struct RootInfo<Application: ApplicationLayer> {
/// Root sets to which we are a member.
sets: HashMap<String, Verified<RootSet>>,
@ -208,7 +58,7 @@ struct RootInfo<Application: ApplicationLayer + ?Sized> {
}
/// How often to check the root cluster definitions against the root list and update.
const ROOT_SYNC_INTERVAL_MS: i64 = 1000;
const ROOT_SYNC_INTERVAL_MS: i64 = 2000;
#[derive(Default)]
struct BackgroundTaskIntervals {
@ -220,74 +70,13 @@ struct BackgroundTaskIntervals {
whois_queue_retry: IntervalGate<{ WHOIS_RETRY_INTERVAL }>,
}
struct WhoisQueueItem<Application: ApplicationLayer + ?Sized> {
v1_proto_waiting_packets:
RingBuffer<(Weak<Path<Application::LocalSocket, Application::LocalInterface>>, PooledPacketBuffer), WHOIS_MAX_WAITING_PACKETS>,
last_retry_time: i64,
retry_count: u16,
}
/// A ZeroTier VL1 node that can communicate securely with the ZeroTier peer-to-peer network.
pub struct Node<Application: ApplicationLayer + ?Sized> {
/// A random ID generated to identify this particular running instance.
pub instance_id: [u8; 16],
/// This node's identity and permanent keys.
pub identity: Valid<Identity>,
/// Interval latches for periodic background tasks.
intervals: Mutex<BackgroundTaskIntervals>,
/// Canonicalized network paths, held as Weak<> to be automatically cleaned when no longer in use.
paths: RwLock<HashMap<PathKey<'static, 'static, Application::LocalSocket>, Arc<Path<Application::LocalSocket, Application::LocalInterface>>>>,
/// Peers with which we are currently communicating.
peers: RwLock<HashMap<Address, Arc<Peer<Application>>>>,
/// This node's trusted roots, sorted in ascending order of quality/preference, and cluster definitions.
roots: RwLock<RootInfo<Application>>,
/// Current best root.
best_root: RwLock<Option<Arc<Peer<Application>>>>,
/// Queue of identities being looked up.
whois_queue: Mutex<HashMap<Address, WhoisQueueItem<Application>>>,
}
impl<Application: ApplicationLayer + ?Sized> Node<Application> {
pub fn new(app: &Application, auto_generate_identity: bool, auto_upgrade_identity: bool) -> Result<Self, InvalidParameterError> {
let mut id = {
let id = app.load_node_identity();
if id.is_none() {
if !auto_generate_identity {
return Err(InvalidParameterError("no identity found and auto-generate not enabled"));
} else {
let id = Identity::generate();
app.event(Event::IdentityAutoGenerated(id.as_ref().clone()));
app.save_node_identity(&id);
id
}
} else {
id.unwrap()
}
};
if auto_upgrade_identity {
let old = id.clone();
if id.upgrade()? {
app.save_node_identity(&id);
app.event(Event::IdentityAutoUpgraded(old.remove_typestate(), id.as_ref().clone()));
}
}
debug_event!(app, "[vl1] loaded identity {}", id.to_string());
Ok(Self {
instance_id: random::get_bytes_secure(),
identity: id,
impl<Application: ApplicationLayer> Node<Application> {
pub fn new(identity_secret: IdentitySecret) -> Self {
Self {
identity: identity_secret,
intervals: Mutex::new(BackgroundTaskIntervals::default()),
paths: RwLock::new(HashMap::new()),
peers: RwLock::new(HashMap::new()),
peers: PeerMap::new(),
roots: RwLock::new(RootInfo {
sets: HashMap::new(),
roots: HashMap::new(),
@ -296,13 +85,12 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
online: false,
}),
best_root: RwLock::new(None),
whois_queue: Mutex::new(HashMap::new()),
})
}
}
#[inline]
pub fn peer(&self, a: Address) -> Option<Arc<Peer<Application>>> {
self.peers.read().unwrap().get(&a).cloned()
#[inline(always)]
pub fn peer(&self, a: &Address) -> Option<Arc<Peer<Application>>> {
self.peers.get_exact(a)
}
#[inline]
@ -329,7 +117,6 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
/// Add a new root set or update the existing root set if the new root set is newer and otherwise matches.
#[inline]
pub fn add_update_root_set(&self, rs: Verified<RootSet>) -> bool {
let mut roots = self.roots.write().unwrap();
if let Some(entry) = roots.sets.get_mut(&rs.name) {
@ -348,13 +135,11 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
/// Returns whether or not this node has any root sets defined.
#[inline]
pub fn has_roots_defined(&self) -> bool {
self.roots.read().unwrap().sets.iter().any(|rs| !rs.1.members.is_empty())
}
/// Initialize with default roots if there are no roots defined, otherwise do nothing.
#[inline]
pub fn init_default_roots(&self) -> bool {
if !self.has_roots_defined() {
self.add_update_root_set(RootSet::zerotier_default())
@ -364,7 +149,6 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
/// Get the root sets that this node trusts.
#[inline]
pub fn root_sets(&self) -> Vec<RootSet> {
self.roots.read().unwrap().sets.values().cloned().map(|s| s.remove_typestate()).collect()
}
@ -398,7 +182,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
} {
debug_event!(app, "[vl1] root sets modified, synchronizing internal data structures");
let (mut old_root_identities, address_collisions, new_roots, bad_identities, my_root_sets) = {
let (mut old_root_identities, new_roots, bad_identities, my_root_sets) = {
let roots = self.roots.read().unwrap();
let old_root_identities: Vec<Identity> = roots.roots.iter().map(|(p, _)| p.identity.as_ref().clone()).collect();
@ -406,56 +190,24 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
let mut bad_identities = Vec::new();
let mut my_root_sets: Option<Vec<u8>> = None;
// This is a sanity check to make sure we don't have root sets that contain roots with the same address
// but a different identity. If we do, the offending address is blacklisted. This would indicate something
// weird and possibly nasty happening with whoever is making your root set definitions.
let mut address_collisions = Vec::new();
{
let mut address_collision_check = HashMap::with_capacity(roots.sets.len() * 8);
for (_, rs) in roots.sets.iter() {
for m in rs.members.iter() {
if m.identity.eq(&self.identity) {
let _ = my_root_sets.get_or_insert_with(|| Vec::new()).write_all(rs.to_bytes().as_slice());
} else if self
.peers
.read()
.unwrap()
.get(&m.identity.address)
.map_or(false, |p| !p.identity.as_ref().eq(&m.identity))
|| address_collision_check
.insert(m.identity.address, &m.identity)
.map_or(false, |old_id| !old_id.eq(&m.identity))
{
address_collisions.push(m.identity.address);
}
}
}
}
for (_, rs) in roots.sets.iter() {
for m in rs.members.iter() {
if m.endpoints.is_some() && !address_collisions.contains(&m.identity.address) && !m.identity.eq(&self.identity) {
if m.identity.eq(&self.identity.public) {
let _ = my_root_sets
.get_or_insert_with(|| Vec::new())
.write_all(rs.to_buffer::<{ RootSet::MAX_MARSHAL_SIZE }>().unwrap().as_bytes());
} else if m.endpoints.is_some() {
debug_event!(
app,
"[vl1] examining root {} with {} endpoints",
m.identity.address.to_string(),
m.endpoints.as_ref().map_or(0, |e| e.len())
);
let peers = self.peers.read().unwrap();
if let Some(peer) = peers.get(&m.identity.address) {
if let Some(peer) = self.peers.get_exact(&m.identity.address) {
new_roots.insert(peer.clone(), m.endpoints.as_ref().unwrap().iter().cloned().collect());
} else {
if let Some(peer) = Peer::new(&self.identity, Valid::mark_valid(m.identity.clone()), time_ticks) {
drop(peers);
new_roots.insert(
self.peers
.write()
.unwrap()
.entry(m.identity.address)
.or_insert_with(|| Arc::new(peer))
.clone(),
m.endpoints.as_ref().unwrap().iter().cloned().collect(),
);
new_roots.insert(self.peers.add(Arc::new(peer)).0, m.endpoints.as_ref().unwrap().iter().cloned().collect());
} else {
bad_identities.push(m.identity.clone());
}
@ -464,15 +216,9 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
}
(old_root_identities, address_collisions, new_roots, bad_identities, my_root_sets)
(old_root_identities, new_roots, bad_identities, my_root_sets)
};
for c in address_collisions.iter() {
app.event(Event::SecurityWarning(format!(
"address/identity collision in root sets! address {} collides across root sets or with an existing peer and is being ignored as a root!",
c.to_string()
)));
}
for i in bad_identities.iter() {
app.event(Event::SecurityWarning(format!(
"bad identity detected for address {} in at least one root set, ignoring (error creating peer object)",
@ -584,14 +330,14 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
let mut dead_peers = Vec::new();
{
let roots = self.roots.read().unwrap();
for (a, peer) in self.peers.read().unwrap().iter() {
self.peers.each(|peer| {
if !peer.service(app, self, time_ticks) && !roots.roots.contains_key(peer) {
dead_peers.push(*a);
dead_peers.push(peer.identity.address.clone());
}
}
});
}
for dp in dead_peers.iter() {
self.peers.write().unwrap().remove(dp);
self.peers.remove(dp);
}
}
@ -625,6 +371,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
if whois_queue_retry {
/*
let need_whois = {
let mut need_whois = Vec::new();
let mut whois_queue = self.whois_queue.lock().unwrap();
@ -633,7 +380,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
if (time_ticks - qi.last_retry_time) >= WHOIS_RETRY_INTERVAL {
qi.retry_count += 1;
qi.last_retry_time = time_ticks;
need_whois.push(*address);
need_whois.push(address.clone());
}
}
need_whois
@ -641,12 +388,13 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
if !need_whois.is_empty() {
self.send_whois(app, need_whois.as_slice(), time_ticks);
}
*/
}
INTERVAL
}
pub fn handle_incoming_physical_packet<Inner: InnerProtocolLayer + ?Sized>(
pub fn handle_incoming_physical_packet<Inner: InnerProtocolLayer>(
&self,
app: &Application,
inner: &Inner,
@ -672,10 +420,10 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
// Legacy ZeroTier V1 packet handling
if let Ok(fragment_header) = packet.struct_mut_at::<v1::FragmentHeader>(0) {
if let Some(dest) = Address::from_bytes_fixed(&fragment_header.dest) {
if let Ok(dest) = PartialAddress::from_legacy_address_bytes(&fragment_header.dest) {
// Packet is addressed to this node.
if dest == self.identity.address {
if dest.matches(&self.identity.public.address) {
let fragment_header = &*fragment_header; // discard mut
let path = self.canonical_path(source_endpoint, source_local_socket, source_local_interface, time_ticks);
path.log_receive_anything(time_ticks);
@ -703,8 +451,8 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
debug_event!(app, "[vl1] [v1] #{:0>16x} packet fully assembled!", fragment_header_id);
if let Ok(packet_header) = frag0.struct_at::<v1::PacketHeader>(0) {
if let Some(source) = Address::from_bytes(&packet_header.src) {
if let Some(peer) = self.peer(source) {
if let Ok(source) = PartialAddress::from_legacy_address_bytes(&packet_header.src) {
if let Some(peer) = self.peers.get_unambiguous(&source) {
peer.v1_proto_receive(
self,
app,
@ -728,7 +476,8 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
}
if ok {
self.whois(app, source, Some((Arc::downgrade(&path), combined_packet)), time_ticks);
// TODO
//self.whois(app, source.clone(), Some((Arc::downgrade(&path), combined_packet)), time_ticks);
}
}
} // else source address invalid
@ -738,11 +487,12 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
} else if let Ok(packet_header) = packet.struct_at::<v1::PacketHeader>(0) {
debug_event!(app, "[vl1] [v1] #{:0>16x} is unfragmented", u64::from_be_bytes(packet_header.id));
if let Some(source) = Address::from_bytes(&packet_header.src) {
if let Some(peer) = self.peer(source) {
if let Ok(source) = PartialAddress::from_legacy_address_bytes(&packet_header.src) {
if let Some(peer) = self.peers.get_unambiguous(&source) {
peer.v1_proto_receive(self, app, inner, time_ticks, &path, packet_header, packet.as_ref(), &[]);
} else {
self.whois(app, source, Some((Arc::downgrade(&path), packet)), time_ticks);
// TODO
//self.whois(app, source, Some((Arc::downgrade(&path), packet)), time_ticks);
}
}
} // else not fragment and header incomplete
@ -788,7 +538,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
return;
}
if let Some(peer) = self.peer(dest) {
if let Some(peer) = self.peers.get_unambiguous(&dest) {
if let Some(forward_path) = peer.direct_path() {
app.wire_send(
&forward_path.endpoint,
@ -809,109 +559,12 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
}
/// Enqueue and send a WHOIS query for a given address, adding the supplied packet (if any) to the list to be processed on reply.
fn whois(
&self,
app: &Application,
address: Address,
waiting_packet: Option<(Weak<Path<Application::LocalSocket, Application::LocalInterface>>, PooledPacketBuffer)>,
time_ticks: i64,
) {
{
let mut whois_queue = self.whois_queue.lock().unwrap();
let qi = whois_queue.entry(address).or_insert_with(|| WhoisQueueItem {
v1_proto_waiting_packets: RingBuffer::new(),
last_retry_time: 0,
retry_count: 0,
});
if let Some(p) = waiting_packet {
qi.v1_proto_waiting_packets.add(p);
}
if qi.retry_count > 0 {
return;
} else {
qi.last_retry_time = time_ticks;
qi.retry_count += 1;
}
}
self.send_whois(app, &[address], time_ticks);
}
/// Send a WHOIS query to the current best root.
fn send_whois(&self, app: &Application, mut addresses: &[Address], time_ticks: i64) {
debug_assert!(!addresses.is_empty());
debug_event!(app, "[vl1] [v1] sending WHOIS for {}", {
let mut tmp = String::new();
for a in addresses.iter() {
if !tmp.is_empty() {
tmp.push(',');
}
tmp.push_str(a.to_string().as_str());
}
tmp
});
if let Some(root) = self.best_root() {
while !addresses.is_empty() {
if !root
.send(app, self, None, time_ticks, |packet| -> Result<(), Infallible> {
assert!(packet.append_u8(message_type::VL1_WHOIS).is_ok());
while !addresses.is_empty() && (packet.len() + ADDRESS_SIZE) <= UDP_DEFAULT_MTU {
assert!(packet.append_bytes_fixed(&addresses[0].to_bytes()).is_ok());
addresses = &addresses[1..];
}
Ok(())
})
.is_some()
{
break;
}
}
}
}
/// Called by Peer when an identity is received from another node, e.g. via OK(WHOIS).
pub(crate) fn handle_incoming_identity<Inner: InnerProtocolLayer + ?Sized>(
&self,
app: &Application,
inner: &Inner,
received_identity: Identity,
time_ticks: i64,
authoritative: bool,
) {
if authoritative {
if let Some(received_identity) = received_identity.validate() {
let mut whois_queue = self.whois_queue.lock().unwrap();
if let Some(qi) = whois_queue.get_mut(&received_identity.address) {
let address = received_identity.address;
if app.should_respond_to(&received_identity) {
let mut peers = self.peers.write().unwrap();
if let Some(peer) = peers.get(&address).cloned().or_else(|| {
Peer::new(&self.identity, received_identity, time_ticks)
.map(|p| Arc::new(p))
.and_then(|peer| Some(peers.entry(address).or_insert(peer).clone()))
}) {
drop(peers);
for p in qi.v1_proto_waiting_packets.iter() {
if let Some(path) = p.0.upgrade() {
if let Ok(packet_header) = p.1.struct_at::<v1::PacketHeader>(0) {
peer.v1_proto_receive(self, app, inner, time_ticks, &path, packet_header, &p.1, &[]);
}
}
}
}
}
whois_queue.remove(&address);
}
}
}
}
/// Called when a remote node sends us a root set update, applying the update if it is valid and applicable.
///
/// This will only replace an existing root set with a newer one. It won't add a new root set, which must be
/// done by an authorized user or administrator not just by a root.
#[allow(unused)]
pub(crate) fn on_remote_update_root_set(&self, received_from: &Identity, rs: Verified<RootSet>) {
pub(super) fn on_remote_update_root_set(&self, received_from: &Identity, rs: Verified<RootSet>) {
let mut roots = self.roots.write().unwrap();
if let Some(entry) = roots.sets.get_mut(&rs.name) {
if entry.members.iter().any(|m| m.identity.eq(received_from)) && rs.should_replace(entry) {
@ -922,13 +575,13 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
/// Get the canonical Path object corresponding to an endpoint.
pub(crate) fn canonical_path(
pub(super) fn canonical_path(
&self,
ep: &Endpoint,
local_socket: &Application::LocalSocket,
local_interface: &Application::LocalInterface,
time_ticks: i64,
) -> Arc<Path<Application::LocalSocket, Application::LocalInterface>> {
) -> Arc<Path<Application>> {
let paths = self.paths.read().unwrap();
if let Some(path) = paths.get(&PathKey::Ref(ep, local_socket)) {
path.clone()
@ -944,7 +597,7 @@ impl<Application: ApplicationLayer + ?Sized> Node<Application> {
}
}
/// Key used to look up paths in a hash map efficiently.
/// Key used to look up paths in a hash map efficiently. It can be constructed for lookup without a full copy.
enum PathKey<'a, 'b, LocalSocket: Hash + PartialEq + Eq + Clone> {
Copied(Endpoint, LocalSocket),
Ref(&'a Endpoint, &'b LocalSocket),

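canonical_path() above hands out one shared Arc per (endpoint, local socket) pair so statistics and keepalives attach to a single Path object. A simplified, self-contained sketch of that read-then-insert pattern (the function and its names are ours, not the crate's API, and the crate keys its map with the PathKey enum rather than a plain key type):

use std::collections::HashMap;
use std::hash::Hash;
use std::sync::{Arc, RwLock};

fn canonical<K: Hash + Eq + Clone, V>(
    map: &RwLock<HashMap<K, Arc<V>>>,
    key: &K,
    make: impl FnOnce() -> V,
) -> Arc<V> {
    // Fast path: return the existing shared object under the read lock.
    if let Some(v) = map.read().unwrap().get(key) {
        return v.clone();
    }
    // Slow path: take the write lock and insert if still absent.
    map.write()
        .unwrap()
        .entry(key.clone())
        .or_insert_with(|| Arc::new(make()))
        .clone()
}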
View file

@ -5,8 +5,9 @@ use std::hash::{BuildHasher, Hasher};
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Mutex;
use super::endpoint::Endpoint;
use super::ApplicationLayer;
use crate::protocol;
use crate::vl1::endpoint::Endpoint;
use zerotier_crypto::random;
use zerotier_utils::NEVER_HAPPENED_TICKS;
@ -24,18 +25,23 @@ pub(crate) enum PathServiceResult {
/// These are maintained in Node and canonicalized so that all unique paths have
/// one and only one unique path object. That enables statistics to be tracked
/// for them and uniform application of things like keepalives.
pub struct Path<LocalSocket, LocalInterface> {
pub struct Path<Application: ApplicationLayer + ?Sized> {
pub endpoint: Endpoint,
pub local_socket: LocalSocket,
pub local_interface: LocalInterface,
pub local_socket: Application::LocalSocket,
pub local_interface: Application::LocalInterface,
last_send_time_ticks: AtomicI64,
last_receive_time_ticks: AtomicI64,
create_time_ticks: i64,
fragmented_packets: Mutex<HashMap<u64, protocol::v1::FragmentedPacket, PacketIdHasher>>,
v1_fragmented_packets: Mutex<HashMap<u64, protocol::v1::FragmentedPacket, PacketIdHasher>>,
}
impl<LocalSocket, LocalInterface> Path<LocalSocket, LocalInterface> {
pub(crate) fn new(endpoint: Endpoint, local_socket: LocalSocket, local_interface: LocalInterface, time_ticks: i64) -> Self {
impl<Application: ApplicationLayer + ?Sized> Path<Application> {
pub(crate) fn new(
endpoint: Endpoint,
local_socket: Application::LocalSocket,
local_interface: Application::LocalInterface,
time_ticks: i64,
) -> Self {
Self {
endpoint,
local_socket,
@ -43,7 +49,7 @@ impl<LocalSocket, LocalInterface> Path<LocalSocket, LocalInterface> {
last_send_time_ticks: AtomicI64::new(NEVER_HAPPENED_TICKS),
last_receive_time_ticks: AtomicI64::new(NEVER_HAPPENED_TICKS),
create_time_ticks: time_ticks,
fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(4, PacketIdHasher(random::xorshift64_random()))),
v1_fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(4, PacketIdHasher(random::xorshift64_random()))),
}
}
@ -57,7 +63,7 @@ impl<LocalSocket, LocalInterface> Path<LocalSocket, LocalInterface> {
packet: protocol::PooledPacketBuffer,
time_ticks: i64,
) -> Option<protocol::v1::FragmentedPacket> {
let mut fp = self.fragmented_packets.lock().unwrap();
let mut fp = self.v1_fragmented_packets.lock().unwrap();
// Discard some old waiting packets if the total incoming fragments for a path exceeds a
// sanity limit. This is to prevent memory exhaustion DOS attacks.
@ -96,7 +102,7 @@ impl<LocalSocket, LocalInterface> Path<LocalSocket, LocalInterface> {
}
pub(crate) fn service(&self, time_ticks: i64) -> PathServiceResult {
self.fragmented_packets
self.v1_fragmented_packets
.lock()
.unwrap()
.retain(|_, frag| (time_ticks - frag.ts_ticks) < protocol::v1::FRAGMENT_EXPIRATION);

View file

@ -14,17 +14,20 @@ use zerotier_utils::marshalable::Marshalable;
use zerotier_utils::memory::array_range;
use zerotier_utils::NEVER_HAPPENED_TICKS;
use super::api::*;
use super::debug_event;
use super::identity::{Identity, IdentitySecret};
use super::node::*;
use super::Valid;
use super::{Address, Endpoint, Path};
use crate::protocol::*;
use crate::vl1::address::Address;
use crate::vl1::debug_event;
use crate::vl1::node::*;
use crate::vl1::Valid;
use crate::vl1::{Endpoint, Identity, Path};
use crate::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
use super::PartialAddress;
pub(crate) const SERVICE_INTERVAL_MS: i64 = 10000;
pub struct Peer<Application: ApplicationLayer + ?Sized> {
pub struct Peer<Application: ApplicationLayer> {
pub identity: Valid<Identity>,
v1_proto_static_secret: v1::SymmetricSecret,
@ -41,8 +44,8 @@ pub struct Peer<Application: ApplicationLayer + ?Sized> {
remote_node_info: RwLock<RemoteNodeInfo>,
}
struct PeerPath<Application: ApplicationLayer + ?Sized> {
path: Weak<Path<Application::LocalSocket, Application::LocalInterface>>,
struct PeerPath<Application: ApplicationLayer> {
path: Weak<Path<Application>>,
last_receive_time_ticks: i64,
}
@ -53,17 +56,17 @@ struct RemoteNodeInfo {
}
/// Sort a list of paths by quality or priority, with best paths first.
fn prioritize_paths<Application: ApplicationLayer + ?Sized>(paths: &mut Vec<PeerPath<Application>>) {
fn prioritize_paths<Application: ApplicationLayer>(paths: &mut Vec<PeerPath<Application>>) {
paths.sort_unstable_by(|a, b| a.last_receive_time_ticks.cmp(&b.last_receive_time_ticks).reverse());
}
impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
impl<Application: ApplicationLayer> Peer<Application> {
/// Create a new peer.
///
/// This only returns None if this_node_identity does not have its secrets or if some
/// fatal error occurs performing key agreement between the two identities.
pub(crate) fn new(this_node_identity: &Valid<Identity>, id: Valid<Identity>, time_ticks: i64) -> Option<Self> {
this_node_identity.agree(&id).map(|static_secret| -> Self {
pub(crate) fn new(this_node_identity: &IdentitySecret, id: Valid<Identity>, time_ticks: i64) -> Option<Self> {
this_node_identity.x25519.agree(&id).map(|static_secret| -> Self {
Self {
identity: id,
v1_proto_static_secret: v1::SymmetricSecret::new(static_secret),
@ -113,7 +116,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
/// Get current best path or None if there are no direct paths to this peer.
#[inline]
pub fn direct_path(&self) -> Option<Arc<Path<Application::LocalSocket, Application::LocalInterface>>> {
pub fn direct_path(&self) -> Option<Arc<Path<Application>>> {
for p in self.paths.lock().unwrap().iter() {
let pp = p.path.upgrade();
if pp.is_some() {
@ -125,7 +128,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
/// Get either the current best direct path or an indirect path via e.g. a root.
#[inline]
pub fn path(&self, node: &Node<Application>) -> Option<Arc<Path<Application::LocalSocket, Application::LocalInterface>>> {
pub fn path(&self, node: &Node<Application>) -> Option<Arc<Path<Application>>> {
let direct_path = self.direct_path();
if direct_path.is_some() {
return direct_path;
@ -136,7 +139,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
return None;
}
fn learn_path(&self, app: &Application, new_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>, time_ticks: i64) {
fn learn_path(&self, app: &Application, new_path: &Arc<Path<Application>>, time_ticks: i64) {
let mut paths = self.paths.lock().unwrap();
// TODO: check path filter
@ -285,7 +288,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
&self,
app: &Application,
node: &Node<Application>,
path: Option<&Arc<Path<Application::LocalSocket, Application::LocalInterface>>>,
path: Option<&Arc<Path<Application>>>,
time_ticks: i64,
builder_function: BuilderFunction,
) -> Option<Result<R, E>> {
@ -324,7 +327,11 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
let mut aes_gmac_siv = self.v1_proto_static_secret.aes_gmac_siv.get();
aes_gmac_siv.encrypt_init(&self.v1_proto_next_message_id().to_be_bytes());
aes_gmac_siv.encrypt_set_aad(&v1::get_packet_aad_bytes(self.identity.address, node.identity.address, flags_cipher_hops));
aes_gmac_siv.encrypt_set_aad(&v1::get_packet_aad_bytes(
&self.identity.address,
&node.identity.public.address,
flags_cipher_hops,
));
let payload = packet.as_bytes_starting_at_mut(v1::HEADER_SIZE).unwrap();
aes_gmac_siv.encrypt_first_pass(payload);
aes_gmac_siv.encrypt_first_pass_finish();
@ -333,8 +340,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
let header = packet.struct_mut_at::<v1::PacketHeader>(0).unwrap();
header.id.copy_from_slice(&tag[0..8]);
header.dest = self.identity.address.to_bytes();
header.src = node.identity.address.to_bytes();
header.dest = *self.identity.address.legacy_bytes();
header.src = *node.identity.public.address.legacy_bytes();
header.flags_cipher_hops = flags_cipher_hops;
header.mac.copy_from_slice(&tag[8..16]);
} else {
@ -350,8 +357,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
{
let header = packet.struct_mut_at::<v1::PacketHeader>(0).unwrap();
header.id = self.v1_proto_next_message_id().to_be_bytes();
header.dest = self.identity.address.to_bytes();
header.src = node.identity.address.to_bytes();
header.dest = *self.identity.address.legacy_bytes();
header.src = *node.identity.public.address.legacy_bytes();
header.flags_cipher_hops = flags_cipher_hops;
header
},
@ -408,8 +415,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
{
let f: &mut (v1::PacketHeader, v1::message_component_structs::HelloFixedHeaderFields) = packet.append_struct_get_mut().unwrap();
f.0.id = message_id.to_ne_bytes();
f.0.dest = self.identity.address.to_bytes();
f.0.src = node.identity.address.to_bytes();
f.0.dest = *self.identity.address.legacy_bytes();
f.0.src = *node.identity.public.address.legacy_bytes();
f.0.flags_cipher_hops = v1::CIPHER_NOCRYPT_POLY1305;
f.1.verb = message_type::VL1_HELLO;
f.1.version_proto = PROTOCOL_VERSION;
@ -420,7 +427,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
}
debug_assert_eq!(packet.len(), 41);
assert!(node.identity.write_public(packet.as_mut(), !self.is_v2()).is_ok());
assert!(node.identity.public.write_bytes(packet.as_mut(), !self.is_v2()).is_ok());
let (_, poly1305_key) = v1_proto_salsa_poly_create(
&self.v1_proto_static_secret,
@ -464,13 +471,13 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
/// those fragments after the main packet header and first chunk.
///
/// This returns true if the packet decrypted and passed authentication.
pub(crate) fn v1_proto_receive<Inner: InnerProtocolLayer + ?Sized>(
pub(crate) fn v1_proto_receive<Inner: InnerProtocolLayer>(
self: &Arc<Self>,
node: &Node<Application>,
app: &Application,
inner: &Inner,
time_ticks: i64,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
packet_header: &v1::PacketHeader,
frag0: &PacketBuffer,
fragments: &[Option<PooledPacketBuffer>],
@ -564,7 +571,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
node: &Node<Application>,
time_ticks: i64,
message_id: MessageId,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
payload: &PacketBuffer,
) -> PacketHandlerResult {
if !(app.should_respond_to(&self.identity) || node.this_node_is_root() || node.is_peer_root(self)) {
@ -611,13 +618,13 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
return PacketHandlerResult::Error;
}
fn handle_incoming_error<Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_error<Inner: InnerProtocolLayer>(
self: &Arc<Self>,
app: &Application,
inner: &Inner,
node: &Node<Application>,
_time_ticks: i64,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
payload: &PacketBuffer,
@ -649,13 +656,13 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
return PacketHandlerResult::Error;
}
fn handle_incoming_ok<Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_ok<Inner: InnerProtocolLayer>(
self: &Arc<Self>,
app: &Application,
inner: &Inner,
node: &Node<Application>,
time_ticks: i64,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
path_is_known: bool,
@ -716,7 +723,8 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
self.identity.address.to_string(),
received_identity.to_string()
);
node.handle_incoming_identity(app, inner, received_identity, time_ticks, true);
// TODO
//node.handle_incoming_identity(app, inner, received_identity, time_ticks, true);
} else {
debug_event!(
app,
@ -761,16 +769,16 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
) -> PacketHandlerResult {
if node.this_node_is_root() || app.should_respond_to(&self.identity) {
let mut addresses = payload.as_bytes();
while addresses.len() >= ADDRESS_SIZE {
while addresses.len() >= PartialAddress::LEGACY_SIZE_BYTES {
if !self
.send(app, node, None, time_ticks, |packet| {
while addresses.len() >= ADDRESS_SIZE && (packet.len() + Identity::MAX_MARSHAL_SIZE) <= UDP_DEFAULT_MTU {
if let Some(zt_address) = Address::from_bytes(&addresses[..ADDRESS_SIZE]) {
if let Some(peer) = node.peer(zt_address) {
peer.identity.write_public(packet, !self.is_v2())?;
while addresses.len() >= PartialAddress::LEGACY_SIZE_BYTES && (packet.len() + Identity::MAX_MARSHAL_SIZE) <= UDP_DEFAULT_MTU {
if let Ok(zt_address) = Address::from_bytes(&addresses[..PartialAddress::LEGACY_SIZE_BYTES]) {
if let Some(peer) = node.peer(&zt_address) {
peer.identity.write_bytes(packet, !self.is_v2())?;
}
}
addresses = &addresses[ADDRESS_SIZE..];
addresses = &addresses[PartialAddress::LEGACY_SIZE_BYTES..];
}
Ok(())
})
@ -789,7 +797,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
node: &Node<Application>,
_time_ticks: i64,
_message_id: MessageId,
_source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
_source_path: &Arc<Path<Application>>,
_payload: &PacketBuffer,
) -> PacketHandlerResult {
if node.is_peer_root(self) {}
@ -827,7 +835,7 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
_app: &Application,
_node: &Node<Application>,
_time_ticks: i64,
_source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
_source_path: &Arc<Path<Application>>,
_payload: &PacketBuffer,
) -> PacketHandlerResult {
PacketHandlerResult::Ok
@ -838,28 +846,28 @@ impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
_app: &Application,
_node: &Node<Application>,
_time_ticks: i64,
_source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
_source_path: &Arc<Path<Application>>,
_payload: &PacketBuffer,
) -> PacketHandlerResult {
PacketHandlerResult::Ok
}
}
impl<Application: ApplicationLayer + ?Sized> Hash for Peer<Application> {
impl<Application: ApplicationLayer> Hash for Peer<Application> {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
state.write_u64(self.identity.address.into());
self.identity.address.hash(state)
}
}
impl<Application: ApplicationLayer + ?Sized> PartialEq for Peer<Application> {
impl<Application: ApplicationLayer> PartialEq for Peer<Application> {
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
self.identity.fingerprint.eq(&other.identity.fingerprint)
self.identity.eq(&other.identity)
}
}
impl<Application: ApplicationLayer + ?Sized> Eq for Peer<Application> {}
impl<Application: ApplicationLayer> Eq for Peer<Application> {}
fn v1_proto_try_aead_decrypt(
secret: &v1::SymmetricSecret,

View file

@ -0,0 +1,91 @@
use std::collections::BTreeMap;
use std::ops::Bound;
use std::sync::{Arc, RwLock};
use super::address::{Address, PartialAddress};
use super::api::ApplicationLayer;
use super::identity::{Identity, IdentitySecret};
use super::peer::Peer;
use zerotier_crypto::typestate::Valid;
pub struct PeerMap<Application: ApplicationLayer> {
maps: [RwLock<BTreeMap<Address, Arc<Peer<Application>>>>; 256],
}
impl<Application: ApplicationLayer> PeerMap<Application> {
pub fn new() -> Self {
Self { maps: std::array::from_fn(|_| RwLock::new(BTreeMap::new())) }
}
pub fn each<F: FnMut(&Arc<Peer<Application>>)>(&self, mut f: F) {
for m in self.maps.iter() {
let mm = m.read().unwrap();
for (_, p) in mm.iter() {
f(p);
}
}
}
pub fn remove(&self, address: &Address) -> Option<Arc<Peer<Application>>> {
self.maps[address.0[0] as usize].write().unwrap().remove(address)
}
/// Get an exact match for a full specificity address.
/// This always returns None if the address provided does not have 384 bits of specificity.
pub fn get_exact(&self, address: &Address) -> Option<Arc<Peer<Application>>> {
self.maps[address.0[0] as usize].read().unwrap().get(address).cloned()
}
/// Get a matching peer for a partial address of any specificity, but return None if the match is ambiguous.
pub fn get_unambiguous(&self, address: &PartialAddress) -> Option<Arc<Peer<Application>>> {
let mm = self.maps[address.address.0[0] as usize].read().unwrap();
let matches = mm.range::<[u8; Address::SIZE_BYTES], (Bound<&[u8; Address::SIZE_BYTES]>, Bound<&[u8; Address::SIZE_BYTES]>)>((
Bound::Included(&address.address.0),
Bound::Unbounded,
));
let mut r = None;
for m in matches {
if address.matches(m.0) {
if r.is_none() {
let _ = r.insert(m.1);
} else {
return None;
}
} else {
break;
}
}
return r.cloned();
}
/// Insert the supplied peer if it is in fact new, otherwise return the existing peer with the same address.
pub fn add(&self, peer: Arc<Peer<Application>>) -> (Arc<Peer<Application>>, bool) {
let mut mm = self.maps[peer.identity.address.0[0] as usize].write().unwrap();
let p = mm.entry(peer.identity.address.clone()).or_insert(peer.clone());
if Arc::ptr_eq(p, &peer) {
(peer, true)
} else {
(p.clone(), false)
}
}
/// Get a peer or create one if not found.
/// This should be used when the peer will almost always be new, such as on OK(WHOIS).
pub fn get_or_add(
&self,
this_node_identity: &IdentitySecret,
peer_identity: &Valid<Identity>,
time_ticks: i64,
) -> Option<Arc<Peer<Application>>> {
let peer = Arc::new(Peer::new(this_node_identity, peer_identity.clone(), time_ticks)?);
Some(
self.maps[peer_identity.address.0[0] as usize]
.write()
.unwrap()
.entry(peer_identity.address.clone())
.or_insert(peer)
.clone(),
)
}
}
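The PeerMap above shards peers across 256 independently locked BTreeMaps selected by the first byte of the address, so exact lookups touch one small map and partial-address matches only need to range-scan a single shard. A simplified, self-contained sketch of that layout; Addr and the type names are stand-ins, not the real Address/PeerMap API:

use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};

// 384-bit (48-byte) full-specificity address, as in the comments above.
type Addr = [u8; 48];

struct PeerShards<P> {
    shards: [RwLock<BTreeMap<Addr, Arc<P>>>; 256],
}

impl<P> PeerShards<P> {
    fn new() -> Self {
        Self { shards: std::array::from_fn(|_| RwLock::new(BTreeMap::new())) }
    }

    // Exact lookup only ever locks the shard selected by the first address byte.
    fn get_exact(&self, addr: &Addr) -> Option<Arc<P>> {
        self.shards[addr[0] as usize].read().unwrap().get(addr).cloned()
    }

    // Insert if new, otherwise return the peer already stored at that address.
    fn add(&self, addr: Addr, peer: Arc<P>) -> Arc<P> {
        self.shards[addr[0] as usize]
            .write()
            .unwrap()
            .entry(addr)
            .or_insert(peer)
            .clone()
    }
}

The real get_unambiguous() then range-scans one shard starting at the partial address and returns None as soon as a second matching entry shows up.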

View file

@ -3,12 +3,12 @@
use std::collections::BTreeSet;
use std::io::Write;
use crate::vl1::identity::{Identity, IDENTITY_MAX_SIGNATURE_SIZE};
use crate::vl1::Endpoint;
use super::endpoint::Endpoint;
use super::identity::{Identity, IdentitySecret};
use zerotier_crypto::typestate::Verified;
use zerotier_utils::arrayvec::ArrayVec;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use serde::{Deserialize, Serialize};
@ -32,7 +32,7 @@ pub struct Root {
/// This is populated by the sign() method when the completed root set is signed by each member.
/// All member roots must sign.
#[serde(default)]
pub signature: ArrayVec<u8, IDENTITY_MAX_SIGNATURE_SIZE>,
pub signature: ArrayVec<u8, { Identity::MAX_SIGNATURE_SIZE }>,
/// Priority (higher number is lower priority, 0 is default).
///
@ -132,7 +132,7 @@ impl RootSet {
) {
self.members.retain(|m| m.identity.address != member_identity.address);
let _ = self.members.push(Root {
identity: member_identity.clone_without_secret(),
identity: member_identity.clone(),
endpoints: endpoints.map(|endpoints| {
let mut tmp = BTreeSet::new();
for a in endpoints {
@ -154,22 +154,22 @@ impl RootSet {
///
/// All current members must sign whether they are disabled (witnessing) or active. The verify()
/// method will return true when signing is complete.
pub fn sign(&mut self, member_identity: &Identity) -> bool {
let signature = member_identity.sign(self.marshal_for_signing().as_bytes(), false);
pub fn sign(&mut self, member_identity_secret: &IdentitySecret) -> bool {
let signature = member_identity_secret.sign(self.marshal_for_signing().as_bytes());
let unsigned_entry = self.members.iter().find_map(|m| {
if m.identity.eq(member_identity) {
if m.identity.eq(&member_identity_secret.public) {
Some(m.clone())
} else {
None
}
});
if unsigned_entry.is_some() && signature.is_some() {
if unsigned_entry.is_some() {
let unsigned_entry = unsigned_entry.unwrap();
self.members.retain(|m| !m.identity.eq(member_identity));
self.members.retain(|m| !m.identity.eq(&member_identity_secret.public));
let _ = self.members.push(Root {
identity: unsigned_entry.identity,
endpoints: unsigned_entry.endpoints,
signature: signature.unwrap(),
signature: signature,
priority: unsigned_entry.priority,
protocol_version: unsigned_entry.protocol_version,
});
@ -197,9 +197,9 @@ impl RootSet {
/// new root cluster definition and 'previous' being the current/old one.
pub fn should_replace(&self, previous: &Self) -> bool {
if self.name.eq(&previous.name) && self.revision > previous.revision {
let mut my_signers = BTreeSet::new();
let mut my_signers = Vec::with_capacity(self.members.len());
for m in self.members.iter() {
my_signers.insert(m.identity.fingerprint.clone());
my_signers.push(&m.identity);
}
let mut previous_count: isize = 0;
@ -207,7 +207,7 @@ impl RootSet {
for m in previous.members.iter() {
if m.endpoints.is_some() {
previous_count += 1;
witness_count += my_signers.contains(&m.identity.fingerprint) as isize;
witness_count += my_signers.iter().any(|id| (*id).eq(&m.identity)) as isize;
}
}
@ -217,7 +217,7 @@ impl RootSet {
}
}
fn marshal_internal<const BL: usize>(&self, buf: &mut Buffer<BL>, include_signatures: bool) -> Result<(), UnmarshalError> {
fn marshal_internal<const BL: usize>(&self, buf: &mut Buffer<BL>, include_signatures: bool) -> Result<(), OutOfBoundsError> {
buf.append_u8(0)?; // version byte for future use
buf.append_varint(self.name.as_bytes().len() as u64)?;
@ -265,7 +265,7 @@ impl Marshalable for RootSet {
const MAX_MARSHAL_SIZE: usize = crate::protocol::v1::SIZE_MAX;
#[inline(always)]
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), UnmarshalError> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), OutOfBoundsError> {
self.marshal_internal(buf, true)
}

View file

@ -0,0 +1,116 @@
use std::collections::BTreeMap;
use std::convert::Infallible;
use std::ops::Bound;
use std::sync::{Mutex, Weak};
use super::address::PartialAddress;
use super::api::{ApplicationLayer, InnerProtocolLayer};
use super::identity::Identity;
use super::node::Node;
use super::path::Path;
use crate::debug_event;
use crate::protocol;
use zerotier_crypto::typestate::Valid;
use zerotier_utils::ringbuffer::RingBuffer;
pub(super) struct Whois<Application: ApplicationLayer> {
whois_queue: Mutex<BTreeMap<PartialAddress, WhoisQueueItem<Application>>>,
}
struct WhoisQueueItem<Application: ApplicationLayer> {
pending_v1_packets: RingBuffer<(Weak<Path<Application>>, protocol::PooledPacketBuffer), { protocol::WHOIS_MAX_WAITING_PACKETS }>,
last_retry_time: i64,
retry_count: u16,
}
impl<Application: ApplicationLayer> Whois<Application> {
pub fn new() -> Self {
Self { whois_queue: Mutex::new(BTreeMap::new()) }
}
pub fn query(
&self,
app: &Application,
address: &PartialAddress,
waiting_packet: Option<(Weak<Path<Application>>, protocol::PooledPacketBuffer)>,
time_ticks: i64,
) {
}
pub fn handle_incoming_identity<Inner: InnerProtocolLayer>(
&self,
app: &Application,
node: &Node<Application>,
inner: &Inner,
time_ticks: i64,
identity: Valid<Identity>,
) {
let mut queued_items = Vec::with_capacity(2);
{
// Iterate "up" the sorted list of pending requests since less specific addresses will be sorted
// before more specific addresses. We keep going up until we find a non-matching address, matching
// all partials that this full identity matches.
let mut q = self.whois_queue.lock().unwrap();
let mut to_delete = Vec::with_capacity(2);
for qi in q.range((Bound::Unbounded, Bound::Included(identity.address.to_partial()))).rev() {
if qi.0.matches(&identity.address) {
to_delete.push(qi.0.clone());
// TODO
} else {
break;
}
}
for a in to_delete {
queued_items.push(q.remove(&a).unwrap());
}
}
if let Some(peer) = node.peers.get_or_add(&node.identity, &identity, time_ticks) {
for qi in queued_items.iter() {
for pkt in qi.pending_v1_packets.iter() {
if let Some(source_path) = pkt.0.upgrade() {
if let Ok(packet_header) = pkt.1.struct_at::<protocol::v1::PacketHeader>(0) {
peer.v1_proto_receive(node, app, inner, time_ticks, &source_path, packet_header, &pkt.1, &[]);
}
}
}
}
}
}
pub fn retry_queued(&self) {}
fn send_whois(&self, app: &Application, node: &Node<Application>, mut addresses: &[PartialAddress], time_ticks: i64) {
debug_assert!(!addresses.is_empty());
debug_event!(app, "[vl1] [v1] sending WHOIS for {}", {
let mut tmp = String::new();
for a in addresses.iter() {
if !tmp.is_empty() {
tmp.push(',');
}
tmp.push_str(a.to_string().as_str());
}
tmp
});
if let Some(root) = node.best_root() {
while !addresses.is_empty() {
if !root
.send(app, node, None, time_ticks, |packet| -> Result<(), Infallible> {
assert!(packet.append_u8(protocol::message_type::VL1_WHOIS).is_ok());
while !addresses.is_empty() && (packet.len() + addresses[0].as_bytes().len()) <= protocol::UDP_DEFAULT_MTU {
debug_assert_eq!(addresses[0].as_bytes().len(), PartialAddress::LEGACY_SIZE_BYTES); // will need protocol work to support different partial sizes
assert!(packet.append_bytes(addresses[0].as_bytes()).is_ok());
addresses = &addresses[1..];
}
Ok(())
})
.is_some()
{
break;
}
}
}
}
}
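handle_incoming_identity() above relies on the ordering property that an address prefix sorts at or before every full address beginning with it, so scanning the queue backwards from the full address visits the matching partials first. A simplified sketch of that scan over raw byte keys (a hypothetical helper, not the crate's API); with fixed-size legacy partials there is at most one match per identity, which is why the early break mirrors the loop above:

use std::collections::BTreeMap;
use std::ops::Bound;

fn matching_prefixes<V>(queue: &BTreeMap<Vec<u8>, V>, full: &[u8]) -> Vec<Vec<u8>> {
    let mut matched = Vec::new();
    // Walk backwards from the full address; a prefix of `full` can only appear
    // at or below it in sort order.
    for (key, _) in queue
        .range((Bound::Unbounded, Bound::Included(full.to_vec())))
        .rev()
    {
        if full.starts_with(key) {
            matched.push(key.clone());
        } else {
            break;
        }
    }
    matched
}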

View file

@ -3,6 +3,7 @@ use std::sync::{Arc, Mutex, RwLock};
use crate::protocol;
use crate::protocol::PacketBuffer;
use crate::vl1::identity::Identity;
use crate::vl1::*;
use crate::vl2::{MulticastGroup, NetworkId};
@ -41,7 +42,7 @@ impl MulticastAuthority {
}
/// Call for VL2_MULTICAST_LIKE packets.
pub fn handle_vl2_multicast_like<Application: ApplicationLayer + ?Sized, Authenticator: Fn(NetworkId, &Identity) -> bool>(
pub fn handle_vl2_multicast_like<Application: ApplicationLayer, Authenticator: Fn(&NetworkId, &Identity) -> bool>(
&self,
auth: Authenticator,
time_ticks: i64,
@ -52,14 +53,14 @@ impl MulticastAuthority {
let mut subscriptions = RMaybeWLockGuard::new_read(&self.subscriptions);
while (cursor + 8 + 6 + 4) <= payload.len() {
let network_id = NetworkId::from_bytes_fixed(payload.read_bytes_fixed(&mut cursor).unwrap());
if let Some(network_id) = network_id {
let network_id = NetworkId::from_bytes(payload.read_bytes_fixed::<8>(&mut cursor).unwrap());
if let Ok(network_id) = network_id {
let mac = MAC::from_bytes_fixed(payload.read_bytes_fixed(&mut cursor).unwrap());
if let Some(mac) = mac {
if auth(network_id, &source.identity) {
let sub_key = (network_id, MulticastGroup { mac, adi: payload.read_u32(&mut cursor).unwrap() });
if auth(&network_id, &source.identity) {
let sub_key = (network_id.clone(), MulticastGroup { mac, adi: payload.read_u32(&mut cursor).unwrap() });
if let Some(sub) = subscriptions.read().get(&sub_key) {
let _ = sub.lock().unwrap().insert(source.identity.address, time_ticks);
let _ = sub.lock().unwrap().insert(source.identity.address.clone(), time_ticks);
} else {
let _ = subscriptions
.write(&self.subscriptions)
@ -67,7 +68,7 @@ impl MulticastAuthority {
.or_insert_with(|| Mutex::new(HashMap::new()))
.lock()
.unwrap()
.insert(source.identity.address, time_ticks);
.insert(source.identity.address.clone(), time_ticks);
}
}
}
@ -78,7 +79,7 @@ impl MulticastAuthority {
}
/// Call for VL2_MULTICAST_GATHER packets.
pub fn handle_vl2_multicast_gather<Application: ApplicationLayer + ?Sized, Authenticator: Fn(NetworkId, &Identity) -> bool>(
pub fn handle_vl2_multicast_gather<Application: ApplicationLayer, Authenticator: Fn(&NetworkId, &Identity) -> bool>(
&self,
auth: Authenticator,
time_ticks: i64,
@ -90,20 +91,20 @@ impl MulticastAuthority {
mut cursor: usize,
) -> PacketHandlerResult {
if let Some(network_id) = payload
.read_bytes_fixed(&mut cursor)
.map_or(None, |network_id| NetworkId::from_bytes_fixed(network_id))
.read_bytes_fixed::<8>(&mut cursor)
.map_or(None, |network_id| NetworkId::from_bytes(network_id).ok())
{
if auth(network_id, &source.identity) {
if auth(&network_id, &source.identity) {
cursor += 1; // skip flags, currently unused
if let Some(mac) = payload.read_bytes_fixed(&mut cursor).map_or(None, |mac| MAC::from_bytes_fixed(mac)) {
let mut gathered = Vec::new();
let adi = payload.read_u32(&mut cursor).unwrap_or(0);
let subscriptions = self.subscriptions.read().unwrap();
if let Some(sub) = subscriptions.get(&(network_id, MulticastGroup { mac, adi })) {
if let Some(sub) = subscriptions.get(&(network_id.clone(), MulticastGroup { mac, adi })) {
let sub = sub.lock().unwrap();
for a in sub.keys() {
gathered.push(*a);
gathered.push(a.clone());
}
}
@ -114,19 +115,19 @@ impl MulticastAuthority {
ok_header.in_re_verb = protocol::message_type::VL2_MULTICAST_GATHER;
ok_header.in_re_message_id = message_id.to_be_bytes();
packet.append_bytes_fixed(&network_id.to_bytes())?;
packet.append_bytes_fixed(&network_id.to_legacy_u64().to_be_bytes())?;
packet.append_bytes_fixed(&mac.to_bytes())?;
packet.append_u32(adi)?;
packet.append_u32(gathered.len() as u32)?;
let in_this_packet = gathered
.len()
.clamp(1, (packet.capacity() - packet.len()) / protocol::ADDRESS_SIZE)
.clamp(1, (packet.capacity() - packet.len()) / PartialAddress::LEGACY_SIZE_BYTES)
.min(u16::MAX as usize);
packet.append_u16(in_this_packet as u16)?;
for _ in 0..in_this_packet {
packet.append_bytes_fixed(&gathered.pop().unwrap().to_bytes())?;
packet.append_bytes_fixed(gathered.pop().unwrap().legacy_bytes())?;
}
Ok(())
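The in_this_packet expression above bounds the reply to whatever fits in the remaining buffer, five bytes per legacy address, and to the 16-bit count field that precedes the addresses. The same arithmetic in isolation (illustrative names, mirroring the clamp/min chain above):

// Assumes at least one 5-byte address fits in the remaining space; otherwise
// clamp(1, 0) would panic, exactly as the expression above would.
fn gather_reply_count(gathered: usize, remaining_bytes: usize) -> usize {
    const LEGACY_ADDRESS_BYTES: usize = 5;
    gathered
        .clamp(1, remaining_bytes / LEGACY_ADDRESS_BYTES)
        .min(u16::MAX as usize)
}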

View file

@ -1,122 +1,124 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::num::NonZeroU64;
use std::str::FromStr;
use crate::vl1::{Address, PartialAddress};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::hex;
use zerotier_utils::hex::HEX_CHARS;
use crate::protocol::ADDRESS_MASK;
use crate::vl1::Address;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct NetworkId(NonZeroU64);
#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum NetworkId {
// Legacy network ID consisting of 40-bit partial address and 24-bit network number.
Legacy(u64),
// Full length network ID consisting of 384-bit address and 24-bit network number.
Full(Address, u32),
}
impl NetworkId {
#[inline]
pub fn from_u64(i: u64) -> Option<NetworkId> {
// Note that we check both that 'i' is non-zero and that the address of the controller is valid.
if let Some(ii) = NonZeroU64::new(i) {
if Address::from_u64(i & ADDRESS_MASK).is_some() {
return Some(Self(ii));
/// Maximum network number on a controller (24 bits)
pub const MAX_NETWORK_NO: u32 = 0xffffff;
pub fn to_bytes(&self) -> Vec<u8> {
match self {
Self::Legacy(nwid) => nwid.to_be_bytes().to_vec(),
Self::Full(controller, nw) => {
let mut tmp = [0u8; Address::SIZE_BYTES + 4];
tmp[..Address::SIZE_BYTES].copy_from_slice(controller.as_bytes());
tmp[Address::SIZE_BYTES..].copy_from_slice(&nw.to_be_bytes());
tmp.to_vec()
}
}
return None;
}
#[inline]
pub fn from_controller_and_network_no(controller: Address, network_no: u64) -> Option<NetworkId> {
Self::from_u64(u64::from(controller).wrapping_shl(24) | (network_no & 0xffffff))
}
#[inline]
pub fn from_bytes(b: &[u8]) -> Option<NetworkId> {
if b.len() >= 8 {
Self::from_bytes_fixed(b[0..8].try_into().unwrap())
pub fn from_bytes(b: &[u8]) -> Result<Self, InvalidParameterError> {
if b.len() == 8 {
Self::from_legacy_u64(u64::from_be_bytes(b.try_into().unwrap()))
} else if b.len() == Address::SIZE_BYTES + 4 {
Ok(Self::Full(
Address::from_bytes(&b[..Address::SIZE_BYTES])?,
u32::from_be_bytes(b[Address::SIZE_BYTES..].try_into().unwrap()),
))
} else {
None
Err(InvalidParameterError("invalid network ID"))
}
}
#[inline]
pub fn from_bytes_fixed(b: &[u8; 8]) -> Option<NetworkId> {
Self::from_u64(u64::from_be_bytes(*b))
}
#[inline]
pub fn to_bytes(&self) -> [u8; 8] {
self.0.get().to_be_bytes()
}
/// Get the network controller ID for this network, which is the most significant 40 bits.
#[inline]
pub fn network_controller(&self) -> Address {
Address::from_u64(self.0.get()).unwrap()
}
/// Consume this network ID and return one with the same network number but a different controller ID.
pub fn change_network_controller(self, new_controller: Address) -> NetworkId {
Self(NonZeroU64::new((self.network_no() as u64) | u64::from(new_controller).wrapping_shl(24)).unwrap())
}
/// Get the 24-bit local network identifier minus the 40-bit controller address portion.
#[inline]
/// Get the 24-bit network number on the network's controller.
pub fn network_no(&self) -> u32 {
(self.0.get() & 0xffffff) as u32
match self {
Self::Legacy(nwid) => (*nwid & 0xffffff) as u32,
Self::Full(_, nwid) => *nwid & 0xffffff,
}
}
}
impl From<NetworkId> for u64 {
#[inline(always)]
fn from(v: NetworkId) -> Self {
v.0.get()
pub fn from_legacy_u64(nwid: u64) -> Result<Self, InvalidParameterError> {
let _ = PartialAddress::from_legacy_address_u64(nwid)?; // check validity of address portion
Ok(Self::Legacy(nwid))
}
}
impl From<&NetworkId> for u64 {
#[inline(always)]
fn from(v: &NetworkId) -> Self {
v.0.get()
/// Get the legacy 40-bit partial controller address from this network ID.
pub(crate) fn legacy_controller_address(&self) -> PartialAddress {
match self {
Self::Legacy(nwid) => PartialAddress::from_legacy_address_u64(nwid.wrapping_shr(24)).unwrap(),
Self::Full(controller, _) => PartialAddress::from_bytes(&controller.as_bytes()[..PartialAddress::LEGACY_SIZE_BYTES]).unwrap(),
}
}
/// Convert this into a legacy network ID in u64 form, or return itself if already a legacy ID.
pub fn to_legacy_u64(&self) -> u64 {
match self {
Self::Legacy(nwid) => *nwid,
Self::Full(controller, nw) => controller.legacy_u64().wrapping_shl(24) | ((*nw & Self::MAX_NETWORK_NO) as u64),
}
}
}
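to_legacy_u64() and legacy_controller_address() above round-trip the legacy encoding: the upper 40 bits of the u64 hold the controller's partial address and the low 24 bits hold the network number. A minimal standalone sketch of that packing, with illustrative values:

// Not part of the diff; mirrors the bit layout used by to_legacy_u64() above.
fn pack_legacy_network_id(controller_address_40: u64, network_no_24: u32) -> u64 {
    debug_assert!(controller_address_40 <= 0x00ff_ffff_ffff); // must fit in 40 bits
    (controller_address_40 << 24) | ((network_no_24 & 0x00ff_ffff) as u64)
}

fn unpack_legacy_network_id(nwid: u64) -> (u64, u32) {
    (nwid >> 24, (nwid & 0x00ff_ffff) as u32) // (controller address, network number)
}

fn main() {
    let nwid = pack_legacy_network_id(0x8056c2e21c, 1);
    assert_eq!(nwid, 0x8056c2e21c000001);
    assert_eq!(unpack_legacy_network_id(nwid), (0x8056c2e21c, 1));
}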
impl ToString for NetworkId {
fn to_string(&self) -> String {
let mut v = self.0.get();
let mut s = String::with_capacity(16);
for _ in 0..16 {
s.push(HEX_CHARS[(v >> 60) as usize] as char);
v <<= 4;
match self {
Self::Legacy(nwid) => hex::to_string_u64(*nwid, false),
Self::Full(controller, nw) => format!("{:06x}@{}", *nw & Self::MAX_NETWORK_NO, controller.to_string()),
}
s
}
}
impl Debug for NetworkId {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.to_string().as_str())
}
}
impl FromStr for NetworkId {
type Err = InvalidFormatError;
type Err = InvalidParameterError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
NetworkId::from_bytes(hex::from_string(s).as_slice()).map_or_else(|| Err(InvalidFormatError), |a| Ok(a))
if s.len() == 16 {
Self::from_legacy_u64(hex::from_string_u64(s))
} else {
let mut fno = 0;
let mut net_no = 0;
let mut controller = None;
for ss in s.split('@') {
if fno == 0 {
net_no = u32::from_str_radix(ss, 16).map_err(|_| InvalidParameterError("invalid network ID"))?;
} else if fno == 1 {
controller = Some(Address::from_str(ss)?);
} else {
return Err(InvalidParameterError("invalid network ID"));
}
fno += 1;
}
if let Some(controller) = controller {
return Ok(Self::Full(controller, net_no as u32));
} else {
return Err(InvalidParameterError("invalid network ID"));
}
}
}
}
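FromStr above accepts two textual encodings, matching to_string(): a bare 16-hex-digit legacy ID, or the full form written as a 6-hex-digit network number, an '@', and the controller address string. A brief usage sketch with illustrative values:

// Usage sketch; NetworkId and its methods are the types defined above.
use std::str::FromStr;

fn parse_network_ids() {
    // 16 hex characters parse as NetworkId::Legacy.
    let legacy = NetworkId::from_str("8056c2e21c000001").unwrap();
    assert_eq!(legacy.to_legacy_u64(), 0x8056c2e21c000001);

    // The full form is "<6-hex network number>@<controller address string>",
    // i.e. whatever to_string() above produces for a NetworkId::Full value.
}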
impl Hash for NetworkId {
impl Debug for NetworkId {
#[inline(always)]
fn hash<H: Hasher>(&self, state: &mut H) {
state.write_u64(self.0.get());
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.to_string().as_str())
}
}
@ -128,7 +130,7 @@ impl Serialize for NetworkId {
if serializer.is_human_readable() {
serializer.serialize_str(self.to_string().as_str())
} else {
serializer.serialize_bytes(&self.to_bytes())
serializer.serialize_bytes(self.to_bytes().as_slice())
}
}
}
@ -139,25 +141,21 @@ impl<'de> serde::de::Visitor<'de> for NetworkIdVisitor {
type Value = NetworkId;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a ZeroTier network ID")
formatter.write_str("network ID")
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if v.len() == 8 {
NetworkId::from_bytes(v).map_or_else(|| Err(E::custom("object too large")), |a| Ok(a))
} else {
Err(E::custom("object too large"))
}
NetworkId::from_bytes(v).map_err(|_| E::custom("invalid network ID"))
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
NetworkId::from_str(v).map_err(|e| E::custom(e.to_string()))
NetworkId::from_str(v).map_err(|_| E::custom("invalid network ID"))
}
}

View file

@ -6,11 +6,10 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use phf::phf_map;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use crate::protocol;
use crate::vl1::{Address, InetAddress, MAC};
use crate::vl1::{InetAddress, PartialAddress, MAC};
#[allow(unused)]
pub const RULES_ENGINE_REVISION: u8 = 1;
@ -174,16 +173,16 @@ impl Default for RuleValue {
pub trait RuleVisitor {
fn action_drop(&mut self) -> bool;
fn action_accept(&mut self) -> bool;
fn action_tee(&mut self, address: Address, flags: u32, length: u16) -> bool;
fn action_watch(&mut self, address: Address, flags: u32, length: u16) -> bool;
fn action_redirect(&mut self, address: Address, flags: u32, length: u16) -> bool;
fn action_tee(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool;
fn action_watch(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool;
fn action_redirect(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool;
fn action_break(&mut self) -> bool;
fn action_priority(&mut self, qos_bucket: u8) -> bool;
fn invalid_rule(&mut self) -> bool;
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: Address);
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: Address);
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: PartialAddress);
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: PartialAddress);
fn match_vlan_id(&mut self, not: bool, or: bool, id: u16);
fn match_vlan_pcp(&mut self, not: bool, or: bool, pcp: u8);
fn match_vlan_dei(&mut self, not: bool, or: bool, dei: u8);
@ -235,29 +234,29 @@ impl Rule {
Self { t: action::DROP, v: RuleValue::default() }
}
pub fn action_tee(address: Address, flags: u32, length: u16) -> Self {
pub fn action_tee(address: PartialAddress, flags: u32, length: u16) -> Self {
Self {
t: action::TEE,
v: RuleValue {
forward: rule_value::Forward { address: address.into(), flags, length },
forward: rule_value::Forward { address: address.legacy_u64(), flags, length },
},
}
}
pub fn action_watch(address: Address, flags: u32, length: u16) -> Self {
pub fn action_watch(address: PartialAddress, flags: u32, length: u16) -> Self {
Self {
t: action::WATCH,
v: RuleValue {
forward: rule_value::Forward { address: address.into(), flags, length },
forward: rule_value::Forward { address: address.legacy_u64(), flags, length },
},
}
}
pub fn action_redirect(address: Address, flags: u32, length: u16) -> Self {
pub fn action_redirect(address: PartialAddress, flags: u32, length: u16) -> Self {
Self {
t: action::REDIRECT,
v: RuleValue {
forward: rule_value::Forward { address: address.into(), flags, length },
forward: rule_value::Forward { address: address.legacy_u64(), flags, length },
},
}
}
@ -270,17 +269,17 @@ impl Rule {
Self { t: action::PRIORITY, v: RuleValue { qos_bucket } }
}
pub fn match_source_zerotier_address(not: bool, or: bool, address: Address) -> Self {
pub fn match_source_zerotier_address(not: bool, or: bool, address: PartialAddress) -> Self {
Self {
t: t(not, or, match_cond::SOURCE_ZEROTIER_ADDRESS),
v: RuleValue { zt: address.into() },
v: RuleValue { zt: address.legacy_u64() },
}
}
pub fn match_dest_zerotier_address(not: bool, or: bool, address: Address) -> Self {
pub fn match_dest_zerotier_address(not: bool, or: bool, address: PartialAddress) -> Self {
Self {
t: t(not, or, match_cond::DEST_ZEROTIER_ADDRESS),
v: RuleValue { zt: address.into() },
v: RuleValue { zt: address.legacy_u64() },
}
}
@ -306,21 +305,21 @@ impl Rule {
return v.action_accept();
}
action::TEE => {
if let Some(a) = Address::from_u64(self.v.forward.address) {
if let Ok(a) = PartialAddress::from_legacy_address_u64(self.v.forward.address) {
return v.action_tee(a, self.v.forward.flags, self.v.forward.length);
} else {
return v.invalid_rule();
}
}
action::WATCH => {
if let Some(a) = Address::from_u64(self.v.forward.address) {
if let Ok(a) = PartialAddress::from_legacy_address_u64(self.v.forward.address) {
return v.action_watch(a, self.v.forward.flags, self.v.forward.length);
} else {
return v.invalid_rule();
}
}
action::REDIRECT => {
if let Some(a) = Address::from_u64(self.v.forward.address) {
if let Ok(a) = PartialAddress::from_legacy_address_u64(self.v.forward.address) {
return v.action_redirect(a, self.v.forward.flags, self.v.forward.length);
} else {
return v.invalid_rule();
@ -333,14 +332,14 @@ impl Rule {
return v.action_priority(self.v.qos_bucket);
}
match_cond::SOURCE_ZEROTIER_ADDRESS => {
if let Some(a) = Address::from_u64(self.v.zt) {
if let Ok(a) = PartialAddress::from_legacy_address_u64(self.v.zt) {
v.match_source_zerotier_address(not, or, a);
} else {
return v.invalid_rule();
}
}
match_cond::DEST_ZEROTIER_ADDRESS => {
if let Some(a) = Address::from_u64(self.v.zt) {
if let Ok(a) = PartialAddress::from_legacy_address_u64(self.v.zt) {
v.match_dest_zerotier_address(not, or, a);
} else {
return v.invalid_rule();
@ -447,7 +446,7 @@ impl Rule {
impl Marshalable for Rule {
const MAX_MARSHAL_SIZE: usize = 21;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), UnmarshalError> {
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), OutOfBoundsError> {
buf.append_u8(self.t)?;
unsafe {
match self.t & 0x3f {
@ -462,7 +461,7 @@ impl Marshalable for Rule {
}
match_cond::SOURCE_ZEROTIER_ADDRESS | match_cond::DEST_ZEROTIER_ADDRESS => {
buf.append_u8(5)?;
buf.append_bytes(&self.v.zt.to_be_bytes()[..protocol::ADDRESS_SIZE])?;
buf.append_bytes(&self.v.zt.to_be_bytes()[..PartialAddress::LEGACY_SIZE_BYTES])?;
}
match_cond::VLAN_ID => {
buf.append_u8(2)?;
@ -562,7 +561,7 @@ impl Marshalable for Rule {
r.v.qos_bucket = buf.read_u8(cursor)?;
}
match_cond::SOURCE_ZEROTIER_ADDRESS | match_cond::DEST_ZEROTIER_ADDRESS => {
let a = buf.read_bytes_fixed::<{ protocol::ADDRESS_SIZE }>(cursor)?;
let a = buf.read_bytes_fixed::<{ PartialAddress::LEGACY_SIZE_BYTES }>(cursor)?;
r.v.zt = (a[0] as u64).wrapping_shl(32)
| (a[1] as u64).wrapping_shl(24)
| (a[2] as u64).wrapping_shl(16)
@ -775,13 +774,13 @@ static HR_NAME_TO_RULE_TYPE: phf::Map<&'static str, u8> = phf_map! {
#[derive(Default, Serialize, Deserialize)]
struct HumanReadableRule<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
pub address: Option<Address>,
pub address: Option<PartialAddress>,
#[serde(skip_serializing_if = "Option::is_none")]
pub flags: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub length: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub zt: Option<Address>,
pub zt: Option<PartialAddress>,
#[serde(skip_serializing_if = "Option::is_none")]
pub vlanId: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
@ -837,7 +836,7 @@ impl<'a> HumanReadableRule<'a> {
unsafe {
match *t {
action::TEE | action::WATCH | action::REDIRECT => {
r.v.forward.address = self.address?.into();
r.v.forward.address = self.address.as_ref()?.legacy_u64();
r.v.forward.flags = self.flags?;
r.v.forward.length = self.length?;
}
@ -845,7 +844,7 @@ impl<'a> HumanReadableRule<'a> {
r.v.qos_bucket = self.qosBucket?;
}
match_cond::SOURCE_ZEROTIER_ADDRESS | match_cond::DEST_ZEROTIER_ADDRESS => {
r.v.zt = self.address?.into();
r.v.zt = self.address.as_ref()?.legacy_u64();
}
match_cond::VLAN_ID => {
r.v.vlan_id = self.vlanId?;
@ -982,7 +981,7 @@ impl<'a> RuleVisitor for MakeHumanReadable<'a> {
}
#[inline(always)]
fn action_tee(&mut self, address: Address, flags: u32, length: u16) -> bool {
fn action_tee(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool {
self.0._type = "ACTION_TEE";
let _ = self.0.address.insert(address);
let _ = self.0.flags.insert(flags);
@ -991,7 +990,7 @@ impl<'a> RuleVisitor for MakeHumanReadable<'a> {
}
#[inline(always)]
fn action_watch(&mut self, address: Address, flags: u32, length: u16) -> bool {
fn action_watch(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool {
self.0._type = "ACTION_WATCH";
let _ = self.0.address.insert(address);
let _ = self.0.flags.insert(flags);
@ -1000,7 +999,7 @@ impl<'a> RuleVisitor for MakeHumanReadable<'a> {
}
#[inline(always)]
fn action_redirect(&mut self, address: Address, flags: u32, length: u16) -> bool {
fn action_redirect(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool {
self.0._type = "ACTION_REDIRECT";
let _ = self.0.address.insert(address);
let _ = self.0.flags.insert(flags);
@ -1027,13 +1026,13 @@ impl<'a> RuleVisitor for MakeHumanReadable<'a> {
}
#[inline(always)]
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: Address) {
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: PartialAddress) {
let _ = self.0.zt.insert(address);
self.do_cond("MATCH_SOURCE_ZEROTIER_ADDRESS", not, or);
}
#[inline(always)]
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: Address) {
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: PartialAddress) {
let _ = self.0.zt.insert(address);
self.do_cond("MATCH_DEST_ZEROTIER_ADDRESS", not, or);
}
@ -1217,19 +1216,19 @@ impl RuleVisitor for RuleStringer {
}
#[inline(always)]
fn action_tee(&mut self, address: Address, flags: u32, length: u16) -> bool {
fn action_tee(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool {
self.0 = format!("ACTION_TEE({}, {}, {})", address.to_string(), flags, length);
true
}
#[inline(always)]
fn action_watch(&mut self, address: Address, flags: u32, length: u16) -> bool {
fn action_watch(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool {
self.0 = format!("ACTION_WATCH({}, {}, {})", address.to_string(), flags, length);
true
}
#[inline(always)]
fn action_redirect(&mut self, address: Address, flags: u32, length: u16) -> bool {
fn action_redirect(&mut self, address: PartialAddress, flags: u32, length: u16) -> bool {
self.0 = format!("ACTION_REDIRECT({}, {}, {})", address.to_string(), flags, length);
true
}
@ -1253,7 +1252,7 @@ impl RuleVisitor for RuleStringer {
}
#[inline(always)]
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: Address) {
fn match_source_zerotier_address(&mut self, not: bool, or: bool, address: PartialAddress) {
self.0 = format!(
"MATCH_SOURCE_ZEROTIER_ADDRESS({}{}{})",
if or {
@ -1271,7 +1270,7 @@ impl RuleVisitor for RuleStringer {
}
#[inline(always)]
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: Address) {
fn match_dest_zerotier_address(&mut self, not: bool, or: bool, address: PartialAddress) {
self.0 = format!(
"MATCH_DEST_ZEROTIER_ADDRESS({}{}{})",
if or {

View file

@ -4,7 +4,7 @@ use fastcdc::v2020;
use zerotier_crypto::hash::{SHA384, SHA384_HASH_SIZE};
use zerotier_utils::error::{InvalidFormatError, InvalidParameterError};
use zerotier_utils::memory::byte_array_chunks_exact;
use zerotier_utils::memory::array_chunks_exact;
const MAX_RECURSION_DEPTH: u8 = 64; // sanity limit, object would have to be quite huge to hit this
@ -30,7 +30,7 @@ impl ScatteredObject {
if (hl.len() % SHA384_HASH_SIZE) != 0 || hl.is_empty() {
return Err(InvalidFormatError);
}
for h in byte_array_chunks_exact::<SHA384_HASH_SIZE>(hl) {
for h in array_chunks_exact::<u8, SHA384_HASH_SIZE>(hl) {
if (h[SHA384_HASH_SIZE - 1] & 0x01) != 0 {
if let Some(chunk) = get_chunk(h) {
if depth < MAX_RECURSION_DEPTH {
@ -72,7 +72,7 @@ impl ScatteredObject {
let mut chunk_no = 0;
let mut missing_chunks = false;
for h in byte_array_chunks_exact::<SHA384_HASH_SIZE>(self.need.as_slice()) {
for h in array_chunks_exact::<u8, SHA384_HASH_SIZE>(self.need.as_slice()) {
let dc = self.data_chunks.get_mut(chunk_no).unwrap();
if dc.is_empty() {
debug_assert_eq!(h.len(), SHA384_HASH_SIZE);
@ -110,7 +110,7 @@ impl ScatteredObject {
/// This list can get longer through the course of object retrieval since incoming chunks can
/// be chunks of hashes instead of chunks of data.
pub fn need(&self) -> impl Iterator<Item = &[u8; SHA384_HASH_SIZE]> {
byte_array_chunks_exact::<SHA384_HASH_SIZE>(self.need.as_slice())
array_chunks_exact::<u8, SHA384_HASH_SIZE>(self.need.as_slice())
}
}
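The renamed helper iterates the flat `need` byte vector as fixed 48-byte SHA384 hashes, and, as checked earlier in this file, a hash whose final byte has its low bit set points at a chunk that is itself a list of hashes (bounded by MAX_RECURSION_DEPTH). A standalone sketch of both ideas using only std:

const SHA384_HASH_SIZE: usize = 48;

// Equivalent of array_chunks_exact() for this use: view a flat byte slice as
// fixed-size hash references.
fn hashes(need: &[u8]) -> impl Iterator<Item = &[u8; SHA384_HASH_SIZE]> + '_ {
    need.chunks_exact(SHA384_HASH_SIZE)
        .map(|c| <&[u8; SHA384_HASH_SIZE]>::try_from(c).unwrap())
}

// A hash whose last byte has its low bit set references a chunk holding more
// hashes rather than data.
fn is_hash_list_reference(hash: &[u8; SHA384_HASH_SIZE]) -> bool {
    (hash[SHA384_HASH_SIZE - 1] & 0x01) != 0
}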

View file

@ -11,12 +11,12 @@ pub struct Switch {}
#[allow(unused_variables)]
impl InnerProtocolLayer for Switch {
fn handle_packet<Application: ApplicationLayer + ?Sized>(
fn handle_packet<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
verb: u8,
@ -26,12 +26,12 @@ impl InnerProtocolLayer for Switch {
PacketHandlerResult::NotHandled
}
fn handle_error<Application: ApplicationLayer + ?Sized>(
fn handle_error<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
@ -43,12 +43,12 @@ impl InnerProtocolLayer for Switch {
PacketHandlerResult::NotHandled
}
fn handle_ok<Application: ApplicationLayer + ?Sized>(
fn handle_ok<Application: ApplicationLayer>(
&self,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_path: &Arc<Path<Application>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,

View file

@ -1,12 +1,11 @@
use std::borrow::Cow;
use zerotier_utils::blob::Blob;
use zerotier_utils::flatsortedmap::FlatSortedMap;
use serde::{Deserialize, Serialize};
use crate::vl1::identity::IDENTITY_FINGERPRINT_SIZE;
use crate::vl1::inetaddress::InetAddress;
use crate::vl1::Address;
use crate::vl2::rule::Rule;
#[derive(Serialize, Deserialize, Eq, PartialEq, Clone)]
@ -47,7 +46,7 @@ pub struct Topology<'a> {
#[serde(skip_serializing_if = "FlatSortedMap::is_empty")]
#[serde(default)]
pub members: FlatSortedMap<'a, Blob<IDENTITY_FINGERPRINT_SIZE>, Member<'a>>,
pub members: FlatSortedMap<'a, Address, Member<'a>>,
}
#[inline(always)]

View file

@ -1,7 +1,7 @@
use std::io::Write;
use crate::vl1::identity::Identity;
use crate::vl1::Address;
use crate::vl1::identity::{Identity, IdentitySecret};
use crate::vl1::PartialAddress;
use crate::vl2::NetworkId;
use serde::{Deserialize, Serialize};
@ -24,10 +24,10 @@ use zerotier_utils::memory;
/// certificate.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CertificateOfMembership {
pub network_id: NetworkId,
pub network_id: u64, // 64-bit legacy network ID
pub timestamp: i64,
pub max_delta: u64,
pub issued_to: Address,
pub issued_to: u64, // 40-bit legacy address
pub issued_to_fingerprint: Blob<32>,
pub signature: ArrayVec<u8, { Identity::MAX_SIGNATURE_SIZE }>,
}
@ -35,23 +35,18 @@ pub struct CertificateOfMembership {
impl CertificateOfMembership {
/// Create a new signed certificate of membership.
/// The issuer must supply its secret key material (an IdentitySecret), so signing cannot fail.
pub fn new(issuer: &Identity, network_id: NetworkId, issued_to: &Identity, timestamp: i64, max_delta: u64) -> Option<Self> {
pub fn new(issuer: &IdentitySecret, network_id: &NetworkId, issued_to: &Identity, timestamp: i64, max_delta: u64) -> Self {
let mut com = CertificateOfMembership {
network_id,
network_id: network_id.to_legacy_u64(),
timestamp,
max_delta,
issued_to: issued_to.address,
issued_to: issued_to.address.legacy_u64(),
issued_to_fingerprint: Blob::default(),
signature: ArrayVec::new(),
};
com.issued_to_fingerprint = Blob::from(Self::v1_proto_issued_to_fingerprint(issued_to));
if let Some(signature) = issuer.sign(&com.v1_proto_get_qualifier_bytes(), true) {
com.signature = signature;
Some(com)
} else {
None
}
com.signature = issuer.sign(&com.v1_proto_get_qualifier_bytes());
com
}
fn v1_proto_get_qualifier_bytes(&self) -> [u8; 168] {
@ -64,7 +59,7 @@ impl CertificateOfMembership {
q[4] = u64::from(self.network_id).to_be();
q[5] = 0; // no disagreement permitted
q[6] = 2u64.to_be();
q[7] = u64::from(self.issued_to).to_be();
q[7] = self.issued_to.to_be();
q[8] = u64::MAX; // no to_be needed for all-1s
// This is a fix for a security issue in V1 in which an attacker could (with much CPU use)
@ -91,20 +86,20 @@ impl CertificateOfMembership {
/// Get the identity fingerprint used in V1, which only covers the curve25519 keys.
fn v1_proto_issued_to_fingerprint(issued_to: &Identity) -> [u8; 32] {
let mut v1_signee_hasher = SHA384::new();
v1_signee_hasher.update(&issued_to.address.to_bytes());
v1_signee_hasher.update(&issued_to.x25519);
v1_signee_hasher.update(&issued_to.ed25519);
v1_signee_hasher.update(issued_to.address.legacy_bytes());
v1_signee_hasher.update(&issued_to.x25519.ecdh);
v1_signee_hasher.update(&issued_to.x25519.eddsa);
(&v1_signee_hasher.finish()[..32]).try_into().unwrap()
}
/// Get this certificate of membership in byte encoded format.
pub fn to_bytes(&self, controller_address: Address) -> ArrayVec<u8, 384> {
pub fn to_bytes(&self, controller_address: PartialAddress) -> ArrayVec<u8, 384> {
let mut v = ArrayVec::new();
v.push(1); // version byte from v1 protocol
v.push(0);
v.push(7); // 7 qualifiers, big-endian 16-bit
let _ = v.write_all(&self.v1_proto_get_qualifier_bytes());
let _ = v.write_all(&controller_address.to_bytes());
let _ = v.write_all(controller_address.legacy_bytes());
let _ = v.write_all(self.signature.as_bytes());
v
}
@ -157,10 +152,10 @@ impl CertificateOfMembership {
b = &b[5..]; // skip issuer address which is always the controller
Ok(Self {
network_id: NetworkId::from_u64(network_id).ok_or(InvalidParameterError("invalid network ID"))?,
network_id: NetworkId::from_legacy_u64(network_id)?.to_legacy_u64(),
timestamp,
max_delta,
issued_to: Address::from_u64(issued_to).ok_or(InvalidParameterError("invalid issued to address"))?,
issued_to,
issued_to_fingerprint: Blob::from(v1_fingerprint),
signature: {
let mut s = ArrayVec::new();

View file

@ -1,7 +1,8 @@
use std::collections::HashSet;
use std::io::Write;
use crate::vl1::{Address, Identity, InetAddress, MAC};
use crate::vl1::identity::{Identity, IdentitySecret};
use crate::vl1::{Address, InetAddress, PartialAddress, MAC};
use crate::vl2::NetworkId;
use serde::{Deserialize, Serialize};
@ -29,21 +30,21 @@ impl Thing {
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CertificateOfOwnership {
pub network_id: NetworkId,
pub network_id: u64, // legacy 64-bit network ID
pub timestamp: i64,
pub things: HashSet<Thing>,
pub issued_to: Address,
pub signature: ArrayVec<u8, { crate::vl1::identity::IDENTITY_MAX_SIGNATURE_SIZE }>,
pub issued_to: u64, // legacy 40-bit address
pub signature: ArrayVec<u8, { Identity::MAX_SIGNATURE_SIZE }>,
}
impl CertificateOfOwnership {
/// Create a new empty and unsigned certificate.
pub fn new(network_id: NetworkId, timestamp: i64, issued_to: Address) -> Self {
pub fn new(network_id: &NetworkId, timestamp: i64, issued_to: &Address) -> Self {
Self {
network_id,
network_id: network_id.to_legacy_u64(),
timestamp,
things: HashSet::with_capacity(4),
issued_to,
issued_to: issued_to.legacy_u64(),
signature: ArrayVec::new(),
}
}
@ -62,7 +63,7 @@ impl CertificateOfOwnership {
let _ = self.things.insert(Thing::Mac(mac));
}
fn internal_to_bytes(&self, for_sign: bool, signed_by: Address) -> Option<Vec<u8>> {
fn internal_to_bytes(&self, for_sign: bool, signed_by: &Address) -> Option<Vec<u8>> {
if self.things.len() > 0xffff {
return None;
}
@ -70,7 +71,7 @@ impl CertificateOfOwnership {
if for_sign {
let _ = v.write_all(&[0x7fu8; 8]);
}
let _ = v.write_all(&self.network_id.to_bytes());
let _ = v.write_all(&self.network_id.to_be_bytes());
let _ = v.write_all(&self.timestamp.to_be_bytes());
let _ = v.write_all(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); // obsolete flags and ID fields
let _ = v.write_all(&(self.things.len() as u16).to_be_bytes());
@ -93,8 +94,8 @@ impl CertificateOfOwnership {
}
}
}
let _ = v.write_all(&self.issued_to.to_bytes());
let _ = v.write_all(&signed_by.to_bytes());
let _ = v.write_all(&self.issued_to.to_be_bytes()[3..8]);
let _ = v.write_all(signed_by.legacy_bytes());
if for_sign {
v.push(0);
v.push(0);
@ -111,7 +112,7 @@ impl CertificateOfOwnership {
}
#[inline(always)]
pub fn to_bytes(&self, signed_by: Address) -> Option<Vec<u8>> {
pub fn to_bytes(&self, signed_by: &Address) -> Option<Vec<u8>> {
self.internal_to_bytes(false, signed_by)
}
@ -152,10 +153,10 @@ impl CertificateOfOwnership {
}
Ok((
Self {
network_id: NetworkId::from_u64(network_id).ok_or(InvalidParameterError("invalid network ID"))?,
network_id: NetworkId::from_legacy_u64(network_id)?.to_legacy_u64(),
timestamp,
things,
issued_to: Address::from_bytes(&b[..5]).ok_or(InvalidParameterError("invalid address"))?,
issued_to: PartialAddress::from_bytes(&b[..5])?.legacy_u64(),
signature: {
let mut s = ArrayVec::new();
s.push_slice(&b[13..109]);
@ -167,13 +168,11 @@ impl CertificateOfOwnership {
}
/// Sign certificate of ownership for use by V1 nodes.
pub fn sign(&mut self, issuer: &Identity, issued_to: &Identity) -> bool {
self.issued_to = issued_to.address;
if let Some(to_sign) = self.internal_to_bytes(true, issuer.address) {
if let Some(signature) = issuer.sign(&to_sign.as_slice(), true) {
self.signature = signature;
return true;
}
pub fn sign(&mut self, issuer: &IdentitySecret, issued_to: &Identity) -> bool {
self.issued_to = issued_to.address.legacy_u64();
if let Some(to_sign) = self.internal_to_bytes(true, &issuer.public.address) {
self.signature = issuer.sign(&to_sign.as_slice());
return true;
}
return false;
}

View file

@ -6,16 +6,17 @@ use std::str::FromStr;
use serde::{Deserialize, Serialize};
use crate::vl1::{Address, Identity, InetAddress};
use crate::vl1::identity::Identity;
use crate::vl1::{Address, InetAddress};
use crate::vl2::iproute::IpRoute;
use crate::vl2::rule::Rule;
use crate::vl2::v1::{CertificateOfMembership, CertificateOfOwnership, Tag};
use crate::vl2::NetworkId;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::buffer::{Buffer, OutOfBoundsError};
use zerotier_utils::dictionary::Dictionary;
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::marshalable::Marshalable;
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
/// Network configuration object sent to nodes by network controllers.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
@ -130,24 +131,18 @@ impl NetworkConfig {
if !self.routes.is_empty() {
let r: Vec<IpRoute> = self.routes.iter().cloned().collect();
d.set_bytes(
proto_v1_field_name::network_config::ROUTES,
IpRoute::marshal_multiple_to_bytes(r.as_slice()).unwrap(),
);
d.set_bytes(proto_v1_field_name::network_config::ROUTES, marshal_multiple_to_bytes(r.as_slice()));
}
if !self.static_ips.is_empty() {
let ips: Vec<InetAddress> = self.static_ips.iter().cloned().collect();
d.set_bytes(
proto_v1_field_name::network_config::STATIC_IPS,
InetAddress::marshal_multiple_to_bytes(ips.as_slice()).unwrap(),
);
d.set_bytes(proto_v1_field_name::network_config::STATIC_IPS, marshal_multiple_to_bytes(ips.as_slice()));
}
if !self.rules.is_empty() {
d.set_bytes(
proto_v1_field_name::network_config::RULES,
Rule::marshal_multiple_to_bytes(self.rules.as_slice()).unwrap(),
marshal_multiple_to_bytes(self.rules.as_slice()),
);
}
@ -180,7 +175,7 @@ impl NetworkConfig {
proto_v1_field_name::network_config::CERTIFICATE_OF_MEMBERSHIP,
v1cred
.certificate_of_membership
.to_bytes(self.network_id.network_controller())
.to_bytes(self.network_id.legacy_controller_address())
.as_bytes()
.to_vec(),
);
@ -188,7 +183,7 @@ impl NetworkConfig {
if !v1cred.certificates_of_ownership.is_empty() {
let mut certs = Vec::with_capacity(v1cred.certificates_of_ownership.len() * 256);
for c in v1cred.certificates_of_ownership.iter() {
let _ = certs.write_all(c.to_bytes(controller_identity.address)?.as_slice());
let _ = certs.write_all(c.to_bytes(&controller_identity.address)?.as_slice());
}
d.set_bytes(proto_v1_field_name::network_config::CERTIFICATES_OF_OWNERSHIP, certs);
}
@ -196,7 +191,7 @@ impl NetworkConfig {
if !v1cred.tags.is_empty() {
let mut tags = Vec::with_capacity(v1cred.tags.len() * 256);
for (_, t) in v1cred.tags.iter() {
let _ = tags.write_all(t.to_bytes(controller_identity.address).as_ref());
let _ = tags.write_all(t.to_bytes(&controller_identity.address).as_ref());
}
d.set_bytes(proto_v1_field_name::network_config::TAGS, tags);
}
@ -256,7 +251,7 @@ impl NetworkConfig {
nc.multicast_limit = d.get_u64(proto_v1_field_name::network_config::MULTICAST_LIMIT).unwrap_or(0) as u32;
if let Some(routes_bin) = d.get_bytes(proto_v1_field_name::network_config::ROUTES) {
for r in IpRoute::unmarshal_multiple_from_bytes(routes_bin)
for r in unmarshal_multiple_from_bytes(routes_bin)
.map_err(|_| InvalidParameterError("invalid route object(s)"))?
.drain(..)
{
@ -265,7 +260,7 @@ impl NetworkConfig {
}
if let Some(static_ips_bin) = d.get_bytes(proto_v1_field_name::network_config::STATIC_IPS) {
for ip in InetAddress::unmarshal_multiple_from_bytes(static_ips_bin)
for ip in unmarshal_multiple_from_bytes(static_ips_bin)
.map_err(|_| InvalidParameterError("invalid route object(s)"))?
.drain(..)
{
@ -274,7 +269,7 @@ impl NetworkConfig {
}
if let Some(rules_bin) = d.get_bytes(proto_v1_field_name::network_config::RULES) {
nc.rules = Rule::unmarshal_multiple_from_bytes(rules_bin).map_err(|_| InvalidParameterError("invalid route object(s)"))?;
nc.rules = unmarshal_multiple_from_bytes(rules_bin).map_err(|_| InvalidParameterError("invalid rule object(s)"))?;
}
if let Some(dns_bin) = d.get_bytes(proto_v1_field_name::network_config::DNS) {
@ -434,7 +429,7 @@ pub struct V1Credentials {
impl Marshalable for IpRoute {
const MAX_MARSHAL_SIZE: usize = (InetAddress::MAX_MARSHAL_SIZE * 2) + 2 + 2;
fn marshal<const BL: usize>(&self, buf: &mut zerotier_utils::buffer::Buffer<BL>) -> Result<(), zerotier_utils::marshalable::UnmarshalError> {
fn marshal<const BL: usize>(&self, buf: &mut zerotier_utils::buffer::Buffer<BL>) -> Result<(), OutOfBoundsError> {
self.target.marshal(buf)?;
if let Some(via) = self.via.as_ref() {
via.marshal(buf)?;
@ -477,3 +472,34 @@ impl Marshalable for IpRoute {
})
}
}
const TEMP_BUF_SIZE: usize = 1024;
fn marshal_multiple_to_bytes<M: Marshalable>(multiple: &[M]) -> Vec<u8> {
debug_assert!(M::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp = Vec::with_capacity(M::MAX_MARSHAL_SIZE * multiple.len());
for m in multiple.iter() {
let _ = tmp.write_all(m.to_buffer::<TEMP_BUF_SIZE>().unwrap().as_bytes());
}
tmp
}
fn unmarshal_multiple_from_bytes<M: Marshalable>(mut bytes: &[u8]) -> Result<Vec<M>, UnmarshalError> {
debug_assert!(M::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp: Buffer<TEMP_BUF_SIZE> = Buffer::new();
let mut v: Vec<M> = Vec::new();
while bytes.len() > 0 {
let chunk_size = bytes.len().min(M::MAX_MARSHAL_SIZE);
if tmp.append_bytes(&bytes[..chunk_size]).is_err() {
return Err(UnmarshalError::OutOfBounds);
}
let mut cursor = 0;
v.push(M::unmarshal(&mut tmp, &mut cursor)?);
if cursor == 0 {
return Err(UnmarshalError::InvalidData);
}
let _ = tmp.erase_first_n(cursor);
bytes = &bytes[chunk_size..];
}
Ok(v)
}
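These module-local helpers replace the old per-type marshal_multiple/unmarshal_multiple methods: the first concatenates each object's marshaled bytes, the second re-parses them incrementally through a small scratch buffer. A brief usage sketch within this module (the routes slice is illustrative):

// Illustrative round trip over a slice of IpRoute values.
fn routes_round_trip(routes: &[IpRoute]) -> Result<Vec<IpRoute>, UnmarshalError> {
    let blob = marshal_multiple_to_bytes(routes); // concatenated marshaled objects
    unmarshal_multiple_from_bytes(&blob) // parsed back out one object at a time
}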

View file

@ -1,64 +1,53 @@
use std::io::Write;
use zerotier_crypto::typestate::Valid;
use zerotier_utils::arrayvec::ArrayVec;
use serde::{Deserialize, Serialize};
use crate::vl1::{Address, Identity};
use crate::vl1::identity::IdentitySecret;
use crate::vl1::Address;
use crate::vl2::v1::CredentialType;
use crate::vl2::NetworkId;
/// "Anti-credential" revoking a network member's permission to communicate on a network.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Revocation {
pub network_id: NetworkId,
pub network_id: u64, // legacy 64-bit network ID
pub threshold: i64,
pub target: Address,
pub issued_to: Address,
pub target: u64, // legacy 40-bit address
pub issued_to: u64, // legacy 40-bit address
pub signature: ArrayVec<u8, 96>,
pub fast_propagate: bool,
}
impl Revocation {
pub fn new(
network_id: NetworkId,
threshold: i64,
target: Address,
issued_to: Address,
signer: &Valid<Identity>,
fast_propagate: bool,
) -> Option<Self> {
pub fn new(network_id: &NetworkId, threshold: i64, target: &Address, issued_to: &Address, signer: &IdentitySecret, fast_propagate: bool) -> Self {
let mut r = Self {
network_id,
network_id: network_id.to_legacy_u64(),
threshold,
target,
issued_to,
target: target.legacy_u64(),
issued_to: issued_to.legacy_u64(),
signature: ArrayVec::new(),
fast_propagate,
};
if let Some(sig) = signer.sign(r.internal_to_bytes(true, signer.address).as_bytes(), true) {
r.signature.as_mut().copy_from_slice(sig.as_bytes());
Some(r)
} else {
None
}
r.signature = signer.sign(r.internal_to_bytes(true, &signer.public.address).as_bytes());
r
}
fn internal_to_bytes(&self, for_sign: bool, signed_by: Address) -> ArrayVec<u8, 256> {
fn internal_to_bytes(&self, for_sign: bool, signed_by: &Address) -> ArrayVec<u8, 256> {
let mut v = ArrayVec::new();
if for_sign {
let _ = v.write_all(&[0x7f; 8]);
}
let _ = v.write_all(&[0; 4]);
let _ = v.write_all(&((self.threshold as u32) ^ (u64::from(self.target) as u32)).to_be_bytes()); // ID only used in V1, arbitrary
let _ = v.write_all(&self.network_id.to_bytes());
let _ = v.write_all(&((self.threshold as u32) ^ (self.target as u32)).to_be_bytes()); // ID is arbitrary
let _ = v.write_all(&self.network_id.to_be_bytes());
let _ = v.write_all(&[0; 8]);
let _ = v.write_all(&self.threshold.to_be_bytes());
let _ = v.write_all(&(self.fast_propagate as u64).to_be_bytes()); // 0x1 is the flag for this
let _ = v.write_all(&self.target.to_bytes());
let _ = v.write_all(&signed_by.to_bytes());
let _ = v.write_all(&self.target.to_be_bytes()[3..8]);
let _ = v.write_all(signed_by.legacy_bytes());
v.push(CredentialType::CertificateOfMembership as u8);
if for_sign {
@ -74,7 +63,7 @@ impl Revocation {
}
#[inline(always)]
pub fn v1_proto_to_bytes(&self, controller_address: Address) -> ArrayVec<u8, 256> {
pub fn v1_proto_to_bytes(&self, controller_address: &Address) -> ArrayVec<u8, 256> {
self.internal_to_bytes(false, controller_address)
}
}

View file

@ -1,7 +1,7 @@
use std::io::Write;
use crate::vl1::identity::Identity;
use crate::vl1::Address;
use crate::vl1::identity::{Identity, IdentitySecret};
use crate::vl1::{Address, PartialAddress};
use crate::vl2::NetworkId;
use serde::{Deserialize, Serialize};
@ -12,43 +12,40 @@ use zerotier_utils::error::InvalidParameterError;
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Tag {
pub network_id: NetworkId,
pub network_id: u64, // legacy 64-bit network ID
pub timestamp: i64,
pub issued_to: Address,
pub issued_to: u64, // legacy 40-bit address
pub id: u32,
pub value: u32,
pub signature: Blob<96>,
}
impl Tag {
pub fn new(id: u32, value: u32, issuer: &Identity, network_id: NetworkId, issued_to: &Identity, timestamp: i64) -> Option<Self> {
pub fn new(id: u32, value: u32, issuer: &IdentitySecret, network_id: &NetworkId, issued_to: &Identity, timestamp: i64) -> Self {
let mut tag = Self {
network_id,
network_id: network_id.to_legacy_u64(),
timestamp,
issued_to: issued_to.address,
issued_to: issued_to.address.legacy_u64(),
id,
value,
signature: Blob::default(),
};
let to_sign = tag.internal_to_bytes(true, issuer.address);
if let Some(signature) = issuer.sign(to_sign.as_ref(), true) {
tag.signature.as_mut().copy_from_slice(signature.as_bytes());
return Some(tag);
}
return None;
let to_sign = tag.internal_to_bytes(true, &issuer.public.address);
tag.signature.as_mut().copy_from_slice(issuer.sign(to_sign.as_ref()).as_bytes());
tag
}
fn internal_to_bytes(&self, for_sign: bool, signed_by: Address) -> ArrayVec<u8, 256> {
fn internal_to_bytes(&self, for_sign: bool, signed_by: &Address) -> ArrayVec<u8, 256> {
let mut v = ArrayVec::new();
if for_sign {
let _ = v.write_all(&[0x7f; 8]);
}
let _ = v.write_all(&self.network_id.to_bytes());
let _ = v.write_all(&self.network_id.to_be_bytes());
let _ = v.write_all(&self.timestamp.to_be_bytes());
let _ = v.write_all(&self.id.to_be_bytes());
let _ = v.write_all(&self.value.to_be_bytes());
let _ = v.write_all(&self.issued_to.to_bytes());
let _ = v.write_all(&signed_by.to_bytes());
let _ = v.write_all(&self.issued_to.to_be_bytes()[3..8]);
let _ = v.write_all(signed_by.legacy_bytes());
if !for_sign {
v.push(1);
v.push(0);
@ -64,7 +61,7 @@ impl Tag {
}
#[inline(always)]
pub fn to_bytes(&self, signed_by: Address) -> ArrayVec<u8, 256> {
pub fn to_bytes(&self, signed_by: &Address) -> ArrayVec<u8, 256> {
self.internal_to_bytes(false, signed_by)
}
@ -75,9 +72,9 @@ impl Tag {
}
Ok((
Self {
network_id: NetworkId::from_bytes(&b[0..8]).ok_or(InvalidParameterError("invalid network ID"))?,
network_id: NetworkId::from_bytes(&b[0..8])?.to_legacy_u64(),
timestamp: i64::from_be_bytes(b[8..16].try_into().unwrap()),
issued_to: Address::from_bytes(&b[24..29]).ok_or(InvalidParameterError("invalid address"))?,
issued_to: PartialAddress::from_bytes(&b[24..29])?.legacy_u64(),
id: u32::from_be_bytes(b[16..20].try_into().unwrap()),
value: u32::from_be_bytes(b[20..24].try_into().unwrap()),
signature: {

View file

@ -5,6 +5,11 @@ authors = ["ZeroTier, Inc. <contact@zerotier.com>", "Adam Ierymenko <adam.ieryme
edition = "2021"
license = "MPL-2.0"
[lib]
name = "zerotier_service"
path = "src/lib.rs"
doc = true
[[bin]]
name = "zerotier"
path = "src/main.rs"
@ -13,10 +18,10 @@ path = "src/main.rs"
zerotier-network-hypervisor = { path = "../network-hypervisor" }
zerotier-crypto = { path = "../crypto" }
zerotier-utils = { path = "../utils", features = ["tokio"] }
zerotier-vl1-service = { path = "../vl1-service" }
serde = { version = "^1", features = ["derive"], default-features = false }
serde_json = { version = "^1", features = ["std"], default-features = false }
clap = { version = "^3", features = ["std", "suggestions"], default-features = false }
num-traits = "^0"
[target."cfg(windows)".dependencies]
winapi = { version = "^0", features = ["handleapi", "ws2ipdef", "ws2tcpip"] }

View file

@ -1,3 +1,10 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
pub mod rootset;
pub struct Flags {
pub json_output: bool,
pub base_path: String,
pub auth_token_path_override: Option<String>,
pub auth_token_override: Option<String>,
}

View file

@ -4,10 +4,11 @@ use std::io::Write;
use clap::ArgMatches;
use crate::{exitcode, Flags};
use super::Flags;
use zerotier_network_hypervisor::vl1::RootSet;
use zerotier_utils::exitcode;
use zerotier_utils::io::{read_limit, DEFAULT_FILE_IO_READ_LIMIT};
use zerotier_utils::json::to_json_pretty;
use zerotier_utils::marshalable::Marshalable;
@ -26,17 +27,13 @@ pub fn cmd(_: Flags, cmd_args: &ArgMatches) -> i32 {
if path.is_some() && secret_arg.is_some() {
let path = path.unwrap();
let secret_arg = secret_arg.unwrap();
let secret = crate::utils::parse_cli_identity(secret_arg, true);
let secret = crate::utils::parse_cli_identity_secret(secret_arg);
let json_data = read_limit(path, DEFAULT_FILE_IO_READ_LIMIT);
if secret.is_err() {
eprintln!("ERROR: unable to parse '{}' or read as a file.", secret_arg);
return exitcode::ERR_IOERR;
}
let secret = secret.unwrap();
if !secret.secret.is_some() {
eprintln!("ERROR: identity does not include secret key, which is required for signing.");
return exitcode::ERR_IOERR;
}
if json_data.is_err() {
eprintln!("ERROR: unable to read '{}'.", path);
return exitcode::ERR_IOERR;
@ -102,7 +99,7 @@ pub fn cmd(_: Flags, cmd_args: &ArgMatches) -> i32 {
eprintln!("ERROR: root set JSON parsing failed: {}", root_set.err().unwrap().to_string());
return exitcode::ERR_IOERR;
}
let _ = std::io::stdout().write_all(root_set.unwrap().to_bytes().as_slice());
let _ = std::io::stdout().write_all(root_set.unwrap().to_buffer::<16384>().unwrap().as_ref());
} else {
eprintln!("ERROR: 'rootset marshal' requires a path to a root set in JSON format.");
return exitcode::ERR_IOERR;

7
service/src/lib.rs Normal file
View file

@ -0,0 +1,7 @@
pub mod cli;
pub mod cmdline_help;
pub mod localconfig;
pub mod sys;
pub mod utils;
pub mod vl1;
pub mod vnic;

View file

@ -6,7 +6,8 @@ use serde::{Deserialize, Serialize};
use zerotier_network_hypervisor::vl1::{Address, Endpoint};
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_vl1_service::VL1Settings;
use crate::vl1::VL1Settings;
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(default)]

View file

@ -1,11 +1,5 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
pub mod cli;
pub mod cmdline_help;
pub mod localconfig;
pub mod utils;
pub mod vnic;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
@ -18,13 +12,17 @@ use clap::{Arg, ArgMatches, Command};
use zerotier_network_hypervisor::vl1::InnerProtocolLayer;
use zerotier_network_hypervisor::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
use zerotier_utils::exitcode;
use zerotier_vl1_service::datadir::DataDir;
use zerotier_vl1_service::VL1Service;
use crate::localconfig::Config;
use zerotier_service::cli;
use zerotier_service::cli::Flags;
use zerotier_service::cmdline_help;
use zerotier_service::localconfig::Config;
use zerotier_service::utils;
use zerotier_service::vl1::datadir::DataDir;
use zerotier_service::vl1::{VL1Service, VL1Settings};
pub fn print_help() {
let h = crate::cmdline_help::make_cmdline_help();
let h = cmdline_help::make_cmdline_help();
let _ = std::io::stdout().write_all(h.as_bytes());
}
@ -38,13 +36,6 @@ pub fn platform_default_home_path() -> String {
"/var/lib/zerotier".into()
}
pub struct Flags {
pub json_output: bool,
pub base_path: String,
pub auth_token_path_override: Option<String>,
pub auth_token_override: Option<String>,
}
fn open_datadir(flags: &Flags) -> Arc<DataDir<Config>> {
let datadir = DataDir::open(flags.base_path.as_str());
if datadir.is_ok() {
@ -207,28 +198,34 @@ fn main() {
if let Ok(_tokio_runtime) = zerotier_utils::tokio::runtime::Builder::new_multi_thread().enable_all().build() {
let test_inner = Arc::new(DummyInnerLayer);
let datadir = open_datadir(&flags);
let svc = VL1Service::new(datadir, test_inner, zerotier_vl1_service::VL1Settings::default());
if svc.is_ok() {
let svc = svc.unwrap();
svc.node().init_default_roots();
// Wait for kill signal on Unix-like platforms.
#[cfg(unix)]
{
let term = Arc::new(AtomicBool::new(false));
let _ = signal_hook::flag::register(libc::SIGINT, term.clone());
let _ = signal_hook::flag::register(libc::SIGTERM, term.clone());
let _ = signal_hook::flag::register(libc::SIGQUIT, term.clone());
while !term.load(Ordering::Relaxed) {
std::thread::sleep(Duration::from_secs(1));
}
}
println!("Terminate signal received, shutting down...");
exitcode::OK
} else {
eprintln!("FATAL: error launching service: {}", svc.err().unwrap().to_string());
let id = datadir.read_identity(true, true);
if let Err(e) = id {
eprintln!("FATAL: error generator or writing identity: {}", e.to_string());
exitcode::ERR_IOERR
} else {
let svc = VL1Service::new(id.unwrap(), test_inner, VL1Settings::default());
if svc.is_ok() {
let svc = svc.unwrap();
svc.node.init_default_roots();
// Wait for kill signal on Unix-like platforms.
#[cfg(unix)]
{
let term = Arc::new(AtomicBool::new(false));
let _ = signal_hook::flag::register(libc::SIGINT, term.clone());
let _ = signal_hook::flag::register(libc::SIGTERM, term.clone());
let _ = signal_hook::flag::register(libc::SIGQUIT, term.clone());
while !term.load(Ordering::Relaxed) {
std::thread::sleep(Duration::from_secs(1));
}
}
println!("Terminate signal received, shutting down...");
exitcode::OK
} else {
eprintln!("FATAL: error launching service: {}", svc.err().unwrap().to_string());
exitcode::ERR_IOERR
}
}
} else {
eprintln!("FATAL: error launching service: can't start async runtime");

View file

@ -5,7 +5,7 @@ use std::ptr::{copy_nonoverlapping, null_mut};
use zerotier_network_hypervisor::vl1::InetAddress;
use crate::localinterface::LocalInterface;
use crate::vl1::LocalInterface;
#[allow(unused)]
#[inline(always)]

View file

@ -10,7 +10,7 @@ use std::ptr::{null, null_mut};
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering};
use std::sync::{Arc, RwLock};
use crate::localinterface::LocalInterface;
use crate::vl1::LocalInterface;
#[allow(unused_imports)]
use num_traits::AsPrimitive;

View file

@ -3,7 +3,7 @@
use std::path::Path;
use std::str::FromStr;
use zerotier_network_hypervisor::vl1::Identity;
use zerotier_network_hypervisor::vl1::identity::{Identity, IdentitySecret};
use zerotier_utils::io::read_limit;
/// Returns true if the string starts with [yY1tT] or false for [nN0fF].
@ -36,7 +36,6 @@ pub fn is_valid_port(v: &str) -> Result<(), String> {
Err(format!("invalid TCP/IP port number: {}", v))
}
/// Read an identity as either a literal or from a file.
pub fn parse_cli_identity(input: &str, validate: bool) -> Result<Identity, String> {
let parse_func = |s: &str| {
Identity::from_str(s).map_or_else(
@ -64,6 +63,20 @@ pub fn parse_cli_identity(input: &str, validate: bool) -> Result<Identity, Strin
}
}
pub fn parse_cli_identity_secret(input: &str) -> Result<IdentitySecret, String> {
let parse_func = |s: &str| IdentitySecret::from_str(s).map_err(|e| format!("invalid identity: {}", e.to_string()));
let input_p = Path::new(input);
if input_p.is_file() {
read_limit(input_p, 16384).map_or_else(
|e| Err(e.to_string()),
|v| String::from_utf8(v).map_or_else(|e| Err(e.to_string()), |s| parse_func(s.as_str())),
)
} else {
parse_func(input)
}
}
//#[cfg(unix)]
//pub fn c_strerror() -> String {
// unsafe { std::ffi::CStr::from_ptr(libc::strerror(*libc::__error()).cast()).to_string_lossy().to_string() }

View file

@ -1,5 +1,6 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, Mutex, RwLock};
@ -8,12 +9,10 @@ use serde::de::DeserializeOwned;
use serde::Serialize;
use zerotier_crypto::random::next_u32_secure;
use zerotier_network_hypervisor::vl1::{Identity, Valid};
use zerotier_network_hypervisor::vl1::identity::{Identity, IdentitySecret};
use zerotier_utils::io::{fs_restrict_permissions, read_limit, DEFAULT_FILE_IO_READ_LIMIT};
use zerotier_utils::json::to_json_pretty;
use crate::vl1service::VL1DataStorage;
pub const AUTH_TOKEN_FILENAME: &'static str = "authtoken.secret";
pub const IDENTITY_PUBLIC_FILENAME: &'static str = "identity.public";
pub const IDENTITY_SECRET_FILENAME: &'static str = "identity.secret";
@ -22,46 +21,13 @@ pub const CONFIG_FILENAME: &'static str = "local.conf";
const AUTH_TOKEN_DEFAULT_LENGTH: usize = 48;
const AUTH_TOKEN_POSSIBLE_CHARS: &'static str = "0123456789abcdefghijklmnopqrstuvwxyz";
pub fn load_node_identity(base_path: &Path) -> Option<Valid<Identity>> {
let id_data = read_limit(base_path.join(IDENTITY_SECRET_FILENAME), 4096);
if id_data.is_err() {
return None;
}
let id_data = Identity::from_str(String::from_utf8_lossy(id_data.unwrap().as_slice()).as_ref());
if id_data.is_err() {
return None;
}
Some(Valid::mark_valid(id_data.unwrap()))
}
pub fn save_node_identity(base_path: &Path, id: &Valid<Identity>) -> bool {
assert!(id.secret.is_some());
let id_secret_str = id.to_secret_string();
let id_public_str = id.to_string();
let secret_path = base_path.join(IDENTITY_SECRET_FILENAME);
if std::fs::write(&secret_path, id_secret_str.as_bytes()).is_err() {
return false;
}
assert!(fs_restrict_permissions(&secret_path));
return std::fs::write(base_path.join(IDENTITY_PUBLIC_FILENAME), id_public_str.as_bytes()).is_ok();
}
/// ZeroTier home directory interface
pub struct DataDir<Config: PartialEq + Eq + Clone + Send + Sync + Default + Serialize + DeserializeOwned + 'static> {
pub base_path: PathBuf,
config: RwLock<Arc<Config>>,
authtoken: Mutex<String>,
}
impl<Config: PartialEq + Eq + Clone + Send + Sync + Default + Serialize + DeserializeOwned + 'static> VL1DataStorage for DataDir<Config> {
fn load_node_identity(&self) -> Option<Valid<Identity>> {
load_node_identity(self.base_path.as_path())
}
fn save_node_identity(&self, id: &Valid<Identity>) -> bool {
save_node_identity(self.base_path.as_path(), id)
}
}
impl<Config: PartialEq + Eq + Clone + Send + Sync + Default + Serialize + DeserializeOwned + 'static> DataDir<Config> {
pub fn open<P: AsRef<Path>>(path: P) -> std::io::Result<Self> {
let base_path = path.as_ref().to_path_buf();
@ -94,6 +60,31 @@ impl<Config: PartialEq + Eq + Clone + Send + Sync + Default + Serialize + Deseri
return Ok(Self { base_path, config, authtoken: Mutex::new(String::new()) });
}
/// Read (and possibly generate) the identity.
pub fn read_identity(&self, auto_generate: bool, generate_x25519_only: bool) -> std::io::Result<IdentitySecret> {
let identity_path = self.base_path.join(IDENTITY_SECRET_FILENAME);
match read_limit(&identity_path, 4096) {
Ok(id_bytes) => {
return IdentitySecret::from_str(String::from_utf8_lossy(id_bytes.as_slice()).as_ref())
.map_err(|_| std::io::Error::new(ErrorKind::InvalidData, "invalid identity"));
}
Err(e) => match e.kind() {
ErrorKind::NotFound => {
if auto_generate {
let id = Identity::generate(generate_x25519_only);
let ids = id.to_string();
std::fs::write(&identity_path, ids.as_bytes())?;
std::fs::write(self.base_path.join(IDENTITY_PUBLIC_FILENAME), id.public.to_string().as_bytes())?;
return Ok(id);
} else {
return Err(e);
}
}
_ => return Err(e),
},
}
}
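Callers now follow the pattern used in main(): open the data directory, then read or auto-generate the identity. A short sketch, where MyConfig is a hypothetical stand-in for a type satisfying DataDir's Config bounds:

// Illustrative only; MyConfig is not defined in this codebase.
fn load_or_create_identity() -> std::io::Result<IdentitySecret> {
    let datadir: DataDir<MyConfig> = DataDir::open("/var/lib/zerotier")?;
    // auto_generate = true writes a new identity if none exists;
    // generate_x25519_only = false asks for a full identity.
    datadir.read_identity(true, false)
}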
/// Get authorization token for local API, creating and saving if it does not exist.
pub fn authtoken(&self) -> std::io::Result<String> {
let authtoken = self.authtoken.lock().unwrap().clone();

View file

@ -5,9 +5,7 @@ mod localsocket;
mod vl1service;
mod vl1settings;
pub mod constants;
pub mod datadir;
pub mod sys;
pub use localinterface::LocalInterface;
pub use localsocket::LocalSocket;

View file

@ -2,42 +2,35 @@
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::{Arc, RwLock, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use zerotier_crypto::random;
use zerotier_network_hypervisor::protocol::{PacketBufferFactory, PacketBufferPool};
use zerotier_network_hypervisor::vl1::identity::IdentitySecret;
use zerotier_network_hypervisor::vl1::*;
use zerotier_utils::cast::cast_ref;
use zerotier_utils::{ms_monotonic, ms_since_epoch};
use crate::constants::UNASSIGNED_PRIVILEGED_PORTS;
use super::vl1settings::{VL1Settings, UNASSIGNED_PRIVILEGED_PORTS};
use crate::sys::udp::{udp_test_bind, BoundUdpPort, UdpPacketHandler};
use crate::vl1settings::VL1Settings;
use crate::LocalSocket;
/// Update UDP bindings every this many seconds.
const UPDATE_UDP_BINDINGS_EVERY_SECS: usize = 10;
/// Trait to implement to provide storage for VL1-related state information.
pub trait VL1DataStorage: Sync + Send {
fn load_node_identity(&self) -> Option<Valid<Identity>>;
fn save_node_identity(&self, id: &Valid<Identity>) -> bool;
}
/// VL1 service that connects to the physical network and hosts an inner protocol like ZeroTier VL2.
///
/// This is the "outward facing" half of a full ZeroTier stack on a normal system. It binds sockets,
/// talks to the physical network, manages the vl1 node, and presents a templated interface for
/// whatever inner protocol implementation is using it. This would typically be VL2 but could be
/// a test harness or the controller itself when run stand-alone.
pub struct VL1Service<Inner: InnerProtocolLayer + ?Sized + 'static> {
pub struct VL1Service<Inner: InnerProtocolLayer + 'static> {
pub node: Node<Self>,
self_ref: Weak<Self>,
state: RwLock<VL1ServiceMutableState>,
vl1_data_storage: Arc<dyn VL1DataStorage>,
inner: Arc<Inner>,
buffer_pool: Arc<PacketBufferPool>,
node_container: Option<Node<Self>>, // never None, set in new()
}
struct VL1ServiceMutableState {
@ -47,26 +40,23 @@ struct VL1ServiceMutableState {
running: bool,
}
impl<Inner: InnerProtocolLayer + ?Sized + 'static> VL1Service<Inner> {
pub fn new(vl1_data_storage: Arc<dyn VL1DataStorage>, inner: Arc<Inner>, settings: VL1Settings) -> Result<Arc<Self>, Box<dyn Error>> {
let mut service = Self {
impl<Inner: InnerProtocolLayer + 'static> VL1Service<Inner> {
pub fn new(identity: IdentitySecret, inner: Arc<Inner>, settings: VL1Settings) -> Result<Arc<Self>, Box<dyn Error>> {
let service = Arc::new_cyclic(|self_ref| Self {
node: Node::<Self>::new(identity),
self_ref: self_ref.clone(),
state: RwLock::new(VL1ServiceMutableState {
daemons: Vec::with_capacity(2),
udp_sockets: HashMap::with_capacity(8),
settings,
running: true,
}),
vl1_data_storage,
inner,
buffer_pool: Arc::new(PacketBufferPool::new(
std::thread::available_parallelism().map_or(2, |c| c.get() + 2),
PacketBufferFactory::new(),
)),
node_container: None,
};
service.node_container.replace(Node::new(&service, true, false)?);
let service = Arc::new(service);
});
let mut daemons = Vec::new();
let s = service.clone();
@@ -78,10 +68,9 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> VL1Service<Inner> {
Ok(service)
}
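A minimal sketch of how a caller might construct the service with this new signature; the generic Inner stands for any InnerProtocolLayer implementation supplied by the embedding application, and the helper name is hypothetical:

// Sketch only: wire up VL1Service with default settings (port 9993 plus a few random ports).
fn start_vl1<Inner: InnerProtocolLayer + 'static>(
    identity: IdentitySecret,
    inner: Arc<Inner>,
) -> Result<Arc<VL1Service<Inner>>, Box<dyn Error>> {
    VL1Service::new(identity, inner, VL1Settings::default())
}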
#[inline(always)]
pub fn node(&self) -> &Node<Self> {
debug_assert!(self.node_container.is_some());
unsafe { self.node_container.as_ref().unwrap_unchecked() }
#[inline]
pub fn get_self_arc(&self) -> Arc<Self> {
self.self_ref.upgrade().unwrap()
}
pub fn bound_udp_ports(&self) -> Vec<u16> {
@@ -173,12 +162,12 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> VL1Service<Inner> {
self.update_udp_bindings();
}
udp_binding_check_every = udp_binding_check_every.wrapping_add(1);
std::thread::sleep(self.node().do_background_tasks(self.as_ref()));
std::thread::sleep(self.node.do_background_tasks(self.as_ref()));
}
}
}
impl<Inner: InnerProtocolLayer + ?Sized + 'static> UdpPacketHandler for VL1Service<Inner> {
impl<Inner: InnerProtocolLayer + 'static> UdpPacketHandler for VL1Service<Inner> {
#[inline(always)]
fn incoming_udp_packet(
self: &Arc<Self>,
@@ -187,11 +176,11 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> UdpPacketHandler for VL1Servi
source_address: &InetAddress,
packet: zerotier_network_hypervisor::protocol::PooledPacketBuffer,
) {
self.node().handle_incoming_physical_packet(
self.node.handle_incoming_physical_packet(
self.as_ref(),
self.inner.as_ref(),
&Endpoint::IpUdp(source_address.clone()),
&LocalSocket::new(socket),
&super::localsocket::LocalSocket::new(socket),
&socket.interface,
time_ticks,
packet,
@@ -199,9 +188,9 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> UdpPacketHandler for VL1Servi
}
}
impl<Inner: InnerProtocolLayer + ?Sized + 'static> ApplicationLayer for VL1Service<Inner> {
type LocalSocket = crate::LocalSocket;
type LocalInterface = crate::LocalInterface;
impl<Inner: InnerProtocolLayer + 'static> ApplicationLayer for VL1Service<Inner> {
type LocalSocket = super::localsocket::LocalSocket;
type LocalInterface = super::localinterface::LocalInterface;
#[inline]
fn event(&self, event: Event) {
@@ -216,22 +205,6 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> ApplicationLayer for VL1Servi
socket.is_valid()
}
#[inline]
fn should_respond_to(&self, _: &Valid<Identity>) -> bool {
// TODO: provide a way for the user of VL1Service to control this
true
}
#[inline]
fn load_node_identity(&self) -> Option<Valid<Identity>> {
self.vl1_data_storage.load_node_identity()
}
#[inline]
fn save_node_identity(&self, id: &Valid<Identity>) -> bool {
self.vl1_data_storage.save_node_identity(id)
}
#[inline]
fn get_buffer(&self) -> zerotier_network_hypervisor::protocol::PooledPacketBuffer {
self.buffer_pool.get()
@@ -310,9 +283,14 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> ApplicationLayer for VL1Servi
fn time_clock(&self) -> i64 {
ms_since_epoch()
}
#[inline(always)]
fn concrete_self<T: ApplicationLayer>(&self) -> Option<&T> {
cast_ref(self)
}
}
impl<Inner: InnerProtocolLayer + ?Sized + 'static> Drop for VL1Service<Inner> {
impl<Inner: InnerProtocolLayer + 'static> Drop for VL1Service<Inner> {
fn drop(&mut self) {
let mut state = self.state.write().unwrap();
state.running = false;

View file

@@ -1,5 +1,53 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::collections::HashSet;
use serde::{Deserialize, Serialize};
use zerotier_network_hypervisor::vl1::InetAddress;
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(default)]
pub struct VL1Settings {
/// Primary ZeroTier port that is always bound, default is 9993.
pub fixed_ports: HashSet<u16>,
/// Number of additional random ports to bind.
pub random_port_count: usize,
/// Enable UPnP, NAT-PMP, and other router port mapping technologies?
pub port_mapping: bool,
/// Interface name prefix blacklist for local bindings (not remote IPs).
pub interface_prefix_blacklist: HashSet<String>,
/// IP/bits CIDR blacklist for local bindings (not remote IPs).
pub cidr_blacklist: HashSet<InetAddress>,
}
impl VL1Settings {
#[cfg(target_os = "macos")]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 11] = ["lo", "utun", "gif", "stf", "iptap", "pktap", "feth", "zt", "llw", "anpi", "bridge"];
#[cfg(target_os = "linux")]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 5] = ["lo", "tun", "tap", "ipsec", "zt"];
#[cfg(windows)]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 0] = [];
}
impl Default for VL1Settings {
fn default() -> Self {
Self {
fixed_ports: HashSet::from([9993u16]),
random_port_count: 5,
port_mapping: true,
interface_prefix_blacklist: Self::DEFAULT_PREFIX_BLACKLIST.iter().map(|s| s.to_string()).collect(),
cidr_blacklist: HashSet::new(),
}
}
}
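For illustration, a hedged sketch of overriding a few of these defaults; the port number and field choices are arbitrary examples, not recommendations:

// Sketch only: start from Default and adjust selected fields.
fn example_settings() -> VL1Settings {
    let mut s = VL1Settings::default();
    s.fixed_ports = HashSet::from([9993u16, 29993u16]); // 29993 is an arbitrary example port
    s.random_port_count = 0;                            // no additional random bindings
    s.port_mapping = false;                             // disable UPnP/NAT-PMP mapping
    s
}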
/// A list of unassigned or obsolete ports under 1024 that could possibly be squatted.
pub const UNASSIGNED_PRIVILEGED_PORTS: [u16; 299] = [
4, 6, 8, 10, 12, 14, 15, 16, 26, 28, 30, 32, 34, 36, 40, 60, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 285, 288, 289, 290, 291, 292,

View file

@@ -7,14 +7,12 @@ version = "0.1.0"
[features]
default = []
tokio = ["dep:tokio", "dep:futures-util"]
tokio = ["dep:tokio"]
[dependencies]
serde = { version = "^1", features = ["derive"], default-features = false }
serde_json = { version = "^1", features = ["std"], default-features = false }
tokio = { version = "^1", default-features = false, features = ["fs", "io-util", "io-std", "net", "process", "rt", "rt-multi-thread", "signal", "sync", "time"], optional = true }
futures-util = { version = "^0", optional = true }
base64 = "0.20.0"
[target."cfg(windows)".dependencies]
winapi = { version = "^0", features = ["handleapi", "ws2ipdef", "ws2tcpip"] }

View file

@@ -204,6 +204,34 @@ impl<T, const C: usize> ArrayVec<T, C> {
C - self.s
}
#[inline(always)]
pub fn iter(&self) -> impl DoubleEndedIterator<Item = &T> {
self.as_ref().iter()
}
#[inline(always)]
pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut T> {
self.as_mut().iter_mut()
}
#[inline(always)]
pub fn first(&self) -> Option<&T> {
if self.s != 0 {
Some(unsafe { self.a.get_unchecked(0).assume_init_ref() })
} else {
None
}
}
#[inline(always)]
pub fn last(&self) -> Option<&T> {
if self.s != 0 {
Some(unsafe { self.a.get_unchecked(self.s - 1).assume_init_ref() })
} else {
None
}
}
#[inline]
pub fn pop(&mut self) -> Option<T> {
if self.s > 0 {

131
utils/src/base24.rs Normal file
View file

@@ -0,0 +1,131 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::io::Write;
use crate::error::InvalidParameterError;
/// All unambiguous lower-case letters, chosen so codes are easy to type on phone alphabetic keyboards without extra shift taps.
/// The letters 'l' and 'u' are skipped.
const BASE24_ALPHABET: [u8; 24] = [
b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'v', b'w', b'x', b'y', b'z',
];
/// Reverse table for BASE24 alphabet, indexed relative to 'a' or 'A'.
const BASE24_ALPHABET_INV: [u8; 26] = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 11, 12, 13, 14, 15, 16, 17, 18, 255, 19, 20, 21, 22, 23,
];
/// Encode a byte slice into base24 ASCII format (no padding)
pub fn encode_into(mut b: &[u8], s: &mut String) {
while b.len() >= 4 {
let mut n = u32::from_le_bytes(b[..4].try_into().unwrap());
for _ in 0..6 {
s.push(BASE24_ALPHABET[(n % 24) as usize] as char);
n /= 24;
}
s.push(BASE24_ALPHABET[n as usize] as char);
b = &b[4..];
}
if !b.is_empty() {
let mut n = 0u32;
for i in 0..b.len() {
n |= (b[i] as u32).wrapping_shl((i as u32) * 8);
}
for _ in 0..(b.len() * 2) {
s.push(BASE24_ALPHABET[(n % 24) as usize] as char);
n /= 24;
}
}
}
fn decode_up_to_u32(s: &[u8]) -> Result<u32, InvalidParameterError> {
let mut n = 0u32;
for c in s.iter().rev() {
let mut c = *c;
if c >= 97 && c <= 122 {
c -= 97;
} else if c >= 65 && c <= 90 {
c -= 65;
} else {
return Err(InvalidParameterError("invalid base24 character"));
}
let i = BASE24_ALPHABET_INV[c as usize];
if i == 255 {
return Err(InvalidParameterError("invalid base24 character"));
}
n *= 24;
n = n.wrapping_add(i as u32);
}
return Ok(n);
}
/// Decode a base24 ASCII slice into bytes (no padding; the input length determines the output length)
pub fn decode_into<W: Write>(s: &[u8], b: &mut W) -> Result<(), InvalidParameterError> {
let mut s = s.as_ref();
while s.len() >= 7 {
let _ = b.write_all(&decode_up_to_u32(&s[..7])?.to_le_bytes());
s = &s[7..];
}
if !s.is_empty() {
let _ = b.write_all(
&decode_up_to_u32(s)?.to_le_bytes()[..match s.len() {
2 => 1,
4 => 2,
6 => 3,
_ => return Err(InvalidParameterError("invalid base24 length")),
}],
);
}
return Ok(());
}
#[inline]
pub fn decode_into_slice(s: &[u8], mut b: &mut [u8]) -> Result<(), InvalidParameterError> {
decode_into(s, &mut b)
}
pub fn encode(b: &[u8]) -> String {
let mut tmp = String::with_capacity(((b.len() / 4) * 7) + 2);
encode_into(b, &mut tmp);
tmp
}
pub fn decode(s: &[u8]) -> Result<Vec<u8>, InvalidParameterError> {
let mut tmp = Vec::with_capacity(((s.len() / 7) * 4) + 2);
decode_into(s, &mut tmp)?;
Ok(tmp)
}
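A short usage sketch of the helpers above; it relies only on the round-trip property that the test below also exercises:

// Sketch only: round-trip a few bytes through base24.
fn base24_roundtrip_example() {
    let data = [0x01u8, 0x02, 0x03, 0x04, 0x05];
    let s = encode(&data);                                   // all-lowercase, phone-friendly code
    let back = decode(s.as_bytes()).expect("valid base24");
    assert_eq!(back.as_slice(), &data[..]);
}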
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn encode_decode() {
let mut tmp = [0xffu8; 256];
for _ in 0..3 {
let mut s = String::with_capacity(1024);
let mut v: Vec<u8> = Vec::with_capacity(256);
for i in 1..256 {
s.clear();
encode_into(&tmp[..i], &mut s);
//println!("{}", s);
v.clear();
decode_into(s.as_str().as_bytes(), &mut v).expect("decode error");
assert!(v.as_slice().eq(&tmp[..i]));
}
for b in tmp.iter_mut() {
*b -= 3;
}
}
}
}

187
utils/src/base62.rs Normal file
View file

@@ -0,0 +1,187 @@
use std::io::Write;
use super::arrayvec::ArrayVec;
use super::memory;
const MAX_LENGTH_WORDS: usize = 128;
/// Encode a byte array into a base62 string.
///
/// If pad_output_to_length is nonzero, base62 zero digits are appended so that the output
/// string is at least that long. Set it to zero to disable padding. Padding has
/// no effect on the decoded output length.
pub fn encode_into(b: &[u8], s: &mut String, pad_output_to_length: usize) {
assert!(b.len() <= MAX_LENGTH_WORDS * 4);
let mut n: ArrayVec<u32, MAX_LENGTH_WORDS> = ArrayVec::new();
let mut i = 0;
let len_words = b.len() & usize::MAX.wrapping_shl(2);
while i < len_words {
n.push(u32::from_le(memory::load_raw(&b[i..])));
i += 4;
}
if i < b.len() {
let mut w = 0u32;
let mut shift = 0u32;
while i < b.len() {
w |= (b[i] as u32).wrapping_shl(shift);
i += 1;
shift += 8;
}
n.push(w);
}
let mut string_len = 0;
while !n.is_empty() {
s.push(b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"[big_div_rem::<MAX_LENGTH_WORDS, 62>(&mut n) as usize] as char);
string_len += 1;
}
while string_len < pad_output_to_length {
s.push('0');
string_len += 1;
}
}
/// Decode Base62 into a vector or other output.
///
/// Note that base62 has no way to encode the output length, so decoded data may come up short if the
/// original input ended in zero bytes. The output_length parameter specifies the expected length of the
/// output, which is zero padded if the decoded data does not reach it. If the decoded data exceeds this
/// length an error is returned.
pub fn decode_into<W: Write>(s: &[u8], b: &mut W, output_length: usize) -> std::io::Result<()> {
let mut n: ArrayVec<u32, MAX_LENGTH_WORDS> = ArrayVec::new();
for c in s.iter().rev() {
let mut c = *c as u32;
// 0..9, A..Z, or a..z
if c >= 48 && c <= 57 {
c -= 48;
} else if c >= 65 && c <= 90 {
c -= 65 - 10;
} else if c >= 97 && c <= 122 {
c -= 97 - (10 + 26);
} else {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid base62"));
}
big_mul::<MAX_LENGTH_WORDS, 62>(&mut n);
big_add(&mut n, c);
}
let mut bc = output_length;
for w in n.iter() {
if bc > 0 {
let l = bc.min(4);
b.write_all(&w.to_le_bytes()[..l])?;
bc -= l;
} else {
return Err(std::io::Error::new(std::io::ErrorKind::Other, "data too large"));
}
}
while bc > 0 {
b.write_all(&[0])?;
bc -= 1;
}
return Ok(());
}
#[inline]
pub fn decode_into_slice(s: &[u8], mut b: &mut [u8]) -> std::io::Result<()> {
let l = b.len();
decode_into(s, &mut b, l)
}
/// Decode into and return a fixed-size array whose length L is the desired output length.
/// None is returned if there is an error.
#[inline]
pub fn decode<const L: usize>(s: &[u8]) -> Option<[u8; L]> {
let mut buf = [0u8; L];
let mut w = &mut buf[..];
if decode_into(s, &mut w, L).is_ok() {
Some(buf)
} else {
None
}
}
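A hedged round-trip sketch using encode_into() and decode() above; the 16-byte payload is an arbitrary example:

// Sketch only: base62 round trip with a fixed expected output length.
fn base62_roundtrip_example() {
    let key = [0x42u8; 16];
    let mut s = String::new();
    encode_into(&key, &mut s, 0); // no output padding
    let back = decode::<16>(s.as_bytes()).expect("valid base62");
    assert_eq!(back, key);
}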
#[inline(always)]
fn big_div_rem<const C: usize, const D: u64>(n: &mut ArrayVec<u32, C>) -> u32 {
while let Some(&0) = n.last() {
n.pop();
}
let mut rem = 0;
for word in n.iter_mut().rev() {
let temp = (rem as u64).wrapping_shl(32) | (*word as u64);
let (a, b) = (temp / D, temp % D);
*word = a as u32;
rem = b as u32;
}
while let Some(&0) = n.last() {
n.pop();
}
rem
}
#[inline(always)]
fn big_add<const C: usize>(n: &mut ArrayVec<u32, C>, i: u32) {
let mut carry = i as u64;
for word in n.iter_mut() {
let res = (*word as u64).wrapping_add(carry);
*word = res as u32;
carry = res.wrapping_shr(32);
}
if carry > 0 {
n.push(carry as u32);
}
}
#[inline(always)]
fn big_mul<const C: usize, const M: u64>(n: &mut ArrayVec<u32, C>) {
while let Some(&0) = n.last() {
n.pop();
}
let mut carry = 0;
for word in n.iter_mut() {
let temp = (*word as u64).wrapping_mul(M).wrapping_add(carry);
*word = temp as u32;
carry = temp.wrapping_shr(32);
}
if carry != 0 {
n.push(carry as u32);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn div_rem() {
let mut n = ArrayVec::<u32, 4>::new();
n.push_slice(&[0xdeadbeef, 0xfeedfeed, 0xcafebabe, 0xf00dd00d]);
let rem = big_div_rem::<4, 63>(&mut n);
let nn = n.as_ref();
assert!(nn[0] == 0xaa23440b && nn[1] == 0xa696103c && nn[2] == 0x89513fea && nn[3] == 0x03cf7514 && rem == 58);
}
#[test]
fn encode_decode() {
let mut test = [0xff; 64];
for tl in 1..64 {
let test = &mut test[..tl];
test.fill(0xff);
let mut b = Vec::with_capacity(1024);
for _ in 0..10 {
let mut s = String::with_capacity(1024);
encode_into(&test, &mut s, 86);
b.clear();
//println!("{}", s);
assert!(decode_into(s.as_bytes(), &mut b, test.len()).is_ok());
assert_eq!(b.as_slice(), test);
for c in test.iter_mut() {
*c = crate::rand() as u8;
}
}
}
}
}

View file

@@ -1,20 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
const BASE64_URL_SAFE_NO_PAD_ENGINE: base64::engine::fast_portable::FastPortable =
base64::engine::fast_portable::FastPortable::from(&base64::alphabet::URL_SAFE, base64::engine::fast_portable::NO_PAD);
/// Encode base64 using URL-safe alphabet and no padding.
pub fn encode_url_nopad(bytes: &[u8]) -> String {
base64::encode_engine(bytes, &BASE64_URL_SAFE_NO_PAD_ENGINE)
}
/// Decode base64 using URL-safe alphabet and no padding, or None on error.
pub fn decode_url_nopad(b64: &str) -> Option<Vec<u8>> {
base64::decode_engine(b64, &BASE64_URL_SAFE_NO_PAD_ENGINE).ok()
}

View file

@@ -78,6 +78,38 @@ pub fn from_string(s: &str) -> Vec<u8> {
b
}
pub fn from_string_u64(s: &str) -> u64 {
let mut n = 0u64;
let mut byte = 0_u8;
let mut have_8: bool = false;
for cc in s.as_bytes() {
let c = *cc;
if c >= 48 && c <= 57 {
byte = (byte.wrapping_shl(4)) | (c - 48);
if have_8 {
n = n.wrapping_shl(8);
n |= byte as u64;
}
have_8 = !have_8;
} else if c >= 65 && c <= 70 {
byte = (byte.wrapping_shl(4)) | (c - 55);
if have_8 {
n = n.wrapping_shl(8);
n |= byte as u64;
}
have_8 = !have_8;
} else if c >= 97 && c <= 102 {
byte = (byte.wrapping_shl(4)) | (c - 87);
if have_8 {
n = n.wrapping_shl(8);
n |= byte as u64;
}
have_8 = !have_8;
}
}
n
}
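A small hedged sketch of what the new helper above accepts; the exact inputs are illustrative only:

// Sketch only: from_string_u64() reads hex digit pairs in order and skips non-hex characters.
fn from_string_u64_example() {
    assert_eq!(from_string_u64("deadbeef"), 0xdead_beef_u64);
    assert_eq!(from_string_u64("DE:AD:BE:EF"), 0xdead_beef_u64); // separators are ignored
}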
/// Encode bytes from 'b' into hex characters in 'dest' and return the number of hex characters written.
/// This will panic if the destination slice is smaller than twice the length of the source.
pub fn to_hex_bytes(b: &[u8], dest: &mut [u8]) -> usize {

View file

@@ -6,9 +6,9 @@
* https://www.zerotier.com/
*/
pub mod arc_pool;
pub mod arrayvec;
pub mod base64;
pub mod base24;
pub mod base62;
pub mod blob;
pub mod buffer;
pub mod cast;
@@ -28,15 +28,13 @@ pub mod pool;
#[cfg(feature = "tokio")]
pub mod reaper;
pub mod ringbuffer;
pub mod str;
pub mod sync;
pub mod varint;
#[cfg(feature = "tokio")]
pub use tokio;
#[cfg(feature = "tokio")]
pub use futures_util;
/// Initial value that should be used for monotonic tick time variables.
pub const NEVER_HAPPENED_TICKS: i64 = i64::MIN;
@@ -87,6 +85,11 @@ pub fn wait_for_process_abort() {
#[inline(never)]
pub extern "C" fn unlikely_branch() {}
#[cfg(unix)]
pub fn rand() -> u32 {
unsafe { (libc::rand() as u32) ^ (libc::rand() as u32).wrapping_shr(8) }
}
#[cfg(test)]
mod tests {
use super::ms_monotonic;

View file

@@ -8,20 +8,15 @@
use std::error::Error;
use std::fmt::{Debug, Display};
use std::io::Write;
use crate::buffer::Buffer;
/// Must be larger than any object we want to use with to_bytes() or from_bytes().
/// This hack can go away once Rust allows us to reference trait consts as generics.
const TEMP_BUF_SIZE: usize = 8192;
use crate::buffer::{Buffer, OutOfBoundsError};
/// A super-lightweight zero-allocation serialization interface.
pub trait Marshalable: Sized {
const MAX_MARSHAL_SIZE: usize;
/// Write this object into a buffer.
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), UnmarshalError>;
fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> Result<(), OutOfBoundsError>;
/// Read this object from a buffer.
///
@@ -35,88 +30,101 @@ pub trait Marshalable: Sized {
/// This will return an Err if the buffer is too small or some other error occurs. It's just
/// a shortcut to creating a buffer and marshaling into it.
#[inline]
fn to_buffer<const BL: usize>(&self) -> Result<Buffer<BL>, UnmarshalError> {
fn to_buffer<const BL: usize>(&self) -> Result<Buffer<BL>, OutOfBoundsError> {
let mut tmp = Buffer::new();
self.marshal(&mut tmp)?;
Ok(tmp)
}
/// Unmarshal this object from a buffer.
///
/// This is just a shortcut to calling unmarshal() with a zero cursor and then discarding the cursor.
#[inline]
fn from_buffer<const BL: usize>(buf: &Buffer<BL>) -> Result<Self, UnmarshalError> {
let mut tmp = 0;
Self::unmarshal(buf, &mut tmp)
}
/// Marshal and convert to a Rust vector.
#[inline]
fn to_bytes(&self) -> Vec<u8> {
assert!(Self::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp = Buffer::<TEMP_BUF_SIZE>::new();
assert!(self.marshal(&mut tmp).is_ok()); // panics if TEMP_BUF_SIZE is too small
tmp.as_bytes().to_vec()
}
/// Unmarshal from a raw slice.
#[inline]
fn from_bytes(b: &[u8]) -> Result<Self, UnmarshalError> {
if b.len() <= TEMP_BUF_SIZE {
let mut tmp = Buffer::<TEMP_BUF_SIZE>::new_boxed();
assert!(tmp.append_bytes(b).is_ok());
let mut cursor = 0;
Self::unmarshal(&tmp, &mut cursor)
} else {
Err(UnmarshalError::OutOfBounds)
/*
/// Write this marshalable entity into a buffer of the given size.
///
/// This will return an Err if the buffer is too small or some other error occurs. It's just
/// a shortcut to creating a buffer and marshaling into it.
#[inline]
fn to_buffer<const BL: usize>(&self) -> Result<Buffer<BL>, UnmarshalError> {
let mut tmp = Buffer::new();
self.marshal(&mut tmp)?;
Ok(tmp)
}
}
/// Marshal a slice of marshalable objects to a concatenated byte vector.
#[inline]
fn marshal_multiple_to_bytes(objects: &[Self]) -> Result<Vec<u8>, UnmarshalError> {
assert!(Self::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp: Buffer<{ TEMP_BUF_SIZE }> = Buffer::new();
let mut v: Vec<u8> = Vec::with_capacity(objects.len() * Self::MAX_MARSHAL_SIZE);
for i in objects.iter() {
i.marshal(&mut tmp)?;
let _ = v.write_all(tmp.as_bytes());
tmp.clear();
/// Unmarshal this object from a buffer.
///
/// This is just a shortcut to calling unmarshal() with a zero cursor and then discarding the cursor.
#[inline]
fn from_buffer<const BL: usize>(buf: &Buffer<BL>) -> Result<Self, UnmarshalError> {
let mut tmp = 0;
Self::unmarshal(buf, &mut tmp)
}
Ok(v)
}
/// Unmarshal a concatenated byte slice of marshalable objects.
#[inline]
fn unmarshal_multiple_from_bytes(mut bytes: &[u8]) -> Result<Vec<Self>, UnmarshalError> {
assert!(Self::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp: Buffer<{ TEMP_BUF_SIZE }> = Buffer::new();
let mut v: Vec<Self> = Vec::new();
while bytes.len() > 0 {
let chunk_size = bytes.len().min(Self::MAX_MARSHAL_SIZE);
if tmp.append_bytes(&bytes[..chunk_size]).is_err() {
return Err(UnmarshalError::OutOfBounds);
/// Marshal and convert to a Rust vector.
#[inline]
fn to_bytes(&self) -> Vec<u8> {
assert!(Self::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp = Buffer::<TEMP_BUF_SIZE>::new();
assert!(self.marshal(&mut tmp).is_ok()); // panics if TEMP_BUF_SIZE is too small
tmp.as_bytes().to_vec()
}
/// Unmarshal from a raw slice.
#[inline]
fn from_bytes(b: &[u8]) -> Result<Self, UnmarshalError> {
if b.len() <= TEMP_BUF_SIZE {
let mut tmp = Buffer::<TEMP_BUF_SIZE>::new_boxed();
assert!(tmp.append_bytes(b).is_ok());
let mut cursor = 0;
Self::unmarshal(&tmp, &mut cursor)
} else {
Err(UnmarshalError::OutOfBounds)
}
let mut cursor = 0;
v.push(Self::unmarshal(&mut tmp, &mut cursor)?);
if cursor == 0 {
return Err(UnmarshalError::InvalidData);
}
let _ = tmp.erase_first_n(cursor);
bytes = &bytes[chunk_size..];
}
Ok(v)
}
/// Unmarshal a buffer with a byte slice of marshalable objects.
#[inline]
fn unmarshal_multiple<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize, eof: usize) -> Result<Vec<Self>, UnmarshalError> {
let mut v: Vec<Self> = Vec::new();
while *cursor < eof {
v.push(Self::unmarshal(buf, cursor)?);
/// Marshal a slice of marshalable objects to a concatenated byte vector.
#[inline]
fn marshal_multiple_to_bytes(objects: &[Self]) -> Result<Vec<u8>, UnmarshalError> {
assert!(Self::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp: Buffer<{ TEMP_BUF_SIZE }> = Buffer::new();
let mut v: Vec<u8> = Vec::with_capacity(objects.len() * Self::MAX_MARSHAL_SIZE);
for i in objects.iter() {
i.marshal(&mut tmp)?;
let _ = v.write_all(tmp.as_bytes());
tmp.clear();
}
Ok(v)
}
Ok(v)
}
/// Unmarshal a concatenated byte slice of marshalable objects.
#[inline]
fn unmarshal_multiple_from_bytes(mut bytes: &[u8]) -> Result<Vec<Self>, UnmarshalError> {
assert!(Self::MAX_MARSHAL_SIZE <= TEMP_BUF_SIZE);
let mut tmp: Buffer<{ TEMP_BUF_SIZE }> = Buffer::new();
let mut v: Vec<Self> = Vec::new();
while bytes.len() > 0 {
let chunk_size = bytes.len().min(Self::MAX_MARSHAL_SIZE);
if tmp.append_bytes(&bytes[..chunk_size]).is_err() {
return Err(UnmarshalError::OutOfBounds);
}
let mut cursor = 0;
v.push(Self::unmarshal(&mut tmp, &mut cursor)?);
if cursor == 0 {
return Err(UnmarshalError::InvalidData);
}
let _ = tmp.erase_first_n(cursor);
bytes = &bytes[chunk_size..];
}
Ok(v)
}
/// Unmarshal a buffer with a byte slice of marshalable objects.
#[inline]
fn unmarshal_multiple<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize, eof: usize) -> Result<Vec<Self>, UnmarshalError> {
let mut v: Vec<Self> = Vec::new();
while *cursor < eof {
v.push(Self::unmarshal(buf, cursor)?);
}
Ok(v)
}
*/
}
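As a hedged illustration of the buffer-based surface that remains (to_buffer()/from_buffer() above), a generic round-trip helper; it returns Option to avoid assuming any conversion between OutOfBoundsError and UnmarshalError:

// Sketch only: round-trip any Marshalable value through a fixed-size Buffer.
fn roundtrip<T: Marshalable, const BL: usize>(obj: &T) -> Option<T> {
    let buf: Buffer<BL> = obj.to_buffer().ok()?;
    T::from_buffer(&buf).ok()
}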
pub enum UnmarshalError {

View file

@@ -15,6 +15,9 @@ use std::mem::{needs_drop, size_of, MaybeUninit};
#[allow(unused_imports)]
use std::ptr::copy_nonoverlapping;
/// Implement this trait to mark a struct as safe to cast from a byte array.
pub unsafe trait FlatBuffer: Sized {}
/// Store a raw object to a byte array (for architectures known not to care about unaligned access).
/// This will panic if the slice is too small or the object requires drop.
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64"))]
@@ -59,9 +62,9 @@ pub fn load_raw<T: Copy>(src: &[u8]) -> T {
}
}
/// Our version of the not-yet-stable array_chunks method in slice, but only for byte arrays.
/// Our version of the not-yet-stable array_chunks method in slice.
#[inline(always)]
pub fn byte_array_chunks_exact<const S: usize>(a: &[u8]) -> impl Iterator<Item = &[u8; S]> {
pub fn array_chunks_exact<T, const S: usize>(a: &[T]) -> impl Iterator<Item = &[T; S]> {
let mut i = 0;
let l = a.len();
std::iter::from_fn(move || {
@@ -79,7 +82,7 @@ pub fn byte_array_chunks_exact<const S: usize>(a: &[u8]) -> impl Iterator<Item =
/// Obtain a view into an array cast as another array.
/// This will panic if the template parameters would result in out of bounds access.
#[inline(always)]
pub fn array_range<T: Copy, const S: usize, const START: usize, const LEN: usize>(a: &[T; S]) -> &[T; LEN] {
pub fn array_range<T, const S: usize, const START: usize, const LEN: usize>(a: &[T; S]) -> &[T; LEN] {
assert!((START + LEN) <= S);
unsafe { &*a.as_ptr().add(START).cast::<[T; LEN]>() }
}
@@ -108,3 +111,11 @@ pub fn to_byte_array<T: Copy, const S: usize>(o: T) -> [u8; S] {
assert!(!std::mem::needs_drop::<T>());
unsafe { *(&o as *const T).cast() }
}
/// Cast a byte slice into a flat struct.
/// This will panic if the slice is too small or the struct requires drop.
pub fn cast_to_struct<T: FlatBuffer>(b: &[u8]) -> &T {
assert!(b.len() >= size_of::<T>());
assert!(!std::mem::needs_drop::<T>());
unsafe { &*b.as_ptr().cast() }
}
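A hedged sketch of the FlatBuffer pattern introduced above; ExampleHeader is a hypothetical type, not something defined in this crate:

// Sketch only: a packed repr(C) struct viewed directly over a byte slice.
#[repr(C, packed)]
struct ExampleHeader {
    version: u8,
    flags: u8,
    length: u16,
}
unsafe impl FlatBuffer for ExampleHeader {}

fn view_header(raw: &[u8]) -> &ExampleHeader {
    // Panics if raw is shorter than size_of::<ExampleHeader>().
    cast_to_struct::<ExampleHeader>(raw)
}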

56
utils/src/str.rs Normal file
View file

@@ -0,0 +1,56 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
use crate::hex::HEX_CHARS;
/// Escape non-printable and non-ASCII characters in a byte string as \XX hex escapes.
/// This also escapes quotes, backslashes, and other characters that cause issues on terminals.
pub fn escape(b: &[u8]) -> String {
let mut s = String::with_capacity(b.len() * 2);
for b in b.iter() {
let b = *b;
if b >= 43 && b <= 126 && b != 92 && b != 96 {
s.push(b as char);
} else {
s.push('\\');
s.push(HEX_CHARS[(b.wrapping_shr(4) & 0xf) as usize] as char);
s.push(HEX_CHARS[(b & 0xf) as usize] as char);
}
}
s
}
/// Unescape a string with \XX hexadecimal escapes.
pub fn unescape(s: &str) -> Vec<u8> {
let mut b = Vec::with_capacity(s.len());
let mut s = s.as_bytes();
while let Some(c) = s.first() {
let c = *c;
if c == b'\\' {
if s.len() < 3 {
break;
}
let mut cc = 0u8;
for c in [s[1], s[2]] {
if c >= 48 && c <= 57 {
cc = cc.wrapping_shl(4) | (c - 48);
} else if c >= 65 && c <= 70 {
cc = cc.wrapping_shl(4) | (c - 55);
} else if c >= 97 && c <= 102 {
cc = cc.wrapping_shl(4) | (c - 87);
}
}
b.push(cc);
s = &s[3..];
} else {
b.push(c);
s = &s[1..];
}
}
b
}
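A brief hedged round-trip sketch for escape() and unescape(); the input bytes are arbitrary and include a quote, a newline, and a NUL to exercise the escape path:

// Sketch only: escape() and unescape() are inverses for arbitrary bytes.
fn escape_roundtrip_example() {
    let raw: &[u8] = b"hello \"zt\"\n\x00";
    let printable = escape(raw);             // unsafe bytes become \XX hex escapes
    assert_eq!(unescape(&printable), raw.to_vec());
}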

View file

@@ -1,20 +0,0 @@
[package]
name = "zerotier-vl1-service"
version = "0.1.0"
authors = ["ZeroTier, Inc. <contact@zerotier.com>", "Adam Ierymenko <adam.ierymenko@zerotier.com>"]
edition = "2021"
license = "MPL-2.0"
[dependencies]
zerotier-network-hypervisor = { path = "../network-hypervisor" }
zerotier-crypto = { path = "../crypto" }
zerotier-utils = { path = "../utils" }
num-traits = "^0"
serde = { version = "^1", features = ["derive"], default-features = false }
serde_json = { version = "^1", features = ["std"], default-features = false }
[target."cfg(windows)".dependencies]
winapi = { version = "^0", features = ["handleapi", "ws2ipdef", "ws2tcpip"] }
[target."cfg(not(windows))".dependencies]
libc = "^0"

View file

@@ -1 +0,0 @@
../rustfmt.toml

View file

@@ -1,49 +0,0 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::collections::HashSet;
use serde::{Deserialize, Serialize};
use zerotier_network_hypervisor::vl1::InetAddress;
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(default)]
pub struct VL1Settings {
/// Primary ZeroTier port that is always bound, default is 9993.
pub fixed_ports: HashSet<u16>,
/// Number of additional random ports to bind.
pub random_port_count: usize,
/// Enable uPnP, NAT-PMP, and other router port mapping technologies?
pub port_mapping: bool,
/// Interface name prefix blacklist for local bindings (not remote IPs).
pub interface_prefix_blacklist: HashSet<String>,
/// IP/bits CIDR blacklist for local bindings (not remote IPs).
pub cidr_blacklist: HashSet<InetAddress>,
}
impl VL1Settings {
#[cfg(target_os = "macos")]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 11] = ["lo", "utun", "gif", "stf", "iptap", "pktap", "feth", "zt", "llw", "anpi", "bridge"];
#[cfg(target_os = "linux")]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 5] = ["lo", "tun", "tap", "ipsec", "zt"];
#[cfg(windows)]
pub const DEFAULT_PREFIX_BLACKLIST: [&'static str; 0] = [];
}
impl Default for VL1Settings {
fn default() -> Self {
Self {
fixed_ports: HashSet::from([9993u16]),
random_port_count: 5,
port_mapping: true,
interface_prefix_blacklist: Self::DEFAULT_PREFIX_BLACKLIST.iter().map(|s| s.to_string()).collect(),
cidr_blacklist: HashSet::new(),
}
}
}

View file

@@ -1,237 +0,0 @@
Name: zerotier-one
Version: 1.10.2
Release: 1%{?dist}
Summary: ZeroTier network virtualization service
License: ZeroTier BSL 1.1
URL: https://www.zerotier.com
# Fedora
%if "%{?dist}" == ".fc35"
BuildRequires: systemd clang openssl openssl-devel
Requires: systemd openssl
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%if "%{?dist}" == ".fc36"
BuildRequires: systemd clang openssl1.1 openssl1.1-devel
Requires: systemd openssl1.1
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%if "%{?dist}" == ".fc37"
BuildRequires: systemd clang openssl1.1 openssl1.1-devel
Requires: systemd openssl1.1
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
# RHEL
%if "%{?dist}" == ".el6"
Requires: chkconfig
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%if "%{?dist}" == ".el7"
BuildRequires: systemd openssl-devel
Requires: systemd openssl
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%if "%{?dist}" == ".el8"
BuildRequires: systemd openssl-devel
Requires: systemd openssl
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%if "%{?dist}" == ".el9"
BuildRequires: systemd openssl-devel
Requires: systemd openssl
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
# Amazon
%if "%{?dist}" == ".amzn2"
BuildRequires: systemd openssl-devel
Requires: systemd openssl
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%if "%{?dist}" == ".amzn2022"
BuildRequires: systemd openssl-devel
Requires: systemd openssl
Requires(pre): /usr/sbin/useradd, /usr/bin/getent
%endif
%description
ZeroTier is a software defined networking layer for Earth.
It can be used for on-premise network virtualization, as a peer to peer VPN
for mobile teams, for hybrid or multi-data-center cloud deployments, or just
about anywhere else secure software defined virtual networking is useful.
This is our OS-level client service. It allows Mac, Linux, Windows,
FreeBSD, and soon other types of clients to join ZeroTier virtual networks
like conventional VPNs or VLANs. It can run on native systems, VMs, or
containers (Docker, OpenVZ, etc.).
%prep
%if "%{?dist}" != ".el6"
rm -rf BUILD BUILDROOT RPMS SRPMS SOURCES
ln -s %{getenv:PWD} %{name}-%{version}
mkdir -p SOURCES
tar --exclude=%{name}-%{version}/.git --exclude=%{name}-%{version}/%{name}-%{version} -czf SOURCES/%{name}-%{version}.tar.gz %{name}-%{version}/*
rm -f %{name}-%{version}
cp -a %{getenv:PWD}/* .
%endif
%build
%if "%{?dist}" != ".el6"
make ZT_USE_MINIUPNPC=1 %{?_smp_mflags} one
%endif
%pre
/usr/bin/getent passwd zerotier-one || /usr/sbin/useradd -r -d /var/lib/zerotier-one -s /sbin/nologin zerotier-one
%install
%if "%{?dist}" != ".el6"
make install DESTDIR=$RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT%{_unitdir}
cp %{getenv:PWD}/debian/zerotier-one.service $RPM_BUILD_ROOT%{_unitdir}/%{name}.service
%else
rm -rf $RPM_BUILD_ROOT
pushd %{getenv:PWD}
make install DESTDIR=$RPM_BUILD_ROOT
popd
mkdir -p $RPM_BUILD_ROOT/etc/init.d
cp %{getenv:PWD}/ext/installfiles/linux/zerotier-one.init.rhel6 $RPM_BUILD_ROOT/etc/init.d/zerotier-one
chmod 0755 $RPM_BUILD_ROOT/etc/init.d/zerotier-one
%endif
%files
%{_sbindir}/*
%{_mandir}/*
%{_localstatedir}/*
%if 0%{?rhel} && 0%{?rhel} <= 6
/etc/init.d/zerotier-one
%else
%{_unitdir}/%{name}.service
%endif
%post
%if ! 0%{?rhel} && 0%{?rhel} <= 6
%systemd_post zerotier-one.service
%endif
%preun
%if ! 0%{?rhel} && 0%{?rhel} <= 6
%systemd_preun zerotier-one.service
%endif
%postun
%if ! 0%{?rhel} && 0%{?rhel} <= 6
%systemd_postun_with_restart zerotier-one.service
%endif
%changelog
* Mon Oct 13 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.10.2
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Jun 27 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.10.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Fri Jun 03 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.10.0
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue May 10 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.10
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Apr 25 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.9
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Apr 11 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.8
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Mar 21 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.7
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Mar 07 2022 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.6
- see https://github.com/zerotier/ZeroTierOne for release notes
* Fri Dec 17 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.5
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue Nov 23 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.4
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Nov 15 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.3
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Nov 08 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.2
- see https://github.com/zerotier/ZeroTierOne for release notes
* Wed Oct 20 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Wed Sep 15 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.8.0
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue Apr 13 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.6.5
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Feb 15 2021 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.6.4
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Nov 30 2020 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.6.2-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue Nov 24 2020 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.6.1-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Thu Nov 19 2020 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.6.0-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Oct 05 2020 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.6.0-beta1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Fri Aug 23 2019 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.4.4-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Jul 29 2019 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.4.0-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue May 08 2018 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.2.10-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Thu May 03 2018 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.2.8-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Mon Apr 24 2017 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.2.2-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Fri Mar 17 2017 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.2.2-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue Mar 14 2017 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.2.0-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Tue Jul 12 2016 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.1.10-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Fri Jul 08 2016 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.1.8-0.1
- see https://github.com/zerotier/ZeroTierOne for release notes
* Sat Jun 25 2016 Adam Ierymenko <adam.ierymenko@zerotier.com> - 1.1.6-0.1
- now builds on CentOS 6 as well as newer distros, and some cleanup
* Wed Jun 08 2016 François Kooman <fkooman@tuxed.net> - 1.1.5-0.3
- include systemd unit file
* Wed Jun 08 2016 François Kooman <fkooman@tuxed.net> - 1.1.5-0.2
- add libnatpmp as (build)dependency
* Wed Jun 08 2016 François Kooman <fkooman@tuxed.net> - 1.1.5-0.1
- initial package