Fixes to crypto on macOS, a lot of cleanup, and work on full state replication for V2 VL2 (#1911)

* Move some stuff around in prep for a VL2 rework and identity rework.

* Mix ephemeral keys into "h"

* More topology stuff for VL2.

* Simplify key queue, fix macOS issues with bindings, and no need to cache PSK forever.

* Some more merge fixes.

* A bunch of ZSSP cleanup and optimization. Runs a bit faster now.

* Remove some unused util code.

* Scatter/gather stuff.

* The scatter/gather algorithm works.

* Make OpenSSL init get called automatically at process launch, and some more scatter gather work.

* Added support for cloning on EcKey.

* Scatter/gather: move SG into VL2 since that is where it will be used, and add an array chunker to utils::memory.

* Simplify some Rust generic madness.

* Docs.

* Some cleanup and reorg.

* Bring back AES-GMAC-SIV tests.

* Turns out a Mutex is not really any slower...

---------

Co-authored-by: mamoniot <mamoniot@protonmail.com>
Authored by Adam Ierymenko on 2023-03-14 15:29:20 -04:00, committed by GitHub
parent faf4c9a5b1
commit d0446a965e
33 changed files with 1148 additions and 833 deletions


@ -17,6 +17,7 @@ use zerotier_network_hypervisor::vl2::v1::Revocation;
use zerotier_network_hypervisor::vl2::NetworkId;
use zerotier_utils::blob::Blob;
use zerotier_utils::buffer::OutOfBoundsError;
use zerotier_utils::cast::cast_ref;
use zerotier_utils::error::InvalidParameterError;
use zerotier_utils::reaper::Reaper;
use zerotier_utils::tokio;
@ -139,62 +140,56 @@ impl Controller {
}
/// Compose and send network configuration packet (either V1 or V2)
fn send_network_config(
fn send_network_config<Application: ApplicationLayer + ?Sized>(
&self,
peer: &Peer,
app: &Application,
node: &Node<Application>,
peer: &Peer<Application>,
config: &NetworkConfig,
in_re_message_id: Option<u64>, // None for unsolicited push
) {
if let Some(host_system) = self.service.read().unwrap().upgrade() {
peer.send(
host_system.as_ref(),
host_system.node(),
None,
ms_monotonic(),
|packet| -> Result<(), OutOfBoundsError> {
let payload_start = packet.len();
peer.send(app, node, None, ms_monotonic(), |packet| -> Result<(), OutOfBoundsError> {
let payload_start = packet.len();
if let Some(in_re_message_id) = in_re_message_id {
let ok_header = packet.append_struct_get_mut::<protocol::OkHeader>()?;
ok_header.verb = protocol::message_type::VL1_OK;
ok_header.in_re_verb = protocol::message_type::VL2_NETWORK_CONFIG_REQUEST;
ok_header.in_re_message_id = in_re_message_id.to_be_bytes();
} else {
packet.append_u8(protocol::message_type::VL2_NETWORK_CONFIG)?;
}
if let Some(in_re_message_id) = in_re_message_id {
let ok_header = packet.append_struct_get_mut::<protocol::OkHeader>()?;
ok_header.verb = protocol::message_type::VL1_OK;
ok_header.in_re_verb = protocol::message_type::VL2_NETWORK_CONFIG_REQUEST;
ok_header.in_re_message_id = in_re_message_id.to_be_bytes();
} else {
packet.append_u8(protocol::message_type::VL2_NETWORK_CONFIG)?;
}
if peer.is_v2() {
todo!()
} else {
let config_data = if let Some(config_dict) = config.v1_proto_to_dictionary(&self.local_identity) {
config_dict.to_bytes()
} else {
eprintln!("WARNING: unexpected error serializing network config into V1 format dictionary");
return Err(OutOfBoundsError); // abort
};
if config_data.len() > (u16::MAX as usize) {
eprintln!("WARNING: network config is larger than 65536 bytes!");
return Err(OutOfBoundsError); // abort
}
if peer.is_v2() {
todo!()
} else {
let config_data = if let Some(config_dict) = config.v1_proto_to_dictionary(&self.local_identity) {
config_dict.to_bytes()
} else {
eprintln!("WARNING: unexpected error serializing network config into V1 format dictionary");
return Err(OutOfBoundsError); // abort
};
if config_data.len() > (u16::MAX as usize) {
eprintln!("WARNING: network config is larger than 65536 bytes!");
return Err(OutOfBoundsError); // abort
}
packet.append_u64(config.network_id.into())?;
packet.append_u16(config_data.len() as u16)?;
packet.append_bytes(config_data.as_slice())?;
packet.append_u64(config.network_id.into())?;
packet.append_u16(config_data.len() as u16)?;
packet.append_bytes(config_data.as_slice())?;
// TODO: for V1 we may need to introduce use of the chunking mechanism for large configs.
}
// TODO: for V1 we may need to introduce use of the chunking mechanism for large configs.
}
let new_payload_len = protocol::compress(&mut packet.as_bytes_mut()[payload_start..]);
packet.set_size(payload_start + new_payload_len);
let new_payload_len = protocol::compress(&mut packet.as_bytes_mut()[payload_start..]);
packet.set_size(payload_start + new_payload_len);
Ok(())
},
);
}
Ok(())
});
}
/// Send one or more revocation object(s) to a peer. The provided vector is drained.
fn send_revocations(&self, peer: &Peer, revocations: &mut Vec<Revocation>) {
fn send_revocations(&self, peer: &Peer<VL1Service<Self>>, revocations: &mut Vec<Revocation>) {
if let Some(host_system) = self.service.read().unwrap().upgrade() {
let time_ticks = ms_monotonic();
while !revocations.is_empty() {
@ -496,28 +491,12 @@ impl Controller {
}
impl InnerProtocolLayer for Controller {
#[inline(always)]
fn should_respond_to(&self, _: &Valid<Identity>) -> bool {
// Controllers always have to establish sessions to process requests. We don't really know if
// a member is relevant until we have looked up both the network and the member, since whether
// or not to "learn" unknown members is a network level option.
true
}
fn has_trust_relationship(&self, id: &Valid<Identity>) -> bool {
self.recently_authorized
.read()
.unwrap()
.get(&id.fingerprint)
.map_or(false, |by_network| by_network.values().any(|t| *t > ms_monotonic()))
}
fn handle_packet<HostSystemImpl: ApplicationLayer + ?Sized>(
fn handle_packet<Application: ApplicationLayer + ?Sized>(
&self,
host_system: &HostSystemImpl,
_: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
verb: u8,
@ -526,10 +505,6 @@ impl InnerProtocolLayer for Controller {
) -> PacketHandlerResult {
match verb {
protocol::message_type::VL2_NETWORK_CONFIG_REQUEST => {
if !self.should_respond_to(&source.identity) {
return PacketHandlerResult::Ok; // handled and ignored
}
let network_id = payload.read_u64(&mut cursor);
if network_id.is_err() {
return PacketHandlerResult::Error;
@ -541,7 +516,7 @@ impl InnerProtocolLayer for Controller {
let network_id = network_id.unwrap();
debug_event!(
host_system,
app,
"[vl2] NETWORK_CONFIG_REQUEST from {}({}) for {:0>16x}",
source.identity.address.to_string(),
source_path.endpoint.to_string(),
@ -573,7 +548,8 @@ impl InnerProtocolLayer for Controller {
let (result, config) = match self2.authorize(&source.identity, network_id, now).await {
Result::Ok((result, Some(config))) => {
//println!("{}", serde_yaml::to_string(&config).unwrap());
self2.send_network_config(source.as_ref(), &config, Some(message_id));
let app = self2.service.read().unwrap().upgrade().unwrap();
self2.send_network_config(app.as_ref(), app.node(), cast_ref(source.as_ref()).unwrap(), &config, Some(message_id));
(result, Some(config))
}
Result::Ok((result, None)) => (result, None),
@ -626,23 +602,21 @@ impl InnerProtocolLayer for Controller {
}
protocol::message_type::VL2_MULTICAST_GATHER => {
if let Some(service) = self.service.read().unwrap().upgrade() {
let auth = self.recently_authorized.read().unwrap();
let time_ticks = ms_monotonic();
self.multicast_authority.handle_vl2_multicast_gather(
|network_id, identity| {
auth.get(&identity.fingerprint)
.map_or(false, |t| t.get(&network_id).map_or(false, |t| *t > time_ticks))
},
time_ticks,
service.as_ref(),
service.node(),
source,
message_id,
payload,
cursor,
);
}
let auth = self.recently_authorized.read().unwrap();
let time_ticks = ms_monotonic();
self.multicast_authority.handle_vl2_multicast_gather(
|network_id, identity| {
auth.get(&identity.fingerprint)
.map_or(false, |t| t.get(&network_id).map_or(false, |t| *t > time_ticks))
},
time_ticks,
app,
node,
source,
message_id,
payload,
cursor,
);
PacketHandlerResult::Ok
}


@ -19,6 +19,7 @@ foreign-types = "0.5.0"
libc = "0.2"
lazy_static = "^1"
rand_core = "0.6.4"
ctor = "^0"
#ed25519-dalek still uses rand_core 0.5.1, and that version is incompatible with 0.6.4, so we need to import and implement both.
rand_core_051 = { package = "rand_core", version = "0.5.1" }


@ -3,7 +3,7 @@
// MacOS implementation of AES primitives since CommonCrypto seems to be faster than OpenSSL, especially on ARM64.
use std::os::raw::{c_int, c_void};
use std::ptr::{null, null_mut};
use std::sync::atomic::AtomicPtr;
use std::sync::Mutex;
use crate::secret::Secret;
use crate::secure_eq;
@ -172,25 +172,18 @@ impl AesGcm<false> {
}
}
pub struct Aes(AtomicPtr<c_void>, AtomicPtr<c_void>);
pub struct Aes(Mutex<(*mut c_void, *mut c_void)>);
impl Drop for Aes {
#[inline(always)]
fn drop(&mut self) {
unsafe {
loop {
let p = self.0.load(std::sync::atomic::Ordering::Acquire);
if !p.is_null() {
CCCryptorRelease(p);
break;
}
let p = self.0.lock().unwrap();
if !p.0.is_null() {
CCCryptorRelease(p.0);
}
loop {
let p = self.1.load(std::sync::atomic::Ordering::Acquire);
if !p.is_null() {
CCCryptorRelease(p);
break;
}
if !p.1.is_null() {
CCCryptorRelease(p.1);
}
}
}
@ -238,7 +231,7 @@ impl Aes {
),
0
);
Self(AtomicPtr::new(p0), AtomicPtr::new(p1))
Self(Mutex::new((p0, p1)))
}
}
@ -247,16 +240,8 @@ impl Aes {
assert_eq!(data.len(), 16);
unsafe {
let mut data_out_written = 0;
loop {
let p = self.0.load(std::sync::atomic::Ordering::Acquire);
if !p.is_null() {
CCCryptorUpdate(p, data.as_ptr().cast(), 16, data.as_mut_ptr().cast(), 16, &mut data_out_written);
self.0.store(p, std::sync::atomic::Ordering::Release);
break;
} else {
std::thread::yield_now();
}
}
let p = self.0.lock().unwrap();
CCCryptorUpdate(p.0, data.as_ptr().cast(), 16, data.as_mut_ptr().cast(), 16, &mut data_out_written);
}
}
@ -265,16 +250,8 @@ impl Aes {
assert_eq!(data.len(), 16);
unsafe {
let mut data_out_written = 0;
loop {
let p = self.1.load(std::sync::atomic::Ordering::Acquire);
if !p.is_null() {
CCCryptorUpdate(p, data.as_ptr().cast(), 16, data.as_mut_ptr().cast(), 16, &mut data_out_written);
self.1.store(p, std::sync::atomic::Ordering::Release);
break;
} else {
std::thread::yield_now();
}
}
let p = self.0.lock().unwrap();
CCCryptorUpdate(p.1, data.as_ptr().cast(), 16, data.as_mut_ptr().cast(), 16, &mut data_out_written);
}
}
}
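This AtomicPtr-spin-loop-to-Mutex change matches the "Turns out a Mutex is not really any slower" note in the commit message: both CCCryptor contexts now live behind one lock. Below is a minimal, self-contained sketch of the pattern, with hypothetical stand-ins for the CommonCrypto calls (the real code goes through CCCryptorCreateWithMode, CCCryptorUpdate and CCCryptorRelease):

```rust
use std::ffi::c_void;
use std::sync::Mutex;

// Hypothetical stand-ins for the CommonCrypto FFI calls so the sketch is
// self-contained; the real code hands these raw pointers to CCCryptorUpdate
// and CCCryptorRelease instead.
unsafe fn cryptor_create() -> *mut c_void {
    Box::into_raw(Box::new(0u64)) as *mut c_void
}
unsafe fn cryptor_release(p: *mut c_void) {
    drop(Box::from_raw(p as *mut u64));
}

/// Encrypt and decrypt ECB contexts behind one Mutex instead of two AtomicPtr
/// spin loops; the raw pointers are only touched while the lock is held.
pub struct Aes(Mutex<(*mut c_void, *mut c_void)>);

// Sound only because the pointers are never dereferenced outside the Mutex.
unsafe impl Send for Aes {}
unsafe impl Sync for Aes {}

impl Aes {
    pub fn new() -> Self {
        unsafe { Self(Mutex::new((cryptor_create(), cryptor_create()))) }
    }

    pub fn encrypt_block_in_place(&self, data: &mut [u8]) {
        assert_eq!(data.len(), 16);
        let p = self.0.lock().unwrap();
        // CCCryptorUpdate(p.0, data.as_ptr().cast(), 16, ...) goes here in the real code.
        let _ = (p.0, data);
    }
}

impl Drop for Aes {
    fn drop(&mut self) {
        let p = self.0.lock().unwrap();
        unsafe {
            if !p.0.is_null() {
                cryptor_release(p.0);
            }
            if !p.1.is_null() {
                cryptor_release(p.1);
            }
        }
    }
}

fn main() {
    let aes = Aes::new();
    let mut block = [0u8; 16];
    aes.encrypt_block_in_place(&mut block);
}
```

For a cipher context that is held only for a 16-byte block operation, an uncontended Mutex lock is a single atomic exchange, which is why the simpler code costs little over the hand-rolled spin loop.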


@ -1,14 +1,21 @@
#[cfg(test)]
mod test {
use crate::aes::AesGcm;
use crate::init;
use crate::aes_gmac_siv::AesGmacSiv;
use crate::secret::Secret;
use hex_literal::hex;
use std::time::SystemTime;
fn to_hex(b: &[u8]) -> String {
let mut s = String::new();
for c in b.iter() {
s = format!("{}{:0>2x}", s, *c);
}
s
}
#[test]
fn aes_256_gcm() {
init();
let key = Secret::move_bytes([1u8; 32]);
let mut enc = AesGcm::<true>::new(&key);
let mut dec = AesGcm::<false>::new(&key);
@ -50,7 +57,7 @@ mod test {
}
#[test]
fn quick_benchmark() {
fn aes_256_gcm_quick_benchmark() {
let mut buf = [0_u8; 12345];
for i in 1..12345 {
buf[i] = i as u8;
@ -67,7 +74,7 @@ mod test {
}
let duration = SystemTime::now().duration_since(start).unwrap();
println!(
"AES-256-GCM encrypt benchmark: {} MiB/sec",
" AES-256-GCM encrypt benchmark: {} MiB/sec",
(((benchmark_iterations * buf.len()) as f64) / 1048576.0) / duration.as_secs_f64()
);
@ -80,7 +87,7 @@ mod test {
}
let duration = SystemTime::now().duration_since(start).unwrap();
println!(
"AES-256-GCM decrypt benchmark: {} MiB/sec",
" AES-256-GCM decrypt benchmark: {} MiB/sec",
(((benchmark_iterations * buf.len()) as f64) / 1048576.0) / duration.as_secs_f64()
);
}
@ -114,6 +121,109 @@ mod test {
}
}
#[test]
fn aes_gmac_siv_test_vectors() {
let mut test_pt = [0_u8; 65536];
let mut test_ct = [0_u8; 65536];
let mut test_aad = [0_u8; 65536];
for i in 0..65536 {
test_pt[i] = i as u8;
test_aad[i] = i as u8;
}
let mut c = AesGmacSiv::new(TV0_KEYS[0], TV0_KEYS[1]);
for (test_length, expected_ct_sha384, expected_tag) in TEST_VECTORS.iter() {
test_ct.fill(0);
c.reset();
c.encrypt_init(&(*test_length as u64).to_le_bytes());
c.encrypt_set_aad(&test_aad[0..*test_length]);
c.encrypt_first_pass(&test_pt[0..*test_length]);
c.encrypt_first_pass_finish();
c.encrypt_second_pass(&test_pt[0..*test_length], &mut test_ct[0..*test_length]);
let tag = c.encrypt_second_pass_finish();
let ct_hash = crate::hash::SHA384::hash(&test_ct[0..*test_length]).to_vec();
//println!("{} {} {}", *test_length, to_hex(ct_hash.as_slice()), to_hex(tag));
if !to_hex(ct_hash.as_slice()).eq(*expected_ct_sha384) {
panic!("test vector failed (ciphertest)");
}
if !to_hex(tag).eq(*expected_tag) {
panic!("test vector failed (tag)");
}
}
}
#[test]
fn aes_gmac_siv_encrypt_decrypt() {
let aes_key_0: [u8; 32] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let aes_key_1: [u8; 32] = [
2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let iv: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
let mut buf = [0_u8; 12345];
for i in 1..12345 {
buf[i] = i as u8;
}
let mut c = AesGmacSiv::new(&aes_key_0, &aes_key_1);
for _ in 0..256 {
c.reset();
c.encrypt_init(&iv);
c.encrypt_first_pass(&buf);
c.encrypt_first_pass_finish();
c.encrypt_second_pass_in_place(&mut buf);
let tag = c.encrypt_second_pass_finish().clone();
let sha = crate::hash::SHA384::hash(&buf).to_vec();
let sha = to_hex(sha.as_slice());
if sha != "4dc97c10abb6112a3907e5eb588ea5123719442b715da994d9756b003677719824326973960268823d924f66491a16e6" {
panic!("encrypt result hash check failed! {}", sha);
}
//println!("Encrypt OK, tag: {}, hash: {}", to_hex(&tag), sha);
c.reset();
c.decrypt_init(&tag);
c.decrypt_in_place(&mut buf);
let _ = c.decrypt_finish().expect("decrypt_finish() failed!");
for i in 1..12345 {
if buf[i] != (i & 0xff) as u8 {
panic!("decrypt data check failed!");
}
}
//println!("Decrypt OK");
}
//println!("Encrypt/decrypt test OK");
let benchmark_iterations: usize = 80000;
let start = SystemTime::now();
for _ in 0..benchmark_iterations {
c.reset();
c.encrypt_init(&iv);
c.encrypt_first_pass(&buf);
c.encrypt_first_pass_finish();
c.encrypt_second_pass_in_place(&mut buf);
let _ = c.encrypt_second_pass_finish();
}
let duration = SystemTime::now().duration_since(start).unwrap();
println!(
" AES-GMAC-SIV (legacy) encrypt benchmark: {} MiB/sec",
(((benchmark_iterations * buf.len()) as f64) / 1048576.0) / duration.as_secs_f64()
);
let start = SystemTime::now();
for _ in 0..benchmark_iterations {
c.reset();
c.decrypt_init(&buf[0..16]); // we don't care if decryption is successful to benchmark, so anything will do
c.decrypt_in_place(&mut buf);
c.decrypt_finish();
}
let duration = SystemTime::now().duration_since(start).unwrap();
println!(
" AES-GMAC-SIV (legacy) decrypt benchmark: {} MiB/sec",
(((benchmark_iterations * buf.len()) as f64) / 1048576.0) / duration.as_secs_f64()
);
}
struct GcmTV<K: 'static> {
pub key: &'static K,
pub nonce: &'static [u8; 12],
@ -123,6 +233,445 @@ mod test {
pub tag: &'static [u8; 16],
}
/// AES-GMAC-SIV test keys.
const TV0_KEYS: [&'static [u8]; 2] = [
"00000000000000000000000000000000".as_bytes(),
"11111111111111111111111111111111".as_bytes(),
];
/// AES-GMAC-SIV test vectors.
/// Test vectors consist of a series of input sizes, a SHA384 hash of a resulting ciphertext, and an expected tag.
/// Input is a standard byte array consisting of bytes 0, 1, 2, 3, ..., 255 and then cycling back to 0 over and over
/// and is provided both as ciphertext and associated data (AAD).
#[allow(unused)]
const TEST_VECTORS: [(usize, &'static str, &'static str); 85] = [
(
0,
"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b",
"43847e644239134deccf5538162c861e",
),
(
777,
"aabf892f18a620b9c3bae91bb03a74c84193e4a7b64916c6bc88b885b9ebed4134495e5f22f12e3046fbb3f26fa111a7",
"b8c318b5dcc1d672114a6f7be54ef289",
),
(
1554,
"648f551df29217f0e634b72ba6973c0eb95c7d4be8b135e550d8bcdf65b75980881bc0e03cf22589e04bedc7da1804cd",
"535b8ddd51ec82a1e850906fe321b21a",
),
(
2331,
"bfbfdffea40062e23bbdf0835e1d38d1623bebca7407908bbc6d5b3f2bfd062a2d237f091affda7348094fafda0bd1a7",
"4f521876fbb2c563051196b33c20c822",
),
(
3108,
"cc6035cab70f3a3298a5c4956ff07f179acf3771bb915c590a8a19fe5133d6d8a81c118148394dfb364af5c2fbdaadeb",
"d3adfa578c8bcd738c55ffc527358cef",
),
(
3885,
"15ec2760a21c25f9870a84ee757f3da2c261a950c2f692d75ff9e99b2d50c826c21e27e49c4cd3450fedc7e60371589f",
"a4c22d6c3d773634c2dc057e1f7c6738",
),
(
4662,
"c2afad6f034704300c34f143dcdcb86c9b954cec1ebf22e7071f288c58a2ae430d3e3748d214d1021472793d3f337dc6",
"c0601cb6cd4883102f70570c2cdc0ab6",
),
(
5439,
"8fee067f5a7a475a630f9db8b2eb80c1edc40eb4246a0f1c078e535df7d06451c6a9bde1a23ba70285690dd7100a8626",
"7352239f2302b08844309d28b13fa867",
),
(
6216,
"60095b4172438aee61e65f5379f4ef276c3632d4ac74eea7723a2201823432614aba7b4670d9bf7a5b9126ca38f3b88a",
"c0f0b0aa651965f8514b473c5406285e",
),
(
6993,
"10e754dd08b4d2a6c109fb01fce2b57d54743947e14a7e67d7efd0608baf91f7fc42a53328fe8c18d234abad8ebcdff0",
"58444988a62a99060728a7637c8499eb",
),
(
7770,
"1abc4a5dcd2696336bd0e8af20fe7fc261aa424b52cfb5ad80ee7c7c793ac44f11db3506cdbbbaed0f80000925d08d52",
"e8065c563bc6018cdcbf9aaafef767e6",
),
(
8547,
"26aaf74ae8bfc6aaf45ceee0476ea0a484304f5c36050d3e2265cb194a2f7c308213314232270608b6d3f1c11b834e33",
"ec50e4b3f6e4b3de24b3476623d08157",
),
(
9324,
"863206305d466aa9c0d0ec674572069f61fe5009767f99ec8832912725c28c49d6a106ad3f55372c922e4e169fc382ce",
"0cfac64f49e0f128d0a18d293878f222",
),
(
10101,
"bd0c0950b947a6c34f1fa6e877433b42c039a8ea7b37634c40fb47efae4958ba74ef0991cfedf3c82a0b87ef59635071",
"e0220a02b74259eeebbebede847d50f9",
),
(
10878,
"d7b9901af1dacf6a8c369b993ba1c607f9b7f073d02311c72d8449d3494d477ffc8344a1d8b488020ccfc7c80fbd27e1",
"ebe3933146734a6ade2b434f2bcd78ae",
),
(
11655,
"0ba265e3ef0bebf01a4f3490da462c7730aad6aa6c70bb9ce64a36d26d24fe213660e60e4d3301329170471f11ff8ca2",
"ec3dd4bf4cb7d527a86dd559c773a87b",
),
(
12432,
"c3b6755a1be922ec71c1e187ead36c4e6fc307c72969c64ca1e9b7339d61e1a93a74a315fd73bed8fa5797b78b19dbe5",
"5b58dcf392749bcef91056ba9475d0ef",
),
(
13209,
"2fb1a67151183daa2f0d7f0064534497357f173161349dd008499a8c1a123cc942662ecc426e2ad7743fe0ab9f5d7be1",
"c011260d328d310e2ab606aa1ef8afd4",
),
(
13986,
"6afae2a07ce9bfe30fbbfb7dcf32d755bcf357334dc5c309e58cab38ebe559f25b313a0b3ca32ff1dc41f7b99718f653",
"011bf43cfbbb7ae5986f8e0fc87771a9",
),
(
14763,
"cc6215c115eb6411f4712c2289f5bf0ccb5151635f9f9ceac7c1b62d8d2f4d26498079d0289f83aeb26e97b5b924ffc4",
"a015034a8d5bc83cc76c6983a5ba19ab",
),
(
15540,
"3cebce794e947341c4ceec444ca43c6ac57c6f58de462bfec7566cbd59a1b6f2eae774120e29521e76120a604d1a12d9",
"d373cd2bd9000655141ac632880eca40",
),
(
16317,
"899147b98d78bb5d137dc7c4f03be7eca82bcca19cc3a701261332923707aed2e6719d35d2f2bf067cd1d193a53529cf",
"ed223b64529299c787f49d631ce181c1",
),
(
17094,
"aecd1830958b994b2c331b90e7d8ff79f27c83a71f5797a65ade3a30b4fa5928e79140bcd03f375591d53df96fea1a4d",
"948a7c253d54bb6b65d78530c0eb7aab",
),
(
17871,
"e677ffd4ecaba5899659fefe5fe8e643004392be3be6dc5a801409870ac1e3398f47cc1d83f7a4c41925b6337e01f7fd",
"156a600c336f3ac034ca90034aa22635",
),
(
18648,
"4ee50f4a98d0bbd160add6acf76765ccdac0c1cd0bb2adbbcb22dd012a1121620b739a120df7dc4091e684ddf28eb726",
"75873467b416a7b025f9f1b015bf653a",
),
(
19425,
"aa025f32c0575af7209828fc7fc4591b41fa7cfb485e26c5401e63ca1fa05776f8b8af1769a15e81f2c663bca9b02ab3",
"5679efa7a4404e1e5c9b372782a41bf2",
),
(
20202,
"6e77ab62d2affeb27f4ef326191b3df3863c338a629f64a785505f4a5968ff59bc011c7a27951cb00e2e7d9b9bd32fec",
"36a9c4515d34f9bb962d8876ab3b5c86",
),
(
20979,
"1625b4f0e65fc66f11ba3ee6b3e20c732535654c447df6b517ced113107a1057a64477faa2af4a5ede4034bf3cff98ea",
"9058044e0f71c28d4f8d3281a3aec024",
),
(
21756,
"94efe6aa55bd77bfa58c185dec313a41003f9bef02568e72c337be4de1b46c6e5bb9a9329b4f108686489b8bc9d5f4f0",
"8d6d2c90590268a26f5e7d76351f48c1",
),
(
22533,
"7327a05fdb0ac92433dfc2c85c5e96e6ddcbdb01e079f8dafbee79c14cb4d5fd46047acd6bb0e09a98f6dd03dced2a0a",
"4e0f0a394f85bca35c68ef667aa9c244",
),
(
23310,
"93da9e356efbc8b5ae366256f4c6fc11c11fc347aaa879d591b7c1262d90adf98925f571914696054f1d09c74783561e",
"8c83c157be439280afc790ee3fd667eb",
),
(
24087,
"99b91be5ffca51b1cbc7410798b1540b5b1a3356f801ed4dc54812919c08ca5a9adc218bc51e594d97b46445a1515506",
"9436ff05729a77f673e815e464aeaa75",
),
(
24864,
"074253ad5d5a5d2b072e7aeaffa04a06119ec812a88ca43481fe5e2dce02cf6736952095cd342ec70b833c12fc1777f4",
"69d8951b96866a08efbb65f2bc31cfbc",
),
(
25641,
"c0a301f90597c05cf19e60c35378676764086b7156e455f4800347f8a6e733d644e4cc709fb9d95a9211f3e1e10c762a",
"3561c9802143c306ecc5e07e3b976d9e",
),
(
26418,
"3c839e59d945b841acb604e1b9ae3df36a291444ce0bcae336ee875beaf208bf10af7342b375429ecb92ec54d11a5907",
"3032ffdb8daee11b2e739132c6175615",
),
(
27195,
"3dc59b16603950dfc26a90bf036712eb088412e8de4d1b27c3fa6be6502ac12d89d194764fb53c3dc7d90fa696ba5a16",
"49436717edff7cd67c9a1be16d524f07",
),
(
27972,
"4fbc0d40ff13376b8ed5382890cdea337b4a0c9c31b477c4008d2ef8299bd5ab771ba70b1b4b743f8f7caa1f0164d1a1",
"64a9856a3bb81dc81ff1bc1025192dc9",
),
(
28749,
"6ab191aa6327f229cc94e8c7b1b7ee30bc723e6aeaf3050eb7d14cb491c3513254e9b19894c2b4f071d298401fd31945",
"101f2ffea60f246a3b57c4a530d67cf1",
),
(
29526,
"d06dece58e6c7345986aae4b7f15b3317653f5387d6262f389b5cbbe804568124a876eabb89204e96b3c0f7b552df3c4",
"5c0e873adba65a9f4cb24cce4f194b18",
),
(
30303,
"7a33c1268eafdc1f89ad460fa4ded8d3df9a3cabe4339706877878c64a2c8080cf3fa5ea7f2f24744e3341476b1eb5a5",
"b7dc708fc46ce5cde24a31ad549fec83",
),
(
31080,
"37bf1f9fca6d705b989b2d63259ca924dc860fc6027e07d9aad79b94841227739774f5d324590df45d8f41249ef742ea",
"8ead50308c281e699b79b69dad7ecb91",
),
(
31857,
"91b120c73be86f9d53326fa707cfa1411e5ac76ab998a2d7ebd73a75e3b1a04c9f0855d102184b8a3fd5d99818b0b134",
"6056d09595bd16bfa317c6f87ce64bb7",
),
(
32634,
"42cc255c06184ead57b27efd0cefb0f2c788c8962a6fd15db3f25533a7f49700bca85af916f9e985f1941a6e66943b38",
"3b15e332d2f53bb97e1a9d03e6113b97",
),
(
33411,
"737f8bb8f3fd03a9d13e50abba3a42f4491c36eda3eb215085abda733227ec490cb863ffbd68f915c8fb2926a899fbc3",
"b2c647d25c46aab4d4a5ede4a3b4576d",
),
(
34188,
"e9caa36505e19628175d1ce8b933267380099753a41e503fa2f894cea17b7692f0b27079ed33cdd1293db9a35722d561",
"a2882adfd00f22823250215b12b3a1fd",
),
(
34965,
"81ddc348ebbdfb963daa5d0c1b51bbb73cacd883d4fc4316db6bd3388779beff7be0655bbac73951f89dc53832199c11",
"f33106eb8104f3780350c6d4f82333ad",
),
(
35742,
"308ce31daf40dab707e2cb4c4a5307bc403e24c971ae1e30e998449f804a167fe5f2cf617d585851b6fe9f2b4209f09c",
"44070ac90cbf350ab92289cc063e978c",
),
(
36519,
"71f51b4bddbe8a52f18be75f9bdb3fca0773901b794de845450fb308c34775ede1a6da9a82b61e9682a29a3ef71274e2",
"0e387704298c444bf3afba0edc0c1c1c",
),
(
37296,
"478ac94eee8c5f96210003fcb478392b91f2ef6fc3a729774e5fe82a2d8d0abc54ae1d25b3eaefb061e2bd43b70ca4ea",
"fb65ebeda52cd5848d303c0677cecb7f",
),
(
38073,
"bc3a9390618da7d644be932627353e2c92024df939d2d8497fba61fae3dd822cdd3e130c1707f4a9d5d4a0cbb4b3e0b3",
"d790d529a837ec79f7cc3f66ed9a399f",
),
(
38850,
"ef0e63a53a10e56477c47e13320b8a7d330aee3a4363c850edc56c0707a2686478e5a5193f54ceb33467ab7e8a22aa21",
"6f2c18742f106f16fc290767342fb62b",
),
(
39627,
"c16f63533c099d872d9a01c326db7756e7eb488c756b9a6ebf575993d8ea2eb45c572b2e162f061e145710e0e21e8e18",
"a57afde7938b223ae5e109a03db4ee4c",
),
(
40404,
"ade484ae8c13465a73589ef14789bb6891c933453e198df84edd34b4ac5c83aa90f2cf61fa072fa4d8f5b5c4cd68fa9e",
"a01d13009db86ac442f7afd39d83309f",
),
(
41181,
"6c5c7eed0e043a0bd60bcac9b5b546e150028d70c1efefc9ff69037ef4dc1a36878b171b9f2a639df822d11054a0e405",
"6321c8622ca5866c875d340206d06a28",
),
(
41958,
"dd311c54222fb0d92858719cf5b1c51bb5e3ca2539ffd68f1dd6c7e38969495be935804855ccdcc4b4cf221fcdbda886",
"cf401eb819b5dc5cd8c909aae9b3b34b",
),
(
42735,
"31cda9d663199b32eff042dd16c0b909ba999641e77ba751c91752bfc4d595e17ec6467119e74a600b72da72ba287d0a",
"12fd6298ab5d744eb6ade3106565afad",
),
(
43512,
"11b014057d51a8384d549d5d083c4406b575df6a9295853dd8f2f84f078cc241bb90495a119126b10b9510efcb68c0d3",
"a48a49eea5dc90359ef21f32132f8604",
),
(
44289,
"b44f5dbeecd76ee7efe3fb4dfe10ba8135d7a5e4d104149f4a91c5c6ee9446d9be19fb4c9ba668b074466d3892e22228",
"07e1cbb7a19174d9b1e4d5a2c741cc14",
),
(
45066,
"d87bbba3a3c739cab622386c89aeb685a70009fab1a606bd34622adfa3a75a05b58d56ee6b9874d414db38a6a32927b3",
"a27cd252712cd2a1a2d95dea39f888d4",
),
(
45843,
"abb90e60ea13c6cb3b401b8e271637416b87fbede165dde7be1d34abe4427dae4b39b499352cacac909bc43fb94028c8",
"df3ae762b9257936feda435a61a9c3a1",
),
(
46620,
"56d1132ee6e0f85543950d2d9667244b66b0ce6414eacd1859b128ed0b9026b31a25bfdcce3d1a0ce7c39d99f609c89c",
"cfe7c3c3f1cb615e2d210cc8136443e6",
),
(
47397,
"ecb023ec4c23cf95d1848a38b359f1f590f172dee9d8fb1be6bc9c4fb2ce96f612d60d7b111de539ab8313a87b821176",
"501d24752bf55cb12239863981898a07",
),
(
48174,
"34236ab60f05bb510aa0880fec358fb2002903efa14c912cab8a399e09418f97223ca2f7b8d6798c11d39e79032eaaa8",
"4ecaba4eae886aa429927188abab9623",
),
(
48951,
"55e8b40fad90a3d8c85a0f4d5bcf5975b8a6e2fb78377109f5b607a5e367187fbbc9a1e978aab3228fbf43ad23d0ad13",
"84c43bc30eb4a67230b6c634fe3c7782",
),
(
49728,
"14b1f896d0d01ecff4e456c3c392b1ca2bad9f1ef07713f84cdd89e663aa27ca77d80213ed57a89431eb992b11d98749",
"7f58c2f9a249f70fe1c6f9b4f65e5a1d",
),
(
50505,
"1335b1fb56196e0b371fa53ab7445845fdefcea3eb2833478deb3526e2ec888945e95ee8239b52caae5b9920ba4f43bb",
"5fd729126b236ce3e0686fc706dce20f",
),
(
51282,
"0d1983a6cab870c5e78f89a11dd30e7d2c71a3882f8bba3e71dc1b96a2d9fc6cc6d91d683b74456b886de34df792cfda",
"7731ae6e6c54dfde12f6116357e812ea",
),
(
52059,
"9d619fb4aa8441baaefed7b778693c291f2c1441b206ec135930fac3529d26587ac36f4472949e0b198b51c0c5a9d0f8",
"39db2c996aea28996e03d576c118630f",
),
(
52836,
"31dca4fa285878ba3efc3b66a248a078b69a11c3c73f81077377c4ffcb7002627aad5faa955e3141c1d8508aad68c8f6",
"32ac1e5a09e7e629ff95f30aa9b69c00",
),
(
53613,
"931a9969cf2bb02302c32b1eecd4933805e2da403d85aaf98c82c68129fb95f089eb85c65a6fcbc7d81bedb39de0cabb",
"1a6f54b87c12868da530eac94d99eb31",
),
(
54390,
"2f0742565801a37810ecb3f50a6f782e73a369a790d1a6a85135e7ffa12fc063db8909ab9eca7cf7308832887a6149d1",
"1b18ed6a8f901b7947626216839f0643",
),
(
55167,
"901defbd308b54deef89acd0d94e4387b370f9d2e6f870d72da2e447ed3ebe69c5f9f144488bd6207a732102160bff47",
"1e0e6a05fcc0794121f617e28cfac1a0",
),
(
55944,
"df984a5f7475250155dc4733a746e98446dc93a56a3f3bff691ddfef7deefb32b1da1b0e7e15cce443831ebfb3e30ada",
"876121af882d0ebeae38f111f3d4b6e8",
),
(
56721,
"acb693ed837b33561408cb1eed636e0082ac404f3fd72d277fa146ae5cd81a1fde3645f4cdc7babd8ba044b78075cb67",
"5b90ed6c7943fc6da623c536e2ff1352",
),
(
57498,
"dffb54bf5938e812076cfbf15cd524d72a189566c7980363a49dd89fb49e230d9742ef0b0e1ac543dca14366d735d152",
"22aee072457306e32747fbbbc3ae127c",
),
(
58275,
"92dbc245a980fc78974f7a27e62c22b12a00be9d3ef8d3718ff85f6d5fbcbf1d9d1e0f0a3daeb8c2628d090550a0ff6b",
"5fa348117faba4ac8c9d9317ff44cd2d",
),
(
59052,
"57721475cb719691850696d9a8ad4c28ca8ef9a7d45874ca21df4df250cb87ea60c464f4e3252e2d6161ed36c4b56d75",
"24d92ae7cac56d9c0276b06f7428d5df",
),
(
59829,
"d0936026440b5276747cb9fb7dc96de5d4e7846c233ca5f6f9354b2b39f760333483cbe99ffa905facb347242f58a7ef",
"05c57068e183f9d835e7f461202f923c",
),
(
60606,
"7b3bb3527b73a8692f076f6a503b2e09b427119543c7812db73c7c7fb2d43af9ecbd2a8a1452ac8ada96ad0bad7bb185",
"f958635a193fec0bfb958e97961381df",
),
(
61383,
"ff0d00255a36747eced86acfccd0cf9ef09faa9f44c8cf382efec462e7ead66e562a971060c3f32798ba142d9e1640a2",
"838159b222e56aadde8229ed56a14095",
),
(
62160,
"15806e088ed1428cd73ede3fecf5b60e2a616f1925004dadd2cab8e847059f795659659e82a4554f270baf88bf60af63",
"fed2aa0c9c0a73d499cc970aef21c52f",
),
(
62937,
"cfad71b23b6da51256bd1ddbd1ac77977fe10b2ad0a830a23a794cef914bf71a9519d78a5f83fc411e8d8db996a45d4e",
"e1ea412fd3e1bd91c24b6b6445e8ff43",
),
(
63714,
"7d03a3698a79b1af1663e3e485c2efdc306ecd87b2644f2e01d83a35999d6cdf12241b6114d60d107c10c0d0c9cc0d23",
"e6a3c3f3fd2d9cfcdc06cca2f59e9a83",
),
(
64491,
"e12b168cce0e82ed1db88df549f39b3ff40b5884a09fceae69c4c3db13c1c37ea79531c47b2700d1c27774a1ab7e8b35",
"4cbb14d789f5cd8eca49ce9e1d442ea1",
),
(
65268,
"056c9d1172cfa76ce7f19c605e5969c284b82dca155dc9c1ed58062ab4d5a7704e27fe69f3aa745b73f45f1cd0ee57df",
"8195187f092d52c2a8695b680568b934",
),
];
/// <https://csrc.nist.gov/Projects/cryptographic-algorithm-validation-program/CAVP-TESTING-BLOCK-CIPHER-MODES>
const NIST_AES_GCM_TEST_VECTORS: &[GcmTV<[u8; 32]>] = &[
GcmTV {


@ -845,14 +845,10 @@ impl Neg for BigNum {
#[cfg(test)]
mod tests {
use crate::{
bn::{BigNum, BigNumContext},
init,
};
use crate::bn::{BigNum, BigNumContext};
#[test]
fn test_to_from_slice() {
init();
let v0 = BigNum::from_u32(10_203_004).unwrap();
let vec = v0.to_vec();
let v1 = BigNum::from_slice(&vec).unwrap();
@ -862,7 +858,6 @@ mod tests {
#[test]
fn test_negation() {
init();
let a = BigNum::from_u32(909_829_283).unwrap();
assert!(!a.is_negative());
@ -871,7 +866,6 @@ mod tests {
#[test]
fn test_shift() {
init();
let a = BigNum::from_u32(909_829_283).unwrap();
assert!(a == &(&a << 1) >> 1);
@ -880,7 +874,6 @@ mod tests {
#[cfg(not(osslconf = "OPENSSL_NO_DEPRECATED_3_0"))]
#[test]
fn test_prime_numbers() {
init();
let a = BigNum::from_u32(19_029_017).unwrap();
let mut p = BigNum::new().unwrap();
p.generate_prime(128, true, None, Some(&a)).unwrap();
@ -893,7 +886,6 @@ mod tests {
#[cfg(ossl110)]
#[test]
fn test_secure_bn() {
init();
let a = BigNum::new().unwrap();
assert!(!a.is_secure());
@ -904,7 +896,6 @@ mod tests {
#[cfg(ossl110)]
#[test]
fn test_const_time_bn() {
init();
let a = BigNum::new().unwrap();
assert!(!a.is_const_time());


@ -127,11 +127,9 @@ impl CipherCtxRef {
#[cfg(test)]
mod test {
use super::*;
use crate::init;
#[test]
fn aes_128_ecb() {
init();
let key = [1u8; 16];
let ctx = CipherCtx::new().unwrap();
unsafe {


@ -1,3 +1,4 @@
use core::fmt;
use std::ptr;
use crate::bn::{BigNumContext, BigNumContextRef, BigNumRef};
@ -5,13 +6,11 @@ use crate::error::{cvt, cvt_n, cvt_p, ErrorStack};
use foreign_types::{foreign_type, ForeignType, ForeignTypeRef};
foreign_type! {
#[derive(Clone)]
pub unsafe type EcGroup: Send + Sync {
type CType = ffi::EC_GROUP;
fn drop = ffi::EC_GROUP_free;
}
/// Public and optional private key on the given curve.
#[derive(Clone)]
pub unsafe type EcKey {
type CType = ffi::EC_KEY;
fn drop = ffi::EC_KEY_free;
@ -125,3 +124,27 @@ impl EcPointRef {
}
}
}
impl ToOwned for EcKeyRef {
type Owned = EcKey;
fn to_owned(&self) -> EcKey {
unsafe {
let r = ffi::EC_KEY_up_ref(self.as_ptr());
assert!(r == 1);
EcKey::from_ptr(self.as_ptr())
}
}
}
impl Clone for EcKey {
fn clone(&self) -> EcKey {
(**self).to_owned()
}
}
impl fmt::Debug for EcKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "EcKey")
}
}
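The ToOwned/Clone additions above make cloning an EcKey a refcount bump on the underlying EC_KEY (via EC_KEY_up_ref) that reuses the same pointer, rather than a copy of key material. A self-contained sketch of that pattern, with a hypothetical refcounted handle standing in for the OpenSSL type:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical refcounted C-style handle standing in for ffi::EC_KEY; the real
// code calls EC_KEY_up_ref and EC_KEY_free instead of these helpers.
struct RawKey {
    refs: AtomicUsize,
}

unsafe fn key_up_ref(p: *mut RawKey) -> i32 {
    (*p).refs.fetch_add(1, Ordering::SeqCst);
    1
}

unsafe fn key_free(p: *mut RawKey) {
    if (*p).refs.fetch_sub(1, Ordering::SeqCst) == 1 {
        drop(Box::from_raw(p));
    }
}

/// Owned wrapper: Clone bumps the underlying refcount and reuses the same
/// pointer, mirroring the ToOwned/Clone impls added for EcKey above.
struct EcKey(*mut RawKey);

impl EcKey {
    fn generate() -> Self {
        EcKey(Box::into_raw(Box::new(RawKey { refs: AtomicUsize::new(1) })))
    }
}

impl Clone for EcKey {
    fn clone(&self) -> Self {
        unsafe {
            assert_eq!(key_up_ref(self.0), 1);
            EcKey(self.0)
        }
    }
}

impl Drop for EcKey {
    fn drop(&mut self) {
        unsafe { key_free(self.0) }
    }
}

fn main() {
    let a = EcKey::generate();
    let b = a.clone(); // same underlying key handle, refcount is now 2
    drop(a);
    drop(b); // last drop releases the handle
}
```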


@ -26,15 +26,21 @@ pub use aes_openssl as aes;
mod aes_tests;
#[cfg(target_os = "macos")]
pub mod aes_gmac_siv_fruity;
pub mod aes_gmac_siv_openssl;
#[cfg(target_os = "macos")]
pub use aes_gmac_siv_fruity as aes_gmac_siv;
#[cfg(not(target_os = "macos"))]
pub mod aes_gmac_siv_openssl;
#[cfg(not(target_os = "macos"))]
pub use aes_gmac_siv_openssl as aes_gmac_siv;
/// This must be called before using any function from this library.
pub fn init() {
use ctor::ctor;
#[ctor]
fn openssl_init() {
println!("OpenSSL init()");
ffi::init();
}
@ -52,4 +58,5 @@ pub fn secure_eq<A: AsRef<[u8]> + ?Sized, B: AsRef<[u8]> + ?Sized>(a: &A, b: &B)
false
}
}
pub const ZEROES: [u8; 64] = [0_u8; 64];
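Together with the ctor dependency added to Cargo.toml above, this makes OpenSSL initialization run automatically at process start, so callers no longer need to remember an explicit init(). A minimal sketch of the #[ctor] pattern; the function name and atomic flag here are illustrative:

```rust
use ctor::ctor;
use std::sync::atomic::{AtomicBool, Ordering};

static INITIALIZED: AtomicBool = AtomicBool::new(false);

// Runs once before main(), which is how the crate now triggers ffi::init()
// without exposing an init() entry point that callers could forget.
#[ctor]
fn auto_init() {
    INITIALIZED.store(true, Ordering::SeqCst);
}

fn main() {
    // By the time main() runs, the constructor has already fired.
    assert!(INITIALIZED.load(Ordering::SeqCst));
}
```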


@ -1322,11 +1322,10 @@ pub use openssl_based::*;
#[cfg(test)]
mod tests {
use crate::{init, p384::P384KeyPair, secure_eq};
use crate::{p384::P384KeyPair, secure_eq};
#[test]
fn generate_sign_verify_agree() {
init();
let kp = P384KeyPair::generate();
let kp2 = P384KeyPair::generate();


@ -16,6 +16,8 @@ lz4_flex = { version = "^0", features = ["safe-encode", "safe-decode", "checked-
serde = { version = "^1", features = ["derive"], default-features = false }
phf = { version = "^0", features = ["macros", "std"], default-features = false }
num-traits = "^0"
rmp-serde = "^1"
fastcdc = "^3"
[dev-dependencies]
rand = "*"
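The new fastcdc dependency lines up with the scatter/gather and "array chunker" work mentioned in the commit message: content-defined chunking splits a byte array at data-dependent cut points so identical content produces identical chunks. A rough sketch, assuming the crate's v2020 FastCDC iterator API and illustrative size parameters (not necessarily what this code uses):

```rust
use fastcdc::v2020::FastCDC;

fn main() {
    // Arbitrary test data; cut points depend on content, not position.
    let data: Vec<u8> = (0..262_144u32)
        .map(|i| (i.wrapping_mul(2_654_435_761) >> 11) as u8)
        .collect();

    // Minimum, average, and maximum chunk sizes in bytes (illustrative values).
    let chunker = FastCDC::new(&data, 2_048, 8_192, 65_536);
    for chunk in chunker {
        println!("offset={} length={} hash={:016x}", chunk.offset, chunk.length, chunk.hash);
    }
}
```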


@ -12,8 +12,6 @@ use zerotier_crypto::aes_gmac_siv::AesGmacSiv;
use zerotier_crypto::hash::hmac_sha384;
use zerotier_crypto::secret::Secret;
use self::v1::VERB_FLAG_COMPRESSED;
/*
* Legacy V1 protocol versions:
*
@ -75,6 +73,8 @@ pub type MessageId = u64;
/// ZeroTier VL1 and VL2 wire protocol message types.
pub mod message_type {
// VL1: Virtual Layer 1, the peer to peer network
pub const VL1_NOP: u8 = 0x00;
pub const VL1_HELLO: u8 = 0x01;
pub const VL1_ERROR: u8 = 0x02;
@ -85,6 +85,8 @@ pub mod message_type {
pub const VL1_PUSH_DIRECT_PATHS: u8 = 0x10;
pub const VL1_USER_MESSAGE: u8 = 0x14;
// VL2: Virtual Layer 2, the virtual Ethernet network
pub const VL2_MULTICAST_LIKE: u8 = 0x09;
pub const VL2_NETWORK_CREDENTIALS: u8 = 0x0a;
pub const VL2_NETWORK_CONFIG_REQUEST: u8 = 0x0b;
@ -111,6 +113,12 @@ pub mod message_type {
}
}
/// Verb (inner) flag indicating that the packet's payload (after the verb) is LZ4 compressed.
pub const MESSAGE_FLAG_COMPRESSED: u8 = 0x80;
/// Mask to get only the verb from the verb + verb flags byte.
pub const MESSAGE_TYPE_MASK: u8 = 0x1f;
/// Default maximum payload size for UDP transport.
///
/// This is small enough to traverse numerous weird networks including PPPoE and Google Cloud's
@ -201,18 +209,6 @@ pub mod v1 {
/// Byte found at FRAGMENT_INDICATOR_INDEX to indicate a fragment.
pub const FRAGMENT_INDICATOR: u8 = 0xff;
/// Verb (inner) flag indicating that the packet's payload (after the verb) is LZ4 compressed.
pub const VERB_FLAG_COMPRESSED: u8 = 0x80;
/// Verb (inner) flag indicating that payload is authenticated with HMAC-SHA384.
pub const VERB_FLAG_EXTENDED_AUTHENTICATION: u8 = 0x40;
/// Mask to get only the verb from the verb + verb flags byte.
pub const VERB_MASK: u8 = 0x1f;
/// Maximum number of verbs that the protocol can support.
pub const VERB_MAX_COUNT: usize = 32;
/// Header (outer) flag indicating that this packet has additional fragments.
pub const HEADER_FLAG_FRAGMENTED: u8 = 0x40;
@ -245,30 +241,6 @@ pub mod v1 {
/// Maximum number of hops to allow.
pub const FORWARD_MAX_HOPS: u8 = 3;
/// Attempt to compress a packet's payload with LZ4
///
/// If this returns true the destination buffer will contain a compressed packet. If false is
/// returned the contents of 'dest' are entirely undefined. This indicates that the data was not
/// compressable or some other error occurred.
pub fn compress_packet<const S: usize>(src: &[u8], dest: &mut Buffer<S>) -> bool {
if src.len() > (VERB_INDEX + 16) {
let compressed_data_size = {
let d = unsafe { dest.entire_buffer_mut() };
d[..VERB_INDEX].copy_from_slice(&src[..VERB_INDEX]);
d[VERB_INDEX] = src[VERB_INDEX] | VERB_FLAG_COMPRESSED;
lz4_flex::block::compress_into(&src[VERB_INDEX + 1..], &mut d[VERB_INDEX + 1..])
};
if compressed_data_size.is_ok() {
let compressed_data_size = compressed_data_size.unwrap();
if compressed_data_size > 0 && compressed_data_size < (src.len() - VERB_INDEX) {
unsafe { dest.set_size_unchecked(VERB_INDEX + 1 + compressed_data_size) };
return true;
}
}
}
return false;
}
/// Set header flag indicating that a packet is fragmented.
///
/// This will panic if the buffer provided doesn't contain a proper header.
@ -534,7 +506,7 @@ pub fn compress(payload: &mut [u8]) -> usize {
let mut tmp = [0u8; 65536];
if let Ok(mut compressed_size) = lz4_flex::block::compress_into(&payload[1..], &mut tmp) {
if compressed_size < (payload.len() - 1) {
payload[0] |= VERB_FLAG_COMPRESSED;
payload[0] |= MESSAGE_FLAG_COMPRESSED;
payload[1..(1 + compressed_size)].copy_from_slice(&tmp[..compressed_size]);
return 1 + compressed_size;
}
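MESSAGE_FLAG_COMPRESSED and MESSAGE_TYPE_MASK, now defined at the protocol level above, are packed into the single verb byte at the start of each message payload, and compress() sets the flag only when LZ4 actually saves space. A small sketch of how a receiver could split that byte (the helper is hypothetical; only the constants come from this diff):

```rust
const MESSAGE_FLAG_COMPRESSED: u8 = 0x80;
const MESSAGE_TYPE_MASK: u8 = 0x1f;

/// Split a verb byte into (message type, compressed?).
fn parse_verb_byte(b: u8) -> (u8, bool) {
    (b & MESSAGE_TYPE_MASK, (b & MESSAGE_FLAG_COMPRESSED) != 0)
}

fn main() {
    // 0x0b is VL2_NETWORK_CONFIG_REQUEST in the table above, with the flag set
    // as compress() would do after a successful LZ4 pass.
    let (verb, compressed) = parse_verb_byte(0x0b | MESSAGE_FLAG_COMPRESSED);
    assert_eq!(verb, 0x0b);
    assert!(compressed);
}
```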


@ -10,10 +10,10 @@ use crate::vl1::identity::IDENTITY_FINGERPRINT_SIZE;
use crate::vl1::inetaddress::InetAddress;
use crate::vl1::{Address, MAC};
use zerotier_utils::base64;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::error::InvalidFormatError;
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use zerotier_utils::{base64_decode_url_nopad, base64_encode_url_nopad};
pub const TYPE_NIL: u8 = 0;
pub const TYPE_ZEROTIER: u8 = 1;
@ -319,7 +319,7 @@ impl ToString for Endpoint {
fn to_string(&self) -> String {
match self {
Endpoint::Nil => format!("nil"),
Endpoint::ZeroTier(a, ah) => format!("zt:{}-{}", a.to_string(), base64_encode_url_nopad(ah)),
Endpoint::ZeroTier(a, ah) => format!("zt:{}-{}", a.to_string(), base64::encode_url_nopad(ah)),
Endpoint::Ethernet(m) => format!("eth:{}", m.to_string()),
Endpoint::WifiDirect(m) => format!("wifip2p:{}", m.to_string()),
Endpoint::Bluetooth(m) => format!("bt:{}", m.to_string()),
@ -327,8 +327,8 @@ impl ToString for Endpoint {
Endpoint::IpUdp(ip) => format!("udp:{}", ip.to_string()),
Endpoint::IpTcp(ip) => format!("tcp:{}", ip.to_string()),
Endpoint::Http(url) => format!("url:{}", url.clone()), // http or https
Endpoint::WebRTC(offer) => format!("webrtc:{}", base64_encode_url_nopad(offer.as_slice())),
Endpoint::ZeroTierEncap(a, ah) => format!("zte:{}-{}", a.to_string(), base64_encode_url_nopad(ah)),
Endpoint::WebRTC(offer) => format!("webrtc:{}", base64::encode_url_nopad(offer.as_slice())),
Endpoint::ZeroTierEncap(a, ah) => format!("zte:{}-{}", a.to_string(), base64::encode_url_nopad(ah)),
}
}
}
@ -351,7 +351,7 @@ impl FromStr for Endpoint {
let address_and_hash = endpoint_data.split_once("-");
if address_and_hash.is_some() {
let (address, hash) = address_and_hash.unwrap();
if let Some(hash) = base64_decode_url_nopad(hash) {
if let Some(hash) = base64::decode_url_nopad(hash) {
if hash.len() == IDENTITY_FINGERPRINT_SIZE {
if endpoint_type == "zt" {
return Ok(Endpoint::ZeroTier(Address::from_str(address)?, hash.as_slice().try_into().unwrap()));
@ -370,7 +370,7 @@ impl FromStr for Endpoint {
"tcp" => return Ok(Endpoint::IpTcp(InetAddress::from_str(endpoint_data)?)),
"url" => return Ok(Endpoint::Http(endpoint_data.into())),
"webrtc" => {
if let Some(offer) = base64_decode_url_nopad(endpoint_data) {
if let Some(offer) = base64::decode_url_nopad(endpoint_data) {
return Ok(Endpoint::WebRTC(offer));
}
}


@ -16,10 +16,11 @@ use zerotier_crypto::x25519::*;
use zerotier_crypto::{hash::*, secure_eq};
use zerotier_utils::arrayvec::ArrayVec;
use zerotier_utils::base64;
use zerotier_utils::buffer::Buffer;
use zerotier_utils::error::{InvalidFormatError, InvalidParameterError};
use zerotier_utils::hex;
use zerotier_utils::marshalable::{Marshalable, UnmarshalError};
use zerotier_utils::{base64_decode_url_nopad, base64_encode_url_nopad, hex};
use crate::protocol::{ADDRESS_SIZE, ADDRESS_SIZE_STRING, IDENTITY_POW_THRESHOLD};
use crate::vl1::Address;
@ -492,7 +493,7 @@ impl Identity {
&p384.ecdsa_self_signature,
&p384.ed25519_self_signature,
);
s.push_str(base64_encode_url_nopad(&p384_joined).as_str());
s.push_str(base64::encode_url_nopad(&p384_joined).as_str());
if self.secret.is_some() && include_private {
let secret = self.secret.as_ref().unwrap();
if secret.p384.is_some() {
@ -502,7 +503,7 @@ impl Identity {
p384_secret.ecdsa.secret_key_bytes().as_bytes(),
);
s.push(':');
s.push_str(base64_encode_url_nopad(&p384_secret_joined).as_str());
s.push_str(base64::encode_url_nopad(&p384_secret_joined).as_str());
}
}
}
@ -586,8 +587,8 @@ impl FromStr for Identity {
let keys = [
hex::from_string(keys[0].unwrap_or("")),
hex::from_string(keys[1].unwrap_or("")),
base64_decode_url_nopad(keys[2].unwrap_or("")).unwrap_or_else(|| Vec::new()),
base64_decode_url_nopad(keys[3].unwrap_or("")).unwrap_or_else(|| Vec::new()),
base64::decode_url_nopad(keys[2].unwrap_or("")).unwrap_or_else(|| Vec::new()),
base64::decode_url_nopad(keys[3].unwrap_or("")).unwrap_or_else(|| Vec::new()),
];
if keys[0].len() != C25519_PUBLIC_KEY_SIZE + ED25519_PUBLIC_KEY_SIZE {
return Err(InvalidFormatError);


@ -25,13 +25,12 @@ use zerotier_utils::gate::IntervalGate;
use zerotier_utils::hex;
use zerotier_utils::marshalable::Marshalable;
use zerotier_utils::ringbuffer::RingBuffer;
use zerotier_utils::thing::Thing;
/// Interface trait to be implemented by code that's using the ZeroTier network hypervisor.
///
/// This is analogous to a C struct full of function pointers to callbacks along with some
/// associated type definitions.
pub trait ApplicationLayer: Sync + Send {
pub trait ApplicationLayer: Sync + Send + 'static {
/// Type for local system sockets.
type LocalSocket: Sync + Send + Hash + PartialEq + Eq + Clone + ToString + Sized + 'static;
@ -56,6 +55,9 @@ pub trait ApplicationLayer: Sync + Send {
/// unbound, etc.
fn local_socket_is_valid(&self, socket: &Self::LocalSocket) -> bool;
/// Check if this node should respond to messages from a given peer at all.
fn should_respond_to(&self, id: &Valid<Identity>) -> bool;
/// Called to send a packet over the physical network (virtual -> physical).
///
/// This sends with UDP-like semantics. It should do whatever best effort it can and return.
@ -130,24 +132,6 @@ pub enum PacketHandlerResult {
/// it could also be implemented for testing or "off label" use of VL1 to carry different protocols.
#[allow(unused)]
pub trait InnerProtocolLayer: Sync + Send {
/// Check if this node should respond to messages from a given peer at all.
///
/// The default implementation always returns true.
fn should_respond_to(&self, id: &Valid<Identity>) -> bool {
true
}
/// Check if this node has any trust relationship with the provided identity.
///
/// This should return true if there is any special trust relationship. It controls things
/// like sharing of detailed P2P connectivity data, which should be limited to peers with
/// some privileged relationship like mutual membership in a network.
///
/// The default implementation always returns true.
fn has_trust_relationship(&self, id: &Valid<Identity>) -> bool {
true
}
/// Handle a packet, returning true if it was handled by the next layer.
///
/// Do not attempt to handle OK or ERROR. Instead implement handle_ok() and handle_error().
@ -155,9 +139,9 @@ pub trait InnerProtocolLayer: Sync + Send {
fn handle_packet<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
verb: u8,
@ -172,9 +156,9 @@ pub trait InnerProtocolLayer: Sync + Send {
fn handle_error<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
@ -191,9 +175,9 @@ pub trait InnerProtocolLayer: Sync + Send {
fn handle_ok<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
@ -205,12 +189,12 @@ pub trait InnerProtocolLayer: Sync + Send {
}
}
struct RootInfo {
struct RootInfo<Application: ApplicationLayer + ?Sized> {
/// Root sets to which we are a member.
sets: HashMap<String, Verified<RootSet>>,
/// Root peers and their statically defined endpoints (from root sets).
roots: HashMap<Arc<Peer>, Vec<Endpoint>>,
roots: HashMap<Arc<Peer<Application>>, Vec<Endpoint>>,
/// If this node is a root, these are the root sets to which it's a member in binary serialized form.
/// Set to None if this node is not a root, meaning it doesn't appear in any of its root sets.
@ -236,17 +220,15 @@ struct BackgroundTaskIntervals {
whois_queue_retry: IntervalGate<{ WHOIS_RETRY_INTERVAL }>,
}
struct WhoisQueueItem {
v1_proto_waiting_packets: RingBuffer<(Weak<Path>, PooledPacketBuffer), WHOIS_MAX_WAITING_PACKETS>,
struct WhoisQueueItem<Application: ApplicationLayer + ?Sized> {
v1_proto_waiting_packets:
RingBuffer<(Weak<Path<Application::LocalSocket, Application::LocalInterface>>, PooledPacketBuffer), WHOIS_MAX_WAITING_PACKETS>,
last_retry_time: i64,
retry_count: u16,
}
const PATH_MAP_SIZE: usize = std::mem::size_of::<HashMap<[u8; std::mem::size_of::<Endpoint>() + 128], Arc<Path>>>();
type PathMap<LocalSocket> = HashMap<PathKey<'static, 'static, LocalSocket>, Arc<Path>>;
/// A ZeroTier VL1 node that can communicate securely with the ZeroTier peer-to-peer network.
pub struct Node {
pub struct Node<Application: ApplicationLayer + ?Sized> {
/// A random ID generated to identify this particular running instance.
pub instance_id: [u8; 16],
@ -257,27 +239,23 @@ pub struct Node {
intervals: Mutex<BackgroundTaskIntervals>,
/// Canonicalized network paths, held as Weak<> to be automatically cleaned when no longer in use.
paths: RwLock<Thing<PATH_MAP_SIZE>>, // holds a PathMap<> but as a Thing<> to hide ApplicationLayer template parameter
paths: RwLock<HashMap<PathKey<'static, 'static, Application::LocalSocket>, Arc<Path<Application::LocalSocket, Application::LocalInterface>>>>,
/// Peers with which we are currently communicating.
peers: RwLock<HashMap<Address, Arc<Peer>>>,
peers: RwLock<HashMap<Address, Arc<Peer<Application>>>>,
/// This node's trusted roots, sorted in ascending order of quality/preference, and cluster definitions.
roots: RwLock<RootInfo>,
roots: RwLock<RootInfo<Application>>,
/// Current best root.
best_root: RwLock<Option<Arc<Peer>>>,
best_root: RwLock<Option<Arc<Peer<Application>>>>,
/// Queue of identities being looked up.
whois_queue: Mutex<HashMap<Address, WhoisQueueItem>>,
whois_queue: Mutex<HashMap<Address, WhoisQueueItem<Application>>>,
}
impl Node {
pub fn new<Application: ApplicationLayer + ?Sized>(
app: &Application,
auto_generate_identity: bool,
auto_upgrade_identity: bool,
) -> Result<Self, InvalidParameterError> {
impl<Application: ApplicationLayer + ?Sized> Node<Application> {
pub fn new(app: &Application, auto_generate_identity: bool, auto_upgrade_identity: bool) -> Result<Self, InvalidParameterError> {
let mut id = {
let id = app.load_node_identity();
if id.is_none() {
@ -308,7 +286,7 @@ impl Node {
instance_id: random::get_bytes_secure(),
identity: id,
intervals: Mutex::new(BackgroundTaskIntervals::default()),
paths: RwLock::new(Thing::new(PathMap::<Application::LocalSocket>::new())),
paths: RwLock::new(HashMap::new()),
peers: RwLock::new(HashMap::new()),
roots: RwLock::new(RootInfo {
sets: HashMap::new(),
@ -323,7 +301,7 @@ impl Node {
}
#[inline]
pub fn peer(&self, a: Address) -> Option<Arc<Peer>> {
pub fn peer(&self, a: Address) -> Option<Arc<Peer<Application>>> {
self.peers.read().unwrap().get(&a).cloned()
}
@ -334,13 +312,13 @@ impl Node {
/// Get the current "best" root from among this node's trusted roots.
#[inline]
pub fn best_root(&self) -> Option<Arc<Peer>> {
pub fn best_root(&self) -> Option<Arc<Peer<Application>>> {
self.best_root.read().unwrap().clone()
}
/// Check whether a peer is a root according to any root set trusted by this node.
#[inline]
pub fn is_peer_root(&self, peer: &Peer) -> bool {
pub fn is_peer_root(&self, peer: &Peer<Application>) -> bool {
self.roots.read().unwrap().roots.keys().any(|p| p.identity.eq(&peer.identity))
}
@ -391,7 +369,7 @@ impl Node {
self.roots.read().unwrap().sets.values().cloned().map(|s| s.remove_typestate()).collect()
}
pub fn do_background_tasks<Application: ApplicationLayer + ?Sized>(&self, app: &Application) -> Duration {
pub fn do_background_tasks(&self, app: &Application) -> Duration {
const INTERVAL_MS: i64 = 1000;
const INTERVAL: Duration = Duration::from_millis(INTERVAL_MS as u64);
let time_ticks = app.time_ticks();
@ -622,7 +600,7 @@ impl Node {
let mut need_keepalive = Vec::new();
// First check all paths in read mode to avoid blocking the entire node.
for (k, path) in self.paths.read().unwrap().get::<PathMap<Application::LocalSocket>>().iter() {
for (k, path) in self.paths.read().unwrap().iter() {
if app.local_socket_is_valid(k.local_socket()) {
match path.service(time_ticks) {
PathServiceResult::Ok => {}
@ -636,19 +614,13 @@ impl Node {
// Lock in write mode and remove dead paths, doing so piecemeal to again avoid blocking.
for dp in dead_paths.iter() {
self.paths.write().unwrap().get_mut::<PathMap<Application::LocalSocket>>().remove(dp);
self.paths.write().unwrap().remove(dp);
}
// Finally run keepalive sends as a batch.
let keepalive_buf = [time_ticks as u8]; // just an arbitrary byte, no significance
for p in need_keepalive.iter() {
app.wire_send(
&p.endpoint,
Some(p.local_socket::<Application>()),
Some(p.local_interface::<Application>()),
&keepalive_buf,
0,
);
app.wire_send(&p.endpoint, Some(&p.local_socket), Some(&p.local_interface), &keepalive_buf, 0);
}
}
@ -674,7 +646,7 @@ impl Node {
INTERVAL
}
pub fn handle_incoming_physical_packet<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
pub fn handle_incoming_physical_packet<Inner: InnerProtocolLayer + ?Sized>(
&self,
app: &Application,
inner: &Inner,
@ -696,14 +668,7 @@ impl Node {
source_local_interface.to_string()
);
// An 0xff value at byte [8] means this is a ZSSP packet. This is accomplished via the
// backward compatibility hack of always having 0xff at byte [4] of 6-byte session IDs
// and by having 0xffffffffffff be the "nil" session ID for session init packets. ZSSP
// is the new V2 Noise-based forward-secure transport protocol. What follows below this
// is legacy handling of the old v1 protocol.
if packet.u8_at(8).map_or(false, |x| x == 0xff) {
todo!();
}
// TODO: detect inbound ZSSP sessions, handle ZSSP mode.
// Legacy ZeroTier V1 packet handling
if let Ok(fragment_header) = packet.struct_mut_at::<v1::FragmentHeader>(0) {
@ -712,7 +677,7 @@ impl Node {
if dest == self.identity.address {
let fragment_header = &*fragment_header; // discard mut
let path = self.canonical_path::<Application>(source_endpoint, source_local_socket, source_local_interface, time_ticks);
let path = self.canonical_path(source_endpoint, source_local_socket, source_local_interface, time_ticks);
path.log_receive_anything(time_ticks);
if fragment_header.is_fragment() {
@ -827,8 +792,8 @@ impl Node {
if let Some(forward_path) = peer.direct_path() {
app.wire_send(
&forward_path.endpoint,
Some(forward_path.local_socket::<Application>()),
Some(forward_path.local_interface::<Application>()),
Some(&forward_path.local_socket),
Some(&forward_path.local_interface),
packet.as_bytes(),
0,
);
@ -845,11 +810,11 @@ impl Node {
}
/// Enqueue and send a WHOIS query for a given address, adding the supplied packet (if any) to the list to be processed on reply.
fn whois<Application: ApplicationLayer + ?Sized>(
fn whois(
&self,
app: &Application,
address: Address,
waiting_packet: Option<(Weak<Path>, PooledPacketBuffer)>,
waiting_packet: Option<(Weak<Path<Application::LocalSocket, Application::LocalInterface>>, PooledPacketBuffer)>,
time_ticks: i64,
) {
{
@ -873,7 +838,7 @@ impl Node {
}
/// Send a WHOIS query to the current best root.
fn send_whois<Application: ApplicationLayer + ?Sized>(&self, app: &Application, mut addresses: &[Address], time_ticks: i64) {
fn send_whois(&self, app: &Application, mut addresses: &[Address], time_ticks: i64) {
debug_assert!(!addresses.is_empty());
debug_event!(app, "[vl1] [v1] sending WHOIS for {}", {
let mut tmp = String::new();
@ -905,7 +870,7 @@ impl Node {
}
/// Called by Peer when an identity is received from another node, e.g. via OK(WHOIS).
pub(crate) fn handle_incoming_identity<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
pub(crate) fn handle_incoming_identity<Inner: InnerProtocolLayer + ?Sized>(
&self,
app: &Application,
inner: &Inner,
@ -918,7 +883,7 @@ impl Node {
let mut whois_queue = self.whois_queue.lock().unwrap();
if let Some(qi) = whois_queue.get_mut(&received_identity.address) {
let address = received_identity.address;
if inner.should_respond_to(&received_identity) {
if app.should_respond_to(&received_identity) {
let mut peers = self.peers.write().unwrap();
if let Some(peer) = peers.get(&address).cloned().or_else(|| {
Peer::new(&self.identity, received_identity, time_ticks)
@ -957,31 +922,23 @@ impl Node {
}
/// Get the canonical Path object corresponding to an endpoint.
pub(crate) fn canonical_path<Application: ApplicationLayer + ?Sized>(
pub(crate) fn canonical_path(
&self,
ep: &Endpoint,
local_socket: &Application::LocalSocket,
local_interface: &Application::LocalInterface,
time_ticks: i64,
) -> Arc<Path> {
) -> Arc<Path<Application::LocalSocket, Application::LocalInterface>> {
let paths = self.paths.read().unwrap();
if let Some(path) = paths.get::<PathMap<Application::LocalSocket>>().get(&PathKey::Ref(ep, local_socket)) {
if let Some(path) = paths.get(&PathKey::Ref(ep, local_socket)) {
path.clone()
} else {
drop(paths);
self.paths
.write()
.unwrap()
.get_mut::<PathMap<Application::LocalSocket>>()
.entry(PathKey::Copied(ep.clone(), local_socket.clone()))
.or_insert_with(|| {
Arc::new(Path::new::<Application>(
ep.clone(),
local_socket.clone(),
local_interface.clone(),
time_ticks,
))
})
.or_insert_with(|| Arc::new(Path::new(ep.clone(), local_socket.clone(), local_interface.clone(), time_ticks)))
.clone()
}
}


@ -7,10 +7,8 @@ use std::sync::Mutex;
use crate::protocol;
use crate::vl1::endpoint::Endpoint;
use crate::vl1::node::*;
use zerotier_crypto::random;
use zerotier_utils::thing::Thing;
use zerotier_utils::NEVER_HAPPENED_TICKS;
pub(crate) const SERVICE_INTERVAL_MS: i64 = protocol::PATH_KEEPALIVE_INTERVAL;
@ -26,27 +24,22 @@ pub(crate) enum PathServiceResult {
/// These are maintained in Node and canonicalized so that all unique paths have
/// one and only one unique path object. That enables statistics to be tracked
/// for them and uniform application of things like keepalives.
pub struct Path {
pub struct Path<LocalSocket, LocalInterface> {
pub endpoint: Endpoint,
local_socket: Thing<64>,
local_interface: Thing<64>,
pub local_socket: LocalSocket,
pub local_interface: LocalInterface,
last_send_time_ticks: AtomicI64,
last_receive_time_ticks: AtomicI64,
create_time_ticks: i64,
fragmented_packets: Mutex<HashMap<u64, protocol::v1::FragmentedPacket, PacketIdHasher>>,
}
impl Path {
pub(crate) fn new<Application: ApplicationLayer + ?Sized>(
endpoint: Endpoint,
local_socket: Application::LocalSocket,
local_interface: Application::LocalInterface,
time_ticks: i64,
) -> Self {
impl<LocalSocket, LocalInterface> Path<LocalSocket, LocalInterface> {
pub(crate) fn new(endpoint: Endpoint, local_socket: LocalSocket, local_interface: LocalInterface, time_ticks: i64) -> Self {
Self {
endpoint,
local_socket: Thing::new(local_socket), // enlarge Thing<> if this panics
local_interface: Thing::new(local_interface), // enlarge Thing<> if this panics
local_socket,
local_interface,
last_send_time_ticks: AtomicI64::new(NEVER_HAPPENED_TICKS),
last_receive_time_ticks: AtomicI64::new(NEVER_HAPPENED_TICKS),
create_time_ticks: time_ticks,
@ -54,16 +47,6 @@ impl Path {
}
}
#[inline(always)]
pub(crate) fn local_socket<Application: ApplicationLayer + ?Sized>(&self) -> &Application::LocalSocket {
self.local_socket.get()
}
#[inline(always)]
pub(crate) fn local_interface<Application: ApplicationLayer + ?Sized>(&self) -> &Application::LocalInterface {
self.local_interface.get()
}
/// Receive a fragment and return a FragmentedPacket if the entire packet was assembled.
/// This returns None if more fragments are needed to assemble the packet.
pub(crate) fn v1_proto_receive_fragment(


@ -24,11 +24,11 @@ use crate::{VERSION_MAJOR, VERSION_MINOR, VERSION_REVISION};
pub(crate) const SERVICE_INTERVAL_MS: i64 = 10000;
pub struct Peer {
pub struct Peer<Application: ApplicationLayer + ?Sized> {
pub identity: Valid<Identity>,
v1_proto_static_secret: v1::SymmetricSecret,
paths: Mutex<Vec<PeerPath>>,
paths: Mutex<Vec<PeerPath<Application>>>,
pub(crate) last_send_time_ticks: AtomicI64,
pub(crate) last_receive_time_ticks: AtomicI64,
@ -41,8 +41,8 @@ pub struct Peer {
remote_node_info: RwLock<RemoteNodeInfo>,
}
struct PeerPath {
path: Weak<Path>,
struct PeerPath<Application: ApplicationLayer + ?Sized> {
path: Weak<Path<Application::LocalSocket, Application::LocalInterface>>,
last_receive_time_ticks: i64,
}
@ -53,11 +53,11 @@ struct RemoteNodeInfo {
}
/// Sort a list of paths by quality or priority, with best paths first.
fn prioritize_paths<Application: ApplicationLayer + ?Sized>(paths: &mut Vec<PeerPath>) {
fn prioritize_paths<Application: ApplicationLayer + ?Sized>(paths: &mut Vec<PeerPath<Application>>) {
paths.sort_unstable_by(|a, b| a.last_receive_time_ticks.cmp(&b.last_receive_time_ticks).reverse());
}
impl Peer {
impl<Application: ApplicationLayer + ?Sized> Peer<Application> {
/// Create a new peer.
///
/// This only returns None if this_node_identity does not have its secrets or if some
@@ -113,7 +113,7 @@ impl Peer {
/// Get current best path or None if there are no direct paths to this peer.
#[inline]
pub fn direct_path(&self) -> Option<Arc<Path>> {
pub fn direct_path(&self) -> Option<Arc<Path<Application::LocalSocket, Application::LocalInterface>>> {
for p in self.paths.lock().unwrap().iter() {
let pp = p.path.upgrade();
if pp.is_some() {
@@ -125,7 +125,7 @@ impl Peer {
/// Get either the current best direct path or an indirect path via e.g. a root.
#[inline]
pub fn path(&self, node: &Node) -> Option<Arc<Path>> {
pub fn path(&self, node: &Node<Application>) -> Option<Arc<Path<Application::LocalSocket, Application::LocalInterface>>> {
let direct_path = self.direct_path();
if direct_path.is_some() {
return direct_path;
@@ -136,7 +136,7 @@ impl Peer {
return None;
}
fn learn_path<Application: ApplicationLayer + ?Sized>(&self, app: &Application, new_path: &Arc<Path>, time_ticks: i64) {
fn learn_path(&self, app: &Application, new_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>, time_ticks: i64) {
let mut paths = self.paths.lock().unwrap();
// TODO: check path filter
@@ -202,7 +202,7 @@ impl Peer {
}
/// Called every SERVICE_INTERVAL_MS by the background service loop in Node.
pub(crate) fn service<Application: ApplicationLayer + ?Sized>(&self, _: &Application, _: &Node, time_ticks: i64) -> bool {
pub(crate) fn service(&self, _: &Application, _: &Node<Application>, time_ticks: i64) -> bool {
// Prune dead paths and sort in descending order of quality.
{
let mut paths = self.paths.lock().unwrap();
@@ -223,7 +223,7 @@ impl Peer {
}
/// Send a prepared and encrypted packet using the V1 protocol with fragmentation if needed.
fn v1_proto_internal_send<Application: ApplicationLayer + ?Sized>(
fn v1_proto_internal_send(
&self,
app: &Application,
endpoint: &Endpoint,
@@ -281,11 +281,11 @@ impl Peer {
/// The builder function must append the verb (with any verb flags) and packet payload. If it returns
/// an error, the error is returned immediately and the send is aborted. None is returned if the send
/// function itself fails for some reason such as no paths being available.
pub fn send<Application: ApplicationLayer + ?Sized, R, E, BuilderFunction: FnOnce(&mut PacketBuffer) -> Result<R, E>>(
pub fn send<R, E, BuilderFunction: FnOnce(&mut PacketBuffer) -> Result<R, E>>(
&self,
app: &Application,
node: &Node,
path: Option<&Arc<Path>>,
node: &Node<Application>,
path: Option<&Arc<Path<Application::LocalSocket, Application::LocalInterface>>>,
time_ticks: i64,
builder_function: BuilderFunction,
) -> Option<Result<R, E>> {
@@ -369,8 +369,8 @@ impl Peer {
self.v1_proto_internal_send(
app,
&path.endpoint,
Some(path.local_socket::<Application>()),
Some(path.local_interface::<Application>()),
Some(&path.local_socket),
Some(&path.local_interface),
max_fragment_size,
packet,
);
@@ -385,16 +385,7 @@ impl Peer {
///
/// If explicit_endpoint is not None the packet will be sent directly to this endpoint.
/// Otherwise it will be sent via the best direct or indirect path known.
///
/// Unlike other messages HELLO is sent partially in the clear and always with the long-lived
/// static identity key. Authentication in old versions is via Poly1305 and in new versions
/// via HMAC-SHA512.
pub(crate) fn send_hello<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
explicit_endpoint: Option<&Endpoint>,
) -> bool {
pub(crate) fn send_hello(&self, app: &Application, node: &Node<Application>, explicit_endpoint: Option<&Endpoint>) -> bool {
let mut path = None;
let destination = if let Some(explicit_endpoint) = explicit_endpoint {
explicit_endpoint
@@ -420,7 +411,7 @@ impl Peer {
f.0.dest = self.identity.address.to_bytes();
f.0.src = node.identity.address.to_bytes();
f.0.flags_cipher_hops = v1::CIPHER_NOCRYPT_POLY1305;
f.1.verb = message_type::VL1_HELLO | v1::VERB_FLAG_EXTENDED_AUTHENTICATION;
f.1.verb = message_type::VL1_HELLO;
f.1.version_proto = PROTOCOL_VERSION;
f.1.version_major = VERSION_MAJOR;
f.1.version_minor = VERSION_MINOR;
@@ -454,8 +445,8 @@ impl Peer {
self.v1_proto_internal_send(
app,
destination,
Some(p.local_socket::<Application>()),
Some(p.local_interface::<Application>()),
Some(&p.local_socket),
Some(&p.local_interface),
max_fragment_size,
packet,
);
@@ -473,13 +464,13 @@ impl Peer {
/// those fragments after the main packet header and first chunk.
///
/// This returns true if the packet decrypted and passed authentication.
pub(crate) fn v1_proto_receive<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
pub(crate) fn v1_proto_receive<Inner: InnerProtocolLayer + ?Sized>(
self: &Arc<Self>,
node: &Node,
node: &Node<Application>,
app: &Application,
inner: &Inner,
time_ticks: i64,
source_path: &Arc<Path>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
packet_header: &v1::PacketHeader,
frag0: &PacketBuffer,
fragments: &[Option<PooledPacketBuffer>],
@@ -503,7 +494,7 @@ impl Peer {
};
if let Ok(mut verb) = payload.u8_at(0) {
if (verb & v1::VERB_FLAG_COMPRESSED) != 0 {
if (verb & MESSAGE_FLAG_COMPRESSED) != 0 {
let mut decompressed_payload = [0u8; v1::SIZE_MAX];
decompressed_payload[0] = verb;
if let Ok(dlen) = lz4_flex::block::decompress_into(&payload.as_bytes()[1..], &mut decompressed_payload[1..]) {
@@ -528,7 +519,7 @@ impl Peer {
}
}
verb &= v1::VERB_MASK; // mask off flags
verb &= MESSAGE_TYPE_MASK; // mask off flags
debug_event!(
app,
"[vl1] #{:0>16x} decrypted and authenticated, verb: {} ({:0>2x})",
@@ -539,7 +530,7 @@ impl Peer {
return match verb {
message_type::VL1_NOP => PacketHandlerResult::Ok,
message_type::VL1_HELLO => self.handle_incoming_hello(app, inner, node, time_ticks, message_id, source_path, &payload),
message_type::VL1_HELLO => self.handle_incoming_hello(app, node, time_ticks, message_id, source_path, &payload),
message_type::VL1_ERROR => {
self.handle_incoming_error(app, inner, node, time_ticks, source_path, packet_header.hops(), message_id, &payload)
}
@@ -554,9 +545,9 @@ impl Peer {
path_is_known,
&payload,
),
message_type::VL1_WHOIS => self.handle_incoming_whois(app, inner, node, time_ticks, message_id, &payload),
message_type::VL1_WHOIS => self.handle_incoming_whois(app, node, time_ticks, message_id, &payload),
message_type::VL1_RENDEZVOUS => self.handle_incoming_rendezvous(app, node, time_ticks, message_id, source_path, &payload),
message_type::VL1_ECHO => self.handle_incoming_echo(app, inner, node, time_ticks, message_id, &payload),
message_type::VL1_ECHO => self.handle_incoming_echo(app, node, time_ticks, message_id, &payload),
message_type::VL1_PUSH_DIRECT_PATHS => self.handle_incoming_push_direct_paths(app, node, time_ticks, source_path, &payload),
message_type::VL1_USER_MESSAGE => self.handle_incoming_user_message(app, node, time_ticks, source_path, &payload),
_ => inner.handle_packet(app, node, self, &source_path, packet_header.hops(), message_id, verb, &payload, 1),
@@ -567,17 +558,16 @@ impl Peer {
return PacketHandlerResult::Error;
}
fn handle_incoming_hello<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_hello(
&self,
app: &Application,
inner: &Inner,
node: &Node,
node: &Node<Application>,
time_ticks: i64,
message_id: MessageId,
source_path: &Arc<Path>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
payload: &PacketBuffer,
) -> PacketHandlerResult {
if !(inner.should_respond_to(&self.identity) || node.this_node_is_root() || node.is_peer_root(self)) {
if !(app.should_respond_to(&self.identity) || node.this_node_is_root() || node.is_peer_root(self)) {
debug_event!(
app,
"[vl1] dropping HELLO from {} due to lack of trust relationship",
@@ -621,13 +611,13 @@ impl Peer {
return PacketHandlerResult::Error;
}
fn handle_incoming_error<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_error<Inner: InnerProtocolLayer + ?Sized>(
self: &Arc<Self>,
app: &Application,
inner: &Inner,
node: &Node,
node: &Node<Application>,
_time_ticks: i64,
source_path: &Arc<Path>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
payload: &PacketBuffer,
@@ -659,13 +649,13 @@ impl Peer {
return PacketHandlerResult::Error;
}
fn handle_incoming_ok<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_ok<Inner: InnerProtocolLayer + ?Sized>(
self: &Arc<Self>,
app: &Application,
inner: &Inner,
node: &Node,
node: &Node<Application>,
time_ticks: i64,
source_path: &Arc<Path>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
path_is_known: bool,
@@ -761,16 +751,15 @@ impl Peer {
return PacketHandlerResult::Error;
}
fn handle_incoming_whois<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_whois(
self: &Arc<Self>,
app: &Application,
inner: &Inner,
node: &Node,
node: &Node<Application>,
time_ticks: i64,
_message_id: MessageId,
payload: &PacketBuffer,
) -> PacketHandlerResult {
if node.this_node_is_root() || inner.should_respond_to(&self.identity) {
if node.this_node_is_root() || app.should_respond_to(&self.identity) {
let mut addresses = payload.as_bytes();
while addresses.len() >= ADDRESS_SIZE {
if !self
@@ -794,29 +783,28 @@ impl Peer {
return PacketHandlerResult::Ok;
}
fn handle_incoming_rendezvous<Application: ApplicationLayer + ?Sized>(
fn handle_incoming_rendezvous(
self: &Arc<Self>,
_app: &Application,
node: &Node,
node: &Node<Application>,
_time_ticks: i64,
_message_id: MessageId,
_source_path: &Arc<Path>,
_source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
_payload: &PacketBuffer,
) -> PacketHandlerResult {
if node.is_peer_root(self) {}
return PacketHandlerResult::Ok;
}
fn handle_incoming_echo<Application: ApplicationLayer + ?Sized, Inner: InnerProtocolLayer + ?Sized>(
fn handle_incoming_echo(
&self,
app: &Application,
inner: &Inner,
node: &Node,
node: &Node<Application>,
time_ticks: i64,
message_id: MessageId,
payload: &PacketBuffer,
) -> PacketHandlerResult {
if inner.should_respond_to(&self.identity) || node.is_peer_root(self) {
if app.should_respond_to(&self.identity) || node.is_peer_root(self) {
self.send(app, node, None, time_ticks, |packet| {
let mut f: &mut OkHeader = packet.append_struct_get_mut().unwrap();
f.verb = message_type::VL1_OK;
@@ -834,44 +822,44 @@ impl Peer {
return PacketHandlerResult::Ok;
}
fn handle_incoming_push_direct_paths<Application: ApplicationLayer + ?Sized>(
fn handle_incoming_push_direct_paths(
self: &Arc<Self>,
_app: &Application,
_node: &Node,
_node: &Node<Application>,
_time_ticks: i64,
_source_path: &Arc<Path>,
_source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
_payload: &PacketBuffer,
) -> PacketHandlerResult {
PacketHandlerResult::Ok
}
fn handle_incoming_user_message<Application: ApplicationLayer + ?Sized>(
fn handle_incoming_user_message(
self: &Arc<Self>,
_app: &Application,
_node: &Node,
_node: &Node<Application>,
_time_ticks: i64,
_source_path: &Arc<Path>,
_source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
_payload: &PacketBuffer,
) -> PacketHandlerResult {
PacketHandlerResult::Ok
}
}
impl Hash for Peer {
impl<Application: ApplicationLayer + ?Sized> Hash for Peer<Application> {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
state.write_u64(self.identity.address.into());
}
}
impl PartialEq for Peer {
impl<Application: ApplicationLayer + ?Sized> PartialEq for Peer<Application> {
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
self.identity.fingerprint.eq(&other.identity.fingerprint)
}
}
impl Eq for Peer {}
impl<Application: ApplicationLayer + ?Sized> Eq for Peer<Application> {}
fn v1_proto_try_aead_decrypt(
secret: &v1::SymmetricSecret,
@@ -899,7 +887,7 @@ fn v1_proto_try_aead_decrypt(
if cipher == v1::CIPHER_SALSA2012_POLY1305 {
salsa.crypt_in_place(payload.as_bytes_mut());
Some(message_id)
} else if (payload.u8_at(0).unwrap_or(0) & v1::VERB_MASK) == message_type::VL1_HELLO {
} else if (payload.u8_at(0).unwrap_or(0) & MESSAGE_TYPE_MASK) == message_type::VL1_HELLO {
Some(message_id)
} else {
// SECURITY: fail if there is no encryption and the message is not HELLO. No other types are allowed


@@ -3,6 +3,7 @@
mod iproute;
mod multicastgroup;
mod networkid;
mod scattergather;
mod switch;
mod topology;


@@ -3,19 +3,13 @@ use std::sync::{Arc, Mutex, RwLock};
use crate::protocol;
use crate::protocol::PacketBuffer;
use crate::vl1::{Address, ApplicationLayer, Identity, Node, PacketHandlerResult, Peer, MAC};
use crate::vl1::*;
use crate::vl2::{MulticastGroup, NetworkId};
use zerotier_utils::buffer::OutOfBoundsError;
use zerotier_utils::sync::RMaybeWLockGuard;
/// Handler implementations for VL2_MULTICAST_LIKE and VL2_MULTICAST_GATHER.
///
/// Both controllers and roots will want to handle these, with the latter supporting them for legacy
/// reasons only. Regular nodes may also want to handle them in the future, so this is broken out to allow
/// easy code reuse. To integrate, call the appropriate method when the appropriate message type is
/// received and pass in a function to check whether specific network/identity combinations should be
/// processed. The GATHER implementation will send reply packets to the source peer.
pub struct MulticastAuthority {
subscriptions: RwLock<HashMap<(NetworkId, MulticastGroup), Mutex<HashMap<Address, i64>>>>,
}
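To make the integration note above concrete, here is a hedged sketch of a dispatch site for VL2_MULTICAST_LIKE; the enclosing function is hypothetical and the always-true authenticator is only a placeholder for a real network membership check (imports assumed to match this file):
// Hypothetical dispatch site that forwards a received VL2_MULTICAST_LIKE to the authority.
fn on_multicast_like<Application: ApplicationLayer + ?Sized>(
    authority: &MulticastAuthority,
    source: &Arc<Peer<Application>>,
    payload: &PacketBuffer,
    cursor: usize,
    time_ticks: i64,
) -> PacketHandlerResult {
    authority.handle_vl2_multicast_like(
        // Placeholder authenticator: accept every network/identity combination.
        // A real integration would check actual network membership here.
        |_network_id, _identity| true,
        time_ticks,
        source,
        payload,
        cursor,
    )
}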
@@ -47,11 +41,11 @@ impl MulticastAuthority {
}
/// Call for VL2_MULTICAST_LIKE packets.
pub fn handle_vl2_multicast_like<Authenticator: Fn(NetworkId, &Identity) -> bool>(
pub fn handle_vl2_multicast_like<Application: ApplicationLayer + ?Sized, Authenticator: Fn(NetworkId, &Identity) -> bool>(
&self,
auth: Authenticator,
time_ticks: i64,
source: &Arc<Peer>,
source: &Arc<Peer<Application>>,
payload: &PacketBuffer,
mut cursor: usize,
) -> PacketHandlerResult {
@@ -84,13 +78,13 @@ impl MulticastAuthority {
}
/// Call for VL2_MULTICAST_GATHER packets.
pub fn handle_vl2_multicast_gather<HostSystemImpl: ApplicationLayer + ?Sized, Authenticator: Fn(NetworkId, &Identity) -> bool>(
pub fn handle_vl2_multicast_gather<Application: ApplicationLayer + ?Sized, Authenticator: Fn(NetworkId, &Identity) -> bool>(
&self,
auth: Authenticator,
time_ticks: i64,
host_system: &HostSystemImpl,
node: &Node,
source: &Arc<Peer>,
app: &Application,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
message_id: u64,
payload: &PacketBuffer,
mut cursor: usize,
@@ -114,7 +108,7 @@ impl MulticastAuthority {
}
while !gathered.is_empty() {
source.send(host_system, node, None, time_ticks, |packet| -> Result<(), OutOfBoundsError> {
source.send(app, node, None, time_ticks, |packet| -> Result<(), OutOfBoundsError> {
let ok_header = packet.append_struct_get_mut::<protocol::OkHeader>()?;
ok_header.verb = protocol::message_type::VL1_OK;
ok_header.in_re_verb = protocol::message_type::VL2_MULTICAST_GATHER;


@@ -0,0 +1,234 @@
use std::io::Write;
use fastcdc::v2020;
use zerotier_crypto::hash::{SHA384, SHA384_HASH_SIZE};
use zerotier_utils::error::{InvalidFormatError, InvalidParameterError};
use zerotier_utils::memory::byte_array_chunks_exact;
const MAX_RECURSION_DEPTH: u8 = 64; // sanity limit, object would have to be quite huge to hit this
/// Re-assembler for scattered objects.
pub struct ScatteredObject {
data_chunks: Vec<Vec<u8>>,
need: Vec<u8>,
}
impl ScatteredObject {
/// Create a new assembler to gather an object given its root hash list.
pub fn init(hash_list: Vec<u8>) -> Self {
Self { data_chunks: Vec::new(), need: hash_list }
}
fn gather_recursive<GetChunk: FnMut(&[u8; SHA384_HASH_SIZE]) -> Option<Vec<u8>>>(
hl: &[u8],
new_hl: &mut Vec<u8>,
get_chunk: &mut GetChunk,
have_all_data_chunk_hashes: &mut bool,
depth: u8,
) -> Result<(), InvalidFormatError> {
if (hl.len() % SHA384_HASH_SIZE) != 0 || hl.is_empty() {
return Err(InvalidFormatError);
}
for h in byte_array_chunks_exact::<SHA384_HASH_SIZE>(hl) {
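// The low bit of the last hash byte marks the chunk type: 1 = chunk of hashes (recurse), 0 = chunk of data (see scatter() below).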
if (h[SHA384_HASH_SIZE - 1] & 0x01) != 0 {
if let Some(chunk) = get_chunk(h) {
if depth < MAX_RECURSION_DEPTH {
Self::gather_recursive(chunk.as_slice(), new_hl, get_chunk, have_all_data_chunk_hashes, depth + 1)?;
continue;
} else {
return Err(InvalidFormatError);
}
}
*have_all_data_chunk_hashes = false;
}
let _ = new_hl.write_all(h);
}
return Ok(());
}
/// Try to assemble this object using the supplied function to request chunks we don't have.
///
/// Once all chunks are retrieved this will return Ok(Some(object)). A return of Ok(None) means there are
/// still missing chunks that couldn't be resolved with the supplied getter. In that case this should be
/// called again once more chunks are fetched. Use need() to get an iterator over chunks that are still
/// outstanding.
///
/// Once Ok(Some(_)) or an error is returned this assembler object should be discarded. An error return
/// indicates an invalid object due to either invalid chunk data or too many recursions.
pub fn gather<GetChunk: FnMut(&[u8; SHA384_HASH_SIZE]) -> Option<Vec<u8>>>(
&mut self,
mut get_chunk: GetChunk,
) -> Result<Option<Vec<u8>>, InvalidFormatError> {
// First attempt to resolve all chunks of hashes until we have a complete in-order list of all data chunks.
let mut new_need = Vec::with_capacity(self.need.len());
let mut have_all_data_chunk_hashes = true; // set to false if we still need to get chunks of hashes
Self::gather_recursive(self.need.as_slice(), &mut new_need, &mut get_chunk, &mut have_all_data_chunk_hashes, 0)?;
std::mem::swap(&mut self.need, &mut new_need);
// Once we have all data chunks, resolve those until the entire object is re-assembled.
if have_all_data_chunk_hashes {
self.data_chunks.resize(self.need.len() / SHA384_HASH_SIZE, Vec::new());
let mut chunk_no = 0;
let mut missing_chunks = false;
for h in byte_array_chunks_exact::<SHA384_HASH_SIZE>(self.need.as_slice()) {
let dc = self.data_chunks.get_mut(chunk_no).unwrap();
if dc.is_empty() {
debug_assert_eq!(h.len(), SHA384_HASH_SIZE);
if let Some(chunk) = get_chunk(h) {
if !chunk.is_empty() {
*dc = chunk;
} else {
return Err(InvalidFormatError);
}
} else {
missing_chunks = true;
}
}
chunk_no += 1;
}
if !missing_chunks {
let mut obj_size = 0;
for dc in self.data_chunks.iter() {
obj_size += dc.len();
}
let mut obj = Vec::with_capacity(obj_size);
for dc in self.data_chunks.iter() {
let _ = obj.write_all(dc.as_slice());
}
return Ok(Some(obj));
}
}
return Ok(None);
}
/// Get an iterator of hashes currently known to be needed to reassemble this object.
///
/// This list can get longer through the course of object retrieval since incoming chunks can
/// be chunks of hashes instead of chunks of data.
pub fn need(&self) -> impl Iterator<Item = &[u8; SHA384_HASH_SIZE]> {
byte_array_chunks_exact::<SHA384_HASH_SIZE>(self.need.as_slice())
}
}
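To make the loop described above concrete, here is a hedged sketch of a single retrieval pass; the chunk cache, the gather_once helper, and the transport that would service the need() requests are illustrative stand-ins, not part of this module:
// Hypothetical helper assumed to live alongside ScatteredObject in this module.
// `cache` stands in for whatever store holds the chunks received so far.
fn gather_once(
    assembler: &mut ScatteredObject,
    cache: &std::collections::HashMap<[u8; SHA384_HASH_SIZE], Vec<u8>>,
) -> Option<Vec<u8>> {
    let result = assembler.gather(|hash| cache.get(hash).cloned());
    match result {
        Ok(Some(obj)) => Some(obj), // fully reassembled
        Ok(None) => {
            // Still incomplete: request whatever is still listed as needed,
            // then call gather() again once more chunks have arrived.
            for _still_needed in assembler.need() {
                // e.g. send a fetch request for _still_needed (transport not shown)
            }
            None
        }
        Err(_) => None, // invalid chunk data or excessive recursion; discard the assembler
    }
}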
/// Decompose an object into a series of chunks identified by SHA384 hashes.
///
/// This splits the supplied binary object into chunks using the FastCDC2020 content defined chunking
/// algorithm. For each chunk a SHA384 hash is computed and added to a hash list. If the resulting
/// hash list is larger than max_chunk_size, it is recursively chunked into chunks of hashes. Chunking
/// of the hash list is done deterministically rather than using FastCDC since the record size in these
/// chunks is always a multiple of the hash size.
///
/// The supplied function is called to output each chunk except for the root hash list, which is
/// returned. It's technically possible for the same chunk to be output more than once if there are
/// long runs of identical data in the supplied object. In this case it need only be stored once.
///
/// * `obj` - Blob to decompose
/// * `max_chunk_size` - Maximum size of any chunk including root hash list (minimum allowed: 512)
/// * `store_chunk` - Function that is called to store each chunk other than the root hash list
pub fn scatter<F: FnMut([u8; SHA384_HASH_SIZE], &[u8])>(
obj: &[u8],
max_chunk_size: u32,
mut store_chunk: F,
) -> Result<Vec<u8>, InvalidParameterError> {
if max_chunk_size < 512 {
return Err(InvalidParameterError("max chunk size must be >= 512"));
}
let mut root_hash_list = Vec::with_capacity(max_chunk_size as usize);
for chunk in v2020::FastCDC::new(obj, (max_chunk_size / 4).max(v2020::MINIMUM_MIN), max_chunk_size / 2, max_chunk_size) {
let chunk = &obj[chunk.offset..chunk.offset + chunk.length];
let mut chunk_hash = SHA384::hash(chunk);
chunk_hash[SHA384_HASH_SIZE - 1] &= 0xfe; // chunk of data
let _ = root_hash_list.write_all(&chunk_hash);
store_chunk(chunk_hash, chunk);
}
if root_hash_list.len() > (max_chunk_size as usize) {
let max_hashes_per_chunk = ((max_chunk_size / (SHA384_HASH_SIZE as u32)) * (SHA384_HASH_SIZE as u32)) as usize;
let mut new_root_hash_list = Vec::with_capacity(max_chunk_size as usize);
let mut recursion_depth = 0;
loop {
let mut r = root_hash_list.as_slice();
while !r.is_empty() {
debug_assert_eq!(new_root_hash_list.len() % SHA384_HASH_SIZE, 0);
debug_assert_eq!(r.len() % SHA384_HASH_SIZE, 0);
if (new_root_hash_list.len() + r.len()) <= (max_chunk_size as usize) {
let _ = new_root_hash_list.write_all(r);
break;
} else {
let clen = r.len().min(max_hashes_per_chunk);
let chunk = &r[..clen];
r = &r[clen..];
let mut chunk_hash = SHA384::hash(chunk);
chunk_hash[SHA384_HASH_SIZE - 1] |= 0x01; // chunk of hashes
let _ = new_root_hash_list.write_all(&chunk_hash);
store_chunk(chunk_hash, chunk);
}
}
std::mem::swap(&mut root_hash_list, &mut new_root_hash_list);
if root_hash_list.len() <= (max_chunk_size as usize) {
break;
} else {
new_root_hash_list.clear();
if recursion_depth >= MAX_RECURSION_DEPTH {
return Err(InvalidParameterError("max recursion depth exceeded"));
}
recursion_depth += 1;
}
}
}
return Ok(root_hash_list);
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[test]
fn scatter_gather_random_blobs() {
let mut random_data = Vec::new();
random_data.resize(1024 * 1024 * 8, 0);
zerotier_crypto::random::fill_bytes_secure(random_data.as_mut());
let mut chunks = HashMap::new();
for _ in 0..4 {
chunks.clear();
let test_blob = ((zerotier_crypto::random::xorshift64_random() as usize) % (random_data.len() - 1)) + 1;
let test_blob = &random_data.as_slice()[..test_blob];
let root_hash_list = scatter(test_blob, 1024, |k, v| {
//println!("{}", hex::to_string(&k));
chunks.insert(k, v.to_vec());
})
.unwrap();
let mut assembler = ScatteredObject::init(root_hash_list);
let mut gathered_blob;
loop {
gathered_blob = assembler
.gather(|c| {
if zerotier_crypto::random::xorshift64_random() < (u64::MAX / 8) {
None
} else {
chunks.get(c).cloned()
}
})
.unwrap();
if gathered_blob.is_some() {
break;
}
}
let gathered_blob = gathered_blob.unwrap();
assert!(gathered_blob.eq(test_blob));
}
}
}


@@ -11,20 +11,12 @@ pub struct Switch {}
#[allow(unused_variables)]
impl InnerProtocolLayer for Switch {
fn should_respond_to(&self, id: &zerotier_crypto::typestate::Valid<crate::vl1::Identity>) -> bool {
true
}
fn has_trust_relationship(&self, id: &zerotier_crypto::typestate::Valid<crate::vl1::Identity>) -> bool {
true
}
fn handle_packet<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
verb: u8,
@@ -37,9 +29,9 @@ impl InnerProtocolLayer for Switch {
fn handle_error<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,
@@ -54,9 +46,9 @@ impl InnerProtocolLayer for Switch {
fn handle_ok<Application: ApplicationLayer + ?Sized>(
&self,
app: &Application,
node: &Node,
source: &Arc<Peer>,
source_path: &Arc<Path>,
node: &Node<Application>,
source: &Arc<Peer<Application>>,
source_path: &Arc<Path<Application::LocalSocket, Application::LocalInterface>>,
source_hops: u8,
message_id: u64,
in_re_verb: u8,


@@ -20,6 +20,11 @@ pub struct Member<'a> {
pub name: Cow<'a, str>,
}
#[allow(unused)]
pub mod member_flag {
pub const BRIDGING_ALLOWED: u64 = 0x0001;
}
#[derive(Serialize, Deserialize, Eq, PartialEq, Clone)]
pub struct Topology<'a> {
pub timestamp: i64,

utils/src/base64.rs

@@ -0,0 +1,20 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
const BASE64_URL_SAFE_NO_PAD_ENGINE: base64::engine::fast_portable::FastPortable =
base64::engine::fast_portable::FastPortable::from(&base64::alphabet::URL_SAFE, base64::engine::fast_portable::NO_PAD);
/// Encode base64 using URL-safe alphabet and no padding.
pub fn encode_url_nopad(bytes: &[u8]) -> String {
base64::encode_engine(bytes, &BASE64_URL_SAFE_NO_PAD_ENGINE)
}
/// Decode base64 using URL-safe alphabet and no padding, or None on error.
pub fn decode_url_nopad(b64: &str) -> Option<Vec<u8>> {
base64::decode_engine(b64, &BASE64_URL_SAFE_NO_PAD_ENGINE).ok()
}
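A quick round-trip sketch of these helpers (standalone example; it assumes the utils crate is referenced as zerotier_utils, as elsewhere in this tree):
use zerotier_utils::base64::{decode_url_nopad, encode_url_nopad};

fn main() {
    let original: &[u8] = b"zerotier";
    let encoded = encode_url_nopad(original); // URL-safe alphabet, no '=' padding
    let decoded = decode_url_nopad(&encoded).expect("round trip should decode");
    assert_eq!(decoded.as_slice(), original);
}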


@@ -14,7 +14,8 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::hex;
/// Fixed size byte array with Serde serializer/deserializer for sizes over 32 elements and hex to_string().
/// Fixed size Serde serializable byte array.
/// This makes it easier to deal with blobs larger than 32 bytes (due to serde array limitations)
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct Blob<const L: usize>([u8; L]);


@@ -27,7 +27,6 @@ impl Display for OutOfBoundsError {
}
impl Debug for OutOfBoundsError {
#[inline(always)]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(self, f)
}

utils/src/cast.rs

@@ -0,0 +1,36 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::any::TypeId;
use std::mem::size_of;
/// Returns true if two types are in fact the same type.
#[inline(always)]
pub fn same_type<U: 'static, V: 'static>() -> bool {
TypeId::of::<U>() == TypeId::of::<V>() && size_of::<U>() == size_of::<V>()
}
/// Cast a reference if the types are equal, such as from a specific type to a generic that it implements.
#[inline(always)]
pub fn cast_ref<U: 'static, V: 'static>(u: &U) -> Option<&V> {
if same_type::<U, V>() {
Some(unsafe { std::mem::transmute::<&U, &V>(u) })
} else {
None
}
}
/// Cast a reference if the types are equal, such as from a specific type to a generic that it implements.
#[inline(always)]
pub fn cast_mut<U: 'static, V: 'static>(u: &mut U) -> Option<&mut V> {
if same_type::<U, V>() {
Some(unsafe { std::mem::transmute::<&mut U, &mut V>(u) })
} else {
None
}
}
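For illustration, a hedged sketch of using cast_ref to specialize a generic code path for one concrete type (the describe function and its output strings are hypothetical):
use zerotier_utils::cast::cast_ref;

// Hypothetical generic function with a fast path when T is actually u64.
fn describe<T: 'static>(value: &T) -> String {
    if let Some(n) = cast_ref::<T, u64>(value) {
        format!("u64 value: {}", n)
    } else {
        format!("opaque value of {} bytes", std::mem::size_of::<T>())
    }
}

fn main() {
    assert_eq!(describe(&42u64), "u64 value: 42");
    assert_eq!(describe(&1.5f32), "opaque value of 4 bytes");
}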


@@ -33,36 +33,3 @@ impl<const FREQ: i64> IntervalGate<FREQ> {
}
}
}
/*
/// Boolean rate limiter with atomic semantics.
#[repr(transparent)]
pub struct AtomicIntervalGate<const FREQ: i64>(AtomicI64);
impl<const FREQ: i64> Default for AtomicIntervalGate<FREQ> {
#[inline(always)]
fn default() -> Self {
Self(AtomicI64::new(crate::util::NEVER_HAPPENED_TICKS))
}
}
impl<const FREQ: i64> AtomicIntervalGate<FREQ> {
#[inline(always)]
#[allow(unused)]
pub fn new(initial_ts: i64) -> Self {
Self(AtomicI64::new(initial_ts))
}
#[inline(always)]
#[allow(unused)]
pub fn gate(&self, mut time: i64) -> bool {
let prev_time = self.0.load(Ordering::Acquire);
if (time - prev_time) < FREQ {
false
} else {
self.0.store(time, Ordering::Release);
true
}
}
}
*/


@@ -1,102 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::mem::{size_of, MaybeUninit};
use std::ptr::copy_nonoverlapping;
use crate::arrayvec::ArrayVec;
/// A fixed sized array of items to be gathered with fast check logic to return when complete.
///
/// This supports a maximum capacity of 64 and will panic if created with a larger value for C.
pub struct GatherArray<T, const C: usize> {
a: [MaybeUninit<T>; C],
have_bits: u64,
have_count: u8,
goal: u8,
}
impl<T, const C: usize> GatherArray<T, C> {
/// Create a new gather array, which must be initialized prior to use.
#[inline(always)]
pub fn new(goal: u8) -> Self {
assert!(C <= 64);
assert!(goal <= (C as u8));
assert_eq!(size_of::<[T; C]>(), size_of::<[MaybeUninit<T>; C]>());
Self {
a: unsafe { MaybeUninit::uninit().assume_init() },
have_bits: 0,
have_count: 0,
goal,
}
}
/// Add an item to the array if we don't have this index anymore, returning complete array if all parts are here.
#[inline(always)]
pub fn add_return_when_satisfied(&mut self, index: u8, value: T) -> Option<ArrayVec<T, C>> {
if index < self.goal {
let mut have = self.have_bits;
let got = 1u64.wrapping_shl(index as u32);
if (have & got) == 0 {
have |= got;
self.have_bits = have;
let count = self.have_count + 1;
self.have_count = count;
let goal = self.goal as usize;
unsafe {
self.a.get_unchecked_mut(index as usize).write(value);
if (self.have_count as usize) == goal {
debug_assert_eq!(0xffffffffffffffffu64.wrapping_shr(64 - goal as u32), have);
let mut tmp = ArrayVec::new();
copy_nonoverlapping(
self.a.as_ptr().cast::<u8>(),
tmp.a.as_mut_ptr().cast::<u8>(),
size_of::<MaybeUninit<T>>() * goal,
);
tmp.s = goal;
self.goal = 0;
return Some(tmp);
}
}
}
}
return None;
}
}
impl<T, const C: usize> Drop for GatherArray<T, C> {
#[inline]
fn drop(&mut self) {
let have = self.have_bits;
for i in 0..self.goal {
if (have & 1u64.wrapping_shl(i as u32)) != 0 {
unsafe { self.a.get_unchecked_mut(i as usize).assume_init_drop() };
}
}
self.goal = 0;
}
}
#[cfg(test)]
mod tests {
use super::GatherArray;
#[test]
fn gather_array() {
for goal in 2u8..64u8 {
let mut m = GatherArray::<u8, 64>::new(goal);
for x in 0..(goal - 1) {
assert!(m.add_return_when_satisfied(x, x).is_none());
}
let r = m.add_return_when_satisfied(goal - 1, goal - 1).unwrap();
for x in 0..goal {
assert_eq!(r.as_ref()[x as usize], x);
}
}
}
}


@@ -7,8 +7,10 @@
*/
pub mod arrayvec;
pub mod base64;
pub mod blob;
pub mod buffer;
pub mod cast;
pub mod defer;
pub mod dictionary;
pub mod error;
@@ -16,7 +18,6 @@ pub mod error;
pub mod exitcode;
pub mod flatsortedmap;
pub mod gate;
pub mod gatherarray;
pub mod hex;
pub mod io;
pub mod json;
@@ -26,9 +27,7 @@ pub mod pool;
#[cfg(feature = "tokio")]
pub mod reaper;
pub mod ringbuffer;
pub mod ringbuffermap;
pub mod sync;
pub mod thing;
pub mod varint;
#[cfg(feature = "tokio")]
@@ -38,20 +37,7 @@ pub use tokio;
pub use futures_util;
/// Initial value that should be used for monotonic tick time variables.
pub const NEVER_HAPPENED_TICKS: i64 = 0;
const BASE64_URL_SAFE_NO_PAD_ENGINE: base64::engine::fast_portable::FastPortable =
base64::engine::fast_portable::FastPortable::from(&base64::alphabet::URL_SAFE, base64::engine::fast_portable::NO_PAD);
/// Encode base64 using URL-safe alphabet and no padding.
pub fn base64_encode_url_nopad(bytes: &[u8]) -> String {
base64::encode_engine(bytes, &BASE64_URL_SAFE_NO_PAD_ENGINE)
}
/// Decode base64 using URL-safe alphabet and no padding, or None on error.
pub fn base64_decode_url_nopad(b64: &str) -> Option<Vec<u8>> {
base64::decode_engine(b64, &BASE64_URL_SAFE_NO_PAD_ENGINE).ok()
}
pub const NEVER_HAPPENED_TICKS: i64 = i64::MIN;
/// Get milliseconds since unix epoch.
#[inline]


@@ -6,6 +6,9 @@
* https://www.zerotier.com/
*/
// This is a collection of functions that use "unsafe" to do things with memory that should in fact
// be safe. Some of these may eventually get stable standard library replacements.
#[allow(unused_imports)]
use std::mem::{needs_drop, size_of, MaybeUninit};
@@ -56,6 +59,23 @@ pub fn load_raw<T: Copy>(src: &[u8]) -> T {
}
}
/// Our version of the not-yet-stable array_chunks method in slice, but only for byte arrays.
#[inline(always)]
pub fn byte_array_chunks_exact<const S: usize>(a: &[u8]) -> impl Iterator<Item = &[u8; S]> {
let mut i = 0;
let l = a.len();
std::iter::from_fn(move || {
let j = i + S;
if j <= l {
let next = unsafe { &*a.as_ptr().add(i).cast() };
i = j;
Some(next)
} else {
None
}
})
}
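For illustration, a brief standalone sketch of how this iterator behaves: it yields fixed-size array references and ignores any trailing remainder shorter than S.
use zerotier_utils::memory::byte_array_chunks_exact;

fn main() {
    let data = [1u8, 2, 3, 4, 5, 6, 7];
    let chunks: Vec<&[u8; 3]> = byte_array_chunks_exact::<3>(&data).collect();
    assert_eq!(chunks.len(), 2); // two full 3-byte chunks
    assert_eq!(chunks[0], &[1, 2, 3]);
    assert_eq!(chunks[1], &[4, 5, 6]); // the trailing 7 is not yielded
}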
/// Obtain a view into an array cast as another array.
/// This will panic if the template parameters would result in out of bounds access.
#[inline(always)]


@@ -1,265 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* (c) ZeroTier, Inc.
* https://www.zerotier.com/
*/
use std::hash::{Hash, Hasher};
use std::mem::MaybeUninit;
const EMPTY: u16 = 0xffff;
#[inline(always)]
fn xorshift64(mut x: u64) -> u64 {
x ^= x.wrapping_shl(13);
x ^= x.wrapping_shr(7);
x ^= x.wrapping_shl(17);
x
}
struct XorShiftHasher(u64);
impl XorShiftHasher {
#[inline(always)]
fn new(salt: u32) -> Self {
Self(salt as u64)
}
}
impl Hasher for XorShiftHasher {
#[inline(always)]
fn finish(&self) -> u64 {
self.0
}
#[inline(always)]
fn write(&mut self, mut bytes: &[u8]) {
let mut x = self.0;
while bytes.len() >= 8 {
x = xorshift64(x.wrapping_add(u64::from_ne_bytes(unsafe { *bytes.as_ptr().cast::<[u8; 8]>() })));
bytes = &bytes[8..];
}
while bytes.len() >= 4 {
x = xorshift64(x.wrapping_add(u32::from_ne_bytes(unsafe { *bytes.as_ptr().cast::<[u8; 4]>() }) as u64));
bytes = &bytes[4..];
}
for b in bytes.iter() {
x = xorshift64(x.wrapping_add(*b as u64));
}
self.0 = x;
}
#[inline(always)]
fn write_isize(&mut self, i: isize) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_usize(&mut self, i: usize) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_i32(&mut self, i: i32) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_u32(&mut self, i: u32) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_i64(&mut self, i: i64) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_u64(&mut self, i: u64) {
self.0 = xorshift64(self.0.wrapping_add(i));
}
}
struct Entry<K: Eq + PartialEq + Hash + Clone, V> {
key: MaybeUninit<K>,
value: MaybeUninit<V>,
bucket: u16, // which bucket is this in? EMPTY for none
next: u16, // next item in bucket's linked list, EMPTY for none
prev: u16, // previous entry to permit deletion of old entries from bucket lists
}
/// A hybrid between a circular buffer and a map.
///
/// The map has a finite capacity. If a new entry is added and there's no more room the oldest
/// entry is removed and overwritten. The same could be achieved by pairing a circular buffer
/// with a HashMap but that would be less efficient. This requires no memory allocations unless
/// the K or V types allocate memory and occupies a fixed amount of memory.
///
/// There is no explicit remove since that would require more complex logic to maintain FIFO
/// ordering for replacement of entries. Old entries just roll off the end.
///
/// This is used for things like defragmenting incoming packets to support multiple fragmented
/// packets in flight. Having no allocations is good to reduce the potential for memory
/// exhaustion attacks.
///
/// The C template parameter is the total capacity while the B parameter is the number of
/// buckets in the hash table. The maximum for both these parameters is 65535. This could be
/// increased by making the index variables larger (e.g. u32 instead of u16).
pub struct RingBufferMap<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> {
entries: [Entry<K, V>; C],
salt: u32,
buckets: [u16; B],
entry_ptr: u16,
}
impl<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> RingBufferMap<K, V, C, B> {
/// Create a new map with the supplied random salt to perturb the hashing function.
#[inline]
pub fn new(salt: u32) -> Self {
debug_assert!(C <= EMPTY as usize);
debug_assert!(B <= EMPTY as usize);
#[allow(invalid_value)]
let mut tmp: Self = unsafe { MaybeUninit::uninit().assume_init() };
// EMPTY is the maximum value of the indices, which is all 0xff, so this sets all indices to EMPTY.
unsafe { std::ptr::write_bytes(&mut tmp, 0xff, 1) };
tmp.salt = salt;
tmp.entry_ptr = 0;
tmp
}
#[inline]
pub fn get(&self, key: &K) -> Option<&V> {
let mut h = XorShiftHasher::new(self.salt);
key.hash(&mut h);
let mut e = self.buckets[(h.finish() as usize) % B];
while e != EMPTY {
let ee = &self.entries[e as usize];
debug_assert!(ee.bucket != EMPTY);
if unsafe { ee.key.assume_init_ref().eq(key) } {
return Some(unsafe { &ee.value.assume_init_ref() });
}
e = ee.next;
}
return None;
}
/// Get an entry, creating if not present, and return a mutable reference to it.
#[inline]
pub fn get_or_create_mut<CF: FnOnce() -> V>(&mut self, key: &K, create: CF) -> &mut V {
let mut h = XorShiftHasher::new(self.salt);
key.hash(&mut h);
let bucket = (h.finish() as usize) % B;
let mut e = self.buckets[bucket];
while e != EMPTY {
unsafe {
let e_ptr = &mut *self.entries.as_mut_ptr().add(e as usize);
debug_assert!(e_ptr.bucket != EMPTY);
if e_ptr.key.assume_init_ref().eq(key) {
return e_ptr.value.assume_init_mut();
}
e = e_ptr.next;
}
}
return self.internal_add(bucket, key.clone(), create());
}
/// Set a value or create a new entry if not found.
#[inline]
pub fn set(&mut self, key: K, value: V) {
let mut h = XorShiftHasher::new(self.salt);
key.hash(&mut h);
let bucket = (h.finish() as usize) % B;
let mut e = self.buckets[bucket];
while e != EMPTY {
let e_ptr = &mut self.entries[e as usize];
debug_assert!(e_ptr.bucket != EMPTY);
if unsafe { e_ptr.key.assume_init_ref().eq(&key) } {
unsafe { *e_ptr.value.assume_init_mut() = value };
return;
}
e = e_ptr.next;
}
let _ = self.internal_add(bucket, key, value);
}
#[inline]
fn internal_add(&mut self, bucket: usize, key: K, value: V) -> &mut V {
let e = (self.entry_ptr as usize) % C;
self.entry_ptr = self.entry_ptr.wrapping_add(1);
let e_ptr = unsafe { &mut *self.entries.as_mut_ptr().add(e) };
if e_ptr.bucket != EMPTY {
if e_ptr.prev != EMPTY {
self.entries[e_ptr.prev as usize].next = e_ptr.next;
} else {
self.buckets[e_ptr.bucket as usize] = e_ptr.next;
}
unsafe {
e_ptr.key.assume_init_drop();
e_ptr.value.assume_init_drop();
}
}
e_ptr.key.write(key);
e_ptr.value.write(value);
e_ptr.bucket = bucket as u16;
e_ptr.next = self.buckets[bucket];
if e_ptr.next != EMPTY {
self.entries[e_ptr.next as usize].prev = e as u16;
}
self.buckets[bucket] = e as u16;
e_ptr.prev = EMPTY;
unsafe { e_ptr.value.assume_init_mut() }
}
}
impl<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> Drop for RingBufferMap<K, V, C, B> {
#[inline]
fn drop(&mut self) {
for e in self.entries.iter_mut() {
if e.bucket != EMPTY {
unsafe {
e.key.assume_init_drop();
e.value.assume_init_drop();
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::RingBufferMap;
#[test]
fn finite_map() {
let mut m = RingBufferMap::<usize, usize, 128, 17>::new(1);
for i in 0..64 {
m.set(i, i);
}
for i in 0..64 {
assert_eq!(*m.get(&i).unwrap(), i);
}
for i in 0..256 {
m.set(i, i);
}
for i in 0..128 {
assert!(m.get(&i).is_none());
}
for i in 128..256 {
assert_eq!(*m.get(&i).unwrap(), i);
}
m.set(1000, 1000);
assert!(m.get(&128).is_none());
assert_eq!(*m.get(&129).unwrap(), 129);
assert_eq!(*m.get(&1000).unwrap(), 1000);
}
}


@@ -37,7 +37,7 @@ pub struct VL1Service<Inner: InnerProtocolLayer + ?Sized + 'static> {
vl1_data_storage: Arc<dyn VL1DataStorage>,
inner: Arc<Inner>,
buffer_pool: Arc<PacketBufferPool>,
node_container: Option<Node>, // never None, set in new()
node_container: Option<Node<Self>>, // never None, set in new()
}
struct VL1ServiceMutableState {
@@ -79,7 +79,7 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> VL1Service<Inner> {
}
#[inline(always)]
pub fn node(&self) -> &Node {
pub fn node(&self) -> &Node<Self> {
debug_assert!(self.node_container.is_some());
unsafe { self.node_container.as_ref().unwrap_unchecked() }
}
@@ -216,6 +216,12 @@ impl<Inner: InnerProtocolLayer + ?Sized + 'static> ApplicationLayer for VL1Servi
socket.is_valid()
}
#[inline]
fn should_respond_to(&self, _: &Valid<Identity>) -> bool {
// TODO: provide a way for the user of VL1Service to control this
true
}
#[inline]
fn load_node_identity(&self) -> Option<Valid<Identity>> {
self.vl1_data_storage.load_node_identity()


@@ -155,7 +155,6 @@ impl<Application: ApplicationLayer> Context<Application> {
///
/// * `max_incomplete_session_queue_size` - Maximum number of incomplete sessions in negotiation phase
pub fn new(max_incomplete_session_queue_size: usize, default_physical_mtu: usize) -> Self {
zerotier_crypto::init();
Self {
max_incomplete_session_queue_size,
default_physical_mtu: AtomicUsize::new(default_physical_mtu),