A bunch of session reworking

Adam Ierymenko 2022-09-02 16:15:45 -04:00
parent ea5abdc3db
commit 614b84ef40
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
14 changed files with 865 additions and 421 deletions

Cargo.toml

@@ -5,6 +5,7 @@ members = [
"network-hypervisor",
"controller",
"system-service",
"utils",
]
[profile.release]

core-crypto/Cargo.toml

@@ -6,6 +6,7 @@ name = "zerotier-core-crypto"
version = "0.1.0"
[dependencies]
zerotier-utils = { path = "../utils" }
ed25519-dalek = {version = "1.0.1", features = ["std", "u64_backend"], default-features = false}
foreign-types = "0.3.1"
lazy_static = "^1"

core-crypto/src/lib.rs

@@ -3,14 +3,12 @@
pub mod aes;
pub mod aes_gmac_siv;
pub mod hash;
pub mod hex;
pub mod kbkdf;
pub mod p384;
pub mod poly1305;
pub mod random;
pub mod salsa;
pub mod secret;
pub mod varint;
pub mod x25519;
pub mod zssp;

core-crypto/src/salsa.rs

@@ -215,14 +215,12 @@ mod tests {
use crate::salsa::*;
const SALSA_20_TV0_KEY: [u8; 32] = [
0x0f, 0x62, 0xb5, 0x08, 0x5b, 0xae, 0x01, 0x54, 0xa7, 0xfa, 0x4d, 0xa0, 0xf3, 0x46, 0x99, 0xec, 0x3f, 0x92, 0xe5, 0x38, 0x8b, 0xde, 0x31, 0x84, 0xd7, 0x2a, 0x7d, 0xd0,
0x23, 0x76, 0xc9, 0x1c,
0x0f, 0x62, 0xb5, 0x08, 0x5b, 0xae, 0x01, 0x54, 0xa7, 0xfa, 0x4d, 0xa0, 0xf3, 0x46, 0x99, 0xec, 0x3f, 0x92, 0xe5, 0x38, 0x8b, 0xde, 0x31, 0x84, 0xd7, 0x2a, 0x7d, 0xd0, 0x23, 0x76, 0xc9, 0x1c,
];
const SALSA_20_TV0_IV: [u8; 8] = [0x28, 0x8f, 0xf6, 0x5d, 0xc4, 0x2b, 0x92, 0xf9];
const SALSA_20_TV0_KS: [u8; 64] = [
0x5e, 0x5e, 0x71, 0xf9, 0x01, 0x99, 0x34, 0x03, 0x04, 0xab, 0xb2, 0x2a, 0x37, 0xb6, 0x62, 0x5b, 0xf8, 0x83, 0xfb, 0x89, 0xce, 0x3b, 0x21, 0xf5, 0x4a, 0x10, 0xb8, 0x10,
0x66, 0xef, 0x87, 0xda, 0x30, 0xb7, 0x76, 0x99, 0xaa, 0x73, 0x79, 0xda, 0x59, 0x5c, 0x77, 0xdd, 0x59, 0x54, 0x2d, 0xa2, 0x08, 0xe5, 0x95, 0x4f, 0x89, 0xe4, 0x0e, 0xb7,
0xaa, 0x80, 0xa8, 0x4a, 0x61, 0x76, 0x66, 0x3f,
0x5e, 0x5e, 0x71, 0xf9, 0x01, 0x99, 0x34, 0x03, 0x04, 0xab, 0xb2, 0x2a, 0x37, 0xb6, 0x62, 0x5b, 0xf8, 0x83, 0xfb, 0x89, 0xce, 0x3b, 0x21, 0xf5, 0x4a, 0x10, 0xb8, 0x10, 0x66, 0xef, 0x87, 0xda,
0x30, 0xb7, 0x76, 0x99, 0xaa, 0x73, 0x79, 0xda, 0x59, 0x5c, 0x77, 0xdd, 0x59, 0x54, 0x2d, 0xa2, 0x08, 0xe5, 0x95, 0x4f, 0x89, 0xe4, 0x0e, 0xb7, 0xaa, 0x80, 0xa8, 0x4a, 0x61, 0x76, 0x66, 0x3f,
];
#[test]

File diff suppressed because it is too large

network-hypervisor/Cargo.toml

@@ -11,6 +11,7 @@ debug_events = []
[dependencies]
zerotier-core-crypto = { path = "../core-crypto" }
zerotier-utils = { path = "../utils" }
async-trait = "^0"
base64 = "^0"
lz4_flex = { version = "^0", features = ["safe-encode", "safe-decode", "checked-decode"] }

utils/Cargo.toml Normal file

@@ -0,0 +1,8 @@
[package]
authors = ["ZeroTier, Inc. <contact@zerotier.com>"]
edition = "2021"
license = "MPL-2.0"
name = "zerotier-utils"
version = "0.1.0"
[dependencies]

utils/rustfmt.toml Symbolic link

@@ -0,0 +1 @@
../rustfmt.toml

utils/src/arrayvec.rs Normal file

@@ -0,0 +1,106 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::mem::{size_of, MaybeUninit};
use std::ptr::{slice_from_raw_parts, slice_from_raw_parts_mut};
/// A simple vector backed by a statically sized array, with no memory allocations and no construction overhead.
pub struct ArrayVec<T, const C: usize> {
pub(crate) a: [MaybeUninit<T>; C],
pub(crate) s: usize,
}
impl<T, const C: usize> ArrayVec<T, C> {
#[inline(always)]
pub fn new() -> Self {
assert_eq!(size_of::<[T; C]>(), size_of::<[MaybeUninit<T>; C]>());
Self { a: unsafe { MaybeUninit::uninit().assume_init() }, s: 0 }
}
#[inline(always)]
pub fn push(&mut self, v: T) {
if self.s < C {
let i = self.s;
unsafe { self.a.get_unchecked_mut(i).write(v) };
self.s = i + 1;
} else {
panic!();
}
}
#[inline(always)]
pub fn try_push(&mut self, v: T) -> bool {
if self.s < C {
let i = self.s;
unsafe { self.a.get_unchecked_mut(i).write(v) };
self.s = i + 1;
true
} else {
false
}
}
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.s == 0
}
#[inline(always)]
pub fn len(&self) -> usize {
self.s
}
#[inline(always)]
pub fn pop(&mut self) -> Option<T> {
if self.s > 0 {
let i = self.s - 1;
debug_assert!(i < C);
self.s = i;
Some(unsafe { self.a.get_unchecked(i).assume_init_read() })
} else {
None
}
}
}
impl<T, const C: usize> Drop for ArrayVec<T, C> {
#[inline(always)]
fn drop(&mut self) {
for i in 0..self.s {
unsafe { self.a.get_unchecked_mut(i).assume_init_drop() };
}
}
}
impl<T, const C: usize> AsRef<[T]> for ArrayVec<T, C> {
#[inline(always)]
fn as_ref(&self) -> &[T] {
unsafe { &*slice_from_raw_parts(self.a.as_ptr().cast(), self.s) }
}
}
impl<T, const C: usize> AsMut<[T]> for ArrayVec<T, C> {
#[inline(always)]
fn as_mut(&mut self) -> &mut [T] {
unsafe { &mut *slice_from_raw_parts_mut(self.a.as_mut_ptr().cast(), self.s) }
}
}
#[cfg(test)]
mod tests {
use super::ArrayVec;
#[test]
fn array_vec() {
let mut v = ArrayVec::<usize, 128>::new();
for i in 0..128 {
v.push(i);
}
assert_eq!(v.len(), 128);
assert!(!v.try_push(1000));
assert_eq!(v.len(), 128);
for _ in 0..128 {
assert!(v.pop().is_some());
}
assert!(v.pop().is_none());
}
}
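As a quick usage sketch (illustrative, not part of the commit): because ArrayVec implements AsRef<[T]>, a filled buffer can be passed to any slice-based API without copying. This assumes the crate is imported as zerotier_utils.

use zerotier_utils::arrayvec::ArrayVec;

fn checksum(data: &[u8]) -> u8 {
    data.iter().fold(0u8, |a, b| a.wrapping_add(*b))
}

fn main() {
    let mut buf = ArrayVec::<u8, 16>::new();
    for b in b"hello" {
        buf.push(*b); // push() panics if capacity is exceeded; try_push() returns false instead
    }
    assert_eq!(buf.len(), 5);
    // AsRef<[u8]> exposes only the initialized prefix as an ordinary slice.
    assert_eq!(checksum(buf.as_ref()), checksum(b"hello"));
    assert_eq!(buf.pop(), Some(b'o'));
}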

utils/src/gatherarray.rs Normal file

@@ -0,0 +1,92 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::mem::{size_of, MaybeUninit};
use std::ptr::copy_nonoverlapping;
use crate::arrayvec::ArrayVec;
/// A fixed-size array of items to be gathered, with fast check logic that returns the whole set once it is complete.
///
/// This supports a maximum capacity of 64 and will panic if created with a larger value for C.
pub struct GatherArray<T, const C: usize> {
a: [MaybeUninit<T>; C],
have_bits: u64, // one bit per expected index; a set bit means that part has arrived
have_count: u32, // number of distinct parts received so far
goal: u32, // total parts expected; reset to 0 once the completed array is returned
}
impl<T, const C: usize> GatherArray<T, C> {
/// Create a new gather array ready to collect `goal` items.
#[inline(always)]
pub fn new(goal: u32) -> Self {
assert!(C <= 64);
assert!(goal <= (C as u32));
assert_eq!(size_of::<[T; C]>(), size_of::<[MaybeUninit<T>; C]>());
Self {
a: unsafe { MaybeUninit::uninit().assume_init() },
have_bits: 0,
have_count: 0,
goal,
}
}
/// Add an item to the array if we don't already have this index, returning the complete array once all parts are here.
#[inline(always)]
pub fn add(&mut self, index: u32, value: T) -> Option<ArrayVec<T, C>> {
if index < self.goal {
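// Each expected index owns one bit in have_bits; a set bit means that part was already received.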
let mut have = self.have_bits;
let got = 1u64.wrapping_shl(index);
if (have & got) == 0 {
have |= got;
self.have_bits = have;
let count = self.have_count + 1;
self.have_count = count;
let goal = self.goal as usize;
unsafe {
self.a.get_unchecked_mut(index as usize).write(value);
if (self.have_count as usize) == goal {
debug_assert_eq!(0xffffffffffffffffu64.wrapping_shr(64 - goal as u32), have);
let mut tmp = ArrayVec::new();
copy_nonoverlapping(self.a.as_ptr().cast::<u8>(), tmp.a.as_mut_ptr().cast::<u8>(), size_of::<MaybeUninit<T>>() * goal);
tmp.s = goal;
self.goal = 0;
return Some(tmp);
}
}
}
}
return None;
}
}
impl<T, const C: usize> Drop for GatherArray<T, C> {
#[inline(always)]
fn drop(&mut self) {
let have = self.have_bits;
for i in 0..self.goal {
if (have & 1u64.wrapping_shl(i)) != 0 {
unsafe { self.a.get_unchecked_mut(i as usize).assume_init_drop() };
}
}
self.goal = 0;
}
}
#[cfg(test)]
mod tests {
use super::GatherArray;
#[test]
fn gather_array() {
for goal in 2..64 {
let mut m = GatherArray::<u32, 64>::new(goal);
for x in 0..(goal - 1) {
assert!(m.add(x, x).is_none());
}
let r = m.add(goal - 1, goal - 1).unwrap();
for x in 0..goal {
assert_eq!(r.as_ref()[x as usize], x);
}
}
}
}
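A hypothetical sketch of the intended use, gathering parts that may arrive out of order (e.g. pieces of a fragmented packet); the values and names here are illustrative only.

use zerotier_utils::gatherarray::GatherArray;

fn main() {
    // Expect 3 parts; the capacity (8) must be >= the goal and <= 64.
    let mut parts = GatherArray::<&'static str, 8>::new(3);
    assert!(parts.add(2, "!").is_none()); // arrives out of order
    assert!(parts.add(0, "hello").is_none());
    assert!(parts.add(0, "ignored").is_none()); // duplicate index: the new value is dropped
    // The final part completes the set; the result is ordered by index.
    let whole = parts.add(1, " world").unwrap();
    assert_eq!(whole.as_ref().join(""), "hello world!");
}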

utils/src/lib.rs Normal file

@@ -0,0 +1,5 @@
pub mod arrayvec;
pub mod gatherarray;
pub mod hex;
pub mod ringbuffermap;
pub mod varint;

utils/src/ringbuffermap.rs Normal file

@@ -0,0 +1,255 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::hash::{Hash, Hasher};
use std::mem::MaybeUninit;
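/// Marsaglia's xorshift64: a fast, non-cryptographic bit mixer used only to scatter keys across hash buckets.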
#[inline(always)]
fn xorshift64(mut x: u64) -> u64 {
x ^= x.wrapping_shl(13);
x ^= x.wrapping_shr(7);
x ^= x.wrapping_shl(17);
x
}
struct XorShiftHasher(u64);
impl XorShiftHasher {
#[inline(always)]
fn new(salt: u32) -> Self {
Self(salt as u64)
}
}
impl Hasher for XorShiftHasher {
#[inline(always)]
fn finish(&self) -> u64 {
self.0
}
#[inline(always)]
fn write(&mut self, mut bytes: &[u8]) {
let mut x = self.0;
while bytes.len() >= 8 {
x = xorshift64(x.wrapping_add(u64::from_ne_bytes(unsafe { *bytes.as_ptr().cast::<[u8; 8]>() })));
bytes = &bytes[8..];
}
while bytes.len() >= 4 {
x = xorshift64(x.wrapping_add(u32::from_ne_bytes(unsafe { *bytes.as_ptr().cast::<[u8; 4]>() }) as u64));
bytes = &bytes[4..];
}
for b in bytes.iter() {
x = xorshift64(x.wrapping_add(*b as u64));
}
self.0 = x;
}
#[inline(always)]
fn write_isize(&mut self, i: isize) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_usize(&mut self, i: usize) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_i32(&mut self, i: i32) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_u32(&mut self, i: u32) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_i64(&mut self, i: i64) {
self.0 = xorshift64(self.0.wrapping_add(i as u64));
}
#[inline(always)]
fn write_u64(&mut self, i: u64) {
self.0 = xorshift64(self.0.wrapping_add(i));
}
}
struct Entry<K: Eq + PartialEq + Hash + Clone, V> {
key: MaybeUninit<K>,
value: MaybeUninit<V>,
bucket: i32, // which bucket is this in? -1 for none
next: i32, // next item in bucket's linked list, -1 for none
prev: i32, // previous entry to permit deletion of old entries from bucket lists
}
/// A hybrid between a circular buffer and a map.
///
/// The map has a finite capacity. If a new entry is added and there is no more room, the oldest
/// entry is removed and overwritten. The same behavior could be had by pairing a circular buffer
/// with a HashMap, but this is more efficient: it occupies a fixed amount of memory and performs
/// no allocations unless the K or V types themselves allocate.
///
/// This is pretty basic and doesn't have a remove function. Old entries just roll off. This
/// only contains what is needed elsewhere in the project.
///
/// The C const parameter is the total capacity, while the B parameter is the number of
/// buckets in the hash table.
pub struct RingBufferMap<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> {
entries: [Entry<K, V>; C], // fixed ring of entry slots, reused in round-robin order
buckets: [i32; B], // head index of each bucket's chain, -1 if empty
entry_ptr: u32, // monotonic counter; entry_ptr % C selects the next slot to (re)use
salt: u32, // perturbs the hash so bucket placement differs between map instances
}
impl<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> RingBufferMap<K, V, C, B> {
#[inline]
pub fn new(salt: u32) -> Self {
Self {
entries: std::array::from_fn(|_| Entry::<K, V> {
key: MaybeUninit::uninit(),
value: MaybeUninit::uninit(),
bucket: -1,
next: -1,
prev: -1,
}),
buckets: [-1; B],
entry_ptr: 0,
salt,
}
}
#[inline]
pub fn get(&self, key: &K) -> Option<&V> {
let mut h = XorShiftHasher::new(self.salt);
key.hash(&mut h);
let mut e = self.buckets[(h.finish() as usize) % B];
while e >= 0 {
let ee = &self.entries[e as usize];
debug_assert!(ee.bucket >= 0);
if unsafe { ee.key.assume_init_ref().eq(key) } {
return Some(unsafe { ee.value.assume_init_ref() });
}
e = ee.next;
}
return None;
}
/// Get an entry, creating if not present.
#[inline]
pub fn get_or_create_mut<CF: FnOnce() -> V>(&mut self, key: &K, create: CF) -> &mut V {
let mut h = XorShiftHasher::new(self.salt);
key.hash(&mut h);
let bucket = (h.finish() as usize) % B;
let mut e = self.buckets[bucket];
while e >= 0 {
unsafe {
let e_ptr = &mut *self.entries.as_mut_ptr().add(e as usize);
debug_assert!(e_ptr.bucket >= 0);
if e_ptr.key.assume_init_ref().eq(key) {
return e_ptr.value.assume_init_mut();
}
e = e_ptr.next;
}
}
return self.internal_add(bucket, key.clone(), create());
}
/// Set a value or create a new entry if not found.
#[inline]
pub fn set(&mut self, key: K, value: V) {
let mut h = XorShiftHasher::new(self.salt);
key.hash(&mut h);
let bucket = (h.finish() as usize) % B;
let mut e = self.buckets[bucket];
while e >= 0 {
let e_ptr = &mut self.entries[e as usize];
debug_assert!(e_ptr.bucket >= 0);
if unsafe { e_ptr.key.assume_init_ref().eq(&key) } {
unsafe { *e_ptr.value.assume_init_mut() = value };
return;
}
e = e_ptr.next;
}
self.internal_add(bucket, key, value);
}
#[inline]
fn internal_add(&mut self, bucket: usize, key: K, value: V) -> &mut V {
let e = (self.entry_ptr as usize) % C;
self.entry_ptr = self.entry_ptr.wrapping_add(1);
let e_ptr = unsafe { &mut *self.entries.as_mut_ptr().add(e) };
if e_ptr.bucket >= 0 {
// Unlink the evicted entry from its old bucket's chain before reusing its slot.
if e_ptr.prev >= 0 {
self.entries[e_ptr.prev as usize].next = e_ptr.next;
} else {
self.buckets[e_ptr.bucket as usize] = e_ptr.next;
}
// Also repair the successor's back-pointer; otherwise it would keep referencing
// this slot after it has been overwritten with an unrelated entry.
if e_ptr.next >= 0 {
self.entries[e_ptr.next as usize].prev = e_ptr.prev;
}
unsafe {
e_ptr.key.assume_init_drop();
e_ptr.value.assume_init_drop();
}
}
e_ptr.key.write(key);
e_ptr.value.write(value);
e_ptr.bucket = bucket as i32;
e_ptr.next = self.buckets[bucket];
if e_ptr.next >= 0 {
self.entries[e_ptr.next as usize].prev = e as i32;
}
self.buckets[bucket] = e as i32;
e_ptr.prev = -1;
unsafe { e_ptr.value.assume_init_mut() }
}
}
impl<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> Drop for RingBufferMap<K, V, C, B> {
#[inline]
fn drop(&mut self) {
for e in self.entries.iter_mut() {
if e.bucket >= 0 {
unsafe {
e.key.assume_init_drop();
e.value.assume_init_drop();
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::RingBufferMap;
#[test]
fn finite_map() {
let mut m = RingBufferMap::<usize, usize, 128, 17>::new(1);
for i in 0..64 {
m.set(i, i);
}
for i in 0..64 {
assert_eq!(*m.get(&i).unwrap(), i);
}
for i in 0..256 {
m.set(i, i);
}
for i in 0..128 {
assert!(m.get(&i).is_none());
}
for i in 128..256 {
assert_eq!(*m.get(&i).unwrap(), i);
}
m.set(1000, 1000);
assert!(m.get(&128).is_none());
assert_eq!(*m.get(&129).unwrap(), 129);
assert_eq!(*m.get(&1000).unwrap(), 1000);
}
}
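Finally, a brief usage sketch (illustrative, not from this commit) of RingBufferMap as a fixed-footprint cache: entries are created lazily, and the oldest roll off as new keys arrive. The key type, value type, and salt constant below are assumptions for the example; in practice the salt would come from a random source.

use zerotier_utils::ringbuffermap::RingBufferMap;

fn main() {
    // 64 entry slots spread over 17 hash buckets.
    let mut cache = RingBufferMap::<u64, Vec<u8>, 64, 17>::new(0x9e3779b9);
    // get_or_create_mut() inserts on first access, reusing (and evicting) the oldest slot when needed.
    cache.get_or_create_mut(&42, Vec::new).extend_from_slice(b"fragment");
    assert_eq!(cache.get(&42).map(Vec::as_slice), Some(&b"fragment"[..]));
    // After 64 further insertions the slot holding key 42 has been reused.
    for k in 100..164u64 {
        cache.set(k, vec![0u8]);
    }
    assert!(cache.get(&42).is_none());
}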