Mirror of https://github.com/zerotier/ZeroTierOne.git, synced 2025-06-05 20:13:44 +02:00
Remove some unused util code.

This commit is contained in:
parent 98e427d21b
commit 99ede32c96

3 changed files with 0 additions and 369 deletions
gatherarray.rs (deleted)
@@ -1,102 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * (c) ZeroTier, Inc.
 * https://www.zerotier.com/
 */

use std::mem::{size_of, MaybeUninit};
use std::ptr::copy_nonoverlapping;

use crate::arrayvec::ArrayVec;

/// A fixed-size array of items to be gathered, with fast check logic to return when complete.
///
/// This supports a maximum capacity of 64 and will panic if created with a larger value for C.
pub struct GatherArray<T, const C: usize> {
    a: [MaybeUninit<T>; C],
    have_bits: u64,
    have_count: u8,
    goal: u8,
}

impl<T, const C: usize> GatherArray<T, C> {
    /// Create a new gather array, which must be initialized prior to use.
    #[inline(always)]
    pub fn new(goal: u8) -> Self {
        assert!(C <= 64);
        assert!(goal <= (C as u8));
        assert_eq!(size_of::<[T; C]>(), size_of::<[MaybeUninit<T>; C]>());
        Self {
            a: unsafe { MaybeUninit::uninit().assume_init() },
            have_bits: 0,
            have_count: 0,
            goal,
        }
    }

    /// Add an item at the given index if we don't already have it, returning the complete array once all parts are here.
    #[inline(always)]
    pub fn add_return_when_satisfied(&mut self, index: u8, value: T) -> Option<ArrayVec<T, C>> {
        if index < self.goal {
            let mut have = self.have_bits;
            let got = 1u64.wrapping_shl(index as u32);
            if (have & got) == 0 {
                have |= got;
                self.have_bits = have;
                let count = self.have_count + 1;
                self.have_count = count;
                let goal = self.goal as usize;
                unsafe {
                    self.a.get_unchecked_mut(index as usize).write(value);
                    if (self.have_count as usize) == goal {
                        debug_assert_eq!(0xffffffffffffffffu64.wrapping_shr(64 - goal as u32), have);
                        let mut tmp = ArrayVec::new();
                        copy_nonoverlapping(
                            self.a.as_ptr().cast::<u8>(),
                            tmp.a.as_mut_ptr().cast::<u8>(),
                            size_of::<MaybeUninit<T>>() * goal,
                        );
                        tmp.s = goal;
                        self.goal = 0;
                        return Some(tmp);
                    }
                }
            }
        }
        return None;
    }
}
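// Note on ownership: when the gather completes, the received items are moved
// into the returned ArrayVec by a raw byte copy and `goal` is set to zero, so
// the Drop impl below iterates over nothing and cannot double-drop them. Drop
// only releases items received by a gather that never completed.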
impl<T, const C: usize> Drop for GatherArray<T, C> {
    #[inline]
    fn drop(&mut self) {
        let have = self.have_bits;
        for i in 0..self.goal {
            if (have & 1u64.wrapping_shl(i as u32)) != 0 {
                unsafe { self.a.get_unchecked_mut(i as usize).assume_init_drop() };
            }
        }
        self.goal = 0;
    }
}

#[cfg(test)]
mod tests {
    use super::GatherArray;

    #[test]
    fn gather_array() {
        for goal in 2u8..64u8 {
            let mut m = GatherArray::<u8, 64>::new(goal);
            for x in 0..(goal - 1) {
                assert!(m.add_return_when_satisfied(x, x).is_none());
            }
            let r = m.add_return_when_satisfied(goal - 1, goal - 1).unwrap();
            for x in 0..goal {
                assert_eq!(r.as_ref()[x as usize], x);
            }
        }
    }
}
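The completion check above is a bitmask trick: each received fragment index sets one bit in a u64, and the gather is done when the low `goal` bits are all set. A minimal safe sketch of the same idea, using only std and hypothetical names (Gather, add) rather than the crate's unsafe ArrayVec-based implementation:

struct Gather<T> {
    parts: Vec<Option<T>>,
    have: u64, // bit i set => fragment i has arrived
    goal: u8,  // number of fragments expected (1..=64)
}

impl<T> Gather<T> {
    fn new(goal: u8) -> Self {
        assert!(goal >= 1 && goal <= 64);
        Self { parts: (0..goal).map(|_| None).collect(), have: 0, goal }
    }

    // Record fragment `index`, returning all fragments in order once complete.
    fn add(&mut self, index: u8, value: T) -> Option<Vec<T>> {
        if index >= self.goal {
            return None; // out-of-range indices are ignored
        }
        self.have |= 1u64 << index;
        self.parts[index as usize] = Some(value);
        // Complete when every bit below `goal` is set: the same mask
        // comparison as the debug_assert in add_return_when_satisfied.
        if self.have == u64::MAX >> (64 - self.goal as u32) {
            Some(self.parts.drain(..).map(|p| p.unwrap()).collect())
        } else {
            None
        }
    }
}

fn main() {
    let mut g = Gather::new(3);
    assert!(g.add(2, "c").is_none());
    assert!(g.add(0, "a").is_none());
    assert_eq!(g.add(1, "b").unwrap(), vec!["a", "b", "c"]);
}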
lib.rs
@@ -16,7 +16,6 @@ pub mod error;
 pub mod exitcode;
 pub mod flatsortedmap;
 pub mod gate;
-pub mod gatherarray;
 pub mod hex;
 pub mod io;
 pub mod json;
@@ -26,7 +25,6 @@ pub mod pool;
 #[cfg(feature = "tokio")]
 pub mod reaper;
 pub mod ringbuffer;
-pub mod ringbuffermap;
 pub mod sync;
 pub mod thing;
 pub mod varint;
ringbuffermap.rs (deleted)
@@ -1,265 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * (c) ZeroTier, Inc.
 * https://www.zerotier.com/
 */

use std::hash::{Hash, Hasher};
use std::mem::MaybeUninit;

const EMPTY: u16 = 0xffff;

#[inline(always)]
fn xorshift64(mut x: u64) -> u64 {
    x ^= x.wrapping_shl(13);
    x ^= x.wrapping_shr(7);
    x ^= x.wrapping_shl(17);
    x
}

struct XorShiftHasher(u64);

impl XorShiftHasher {
    #[inline(always)]
    fn new(salt: u32) -> Self {
        Self(salt as u64)
    }
}

impl Hasher for XorShiftHasher {
    #[inline(always)]
    fn finish(&self) -> u64 {
        self.0
    }

    #[inline(always)]
    fn write(&mut self, mut bytes: &[u8]) {
        let mut x = self.0;
        while bytes.len() >= 8 {
            x = xorshift64(x.wrapping_add(u64::from_ne_bytes(unsafe { *bytes.as_ptr().cast::<[u8; 8]>() })));
            bytes = &bytes[8..];
        }
        while bytes.len() >= 4 {
            x = xorshift64(x.wrapping_add(u32::from_ne_bytes(unsafe { *bytes.as_ptr().cast::<[u8; 4]>() }) as u64));
            bytes = &bytes[4..];
        }
        for b in bytes.iter() {
            x = xorshift64(x.wrapping_add(*b as u64));
        }
        self.0 = x;
    }

    #[inline(always)]
    fn write_isize(&mut self, i: isize) {
        self.0 = xorshift64(self.0.wrapping_add(i as u64));
    }

    #[inline(always)]
    fn write_usize(&mut self, i: usize) {
        self.0 = xorshift64(self.0.wrapping_add(i as u64));
    }

    #[inline(always)]
    fn write_i32(&mut self, i: i32) {
        self.0 = xorshift64(self.0.wrapping_add(i as u64));
    }

    #[inline(always)]
    fn write_u32(&mut self, i: u32) {
        self.0 = xorshift64(self.0.wrapping_add(i as u64));
    }

    #[inline(always)]
    fn write_i64(&mut self, i: i64) {
        self.0 = xorshift64(self.0.wrapping_add(i as u64));
    }

    #[inline(always)]
    fn write_u64(&mut self, i: u64) {
        self.0 = xorshift64(self.0.wrapping_add(i));
    }
}
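// Usage note: std's Hash trait drives this Hasher by feeding each field of a
// key through write()/write_u64()/etc., and finish() then yields the
// accumulated xorshift state. RingBufferMap below seeds that state with a
// per-map salt so bucket placement differs from map to map.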
struct Entry<K: Eq + PartialEq + Hash + Clone, V> {
    key: MaybeUninit<K>,
    value: MaybeUninit<V>,
    bucket: u16, // which bucket is this in? EMPTY for none
    next: u16,   // next item in bucket's linked list, EMPTY for none
    prev: u16,   // previous entry to permit deletion of old entries from bucket lists
}

/// A hybrid between a circular buffer and a map.
///
/// The map has a finite capacity. If a new entry is added and there's no more room, the oldest
/// entry is removed and overwritten. The same could be achieved by pairing a circular buffer
/// with a HashMap, but that would be less efficient. This requires no memory allocations (unless
/// the K or V types themselves allocate) and occupies a fixed amount of memory.
///
/// There is no explicit remove, since that would require more complex logic to maintain FIFO
/// ordering for replacement of entries. Old entries just roll off the end.
///
/// This is used for things like defragmenting incoming packets to support multiple fragmented
/// packets in flight. Having no allocations is good to reduce the potential for memory
/// exhaustion attacks.
///
/// The C template parameter is the total capacity while the B parameter is the number of
/// buckets in the hash table. The maximum for both of these parameters is 65535. This could be
/// increased by making the index variables larger (e.g. u32 instead of u16).
pub struct RingBufferMap<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> {
    entries: [Entry<K, V>; C],
    salt: u32,
    buckets: [u16; B],
    entry_ptr: u16,
}

impl<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> RingBufferMap<K, V, C, B> {
    /// Create a new map with the supplied random salt to perturb the hashing function.
    #[inline]
    pub fn new(salt: u32) -> Self {
        debug_assert!(C <= EMPTY as usize);
        debug_assert!(B <= EMPTY as usize);
        #[allow(invalid_value)]
        let mut tmp: Self = unsafe { MaybeUninit::uninit().assume_init() };
        // EMPTY is the maximum value of the indices, which is all 0xff, so this sets all indices to EMPTY.
        unsafe { std::ptr::write_bytes(&mut tmp, 0xff, 1) };
        tmp.salt = salt;
        tmp.entry_ptr = 0;
        tmp
    }

    #[inline]
    pub fn get(&self, key: &K) -> Option<&V> {
        let mut h = XorShiftHasher::new(self.salt);
        key.hash(&mut h);
        let mut e = self.buckets[(h.finish() as usize) % B];
        while e != EMPTY {
            let ee = &self.entries[e as usize];
            debug_assert!(ee.bucket != EMPTY);
            if unsafe { ee.key.assume_init_ref().eq(key) } {
                return Some(unsafe { ee.value.assume_init_ref() });
            }
            e = ee.next;
        }
        return None;
    }

    /// Get an entry, creating it if not present, and return a mutable reference to it.
    #[inline]
    pub fn get_or_create_mut<CF: FnOnce() -> V>(&mut self, key: &K, create: CF) -> &mut V {
        let mut h = XorShiftHasher::new(self.salt);
        key.hash(&mut h);
        let bucket = (h.finish() as usize) % B;

        let mut e = self.buckets[bucket];
        while e != EMPTY {
            unsafe {
                let e_ptr = &mut *self.entries.as_mut_ptr().add(e as usize);
                debug_assert!(e_ptr.bucket != EMPTY);
                if e_ptr.key.assume_init_ref().eq(key) {
                    return e_ptr.value.assume_init_mut();
                }
                e = e_ptr.next;
            }
        }

        return self.internal_add(bucket, key.clone(), create());
    }

    /// Set a value, creating a new entry if not found.
    #[inline]
    pub fn set(&mut self, key: K, value: V) {
        let mut h = XorShiftHasher::new(self.salt);
        key.hash(&mut h);
        let bucket = (h.finish() as usize) % B;

        let mut e = self.buckets[bucket];
        while e != EMPTY {
            let e_ptr = &mut self.entries[e as usize];
            debug_assert!(e_ptr.bucket != EMPTY);
            if unsafe { e_ptr.key.assume_init_ref().eq(&key) } {
                unsafe { *e_ptr.value.assume_init_mut() = value };
                return;
            }
            e = e_ptr.next;
        }

        let _ = self.internal_add(bucket, key, value);
    }

    #[inline]
    fn internal_add(&mut self, bucket: usize, key: K, value: V) -> &mut V {
        // Claim the next ring slot; once the ring wraps this evicts the oldest entry.
        let e = (self.entry_ptr as usize) % C;
        self.entry_ptr = self.entry_ptr.wrapping_add(1);
        let e_ptr = unsafe { &mut *self.entries.as_mut_ptr().add(e) };

        if e_ptr.bucket != EMPTY {
            // Slot is occupied: unlink the old entry from its bucket's chain and drop it.
            if e_ptr.prev != EMPTY {
                self.entries[e_ptr.prev as usize].next = e_ptr.next;
            } else {
                self.buckets[e_ptr.bucket as usize] = e_ptr.next;
            }
            if e_ptr.next != EMPTY {
                // Keep the following entry's back-link consistent with the unlink.
                self.entries[e_ptr.next as usize].prev = e_ptr.prev;
            }
            unsafe {
                e_ptr.key.assume_init_drop();
                e_ptr.value.assume_init_drop();
            }
        }

        // Write the new entry and push it onto the front of its bucket's chain.
        e_ptr.key.write(key);
        e_ptr.value.write(value);
        e_ptr.bucket = bucket as u16;
        e_ptr.next = self.buckets[bucket];
        if e_ptr.next != EMPTY {
            self.entries[e_ptr.next as usize].prev = e as u16;
        }
        self.buckets[bucket] = e as u16;
        e_ptr.prev = EMPTY;
        unsafe { e_ptr.value.assume_init_mut() }
    }
}

impl<K: Eq + PartialEq + Hash + Clone, V, const C: usize, const B: usize> Drop for RingBufferMap<K, V, C, B> {
    #[inline]
    fn drop(&mut self) {
        for e in self.entries.iter_mut() {
            if e.bucket != EMPTY {
                unsafe {
                    e.key.assume_init_drop();
                    e.value.assume_init_drop();
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::RingBufferMap;

    #[test]
    fn finite_map() {
        let mut m = RingBufferMap::<usize, usize, 128, 17>::new(1);
        for i in 0..64 {
            m.set(i, i);
        }
        for i in 0..64 {
            assert_eq!(*m.get(&i).unwrap(), i);
        }

        for i in 0..256 {
            m.set(i, i);
        }
        for i in 0..128 {
            assert!(m.get(&i).is_none());
        }
        for i in 128..256 {
            assert_eq!(*m.get(&i).unwrap(), i);
        }

        m.set(1000, 1000);
        assert!(m.get(&128).is_none());
        assert_eq!(*m.get(&129).unwrap(), 129);
        assert_eq!(*m.get(&1000).unwrap(), 1000);
    }
}
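The doc comment's claim that the same behavior could be had by pairing a circular buffer with a HashMap can be made concrete. A minimal std-only sketch of that pairing (FifoMap and its members are hypothetical names, not anything from this crate), trading the fixed memory footprint for per-insert allocations:

use std::collections::{HashMap, VecDeque};
use std::hash::Hash;

// A HashMap for lookup plus a VecDeque recording insertion order, so the
// oldest entry rolls off when the map is full: FIFO, like RingBufferMap.
struct FifoMap<K: Hash + Eq + Clone, V> {
    map: HashMap<K, V>,
    order: VecDeque<K>,
    capacity: usize,
}

impl<K: Hash + Eq + Clone, V> FifoMap<K, V> {
    fn new(capacity: usize) -> Self {
        Self { map: HashMap::new(), order: VecDeque::new(), capacity }
    }

    fn set(&mut self, key: K, value: V) {
        if self.map.insert(key.clone(), value).is_none() {
            // New key: evict the oldest entry if the map is at capacity.
            if self.order.len() == self.capacity {
                if let Some(oldest) = self.order.pop_front() {
                    self.map.remove(&oldest);
                }
            }
            self.order.push_back(key);
        }
    }

    fn get(&self, key: &K) -> Option<&V> {
        self.map.get(key)
    }
}

fn main() {
    let mut m = FifoMap::new(2);
    m.set("a", 1);
    m.set("b", 2);
    m.set("c", 3); // evicts "a", the oldest entry
    assert!(m.get(&"a").is_none());
    assert_eq!(m.get(&"c"), Some(&3));
}

RingBufferMap avoided this version's allocations and rehashing by fixing both the entry array and the bucket table at compile time, which is why it suited defragmenting untrusted packet streams.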