ran cargo fmt

mamoniot 2023-03-20 15:29:02 -04:00
parent f6540e129a
commit 7ec194a6d1
No known key found for this signature in database
GPG key ID: ADCCDBBE0E3D3B3B
2 changed files with 39 additions and 79 deletions


@@ -1,9 +1,12 @@
-use std::sync::{Mutex, RwLock, RwLockReadGuard, atomic::{AtomicU32, Ordering, AtomicPtr}};
-use std::mem::{self, MaybeUninit, ManuallyDrop};
-use std::ptr::{self, NonNull};
 use std::marker::PhantomData;
+use std::mem::{self, ManuallyDrop, MaybeUninit};
 use std::num::NonZeroU64;
 use std::ops::Deref;
+use std::ptr::{self, NonNull};
+use std::sync::{
+    atomic::{AtomicPtr, AtomicU32, Ordering},
+    Mutex, RwLock, RwLockReadGuard,
+};
 
 const DEFAULT_L: usize = 64;
 
@@ -32,7 +35,7 @@ struct PoolMem<T, const L: usize = DEFAULT_L> {
 /// Atomic reference counting is also implemented allowing for exceedingly complex models of shared ownership. Multiple copies of both strong and weak references to the underlying `T` can be generated that are all memory safe and borrow-checked.
 ///
 /// Allocating from a pool results in very little internal and external fragmentation in the global heap, thus saving significant amounts of memory from being used by one's program. Pools also allocate memory significantly faster on average than the global allocator. This specific pool implementation supports guaranteed constant time `alloc` and `free`.
-pub struct Pool<T, const L: usize = DEFAULT_L> (Mutex<(*mut Slot<T>, u64, *mut PoolMem<T, L>, usize)>);
+pub struct Pool<T, const L: usize = DEFAULT_L>(Mutex<(*mut Slot<T>, u64, *mut PoolMem<T, L>, usize)>);
 unsafe impl<T, const L: usize> Send for Pool<T, L> {}
 unsafe impl<T, const L: usize> Sync for Pool<T, L> {}
 
@@ -44,10 +47,9 @@ impl<T, const L: usize> Pool<T, L> {
     /// A `Pool<T>` cannot be interacted with directly; it requires an `impl StaticPool<T> for Pool<T>` implementation. See the `static_pool!` macro for an automatically generated trait implementation.
     #[inline]
     pub const fn new() -> Self {
-        Pool (Mutex::new((ptr::null_mut(), 1, ptr::null_mut(), usize::MAX)))
+        Pool(Mutex::new((ptr::null_mut(), 1, ptr::null_mut(), usize::MAX)))
     }
 
-
     #[inline(always)]
     fn create_arr() -> [MaybeUninit<Slot<T>>; L] {
         unsafe { MaybeUninit::<[MaybeUninit<Slot<T>>; L]>::uninit().assume_init() }
@@ -68,15 +70,12 @@ impl<T, const L: usize> Pool<T, L> {
             slot_ptr
         } else {
             if head_size >= L {
-                let new = Box::leak(Box::new(PoolMem {
-                    pre: head_arena,
-                    mem: Self::create_arr(),
-                }));
+                let new = Box::leak(Box::new(PoolMem { pre: head_arena, mem: Self::create_arr() }));
                 head_arena = new;
                 head_size = 0;
             }
             let slot = Slot {
-                obj: SlotState {full_obj: ManuallyDrop::new(obj)},
+                obj: SlotState { full_obj: ManuallyDrop::new(obj) },
                 free_lock: RwLock::new(()),
                 ref_count: AtomicU32::new(1),
                 uid,
@@ -122,7 +121,6 @@ impl<T, const L: usize> Drop for Pool<T, L> {
 }
 
-
 pub trait StaticPool<T, const L: usize = DEFAULT_L> {
     /// Must return a pointer to an instance of a `Pool<T, L>` with a static lifetime. That pointer must be cast to a `*const ()` to make the borrow-checker happy.
     ///
     /// **Safety**: The returned pointer must have originally been a `&'static Pool<T, L>` reference. So it must have had a matching `T` and `L` and it must have the static lifetime.
@@ -135,21 +133,23 @@ pub trait StaticPool<T, const L: usize = DEFAULT_L> {
     ///
     /// This `PoolArc` supports the ability to generate weak, non-owning references to the allocated `T`.
     #[inline(always)]
-    fn alloc(obj: T) -> PoolArc<T, Self, L> where Self: Sized {
+    fn alloc(obj: T) -> PoolArc<T, Self, L>
+    where
+        Self: Sized,
+    {
         unsafe {
             PoolArc {
                 ptr: (*Self::get_static_pool().cast::<Pool<T, L>>()).alloc_ptr(obj),
-                _p: PhantomData
+                _p: PhantomData,
             }
         }
     }
 }
 
-
 /// A multithreading lock guard that prevents another thread from freeing the underlying `T` while it is held. It does not prevent other threads from accessing the underlying `T`.
 ///
 /// If the same thread that holds this guard attempts to free `T` before dropping the guard, it will deadlock.
-pub struct PoolGuard<'a, T> (RwLockReadGuard<'a, ()>, &'a T);
+pub struct PoolGuard<'a, T>(RwLockReadGuard<'a, ()>, &'a T);
 impl<'a, T> Deref for PoolGuard<'a, T> {
     type Target = T;
     #[inline]
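
For orientation, here is a minimal usage sketch of the `static_pool!` macro and `StaticPool::alloc` defined above. The pool name and element types are illustrative only, and it assumes `Pool`, `StaticPool`, and `static_pool!` are in scope (as the test module at the bottom of this file does via `use super::*;`); it mirrors the `TestPools` declaration found there.

// Hypothetical pool declaration; generates the `StaticPool` impls for `DemoPools`.
static_pool!(StaticPool DemoPools {
    Pool<u32>, Pool<String>
});

fn demo() {
    // `alloc` hands back a `PoolArc`, which derefs to the value and returns the
    // slot to the pool when the last strong reference is dropped.
    let n = DemoPools::alloc(7u32);
    let s = DemoPools::alloc(String::from("hello"));
    assert_eq!(*n, 7);
    assert_eq!(s.as_str(), "hello");
}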
@@ -158,7 +158,6 @@ impl<'a, T> Deref for PoolGuard<'a, T> {
     }
 }
 
-
 /// A rust-style RAII wrapper that drops and frees memory allocated from a pool automatically, the same as an `Arc<T>`. This will run the destructor of `T` in place within the pool before freeing it, correctly maintaining the invariants that the borrow checker and rust compiler expect of generic types.
 pub struct PoolArc<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
     ptr: NonNull<Slot<T>>,
@@ -179,9 +178,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArc<T, OriginPool, L> {
     }
     /// Returns a number that uniquely identifies this allocated `T` within this pool. No other instance of `T` may have this uid.
     pub fn uid(&self) -> NonZeroU64 {
-        unsafe {
-            NonZeroU64::new_unchecked(self.ptr.as_ref().uid)
-        }
+        unsafe { NonZeroU64::new_unchecked(self.ptr.as_ref().uid) }
     }
 }
 unsafe impl<T, OriginPool: StaticPool<T, L>, const L: usize> Send for PoolArc<T, OriginPool, L> where T: Send {}
@@ -191,9 +188,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Deref for PoolArc<T, OriginPool, L> {
     type Target = T;
     #[inline]
     fn deref(&self) -> &Self::Target {
-        unsafe {
-            &self.ptr.as_ref().obj.full_obj
-        }
+        unsafe { &self.ptr.as_ref().obj.full_obj }
     }
 }
 impl<T, OriginPool: StaticPool<T, L>, const L: usize> Clone for PoolArc<T, OriginPool, L> {
@@ -201,10 +196,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Clone for PoolArc<T, OriginPool, L> {
         unsafe {
             self.ptr.as_ref().ref_count.fetch_add(1, Ordering::Relaxed);
         }
-        Self {
-            ptr: self.ptr,
-            _p: PhantomData,
-        }
+        Self { ptr: self.ptr, _p: PhantomData }
     }
 }
 impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArc<T, OriginPool, L> {
@@ -218,7 +210,6 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArc<T, OriginPool, L> {
     }
 }
 
-
 /// A non-owning reference to a `T` allocated by a pool. This reference has the special property that the underlying `T` can be dropped from the pool while neither making this reference invalid nor leaking the memory of `T`. Instead, attempts to `grab` this reference will safely return `None` if the underlying `T` has been freed by any thread.
 ///
 /// Due to their thread safety and low overhead a `PoolWeakRef` implements clone and copy.
@@ -252,15 +243,11 @@ unsafe impl<T> Sync for PoolWeakRef<T> where T: Sync {}
 
 impl<T> Clone for PoolWeakRef<T> {
     fn clone(&self) -> Self {
-        Self {
-            uid: self.uid,
-            ptr: self.ptr,
-        }
+        Self { uid: self.uid, ptr: self.ptr }
     }
 }
 impl<T> Copy for PoolWeakRef<T> {}
 
-
 pub struct PoolArcSwap<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
     ptr: AtomicPtr<Slot<T>>,
     reads: AtomicU32,
@@ -289,10 +276,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwap<T, OriginPool, L> {
             }
             mem::forget(arc);
-            PoolArc {
-                ptr: NonNull::new_unchecked(pre_ptr),
-                _p: self._p,
-            }
+            PoolArc { ptr: NonNull::new_unchecked(pre_ptr), _p: self._p }
         }
     }
 
@@ -302,10 +286,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwap<T, OriginPool, L> {
             let ptr = self.ptr.load(Ordering::Relaxed);
             (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
             self.reads.fetch_sub(1, Ordering::Release);
-            PoolArc {
-                ptr: NonNull::new_unchecked(ptr),
-                _p: self._p,
-            }
+            PoolArc { ptr: NonNull::new_unchecked(ptr), _p: self._p }
         }
     }
 }
@@ -317,15 +298,11 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArcSwap<T, OriginPool, L> {
     fn drop(&mut self) {
         unsafe {
             let pre = self.ptr.load(Ordering::SeqCst);
-            PoolArc {
-                _p: self._p,
-                ptr: NonNull::new_unchecked(pre),
-            };
+            PoolArc { _p: self._p, ptr: NonNull::new_unchecked(pre) };
         }
     }
 }
 
-
 pub struct PoolArcSwapRw<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
     ptr: RwLock<NonNull<Slot<T>>>,
     _p: PhantomData<*const OriginPool>,
@@ -333,20 +310,14 @@ pub struct PoolArcSwapRw<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
 
 impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwapRw<T, OriginPool, L> {
     pub fn new(arc: PoolArc<T, OriginPool, L>) -> Self {
-        let ret = Self {
-            ptr: RwLock::new(arc.ptr),
-            _p: arc._p,
-        };
+        let ret = Self { ptr: RwLock::new(arc.ptr), _p: arc._p };
         mem::forget(arc);
         ret
     }
 
     pub fn swap(&self, arc: PoolArc<T, OriginPool, L>) -> PoolArc<T, OriginPool, L> {
         let mut w = self.ptr.write().unwrap();
-        let pre = PoolArc {
-            ptr: *w,
-            _p: self._p,
-        };
+        let pre = PoolArc { ptr: *w, _p: self._p };
         *w = arc.ptr;
         mem::forget(arc);
         pre
@@ -357,10 +328,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwapRw<T, OriginPool, L> {
         unsafe {
             r.as_ref().ref_count.fetch_add(1, Ordering::Relaxed);
         }
-        let pre = PoolArc {
-            ptr: *r,
-            _p: self._p,
-        };
+        let pre = PoolArc { ptr: *r, _p: self._p };
         pre
     }
 }
@@ -368,10 +336,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArcSwapRw<T, OriginPool, L> {
     #[inline]
     fn drop(&mut self) {
         let w = self.ptr.write().unwrap();
-        PoolArc {
-            ptr: *w,
-            _p: self._p,
-        };
+        PoolArc { ptr: *w, _p: self._p };
     }
 }
 unsafe impl<T, OriginPool: StaticPool<T, L>, const L: usize> Send for PoolArcSwapRw<T, OriginPool, L> where T: Send {}
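
As a quick illustration of the swap wrapper above, a minimal sketch using only the `new` and `swap` methods shown in this diff, reusing the hypothetical `DemoPools` from the earlier sketch (names are illustrative, not part of the commit):

fn swap_demo() {
    // Wrap a pool allocation in a PoolArcSwapRw so it can be replaced from any thread.
    let cell = PoolArcSwapRw::new(DemoPools::alloc(1u32));
    // `swap` installs the new PoolArc behind the write lock and hands the previous
    // one back, so the old value is dropped (and its slot freed) by the caller.
    let previous = cell.swap(DemoPools::alloc(2u32));
    assert_eq!(*previous, 1);
}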
@@ -424,9 +389,11 @@ pub use __static_pool__ as static_pool;
 
 #[cfg(test)]
 mod tests {
-    use std::{thread, sync::{Arc, atomic::AtomicU64}};
     use super::*;
+    use std::{
+        sync::{atomic::AtomicU64, Arc},
+        thread,
+    };
 
     fn rand(r: &mut u32) -> u32 {
         /* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
@@ -436,18 +403,18 @@ mod tests {
         *r
     }
     const fn prob(p: u64) -> u32 {
-        (p*(u32::MAX as u64)/100) as u32
+        (p * (u32::MAX as u64) / 100) as u32
     }
     fn rand_idx<'a, T>(v: &'a [T], r: &mut u32) -> Option<&'a T> {
         if v.len() > 0 {
-            Some(&v[(rand(r) as usize)%v.len()])
+            Some(&v[(rand(r) as usize) % v.len()])
         } else {
             None
         }
     }
     fn rand_i<'a, T>(v: &'a [T], r: &mut u32) -> Option<usize> {
         if v.len() > 0 {
-            Some((rand(r) as usize)%v.len())
+            Some((rand(r) as usize) % v.len())
         } else {
             None
         }
@@ -461,11 +428,7 @@ mod tests {
     impl Item {
         fn new(r: u32, count: &'static AtomicU64) -> Item {
             count.fetch_add(1, Ordering::Relaxed);
-            Item {
-                a: r,
-                count,
-                b: r,
-            }
+            Item { a: r, count, b: r }
         }
         fn check(&self, id: u32) {
             assert_eq!(self.a, self.b);
@@ -479,14 +442,13 @@
         }
     }
 
-
-    const POOL_U32_LEN: usize = (5*12)<<2;
+    const POOL_U32_LEN: usize = (5 * 12) << 2;
     static_pool!(StaticPool TestPools {
         Pool<u32, POOL_U32_LEN>, Pool<Item>
     });
 
     #[test]
     fn usage() {
         let num1 = TestPools::alloc(1u32);
         let num2 = TestPools::alloc(2u32);
         let num3 = TestPools::alloc(3u32);
@@ -503,7 +465,6 @@
     }
 
-
     #[test]
     fn single_thread() {
         let mut history = Vec::new();
         let num1 = TestPools::alloc(1u32);
@@ -516,7 +477,7 @@
             history.push(TestPools::alloc(i as u32));
         }
         for i in 0..100 {
-            let arc = history.remove((i*10)%history.len());
+            let arc = history.remove((i * 10) % history.len());
             assert!(*arc < 1000);
         }
         for i in 0..1000 {
@@ -645,7 +606,7 @@
                         let _a = s.load();
                         assert_eq!(_a.a, _a.b);
                     }
                 }
             }));
         }
         for j in joins {


@@ -6,6 +6,7 @@
  * https://www.zerotier.com/
  */
 
+pub mod arc_pool;
 pub mod arrayvec;
 pub mod base64;
 pub mod blob;
@@ -29,8 +30,6 @@ pub mod reaper;
 pub mod ringbuffer;
 pub mod sync;
 pub mod varint;
-
-pub mod arc_pool;
 
 #[cfg(feature = "tokio")]
 pub use tokio;