From 7ec194a6d1a3a824020e8fa1e74569024f30248b Mon Sep 17 00:00:00 2001
From: mamoniot
Date: Mon, 20 Mar 2023 15:29:02 -0400
Subject: [PATCH] ran cargo fmt

---
 utils/src/arc_pool.rs | 115 ++++++++++++++----------------------------
 utils/src/lib.rs      |   3 +-
 2 files changed, 39 insertions(+), 79 deletions(-)

diff --git a/utils/src/arc_pool.rs b/utils/src/arc_pool.rs
index 1d5e71e1a..d9eb269e5 100644
--- a/utils/src/arc_pool.rs
+++ b/utils/src/arc_pool.rs
@@ -1,9 +1,12 @@
-use std::sync::{Mutex, RwLock, RwLockReadGuard, atomic::{AtomicU32, Ordering, AtomicPtr}};
-use std::mem::{self, MaybeUninit, ManuallyDrop};
-use std::ptr::{self, NonNull};
 use std::marker::PhantomData;
+use std::mem::{self, ManuallyDrop, MaybeUninit};
 use std::num::NonZeroU64;
 use std::ops::Deref;
+use std::ptr::{self, NonNull};
+use std::sync::{
+    atomic::{AtomicPtr, AtomicU32, Ordering},
+    Mutex, RwLock, RwLockReadGuard,
+};
 
 const DEFAULT_L: usize = 64;
 
@@ -32,7 +35,7 @@ struct PoolMem<T, const L: usize> {
 /// Atomic reference counting is also implemented allowing for exceedingly complex models of shared ownership. Multiple copies of both strong and weak references to the underlying `T` can be generated that are all memory safe and borrow-checked.
 ///
 /// Allocating from a pool results in very little internal and external fragmentation in the global heap, thus saving significant amounts of memory from being used by one's program. Pools also allocate memory significantly faster on average than the global allocator. This specific pool implementation supports guaranteed constant time `alloc` and `free`.
-pub struct Pool<T, const L: usize = DEFAULT_L> (Mutex<(*mut Slot<T>, u64, *mut PoolMem<T, L>, usize)>);
+pub struct Pool<T, const L: usize = DEFAULT_L>(Mutex<(*mut Slot<T>, u64, *mut PoolMem<T, L>, usize)>);
 
 unsafe impl<T, const L: usize> Send for Pool<T, L> {}
 unsafe impl<T, const L: usize> Sync for Pool<T, L> {}
@@ -44,10 +47,9 @@ impl<T, const L: usize> Pool<T, L> {
     /// A `Pool` cannot be interacted with directly, it requires a `impl StaticPool<T, L> for Pool<T, L>` implementation. See the `static_pool!` macro for automatically generated trait implementation.
     #[inline]
     pub const fn new() -> Self {
-        Pool (Mutex::new((ptr::null_mut(), 1, ptr::null_mut(), usize::MAX)))
+        Pool(Mutex::new((ptr::null_mut(), 1, ptr::null_mut(), usize::MAX)))
     }
-
 
     #[inline(always)]
     fn create_arr() -> [MaybeUninit<Slot<T>>; L] {
         unsafe { MaybeUninit::<[MaybeUninit<Slot<T>>; L]>::uninit().assume_init() }
@@ -68,15 +70,12 @@ impl<T, const L: usize> Pool<T, L> {
             slot_ptr
         } else {
             if head_size >= L {
-                let new = Box::leak(Box::new(PoolMem {
-                    pre: head_arena,
-                    mem: Self::create_arr(),
-                }));
+                let new = Box::leak(Box::new(PoolMem { pre: head_arena, mem: Self::create_arr() }));
                 head_arena = new;
                 head_size = 0;
             }
             let slot = Slot {
-                obj: SlotState {full_obj: ManuallyDrop::new(obj)},
+                obj: SlotState { full_obj: ManuallyDrop::new(obj) },
                 free_lock: RwLock::new(()),
                 ref_count: AtomicU32::new(1),
                 uid,
@@ -122,7 +121,6 @@ impl<T, const L: usize> Drop for Pool<T, L> {
 }
 
 pub trait StaticPool<T, const L: usize = DEFAULT_L> {
-
     /// Must return a pointer to an instance of a `Pool` with a static lifetime. That pointer must be cast to a `*const ()` to make the borrow-checker happy.
     ///
     /// **Safety**: The returned pointer must have originally been a `&'static Pool<T, L>` reference. So it must have had a matching `T` and `L` and it must have the static lifetime.
@@ -135,21 +133,23 @@ pub trait StaticPool<T, const L: usize = DEFAULT_L> {
     fn get_static_pool() -> *const ();
     ///
     /// This `PoolArc` supports the ability to generate weak, non-owning references to the allocated `T`.
     #[inline(always)]
-    fn alloc(obj: T) -> PoolArc<T, Self, L> where Self: Sized {
+    fn alloc(obj: T) -> PoolArc<T, Self, L>
+    where
+        Self: Sized,
+    {
         unsafe {
             PoolArc {
                 ptr: (*Self::get_static_pool().cast::<Pool<T, L>>()).alloc_ptr(obj),
-                _p: PhantomData
+                _p: PhantomData,
             }
         }
     }
 }
-
 /// A multithreading lock guard that prevents another thread from freeing the underlying `T` while it is held. It does not prevent other threads from accessing the underlying `T`.
 ///
 /// If the same thread that holds this guard attempts to free `T` before dropping the guard, it will deadlock.
-pub struct PoolGuard<'a, T> (RwLockReadGuard<'a, ()>, &'a T);
+pub struct PoolGuard<'a, T>(RwLockReadGuard<'a, ()>, &'a T);
 impl<'a, T> Deref for PoolGuard<'a, T> {
     type Target = T;
     #[inline]
@@ -158,7 +158,6 @@ impl<'a, T> Deref for PoolGuard<'a, T> {
     }
 }
 
-
 /// A rust-style RAII wrapper that drops and frees memory allocated from a pool automatically, the same as an `Arc`. This will run the destructor of `T` in place within the pool before freeing it, correctly maintaining the invariants that the borrow checker and rust compiler expect of generic types.
 pub struct PoolArc<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
     ptr: NonNull<Slot<T>>,
@@ -179,9 +178,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArc<T, OriginPool, L> {
     }
     /// Returns a number that uniquely identifies this allocated `T` within this pool. No other instance of `T` may have this uid.
     pub fn uid(&self) -> NonZeroU64 {
-        unsafe {
-            NonZeroU64::new_unchecked(self.ptr.as_ref().uid)
-        }
+        unsafe { NonZeroU64::new_unchecked(self.ptr.as_ref().uid) }
     }
 }
 unsafe impl<T, OriginPool: StaticPool<T, L>, const L: usize> Send for PoolArc<T, OriginPool, L> where T: Send {}
@@ -191,9 +188,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Deref for PoolArc<T, OriginPool, L> {
     type Target = T;
     #[inline]
     fn deref(&self) -> &Self::Target {
-        unsafe {
-            &self.ptr.as_ref().obj.full_obj
-        }
+        unsafe { &self.ptr.as_ref().obj.full_obj }
     }
 }
 impl<T, OriginPool: StaticPool<T, L>, const L: usize> Clone for PoolArc<T, OriginPool, L> {
@@ -201,10 +196,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Clone for PoolArc<T, OriginPool, L> {
     fn clone(&self) -> Self {
         unsafe {
             self.ptr.as_ref().ref_count.fetch_add(1, Ordering::Relaxed);
         }
-        Self {
-            ptr: self.ptr,
-            _p: PhantomData,
-        }
+        Self { ptr: self.ptr, _p: PhantomData }
     }
 }
@@ -218,7 +210,6 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArc<T, OriginPool, L> {
     fn drop(&mut self) {
         unsafe {
             (*OriginPool::get_static_pool().cast::<Pool<T, L>>()).free_ptr(self.ptr);
-
         }
     }
 }
@@ -264,14 +255,10 @@ pub struct PoolWeakRef<T> {
 unsafe impl<T> Sync for PoolWeakRef<T> where T: Sync {}
 impl<T> Clone for PoolWeakRef<T> {
     fn clone(&self) -> Self {
-        Self {
-            uid: self.uid,
-            ptr: self.ptr,
-        }
+        Self { uid: self.uid, ptr: self.ptr }
     }
 }
 impl<T> Copy for PoolWeakRef<T> {}
-
 pub struct PoolArcSwap<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
     ptr: AtomicPtr<Slot<T>>,
     reads: AtomicU32,
@@ -289,10 +276,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwap<T, OriginPool, L> {
     pub fn swap(&self, arc: PoolArc<T, OriginPool, L>) -> PoolArc<T, OriginPool, L> {
         unsafe {
             let pre = self.ptr.swap(arc.ptr.as_ptr(), Ordering::Relaxed);
             mem::forget(arc);
-            PoolArc {
-                ptr: NonNull::new_unchecked(pre),
-                _p: PhantomData,
-            }
+            PoolArc { ptr: NonNull::new_unchecked(pre), _p: PhantomData }
         }
     }
@@ -301,29 +285,22 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwap<T, OriginPool, L> {
     pub fn load(&self) -> PoolArc<T, OriginPool, L> {
         unsafe {
             self.reads.fetch_add(1, Ordering::Acquire);
             let ptr = self.ptr.load(Ordering::Relaxed);
-            let ret = PoolArc {
-                ptr: NonNull::new_unchecked(ptr),
-                _p: self._p,
-            };
+            let ret = PoolArc { ptr: NonNull::new_unchecked(ptr), _p: self._p };
             self.reads.fetch_sub(1, Ordering::Release);
             ret
         }
     }
 }
 impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArcSwap<T, OriginPool, L> {
     fn drop(&mut self) {
         unsafe {
-            drop(PoolArc {
-                ptr: NonNull::new_unchecked(*self.ptr.get_mut()),
-                _p: self._p,
-            });
+            drop(PoolArc { ptr: NonNull::new_unchecked(*self.ptr.get_mut()), _p: self._p });
         }
     }
 }
-
 
 pub struct PoolArcSwapRw<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
     ptr: RwLock<NonNull<Slot<T>>>,
     _p: PhantomData<*const OriginPool>,
 }
@@ -333,18 +310,12 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwapRw<T, OriginPool, L> {
     pub fn new(arc: PoolArc<T, OriginPool, L>) -> Self {
-        let ret = Self {
-            ptr: RwLock::new(arc.ptr),
-            _p: arc._p,
-        };
+        let ret = Self { ptr: RwLock::new(arc.ptr), _p: arc._p };
         mem::forget(arc);
         ret
     }
 
     pub fn swap(&self, arc: PoolArc<T, OriginPool, L>) -> PoolArc<T, OriginPool, L> {
         let mut w = self.ptr.write().unwrap();
-        let pre = PoolArc {
-            ptr: *w,
-            _p: self._p,
-        };
+        let pre = PoolArc { ptr: *w, _p: self._p };
         *w = arc.ptr;
         mem::forget(arc);
         pre
@@ -357,10 +328,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwapRw<T, OriginPool, L> {
 impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArcSwapRw<T, OriginPool, L> {
     fn drop(&mut self) {
         let w = self.ptr.write().unwrap();
-        let pre = PoolArc {
-            ptr: *w,
-            _p: self._p,
-        };
+        let pre = PoolArc { ptr: *w, _p: self._p };
     }
 }
 unsafe impl<T, OriginPool: StaticPool<T, L>, const L: usize> Send for PoolArcSwapRw<T, OriginPool, L> where T: Send {}
@@ -413,8 +381,5 @@ macro_rules! __static_pool__ {
     };
 }
 
-
-
-
-pub use __static_pool__ as static_pool; 
+pub use __static_pool__ as static_pool;
 
@@ -424,9 +389,11 @@ pub use __static_pool__ as static_pool;
 
 #[cfg(test)]
 mod tests {
-    use std::{thread, sync::{Arc, atomic::AtomicU64}};
     use super::*;
-
+    use std::{
+        sync::{atomic::AtomicU64, Arc},
+        thread,
+    };
 
     fn rand(r: &mut u32) -> u32 {
         /* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
@@ -436,18 +403,18 @@ mod tests {
         *r
     }
     const fn prob(p: u64) -> u32 {
-        (p*(u32::MAX as u64)/100) as u32
+        (p * (u32::MAX as u64) / 100) as u32
     }
     fn rand_idx<'a, T>(v: &'a [T], r: &mut u32) -> Option<&'a T> {
         if v.len() > 0 {
-            Some(&v[(rand(r) as usize)%v.len()])
+            Some(&v[(rand(r) as usize) % v.len()])
         } else {
            None
         }
     }
     fn rand_i<'a, T>(v: &'a [T], r: &mut u32) -> Option<usize> {
         if v.len() > 0 {
-            Some((rand(r) as usize)%v.len())
+            Some((rand(r) as usize) % v.len())
         } else {
             None
         }
@@ -461,11 +428,7 @@ mod tests {
     impl Item {
         fn new(r: u32, count: &'static AtomicU64) -> Item {
             count.fetch_add(1, Ordering::Relaxed);
-            Item {
-                a: r,
-                count,
-                b: r,
-            }
+            Item { a: r, count, b: r }
         }
         fn check(&self, id: u32) {
             assert_eq!(self.a, self.b);
@@ -479,14 +442,13 @@ impl Drop for Item {
             self.count.fetch_sub(1, Ordering::Relaxed);
         }
     }
 
-    const POOL_U32_LEN: usize = (5*12)<<2;
+    const POOL_U32_LEN: usize = (5 * 12) << 2;
 
     static_pool!(StaticPool TestPools { Pool<u32, POOL_U32_LEN>, Pool<Item> });
 
     #[test]
     fn usage() {
-
         let num1 = TestPools::alloc(1u32);
         let num2 = TestPools::alloc(2u32);
         let num3 = TestPools::alloc(3u32);
@@ -503,7 +465,6 @@ fn usage() {
     }
 
     #[test]
     fn single_thread() {
-
         let mut history = Vec::new();
         let num1 = TestPools::alloc(1u32);
@@ -516,7 +477,7 @@ fn single_thread() {
             history.push(TestPools::alloc(i as u32));
         }
         for i in 0..100 {
-            let arc = history.remove((i*10)%history.len());
+            let arc = history.remove((i * 10) % history.len());
             assert!(*arc < 1000);
         }
         for i in 0..1000 {
@@ -645,7 +606,7 @@ fn multi_thread() {
                     let _a = s.load();
                     assert_eq!(_a.a, _a.b);
                 }
-                }
+            }
         }));
     }
     for j in joins {
diff --git a/utils/src/lib.rs b/utils/src/lib.rs
index 30d87df93..a3be5577b 100644
--- a/utils/src/lib.rs
+++ b/utils/src/lib.rs
@@ -6,6 +6,7 @@
  * https://www.zerotier.com/
  */
 
+pub mod arc_pool;
 pub mod arrayvec;
 pub mod base64;
 pub mod blob;
@@ -29,8 +30,6 @@ pub mod reaper;
 pub mod ringbuffer;
 pub mod sync;
 pub mod varint;
-pub mod arc_pool;
-
 
 #[cfg(feature = "tokio")]
 pub use tokio;
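
Usage sketch (illustrative, not part of the patch): the API formatted above is driven through the `static_pool!` macro and the `StaticPool::alloc` trait method, as the `usage` test does. The crate path `zerotier_utils`, the `MyPools` name, and the concrete type parameters below are assumptions for the example; `static_pool!`, `alloc`, and `uid` come from the code in this diff.

use zerotier_utils::arc_pool::{static_pool, Pool, StaticPool};

static_pool!(StaticPool MyPools { Pool<u32>, Pool<String> });

fn demo() {
    // `alloc` moves the value into a slot of the static pool and returns a
    // `PoolArc`, the `Arc`-style RAII handle defined in arc_pool.rs.
    let n = MyPools::alloc(7u32);
    let s = MyPools::alloc(String::from("hello"));

    // `PoolArc` dereferences to the allocated value.
    assert_eq!(*n + 1, 8);
    assert_eq!(s.len(), 5);

    // Every live allocation carries a pool-unique, non-zero uid.
    assert_ne!(n.uid().get(), 0);

    // Clones share the slot through the internal atomic reference count; the
    // slot is handed back to the pool when the last strong handle drops.
    let n2 = n.clone();
    drop(n);
    assert_eq!(*n2, 7);
}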
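
The swap containers follow the same ownership rules; below is a rough sketch of `PoolArcSwap` under the same assumptions. `load` appears in the multithreaded tests above, while `new` and `swap` returning the previous arc are inferred from the `PoolArcSwapRw` hunks, so treat the exact signatures as unverified.

use zerotier_utils::arc_pool::PoolArcSwap;

fn swap_demo() {
    // Move one strong handle into the swap cell.
    let shared = PoolArcSwap::new(MyPools::alloc(1u32));

    // Readers take their own strong handle without blocking writers.
    assert_eq!(*shared.load(), 1);

    // A writer publishes a replacement and receives the old arc back.
    let old = shared.swap(MyPools::alloc(2u32));
    assert_eq!(*old, 1);
    assert_eq!(*shared.load(), 2);
}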