ran cargo fmt

mamoniot 2023-03-20 15:29:02 -04:00
parent f6540e129a
commit 7ec194a6d1
No known key found for this signature in database
GPG key ID: ADCCDBBE0E3D3B3B
2 changed files with 39 additions and 79 deletions

View file

@@ -1,9 +1,12 @@
use std::sync::{Mutex, RwLock, RwLockReadGuard, atomic::{AtomicU32, Ordering, AtomicPtr}};
use std::mem::{self, MaybeUninit, ManuallyDrop};
use std::ptr::{self, NonNull};
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop, MaybeUninit};
use std::num::NonZeroU64;
use std::ops::Deref;
use std::ptr::{self, NonNull};
use std::sync::{
atomic::{AtomicPtr, AtomicU32, Ordering},
Mutex, RwLock, RwLockReadGuard,
};
const DEFAULT_L: usize = 64;
@@ -47,7 +50,6 @@ impl<T, const L: usize> Pool<T, L> {
Pool(Mutex::new((ptr::null_mut(), 1, ptr::null_mut(), usize::MAX)))
}
#[inline(always)]
fn create_arr() -> [MaybeUninit<Slot<T>>; L] {
unsafe { MaybeUninit::<[MaybeUninit<Slot<T>>; L]>::uninit().assume_init() }
@@ -68,10 +70,7 @@ impl<T, const L: usize> Pool<T, L> {
slot_ptr
} else {
if head_size >= L {
let new = Box::leak(Box::new(PoolMem {
pre: head_arena,
mem: Self::create_arr(),
}));
let new = Box::leak(Box::new(PoolMem { pre: head_arena, mem: Self::create_arr() }));
head_arena = new;
head_size = 0;
}
@@ -122,7 +121,6 @@ impl<T, const L: usize> Drop for Pool<T, L> {
}
pub trait StaticPool<T, const L: usize = DEFAULT_L> {
/// Must return a pointer to an instance of a `Pool<T, L>` with a static lifetime. That pointer must be cast to a `*const ()` to make the borrow checker happy.
///
/// **Safety**: The returned pointer must have originally been a `&'static Pool<T, L>` reference. So it must have had a matching `T` and `L`, and it must have the `'static` lifetime.
@@ -135,17 +133,19 @@ pub trait StaticPool<T, const L: usize = DEFAULT_L> {
///
/// This `PoolArc` supports generating weak, non-owning references to the allocated `T`.
#[inline(always)]
fn alloc(obj: T) -> PoolArc<T, Self, L> where Self: Sized {
fn alloc(obj: T) -> PoolArc<T, Self, L>
where
Self: Sized,
{
unsafe {
PoolArc {
ptr: (*Self::get_static_pool().cast::<Pool<T, L>>()).alloc_ptr(obj),
_p: PhantomData
_p: PhantomData,
}
}
}
}
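The `get_static_pool` contract above is easiest to see as a concrete impl. A minimal sketch, assuming `Pool::new()` is usable in a `static` initializer and that `get_static_pool` is a plain associated `fn` returning `*const ()` (the names `U32Pool` and `U32_POOL` are illustrative, not part of this commit; the crate's exported `static_pool!` macro presumably generates this same pattern):

// Hedged sketch: type/static names and the exact trait method signature are assumptions.
struct U32Pool;

// Assumes `Pool::new()` is a `const fn`; otherwise the static would need lazy initialization.
static U32_POOL: Pool<u32> = Pool::new();

impl StaticPool<u32> for U32Pool {
    #[inline(always)]
    fn get_static_pool() -> *const () {
        // `&U32_POOL` is a `&'static Pool<u32>` with matching `T` and `L`,
        // cast to `*const ()` as the safety note above requires.
        (&U32_POOL as *const Pool<u32>).cast::<()>()
    }
}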
/// A multithreading lock guard that prevents another thread from freeing the underlying `T` while it is held. It does not prevent other threads from accessing the underlying `T`.
///
/// If the same thread that holds this guard attempts to free `T` before dropping the guard, it will deadlock.
@@ -158,7 +158,6 @@ impl<'a, T> Deref for PoolGuard<'a, T> {
}
}
/// A Rust-style RAII wrapper that automatically drops and frees memory allocated from a pool, the same as an `Arc<T>`. This will run the destructor of `T` in place within the pool before freeing it, correctly maintaining the invariants that the borrow checker and the Rust compiler expect of generic types.
pub struct PoolArc<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
ptr: NonNull<Slot<T>>,
@@ -179,9 +178,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArc<T, OriginPool, L>
}
/// Returns a number that uniquely identifies this allocated `T` within this pool. No other instance of `T` may have this uid.
pub fn uid(&self) -> NonZeroU64 {
unsafe {
NonZeroU64::new_unchecked(self.ptr.as_ref().uid)
}
unsafe { NonZeroU64::new_unchecked(self.ptr.as_ref().uid) }
}
}
unsafe impl<T, OriginPool: StaticPool<T, L>, const L: usize> Send for PoolArc<T, OriginPool, L> where T: Send {}
@@ -191,9 +188,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Deref for PoolArc<T, Origi
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
unsafe {
&self.ptr.as_ref().obj.full_obj
}
unsafe { &self.ptr.as_ref().obj.full_obj }
}
}
impl<T, OriginPool: StaticPool<T, L>, const L: usize> Clone for PoolArc<T, OriginPool, L> {
@@ -201,10 +196,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Clone for PoolArc<T, Origi
unsafe {
self.ptr.as_ref().ref_count.fetch_add(1, Ordering::Relaxed);
}
Self {
ptr: self.ptr,
_p: PhantomData,
}
Self { ptr: self.ptr, _p: PhantomData }
}
}
impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArc<T, OriginPool, L> {
@@ -218,7 +210,6 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArc<T, Origin
}
}
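Given an implementor of `StaticPool` (such as the illustrative `U32Pool` above), the `PoolArc` returned by `alloc` behaves like an `Arc<T>`: it derefs to the value, cloning bumps the slot's reference count, and the destructor of `T` runs in place in the pool when the last clone is dropped. A brief usage sketch under the same assumptions:

// Hedged usage sketch built on the illustrative `U32Pool` above.
fn arc_demo() {
    let a = U32Pool::alloc(7u32);
    let b = a.clone(); // increments the slot's ref_count
    assert_eq!(*a, 7); // Deref to the underlying u32
    assert_eq!(a.uid(), b.uid()); // same slot, so the same NonZeroU64 uid
    drop(a);
    drop(b); // the last drop runs the destructor in place and frees the slot
}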
/// A non-owning reference to a `T` allocated by a pool. This reference has the special property that the underlying `T` can be dropped from the pool without invalidating this reference or leaking the memory of `T`. Instead, attempts to `grab` this reference will safely return `None` if the underlying `T` has been freed by any thread.
///
/// Due to their thread safety and low overhead, `PoolWeakRef`s implement `Clone` and `Copy`.
@@ -252,15 +243,11 @@ unsafe impl<T> Sync for PoolWeakRef<T> where T: Sync {}
impl<T> Clone for PoolWeakRef<T> {
fn clone(&self) -> Self {
Self {
uid: self.uid,
ptr: self.ptr,
}
Self { uid: self.uid, ptr: self.ptr }
}
}
impl<T> Copy for PoolWeakRef<T> {}
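The `grab` behavior described in the `PoolWeakRef` docs, together with the `PoolGuard` semantics documented earlier, can be sketched as follows. How a `PoolWeakRef` is obtained from a `PoolArc` is not visible in this diff, and `grab` returning `Option<PoolGuard<'_, T>>` is an assumption based on the doc comment:

// Hedged sketch: `weak` is assumed to have been created from a live PoolArc elsewhere.
fn print_if_alive(weak: PoolWeakRef<String>) {
    match weak.grab() {
        Some(guard) => {
            // While the guard is held no other thread can free the underlying String,
            // though other threads may still read it. Dropping the last PoolArc for
            // this slot on this same thread here would deadlock (see the PoolGuard docs).
            println!("still allocated: {}", &*guard);
        }
        // The owning PoolArc(s) were already dropped and the slot was freed; the weak
        // reference stays valid to hold and simply reports that T is gone.
        None => println!("already freed"),
    }
}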
pub struct PoolArcSwap<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
ptr: AtomicPtr<Slot<T>>,
reads: AtomicU32,
@@ -289,10 +276,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwap<T, OriginPool,
}
mem::forget(arc);
PoolArc {
ptr: NonNull::new_unchecked(pre_ptr),
_p: self._p,
}
PoolArc { ptr: NonNull::new_unchecked(pre_ptr), _p: self._p }
}
}
@@ -302,10 +286,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwap<T, OriginPool,
let ptr = self.ptr.load(Ordering::Relaxed);
(*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
self.reads.fetch_sub(1, Ordering::Release);
PoolArc {
ptr: NonNull::new_unchecked(ptr),
_p: self._p,
}
PoolArc { ptr: NonNull::new_unchecked(ptr), _p: self._p }
}
}
}
@@ -317,15 +298,11 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArcSwap<T, Or
fn drop(&mut self) {
unsafe {
let pre = self.ptr.load(Ordering::SeqCst);
PoolArc {
_p: self._p,
ptr: NonNull::new_unchecked(pre),
};
PoolArc { _p: self._p, ptr: NonNull::new_unchecked(pre) };
}
}
}
pub struct PoolArcSwapRw<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAULT_L> {
ptr: RwLock<NonNull<Slot<T>>>,
_p: PhantomData<*const OriginPool>,
@@ -333,20 +310,14 @@ pub struct PoolArcSwapRw<T, OriginPool: StaticPool<T, L>, const L: usize = DEFAU
impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwapRw<T, OriginPool, L> {
pub fn new(arc: PoolArc<T, OriginPool, L>) -> Self {
let ret = Self {
ptr: RwLock::new(arc.ptr),
_p: arc._p,
};
let ret = Self { ptr: RwLock::new(arc.ptr), _p: arc._p };
mem::forget(arc);
ret
}
pub fn swap(&self, arc: PoolArc<T, OriginPool, L>) -> PoolArc<T, OriginPool, L> {
let mut w = self.ptr.write().unwrap();
let pre = PoolArc {
ptr: *w,
_p: self._p,
};
let pre = PoolArc { ptr: *w, _p: self._p };
*w = arc.ptr;
mem::forget(arc);
pre
@@ -357,10 +328,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> PoolArcSwapRw<T, OriginPoo
unsafe {
r.as_ref().ref_count.fetch_add(1, Ordering::Relaxed);
}
let pre = PoolArc {
ptr: *r,
_p: self._p,
};
let pre = PoolArc { ptr: *r, _p: self._p };
pre
}
}
@@ -368,10 +336,7 @@ impl<T, OriginPool: StaticPool<T, L>, const L: usize> Drop for PoolArcSwapRw<T,
#[inline]
fn drop(&mut self) {
let w = self.ptr.write().unwrap();
PoolArc {
ptr: *w,
_p: self._p,
};
PoolArc { ptr: *w, _p: self._p };
}
}
unsafe impl<T, OriginPool: StaticPool<T, L>, const L: usize> Send for PoolArcSwapRw<T, OriginPool, L> where T: Send {}
@@ -424,9 +389,11 @@ pub use __static_pool__ as static_pool;
#[cfg(test)]
mod tests {
use std::{thread, sync::{Arc, atomic::AtomicU64}};
use super::*;
use std::{
sync::{atomic::AtomicU64, Arc},
thread,
};
fn rand(r: &mut u32) -> u32 {
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
@@ -461,11 +428,7 @@ mod tests {
impl Item {
fn new(r: u32, count: &'static AtomicU64) -> Item {
count.fetch_add(1, Ordering::Relaxed);
Item {
a: r,
count,
b: r,
}
Item { a: r, count, b: r }
}
fn check(&self, id: u32) {
assert_eq!(self.a, self.b);
@@ -486,7 +449,6 @@ mod tests {
#[test]
fn usage() {
let num1 = TestPools::alloc(1u32);
let num2 = TestPools::alloc(2u32);
let num3 = TestPools::alloc(3u32);
@@ -503,7 +465,6 @@ mod tests {
}
#[test]
fn single_thread() {
let mut history = Vec::new();
let num1 = TestPools::alloc(1u32);

View file

@@ -6,6 +6,7 @@
* https://www.zerotier.com/
*/
pub mod arc_pool;
pub mod arrayvec;
pub mod base64;
pub mod blob;
@@ -29,8 +30,6 @@ pub mod reaper;
pub mod ringbuffer;
pub mod sync;
pub mod varint;
pub mod arc_pool;
#[cfg(feature = "tokio")]
pub use tokio;