// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.

use std::ops::{Deref, DerefMut};
use std::ptr::NonNull;
use std::sync::{Arc, Weak};

use parking_lot::Mutex;

/// Each pool requires a factory that creates and resets (for re-use) pooled objects.
pub trait PoolFactory<O> {
    fn create(&self) -> O;
    fn reset(&self, obj: &mut O);
}
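// A minimal sketch (not part of the original file) of what a PoolFactory
// implementation looks like: a factory for reusable byte buffers that allocates
// in create() and clears, but keeps capacity, in reset(). The name
// `BufferFactory` and the 2048-byte capacity are illustrative assumptions only.
#[cfg(test)]
mod factory_example {
    use super::PoolFactory;

    struct BufferFactory;

    impl PoolFactory<Vec<u8>> for BufferFactory {
        // Called when the pool is empty and a fresh object must be allocated.
        fn create(&self) -> Vec<u8> {
            Vec::with_capacity(2048)
        }

        // Called when a Pooled guard is dropped, just before the object
        // re-enters the pool.
        fn reset(&self, obj: &mut Vec<u8>) {
            obj.clear(); // retains capacity so the next checkout avoids reallocation
        }
    }

    #[test]
    fn create_and_reset() {
        let f = BufferFactory;
        let mut b = f.create();
        b.extend_from_slice(&[1, 2, 3]);
        f.reset(&mut b);
        assert!(b.is_empty());
        assert!(b.capacity() >= 2048);
    }
}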
#[repr(C)]
struct PoolEntry<O, F: PoolFactory<O>> {
    obj: O, // must be first
    return_pool: Weak<PoolInner<O, F>>,
}

struct PoolInner<O, F: PoolFactory<O>> {
    factory: F,
    pool: Mutex<Vec<NonNull<PoolEntry<O, F>>>>,
}

/// Container for pooled objects that have been checked out of the pool.
///
/// When this is dropped the object is returned to the pool, or is dropped outright
/// if the pool itself has been dropped. There is also into_raw() and from_raw()
/// functionality that allows conversion to/from naked pointers to O for
/// interoperation with C/C++ APIs. This, in addition to being as slim as possible,
/// is why we implemented our own pool.
///
/// Note that pooled objects are not clonable. If you want to share them use Rc<>
/// or Arc<>.
#[repr(transparent)]
pub struct Pooled<O, F: PoolFactory<O>>(NonNull<PoolEntry<O, F>>);

impl<O, F: PoolFactory<O>> Pooled<O, F> {
    /// Get a raw pointer to the object wrapped by this pooled object container.
    /// The returned raw pointer MUST be restored into a Pooled instance with
    /// from_raw() or memory will leak.
    #[inline(always)]
    pub unsafe fn into_raw(self) -> *mut O {
        // Verify that the structure is not padded before 'obj'.
        assert_eq!(
            (&self.0.as_ref().obj as *const O).cast::<u8>(),
            (self.0.as_ref() as *const PoolEntry<O, F>).cast::<u8>()
        );
        let ptr = self.0.as_ptr().cast::<O>();
        std::mem::forget(self);
        ptr
    }

    /// Restore a raw pointer from into_raw() into a Pooled object.
    /// The supplied pointer MUST have been obtained from a Pooled object or
    /// undefined behavior will occur. Pointers from other sources can't be used
    /// here. None is returned if the pointer is null.
    #[inline(always)]
    pub unsafe fn from_raw(raw: *mut O) -> Option<Self> {
        if !raw.is_null() {
            Some(Self(NonNull::new_unchecked(raw.cast())))
        } else {
            None
        }
    }
}

unsafe impl<O, F: PoolFactory<O>> Send for Pooled<O, F> {}
unsafe impl<O, F: PoolFactory<O>> Sync for Pooled<O, F> where O: Sync {}

impl<O, F: PoolFactory<O>> Deref for Pooled<O, F> {
    type Target = O;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        unsafe { &self.0.as_ref().obj }
    }
}

impl<O, F: PoolFactory<O>> DerefMut for Pooled<O, F> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut self.0.as_mut().obj }
    }
}

impl<O, F: PoolFactory<O>> AsRef<O> for Pooled<O, F> {
    #[inline(always)]
    fn as_ref(&self) -> &O {
        unsafe { &self.0.as_ref().obj }
    }
}

impl<O, F: PoolFactory<O>> AsMut<O> for Pooled<O, F> {
    #[inline(always)]
    fn as_mut(&mut self) -> &mut O {
        unsafe { &mut self.0.as_mut().obj }
    }
}

impl<O, F: PoolFactory<O>> Drop for Pooled<O, F> {
    #[inline(always)]
    fn drop(&mut self) {
        let internal = unsafe { self.0.as_mut() };
        if let Some(p) = internal.return_pool.upgrade() {
            // The pool still exists: reset the object and push the entry back.
            p.factory.reset(&mut internal.obj);
            p.pool.lock().push(self.0);
        } else {
            // The pool is gone: free the entry outright.
            drop(unsafe { Box::from_raw(self.0.as_ptr()) });
        }
    }
}

/// An object pool for reusable objects.
/// Checked out objects are held by a guard object that returns them when dropped if
/// the pool still exists or drops them if the pool has itself been dropped.
pub struct Pool<O, F: PoolFactory<O>>(Arc<PoolInner<O, F>>);

impl<O, F: PoolFactory<O>> Pool<O, F> {
    pub fn new(initial_stack_capacity: usize, factory: F) -> Self {
        Self(Arc::new(PoolInner::<O, F> {
            factory,
            pool: Mutex::new(Vec::with_capacity(initial_stack_capacity)),
        }))
    }

    /// Get a pooled object, or allocate one if the pool is empty.
    #[inline(always)]
    pub fn get(&self) -> Pooled<O, F> {
        if let Some(o) = self.0.pool.lock().pop() {
            return Pooled::<O, F>(o);
        }
        return Pooled::<O, F>(unsafe {
            NonNull::new_unchecked(Box::into_raw(Box::new(PoolEntry::<O, F> {
                obj: self.0.factory.create(),
                return_pool: Arc::downgrade(&self.0),
            })))
        });
    }

    /// Dispose of all pooled objects, freeing any memory they use.
    ///
    /// If get() is called after this, new objects will be allocated, and any outstanding
    /// objects will still be returned on drop unless the pool itself is dropped. This can
    /// be done to free some memory if there has been a spike in memory use.
    pub fn purge(&self) {
        for o in self.0.pool.lock().drain(..) {
            drop(unsafe { Box::from_raw(o.as_ptr()) })
        }
    }
}

impl<O, F: PoolFactory<O>> Drop for Pool<O, F> {
    #[inline(always)]
    fn drop(&mut self) {
        self.purge();
    }
}

unsafe impl<O, F: PoolFactory<O>> Send for Pool<O, F> {}
unsafe impl<O, F: PoolFactory<O>> Sync for Pool<O, F> {}

#[cfg(test)]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::time::Duration;

    use super::*;

    struct TestPoolFactory;

    impl PoolFactory<String> for TestPoolFactory {
        fn create(&self) -> String {
            String::new()
        }

        fn reset(&self, obj: &mut String) {
            obj.clear();
        }
    }

    #[test]
    fn threaded_pool_use() {
        let p: Arc<Pool<String, TestPoolFactory>> = Arc::new(Pool::new(2, TestPoolFactory {}));
        let ctr = Arc::new(AtomicUsize::new(0));
        for _ in 0..64 {
            let p2 = p.clone();
            let ctr2 = ctr.clone();
            let _ = std::thread::spawn(move || {
                for _ in 0..16384 {
                    let mut o1 = p2.get();
                    o1.push('a');
                    let o2 = p2.get();
                    drop(o1);
                    let mut o2 = unsafe { Pooled::<String, TestPoolFactory>::from_raw(o2.into_raw()).unwrap() };
                    o2.push('b');
                    ctr2.fetch_add(1, Ordering::Relaxed);
                }
            });
        }
        loop {
            std::thread::sleep(Duration::from_millis(100));
            if ctr.load(Ordering::Relaxed) >= 16384 * 64 {
                break;
            }
        }
    }
}
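// A minimal sketch (not part of the original file) of the C/C++ interop flow the
// Pooled docs describe: into_raw() releases the guard as a naked *mut O so it can
// cross an FFI boundary, and from_raw() rebuilds the guard so the object still
// returns to its pool on drop. `VecFactory` and `hand_to_c_and_back` are
// hypothetical stand-ins; the latter merely plays the role of a C API that
// receives and later hands back an opaque pointer.
#[cfg(test)]
mod raw_interop_example {
    use super::*;

    struct VecFactory;

    impl PoolFactory<Vec<u8>> for VecFactory {
        fn create(&self) -> Vec<u8> {
            Vec::with_capacity(64)
        }

        fn reset(&self, obj: &mut Vec<u8>) {
            obj.clear();
        }
    }

    // Hypothetical stand-in for a C API that round-trips an opaque pointer.
    fn hand_to_c_and_back(ptr: *mut Vec<u8>) -> *mut Vec<u8> {
        ptr
    }

    #[test]
    fn raw_round_trip() {
        let pool: Pool<Vec<u8>, VecFactory> = Pool::new(1, VecFactory);
        let mut buf = pool.get();
        buf.extend_from_slice(b"hello");

        // Ownership leaves Rust as a naked pointer to the inner object...
        let raw = unsafe { buf.into_raw() };
        let raw = hand_to_c_and_back(raw);

        // ...and is reclaimed into a guard; the contents survive the round trip.
        let buf = unsafe { Pooled::<Vec<u8>, VecFactory>::from_raw(raw) }.unwrap();
        assert_eq!(buf.as_slice(), b"hello");

        // Dropping the guard resets the buffer and returns it to the pool,
        // so the next checkout hands back the same storage, cleared.
        drop(buf);
        assert!(pool.get().is_empty());
    }
}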