From 4931e449989f74b9518d15bc69521fdbefb313e7 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko <adam.ierymenko@gmail.com>
Date: Fri, 2 Sep 2016 12:34:02 -0700
Subject: [PATCH] Implement "weak pointer" behavior on Topology Path
 canonicalization hash table.

---
 node/AtomicCounter.hpp | 51 ++++--------------------------------------
 node/SharedPtr.hpp     | 28 +++++++++++++++++++++--
 node/Topology.cpp      | 27 +++++++++++++++-------
 3 files changed, 49 insertions(+), 57 deletions(-)

diff --git a/node/AtomicCounter.hpp b/node/AtomicCounter.hpp
index b49937716..a0f29baaf 100644
--- a/node/AtomicCounter.hpp
+++ b/node/AtomicCounter.hpp
@@ -20,11 +20,9 @@
 #define ZT_ATOMICCOUNTER_HPP
 
 #include "Constants.hpp"
-#include "Mutex.hpp"
 #include "NonCopyable.hpp"
 
-#ifdef __WINDOWS__
-// <atomic> will replace this whole class eventually once it's ubiquitous
+#ifndef __GNUC__
 #include <atomic>
 #endif
 
@@ -36,75 +34,34 @@ namespace ZeroTier {
 class AtomicCounter : NonCopyable
 {
 public:
-	/**
-	 * Initialize counter at zero
-	 */
 	AtomicCounter()
-		throw()
 	{
 		_v = 0;
 	}
 
-	inline operator int() const
-		throw()
-	{
-#ifdef __GNUC__
-		return __sync_or_and_fetch(const_cast <volatile int *>(&_v),0);
-#else
-#ifdef __WINDOWS__
-		return (int)_v;
-#else
-		_l.lock();
-		int v = _v;
-		_l.unlock();
-		return v;
-#endif
-#endif
-	}
-
 	inline int operator++()
-		throw()
 	{
 #ifdef __GNUC__
 		return __sync_add_and_fetch(&_v,1);
 #else
-#ifdef __WINDOWS__
 		return ++_v;
-#else
-		_l.lock();
-		int v = ++_v;
-		_l.unlock();
-		return v;
-#endif
 #endif
 	}
 
 	inline int operator--()
-		throw()
 	{
 #ifdef __GNUC__
 		return __sync_sub_and_fetch(&_v,1);
 #else
-#ifdef __WINDOWS__
 		return --_v;
-#else
-		_l.lock();
-		int v = --_v;
-		_l.unlock();
-		return v;
-#endif
 #endif
 	}
 
 private:
-#ifdef __WINDOWS__
-	std::atomic_int _v;
-#else
+#ifdef __GNUC__
 	int _v;
-#ifndef __GNUC__
-#warning Neither __WINDOWS__ nor __GNUC__ so AtomicCounter using Mutex
-	Mutex _l;
-#endif
+#else
+	std::atomic_int _v;
 #endif
 };
 
diff --git a/node/SharedPtr.hpp b/node/SharedPtr.hpp
index 3ff5ed18a..1dd3b43de 100644
--- a/node/SharedPtr.hpp
+++ b/node/SharedPtr.hpp
@@ -119,15 +119,39 @@ public:
 	inline T *ptr() const throw() { return _ptr; }
 
 	/**
-	 * Set this pointer to null
+	 * Set this pointer to NULL
 	 */
 	inline void zero()
 	{
 		if (_ptr) {
 			if (--_ptr->__refCount <= 0)
 				delete _ptr;
+			_ptr = (T *)0;
+		}
+	}
+
+	/**
+	 * Set this pointer to NULL if this is the only pointer holding the object
+	 *
+	 * @return True if object was deleted and SharedPtr is now NULL (or was already NULL)
+	 */
+	inline bool reclaimIfWeak()
+	{
+		if (_ptr) {
+			if (++_ptr->__refCount <= 2) {
+				if (--_ptr->__refCount <= 1) {
+					delete _ptr;
+					_ptr = (T *)0;
+					return true;
+				} else {
+					return false;
+				}
+			} else {
+				return false;
+			}
+		} else {
+			return true;
 		}
-		_ptr = (T *)0;
 	}
 
 	inline bool operator==(const SharedPtr &sp) const throw() { return (_ptr == sp._ptr); }
diff --git a/node/Topology.cpp b/node/Topology.cpp
index 6e0fe90c0..c6d46dc5e 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -251,14 +251,25 @@ bool Topology::worldUpdateIfValid(const World &newWorld)
 void Topology::clean(uint64_t now)
 {
 	Mutex::Lock _l(_lock);
-	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
-	Address *a = (Address *)0;
-	SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
-	while (i.next(a,p)) {
-		if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
-			_peers.erase(*a);
-		} else {
-			(*p)->clean(now);
+	{
+		Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
+		Address *a = (Address *)0;
+		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+		while (i.next(a,p)) {
+			if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
+				_peers.erase(*a);
+			} else {
+				(*p)->clean(now);
+			}
+		}
+	}
+	{
+		Hashtable< Path::HashKey,SharedPtr<Path> >::Iterator i(_paths);
+		Path::HashKey *k = (Path::HashKey *)0;
+		SharedPtr<Path> *p = (SharedPtr<Path> *)0;
+		while (i.next(k,p)) {
+			if (p->reclaimIfWeak())
+				_paths.erase(*k);
 		}
 	}
 }