Cleanup and optimization.

Adam Ierymenko 2021-03-02 11:18:04 -05:00
parent 88181c491c
commit 8474508412
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
13 changed files with 168 additions and 87 deletions


@@ -50,6 +50,7 @@ set(core_headers
     SelfAwareness.hpp
     SHA512.hpp
     SharedPtr.hpp
+    Spinlock.hpp
     SymmetricKey.hpp
     TagCredential.hpp
     Topology.hpp


@@ -110,6 +110,11 @@
  */
 #define ZT_NETWORK_HOUSEKEEPING_PERIOD 30000

+/**
+ * How often to rank roots
+ */
+#define ZT_ROOT_RANK_PERIOD 5000
+
 /**
  * Delay between WHOIS retries in ms
  */


@@ -31,7 +31,9 @@ namespace ZeroTier {
 namespace {

-// Structure containing all the core objects for a ZeroTier node to reduce memory allocations.
+/*
+ * All the core objects of ZeroTier in a single struct to reduce allocations.
+ */
 struct _NodeObjects
 {
     ZT_INLINE _NodeObjects(RuntimeEnvironment *const RR, void *const tPtr, const int64_t now) :
@@ -74,10 +76,11 @@ Node::Node(
     m_lastPeerPulse(0),
     m_lastHousekeepingRun(0),
     m_lastNetworkHousekeepingRun(0),
+    m_lastRootRank(0),
     m_now(now),
     m_online(false)
 {
-    ZT_SPEW("starting up...");
+    ZT_SPEW("Node starting up!");

     // Load this node's identity.
     uint64_t idtmp[2];
@@ -135,27 +138,22 @@ Node::Node(
         }
     }

-    // This constructs all the components of the ZeroTier core within a single contiguous memory container,
-    // which reduces memory fragmentation and may improve cache locality.
-    ZT_SPEW("initializing subsystem objects...");
+    // Create all the things!
     m_objects = new _NodeObjects(RR, tPtr, now);

     ZT_SPEW("node initialized!");
     postEvent(tPtr, ZT_EVENT_UP);
 }

 Node::~Node()
 {
-    ZT_SPEW("node destructor run");
+    ZT_SPEW("Node shutting down (destructor called).");

     m_networks_l.lock();
     m_networks_l.unlock();
     m_networks.clear();
-    m_networks_l.lock();
-    m_networks_l.unlock();

-    if (m_objects)
-        delete (_NodeObjects *)m_objects;
+    delete (_NodeObjects *)m_objects;

     // Let go of cached Buf objects. If other nodes happen to be running in this
     // same process space new Bufs will be allocated as needed, but this is almost
@@ -166,7 +164,6 @@ Node::~Node()

 void Node::shutdown(void *tPtr)
 {
-    ZT_SPEW("explicit shutdown() called");
     postEvent(tPtr, ZT_EVENT_DOWN);
     if (RR->topology)
         RR->topology->saveAll(tPtr);
@@ -200,7 +197,7 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
 {
     m_now = now;
     SharedPtr< Network > nw(this->network(nwid));
-    if (nw) {
+    if (likely(nw)) {
         RR->vl2->onLocalEthernet(tPtr, nw, MAC(sourceMac), MAC(destMac), etherType, vlanId, frameData, frameLength);
         return ZT_RESULT_OK;
     } else {
@@ -217,7 +214,6 @@ ZT_ResultCode Node::processBackgroundTasks(
     Mutex::Lock bl(m_backgroundTasksLock);

     try {
-        // Call peer pulse() method of all peers every ZT_PEER_PULSE_INTERVAL.
         if ((now - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
             m_lastPeerPulse = now;
             ZT_SPEW("running pulse() on each peer...");
@@ -239,7 +235,6 @@ ZT_ResultCode Node::processBackgroundTasks(
             }
         }

-        // Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
         if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
             m_lastHousekeepingRun = now;
             ZT_SPEW("running networking housekeeping...");
@@ -249,7 +244,6 @@ ZT_ResultCode Node::processBackgroundTasks(
             }
         }

-        // Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
         if ((now - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
             m_lastHousekeepingRun = now;
             ZT_SPEW("running housekeeping...");
@@ -269,6 +263,12 @@ ZT_ResultCode Node::processBackgroundTasks(
             RR->sa->clean(now);
         }

+        if ((now - m_lastRootRank) >= ZT_ROOT_RANK_PERIOD) {
+            m_lastRootRank = now;
+            ZT_SPEW("ranking roots...");
+            RR->topology->rankRoots(now);
+        }
+
         *nextBackgroundTaskDeadline = now + ZT_TIMER_TASK_INTERVAL;
     } catch (...) {
         return ZT_RESULT_FATAL_ERROR_INTERNAL;
@@ -286,7 +286,7 @@ ZT_ResultCode Node::join(
     Fingerprint fp;
     if (controllerFingerprint) {
         fp = *controllerFingerprint;
-        ZT_SPEW("joining network %.16llx with fingerprint %s", nwid, fp.toString().c_str());
+        ZT_SPEW("joining network %.16llx with controller fingerprint %s", nwid, fp.toString().c_str());
     } else {
         ZT_SPEW("joining network %.16llx", nwid);
     }
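Aside: the new root-ranking task added above reuses the same timer pattern as the peer pulse and housekeeping tasks: compare `now` against a last-run timestamp and fire when the period has elapsed. A minimal standalone sketch of that pattern, with an illustrative period constant standing in for ZT_ROOT_RANK_PERIOD (not ZeroTier code):

#include <cstdint>
#include <cstdio>

// Hypothetical period, mirroring the 5000 ms ZT_ROOT_RANK_PERIOD added above.
static const int64_t kRankPeriodMs = 5000;

struct PeriodicTask
{
    int64_t lastRun = 0;

    // Runs the task body at most once per kRankPeriodMs.
    void maybeRun(const int64_t nowMs)
    {
        if ((nowMs - lastRun) >= kRankPeriodMs) {
            lastRun = nowMs;
            std::printf("ranking roots at t=%lld ms\n", (long long)nowMs);
        }
    }
};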


@@ -409,6 +409,7 @@ private:
     int64_t m_lastPeerPulse;
     int64_t m_lastHousekeepingRun;
     int64_t m_lastNetworkHousekeepingRun;
+    int64_t m_lastRootRank;

     // This is the most recent value for time passed in via any of the core API methods.
     std::atomic< int64_t > m_now;


@@ -30,11 +30,11 @@ template< typename T >
 class SharedPtr : public TriviallyCopyable
 {
 public:
-    ZT_INLINE SharedPtr() noexcept: m_ptr((T *)0)
+    ZT_INLINE SharedPtr() noexcept: m_ptr(nullptr)
     {}

     explicit ZT_INLINE SharedPtr(T *obj) noexcept: m_ptr(obj)
-    { if (likely(obj != nullptr)) ++*const_cast<std::atomic< int > *>(&(obj->__refCount)); }
+    { if (likely(obj != nullptr)) const_cast<std::atomic< int > *>(&(obj->__refCount))->fetch_add(1, std::memory_order_relaxed); }

     ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept: m_ptr(sp._getAndInc())
     {}
@@ -42,7 +42,7 @@
     ZT_INLINE ~SharedPtr()
     {
         if (likely(m_ptr != nullptr)) {
-            if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
+            if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                 delete m_ptr;
         }
     }
@@ -50,9 +50,9 @@
     ZT_INLINE SharedPtr &operator=(const SharedPtr &sp)
     {
         if (likely(m_ptr != sp.m_ptr)) {
-            T *p = sp._getAndInc();
+            T *const p = sp._getAndInc();
             if (likely(m_ptr != nullptr)) {
-                if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
+                if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                     delete m_ptr;
             }
             m_ptr = p;
@@ -60,31 +60,16 @@
         return *this;
     }

-    /**
-     * Set to a naked pointer and increment its reference count
-     *
-     * This assumes this SharedPtr is NULL and that ptr is not a 'zombie.' No
-     * checks are performed.
-     *
-     * @param ptr Naked pointer to assign
-     */
     ZT_INLINE void set(T *ptr) noexcept
     {
-        zero();
-        ++*const_cast<std::atomic< int > *>(&(ptr->__refCount));
+        if (likely(m_ptr != nullptr)) {
+            if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
+                delete m_ptr;
+        }
+        const_cast<std::atomic< int > *>(&(ptr->__refCount))->fetch_add(1, std::memory_order_relaxed);
         m_ptr = ptr;
     }

-    /**
-     * Stupidly set this SharedPtr to 'ptr', ignoring current value and not incrementing reference counter
-     *
-     * This must only be used in code that knows what it's doing. :)
-     *
-     * @param ptr Pointer to set
-     */
-    ZT_INLINE void unsafeSet(T *ptr) noexcept
-    { m_ptr = ptr; }
-
     /**
      * Swap with another pointer 'for free' without ref count overhead
      *
@@ -92,7 +77,7 @@
      */
     ZT_INLINE void swap(SharedPtr &with) noexcept
     {
-        T *tmp = m_ptr;
+        T *const tmp = m_ptr;
         m_ptr = with.m_ptr;
         with.m_ptr = tmp;
     }
@@ -107,8 +92,8 @@
      */
     ZT_INLINE void move(SharedPtr &from)
     {
-        if (likely(m_ptr != nullptr)) {
-            if (--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0)
+        if (m_ptr != nullptr) {
+            if (const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0)
                 delete m_ptr;
         }
         m_ptr = from.m_ptr;
@@ -136,7 +121,7 @@
     ZT_INLINE void zero()
     {
         if (likely(m_ptr != nullptr)) {
-            if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
+            if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                 delete m_ptr;
             m_ptr = nullptr;
         }
@@ -203,8 +188,8 @@
 private:
     ZT_INLINE T *_getAndInc() const noexcept
     {
-        if (m_ptr)
-            ++*const_cast<std::atomic< int > *>(&(m_ptr->__refCount));
+        if (likely(m_ptr))
+            const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_add(1, std::memory_order_relaxed);
         return m_ptr;
     }
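Aside: SharedPtr< T > as shown above requires the pointed-to type to carry an intrusive std::atomic< int > member named __refCount (assumed here to start at zero). A hypothetical usage sketch built only from the methods visible in this diff (the Thing type is illustrative, not from the ZeroTier source):

#include <atomic>

// Hypothetical refcounted type; SharedPtr< T > expects a std::atomic< int > __refCount member.
struct Thing
{
    std::atomic< int > __refCount { 0 };
};

void example()
{
    SharedPtr< Thing > a(new Thing()); // explicit ctor increments __refCount
    SharedPtr< Thing > b(a);           // copy ctor increments it again
    b.zero();                          // drops one reference and nulls b
    SharedPtr< Thing > c;
    c.swap(a);                         // exchanges pointers with no refcount traffic
}                                      // c's destructor drops the final reference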

core/Spinlock.hpp (new file, 52 lines)

@@ -0,0 +1,52 @@
+/*
+ * Copyright (c)2013-2021 ZeroTier, Inc.
+ *
+ * Use of this software is governed by the Business Source License included
+ * in the LICENSE.TXT file in the project's root directory.
+ *
+ * Change Date: 2026-01-01
+ *
+ * On the date above, in accordance with the Business Source License, use
+ * of this software will be governed by version 2.0 of the Apache License.
+ */
+/****/
+
+#ifndef ZT_SPINLOCK_HPP
+#define ZT_SPINLOCK_HPP
+
+#include "OS.hpp"
+
+#include <thread>
+
+/**
+ * Simple spinlock
+ *
+ * This can be used in place of Mutex to lock things that are extremely fast
+ * to access. It should be used very sparingly.
+ */
+class Spinlock
+{
+public:
+    ZT_INLINE Spinlock() noexcept
+    { m_locked.clear(); }
+
+    ZT_INLINE void lock() noexcept
+    {
+        if (unlikely(m_locked.test_and_set(std::memory_order_acquire))) {
+            do {
+                std::this_thread::yield();
+            } while (m_locked.test_and_set(std::memory_order_acquire));
+        }
+    }
+
+    ZT_INLINE void unlock() noexcept
+    { m_locked.clear(std::memory_order_release); }
+
+private:
+    ZT_INLINE Spinlock(const Spinlock &) noexcept {}
+    ZT_INLINE const Spinlock &operator=(const Spinlock &) noexcept { return *this; }
+
+    std::atomic_flag m_locked;
+};
+
+#endif
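Aside: per the header comment, this lock is meant for critical sections that are only a few instructions long. A hypothetical caller could pair it with a tiny scoped guard like the sketch below (illustrative code, not part of this commit):

// Minimal scoped guard for the Spinlock above (names are illustrative).
class SpinlockGuard
{
public:
    explicit SpinlockGuard(Spinlock &l) noexcept : m_l(l) { m_l.lock(); }
    ~SpinlockGuard() { m_l.unlock(); }
private:
    SpinlockGuard(const SpinlockGuard &);
    Spinlock &m_l;
};

static Spinlock s_counterLock;
static long s_counter = 0;

void bumpCounter()
{
    SpinlockGuard g(s_counterLock); // held only for a few instructions
    ++s_counter;
}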


@@ -17,8 +17,7 @@
 namespace ZeroTier {

 Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now) :
-    RR(renv),
-    m_lastRankedRoots(0)
+    RR(renv)
 {
     char tmp[32];
     Dictionary d;
@@ -70,7 +69,7 @@ void Topology::allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr
             allPeers.push_back(i->second);
     }
     {
-        RWMutex::RLock l(m_roots_l);
+        Mutex::Lock l(m_roots_l);
         rootPeers = m_roots;
     }
 }
@@ -83,13 +82,23 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
         if (m_cleanCertificates(tPtr, now)) {
             m_writeTrustStore(tPtr);
             {
+                Mutex::Lock l2(m_roots_l);
                 RWMutex::Lock l3(m_peers_l);
-                RWMutex::Lock l2(m_roots_l);
                 m_updateRootPeers(tPtr, now);
             }
         }
     }

+    // Rank roots and get root lookup map.
+    Vector< uintptr_t > rootLookup;
+    {
+        Mutex::Lock l(m_roots_l);
+        m_rankRoots(now);
+        rootLookup.reserve(m_roots.size());
+        for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
+            rootLookup.push_back((uintptr_t)r->ptr());
+    }
+
     // Cleaning of peers and paths uses a two pass method to avoid write locking
     // m_peers or m_paths for any significant amount of time. This avoids pauses
     // on nodes with large numbers of peers or paths.
@@ -98,14 +107,6 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
     // peers to delete in read lock mode. Second pass: delete peers one by one,
     // acquiring hard write lock each time to avoid pauses.
     {
-        Vector< uintptr_t > rootLookup;
-        {
-            RWMutex::RLock l2(m_roots_l);
-            rootLookup.reserve(m_roots.size());
-            for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
-                rootLookup.push_back((uintptr_t)r->ptr());
-        }
-
         Vector< Address > toDelete;
         {
             RWMutex::RLock l1(m_peers_l);
@@ -223,8 +224,8 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
     m_certs[serial] = certEntry;

     if (refreshRootSets) {
+        Mutex::Lock l2(m_roots_l);
         RWMutex::Lock l3(m_peers_l);
-        RWMutex::Lock l2(m_roots_l);
         m_updateRootPeers(tPtr, now);
     }

@@ -260,8 +261,8 @@ unsigned int Topology::deleteCertificate(void *tPtr,const uint8_t serialNo[ZT_SH
         m_cleanCertificates(tPtr, now);
         m_writeTrustStore(tPtr);
         {
+            Mutex::Lock l2(m_roots_l);
             RWMutex::Lock l3(m_peers_l);
-            RWMutex::Lock l2(m_roots_l);
             m_updateRootPeers(tPtr, now);
         }
     }
@@ -306,8 +307,17 @@ struct p_RootRankingComparisonOperator

 void Topology::m_rankRoots(const int64_t now)
 {
     // assumes m_roots is locked
-    m_lastRankedRoots = now;
     std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
+
+    if (unlikely(m_roots.empty())) {
+        l_bestRoot.lock();
+        m_bestRoot.zero();
+        l_bestRoot.unlock();
+    } else {
+        l_bestRoot.lock();
+        m_bestRoot = m_roots.front();
+        l_bestRoot.unlock();
+    }
 }

 void Topology::m_eraseCertificate(void *tPtr, const SharedPtr< const Certificate > &cert, const SHA384Hash *uniqueIdHash)


@@ -28,6 +28,7 @@
 #include "FCV.hpp"
 #include "Certificate.hpp"
 #include "Containers.hpp"
+#include "Spinlock.hpp"

 namespace ZeroTier {
@@ -94,18 +95,28 @@
     }

     /**
-     * @return Current best root (lowest latency active root)
+     * Get current best root
+     *
+     * @return Root peer or nullptr if none
      */
-    ZT_INLINE SharedPtr< Peer > root(const int64_t now)
+    ZT_INLINE SharedPtr< Peer > root()
     {
-        RWMutex::RMaybeWLock l(m_roots_l);
-        if (unlikely(m_roots.empty()))
-            return SharedPtr< Peer >();
-        if (unlikely((now - m_lastRankedRoots) > (ZT_PATH_KEEPALIVE_PERIOD / 2))) {
-            l.writing();
-            m_rankRoots(now);
-        }
-        return m_roots.front();
+        l_bestRoot.lock(); // spinlock
+        SharedPtr< Peer > r(m_bestRoot);
+        l_bestRoot.unlock();
+        return r;
+    }
+
+    /**
+     * Get current best root by setting a result parameter
+     *
+     * @param root Set to best root or nullptr if none
+     */
+    ZT_INLINE void root(SharedPtr< Peer > &root)
+    {
+        l_bestRoot.lock(); // spinlock
+        root = m_bestRoot;
+        l_bestRoot.unlock();
     }

     /**
@@ -114,13 +125,24 @@
     void allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const;

     /**
-     * Do periodic tasks such as database cleanup
+     * Do periodic tasks such as database cleanup, cert cleanup, root ranking, etc.
      *
      * @param tPtr Thread pointer
      * @param now Current time
      */
     void doPeriodicTasks(void *tPtr, int64_t now);

+    /**
+     * Rank root servers in descending order of quality
+     *
+     * @param now Current time
+     */
+    ZT_INLINE void rankRoots(int64_t now)
+    {
+        Mutex::Lock l(m_roots_l);
+        m_rankRoots(now);
+    }
+
     /**
      * Save all currently known peers to data store
      *
@@ -180,7 +202,6 @@ private:
     const RuntimeEnvironment *const RR;

-    int64_t m_lastRankedRoots;
     Vector< SharedPtr< Peer > > m_roots;
     Map< Address, SharedPtr< Peer > > m_peers;
     Map< UniqueID, SharedPtr< Path > > m_paths;
@@ -198,10 +219,13 @@ private:
     Map< SHA384Hash, p_CertEntry > m_certsBySubjectUniqueID;
     Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > > m_certsBySubjectIdentity;

-    RWMutex m_paths_l; // m_paths
     RWMutex m_peers_l; // m_peers
-    RWMutex m_roots_l; // m_roots and m_lastRankedRoots
+    RWMutex m_paths_l; // m_paths
+    Mutex m_roots_l; // m_roots and m_lastRankedRoots
     Mutex m_certs_l; // m_certs and friends
+
+    SharedPtr< Peer > m_bestRoot;
+    Spinlock l_bestRoot;
 };

 } // namespace ZeroTier
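Aside: the net effect of the Topology changes is that ranking now happens on the periodic timer (rankRoots(), driven by ZT_ROOT_RANK_PERIOD in Node), while root() becomes a cheap copy of a cached best pointer taken under the new spinlock. A standalone sketch of that cache-then-copy pattern, using std::mutex and std::shared_ptr as stand-ins for the ZeroTier lock and pointer types (illustrative, not the actual classes):

#include <algorithm>
#include <memory>
#include <mutex>
#include <vector>

// Sketch: a ranked list guarded by one lock, plus a cached "best" entry
// guarded by a cheaper lock so readers never touch the main container.
struct BestCache
{
    std::mutex listLock;                           // stands in for m_roots_l
    std::vector< std::shared_ptr< int > > ranked;

    std::mutex bestLock;                           // stands in for the Spinlock l_bestRoot
    std::shared_ptr< int > best;

    void rank()                                    // called periodically, like rankRoots()
    {
        std::lock_guard< std::mutex > l(listLock);
        std::sort(ranked.begin(), ranked.end(),
            [](const std::shared_ptr< int > &a, const std::shared_ptr< int > &b) { return *a < *b; });
        std::lock_guard< std::mutex > b(bestLock);
        best = ranked.empty() ? std::shared_ptr< int >() : ranked.front();
    }

    std::shared_ptr< int > getBest()               // called on the hot path, like root()
    {
        std::lock_guard< std::mutex > b(bestLock);
        return best;
    }
};

Moving the sort off the hot path means callers of root() never block behind ranking; they only pay for one brief lock and a pointer copy.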


@@ -13,6 +13,7 @@
 use std::cmp::Ordering;

+#[derive(Copy)]
 pub struct Address(pub u64);

 impl From<&[u8]> for Address {


@@ -13,6 +13,7 @@
 use std::cmp::Ordering;

+#[derive(Copy)]
 pub struct MAC(pub u64);

 impl ToString for MAC {


@@ -13,6 +13,7 @@
 use std::cmp::Ordering;

+#[derive(Copy)]
 pub struct NetworkId(pub u64);

 impl NetworkId {


@@ -383,10 +383,8 @@ impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 's
     /// (if any) of this function is returned, or None if we are not joined to
     /// this network.
     #[inline(always)]
-    pub fn network<F: FnOnce(&N) -> R, R>(&self, nwid: NetworkId, f: F) -> Option<R> {
-        self.intl.networks_by_id.lock().unwrap().get(&nwid.0).map_or_else(|| {
-            None
-        }, |nw| {
+    pub fn with_network<F: FnOnce(&N) -> R, R>(&self, nwid: NetworkId, f: F) -> Option<R> {
+        self.intl.networks_by_id.lock().unwrap().get(&nwid.0).map_or(None, |nw| {
             Some(f(&*nw))
         })
     }
@@ -507,6 +505,14 @@ impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 's
     }
 }

+impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + Clone + 'static> Node<T, N> {
+    /// Get a copy of this network's associated object.
+    /// This is only available if N implements Clone.
+    pub fn network(&self, nwid: NetworkId) -> Option<N> {
+        self.intl.networks_by_id.lock().unwrap().get(&nwid.0).map_or(None, |nw| { Some(nw.as_ref().get_ref().clone()) })
+    }
+}
+
 unsafe impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 'static> Sync for Node<T, N> {}
 unsafe impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 'static> Send for Node<T, N> {}


@@ -188,12 +188,6 @@ unsafe impl Send for Service {}
 unsafe impl Sync for Service {}

-impl Drop for Service {
-    fn drop(&mut self) {
-        self.log.debug("Service::drop()");
-    }
-}
-
 async fn run_async(store: &Arc<Store>, auth_token: String, log: &Arc<Log>, local_config: Arc<LocalConfig>) -> i32 {
     let mut process_exit_value: i32 = 0;