Cleanup and optimization.

commit 8474508412
parent 88181c491c
13 changed files with 168 additions and 87 deletions
@@ -50,6 +50,7 @@ set(core_headers
    SelfAwareness.hpp
    SHA512.hpp
    SharedPtr.hpp
    Spinlock.hpp
    SymmetricKey.hpp
    TagCredential.hpp
    Topology.hpp

@@ -110,6 +110,11 @@
 */
#define ZT_NETWORK_HOUSEKEEPING_PERIOD 30000

/**
 * How often to rank roots
 */
#define ZT_ROOT_RANK_PERIOD 5000

/**
 * Delay between WHOIS retries in ms
 */
@@ -31,7 +31,9 @@ namespace ZeroTier {

namespace {

// Structure containing all the core objects for a ZeroTier node to reduce memory allocations.
/*
 * All the core objects of ZeroTier in a single struct to reduce allocations.
 */
struct _NodeObjects
{
    ZT_INLINE _NodeObjects(RuntimeEnvironment *const RR, void *const tPtr, const int64_t now) :

@@ -74,10 +76,11 @@ Node::Node(
    m_lastPeerPulse(0),
    m_lastHousekeepingRun(0),
    m_lastNetworkHousekeepingRun(0),
    m_lastRootRank(0),
    m_now(now),
    m_online(false)
{
    ZT_SPEW("starting up...");
    ZT_SPEW("Node starting up!");

    // Load this node's identity.
    uint64_t idtmp[2];
@@ -135,27 +138,22 @@ Node::Node(
        }
    }

    // This constructs all the components of the ZeroTier core within a single contiguous memory container,
    // which reduces memory fragmentation and may improve cache locality.
    ZT_SPEW("initializing subsystem objects...");
    // Create all the things!
    m_objects = new _NodeObjects(RR, tPtr, now);
    ZT_SPEW("node initialized!");

    ZT_SPEW("node initialized!");
    postEvent(tPtr, ZT_EVENT_UP);
}

Node::~Node()
{
    ZT_SPEW("node destructor run");
    ZT_SPEW("Node shutting down (destructor called).");

    m_networks_l.lock();
    m_networks_l.unlock();
    m_networks.clear();
    m_networks_l.lock();
    m_networks_l.unlock();

    if (m_objects)
        delete (_NodeObjects *)m_objects;
    delete (_NodeObjects *)m_objects;

    // Let go of cached Buf objects. If other nodes happen to be running in this
    // same process space new Bufs will be allocated as needed, but this is almost
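The _NodeObjects comments above describe the allocation strategy: every long-lived core subsystem lives inside one struct that is heap-allocated exactly once, so the node performs a single allocation instead of one per component. A minimal standalone sketch of that pattern follows; the subsystem types and names here are illustrative placeholders, not ZeroTier's real classes.

```cpp
#include <cstdio>

// Placeholder subsystems standing in for ZeroTier's Trace, Topology, etc.
struct Trace { };
struct Topology { };
struct SelfAwareness { };

// One struct holds every long-lived subsystem, so a single `new` yields one
// contiguous block rather than a separate heap allocation per component.
struct CoreObjects
{
    Trace trace;
    Topology topology;
    SelfAwareness selfAwareness;
};

class NodeSketch
{
public:
    NodeSketch() : m_objects(new CoreObjects()) { std::printf("core objects allocated once\n"); }
    ~NodeSketch() { delete m_objects; } // deleting a null pointer would also be safe, as in the patch
private:
    CoreObjects *m_objects;
};

int main()
{
    NodeSketch node; // all subsystems constructed inside one allocation
    return 0;
}
```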
@@ -166,7 +164,6 @@ Node::~Node()

void Node::shutdown(void *tPtr)
{
    ZT_SPEW("explicit shutdown() called");
    postEvent(tPtr, ZT_EVENT_DOWN);
    if (RR->topology)
        RR->topology->saveAll(tPtr);

@@ -200,7 +197,7 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
{
    m_now = now;
    SharedPtr< Network > nw(this->network(nwid));
    if (nw) {
    if (likely(nw)) {
        RR->vl2->onLocalEthernet(tPtr, nw, MAC(sourceMac), MAC(destMac), etherType, vlanId, frameData, frameLength);
        return ZT_RESULT_OK;
    } else {

@@ -217,7 +214,6 @@ ZT_ResultCode Node::processBackgroundTasks(
    Mutex::Lock bl(m_backgroundTasksLock);

    try {
        // Call peer pulse() method of all peers every ZT_PEER_PULSE_INTERVAL.
        if ((now - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
            m_lastPeerPulse = now;
            ZT_SPEW("running pulse() on each peer...");

@@ -239,7 +235,6 @@ ZT_ResultCode Node::processBackgroundTasks(
            }
        }

        // Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
        if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
            m_lastHousekeepingRun = now;
            ZT_SPEW("running networking housekeeping...");

@@ -249,7 +244,6 @@ ZT_ResultCode Node::processBackgroundTasks(
            }
        }

        // Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
        if ((now - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
            m_lastHousekeepingRun = now;
            ZT_SPEW("running housekeeping...");
@@ -269,6 +263,12 @@ ZT_ResultCode Node::processBackgroundTasks(
            RR->sa->clean(now);
        }

        if ((now - m_lastRootRank) >= ZT_ROOT_RANK_PERIOD) {
            m_lastRootRank = now;
            ZT_SPEW("ranking roots...");
            RR->topology->rankRoots(now);
        }

        *nextBackgroundTaskDeadline = now + ZT_TIMER_TASK_INTERVAL;
    } catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
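The background-task hunks above all follow one scheduling pattern: each task records the time it last ran and fires again once its period has elapsed, and a single deadline is handed back to the caller. A small self-contained sketch of that pattern, with placeholder period values and task bodies rather than the real ZT_* constants:

```cpp
#include <cstdint>

// Placeholder periods in milliseconds; the real values are the ZT_* constants above.
constexpr int64_t PULSE_PERIOD = 8000;
constexpr int64_t ROOT_RANK_PERIOD = 5000;
constexpr int64_t TIMER_TASK_INTERVAL = 1000;

struct BackgroundTasks
{
    int64_t lastPulse = 0;
    int64_t lastRootRank = 0;

    // Runs whichever tasks are due and returns the next wakeup deadline.
    int64_t run(const int64_t now)
    {
        if ((now - lastPulse) >= PULSE_PERIOD) {
            lastPulse = now;
            // ...pulse all peers here...
        }
        if ((now - lastRootRank) >= ROOT_RANK_PERIOD) {
            lastRootRank = now;
            // ...re-rank roots here...
        }
        return now + TIMER_TASK_INTERVAL;
    }
};
```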
@@ -286,7 +286,7 @@ ZT_ResultCode Node::join(
    Fingerprint fp;
    if (controllerFingerprint) {
        fp = *controllerFingerprint;
        ZT_SPEW("joining network %.16llx with fingerprint %s", nwid, fp.toString().c_str());
        ZT_SPEW("joining network %.16llx with controller fingerprint %s", nwid, fp.toString().c_str());
    } else {
        ZT_SPEW("joining network %.16llx", nwid);
    }

@@ -409,6 +409,7 @@ private:
    int64_t m_lastPeerPulse;
    int64_t m_lastHousekeepingRun;
    int64_t m_lastNetworkHousekeepingRun;
    int64_t m_lastRootRank;

    // This is the most recent value for time passed in via any of the core API methods.
    std::atomic< int64_t > m_now;
@@ -30,11 +30,11 @@ template< typename T >
class SharedPtr : public TriviallyCopyable
{
public:
    ZT_INLINE SharedPtr() noexcept: m_ptr((T *)0)
    ZT_INLINE SharedPtr() noexcept: m_ptr(nullptr)
    {}

    explicit ZT_INLINE SharedPtr(T *obj) noexcept: m_ptr(obj)
    { if (likely(obj != nullptr)) ++*const_cast<std::atomic< int > *>(&(obj->__refCount)); }
    { if (likely(obj != nullptr)) const_cast<std::atomic< int > *>(&(obj->__refCount))->fetch_add(1, std::memory_order_relaxed); }

    ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept: m_ptr(sp._getAndInc())
    {}

@@ -42,7 +42,7 @@ public:
    ZT_INLINE ~SharedPtr()
    {
        if (likely(m_ptr != nullptr)) {
            if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
            if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                delete m_ptr;
        }
    }

@@ -50,9 +50,9 @@ public:
    ZT_INLINE SharedPtr &operator=(const SharedPtr &sp)
    {
        if (likely(m_ptr != sp.m_ptr)) {
            T *p = sp._getAndInc();
            T *const p = sp._getAndInc();
            if (likely(m_ptr != nullptr)) {
                if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
                if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                    delete m_ptr;
            }
            m_ptr = p;

@@ -60,31 +60,16 @@ public:
        return *this;
    }

    /**
     * Set to a naked pointer and increment its reference count
     *
     * This assumes this SharedPtr is NULL and that ptr is not a 'zombie.' No
     * checks are performed.
     *
     * @param ptr Naked pointer to assign
     */
    ZT_INLINE void set(T *ptr) noexcept
    {
        zero();
        ++*const_cast<std::atomic< int > *>(&(ptr->__refCount));
        if (likely(m_ptr != nullptr)) {
            if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                delete m_ptr;
        }
        const_cast<std::atomic< int > *>(&(ptr->__refCount))->fetch_add(1, std::memory_order_relaxed);
        m_ptr = ptr;
    }

    /**
     * Stupidly set this SharedPtr to 'ptr', ignoring current value and not incrementing reference counter
     *
     * This must only be used in code that knows what it's doing. :)
     *
     * @param ptr Pointer to set
     */
    ZT_INLINE void unsafeSet(T *ptr) noexcept
    { m_ptr = ptr; }

    /**
     * Swap with another pointer 'for free' without ref count overhead
     *

@@ -92,7 +77,7 @@ public:
     */
    ZT_INLINE void swap(SharedPtr &with) noexcept
    {
        T *tmp = m_ptr;
        T *const tmp = m_ptr;
        m_ptr = with.m_ptr;
        with.m_ptr = tmp;
    }

@@ -107,8 +92,8 @@ public:
     */
    ZT_INLINE void move(SharedPtr &from)
    {
        if (likely(m_ptr != nullptr)) {
            if (--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0)
        if (m_ptr != nullptr) {
            if (const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0)
                delete m_ptr;
        }
        m_ptr = from.m_ptr;

@@ -136,7 +121,7 @@ public:
    ZT_INLINE void zero()
    {
        if (likely(m_ptr != nullptr)) {
            if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
            if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_relaxed) <= 0))
                delete m_ptr;
            m_ptr = nullptr;
        }

@@ -203,8 +188,8 @@
private:
    ZT_INLINE T *_getAndInc() const noexcept
    {
        if (m_ptr)
            ++*const_cast<std::atomic< int > *>(&(m_ptr->__refCount));
        if (likely(m_ptr))
            const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_add(1, std::memory_order_relaxed);
        return m_ptr;
    }
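The SharedPtr hunks above replace the ++ and -- operators on the intrusive __refCount atomic with explicit fetch_add and fetch_sub calls using std::memory_order_relaxed. Below is a minimal standalone sketch of the same intrusive-refcount idea; the names are illustrative, not ZeroTier's API, and the decrement uses acq_rel ordering, a common conservative choice, whereas the patch itself uses relaxed ordering throughout. Note that fetch_sub returns the value the counter held before the subtraction, so a from-scratch implementation typically tests that previous value for 1 to detect the last reference.

```cpp
#include <atomic>
#include <cstdio>

// Illustrative intrusive refcounted base; ZeroTier's real classes expose a
// __refCount member directly rather than inheriting from a helper like this.
struct RefCounted
{
    mutable std::atomic<int> refCount { 0 };
};

template <typename T>
class IntrusivePtr
{
public:
    explicit IntrusivePtr(T *obj) noexcept : m_ptr(obj)
    {
        if (obj)
            obj->refCount.fetch_add(1, std::memory_order_relaxed); // increments can be relaxed
    }

    ~IntrusivePtr()
    {
        if (m_ptr) {
            // fetch_sub returns the previous value: 1 means this was the last reference.
            if (m_ptr->refCount.fetch_sub(1, std::memory_order_acq_rel) == 1)
                delete m_ptr;
        }
    }

    IntrusivePtr(const IntrusivePtr &) = delete;
    IntrusivePtr &operator=(const IntrusivePtr &) = delete;

    T *operator->() const noexcept { return m_ptr; }

private:
    T *m_ptr;
};

struct Thing : RefCounted
{
    void hello() const { std::printf("hello\n"); }
};

int main()
{
    IntrusivePtr<Thing> p(new Thing());
    p->hello();
    return 0; // Thing is deleted when p goes out of scope
}
```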
core/Spinlock.hpp (new file, 52 lines)

@@ -0,0 +1,52 @@
/*
 * Copyright (c)2013-2021 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2026-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#ifndef ZT_SPINLOCK_HPP
#define ZT_SPINLOCK_HPP

#include "OS.hpp"

#include <thread>

/**
 * Simple spinlock
 *
 * This can be used in place of Mutex to lock things that are extremely fast
 * to access. It should be used very sparingly.
 */
class Spinlock
{
public:
    ZT_INLINE Spinlock() noexcept
    { m_locked.clear(); }

    ZT_INLINE void lock() noexcept
    {
        if (unlikely(m_locked.test_and_set(std::memory_order_acquire))) {
            do {
                std::this_thread::yield();
            } while (m_locked.test_and_set(std::memory_order_acquire));
        }
    }

    ZT_INLINE void unlock() noexcept
    { m_locked.clear(std::memory_order_release); }

private:
    ZT_INLINE Spinlock(const Spinlock &) noexcept {}
    ZT_INLINE const Spinlock &operator=(const Spinlock &) noexcept { return *this; }

    std::atomic_flag m_locked;
};

#endif
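The new Spinlock spins on a std::atomic_flag with acquire/release ordering and yields the thread while contended, which keeps it cheap for the very short critical sections it is meant to guard. A small usage sketch follows; the scoped guard and the counter example are illustrative additions, not part of the patch, and the ZT_INLINE/unlikely macros are dropped so it compiles on its own.

```cpp
#include <atomic>
#include <thread>

// Trimmed copy of the Spinlock above, without the ZeroTier macros.
class Spinlock
{
public:
    Spinlock() noexcept { m_locked.clear(); }
    void lock() noexcept
    {
        while (m_locked.test_and_set(std::memory_order_acquire))
            std::this_thread::yield(); // back off instead of burning the CPU
    }
    void unlock() noexcept { m_locked.clear(std::memory_order_release); }
private:
    std::atomic_flag m_locked;
};

// Illustrative scoped guard; the patch itself calls lock()/unlock() directly.
struct SpinGuard
{
    explicit SpinGuard(Spinlock &l) : m_l(l) { m_l.lock(); }
    ~SpinGuard() { m_l.unlock(); }
    Spinlock &m_l;
};

int counter = 0;
Spinlock counterLock;

void bump()
{
    SpinGuard g(counterLock); // hold the lock only for a very short critical section
    ++counter;
}
```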
@@ -17,8 +17,7 @@
namespace ZeroTier {

Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now) :
    RR(renv),
    m_lastRankedRoots(0)
    RR(renv)
{
    char tmp[32];
    Dictionary d;

@@ -70,7 +69,7 @@ void Topology::allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr
        allPeers.push_back(i->second);
    }
    {
        RWMutex::RLock l(m_roots_l);
        Mutex::Lock l(m_roots_l);
        rootPeers = m_roots;
    }
}

@@ -83,13 +82,23 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
        if (m_cleanCertificates(tPtr, now)) {
            m_writeTrustStore(tPtr);
            {
                Mutex::Lock l2(m_roots_l);
                RWMutex::Lock l3(m_peers_l);
                RWMutex::Lock l2(m_roots_l);
                m_updateRootPeers(tPtr, now);
            }
        }
    }

    // Rank roots and get root lookup map.
    Vector< uintptr_t > rootLookup;
    {
        Mutex::Lock l(m_roots_l);
        m_rankRoots(now);
        rootLookup.reserve(m_roots.size());
        for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
            rootLookup.push_back((uintptr_t)r->ptr());
    }

    // Cleaning of peers and paths uses a two pass method to avoid write locking
    // m_peers or m_paths for any significant amount of time. This avoids pauses
    // on nodes with large numbers of peers or paths.
@@ -98,14 +107,6 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
    // peers to delete in read lock mode. Second pass: delete peers one by one,
    // acquiring hard write lock each time to avoid pauses.
    {
        Vector< uintptr_t > rootLookup;
        {
            RWMutex::RLock l2(m_roots_l);
            rootLookup.reserve(m_roots.size());
            for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
                rootLookup.push_back((uintptr_t)r->ptr());
        }

        Vector< Address > toDelete;
        {
            RWMutex::RLock l1(m_peers_l);
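The comments above describe the two-pass cleanup used for peers and paths: deletion candidates are gathered while holding only a read lock, then removed one at a time, taking the write lock only for the duration of each individual erase. Here is a compact sketch of that pattern using standard-library stand-ins (std::shared_mutex and std::map in place of ZeroTier's RWMutex and Map); the stale-peer criterion is invented for the example.

```cpp
#include <cstdint>
#include <map>
#include <shared_mutex>
#include <vector>

using Address = uint64_t;
struct Peer { int64_t lastSeen = 0; };

std::map<Address, Peer> peers;
std::shared_mutex peersLock;

void cleanStalePeers(const int64_t now, const int64_t timeout)
{
    // Pass 1: find deletion candidates under a shared (read) lock.
    std::vector<Address> toDelete;
    {
        std::shared_lock<std::shared_mutex> rl(peersLock);
        for (const auto &kv : peers) {
            if ((now - kv.second.lastSeen) > timeout)
                toDelete.push_back(kv.first);
        }
    }

    // Pass 2: erase one entry per exclusive (write) lock acquisition, so other
    // threads are never blocked for longer than a single erase.
    for (const Address a : toDelete) {
        std::unique_lock<std::shared_mutex> wl(peersLock);
        peers.erase(a);
    }
}
```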
@@ -223,8 +224,8 @@ ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert
        m_certs[serial] = certEntry;

        if (refreshRootSets) {
            Mutex::Lock l2(m_roots_l);
            RWMutex::Lock l3(m_peers_l);
            RWMutex::Lock l2(m_roots_l);
            m_updateRootPeers(tPtr, now);
        }

@@ -260,8 +261,8 @@ unsigned int Topology::deleteCertificate(void *tPtr,const uint8_t serialNo[ZT_SH
        m_cleanCertificates(tPtr, now);
        m_writeTrustStore(tPtr);
        {
            Mutex::Lock l2(m_roots_l);
            RWMutex::Lock l3(m_peers_l);
            RWMutex::Lock l2(m_roots_l);
            m_updateRootPeers(tPtr, now);
        }
    }

@@ -306,8 +307,17 @@ struct p_RootRankingComparisonOperator
void Topology::m_rankRoots(const int64_t now)
{
    // assumes m_roots is locked
    m_lastRankedRoots = now;
    std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());

    if (unlikely(m_roots.empty())) {
        l_bestRoot.lock();
        m_bestRoot.zero();
        l_bestRoot.unlock();
    } else {
        l_bestRoot.lock();
        m_bestRoot = m_roots.front();
        l_bestRoot.unlock();
    }
}

void Topology::m_eraseCertificate(void *tPtr, const SharedPtr< const Certificate > &cert, const SHA384Hash *uniqueIdHash)

@@ -28,6 +28,7 @@
#include "FCV.hpp"
#include "Certificate.hpp"
#include "Containers.hpp"
#include "Spinlock.hpp"

namespace ZeroTier {

@@ -94,18 +95,28 @@ public:
    }

    /**
     * @return Current best root (lowest latency active root)
     * Get current best root
     *
     * @return Root peer or nullptr if none
     */
    ZT_INLINE SharedPtr< Peer > root(const int64_t now)
    ZT_INLINE SharedPtr< Peer > root()
    {
        RWMutex::RMaybeWLock l(m_roots_l);
        if (unlikely(m_roots.empty()))
            return SharedPtr< Peer >();
        if (unlikely((now - m_lastRankedRoots) > (ZT_PATH_KEEPALIVE_PERIOD / 2))) {
            l.writing();
            m_rankRoots(now);
        }
        return m_roots.front();
        l_bestRoot.lock(); // spinlock
        SharedPtr< Peer > r(m_bestRoot);
        l_bestRoot.unlock();
        return r;
    }

    /**
     * Get current best root by setting a result parameter
     *
     * @param root Set to best root or nullptr if none
     */
    ZT_INLINE void root(SharedPtr< Peer > &root)
    {
        l_bestRoot.lock(); // spinlock
        root = m_bestRoot;
        l_bestRoot.unlock();
    }

    /**
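The reworked root() above no longer ranks roots on demand under an RWMutex; ranking happens on the periodic-task path, and the winner is cached in m_bestRoot so that callers only copy a smart pointer while holding the l_bestRoot spinlock for a few instructions. A standalone sketch of that cache-the-best-entry pattern, using std::shared_ptr and std::mutex in place of ZeroTier's SharedPtr and Spinlock, with an invented latency-based ranking:

```cpp
#include <algorithm>
#include <memory>
#include <mutex>
#include <vector>

struct Peer { int latency = 0; };

class RootCache
{
public:
    // Called from the periodic task path: rank the roots and cache the winner.
    void rank(std::vector<std::shared_ptr<Peer>> &roots)
    {
        std::sort(roots.begin(), roots.end(),
                  [](const std::shared_ptr<Peer> &a, const std::shared_ptr<Peer> &b) {
                      return a->latency < b->latency;
                  });
        std::lock_guard<std::mutex> l(m_bestLock);
        m_best = roots.empty() ? nullptr : roots.front();
    }

    // Called from hot paths: just copy the cached pointer under a very short lock.
    std::shared_ptr<Peer> best() const
    {
        std::lock_guard<std::mutex> l(m_bestLock);
        return m_best;
    }

private:
    mutable std::mutex m_bestLock;
    std::shared_ptr<Peer> m_best;
};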
@@ -114,13 +125,24 @@ public:
    void allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const;

    /**
     * Do periodic tasks such as database cleanup
     * Do periodic tasks such as database cleanup, cert cleanup, root ranking, etc.
     *
     * @param tPtr Thread pointer
     * @param now Current time
     */
    void doPeriodicTasks(void *tPtr, int64_t now);

    /**
     * Rank root servers in descending order of quality
     *
     * @param now Current time
     */
    ZT_INLINE void rankRoots(int64_t now)
    {
        Mutex::Lock l(m_roots_l);
        m_rankRoots(now);
    }

    /**
     * Save all currently known peers to data store
     *

@@ -180,7 +202,6 @@ private:

    const RuntimeEnvironment *const RR;

    int64_t m_lastRankedRoots;
    Vector< SharedPtr< Peer > > m_roots;
    Map< Address, SharedPtr< Peer > > m_peers;
    Map< UniqueID, SharedPtr< Path > > m_paths;

@@ -198,10 +219,13 @@ private:
    Map< SHA384Hash, p_CertEntry > m_certsBySubjectUniqueID;
    Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > > m_certsBySubjectIdentity;

    RWMutex m_paths_l; // m_paths
    RWMutex m_peers_l; // m_peers
    RWMutex m_roots_l; // m_roots and m_lastRankedRoots
    RWMutex m_paths_l; // m_paths
    Mutex m_roots_l; // m_roots and m_lastRankedRoots
    Mutex m_certs_l; // m_certs and friends

    SharedPtr< Peer > m_bestRoot;
    Spinlock l_bestRoot;
};

} // namespace ZeroTier
@@ -13,6 +13,7 @@

use std::cmp::Ordering;

#[derive(Copy)]
pub struct Address(pub u64);

impl From<&[u8]> for Address {

@@ -13,6 +13,7 @@

use std::cmp::Ordering;

#[derive(Copy)]
pub struct MAC(pub u64);

impl ToString for MAC {

@@ -13,6 +13,7 @@

use std::cmp::Ordering;

#[derive(Copy)]
pub struct NetworkId(pub u64);

impl NetworkId {

@@ -383,10 +383,8 @@ impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 's
    /// (if any) of this function is returned, or None if we are not joined to
    /// this network.
    #[inline(always)]
    pub fn network<F: FnOnce(&N) -> R, R>(&self, nwid: NetworkId, f: F) -> Option<R> {
        self.intl.networks_by_id.lock().unwrap().get(&nwid.0).map_or_else(|| {
            None
        }, |nw| {
    pub fn with_network<F: FnOnce(&N) -> R, R>(&self, nwid: NetworkId, f: F) -> Option<R> {
        self.intl.networks_by_id.lock().unwrap().get(&nwid.0).map_or(None, |nw| {
            Some(f(&*nw))
        })
    }

@@ -507,6 +505,14 @@ impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 's
    }
}

impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + Clone + 'static> Node<T, N> {
    /// Get a copy of this network's associated object.
    /// This is only available if N implements Clone.
    pub fn network(&self, nwid: NetworkId) -> Option<N> {
        self.intl.networks_by_id.lock().unwrap().get(&nwid.0).map_or(None, |nw| { Some(nw.as_ref().get_ref().clone()) })
    }
}

unsafe impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 'static> Sync for Node<T, N> {}

unsafe impl<T: NodeEventHandler<N> + Sync + Send + Clone + 'static, N: Sync + Send + 'static> Send for Node<T, N> {}

@@ -188,12 +188,6 @@ unsafe impl Send for Service {}

unsafe impl Sync for Service {}

impl Drop for Service {
    fn drop(&mut self) {
        self.log.debug("Service::drop()");
    }
}

async fn run_async(store: &Arc<Store>, auth_token: String, log: &Arc<Log>, local_config: Arc<LocalConfig>) -> i32 {
    let mut process_exit_value: i32 = 0;