From cfd101c9b85b20e5911445998a6f040089e3e414 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 11:50:12 -0700 Subject: [PATCH 01/12] Add entries() to go with keys() for future use. --- node/Hashtable.hpp | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp index 5076751dd..6f7541c42 100644 --- a/node/Hashtable.hpp +++ b/node/Hashtable.hpp @@ -30,6 +30,8 @@ #include #include +#include +#include namespace ZeroTier { @@ -196,6 +198,24 @@ public: return k; } + /** + * @return Vector of all entries (pairs of K,V) + */ + inline typename std::vector< std::pair > entries() + { + typename std::vector< std::pair > k; + if (_s) { + for(unsigned long i=0;i<_bc;++i) { + _Bucket *b = _t[i]; + while (b) { + k.push_back(std::pair(b->k,b->v)); + b = b->next; + } + } + } + return k; + } + /** * @param k Key * @return Pointer to value or NULL if not found From 3a959a7763b44ffcddce557167169150a28b9059 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 12:14:21 -0700 Subject: [PATCH 02/12] Swap out std::map<> for Hashtable<> for main peer database in Topology. (ongoing std::map-ectomy) --- node/Address.hpp | 14 +++++++++++--- node/Hashtable.hpp | 6 ++++-- node/Node.cpp | 5 +++-- node/Topology.cpp | 18 ++++++++++-------- node/Topology.hpp | 14 +++++++++----- 5 files changed, 37 insertions(+), 20 deletions(-) diff --git a/node/Address.hpp b/node/Address.hpp index 137e4f4f3..0b38ec626 100644 --- a/node/Address.hpp +++ b/node/Address.hpp @@ -166,6 +166,15 @@ public: return _a; } + /** + * @return Hash code for use with Hashtable + */ + inline unsigned long hashCode() const + throw() + { + return (unsigned long)_a; + } + /** * @return Hexadecimal string */ @@ -197,11 +206,11 @@ public: /** * Check if this address is reserved - * + * * The all-zero null address and any address beginning with 0xff are * reserved. (0xff is reserved for future use to designate possibly * longer addresses, addresses based on IPv6 innards, etc.) 
- * + * * @return True if address is reserved and may not be used */ inline bool isReserved() const @@ -230,4 +239,3 @@ private: } // namespace ZeroTier #endif - diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp index 6f7541c42..84b5be0ef 100644 --- a/node/Hashtable.hpp +++ b/node/Hashtable.hpp @@ -183,10 +183,11 @@ public: /** * @return Vector of all keys */ - inline typename std::vector keys() + inline typename std::vector keys() const { typename std::vector k; if (_s) { + k.reserve(_s); for(unsigned long i=0;i<_bc;++i) { _Bucket *b = _t[i]; while (b) { @@ -201,10 +202,11 @@ public: /** * @return Vector of all entries (pairs of K,V) */ - inline typename std::vector< std::pair > entries() + inline typename std::vector< std::pair > entries() const { typename std::vector< std::pair > k; if (_s) { + k.reserve(_s); for(unsigned long i=0;i<_bc;++i) { _Bucket *b = _t[i]; while (b) { diff --git a/node/Node.cpp b/node/Node.cpp index 534c085d0..c8c50d66e 100644 --- a/node/Node.cpp +++ b/node/Node.cpp @@ -355,7 +355,8 @@ void Node::status(ZT1_NodeStatus *status) const ZT1_PeerList *Node::peers() const { - std::map< Address,SharedPtr > peers(RR->topology->allPeers()); + std::vector< std::pair< Address,SharedPtr > > peers(RR->topology->allPeers()); + std::sort(peers.begin(),peers.end()); char *buf = (char *)::malloc(sizeof(ZT1_PeerList) + (sizeof(ZT1_Peer) * peers.size())); if (!buf) @@ -364,7 +365,7 @@ ZT1_PeerList *Node::peers() const pl->peers = (ZT1_Peer *)(buf + sizeof(ZT1_PeerList)); pl->peerCount = 0; - for(std::map< Address,SharedPtr >::iterator pi(peers.begin());pi!=peers.end();++pi) { + for(std::vector< std::pair< Address,SharedPtr > >::iterator pi(peers.begin());pi!=peers.end();++pi) { ZT1_Peer *p = &(pl->peers[pl->peerCount++]); p->address = pi->second->address().toInt(); p->lastUnicastFrame = pi->second->lastUnicastFrame(); diff --git a/node/Topology.cpp b/node/Topology.cpp index b255080e2..25a92acdb 100644 --- a/node/Topology.cpp +++ b/node/Topology.cpp @@ -103,7 +103,7 @@ SharedPtr Topology::addPeer(const SharedPtr &peer) const uint64_t now = RR->node->now(); Mutex::Lock _l(_lock); - SharedPtr p(_activePeers.insert(std::pair< Address,SharedPtr >(peer->address(),peer)).first->second); + SharedPtr &p = _activePeers.set(peer->address(),peer); p->use(now); _saveIdentity(p->identity()); @@ -160,9 +160,9 @@ SharedPtr Topology::getBestRoot(const Address *avoid,unsigned int avoidCou if (++sna == _rootAddresses.end()) sna = _rootAddresses.begin(); // wrap around at end if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order - std::map< Address,SharedPtr >::const_iterator p(_activePeers.find(*sna)); - if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) { - bestRoot = p->second; + SharedPtr *p = _activePeers.get(*sna); + if ((p)&&((*p)->hasActiveDirectPath(now))) { + bestRoot = *p; break; } } @@ -249,10 +249,12 @@ bool Topology::isRoot(const Identity &id) const void Topology::clean(uint64_t now) { Mutex::Lock _l(_lock); - for(std::map< Address,SharedPtr >::iterator p(_activePeers.begin());p!=_activePeers.end();) { - if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->first) == _rootAddresses.end())) { - _activePeers.erase(p++); - } else ++p; + Hashtable< Address,SharedPtr >::Iterator i(_activePeers); + Address *a = (Address *)0; + SharedPtr *p = (SharedPtr *)0; + while (i.next(a,p)) + if (((now - (*p)->lastUsed()) >= 
ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) { + _activePeers.erase(*a); } } diff --git a/node/Topology.hpp b/node/Topology.hpp index 1c5cca007..3066b50c9 100644 --- a/node/Topology.hpp +++ b/node/Topology.hpp @@ -44,6 +44,7 @@ #include "Mutex.hpp" #include "InetAddress.hpp" #include "Dictionary.hpp" +#include "Hashtable.hpp" namespace ZeroTier { @@ -163,17 +164,20 @@ public: inline void eachPeer(F f) { Mutex::Lock _l(_lock); - for(std::map< Address,SharedPtr >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p) - f(*this,p->second); + Hashtable< Address,SharedPtr >::Iterator i(_activePeers); + Address *a = (Address *)0; + SharedPtr *p = (SharedPtr *)0; + while (i.next(a,p)) + f(*this,*p); } /** * @return All currently active peers by address */ - inline std::map< Address,SharedPtr > allPeers() const + inline std::vector< std::pair< Address,SharedPtr > > allPeers() const { Mutex::Lock _l(_lock); - return _activePeers; + return _activePeers.entries(); } /** @@ -190,7 +194,7 @@ private: const RuntimeEnvironment *RR; - std::map< Address,SharedPtr > _activePeers; + Hashtable< Address,SharedPtr > _activePeers; std::map< Identity,std::vector > _roots; std::vector< Address > _rootAddresses; std::vector< SharedPtr > _rootPeers; From 7b8ce1605781f14d909e0aa099455b86d738c60a Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 13:42:19 -0700 Subject: [PATCH 03/12] Another std::map<> dies. --- node/Hashtable.hpp | 35 +++++++++++++++++++++++++++++++++++ node/Network.cpp | 21 ++++++++++++--------- node/Network.hpp | 3 ++- node/Peer.hpp | 1 + 4 files changed, 50 insertions(+), 10 deletions(-) diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp index 84b5be0ef..59a577262 100644 --- a/node/Hashtable.hpp +++ b/node/Hashtable.hpp @@ -199,6 +199,26 @@ public: return k; } + /** + * Append all keys (in unspecified order) to the supplied vector or list + * + * @param v Vector, list, or other compliant container + * @tparam Type of V (generally inferred) + */ + template + inline void appendKeys(C &v) const + { + if (_s) { + for(unsigned long i=0;i<_bc;++i) { + _Bucket *b = _t[i]; + while (b) { + v.push_back(b->k); + b = b->next; + } + } + } + } + /** * @return Vector of all entries (pairs of K,V) */ @@ -234,6 +254,21 @@ public: } inline const V *get(const K &k) const { return const_cast(this)->get(k); } + /** + * @param k Key to check + * @return True if key is present + */ + inline bool contains(const K &k) const + { + _Bucket *b = _t[_hc(k) % _bc]; + while (b) { + if (b->k == k) + return true; + b = b->next; + } + return false; + } + /** * @param k Key * @return True if value was present diff --git a/node/Network.cpp b/node/Network.cpp index 39042fabd..8317cad9e 100644 --- a/node/Network.cpp +++ b/node/Network.cpp @@ -147,7 +147,7 @@ bool Network::subscribedToMulticastGroup(const MulticastGroup &mg,bool includeBr if (std::binary_search(_myMulticastGroups.begin(),_myMulticastGroups.end(),mg)) return true; else if (includeBridgedGroups) - return (_multicastGroupsBehindMe.find(mg) != _multicastGroupsBehindMe.end()); + return _multicastGroupsBehindMe.contains(mg); else return false; } @@ -373,10 +373,14 @@ void Network::clean() } // Clean learned multicast groups if we haven't heard from them in a while - for(std::map::iterator mg(_multicastGroupsBehindMe.begin());mg!=_multicastGroupsBehindMe.end();) { - if ((now - mg->second) > (ZT_MULTICAST_LIKE_EXPIRE * 2)) - _multicastGroupsBehindMe.erase(mg++); - else ++mg; + 
{ + Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe); + MulticastGroup *mg = (MulticastGroup *)0; + uint64_t *ts = (uint64_t *)0; + while (i.next(mg,ts)) { + if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2)) + _multicastGroupsBehindMe.erase(*mg); + } } } @@ -408,8 +412,8 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr) void Network::learnBridgedMulticastGroup(const MulticastGroup &mg,uint64_t now) { Mutex::Lock _l(_lock); - unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size(); - _multicastGroupsBehindMe[mg] = now; + const unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size(); + _multicastGroupsBehindMe.set(mg,now); if (tmp != _multicastGroupsBehindMe.size()) _announceMulticastGroups(); } @@ -510,8 +514,7 @@ std::vector Network::_allMulticastGroups() const std::vector mgs; mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1); mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end()); - for(std::map< MulticastGroup,uint64_t >::const_iterator i(_multicastGroupsBehindMe.begin());i!=_multicastGroupsBehindMe.end();++i) - mgs.push_back(i->first); + _multicastGroupsBehindMe.appendKeys(mgs); if ((_config)&&(_config->enableBroadcast())) mgs.push_back(Network::BROADCAST); std::sort(mgs.begin(),mgs.end()); diff --git a/node/Network.hpp b/node/Network.hpp index d7320d46f..47d2efc03 100644 --- a/node/Network.hpp +++ b/node/Network.hpp @@ -40,6 +40,7 @@ #include "Constants.hpp" #include "NonCopyable.hpp" +#include "Hashtable.hpp" #include "Address.hpp" #include "Mutex.hpp" #include "SharedPtr.hpp" @@ -359,7 +360,7 @@ private: volatile bool _portInitialized; std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to including those behind us (updated periodically) - std::map< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each + Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each std::map _remoteBridgeRoutes; // remote addresses where given MACs are reachable diff --git a/node/Peer.hpp b/node/Peer.hpp index 283e3f332..ef436cd95 100644 --- a/node/Peer.hpp +++ b/node/Peer.hpp @@ -40,6 +40,7 @@ #include "../include/ZeroTierOne.h" #include "RuntimeEnvironment.hpp" +#include "CertificateOfMembership.hpp" #include "RemotePath.hpp" #include "Address.hpp" #include "Utils.hpp" From d1341578d8dc7fd3e39b24dde1ac2dae4da7a632 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 13:53:48 -0700 Subject: [PATCH 04/12] ... and another one! 
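The interesting part of this one is the anti-DoS circuit breaker on _remoteBridgeRoutes: when the table exceeds ZT_MAX_BRIDGE_ROUTES, find the bridge address that owns the most MAC entries and drop all of its routes. A minimal standalone sketch of that two-pass pattern follows; pruneWorstBridge() is a hypothetical helper name, the node/ headers are assumed to be on the include path, and it leans on the same assumptions the patch itself makes (operator[] value-initializes new entries, and erasing the current key during Iterator traversal is supported).

    #include "Hashtable.hpp"
    #include "Address.hpp"
    #include "MAC.hpp"

    using namespace ZeroTier;

    // Hypothetical helper: drop every route pointing at the bridge address
    // that currently owns the most MAC entries (the "worst offender").
    static void pruneWorstBridge(Hashtable< MAC,Address > &routes)
    {
        Hashtable< Address,unsigned long > counts;
        Address worst;
        unsigned long worstCount = 0;

        MAC *m = (MAC *)0;
        Address *a = (Address *)0;

        { // pass 1: count routes per bridge address
            Hashtable< MAC,Address >::Iterator i(routes);
            while (i.next(m,a)) {
                const unsigned long c = ++counts[*a]; // missing entries start at zero
                if (c > worstCount) {
                    worstCount = c;
                    worst = *a;
                }
            }
        }

        { // pass 2: erase every route owned by that address
            Hashtable< MAC,Address >::Iterator i(routes);
            while (i.next(m,a)) {
                if (*a == worst)
                    routes.erase(*m);
            }
        }
    }
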
--- node/Network.cpp | 34 +++++++++++++++++++++++----------- node/Network.hpp | 14 +++++++------- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/node/Network.cpp b/node/Network.cpp index 8317cad9e..b0c2627bb 100644 --- a/node/Network.cpp +++ b/node/Network.cpp @@ -389,22 +389,34 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr) Mutex::Lock _l(_lock); _remoteBridgeRoutes[mac] = addr; - // If _remoteBridgeRoutes exceeds sanity limit, trim worst offenders until below -- denial of service circuit breaker + // Anti-DOS circuit breaker to prevent nodes from spamming us with absurd numbers of bridge routes while (_remoteBridgeRoutes.size() > ZT_MAX_BRIDGE_ROUTES) { - std::map counts; + Hashtable< Address,unsigned long > counts; Address maxAddr; unsigned long maxCount = 0; - for(std::map::iterator br(_remoteBridgeRoutes.begin());br!=_remoteBridgeRoutes.end();++br) { - unsigned long c = ++counts[br->second]; - if (c > maxCount) { - maxCount = c; - maxAddr = br->second; + + MAC *m = (MAC *)0; + Address *a = (Address *)0; + + // Find the address responsible for the most entries + { + Hashtable::Iterator i(_remoteBridgeRoutes); + while (i.next(m,a)) { + const unsigned long c = ++counts[*a]; + if (c > maxCount) { + maxCount = c; + maxAddr = *a; + } } } - for(std::map::iterator br(_remoteBridgeRoutes.begin());br!=_remoteBridgeRoutes.end();) { - if (br->second == maxAddr) - _remoteBridgeRoutes.erase(br++); - else ++br; + + // Kill this address from our table, since it's most likely spamming us + { + Hashtable::Iterator i(_remoteBridgeRoutes); + while (i.next(m,a)) { + if (*a == maxAddr) + _remoteBridgeRoutes.erase(*m); + } } } } diff --git a/node/Network.hpp b/node/Network.hpp index 47d2efc03..d8ffd612a 100644 --- a/node/Network.hpp +++ b/node/Network.hpp @@ -298,10 +298,10 @@ public: inline Address findBridgeTo(const MAC &mac) const { Mutex::Lock _l(_lock); - std::map::const_iterator br(_remoteBridgeRoutes.find(mac)); - if (br == _remoteBridgeRoutes.end()) - return Address(); - return br->second; + const Address *const br = _remoteBridgeRoutes.get(mac); + if (br) + return *br; + return Address(); } /** @@ -359,10 +359,10 @@ private: volatile bool _enabled; volatile bool _portInitialized; - std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to including those behind us (updated periodically) - Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each + std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to (according to tap) + Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we last saw them (if we are a bridge) - std::map _remoteBridgeRoutes; // remote addresses where given MACs are reachable + Hashtable< MAC,Address > _remoteBridgeRoutes; // remote addresses where given MACs are reachable (for remote bridges) std::map _membershipCertificates; // Other members' certificates of membership std::map _lastPushedMembershipCertificate; // When did we last push our certificate to each remote member? From 307e44f7c84dccd402e33fec08029d824b46515d Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 14:14:32 -0700 Subject: [PATCH 05/12] Two for one! 
(std::map removal) --- node/Network.cpp | 78 ++++++++++++++++++++++-------------------------- node/Network.hpp | 13 +++++--- 2 files changed, 45 insertions(+), 46 deletions(-) diff --git a/node/Network.cpp b/node/Network.cpp index b0c2627bb..331b37a21 100644 --- a/node/Network.cpp +++ b/node/Network.cpp @@ -92,7 +92,7 @@ Network::Network(const RuntimeEnvironment *renv,uint64_t nwid) : com.deserialize2(p,e); if (!com) break; - _membershipCertificates.insert(std::pair< Address,CertificateOfMembership >(com.issuedTo(),com)); + _certInfo[com.issuedTo()].com = com; } } } @@ -125,19 +125,22 @@ Network::~Network() clean(); - std::string buf("ZTMCD0"); Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.mcerts",_id); + Mutex::Lock _l(_lock); - - if ((!_config)||(_config->isPublic())||(_membershipCertificates.size() == 0)) { + if ((!_config)||(_config->isPublic())||(_certInfo.empty())) { RR->node->dataStoreDelete(n); - return; + } else { + std::string buf("ZTMCD0"); + Hashtable< Address,_RemoteMemberCertificateInfo >::Iterator i(_certInfo); + Address *a = (Address *)0; + _RemoteMemberCertificateInfo *ci = (_RemoteMemberCertificateInfo *)0; + while (i.next(a,ci)) { + if (ci->com) + ci->com.serialize2(buf); + } + RR->node->dataStorePut(n,buf,true); } - - for(std::map::iterator c(_membershipCertificates.begin());c!=_membershipCertificates.end();++c) - c->second.serialize2(buf); - - RR->node->dataStorePut(n,buf,true); } } @@ -284,11 +287,12 @@ bool Network::validateAndAddMembershipCertificate(const CertificateOfMembership return false; Mutex::Lock _l(_lock); - CertificateOfMembership &old = _membershipCertificates[cert.issuedTo()]; - // Nothing to do if the cert hasn't changed -- we get duplicates due to zealous cert pushing - if (old == cert) - return true; // but if it's a duplicate of one we already accepted, return is 'true' + { + const _RemoteMemberCertificateInfo *ci = _certInfo.get(cert.issuedTo()); + if ((ci)&&(ci->com == cert)) + return true; // we already have it + } // Check signature, log and return if cert is invalid if (cert.signedBy() != controller()) { @@ -322,9 +326,8 @@ bool Network::validateAndAddMembershipCertificate(const CertificateOfMembership } } - // If we made it past authentication, update cert - if (cert.revision() != old.revision()) - old = cert; + // If we made it past authentication, add or update cert in our cert info store + _certInfo[cert.issuedTo()].com = cert; return true; } @@ -333,9 +336,9 @@ bool Network::peerNeedsOurMembershipCertificate(const Address &to,uint64_t now) { Mutex::Lock _l(_lock); if ((_config)&&(!_config->isPublic())&&(_config->com())) { - uint64_t &lastPushed = _lastPushedMembershipCertificate[to]; - if ((now - lastPushed) > (ZT_NETWORK_AUTOCONF_DELAY / 2)) { - lastPushed = now; + _RemoteMemberCertificateInfo &ci = _certInfo[to]; + if ((now - ci.lastPushed) > (ZT_NETWORK_AUTOCONF_DELAY / 2)) { + ci.lastPushed = now; return true; } } @@ -352,23 +355,16 @@ void Network::clean() if ((_config)&&(_config->isPublic())) { // Open (public) networks do not track certs or cert pushes at all. - _membershipCertificates.clear(); - _lastPushedMembershipCertificate.clear(); + _certInfo.clear(); } else if (_config) { - // Clean certificates that are no longer valid from the cache. 
- for(std::map::iterator c=(_membershipCertificates.begin());c!=_membershipCertificates.end();) { - if (_config->com().agreesWith(c->second)) - ++c; - else _membershipCertificates.erase(c++); - } - - // Clean entries from the last pushed tracking map if they're so old as - // to be no longer relevant. - uint64_t forgetIfBefore = now - (ZT_PEER_ACTIVITY_TIMEOUT * 16); // arbitrary reasonable cutoff - for(std::map::iterator lp(_lastPushedMembershipCertificate.begin());lp!=_lastPushedMembershipCertificate.end();) { - if (lp->second < forgetIfBefore) - _lastPushedMembershipCertificate.erase(lp++); - else ++lp; + // Clean obsolete entries from private network cert info table + Hashtable< Address,_RemoteMemberCertificateInfo >::Iterator i(_certInfo); + Address *a = (Address *)0; + _RemoteMemberCertificateInfo *ci = (_RemoteMemberCertificateInfo *)0; + const uint64_t forgetIfBefore = now - (ZT_PEER_ACTIVITY_TIMEOUT * 16); // arbitrary reasonable cutoff + while (i.next(a,ci)) { + if ((ci->lastPushed < forgetIfBefore)&&(!ci->com.agreesWith(_config->com()))) + _certInfo.erase(*a); } } @@ -506,12 +502,10 @@ bool Network::_isAllowed(const Address &peer) const return false; if (_config->isPublic()) return true; - - std::map::const_iterator pc(_membershipCertificates.find(peer)); - if (pc == _membershipCertificates.end()) - return false; // no certificate on file - - return _config->com().agreesWith(pc->second); // is other cert valid against ours? + const _RemoteMemberCertificateInfo *ci = _certInfo.get(peer); + if (!ci) + return false; + return _config->com().agreesWith(ci->com); } catch (std::exception &exc) { TRACE("isAllowed() check failed for peer %s: unexpected exception: %s",peer.toString().c_str(),exc.what()); } catch ( ... ) { diff --git a/node/Network.hpp b/node/Network.hpp index d8ffd612a..7f215555c 100644 --- a/node/Network.hpp +++ b/node/Network.hpp @@ -347,6 +347,13 @@ public: inline bool operator>=(const Network &n) const throw() { return (_id >= n._id); } private: + struct _RemoteMemberCertificateInfo + { + _RemoteMemberCertificateInfo() : com(),lastPushed(0) {} + CertificateOfMembership com; // remote member's COM + uint64_t lastPushed; // when did we last push ours to them? + }; + ZT1_VirtualNetworkStatus _status() const; void _externalConfig(ZT1_VirtualNetworkConfig *ec) const; // assumes _lock is locked bool _isAllowed(const Address &peer) const; @@ -361,11 +368,9 @@ private: std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to (according to tap) Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we last saw them (if we are a bridge) + Hashtable< MAC,Address > _remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices behind remote bridges) - Hashtable< MAC,Address > _remoteBridgeRoutes; // remote addresses where given MACs are reachable (for remote bridges) - - std::map _membershipCertificates; // Other members' certificates of membership - std::map _lastPushedMembershipCertificate; // When did we last push our certificate to each remote member? + Hashtable< Address,_RemoteMemberCertificateInfo > _certInfo; SharedPtr _config; // Most recent network configuration, which is an immutable value-object volatile uint64_t _lastConfigUpdate; From f116c4b9c05e88130cb7f2970b616f7930c30c6c Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 14:24:31 -0700 Subject: [PATCH 06/12] ... and another ... 
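Moving _phy to Hashtable is what motivates the new PhySurfaceKey::hashCode(): the table picks a bucket with hashCode() modulo the bucket count and then walks the chain comparing candidates with ==, so any struct used as a key needs both an evenly distributed hashCode() and an equality operator. A minimal sketch of such a key follows; ExampleKey is a hypothetical stand-in shaped like PhySurfaceKey, and the equality operator on the real key is assumed since the bucket walk requires it.

    #include "Hashtable.hpp"
    #include "Address.hpp"
    #include "InetAddress.hpp"

    using namespace ZeroTier;

    // Hypothetical key shaped like PhySurfaceKey: hashCode() selects the bucket,
    // operator== resolves collisions within it.
    struct ExampleKey
    {
        Address reporter;
        InetAddress::IpScope scope;

        ExampleKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {}
        ExampleKey(const Address &r,InetAddress::IpScope s) : reporter(r),scope(s) {}

        inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); }
        inline bool operator==(const ExampleKey &k) const throw() { return ((reporter == k.reporter)&&(scope == k.scope)); }
    };

    // Usage sketch: Hashtable< ExampleKey,uint64_t > lastSeen; lastSeen[ExampleKey(addr,scope)] = now;
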
--- node/SelfAwareness.cpp | 37 ++++++++++++++----------------------- node/SelfAwareness.hpp | 14 +++++--------- 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/node/SelfAwareness.cpp b/node/SelfAwareness.cpp index 716cf7f34..7329322af 100644 --- a/node/SelfAwareness.cpp +++ b/node/SelfAwareness.cpp @@ -107,10 +107,14 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi // Erase all entries (other than this one) for this scope to prevent thrashing // Note: we should probably not use 'entry' after this - for(std::map< PhySurfaceKey,PhySurfaceEntry >::iterator p(_phy.begin());p!=_phy.end();) { - if ((p->first.reporter != reporter)&&(p->first.scope == scope)) - _phy.erase(p++); - else ++p; + { + Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy); + PhySurfaceKey *k = (PhySurfaceKey *)0; + PhySurfaceEntry *e = (PhySurfaceEntry *)0; + while (i.next(k,e)) { + if ((k->reporter != reporter)&&(k->scope == scope)) + _phy.erase(*k); + } } _ResetWithinScope rset(RR,now,(InetAddress::IpScope)scope); @@ -140,26 +144,13 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi void SelfAwareness::clean(uint64_t now) { Mutex::Lock _l(_phy_m); - for(std::map< PhySurfaceKey,PhySurfaceEntry >::iterator p(_phy.begin());p!=_phy.end();) { - if ((now - p->second.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT) - _phy.erase(p++); - else ++p; + Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy); + PhySurfaceKey *k = (PhySurfaceKey *)0; + PhySurfaceEntry *e = (PhySurfaceEntry *)0; + while (i.next(k,e)) { + if ((now - e->ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT) + _phy.erase(*k); } } -bool SelfAwareness::areGlobalIPv4PortsRandomized() const -{ - int port = 0; - Mutex::Lock _l(_phy_m); - for(std::map< PhySurfaceKey,PhySurfaceEntry >::const_iterator p(_phy.begin());p!=_phy.end();++p) { - if ((p->first.scope == InetAddress::IP_SCOPE_GLOBAL)&&(p->second.mySurface.ss_family == AF_INET)) { - const int tmp = (int)p->second.mySurface.port(); - if ((port)&&(tmp != port)) - return true; - else port = tmp; - } - } - return false; -} - } // namespace ZeroTier diff --git a/node/SelfAwareness.hpp b/node/SelfAwareness.hpp index d3b79d186..3133553ee 100644 --- a/node/SelfAwareness.hpp +++ b/node/SelfAwareness.hpp @@ -28,10 +28,9 @@ #ifndef ZT_SELFAWARENESS_HPP #define ZT_SELFAWARENESS_HPP -#include -#include - +#include "Constants.hpp" #include "InetAddress.hpp" +#include "Hashtable.hpp" #include "Address.hpp" #include "Mutex.hpp" @@ -66,17 +65,14 @@ public: */ void clean(uint64_t now); - /** - * @return True if our external (global scope) IPv4 ports appear to be randomized by a NAT device - */ - bool areGlobalIPv4PortsRandomized() const; - private: struct PhySurfaceKey { Address reporter; InetAddress::IpScope scope; + inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); } + PhySurfaceKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {} PhySurfaceKey(const Address &r,InetAddress::IpScope s) : reporter(r),scope(s) {} inline bool operator<(const PhySurfaceKey &k) const throw() { return ((reporter < k.reporter) ? true : ((reporter == k.reporter) ? 
((int)scope < (int)k.scope) : false)); } @@ -93,7 +89,7 @@ private: const RuntimeEnvironment *RR; - std::map< PhySurfaceKey,PhySurfaceEntry > _phy; + Hashtable< PhySurfaceKey,PhySurfaceEntry > _phy; Mutex _phy_m; }; From 0ab3e49be91ed7a8723c8b58750aef77c01e8d08 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 14:44:22 -0700 Subject: [PATCH 07/12] Starting in on Switch... kill map in defrag queue, which will probably improve performance pretty decently under high load with lots of peers. --- node/Hashtable.hpp | 9 ++++++--- node/Switch.cpp | 49 ++++++++++++++++++++++++---------------------- node/Switch.hpp | 4 +++- 3 files changed, 35 insertions(+), 27 deletions(-) diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp index 59a577262..29c548387 100644 --- a/node/Hashtable.hpp +++ b/node/Hashtable.hpp @@ -372,9 +372,12 @@ private: } static inline unsigned long _hc(const uint64_t i) { - // NOTE: this is fine for network IDs, but might be bad for other kinds - // of IDs if they are not evenly or randomly distributed. - return (unsigned long)((i ^ (i >> 32)) * 2654435761ULL); + /* NOTE: this assumes that 'i' is evenly distributed, which is the case for + * packet IDs and network IDs -- the two use cases in ZT for uint64_t keys. + * These values are also greater than 0xffffffff so they'll map onto a full + * bucket count just fine no matter what happens. Normally you'd want to + * hash an integer key index in a hash table. */ + return (unsigned long)i; } inline void _grow() diff --git a/node/Switch.cpp b/node/Switch.cpp index 989f497a0..3dcb7002b 100644 --- a/node/Switch.cpp +++ b/node/Switch.cpp @@ -531,11 +531,14 @@ unsigned long Switch::doTimerTasks(uint64_t now) { // Time out packets that didn't get all their fragments. Mutex::Lock _l(_defragQueue_m); - for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) { - if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) { + Hashtable< uint64_t,DefragQueueEntry >::Iterator i(_defragQueue); + uint64_t *packetId = (uint64_t *)0; + DefragQueueEntry *qe = (DefragQueueEntry *)0; + while (i.next(packetId,qe)) { + if ((now - qe->creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) { TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first); - _defragQueue.erase(i++); - } else ++i; + _defragQueue.erase(*packetId); + } } } @@ -577,32 +580,31 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void // seeing a Packet::Fragment? 
Mutex::Lock _l(_defragQueue_m); - std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid)); + DefragQueueEntry &dq = _defragQueue[pid]; - if (dqe == _defragQueue.end()) { + if (!dq.creationTime) { // We received a Packet::Fragment without its head, so queue it and wait - DefragQueueEntry &dq = _defragQueue[pid]; dq.creationTime = RR->node->now(); dq.frags[fno - 1] = fragment; dq.totalFragments = tf; // total fragment count is known dq.haveFragments = 1 << fno; // we have only this fragment //TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str()); - } else if (!(dqe->second.haveFragments & (1 << fno))) { + } else if (!(dq.haveFragments & (1 << fno))) { // We have other fragments and maybe the head, so add this one and check - dqe->second.frags[fno - 1] = fragment; - dqe->second.totalFragments = tf; + dq.frags[fno - 1] = fragment; + dq.totalFragments = tf; //TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str()); - if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) { + if (Utils::countBits(dq.haveFragments |= (1 << fno)) == tf) { // We have all fragments -- assemble and process full Packet //TRACE("packet %.16llx is complete, assembling and processing...",pid); - SharedPtr packet(dqe->second.frag0); + SharedPtr packet(dq.frag0); for(unsigned int f=1;fappend(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength()); - _defragQueue.erase(dqe); + packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength()); + _defragQueue.erase(pid); // dq no longer valid after this if (!packet->tryDecode(RR)) { Mutex::Lock _l(_rxQueue_m); @@ -645,26 +647,27 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat uint64_t pid = packet->packetId(); Mutex::Lock _l(_defragQueue_m); - std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid)); + DefragQueueEntry &dq = _defragQueue[pid]; - if (dqe == _defragQueue.end()) { + if (!dq.creationTime) { // If we have no other fragments yet, create an entry and save the head - DefragQueueEntry &dq = _defragQueue[pid]; + dq.creationTime = RR->node->now(); dq.frag0 = packet; dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment dq.haveFragments = 1; // head is first bit (left to right) //TRACE("fragment (0/?) 
of %.16llx from %s",pid,fromAddr.toString().c_str()); - } else if (!(dqe->second.haveFragments & 1)) { + } else if (!(dq.haveFragments & 1)) { // If we have other fragments but no head, see if we are complete with the head - if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) { + + if ((dq.totalFragments)&&(Utils::countBits(dq.haveFragments |= 1) == dq.totalFragments)) { // We have all fragments -- assemble and process full Packet //TRACE("packet %.16llx is complete, assembling and processing...",pid); // packet already contains head, so append fragments - for(unsigned int f=1;fsecond.totalFragments;++f) - packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength()); - _defragQueue.erase(dqe); + for(unsigned int f=1;fappend(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength()); + _defragQueue.erase(pid); // dq no longer valid after this if (!packet->tryDecode(RR)) { Mutex::Lock _l(_rxQueue_m); @@ -672,7 +675,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat } } else { // Still waiting on more fragments, so queue the head - dqe->second.frag0 = packet; + dq.frag0 = packet; } } // else this is a duplicate head, ignore } else { diff --git a/node/Switch.hpp b/node/Switch.hpp index ac85606e2..a1b36014d 100644 --- a/node/Switch.hpp +++ b/node/Switch.hpp @@ -45,6 +45,7 @@ #include "Network.hpp" #include "SharedPtr.hpp" #include "IncomingPacket.hpp" +#include "Hashtable.hpp" /* Ethernet frame types that might be relevant to us */ #define ZT_ETHERTYPE_IPV4 0x0800 @@ -199,13 +200,14 @@ private: // Packet defragmentation queue -- comes before RX queue in path struct DefragQueueEntry { + DefragQueueEntry() : creationTime(0),totalFragments(0),haveFragments(0) {} uint64_t creationTime; SharedPtr frag0; Packet::Fragment frags[ZT_MAX_PACKET_FRAGMENTS - 1]; unsigned int totalFragments; // 0 if only frag0 received, waiting for frags uint32_t haveFragments; // bit mask, LSB to MSB }; - std::map< uint64_t,DefragQueueEntry > _defragQueue; + Hashtable< uint64_t,DefragQueueEntry > _defragQueue; Mutex _defragQueue_m; // ZeroTier-layer RX queue of incoming packets in the process of being decoded From db0369e9b8e6bf49ce8ec8f7fe706004314d4548 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 14:56:39 -0700 Subject: [PATCH 08/12] Remove way-overkill multimap from Switch. 
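The multimap keyed by destination bought ordered per-key lookup the TX queue never needed; every timer tick walks the whole queue anyway, so the destination moves into the entry itself and the container becomes a plain std::list. Per-peer draining is then a linear scan using the usual pre-C++11 erase(it++) idiom, sketched below with a hypothetical Entry type and helper name.

    #include <list>
    #include <stdint.h>

    struct Entry { uint64_t dest; uint64_t creationTime; };

    // Hypothetical helper: remove every queued entry destined for 'dest'.
    static void dropFor(std::list<Entry> &q,uint64_t dest)
    {
        for(std::list<Entry>::iterator i(q.begin());i!=q.end();) {
            if (i->dest == dest)
                q.erase(i++); // post-increment: erase the old node, keep iterating
            else ++i;
        }
    }
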
--- node/Switch.cpp | 27 ++++++++++++++------------- node/Switch.hpp | 8 +++++--- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/node/Switch.cpp b/node/Switch.cpp index 3dcb7002b..7d6f8094c 100644 --- a/node/Switch.cpp +++ b/node/Switch.cpp @@ -291,7 +291,7 @@ void Switch::send(const Packet &packet,bool encrypt,uint64_t nwid) if (!_trySend(packet,encrypt,nwid)) { Mutex::Lock _l(_txQueue_m); - _txQueue.insert(std::pair< Address,TXQueueEntry >(packet.destination(),TXQueueEntry(RR->node->now(),packet,encrypt,nwid))); + _txQueue.push_back(TXQueueEntry(packet.destination(),RR->node->now(),packet,encrypt,nwid)); } } @@ -435,11 +435,12 @@ void Switch::doAnythingWaitingForPeer(const SharedPtr &peer) { // finish sending any packets waiting on peer's public key / identity Mutex::Lock _l(_txQueue_m); - std::pair< std::multimap< Address,TXQueueEntry >::iterator,std::multimap< Address,TXQueueEntry >::iterator > waitingTxQueueItems(_txQueue.equal_range(peer->address())); - for(std::multimap< Address,TXQueueEntry >::iterator txi(waitingTxQueueItems.first);txi!=waitingTxQueueItems.second;) { - if (_trySend(txi->second.packet,txi->second.encrypt,txi->second.nwid)) - _txQueue.erase(txi++); - else ++txi; + for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) { + if (txi->dest == peer->address()) { + if (_trySend(txi->packet,txi->encrypt,txi->nwid)) + _txQueue.erase(txi++); + else ++txi; + } else ++txi; } } } @@ -509,13 +510,13 @@ unsigned long Switch::doTimerTasks(uint64_t now) { // Time out TX queue packets that never got WHOIS lookups or other info. Mutex::Lock _l(_txQueue_m); - for(std::multimap< Address,TXQueueEntry >::iterator i(_txQueue.begin());i!=_txQueue.end();) { - if (_trySend(i->second.packet,i->second.encrypt,i->second.nwid)) - _txQueue.erase(i++); - else if ((now - i->second.creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) { - TRACE("TX %s -> %s timed out",i->second.packet.source().toString().c_str(),i->second.packet.destination().toString().c_str()); - _txQueue.erase(i++); - } else ++i; + for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) { + if (_trySend(txi->packet,txi->encrypt,txi->nwid)) + _txQueue.erase(txi++); + else if ((now - txi->creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) { + TRACE("TX %s -> %s timed out",txi->packet.source().toString().c_str(),txi->packet.destination().toString().c_str()); + _txQueue.erase(txi++); + } else ++txi; } } diff --git a/node/Switch.hpp b/node/Switch.hpp index a1b36014d..0791681f0 100644 --- a/node/Switch.hpp +++ b/node/Switch.hpp @@ -214,22 +214,24 @@ private: std::list< SharedPtr > _rxQueue; Mutex _rxQueue_m; - // ZeroTier-layer TX queue by destination ZeroTier address + // ZeroTier-layer TX queue entry struct TXQueueEntry { TXQueueEntry() {} - TXQueueEntry(uint64_t ct,const Packet &p,bool enc,uint64_t nw) : + TXQueueEntry(Address d,uint64_t ct,const Packet &p,bool enc,uint64_t nw) : + dest(d), creationTime(ct), nwid(nw), packet(p), encrypt(enc) {} + Address dest; uint64_t creationTime; uint64_t nwid; Packet packet; // unencrypted/unMAC'd packet -- this is done at send time bool encrypt; }; - std::multimap< Address,TXQueueEntry > _txQueue; + std::list< TXQueueEntry > _txQueue; Mutex _txQueue_m; // Tracks sending of VERB_RENDEZVOUS to relaying peers From 3dba016a9354d9c50743877988c8d928d25f7a2b Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 15:21:22 -0700 Subject: [PATCH 09/12] Almost done... very few std::map<>s remaining in any spot that matters. 
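The sorted Array<Address,2> key becomes _LastUniteKey, which normalizes the pair so (p1,p2) and (p2,p1) land on the same entry, and because Hashtable::operator[] hands back a reference to a value-initialized slot, the rate-limit check and the timestamp refresh collapse into a single lookup. A minimal sketch of that single-lookup pattern follows, using a plain Address key for brevity; shouldAttempt() is a hypothetical name, while the real unite() keys on the normalized pair.

    #include <stdint.h>
    #include "Hashtable.hpp"
    #include "Address.hpp"

    using namespace ZeroTier;

    // Hypothetical: allow an action at most once per 'interval' per address.
    // Relies on operator[] value-initializing new entries to 0, as unite() does.
    static bool shouldAttempt(Hashtable< Address,uint64_t > &lastAttempt,const Address &to,uint64_t now,uint64_t interval)
    {
        uint64_t &ts = lastAttempt[to]; // one hash lookup for both read and update
        if ((now - ts) < interval)
            return false;
        ts = now;
        return true;
    }
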
--- node/Hashtable.hpp | 5 +++-- node/Switch.cpp | 38 ++++++++++++++++++-------------------- node/Switch.hpp | 19 ++++++++++++++++++- 3 files changed, 39 insertions(+), 23 deletions(-) diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp index 29c548387..bcc111e31 100644 --- a/node/Hashtable.hpp +++ b/node/Hashtable.hpp @@ -39,8 +39,9 @@ namespace ZeroTier { * A minimal hash table implementation for the ZeroTier core * * This is not a drop-in replacement for STL containers, and has several - * limitations. It's designed to be small and fast for use in the - * ZeroTier core. + * limitations. Keys can be uint64_t or an object, and if the latter they + * must implement a method called hashCode() that returns an unsigned long + * value that is evenly distributed. */ template class Hashtable diff --git a/node/Switch.cpp b/node/Switch.cpp index 7d6f8094c..d5ee3e236 100644 --- a/node/Switch.cpp +++ b/node/Switch.cpp @@ -309,31 +309,18 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force) const uint64_t now = RR->node->now(); - std::pair cg(Peer::findCommonGround(*p1p,*p2p,now)); - if (!(cg.first)) - return false; - - if (cg.first.ipScope() != cg.second.ipScope()) - return false; - - // Addresses are sorted in key for last unite attempt map for order - // invariant lookup: (p1,p2) == (p2,p1) - Array uniteKey; - if (p1 >= p2) { - uniteKey[0] = p2; - uniteKey[1] = p1; - } else { - uniteKey[0] = p1; - uniteKey[1] = p2; - } { Mutex::Lock _l(_lastUniteAttempt_m); - std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey)); - if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL)) + uint64_t &luts = _lastUniteAttempt[_LastUniteKey(p1,p2)]; + if (((now - luts) < ZT_MIN_UNITE_INTERVAL)&&(!force)) return false; - else _lastUniteAttempt[uniteKey] = now; + luts = now; } + std::pair cg(Peer::findCommonGround(*p1p,*p2p,now)); + if ((!(cg.first))||(cg.first.ipScope() != cg.second.ipScope())) + return false; + TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str()); /* Tell P1 where to find P2 and vice versa, sending the packets to P1 and @@ -543,6 +530,17 @@ unsigned long Switch::doTimerTasks(uint64_t now) } } + { // Remove really old last unite attempt entries to keep table size controlled + Mutex::Lock _l(_lastUniteAttempt_m); + Hashtable< _LastUniteKey,uint64_t >::Iterator i(_lastUniteAttempt); + _LastUniteKey *k = (_LastUniteKey *)0; + uint64_t *v = (uint64_t *)0; + while (i.next(k,v)) { + if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 16)) + _lastUniteAttempt.erase(*k); + } + } + return nextDelay; } diff --git a/node/Switch.hpp b/node/Switch.hpp index 0791681f0..2d83b70cc 100644 --- a/node/Switch.hpp +++ b/node/Switch.hpp @@ -235,7 +235,24 @@ private: Mutex _txQueue_m; // Tracks sending of VERB_RENDEZVOUS to relaying peers - std::map< Array< Address,2 >,uint64_t > _lastUniteAttempt; // key is always sorted in ascending order, for set-like behavior + struct _LastUniteKey + { + _LastUniteKey() : x(0),y(0) {} + _LastUniteKey(const Address &a1,const Address &a2) + { + if (a1 > a2) { + x = a2.toInt(); + y = a1.toInt(); + } else { + x = a1.toInt(); + y = a2.toInt(); + } + } + inline unsigned long hashCode() const throw() { return ((unsigned long)x ^ (unsigned long)y); } + inline bool operator==(const _LastUniteKey &k) const throw() { return ((x == k.x)&&(y == k.y)); } + uint64_t x,y; + }; + Hashtable< _LastUniteKey,uint64_t > _lastUniteAttempt; // key is 
always sorted in ascending order, for set-like behavior Mutex _lastUniteAttempt_m; // Active attempts to contact remote peers, including state of multi-phase NAT traversal From 85b90f122a826510491e15a1bfd6c1fe75dfe069 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 4 Sep 2015 15:35:43 -0700 Subject: [PATCH 10/12] Final std::map<> from Switch, and add some smallish default values for hash size. --- node/Switch.cpp | 41 ++++++++++++++++++++++++----------------- node/Switch.hpp | 3 ++- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/node/Switch.cpp b/node/Switch.cpp index d5ee3e236..995abef48 100644 --- a/node/Switch.cpp +++ b/node/Switch.cpp @@ -67,7 +67,10 @@ static const char *etherTypeName(const unsigned int etherType) Switch::Switch(const RuntimeEnvironment *renv) : RR(renv), - _lastBeaconResponse(0) + _lastBeaconResponse(0), + _outstandingWhoisRequests(32), + _defragQueue(32), + _lastUniteAttempt(8) // only really used on root servers and upstreams, and it'll grow there just fine { } @@ -389,10 +392,13 @@ void Switch::requestWhois(const Address &addr) bool inserted = false; { Mutex::Lock _l(_outstandingWhoisRequests_m); - std::pair< std::map< Address,WhoisRequest >::iterator,bool > entry(_outstandingWhoisRequests.insert(std::pair(addr,WhoisRequest()))); - if ((inserted = entry.second)) - entry.first->second.lastSent = RR->node->now(); - entry.first->second.retries = 0; // reset retry count if entry already existed + WhoisRequest &r = _outstandingWhoisRequests[addr]; + if (r.lastSent) { + r.retries = 0; // reset retry count if entry already existed, but keep waiting and retry again after normal timeout + } else { + r.lastSent = RR->node->now(); + inserted = true; + } } if (inserted) _sendWhoisRequest(addr,(const Address *)0,0); @@ -474,24 +480,25 @@ unsigned long Switch::doTimerTasks(uint64_t now) { // Retry outstanding WHOIS requests Mutex::Lock _l(_outstandingWhoisRequests_m); - for(std::map< Address,WhoisRequest >::iterator i(_outstandingWhoisRequests.begin());i!=_outstandingWhoisRequests.end();) { - unsigned long since = (unsigned long)(now - i->second.lastSent); + Hashtable< Address,WhoisRequest >::Iterator i(_outstandingWhoisRequests); + Address *a = (Address *)0; + WhoisRequest *r = (WhoisRequest *)0; + while (i.next(a,r)) { + const unsigned long since = (unsigned long)(now - r->lastSent); if (since >= ZT_WHOIS_RETRY_DELAY) { - if (i->second.retries >= ZT_MAX_WHOIS_RETRIES) { - TRACE("WHOIS %s timed out",i->first.toString().c_str()); - _outstandingWhoisRequests.erase(i++); - continue; + if (r->retries >= ZT_MAX_WHOIS_RETRIES) { + TRACE("WHOIS %s timed out",a->toString().c_str()); + _outstandingWhoisRequests.erase(*a); } else { - i->second.lastSent = now; - i->second.peersConsulted[i->second.retries] = _sendWhoisRequest(i->first,i->second.peersConsulted,i->second.retries); - ++i->second.retries; - TRACE("WHOIS %s (retry %u)",i->first.toString().c_str(),i->second.retries); + r->lastSent = now; + r->peersConsulted[r->retries] = _sendWhoisRequest(*a,r->peersConsulted,r->retries); + ++r->retries; + TRACE("WHOIS %s (retry %u)",a->toString().c_str(),r->retries); nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY); } } else { nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since); } - ++i; } } @@ -524,7 +531,7 @@ unsigned long Switch::doTimerTasks(uint64_t now) DefragQueueEntry *qe = (DefragQueueEntry *)0; while (i.next(packetId,qe)) { if ((now - qe->creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) { - TRACE("incomplete fragmented packet 
%.16llx timed out, fragments discarded",i->first); + TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",*packetId); _defragQueue.erase(*packetId); } } diff --git a/node/Switch.hpp b/node/Switch.hpp index 2d83b70cc..55e2c362d 100644 --- a/node/Switch.hpp +++ b/node/Switch.hpp @@ -190,11 +190,12 @@ private: // Outsanding WHOIS requests and how many retries they've undergone struct WhoisRequest { + WhoisRequest() : lastSent(0),retries(0) {} uint64_t lastSent; Address peersConsulted[ZT_MAX_WHOIS_RETRIES]; // by retry unsigned int retries; // 0..ZT_MAX_WHOIS_RETRIES }; - std::map< Address,WhoisRequest > _outstandingWhoisRequests; + Hashtable< Address,WhoisRequest > _outstandingWhoisRequests; Mutex _outstandingWhoisRequests_m; // Packet defragmentation queue -- comes before RX queue in path From 0d386f1c3119b869197bafd5f2ad75ba2179850f Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Tue, 8 Sep 2015 11:35:55 -0700 Subject: [PATCH 11/12] Add a bit of useful testing instrumentation to SqliteNetworkController. --- controller/SqliteNetworkController.cpp | 43 ++++++++++++++++++++++++-- controller/SqliteNetworkController.hpp | 1 - node/IncomingPacket.cpp | 4 +-- node/Network.cpp | 2 +- node/NetworkController.hpp | 2 -- 5 files changed, 43 insertions(+), 9 deletions(-) diff --git a/controller/SqliteNetworkController.cpp b/controller/SqliteNetworkController.cpp index 2a004bda4..6a786ce4e 100644 --- a/controller/SqliteNetworkController.cpp +++ b/controller/SqliteNetworkController.cpp @@ -301,7 +301,7 @@ SqliteNetworkController::~SqliteNetworkController() } } -NetworkController::ResultCode SqliteNetworkController::doNetworkConfigRequest(const InetAddress &fromAddr,const Identity &signingId,const Identity &identity,uint64_t nwid,const Dictionary &metaData,uint64_t haveRevision,Dictionary &netconf) +NetworkController::ResultCode SqliteNetworkController::doNetworkConfigRequest(const InetAddress &fromAddr,const Identity &signingId,const Identity &identity,uint64_t nwid,const Dictionary &metaData,Dictionary &netconf) { // Decode some stuff from metaData const unsigned int clientMajorVersion = (unsigned int)metaData.getHexUInt(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MAJOR_VERSION,0); @@ -1355,8 +1355,44 @@ unsigned int SqliteNetworkController::_doCPGet( sqlite3_bind_text(_sGetMember2,1,nwids,16,SQLITE_STATIC); sqlite3_bind_text(_sGetMember2,2,addrs,10,SQLITE_STATIC); if (sqlite3_step(_sGetMember2) == SQLITE_ROW) { + const char *memberIdStr = (const char *)sqlite3_column_text(_sGetMember2,3); + + // If testSingingId is included in the URL or X-ZT1-TestSigningId in the headers + // and if it contains an identity with a secret portion, the resturned JSON + // will contain an extra field called _testConf. This will contain several + // fields that report the result of doNetworkConfigRequest() for this member. 
+ std::string testFields; + { + Identity testOutputSigningId; + std::map::const_iterator sid(urlArgs.find("testSigningId")); + if (sid != urlArgs.end()) { + testOutputSigningId.fromString(sid->second.c_str()); + } else { + sid = headers.find("x-zt1-testsigningid"); + if (sid != headers.end()) + testOutputSigningId.fromString(sid->second.c_str()); + } + + if ((testOutputSigningId.hasPrivate())&&(memberIdStr)) { + Dictionary testNetconf; + NetworkController::ResultCode rc = this->doNetworkConfigRequest( + InetAddress(), + testOutputSigningId, + Identity(memberIdStr), + nwid, + Dictionary(), // TODO: allow passing of meta-data for testing + testNetconf); + char rcs[16]; + Utils::snprintf(rcs,sizeof(rcs),"%d",(int)rc); + testFields.append("\t\"_test\": {\n"); + testFields.append("\t\t\"resultCode\": "); testFields.append(rcs); testFields.append(",\n"); + testFields.append("\t\t\"result\": \""); testFields.append(_jsonEscape(testNetconf.toString().c_str()).c_str()); testFields.append("\""); + testFields.append("\t}\n"); + } + } + Utils::snprintf(json,sizeof(json), - "{\n" + "{\n%s" "\t\"nwid\": \"%s\",\n" "\t\"address\": \"%s\",\n" "\t\"controllerInstanceId\": \"%s\",\n" @@ -1366,6 +1402,7 @@ unsigned int SqliteNetworkController::_doCPGet( "\t\"clock\": %llu,\n" "\t\"identity\": \"%s\",\n" "\t\"ipAssignments\": [", + testFields.c_str(), nwids, addrs, _instanceId.c_str(), @@ -1373,7 +1410,7 @@ unsigned int SqliteNetworkController::_doCPGet( (sqlite3_column_int(_sGetMember2,1) > 0) ? "true" : "false", (unsigned long long)sqlite3_column_int64(_sGetMember2,2), (unsigned long long)OSUtils::now(), - _jsonEscape((const char *)sqlite3_column_text(_sGetMember2,3)).c_str()); + _jsonEscape(memberIdStr).c_str()); responseBody = json; sqlite3_reset(_sGetIpAssignmentsForNode2); diff --git a/controller/SqliteNetworkController.hpp b/controller/SqliteNetworkController.hpp index adfe09916..26a069682 100644 --- a/controller/SqliteNetworkController.hpp +++ b/controller/SqliteNetworkController.hpp @@ -54,7 +54,6 @@ public: const Identity &identity, uint64_t nwid, const Dictionary &metaData, - uint64_t haveRevision, Dictionary &netconf); unsigned int handleControlPlaneHttpGET( diff --git a/node/IncomingPacket.cpp b/node/IncomingPacket.cpp index b1fda8ef8..fc79270bb 100644 --- a/node/IncomingPacket.cpp +++ b/node/IncomingPacket.cpp @@ -662,7 +662,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons const uint64_t nwid = at(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_NETWORK_ID); const unsigned int metaDataLength = at(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT_LEN); const Dictionary metaData((const char *)field(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT,metaDataLength),metaDataLength); - const uint64_t haveRevision = ((ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength + 8) <= size()) ? at(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength) : 0ULL; + //const uint64_t haveRevision = ((ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength + 8) <= size()) ? at(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength) : 0ULL; const unsigned int h = hops(); const uint64_t pid = packetId(); @@ -670,7 +670,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons if (RR->localNetworkController) { Dictionary netconf; - switch(RR->localNetworkController->doNetworkConfigRequest((h > 0) ? 
InetAddress() : _remoteAddress,RR->identity,peer->identity(),nwid,metaData,haveRevision,netconf)) { + switch(RR->localNetworkController->doNetworkConfigRequest((h > 0) ? InetAddress() : _remoteAddress,RR->identity,peer->identity(),nwid,metaData,netconf)) { case NetworkController::NETCONF_QUERY_OK: { const std::string netconfStr(netconf.toString()); diff --git a/node/Network.cpp b/node/Network.cpp index 331b37a21..f8130d76c 100644 --- a/node/Network.cpp +++ b/node/Network.cpp @@ -240,7 +240,7 @@ void Network::requestConfiguration() if (RR->localNetworkController) { SharedPtr nconf(config2()); Dictionary newconf; - switch(RR->localNetworkController->doNetworkConfigRequest(InetAddress(),RR->identity,RR->identity,_id,Dictionary(),(nconf) ? nconf->revision() : (uint64_t)0,newconf)) { + switch(RR->localNetworkController->doNetworkConfigRequest(InetAddress(),RR->identity,RR->identity,_id,Dictionary(),newconf)) { case NetworkController::NETCONF_QUERY_OK: this->setConfiguration(newconf,true); return; diff --git a/node/NetworkController.hpp b/node/NetworkController.hpp index ee481a623..cf327d7ec 100644 --- a/node/NetworkController.hpp +++ b/node/NetworkController.hpp @@ -75,7 +75,6 @@ public: * @param identity Originating peer ZeroTier identity * @param nwid 64-bit network ID * @param metaData Meta-data bundled with request (empty if none) - * @param haveRevision Network revision ID sent by requesting peer or 0 if none * @param result Dictionary to receive resulting signed netconf on success * @return Returns NETCONF_QUERY_OK if result dictionary is valid, or an error code on error */ @@ -85,7 +84,6 @@ public: const Identity &identity, uint64_t nwid, const Dictionary &metaData, - uint64_t haveRevision, Dictionary &result) = 0; }; From 4fbcad246850d7bf00289b898f4a26065276d6e2 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Tue, 8 Sep 2015 13:02:42 -0700 Subject: [PATCH 12/12] Allow identity to be populated for newly inserted Member objects to permit transfer from old network controller and testing. --- controller/SqliteNetworkController.cpp | 27 +++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/controller/SqliteNetworkController.cpp b/controller/SqliteNetworkController.cpp index 6a786ce4e..e0567fed7 100644 --- a/controller/SqliteNetworkController.cpp +++ b/controller/SqliteNetworkController.cpp @@ -166,7 +166,7 @@ SqliteNetworkController::SqliteNetworkController(const char *dbPath) : /* Node */ ||(sqlite3_prepare_v2(_db,"SELECT identity FROM Node WHERE id = ?",-1,&_sGetNodeIdentity,(const char **)0) != SQLITE_OK) - ||(sqlite3_prepare_v2(_db,"INSERT INTO Node (id,identity) VALUES (?,?)",-1,&_sCreateNode,(const char **)0) != SQLITE_OK) + ||(sqlite3_prepare_v2(_db,"INSERT OR REPLACE INTO Node (id,identity) VALUES (?,?)",-1,&_sCreateNode,(const char **)0) != SQLITE_OK) /* Rule */ ||(sqlite3_prepare_v2(_db,"SELECT etherType FROM Rule WHERE networkId = ? AND \"action\" = 'accept'",-1,&_sGetEtherTypesFromRuleTable,(const char **)0) != SQLITE_OK) @@ -870,6 +870,27 @@ unsigned int SqliteNetworkController::handleControlPlaneHttpPOST( } addToNetworkRevision = 1; } + } else if (!strcmp(j->u.object.values[k].name,"identity")) { + // Identity is technically an immutable field, but if the member's Node has + // no identity we allow it to be populated. This is primarily for migrating + // node data from another controller. 
+ json_value *idstr = j->u.object.values[k].value; + if (idstr->type == json_string) { + sqlite3_reset(_sGetNodeIdentity); + sqlite3_bind_text(_sGetNodeIdentity,1,addrs,10,SQLITE_STATIC); + if ((sqlite3_step(_sGetNodeIdentity) == SQLITE_ROW)&&(!sqlite3_column_text(_sGetNodeIdentity,0))) { + try { + Identity id2(idstr->u.string.ptr); + if (id2) { + std::string idstr2(id2.toString(false)); // object must persist until after sqlite3_step() for SQLITE_STATIC + sqlite3_reset(_sCreateNode); + sqlite3_bind_text(_sCreateNode,1,addrs,10,SQLITE_STATIC); + sqlite3_bind_text(_sCreateNode,2,idstr2.c_str(),-1,SQLITE_STATIC); + sqlite3_step(_sCreateNode); + } + } catch ( ... ) {} // ignore invalid identities + } + } } } @@ -1383,9 +1404,9 @@ unsigned int SqliteNetworkController::_doCPGet( Dictionary(), // TODO: allow passing of meta-data for testing testNetconf); char rcs[16]; - Utils::snprintf(rcs,sizeof(rcs),"%d",(int)rc); + Utils::snprintf(rcs,sizeof(rcs),"%d,\n",(int)rc); testFields.append("\t\"_test\": {\n"); - testFields.append("\t\t\"resultCode\": "); testFields.append(rcs); testFields.append(",\n"); + testFields.append("\t\t\"resultCode\": "); testFields.append(rcs); testFields.append("\t\t\"result\": \""); testFields.append(_jsonEscape(testNetconf.toString().c_str()).c_str()); testFields.append("\""); testFields.append("\t}\n"); }
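
One pattern worth calling out across the series: every call site guards its Hashtable with a Mutex, and when contents need to leave the locked section they are snapshotted first (entries() in Topology::allPeers(), appendKeys() in Network::_allMulticastGroups()) so iteration of the copy can happen with no lock held. A minimal sketch of that snapshot-under-lock idiom, with snapshot() as a hypothetical name:

    #include <vector>
    #include <utility>
    #include <stdint.h>
    #include "Hashtable.hpp"
    #include "Address.hpp"
    #include "Mutex.hpp"

    using namespace ZeroTier;

    // Hypothetical: copy the table's contents while holding its lock, then let
    // the caller iterate the vector lock-free (as Node::peers() does).
    static std::vector< std::pair< Address,uint64_t > > snapshot(const Hashtable< Address,uint64_t > &t,Mutex &m)
    {
        Mutex::Lock _l(m);
        return t.entries();
    }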