Fix infinite loop in Cluster, clean up some stuff elsewhere, and back out rate limiting in PUSH_DIRECT_PATHS for now (but we will do something else to mitigate amplification attacks)

Adam Ierymenko 2015-10-27 12:01:00 -07:00
parent 54a99d8e32
commit a1a0ee4edb
7 changed files with 16 additions and 100 deletions

node/Cluster.cpp

@@ -239,7 +239,7 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
             const MAC mac(dmsg.field(ptr,6),6); ptr += 6;
             const uint32_t adi = dmsg.at<uint32_t>(ptr); ptr += 4;
             RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address);
-            TRACE("[%u] %s likes %s/%.8x on %.16llu",(unsigned int)fromMemberId,address.toString().c_str(),mac.toString().c_str(),(unsigned int)adi,nwid);
+            TRACE("[%u] %s likes %s/%.8x on %.16llx",(unsigned int)fromMemberId,address.toString().c_str(),mac.toString().c_str(),(unsigned int)adi,nwid);
         } break;
         case STATE_MESSAGE_COM: {
@@ -376,11 +376,12 @@ bool Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee
         Mutex::Lock _l2(_peerAffinities_m);
         std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),_PeerAffinity(toPeerAddress,0,0))); // O(log(n))
         while ((i != _peerAffinities.end())&&(i->address() == toPeerAddress)) {
-            uint16_t mid = i->clusterMemberId();
+            const uint16_t mid = i->clusterMemberId();
             if ((mid != _id)&&(i->timestamp > mostRecentTimestamp)) {
                 mostRecentTimestamp = i->timestamp;
                 canHasPeer = mid;
             }
+            ++i;
         }
     }
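For illustration, a minimal, self-contained sketch (hypothetical names, not the actual ZeroTier types) of the bug behind the infinite loop fixed above: after std::lower_bound jumps to the first matching entry, the scan loop has to advance its iterator on every pass; the missing ++i meant the loop condition never changed and the loop never exited.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the peer-affinity records scanned above.
struct Affinity {
    uint64_t address; // sort key
    uint16_t memberId;
    bool operator<(const Affinity &a) const { return (address < a.address); }
};

int main()
{
    std::vector<Affinity> affinities{{1,10},{2,20},{2,21},{3,30}}; // kept sorted by address
    const uint64_t wanted = 2;

    // O(log(n)) jump to the first entry with a matching key...
    std::vector<Affinity>::iterator i(std::lower_bound(affinities.begin(),affinities.end(),Affinity{wanted,0}));
    // ...then a linear scan over the (possibly several) matching entries.
    while ((i != affinities.end())&&(i->address == wanted)) {
        std::printf("candidate member %u\n",(unsigned int)i->memberId);
        ++i; // without this increment the condition never changes and the loop spins forever
    }
    return 0;
}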

node/Constants.hpp

@@ -324,11 +324,6 @@
  */
 #define ZT_DIRECT_PATH_PUSH_INTERVAL 120000
-/**
- * Minimum interval between direct path pushes from a given peer or we will ignore them
- */
-#define ZT_DIRECT_PATH_PUSH_MIN_RECEIVE_INTERVAL 2000
 /**
  * How long (max) to remember network certificates of membership?
  *
@@ -355,7 +350,7 @@
 /**
  * Maximum number of endpoints to contact per address type (to limit pushes like GitHub issue #235)
  */
-#define ZT_PUSH_DIRECT_PATHS_MAX_ENDPOINTS_PER_TYPE 8
+#define ZT_PUSH_DIRECT_PATHS_MAX_ENDPOINTS_PER_TYPE 2
 /**
  * A test pseudo-network-ID that can be joined

node/IncomingPacket.cpp

@@ -622,7 +622,7 @@ bool IncomingPacket::_doMULTICAST_LIKE(const RuntimeEnvironment *RR,const Shared
     // Iterate through 18-byte network,MAC,ADI tuples
     for(unsigned int ptr=ZT_PACKET_IDX_PAYLOAD;ptr<size();ptr+=18) {
-        const uint64_t nwid(at<uint64_t>(ptr));
+        const uint64_t nwid = at<uint64_t>(ptr);
         const MulticastGroup group(MAC(field(ptr + 8,6),6),at<uint32_t>(ptr + 14));
         RR->mc->add(now,nwid,group,peer->address());
 #ifdef ZT_ENABLE_CLUSTER
@@ -888,12 +888,6 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
 {
     try {
         const uint64_t now = RR->node->now();
-        if ((now - peer->lastDirectPathPushReceived()) < ZT_DIRECT_PATH_PUSH_MIN_RECEIVE_INTERVAL) {
-            TRACE("dropped PUSH_DIRECT_PATHS from %s(%s): too frequent!",source().toString().c_str(),_remoteAddress.toString().c_str());
-            return true;
-        }
-        peer->setLastDirectPathPushReceived(now);
         const RemotePath *currentBest = peer->getBestPath(now);
         unsigned int count = at<uint16_t>(ZT_PACKET_IDX_PAYLOAD);
@@ -916,7 +910,7 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
                 TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
                 if (v4Count++ < ZT_PUSH_DIRECT_PATHS_MAX_ENDPOINTS_PER_TYPE) {
                     if ((!currentBest)||(currentBest->address() != a))
-                        peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
+                        peer->attemptToContactAt(RR,_localAddress,a,now);
                 }
             }
         } break;
@@ -926,7 +920,7 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
                 TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
                 if (v6Count++ < ZT_PUSH_DIRECT_PATHS_MAX_ENDPOINTS_PER_TYPE) {
                     if ((!currentBest)||(currentBest->address() != a))
-                        peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
+                        peer->attemptToContactAt(RR,_localAddress,a,now);
                 }
             }
         } break;
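Backing out the per-receive rate limit (removed in the hunk above) leaves ZT_PUSH_DIRECT_PATHS_MAX_ENDPOINTS_PER_TYPE, lowered from 8 to 2 in Constants.hpp, as the interim bound on amplification: one inbound PUSH_DIRECT_PATHS can now trigger at most two contact attempts per address family. A rough standalone sketch of that capping pattern (hypothetical names and addresses, not the actual packet parser):

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

static const unsigned int MAX_ENDPOINTS_PER_TYPE = 2; // stands in for ZT_PUSH_DIRECT_PATHS_MAX_ENDPOINTS_PER_TYPE

int main()
{
    // Pretend a peer pushed these candidate paths; the char tags the address family.
    std::vector< std::pair<char,std::string> > pushed;
    pushed.push_back(std::make_pair('4',std::string("10.0.0.1")));
    pushed.push_back(std::make_pair('4',std::string("10.0.0.2")));
    pushed.push_back(std::make_pair('4',std::string("10.0.0.3")));
    pushed.push_back(std::make_pair('6',std::string("fd00::1")));
    pushed.push_back(std::make_pair('6',std::string("fd00::2")));

    unsigned int v4Count = 0,v6Count = 0;
    for(std::vector< std::pair<char,std::string> >::const_iterator p(pushed.begin());p!=pushed.end();++p) {
        unsigned int &count = (p->first == '4') ? v4Count : v6Count;
        if (count++ < MAX_ENDPOINTS_PER_TYPE)
            std::printf("would attempt to contact %s\n",p->second.c_str()); // at most 2 per family per packet
    }
    return 0;
}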

node/Peer.cpp

@@ -55,7 +55,6 @@ Peer::Peer(const Identity &myIdentity,const Identity &peerIdentity)
     _lastAnnouncedTo(0),
     _lastPathConfirmationSent(0),
     _lastDirectPathPushSent(0),
-    _lastDirectPathPushReceived(0),
     _lastPathSort(0),
     _vProto(0),
     _vMajor(0),

node/Peer.hpp

@@ -407,16 +407,6 @@ public:
      */
     bool needsOurNetworkMembershipCertificate(uint64_t nwid,uint64_t now,bool updateLastPushedTime);
-    /**
-     * @return Time last direct path push was received
-     */
-    inline uint64_t lastDirectPathPushReceived() const throw() { return _lastDirectPathPushReceived; }
-    /**
-     * @param t New time of last direct path push received
-     */
-    inline void setLastDirectPathPushReceived(uint64_t t) throw() { _lastDirectPathPushReceived = t; }
     /**
      * Perform periodic cleaning operations
      */
@@ -450,7 +440,7 @@ public:
         const unsigned int recSizePos = b.size();
         b.addSize(4); // space for uint32_t field length
-        b.append((uint16_t)1); // version of serialized Peer data
+        b.append((uint16_t)0); // version of serialized Peer data
         _id.serialize(b,false);
@@ -461,7 +451,6 @@ public:
         b.append((uint64_t)_lastAnnouncedTo);
         b.append((uint64_t)_lastPathConfirmationSent);
         b.append((uint64_t)_lastDirectPathPushSent);
-        b.append((uint64_t)_lastDirectPathPushReceived);
         b.append((uint64_t)_lastPathSort);
         b.append((uint16_t)_vProto);
         b.append((uint16_t)_vMajor);
@@ -513,7 +502,7 @@ public:
         const unsigned int recSize = b.template at<uint32_t>(p); p += 4;
         if ((p + recSize) > b.size())
             return SharedPtr<Peer>(); // size invalid
-        if (b.template at<uint16_t>(p) != 1)
+        if (b.template at<uint16_t>(p) != 0)
             return SharedPtr<Peer>(); // version mismatch
         p += 2;
@@ -531,7 +520,6 @@ public:
         np->_lastAnnouncedTo = b.template at<uint64_t>(p); p += 8;
         np->_lastPathConfirmationSent = b.template at<uint64_t>(p); p += 8;
         np->_lastDirectPathPushSent = b.template at<uint64_t>(p); p += 8;
-        np->_lastDirectPathPushReceived = b.template at<uint64_t>(p); p += 8;
         np->_lastPathSort = b.template at<uint64_t>(p); p += 8;
         np->_vProto = b.template at<uint16_t>(p); p += 2;
         np->_vMajor = b.template at<uint16_t>(p); p += 2;
@@ -581,7 +569,6 @@ private:
     uint64_t _lastAnnouncedTo;
     uint64_t _lastPathConfirmationSent;
     uint64_t _lastDirectPathPushSent;
-    uint64_t _lastDirectPathPushReceived;
     uint64_t _lastPathSort;
     uint16_t _vProto;
     uint16_t _vMajor;
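On the serialized-Peer version change above: dropping _lastDirectPathPushReceived changes the record layout, so the version tag written by serialize() and checked by deserialize() moves with it (back to 0 here), and a record carrying any other tag is rejected rather than partially parsed. A much-simplified sketch of that version-gate idea (hypothetical record and encoding, not the real Buffer API):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

static const uint16_t RECORD_VERSION = 0; // changes whenever the field layout changes

struct Record { uint64_t lastAnnouncedTo; uint64_t lastPathSort; }; // hypothetical layout

static std::vector<uint8_t> serialize(const Record &r)
{
    std::vector<uint8_t> b(2 + sizeof(Record));
    b[0] = (uint8_t)(RECORD_VERSION >> 8); b[1] = (uint8_t)RECORD_VERSION; // version tag first
    std::memcpy(&b[2],&r,sizeof(Record)); // naive copy, no endian handling, illustration only
    return b;
}

static bool deserialize(const std::vector<uint8_t> &b,Record &r)
{
    if (b.size() < (2 + sizeof(Record)))
        return false; // size invalid
    if ((((uint16_t)b[0] << 8) | (uint16_t)b[1]) != RECORD_VERSION)
        return false; // version mismatch: layout differs, reject instead of guessing
    std::memcpy(&r,&b[2],sizeof(Record));
    return true;
}

int main()
{
    Record in = {1000,2000},out = {0,0};
    std::vector<uint8_t> b(serialize(in));
    std::printf("round trip: %s\n",deserialize(b,out) ? "ok" : "rejected");
    return 0;
}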

node/SelfAwareness.cpp

@@ -123,16 +123,16 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
         // For all peers for whom we forgot an address, send a packet indirectly if
         // they are still considered alive so that we will re-establish direct links.
-        SharedPtr<Peer> sn(RR->topology->getBestRoot());
-        if (sn) {
-            RemotePath *snp = sn->getBestPath(now);
-            if (snp) {
+        SharedPtr<Peer> r(RR->topology->getBestRoot());
+        if (r) {
+            RemotePath *rp = r->getBestPath(now);
+            if (rp) {
                 for(std::vector< SharedPtr<Peer> >::const_iterator p(rset.peersReset.begin());p!=rset.peersReset.end();++p) {
                     if ((*p)->alive(now)) {
-                        TRACE("sending indirect NOP to %s via %s(%s) to re-establish link",(*p)->address().toString().c_str(),sn->address().toString().c_str(),snp->address().toString().c_str());
+                        TRACE("sending indirect NOP to %s via %s to re-establish link",(*p)->address().toString().c_str(),r->address().toString().c_str());
                         Packet outp((*p)->address(),RR->identity.address(),Packet::VERB_NOP);
                         outp.armor((*p)->key(),true);
-                        snp->send(RR,outp.data(),outp.size(),now);
+                        rp->send(RR,outp.data(),outp.size(),now);
                     }
                 }
             }

node/Topology.cpp

@@ -254,68 +254,8 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
             return *bestOverall;
         }
-/*
-        unsigned int l,bestLatency = 65536;
-        uint64_t lds,ldr;
-        // First look for a best root by comparing latencies, but exclude
-        // root servers that have not responded to direct messages in order to
-        // try to exclude any that are dead or unreachable.
-        for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootPeers.begin());sn!=_rootPeers.end();) {
-            // Skip explicitly avoided relays
-            for(unsigned int i=0;i<avoidCount;++i) {
-                if (avoid[i] == (*sn)->address())
-                    goto keep_searching_for_roots;
-            }
-            // Skip possibly comatose or unreachable relays
-            lds = (*sn)->lastDirectSend();
-            ldr = (*sn)->lastDirectReceive();
-            if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
-                goto keep_searching_for_roots;
-            if ((*sn)->hasActiveDirectPath(now)) {
-                l = (*sn)->latency();
-                if (bestRoot) {
-                    if ((l)&&(l < bestLatency)) {
-                        bestLatency = l;
-                        bestRoot = *sn;
-                    }
-                } else {
-                    if (l)
-                        bestLatency = l;
-                    bestRoot = *sn;
-                }
-            }
-keep_searching_for_roots:
-            ++sn;
-        }
-        if (bestRoot) {
-            bestRoot->use(now);
-            return bestRoot;
-        } else if (strictAvoid)
-            return SharedPtr<Peer>();
-        // If we have nothing from above, just pick one without avoidance criteria.
-        for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootPeers.begin();sn!=_rootPeers.end();++sn) {
-            if ((*sn)->hasActiveDirectPath(now)) {
-                unsigned int l = (*sn)->latency();
-                if (bestRoot) {
-                    if ((l)&&(l < bestLatency)) {
-                        bestLatency = l;
-                        bestRoot = *sn;
-                    }
-                } else {
-                    if (l)
-                        bestLatency = l;
-                    bestRoot = *sn;
-                }
-            }
-        }
-*/
     }
     return SharedPtr<Peer>();
 }