Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-05 03:53:44 +02:00)
Periodically re-announce peers that we have.

parent 9f0f0197fe
commit d6c0d176ee

2 changed files with 46 additions and 18 deletions
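
In isolation, the change is simple: each cluster member now periodically walks its own peer list and re-broadcasts "I have this peer" for every peer it still holds an active direct path to, so the other members keep hearing about it. Below is a minimal standalone sketch of that scheduling pattern; PeerDirectory, ClusterLike and kAnnouncePeriodMs are illustrative names, not ZeroTier types, and only the shape of the quarter-period check mirrors the real Cluster::doPeriodicTasks() shown further down.

// Simplified sketch of the periodic re-announce pattern added in this commit.
// PeerDirectory, ClusterLike and kAnnouncePeriodMs are stand-ins, not the real
// ZeroTier types; the real logic lives in Cluster::doPeriodicTasks() below.
#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct PeerDirectory {
	// peer address -> true if we currently hold an active direct path to it
	std::map<std::string,bool> peers;
};

struct ClusterLike {
	static const uint64_t kAnnouncePeriodMs = 14000; // arbitrary stand-in for ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD

	uint64_t lastCheckedPeersForAnnounce = 0;
	std::vector<std::string> announced; // records what we re-announced, for illustration

	void replicateHavePeer(const std::string &addr) { announced.push_back(addr); }

	// Called on every periodic-task tick; only does real work every quarter of the
	// announce period, mirroring the (ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD / 4) check.
	void doPeriodicTasks(uint64_t now,const PeerDirectory &dir)
	{
		if ((now - lastCheckedPeersForAnnounce) >= (kAnnouncePeriodMs / 4)) {
			lastCheckedPeersForAnnounce = now;
			for (const auto &p : dir.peers) {
				if (p.second) // only peers we have an active direct path to
					replicateHavePeer(p.first);
			}
		}
	}
};

int main()
{
	PeerDirectory dir;
	dir.peers["peer-a"] = true;  // active direct path
	dir.peers["peer-b"] = false; // known but no direct path

	ClusterLike c;
	c.doPeriodicTasks(10000,dir); // elapsed >= kAnnouncePeriodMs/4: announces peer-a
	c.doPeriodicTasks(11000,dir); // only 1000 ms since last pass: no work
	c.doPeriodicTasks(14000,dir); // 4000 ms >= 3500 ms: announces peer-a again
	return (int)c.announced.size(); // 2
}

In both the sketch and the real code, the walk fires every quarter of the announce period, so a still-active peer is re-announced several times within each ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD.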
node/Cluster.cpp

@@ -84,7 +84,8 @@ Cluster::Cluster(
 	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
 	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
 	_peerAffinities(65536),
-	_lastCleanedPeerAffinities(0)
+	_lastCleanedPeerAffinities(0),
+	_lastCheckedPeersForAnnounce(0)
 {
 	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
@@ -328,6 +329,7 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 
 			if (haveMatch) {
 				_send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
+				_flush(fromMemberId); // we want this to go ASAP, since with port restricted cone NATs success can be timing-sensitive
 				RR->sw->send(rendezvousForLocal,true,0);
 			}
 		}
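
The single added _flush(fromMemberId) matters because, as the inline comment says, hole punching through port restricted cone NATs is timing-sensitive: if the PROXY_SEND message waited in the outgoing queue for the next periodic flush (on the order of ZT_CLUSTER_PERIODIC_TASK_PERIOD milliseconds later), the two endpoints' packets could miss each other's NAT window. A rough standalone sketch of the enqueue-then-flush-immediately idea; OutQueue, enqueue, flushNow and sendRendezvousPair are made-up names, not the Cluster API.

// Illustrative sketch only (not ZeroTier code): the rendezvous to the remote
// member is flushed immediately instead of waiting for the periodic flush,
// so both sides can fire their hole-punch packets at nearly the same time.
#include <functional>
#include <string>
#include <vector>

struct OutQueue {
	std::vector<std::string> pending;
	std::function<void(const std::string &)> wire; // transmit callback

	void enqueue(const std::string &msg) { pending.push_back(msg); }

	void flushNow()
	{
		for (const auto &m : pending)
			wire(m);
		pending.clear();
	}
};

void sendRendezvousPair(OutQueue &toRemoteMember,
                        std::function<void(const std::string &)> sendLocal,
                        const std::string &rendezvousForRemote,
                        const std::string &rendezvousForLocal)
{
	toRemoteMember.enqueue(rendezvousForRemote);
	toRemoteMember.flushNow();     // analogous to the added _flush(fromMemberId)
	sendLocal(rendezvousForLocal); // analogous to RR->sw->send(rendezvousForLocal,true,0)
}

int main()
{
	std::vector<std::string> wireLog;
	OutQueue q;
	q.wire = [&wireLog](const std::string &m) { wireLog.push_back("to-member:" + m); };

	sendRendezvousPair(q,
		[&wireLog](const std::string &m) { wireLog.push_back("to-local-peer:" + m); },
		"RENDEZVOUS for remote peer",
		"RENDEZVOUS for local peer");

	return (int)wireLog.size(); // 2: both sides are told to punch at (nearly) the same time
}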
@@ -469,10 +471,45 @@ void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembers
 	}
 }
 
+struct _ClusterAnnouncePeers
+{
+	_ClusterAnnouncePeers(const uint64_t now_,Cluster *parent_) : now(now_),parent(parent_) {}
+	const uint64_t now;
+	Cluster *const parent;
+	inline void operator()(const Topology &t,const SharedPtr<Peer> &peer)
+	{
+		Path *p = peer->getBestPath(now);
+		if (p)
+			parent->replicateHavePeer(peer->identity(),p->address());
+	}
+};
 void Cluster::doPeriodicTasks()
 {
 	const uint64_t now = RR->node->now();
 
+	// Erase old peer affinity entries just to control table size
+	if ((now - _lastCleanedPeerAffinities) >= (ZT_PEER_ACTIVITY_TIMEOUT * 5)) {
+		_lastCleanedPeerAffinities = now;
+		Address *k = (Address *)0;
+		_PA *v = (_PA *)0;
+		Mutex::Lock _l(_peerAffinities_m);
+		Hashtable< Address,_PA >::Iterator i(_peerAffinities);
+		while (i.next(k,v)) {
+			if ((now - v->ts) >= (ZT_PEER_ACTIVITY_TIMEOUT * 5))
+				_peerAffinities.erase(*k);
+		}
+	}
+
+	// Announce peers that we have active direct paths to -- note that we forget paths
+	// that other cluster members claim they have, which prevents us from fighting
+	// with other cluster members (route flapping) over specific paths.
+	if ((now - _lastCheckedPeersForAnnounce) >= (ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD / 4)) {
+		_lastCheckedPeersForAnnounce = now;
+		_ClusterAnnouncePeers func(now,this);
+		RR->topology->eachPeer<_ClusterAnnouncePeers &>(func);
+	}
+
+	// Flush outgoing packet send queue every doPeriodicTasks()
 	{
 		Mutex::Lock _l(_memberIds_m);
 		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
@@ -506,20 +543,6 @@ void Cluster::doPeriodicTasks()
 			_flush(*mid); // does nothing if nothing to flush
 		}
 	}
-
-	{
-		if ((now - _lastCleanedPeerAffinities) >= (ZT_PEER_ACTIVITY_TIMEOUT * 10)) {
-			_lastCleanedPeerAffinities = now;
-			Address *k = (Address *)0;
-			_PA *v = (_PA *)0;
-			Mutex::Lock _l(_peerAffinities_m);
-			Hashtable< Address,_PA >::Iterator i(_peerAffinities);
-			while (i.next(k,v)) {
-				if ((now - v->ts) >= (ZT_PEER_ACTIVITY_TIMEOUT * 10))
-					_peerAffinities.erase(*k);
-			}
-		}
-	}
 }
 
 void Cluster::addMember(uint16_t memberId)
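
_ClusterAnnouncePeers in the hunk above is a visitor: Topology::eachPeer applies it to every known peer, and it announces only peers that currently report a best path. A simplified standalone sketch of that visitor shape follows; TopologySketch, PeerSketch and AnnouncePeersSketch are stand-ins for the real Topology, Peer and functor types.

// Simplified sketch of the visitor pattern used by _ClusterAnnouncePeers.
// The types here are stand-ins; only the shape of the iteration matches the
// real RR->topology->eachPeer<_ClusterAnnouncePeers &>(func) call.
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct PeerSketch {
	std::string identity;
	std::string directPathAddress; // empty if we hold no active direct path
};

struct TopologySketch {
	std::vector< std::shared_ptr<PeerSketch> > peers;

	// Apply a callable to every peer, by reference so it can accumulate state.
	template<typename F>
	void eachPeer(F &f)
	{
		for (const auto &p : peers)
			f(*this,p);
	}
};

struct AnnouncePeersSketch {
	std::vector< std::pair<std::string,std::string> > toAnnounce;

	void operator()(const TopologySketch &,const std::shared_ptr<PeerSketch> &peer)
	{
		// Mirror of the real functor: only announce peers with an active direct path.
		if (!peer->directPathAddress.empty())
			toAnnounce.emplace_back(peer->identity,peer->directPathAddress);
	}
};

int main()
{
	TopologySketch topo;
	topo.peers.push_back(std::make_shared<PeerSketch>(PeerSketch{"peer-a","203.0.113.5/9993"}));
	topo.peers.push_back(std::make_shared<PeerSketch>(PeerSketch{"peer-b",""})); // no direct path, skipped

	AnnouncePeersSketch visitor;
	topo.eachPeer(visitor); // same shape as eachPeer<_ClusterAnnouncePeers &>(func)
	return (int)visitor.toAnnounce.size(); // 1
}

The real functor also carries the current time so it can ask each peer for its best path as of now before calling Cluster::replicateHavePeer().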
node/Cluster.hpp

@@ -46,18 +46,21 @@
 
 /**
  * Timeout for cluster members being considered "alive"
+ *
+ * A cluster member is considered dead and will no longer have peers
+ * redirected to it if we have not heard a heartbeat in this long.
  */
-#define ZT_CLUSTER_TIMEOUT 20000
+#define ZT_CLUSTER_TIMEOUT 10000
 
 /**
  * How often should we announce that we have a peer?
  */
-#define ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD 30000
+#define ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD ((ZT_PEER_ACTIVITY_TIMEOUT / 2) - 1000)
 
 /**
  * Desired period between doPeriodicTasks() in milliseconds
  */
-#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 100
+#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 250
 
 namespace ZeroTier {

@@ -349,7 +352,9 @@ private:
 	};
 	Hashtable< Address,_PA > _peerAffinities;
 	Mutex _peerAffinities_m;
 
 	uint64_t _lastCleanedPeerAffinities;
+	uint64_t _lastCheckedPeersForAnnounce;
 };
 
 } // namespace ZeroTier
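
How the retuned header constants fit together: ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD is now derived from ZT_PEER_ACTIVITY_TIMEOUT rather than fixed at 30000 ms, doPeriodicTasks() walks the peer table every quarter of that period, and the local peer-affinity cleanup discards entries older than five activity timeouts. A small illustration of the arithmetic, using an assumed ZT_PEER_ACTIVITY_TIMEOUT of 60000 ms purely for the numbers (the real value is defined elsewhere in the tree and may differ).

// Illustration of how the retuned timing constants relate. The activity timeout
// below is an assumption for the sake of the arithmetic only; the real definition
// lives elsewhere in the source tree.
#include <cstdint>
#include <cstdio>

static const uint64_t ZT_PEER_ACTIVITY_TIMEOUT_ASSUMED = 60000; // ms, assumed for illustration

static const uint64_t ANNOUNCE_PERIOD = (ZT_PEER_ACTIVITY_TIMEOUT_ASSUMED / 2) - 1000; // 29000 ms
static const uint64_t ANNOUNCE_CHECK  = ANNOUNCE_PERIOD / 4;                           // 7250 ms
static const uint64_t AFFINITY_EXPIRY = ZT_PEER_ACTIVITY_TIMEOUT_ASSUMED * 5;          // 300000 ms

int main()
{
	// The announce pass in doPeriodicTasks() walks the peer table roughly every
	// ANNOUNCE_CHECK ms, which (under this assumed timeout) is far shorter than
	// the AFFINITY_EXPIRY window used to garbage collect _peerAffinities entries.
	std::printf("announce pass every ~%llu ms (period %llu ms), affinity entries expire after %llu ms\n",
	            (unsigned long long)ANNOUNCE_CHECK,
	            (unsigned long long)ANNOUNCE_PERIOD,
	            (unsigned long long)AFFINITY_EXPIRY);
	return 0;
}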