Mirror of https://github.com/zerotier/ZeroTierOne.git

Commit 73e2c6e511 (parent 12cd9df059): "How did that ever work?"

3 changed files with 31 additions and 29 deletions
Cluster.cpp:

```diff
@@ -223,10 +223,10 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 	std::list<_SQE> q;
 	{
 		Mutex::Lock _l(_sendViaClusterQueue_m);
-		std::map< Address,std::list<_SQE> >::iterator qe(_sendViaClusterQueue.find(id.address()));
-		if (qe != _sendViaClusterQueue.end()) {
-			q.swap(qe->second); // just swap ptr instead of copying
-			_sendViaClusterQueue.erase(qe);
+		for(std::list<_SQE>::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
+			if (qi->toPeerAddress == id.address())
+				q.splice(q.end(),_sendViaClusterQueue,qi++);
+			else ++qi;
 		}
 	}
 	for(std::list<_SQE>::iterator qi(q.begin());qi!=q.end();++qi)
```
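The rewritten drain above moves matching entries out of the shared queue with std::list::splice(), which relinks list nodes in constant time instead of copying them (each _SQE carries a full ZT_PROTO_MAX_PACKET_LENGTH byte buffer, so copies would be expensive). A minimal, self-contained sketch of the same splice-with-post-increment idiom, illustrative code rather than repository code:

```cpp
// Move every matching element from one std::list to another with splice(),
// using the i++ pattern: i++ yields the current position for splice() and
// advances i first, so the loop survives the element being relinked away.
#include <cstdio>
#include <list>

int main()
{
    std::list<int> src { 1,2,3,4,5,6 };
    std::list<int> evens;

    for (std::list<int>::iterator i(src.begin());i!=src.end();) {
        if ((*i % 2) == 0)
            evens.splice(evens.end(),src,i++); // O(1) relink, no copy
        else ++i;
    }

    for (std::list<int>::iterator i(evens.begin());i!=evens.end();++i)
        printf("%d\n",*i); // prints 2, 4, 6
    return 0;
}
```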
```diff
@@ -368,16 +368,17 @@ void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress
 	if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
 		return;
 
-	_sendViaClusterQueue_m.lock();
-	unsigned long queueCount;
+	unsigned int queueCount = 0;
 	{
-		std::map< Address,std::list<_SQE> >::const_iterator qe(_sendViaClusterQueue.find(fromPeerAddress));
-		queueCount = (qe == _sendViaClusterQueue.end()) ? 0 : (unsigned long)qe->second.size();
-	}
-	_sendViaClusterQueue_m.unlock();
-	if (queueCount > ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
-		TRACE("dropping sendViaCluster for %s -> %s since queue for sender is full",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
-		return;
+		Mutex::Lock _l(_sendViaClusterQueue_m);
+		for(std::list<_SQE>::const_iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();++qi) {
+			if (qi->fromPeerAddress == fromPeerAddress) {
+				if (++queueCount > ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
+					TRACE("dropping sendViaCluster for %s -> %s since queue for sender is full",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
+					return;
+				}
+			}
+		}
 	}
 
 	const uint64_t now = RR->node->now();
```
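Besides switching the per-sender depth check to a linear scan of the flat list, this hunk replaces the manual lock()/unlock() pair with a scoped Mutex::Lock, so the early return inside the block can no longer leave the mutex held. A sketch of that RAII pattern in standard C++, with std::lock_guard standing in for ZeroTier's Mutex::Lock (the assumption that Mutex::Lock is a scope-bound guard follows from its usage in the diff):

```cpp
// Early returns are safe under a scoped guard: the lock is released on
// every exit path, unlike the old explicit lock()/unlock() bracketing.
#include <list>
#include <mutex>

static std::mutex sendQueueLock;
static std::list<int> sendQueue;

// Returns false once the queue already holds maxPerSender entries.
bool queueHasRoom(unsigned int maxPerSender)
{
    std::lock_guard<std::mutex> l(sendQueueLock); // unlocked on any return
    unsigned int n = 0;
    for (std::list<int>::const_iterator i(sendQueue.begin());i!=sendQueue.end();++i) {
        if (++n > maxPerSender)
            return false; // early return: the guard still unlocks the mutex
    }
    return true;
}

int main() { return queueHasRoom(16) ? 0 : 1; }
```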
```diff
@@ -386,9 +387,9 @@ void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress
 	unsigned int mostRecentMemberId = 0xffffffff;
 	{
 		Mutex::Lock _l2(_remotePeers_m);
-		std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(fromPeerAddress,0)));
+		std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
 		for(;;) {
-			if ((rpe == _remotePeers.end())||(rpe->first.first != fromPeerAddress))
+			if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
 				break;
 			else if (rpe->second > mostRecentTs) {
 				mostRecentTs = rpe->second;
```
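This hunk is the bug the commit message refers to: the scan for the cluster member that most recently announced the peer was keyed on fromPeerAddress (the sender) rather than toPeerAddress (the destination actually being looked up). The scan itself is a common idiom: in a std::map keyed on std::pair, the entries are ordered first by the pair's first element, so lower_bound on (key, 0) lands on the first entry for key and a forward walk covers all of that key's entries. An illustrative sketch, not repository code:

```cpp
// Walk all entries of a pair-keyed map that share a given first element,
// keeping the largest mapped value, as the diff does for _remotePeers.
#include <cstdio>
#include <map>
#include <stdint.h>
#include <utility>

int main()
{
    std::map< std::pair<int,unsigned int>,uint64_t > m;
    m[std::make_pair(1,0u)] = 10;
    m[std::make_pair(2,3u)] = 30;
    m[std::make_pair(2,7u)] = 40;
    m[std::make_pair(3,1u)] = 50;

    const int wanted = 2;
    uint64_t mostRecent = 0;
    std::map< std::pair<int,unsigned int>,uint64_t >::const_iterator i(m.lower_bound(std::make_pair(wanted,0u)));
    for(;;) {
        if ((i == m.end())||(i->first.first != wanted))
            break; // ran past the last entry for 'wanted'
        if (i->second > mostRecent)
            mostRecent = i->second;
        ++i;
    }
    printf("%llu\n",(unsigned long long)mostRecent); // prints 40
    return 0;
}
```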
```diff
@@ -420,7 +421,7 @@ void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress
 	if (enqueueAndWait) {
 		TRACE("sendViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
 		Mutex::Lock _l(_sendViaClusterQueue_m);
-		_sendViaClusterQueue[fromPeerAddress].push_back(_SQE(now,toPeerAddress,data,len,unite));
+		_sendViaClusterQueue.push_back(_SQE(now,fromPeerAddress,toPeerAddress,data,len,unite));
 		return;
 	}
 }
```
```diff
@@ -484,13 +485,8 @@ void Cluster::doPeriodicTasks()
 
 	{
 		Mutex::Lock _l2(_sendViaClusterQueue_m);
-		for(std::map< Address,std::list<_SQE> >::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
-			for(std::list<_SQE>::iterator qii(qi->second.begin());qii!=qi->second.end();) {
-				if ((now - qii->timestamp) >= ZT_CLUSTER_QUEUE_EXPIRATION)
-					qi->second.erase(qii++);
-				else ++qii;
-			}
-			if (qi->second.empty())
+		for(std::list<_SQE>::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
+			if ((now - qi->timestamp) >= ZT_CLUSTER_QUEUE_EXPIRATION)
 				_sendViaClusterQueue.erase(qi++);
 			else ++qi;
 		}
```
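Flattening the map of per-sender lists into a single list collapses the expiration sweep from a nested loop into one pass. The erase(qi++) form is the classic list-safe deletion idiom: the post-increment advances the iterator before erase() invalidates the old position, which is sound because erasing a std::list node leaves all other iterators intact. A compact illustration:

```cpp
// Expire entries older than maxAge while iterating: erase(i++) hands
// erase() the current node after i has already moved past it.
#include <cstdio>
#include <list>

int main()
{
    const long now = 1000,maxAge = 500;
    std::list<long> timestamps { 100,600,900,300 };

    for (std::list<long>::iterator i(timestamps.begin());i!=timestamps.end();) {
        if ((now - *i) >= maxAge)
            timestamps.erase(i++); // expired: i already points past this node
        else ++i;
    }

    for (std::list<long>::iterator i(timestamps.begin());i!=timestamps.end();++i)
        printf("%ld\n",*i); // prints 600 then 900
    return 0;
}
```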
Cluster.hpp:

```diff
@@ -39,6 +39,7 @@
 #include "Constants.hpp"
 #include "../include/ZeroTierOne.h"
 #include "Address.hpp"
+#include "Array.hpp"
 #include "InetAddress.hpp"
 #include "SHA512.hpp"
 #include "Utils.hpp"
```
```diff
@@ -74,7 +75,7 @@
 /**
  * Expiration time for send queue entries
  */
-#define ZT_CLUSTER_QUEUE_EXPIRATION 1500
+#define ZT_CLUSTER_QUEUE_EXPIRATION 500
 
 namespace ZeroTier {
 
```
```diff
@@ -372,18 +373,20 @@ private:
 	struct _SQE
 	{
 		_SQE() : timestamp(0),len(0),unite(false) {}
-		_SQE(const uint64_t ts,const Address &t,const void *d,const unsigned int l,const bool u) :
+		_SQE(const uint64_t ts,const Address &f,const Address &t,const void *d,const unsigned int l,const bool u) :
 			timestamp(ts),
+			fromPeerAddress(f),
 			toPeerAddress(t),
 			len(l),
 			unite(u) { memcpy(data,d,l); }
 		uint64_t timestamp;
+		Address fromPeerAddress;
 		Address toPeerAddress;
 		unsigned int len;
 		bool unite;
 		unsigned char data[ZT_PROTO_MAX_PACKET_LENGTH];
 	};
-	std::map< Address,std::list<_SQE> > _sendViaClusterQueue;
+	std::list<_SQE> _sendViaClusterQueue;
 	Mutex _sendViaClusterQueue_m;
 
 	uint64_t _lastFlushed;
```
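With the per-sender map gone, each queued entry must identify its own sender, hence the new fromPeerAddress field. One property of this struct worth keeping in mind: the constructor memcpy()s into a fixed ZT_PROTO_MAX_PACKET_LENGTH buffer without checking l, so it is only safe because the call site bounds-checks len first (the sanity check at the top of sendViaCluster). A reduced sketch of that contract, with hypothetical names (Entry, MAX_PACKET) rather than the repository's:

```cpp
// The fixed-buffer copy-in pattern used by _SQE: the constructor trusts
// its caller, so the length check must happen before construction.
#include <cstring>

#define MAX_PACKET 16384 // stand-in for ZT_PROTO_MAX_PACKET_LENGTH

struct Entry
{
    Entry(const void *d,unsigned int l) : len(l) { memcpy(data,d,l); } // no check here
    unsigned int len;
    unsigned char data[MAX_PACKET];
};

bool enqueue(const void *d,unsigned int l)
{
    if (l > MAX_PACKET) // caller-side sanity check, as in sendViaCluster()
        return false;
    Entry e(d,l); // safe: l already validated
    (void)e;      // a real implementation would push_back(e) onto a queue
    return true;
}

int main()
{
    const char msg[] = "hello";
    return enqueue(msg,sizeof(msg)) ? 0 : 1;
}
```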
HTTP test agent script:

```diff
@@ -3,14 +3,17 @@
 // ---------------------------------------------------------------------------
 // Customizable parameters:
 
+// Time between startup and first test attempt
+var TEST_STARTUP_LAG = 10000;
+
 // Maximum interval between test attempts (actual timing is random % this)
 var TEST_INTERVAL_MAX = (60000 * 10);
 
 // Test timeout in ms
-var TEST_TIMEOUT = 60000;
+var TEST_TIMEOUT = 30000;
 
 // Where should I get other agents' IDs and POST results?
-var SERVER_HOST = '52.32.186.221';
+var SERVER_HOST = '52.26.196.147';
 var SERVER_PORT = 18080;
 
 // Which port do agents use to serve up test data to each other?
```
```diff
@@ -186,5 +189,5 @@ app.get('/',function(req,res) { return res.status(200).send(payload); });
 
 var expressServer = app.listen(AGENT_PORT,function () {
 	// Start timeout-based loop
-	doTest();
+	setTimeout(doTest(),TEST_STARTUP_LAG);
 });
```
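One detail worth flagging in this last hunk: as committed, `setTimeout(doTest(),TEST_STARTUP_LAG)` invokes doTest() immediately and passes its return value to setTimeout, so the intended TEST_STARTUP_LAG delay before the first run never actually happens. Deferring the first run would require passing the function itself, as in `setTimeout(doTest,TEST_STARTUP_LAG)`.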