Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-07 13:03:45 +02:00)

Commit 0b5472f9fb ("Tons and tons of cleanup and cruft removal"), parent 5280d28505.
17 changed files with 202 additions and 377 deletions.
@@ -2017,7 +2017,7 @@ ZT_SDK_API void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node);
 ZT_SDK_API int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,uint64_t typeId,const void *data,unsigned int len);

 /**
- * Set a network configuration master instance for this node
+ * Set a network controller instance for this node
  *
  * Normal nodes should not need to use this. This is for nodes with
  * special compiled-in support for acting as network configuration
@@ -2031,7 +2031,7 @@ ZT_SDK_API int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,ui
  * @param networkConfigMasterInstance Instance of NetworkConfigMaster C++ class or NULL to disable
  * @return OK (0) or error code if a fatal error condition has occurred
  */
-ZT_SDK_API void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkConfigMasterInstance);
+ZT_SDK_API void ZT_Node_setController(ZT_Node *node,void *networkConfigMasterInstance);

 /**
  * Set configuration for a given physical path
@@ -318,13 +318,6 @@
  */
 #define ZT_MULTIPATH_PROPORTION_WIN_SZ 128

-/**
- * How often we will sample packet latency. Should be at least greater than ZT_PING_CHECK_INVERVAL
- * since we will record a 0 bit/s measurement if no valid latency measurement was made within this
- * window of time.
- */
-#define ZT_PATH_LATENCY_SAMPLE_INTERVAL (ZT_MULTIPATH_PEER_PING_PERIOD * 2)
-
 /**
  * Interval used for rate-limiting the computation of path quality estimates.
  */
@@ -453,11 +446,6 @@
  */
 #define ZT_QOS_DEFAULT_BUCKET 0

-/**
- * How frequently to send heartbeats over in-use paths
- */
-#define ZT_PATH_HEARTBEAT_PERIOD 14000
-
 /**
  * Do not accept HELLOs over a given path more often than this
  */
@@ -465,18 +453,13 @@

 /**
  * Delay between full-fledge pings of directly connected peers
- */
-#define ZT_PEER_PING_PERIOD 60000
-
-/**
- * Delay between full-fledge pings of directly connected peers.
  *
- * With multipath bonding enabled ping peers more often to measure
- * packet loss and latency. This uses more bandwidth so is disabled
- * by default to avoid increasing idle bandwidth use for regular
- * links.
+ * See https://conferences.sigcomm.org/imc/2010/papers/p260.pdf for
+ * some real world data on NAT UDP timeouts. From the paper: "the
+ * lowest measured timeout when a binding has seen bidirectional
+ * traffic is 54 sec." We use 45 to be a bit under this.
  */
-#define ZT_MULTIPATH_PEER_PING_PERIOD 5000
+#define ZT_PEER_PING_PERIOD 45000

 /**
  * Paths are considered expired if they have not sent us a real packet in this long
@@ -524,11 +507,6 @@
  */
 #define ZT_MIN_UNITE_INTERVAL 30000

-/**
- * How often should peers try memorized or statically defined paths?
- */
-#define ZT_TRY_MEMORIZED_PATH_INTERVAL 30000
-
 /**
  * Sanity limit on maximum bridge routes
  *
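Note: the new 45-second ZT_PEER_PING_PERIOD is deliberately set just under the 54-second minimum NAT UDP binding timeout quoted from the IMC 2010 study, so a keepalive always goes out before the most aggressive measured NAT drops an idle mapping. A minimal sanity-check sketch (not part of this commit; it assumes the macro is defined in Constants.hpp as shown above):

    // Illustrative only: the lowest NAT UDP timeout measured in the cited paper
    // was 54 s for bindings that have seen bidirectional traffic, so the ping
    // period must stay below it for mappings to survive between keepalives.
    #include "Constants.hpp" // assumed location of ZT_PEER_PING_PERIOD
    static_assert(ZT_PEER_PING_PERIOD < 54000,"peer ping period must undercut the measured minimum NAT UDP timeout");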
@@ -223,6 +223,7 @@ bool IncomingPacket::_doQOS_MEASUREMENT(const RuntimeEnvironment *RR,void *tPtr,
 {
 if (!peer->rateGateQoS(RR->node->now()))
 return true;
+
 /* Dissect incoming QoS packet. From this we can compute latency values and their variance.
  * The latency variance is used as a measure of "jitter". */
 if (peer->localMultipathSupport()) {
@@ -349,12 +350,14 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR,void *tPtr,const bool

 // VALID -- if we made it here, packet passed identity and authenticity checks!

-// Get external surface address if present (was not in old versions)
-InetAddress externalSurfaceAddress;
-if (ptr < size()) {
-ptr += externalSurfaceAddress.deserialize(*this,ptr);
-if ((externalSurfaceAddress)&&(hops() == 0))
-RR->sa->iam(tPtr,id.address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(id),now);
+// Get address to which this packet was sent to learn our external surface address if packet was direct.
+if (hops() == 0) {
+InetAddress externalSurfaceAddress;
+if (ptr < size()) {
+ptr += externalSurfaceAddress.deserialize(*this,ptr);
+if ((externalSurfaceAddress)&&(hops() == 0))
+RR->sa->iam(tPtr,id.address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(id),now);
+}
 }

 // Send OK(HELLO) with an echo of the packet's timestamp and some of the same
@@ -398,20 +401,17 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,void *tPtr,const SharedP
 if (vProto < ZT_PROTO_VERSION_MIN)
 return true;

-InetAddress externalSurfaceAddress;
-unsigned int ptr = ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2;
-
-// Get reported external surface address if present
-if (ptr < size())
-ptr += externalSurfaceAddress.deserialize(*this,ptr);
-if (!hops()) {
+if (hops() == 0) {
 _path->updateLatency((unsigned int)latency,RR->node->now());
+if ((ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2) < size()) {
+InetAddress externalSurfaceAddress;
+externalSurfaceAddress.deserialize(*this,ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2);
+if (externalSurfaceAddress)
+RR->sa->iam(tPtr,peer->address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(peer->identity()),RR->node->now());
+}
 }

 peer->setRemoteVersion(vProto,vMajor,vMinor,vRevision);
-
-if ((externalSurfaceAddress)&&(hops() == 0))
-RR->sa->iam(tPtr,peer->address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(peer->identity()),RR->node->now());
 } break;

 case Packet::VERB_WHOIS:
@@ -528,16 +528,14 @@ bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,void *tPtr,const
 if ((port > 0)&&((addrlen == 4)||(addrlen == 16))) {
 InetAddress atAddr(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRESS,addrlen),addrlen,port);
 if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,with,_path->localSocket(),atAddr)) {
-const uint64_t junk = RR->node->prng();
+const uint64_t junk = Utils::random();
 RR->node->putPacket(tPtr,_path->localSocket(),atAddr,&junk,4,2); // send low-TTL junk packet to 'open' local NAT(s) and stateful firewalls
-rendezvousWith->attemptToContactAt(tPtr,_path->localSocket(),atAddr,RR->node->now(),false);
+rendezvousWith->sendHELLO(tPtr,_path->localSocket(),atAddr,RR->node->now());
 }
 }
 }
 }
-
 peer->received(tPtr,_path,hops(),packetId(),payloadLength(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP,0);
-
 return true;
 }

@@ -560,9 +558,7 @@ bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,void *tPtr,const Shar
 return false;
 }
 }
-
 peer->received(tPtr,_path,hops(),packetId(),payloadLength(),Packet::VERB_FRAME,0,Packet::VERB_NOP,nwid);
-
 return true;
 }

@@ -1003,8 +999,6 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,void *tPt
 unsigned int ptr = ZT_PACKET_IDX_PAYLOAD + 2;

 while (count--) { // if ptr overflows Buffer will throw
-// TODO: some flags are not yet implemented
-
 unsigned int flags = (*this)[ptr++];
 unsigned int extLen = at<uint16_t>(ptr); ptr += 2;
 ptr += extLen; // unused right now
@@ -1014,26 +1008,20 @@
 switch(addrType) {
 case 4: {
 const InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
-if ((!( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) == 0) && (peer->hasActivePathTo(now,a)) )) && // not already known
-(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),_path->localSocket(),a)) ) // should use path
+if ((!peer->hasActivePathTo(now,a)) && // not already known
+(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),-1,a)) ) // should use path
 {
-if ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) {
-peer->clusterRedirect(tPtr,_path,a,now);
-} else if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
-peer->attemptToContactAt(tPtr,InetAddress(),a,now,false);
-}
+if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY)
+peer->sendHELLO(tPtr,-1,a,now);
 }
 } break;
 case 6: {
 const InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
-if ((!( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) == 0) && (peer->hasActivePathTo(now,a)) )) && // not already known
-(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),_path->localSocket(),a)) ) // should use path
+if ((!peer->hasActivePathTo(now,a)) && // not already known
+(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),-1,a)) ) // should use path
 {
-if ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) {
-peer->clusterRedirect(tPtr,_path,a,now);
-} else if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
-peer->attemptToContactAt(tPtr,InetAddress(),a,now,false);
-}
+if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY)
+peer->sendHELLO(tPtr,-1,a,now);
 }
 } break;
 }
@@ -110,10 +110,10 @@ public:
 return nconf.com.agreesWith(_com); // check timestamp agreement window
 }

-inline bool recentlyAssociated(const int64_t now) const
-{
-return ((_com)&&((now - _com.timestamp()) < ZT_PEER_ACTIVITY_TIMEOUT));
-}
+/**
+ * @return True if this peer has sent us a valid certificate within ZT_PEER_ACTIVITY_TIMEOUT
+ */
+inline bool recentlyAssociated(const int64_t now) const { return ((_com)&&((now - _com.timestamp()) < ZT_PEER_ACTIVITY_TIMEOUT)); }

 /**
  * Check whether the peer represented by this Membership owns a given address
@@ -170,6 +170,26 @@ public:
  */
 static uint64_t credentialKey(const Credential::Type &t,const uint32_t i) { return (((uint64_t)t << 32) | (uint64_t)i); }

+/**
+ * @return Bytes received so far
+ */
+inline uint64_t receivedBytes() const { return _received; }
+
+/**
+ * @return Bytes sent so far
+ */
+inline uint64_t sentBytes() const { return _sent; }
+
+/**
+ * @param bytes Bytes received
+ */
+inline void logReceivedBytes(const unsigned int bytes) { _received = (uint64_t)bytes; }
+
+/**
+ * @param bytes Bytes sent
+ */
+inline void logSentBytes(const unsigned int bytes) { _sent = (uint64_t)bytes; }
+
 private:
 // This returns true if a resource is an IPv6 NDP-emulated address. These embed the ZT
 // address of the peer and therefore cannot be spoofed, causing peerOwnsAddress() to
@@ -221,6 +241,12 @@ private:
 // Time we last pushed credentials
 int64_t _lastPushedCredentials;

+// Number of Ethernet frame bytes received
+uint64_t _received;
+
+// Number of Ethernet frame bytes sent
+uint64_t _sent;
+
 // Remote member's latest network COM
 CertificateOfMembership _com;

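Note: the new _received/_sent fields give each network Membership a per-member byte figure for Ethernet frames, fed by Network::filterIncomingPacket()/filterOutgoingPacket() further down in this commit. As written, the setters assign rather than accumulate. A hedged sketch of how the four accessors pair up (the helper name and frameLen parameter are hypothetical; it assumes the Membership class above is in scope within the ZeroTier namespace):

    // Hypothetical helper built only on the accessors added above.
    static void recordFrame(ZeroTier::Membership &m,unsigned int frameLen,bool inbound)
    {
        if (inbound)
            m.logReceivedBytes(frameLen); // note: assigns the byte count, does not accumulate
        else
            m.logSentBytes(frameLen);
        const uint64_t seen = m.receivedBytes() + m.sentBytes(); // read both counters back
        (void)seen;
    }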
@@ -111,7 +111,7 @@ unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const
 // will return different subsets of a large multicast group.
 k = 0;
 while ((added < limit)&&(k < s->members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_PROTO_MAX_PACKET_LENGTH)) {
-rptr = (unsigned int)RR->node->prng();
+rptr = (unsigned int)Utils::random();

 restart_member_scan:
 a = s->members[rptr % (unsigned int)s->members.size()].address.toInt();
@@ -183,7 +183,7 @@ void Multicaster::send(
 for(unsigned long i=0;i<gs.members.size();++i)
 indexes[i] = i;
 for(unsigned long i=(unsigned long)gs.members.size()-1;i>0;--i) {
-unsigned long j = (unsigned long)RR->node->prng() % (i + 1);
+unsigned long j = (unsigned long)Utils::random() % (i + 1);
 unsigned long tmp = indexes[j];
 indexes[j] = indexes[i];
 indexes[i] = tmp;
@@ -251,7 +251,7 @@ void Multicaster::send(
 for(unsigned int i=0;i<accnt;++i)
 shuffled[i] = i;
 for(unsigned int i=0,k=accnt>>1;i<k;++i) {
-const uint64_t x = RR->node->prng();
+const uint64_t x = Utils::random();
 const unsigned int x1 = shuffled[(unsigned int)x % accnt];
 const unsigned int x2 = shuffled[(unsigned int)(x >> 32) % accnt];
 const unsigned int tmp = shuffled[x1];
@@ -51,17 +51,14 @@ public:

 inline void lock() const
 {
 const uint16_t myTicket = __sync_fetch_and_add(&(const_cast<Mutex *>(this)->nextTicket),1);
 while (nowServing != myTicket) {
 __asm__ __volatile__("rep;nop"::);
 __asm__ __volatile__("":::"memory");
 }
 }

-inline void unlock() const
-{
-++(const_cast<Mutex *>(this)->nowServing);
-}
+inline void unlock() const { ++(const_cast<Mutex *>(this)->nowServing); }

 /**
  * Uses C++ contexts and constructor/destructor to lock/unlock automatically
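Note: collapsing unlock() to one line is purely cosmetic; the ticket-spinlock behavior is unchanged. In practice this class is used through its RAII guard rather than by calling lock()/unlock() directly, exactly as the other hunks in this commit do (e.g. Mutex::Lock _l(_paths_m);). A minimal usage sketch, assuming the Mutex class above is in scope in the ZeroTier namespace:

    static ZeroTier::Mutex s_counterLock;
    static unsigned long s_counter = 0;

    static unsigned long bumpCounter()
    {
        ZeroTier::Mutex::Lock l(s_counterLock); // takes a ticket in lock(); unlock() runs when l leaves scope
        return ++s_counter;
    }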
@@ -432,7 +432,7 @@ static _doZtFilterResult _doZtFilter(
 thisRuleMatches = (uint8_t)((frameLen >= (unsigned int)rules[rn].v.frameSize[0])&&(frameLen <= (unsigned int)rules[rn].v.frameSize[1]));
 break;
 case ZT_NETWORK_RULE_MATCH_RANDOM:
-thisRuleMatches = (uint8_t)((uint32_t)(RR->node->prng() & 0xffffffffULL) <= rules[rn].v.randomProbability);
+thisRuleMatches = (uint8_t)((uint32_t)(Utils::random() & 0xffffffffULL) <= rules[rn].v.randomProbability);
 break;
 case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
 case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
@@ -702,6 +702,9 @@ bool Network::filterOutgoingPacket(
 }

 if (accept) {
+if (membership)
+membership->logSentBytes(frameLen);
+
 if ((!noTee)&&(cc)) {
 Packet outp(cc,RR->identity.address(),Packet::VERB_EXT_FRAME);
 outp.append(_id);
@@ -820,6 +823,8 @@ int Network::filterIncomingPacket(
 }

 if (accept) {
+membership.logReceivedBytes(frameLen);
+
 if (cc) {
 Packet outp(cc,RR->identity.address(),Packet::VERB_EXT_FRAME);
 outp.append(_id);
@@ -64,9 +64,6 @@ Node::Node(void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,int64
 {
 memcpy(&_cb,callbacks,sizeof(ZT_Node_Callbacks));

-// Initialize non-cryptographic PRNG from a good random source
-Utils::getSecureRandom((void *)_prngState,sizeof(_prngState));
-
 _online = false;

 memset(_expectingRepliesToBucketPtr,0,sizeof(_expectingRepliesToBucketPtr));
@@ -211,7 +208,7 @@ struct _PingPeersThatNeedPing
 bool contacted = (sent != 0);

 if ((sent & 0x1) == 0) { // bit 0x1 == IPv4 sent
-for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)alwaysContactEndpoints->size();++k) {
+for(unsigned long k=0,ptr=(unsigned long)Utils::random();k<(unsigned long)alwaysContactEndpoints->size();++k) {
 const InetAddress &addr = (*alwaysContactEndpoints)[ptr++ % alwaysContactEndpoints->size()];
 if (addr.ss_family == AF_INET) {
 p->sendHELLO(_tPtr,-1,addr,_now);
@@ -222,7 +219,7 @@ struct _PingPeersThatNeedPing
 }

 if ((sent & 0x2) == 0) { // bit 0x2 == IPv6 sent
-for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)alwaysContactEndpoints->size();++k) {
+for(unsigned long k=0,ptr=(unsigned long)Utils::random();k<(unsigned long)alwaysContactEndpoints->size();++k) {
 const InetAddress &addr = (*alwaysContactEndpoints)[ptr++ % alwaysContactEndpoints->size()];
 if (addr.ss_family == AF_INET6) {
 p->sendHELLO(_tPtr,-1,addr,_now);
@@ -427,10 +424,13 @@ void Node::status(ZT_NodeStatus *status) const
 status->online = _online ? 1 : 0;
 }

+struct _sortPeerPtrsByAddress { inline bool cmp(const SharedPtr<Peer> &a,const SharedPtr<Peer> &b) const { return (a->address() < b->address()); } };
+
 ZT_PeerList *Node::peers() const
 {
-std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
-std::sort(peers.begin(),peers.end());
+std::vector< SharedPtr<Peer> > peers;
+RR->topology->getAllPeers(peers);
+std::sort(peers.begin(),peers.end(),_sortPeerPtrsByAddress());

 char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
 if (!buf)
@@ -439,27 +439,27 @@ ZT_PeerList *Node::peers() const
 pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));

 pl->peerCount = 0;
-for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
+for(std::vector< SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) {
 ZT_Peer *p = &(pl->peers[pl->peerCount++]);
-p->address = pi->second->address().toInt();
+p->address = (*pi)->address().toInt();
 p->hadAggregateLink = 0;
-if (pi->second->remoteVersionKnown()) {
-p->versionMajor = pi->second->remoteVersionMajor();
-p->versionMinor = pi->second->remoteVersionMinor();
-p->versionRev = pi->second->remoteVersionRevision();
+if ((*pi)->remoteVersionKnown()) {
+p->versionMajor = (*pi)->remoteVersionMajor();
+p->versionMinor = (*pi)->remoteVersionMinor();
+p->versionRev = (*pi)->remoteVersionRevision();
 } else {
 p->versionMajor = -1;
 p->versionMinor = -1;
 p->versionRev = -1;
 }
-p->latency = pi->second->latency(_now);
+p->latency = (*pi)->latency(_now);
 if (p->latency >= 0xffff)
 p->latency = -1;
-p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_PLANET : ZT_PEER_ROLE_LEAF;
+p->role = RR->topology->isRoot((*pi)->identity()) ? ZT_PEER_ROLE_PLANET : ZT_PEER_ROLE_LEAF;

-std::vector< SharedPtr<Path> > paths(pi->second->paths(_now));
-SharedPtr<Path> bestp(pi->second->getAppropriatePath(_now,false));
-p->hadAggregateLink |= pi->second->hasAggregateLink();
+std::vector< SharedPtr<Path> > paths((*pi)->paths(_now));
+SharedPtr<Path> bestp((*pi)->getAppropriatePath(_now,false));
+p->hadAggregateLink |= (*pi)->hasAggregateLink();
 p->pathCount = 0;
 for(std::vector< SharedPtr<Path> >::iterator path(paths.begin());path!=paths.end();++path) {
 memcpy(&(p->paths[p->pathCount].address),&((*path)->address()),sizeof(struct sockaddr_storage));
@@ -557,7 +557,7 @@ int Node::sendUserMessage(void *tptr,uint64_t dest,uint64_t typeId,const void *d
 return 0;
 }

-void Node::setNetconfMaster(void *networkControllerInstance)
+void Node::setController(void *networkControllerInstance)
 {
 RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
 if (networkControllerInstance)
@@ -589,18 +589,6 @@ bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,cons
 return ( (_cb.pathCheckFunction) ? (_cb.pathCheckFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ztaddr.toInt(),localSocket,reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0) : true);
 }

-uint64_t Node::prng()
-{
-// https://en.wikipedia.org/wiki/Xorshift#xorshift.2B
-uint64_t x = _prngState[0];
-const uint64_t y = _prngState[1];
-_prngState[0] = y;
-x ^= x << 23;
-const uint64_t z = x ^ y ^ (x >> 17) ^ (y >> 26);
-_prngState[1] = z;
-return z + y;
-}
-
 ZT_ResultCode Node::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork, const ZT_PhysicalPathConfiguration *pathConfig)
 {
 RR->topology->setPhysicalPathConfiguration(pathNetwork,pathConfig);
@@ -621,7 +609,7 @@ void Node::ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &de
 Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> *dconf = new Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY>();
 try {
 if (nc.toDictionary(*dconf,sendLegacyFormatConfig)) {
-uint64_t configUpdateId = prng();
+uint64_t configUpdateId = Utils::random();
 if (!configUpdateId) ++configUpdateId;

 const unsigned int totalSize = dconf->sizeBytes();
@@ -913,10 +901,10 @@ int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,uint64_t type
 }
 }

-void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
+void ZT_Node_setController(ZT_Node *node,void *networkControllerInstance)
 {
 try {
-reinterpret_cast<ZeroTier::Node *>(node)->setNetconfMaster(networkControllerInstance);
+reinterpret_cast<ZeroTier::Node *>(node)->setController(networkControllerInstance);
 } catch ( ... ) {}
 }

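Note: Node's private xorshift128+ generator (Node::prng() and _prngState, removed above) is gone, and the secure seeding in the constructor with it; every former call site in this commit now draws from Utils::random() instead. A sketch of the resulting pattern (the helper name is hypothetical; Utils::random() is the call used throughout the hunks above, qualified here with the ZeroTier namespace):

    // Hypothetical helper showing the new pattern: non-cryptographic randomness
    // comes from Utils::random() rather than per-Node xorshift128+ state.
    static uint64_t pickRandomSlot(uint64_t slotCount)
    {
        return (slotCount > 0) ? (ZeroTier::Utils::random() % slotCount) : 0;
    }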
@@ -106,7 +106,7 @@ public:
 int addLocalInterfaceAddress(const struct sockaddr_storage *addr);
 void clearLocalInterfaceAddresses();
 int sendUserMessage(void *tptr,uint64_t dest,uint64_t typeId,const void *data,unsigned int len);
-void setNetconfMaster(void *networkControllerInstance);
+void setController(void *networkControllerInstance);

 // Internal functions ------------------------------------------------------

@@ -187,7 +187,6 @@ public:
 bool shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,const int64_t localSocket,const InetAddress &remoteAddress);
 inline bool externalPathLookup(void *tPtr,const Address &ztaddr,int family,InetAddress &addr) { return ( (_cb.pathLookupFunction) ? (_cb.pathLookupFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ztaddr.toInt(),family,reinterpret_cast<struct sockaddr_storage *>(&addr)) != 0) : false ); }

-uint64_t prng();
 ZT_ResultCode setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig);

 inline const Identity &identity() const { return _RR.identity; }
@@ -309,7 +308,6 @@ private:
 int64_t _lastPingCheck;
 int64_t _lastHousekeepingRun;
 int64_t _lastMemoizedTraceSettings;
-volatile int64_t _prngState[2];
 bool _online;
 };

@@ -135,11 +135,6 @@
  */
 #define ZT_PROTO_VERB_FLAG_COMPRESSED 0x80

-/**
- * PUSH_DIRECT_PATHS flag: cluster redirect
- */
-#define ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT 0x02
-
 // Field indexes in packet header
 #define ZT_PACKET_IDX_IV 0
 #define ZT_PACKET_IDX_DEST 8
@@ -799,7 +794,6 @@ public:
  *
  * Path record flags:
  * 0x01 - Forget this path if currently known (not implemented yet)
- * 0x02 - Cluster redirect -- use this in preference to others
  *
  * The receiver may, upon receiving a push, attempt to establish a
  * direct link to one or more of the indicated addresses. It is the
@@ -279,9 +279,9 @@ public:
  */
 inline long quality(const int64_t now) const
 {
-const int l = (long)_latency;
-const int age = (long)std::min((now - _lastIn),(int64_t)(ZT_PATH_HEARTBEAT_PERIOD * 10)); // set an upper sanity limit to avoid overflow
-return (((age < (ZT_PATH_HEARTBEAT_PERIOD + 5000)) ? l : (l + 0xffff + age)) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
+const long l = (long)_latency;
+const long age = (long)std::min((long)(now - _lastIn),(long)(ZT_PEER_PING_PERIOD * 10)); // set an upper sanity limit to avoid overflow
+return ( ( (age < (ZT_PEER_PING_PERIOD + 5000)) ? l : (l + 65535 + age) ) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
 }

 /**
@@ -611,14 +611,9 @@ public:
 }

 /**
- * @return True if this path is alive (receiving heartbeats)
+ * @return True if this path is alive (receiving data)
  */
-inline bool alive(const int64_t now) const { return ((now - _lastIn) < (ZT_PATH_HEARTBEAT_PERIOD + 5000)); }
+inline bool alive(const int64_t now) const { return ((now - _lastIn) < ((ZT_PEER_PING_PERIOD * 2) + 5000)); }

-/**
- * @return True if this path needs a heartbeat
- */
-inline bool needsHeartbeat(const int64_t now) const { return ((now - _lastOut) >= ZT_PATH_HEARTBEAT_PERIOD); }
-
 /**
  * @return Last time we sent something
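Note: with the heartbeat constant removed, quality() and alive() now key off ZT_PEER_PING_PERIOD (45000 ms): a path heard from within roughly the last 50 s contributes only its latency, otherwise it is penalized by 65535 plus its age, and the result is scaled by IP scope (lower is better). A worked restatement of the formula above with hypothetical inputs:

    // Illustrative only; mirrors the expression in quality() shown above.
    static long pathQualityExample(long latencyMs,long ageMs,long ipScope,long maxScope)
    {
        const long l = latencyMs;
        const long penalized = (ageMs < (45000 + 5000)) ? l : (l + 65535 + ageMs); // 45000 = ZT_PEER_PING_PERIOD
        return penalized * ((maxScope - ipScope) + 1);
    }
    // e.g. a 40 ms path heard from 10 s ago scores 40, while the same path quiet for
    // 120 s scores 40 + 65535 + 120000 = 185575, so getAppropriatePath() stops preferring it.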
node/Peer.cpp (152 lines changed)
@@ -43,15 +43,12 @@ static unsigned char s_freeRandomByteCounter = 0;
 Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
 RR(renv),
 _lastReceive(0),
-_lastNontrivialReceive(0),
-_lastTriedMemorizedPath(0),
 _lastDirectPathPushSent(0),
 _lastDirectPathPushReceive(0),
 _lastCredentialRequestSent(0),
 _lastWhoisRequestReceived(0),
 _lastEchoRequestReceived(0),
 _lastCredentialsReceived(0),
-_lastSentFullHello(0),
 _lastACKWindowReset(0),
 _lastQoSWindowReset(0),
 _lastMultipathCompatibilityCheck(0),
@@ -91,17 +88,6 @@ void Peer::received(
 const int64_t now = RR->node->now();

 _lastReceive = now;
-switch (verb) {
-case Packet::VERB_FRAME:
-case Packet::VERB_EXT_FRAME:
-case Packet::VERB_NETWORK_CONFIG_REQUEST:
-case Packet::VERB_NETWORK_CONFIG:
-case Packet::VERB_MULTICAST_FRAME:
-_lastNontrivialReceive = now;
-break;
-default:
-break;
-}

 {
 Mutex::Lock _l(_paths_m);
@@ -181,7 +167,6 @@ void Peer::received(
 RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
 _paths[replacePath].lr = now;
 _paths[replacePath].p = path;
-_paths[replacePath].priority = 1;
 } else {
 attemptToContact = true;
 }
@@ -189,7 +174,7 @@ void Peer::received(
 }

 if (attemptToContact) {
-attemptToContactAt(tPtr,path->localSocket(),path->address(),now,true);
+sendHELLO(tPtr,path->localSocket(),path->address(),now);
 path->sent(now);
 RR->t->peerConfirmingUnknownPath(tPtr,networkId,*this,path,packetId,verb);
 }
@@ -376,7 +361,7 @@ SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired)
 for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
 if (_paths[i].p) {
 if ((includeExpired)||((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION)) {
-const long q = _paths[i].p->quality(now) / _paths[i].priority;
+const long q = _paths[i].p->quality(now);
 if (q <= bestPathQuality) {
 bestPathQuality = q;
 bestPath = i;
@@ -525,7 +510,7 @@ void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &o

 for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
 if (_paths[i].p) {
-const long q = _paths[i].p->quality(now) / _paths[i].priority;
+const long q = _paths[i].p->quality(now);
 const unsigned int s = (unsigned int)_paths[i].p->ipScope();
 switch(_paths[i].p->address().ss_family) {
 case AF_INET:
@@ -548,7 +533,7 @@ void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &o

 for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
 if (other->_paths[i].p) {
-const long q = other->_paths[i].p->quality(now) / other->_paths[i].priority;
+const long q = other->_paths[i].p->quality(now);
 const unsigned int s = (unsigned int)other->_paths[i].p->ipScope();
 switch(other->_paths[i].p->address().ss_family) {
 case AF_INET:
@@ -584,7 +569,7 @@ void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &o
 }

 if (mine != ZT_MAX_PEER_NETWORK_PATHS) {
-unsigned int alt = (unsigned int)RR->node->prng() & 1; // randomize which hint we send first for black magickal NAT-t reasons
+unsigned int alt = (unsigned int)Utils::random() & 1; // randomize which hint we send first for black magickal NAT-t reasons
 const unsigned int completed = alt + 2;
 while (alt != completed) {
 if ((alt & 1) == 0) {
@@ -710,38 +695,13 @@ void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atA
 }
 }

-void Peer::attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello)
+void Peer::ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v6SendCount)
 {
-if ( (!sendFullHello) && (_vProto >= 5) && (!((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0))) ) {
-Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
-RR->node->expectReplyTo(outp.packetId());
-outp.armor(_key,true);
-RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
-} else {
-sendHELLO(tPtr,localSocket,atAddress,now);
-}
-}
-
-void Peer::tryMemorizedPath(void *tPtr,int64_t now)
-{
-if ((now - _lastTriedMemorizedPath) >= ZT_TRY_MEMORIZED_PATH_INTERVAL) {
-_lastTriedMemorizedPath = now;
-InetAddress mp;
-if (RR->node->externalPathLookup(tPtr,_id.address(),-1,mp))
-attemptToContactAt(tPtr,-1,mp,now,true);
-}
-}
+v4SendCount = 0;
+v6SendCount = 0;

-unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
-{
-unsigned int sent = 0;
 Mutex::Lock _l(_paths_m);
-
-const bool sendFullHello = ((now - _lastSentFullHello) >= ZT_PEER_PING_PERIOD);
-_lastSentFullHello = now;
-
-processBackgroundPeerTasks(now);

 // Emit traces regarding aggregate link status
 if (_canUseMultipath) {
 int alivePathCount = aggregateLinkPhysicalPathCount();
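Note: ping() replaces attemptToContactAt(), tryMemorizedPath() and doPingAndKeepalive(): it HELLOs every live path and reports how many IPv4 and IPv6 packets went out through the two reference parameters instead of returning the old bitmask. A hypothetical call-site fragment (tPtr, RR and peer stand in for the usual runtime objects seen elsewhere in this commit):

    unsigned int v4 = 0,v6 = 0;
    peer->ping(tPtr,RR->node->now(),v4,v6);
    const bool contactedSomething = ((v4 + v6) > 0); // old code tested bits 0x1/0x2 of the return value instead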
@@ -759,90 +719,26 @@ unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
 }
 }

-// Right now we only keep pinging links that have the maximum priority. The
-// priority is used to track cluster redirections, meaning that when a cluster
-// redirects us its redirect target links override all other links and we
-// let those old links expire.
-long maxPriority = 0;
-for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
-if (_paths[i].p)
-maxPriority = std::max(_paths[i].priority,maxPriority);
-else break;
-}
-
 unsigned int j = 0;
 for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
-if (_paths[i].p) {
-// Clean expired and reduced priority paths
-if ( ((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION) && (_paths[i].priority == maxPriority) ) {
-if ((sendFullHello)||(_paths[i].p->needsHeartbeat(now))) {
-attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,sendFullHello);
-_paths[i].p->sent(now);
-sent |= (_paths[i].p->address().ss_family == AF_INET) ? 0x1 : 0x2;
-}
-if (i != j)
-_paths[j] = _paths[i];
-++j;
-}
-} else break;
-}
-if (canUseMultipath()) {
-while(j < ZT_MAX_PEER_NETWORK_PATHS) {
-_paths[j].lr = 0;
-_paths[j].p.zero();
-_paths[j].priority = 1;
+if ((_paths[i].p)&&(_paths[i].p->alive(now))) {
+sendHELLO(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now);
+_paths[i].p->sent(now);
+if (_paths[i].p->address().isV4())
+++v4SendCount;
+else if (_paths[i].p->address().isV6())
+++v6SendCount;
+if (i != j)
+_paths[j] = _paths[i];
 ++j;
 }
 }
-return sent;
-}
-
-void Peer::clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now)
-{
-SharedPtr<Path> np(RR->topology->getPath(originatingPath->localSocket(),remoteAddress));
-RR->t->peerRedirected(tPtr,0,*this,np);
-
-attemptToContactAt(tPtr,originatingPath->localSocket(),remoteAddress,now,true);
-
-{
-Mutex::Lock _l(_paths_m);
-
-// New priority is higher than the priority of the originating path (if known)
-long newPriority = 1;
-for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
-if (_paths[i].p) {
-if (_paths[i].p == originatingPath) {
-newPriority = _paths[i].priority;
-break;
-}
-} else break;
-}
-newPriority += 2;
-
-// Erase any paths with lower priority than this one or that are duplicate
-// IPs and add this path.
-unsigned int j = 0;
-for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
-if (_paths[i].p) {
-if ((_paths[i].priority >= newPriority)&&(!_paths[i].p->address().ipsEqual2(remoteAddress))) {
-if (i != j)
-_paths[j] = _paths[i];
-++j;
-}
-}
-}
-if (j < ZT_MAX_PEER_NETWORK_PATHS) {
-_paths[j].lr = now;
-_paths[j].p = np;
-_paths[j].priority = newPriority;
-++j;
-while (j < ZT_MAX_PEER_NETWORK_PATHS) {
-_paths[j].lr = 0;
-_paths[j].p.zero();
-_paths[j].priority = 1;
-++j;
-}
-}
+while(j < ZT_MAX_PEER_NETWORK_PATHS) {
+_paths[j].lr = 0;
+_paths[j].p.zero();
+++j;
 }
 }

@@ -852,7 +748,7 @@ void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddres
 for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
 if (_paths[i].p) {
 if ((_paths[i].p->address().ss_family == inetAddressFamily)&&(_paths[i].p->ipScope() == scope)) {
-attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,false);
+sendHELLO(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now);
 _paths[i].p->sent(now);
 _paths[i].lr = 0; // path will not be used unless it speaks again
 }
@@ -29,8 +29,6 @@

 #include <vector>

-#include "../include/ZeroTierOne.h"
-
 #include "Constants.hpp"
 #include "RuntimeEnvironment.hpp"
 #include "Node.hpp"
@@ -95,7 +93,7 @@ public:
  * @param verb Packet verb
  * @param inRePacketId Packet ID in reply to (default: none)
  * @param inReVerb Verb in reply to (for OK/ERROR, default: VERB_NOP)
- * @param networkId Network ID if this pertains to a network, or 0 otherwise
+ * @param networkId Network ID if this packet is related to a network, 0 otherwise
  */
 void received(
 void *tPtr,
@@ -247,39 +245,16 @@ public:
 void sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now);

 /**
- * Send ECHO (or HELLO for older peers) to this peer at the given address
- *
- * No statistics or sent times are updated here.
- *
- * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
- * @param localSocket Local source socket
- * @param atAddress Destination address
- * @param now Current time
- * @param sendFullHello If true, always send a full HELLO instead of just an ECHO
- */
-void attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello);
-
-/**
- * Try a memorized or statically defined path if any are known
- *
- * Under the hood this is done periodically based on ZT_TRY_MEMORIZED_PATH_INTERVAL.
- *
- * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
- * @param now Current time
- */
-void tryMemorizedPath(void *tPtr,int64_t now);
-
-/**
- * Send pings or keepalives depending on configured timeouts
+ * Send pings to active paths
  *
  * This also cleans up some internal data structures. It's called periodically from Node.
  *
  * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
  * @param now Current time
- * @param inetAddressFamily Keep this address family alive, or -1 for any
- * @return 0 if nothing sent or bit mask: bit 0x1 if IPv4 sent, bit 0x2 if IPv6 sent (0x3 means both sent)
+ * @param v4SendCount Number of IPv4 packets sent (result parameter)
+ * @param v6SendCount Number of IPv6 packets sent (result parameter)
  */
-unsigned int doPingAndKeepalive(void *tPtr,int64_t now);
+void ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v6SendCount);

 /**
  * Clear paths whose localSocket(s) are in a CLOSED state or have an otherwise INVALID state.
@@ -291,16 +266,6 @@ public:
  */
 unsigned int prunePaths();

-/**
- * Process a cluster redirect sent by this peer
- *
- * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
- * @param originatingPath Path from which redirect originated
- * @param remoteAddress Remote address
- * @param now Current time
- */
-void clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now);
-
 /**
  * Reset paths within a given IP scope and address family
  *
@@ -341,11 +306,6 @@ public:
  */
 inline bool isAlive(const int64_t now) const { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }

-/**
- * @return True if this peer has sent us real network traffic recently
- */
-inline int64_t isActive(int64_t now) const { return ((now - _lastNontrivialReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
-
 /**
  * @return Latency in milliseconds of best/aggregate path or 0xffff if unknown / no paths
  */
@@ -417,7 +377,7 @@ public:
  *
  * @param now Current time
  */
-inline void processBackgroundPeerTasks(const int64_t now);
+void processBackgroundPeerTasks(const int64_t now);

 /**
  * Record that the remote peer does have multipath enabled. As is evident by the receipt of a VERB_ACK
@@ -541,10 +501,9 @@ public:
 private:
 struct _PeerPath
 {
-_PeerPath() : lr(0),p(),priority(1) {}
+_PeerPath() : lr(0),p() {}
 int64_t lr; // time of last valid ZeroTier packet
 SharedPtr<Path> p;
-long priority; // >= 1, higher is better
 };

 uint8_t _key[ZT_PEER_SECRET_KEY_LENGTH];
@@ -552,15 +511,12 @@ private:
 const RuntimeEnvironment *RR;

 int64_t _lastReceive; // direct or indirect
-int64_t _lastNontrivialReceive; // frames, things like netconf, etc.
-int64_t _lastTriedMemorizedPath;
 int64_t _lastDirectPathPushSent;
 int64_t _lastDirectPathPushReceive;
 int64_t _lastCredentialRequestSent;
 int64_t _lastWhoisRequestReceived;
 int64_t _lastEchoRequestReceived;
 int64_t _lastCredentialsReceived;
-int64_t _lastSentFullHello;
 int64_t _lastPathPrune;
 int64_t _lastACKWindowReset;
 int64_t _lastQoSWindowReset;
@@ -51,14 +51,11 @@ namespace ZeroTier {
  *
  * It's also possible to create a root with no DNS and no DNS validator public key. This root
  * will be a static entry pointing to a single root identity and set of physical addresses.
- *
- * This object is thread-safe and may be concurrently accessed and updated.
  */
 class Root
 {
 public:
 inline Root() : _dnsPublicKeySize(0) {}
-inline Root(const Root &r) { *this = r; }

 /**
  * Create a new root entry
@@ -83,25 +80,11 @@ public:
 }
 }

-inline Root &operator=(const Root &r)
-{
-Mutex::Lock l(_lock);
-Mutex::Lock rl(r._lock);
-_defaultIdentity = r._defaultIdentity;
-_defaultAddresses = r._defaultAddresses;
-_dnsName = r._dnsName;
-_lastFetchedLocator = r._lastFetchedLocator;
-_dnsPublicKeySize = r._dnsPublicKeySize;
-memcpy(_dnsPublicKey,r._dnsPublicKey,_dnsPublicKeySize);
-return *this;
-}
-
 /**
  * @return Current identity (either default or latest locator)
  */
 inline const Identity id() const
 {
-Mutex::Lock l(_lock);
 if (_lastFetchedLocator.id())
 return _lastFetchedLocator.id();
 return _defaultIdentity;
@ -113,7 +96,6 @@ public:
|
||||||
*/
|
*/
|
||||||
inline bool is(const Identity &id) const
|
inline bool is(const Identity &id) const
|
||||||
{
|
{
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
return ((_lastFetchedLocator.id()) ? (id == _lastFetchedLocator.id()) : (id == _defaultIdentity));
|
return ((_lastFetchedLocator.id()) ? (id == _lastFetchedLocator.id()) : (id == _defaultIdentity));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -122,7 +104,6 @@ public:
|
||||||
*/
|
*/
|
||||||
inline const Address address() const
|
inline const Address address() const
|
||||||
{
|
{
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
if (_lastFetchedLocator.id())
|
if (_lastFetchedLocator.id())
|
||||||
return _lastFetchedLocator.id().address();
|
return _lastFetchedLocator.id().address();
|
||||||
return _defaultIdentity.address();
|
return _defaultIdentity.address();
|
||||||
|
@ -133,7 +114,6 @@ public:
|
||||||
*/
|
*/
|
||||||
inline const Str dnsName() const
|
inline const Str dnsName() const
|
||||||
{
|
{
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
return _dnsName;
|
return _dnsName;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -142,7 +122,6 @@ public:
|
||||||
*/
|
*/
|
||||||
inline Locator locator() const
|
inline Locator locator() const
|
||||||
{
|
{
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
return _lastFetchedLocator;
|
return _lastFetchedLocator;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -151,7 +130,6 @@ public:
|
||||||
*/
|
*/
|
||||||
inline int64_t locatorTimestamp() const
|
inline int64_t locatorTimestamp() const
|
||||||
{
|
{
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
return _lastFetchedLocator.timestamp();
|
return _lastFetchedLocator.timestamp();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -162,7 +140,6 @@ public:
|
||||||
{
|
{
|
||||||
if (!loc.verify())
|
if (!loc.verify())
|
||||||
return false;
|
return false;
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
if ((loc.phy().size() > 0)&&(loc.timestamp() > _lastFetchedLocator.timestamp())) {
|
if ((loc.phy().size() > 0)&&(loc.timestamp() > _lastFetchedLocator.timestamp())) {
|
||||||
_lastFetchedLocator = loc;
|
_lastFetchedLocator = loc;
|
||||||
return true;
|
return true;
|
||||||
|
@ -177,7 +154,6 @@ public:
|
||||||
inline bool updateLocatorFromTxt(I start,I end)
|
inline bool updateLocatorFromTxt(I start,I end)
|
||||||
{
|
{
|
||||||
try {
|
try {
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
if (_dnsPublicKeySize != ZT_ECC384_PUBLIC_KEY_SIZE)
|
if (_dnsPublicKeySize != ZT_ECC384_PUBLIC_KEY_SIZE)
|
||||||
return false;
|
return false;
|
||||||
Locator loc;
|
Locator loc;
|
||||||
|
@ -193,38 +169,25 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Pick random IPv4 and IPv6 addresses for this root
|
* Pick a random physical IP for this root with the given address family
|
||||||
*
|
*
|
||||||
* @param v4 Filled with V4 address or NIL if none found
|
* @param addressFamily AF_INET or AF_INET6
|
||||||
* @param v6 Filled with V6 address or NIL if none found
|
* @return Address or InetAddress::NIL if no addresses exist for the given family
|
||||||
*/
|
*/
|
||||||
inline void pickPhysical(InetAddress &v4,InetAddress &v6) const
|
inline const InetAddress &pickPhysical(const int addressFamily) const
|
||||||
{
|
{
|
||||||
v4.clear();
|
std::vector<const InetAddress *> pickList;
|
||||||
v6.clear();
|
|
||||||
std::vector<const InetAddress *> v4a,v6a;
|
|
||||||
Mutex::Lock l(_lock);
|
|
||||||
const std::vector<InetAddress> *const av = (_lastFetchedLocator) ? &(_lastFetchedLocator.phy()) : &_defaultAddresses;
|
const std::vector<InetAddress> *const av = (_lastFetchedLocator) ? &(_lastFetchedLocator.phy()) : &_defaultAddresses;
|
||||||
for(std::vector<InetAddress>::const_iterator i(av->begin());i!=av->end();++i) {
|
for(std::vector<InetAddress>::const_iterator i(av->begin());i!=av->end();++i) {
|
||||||
switch(i->ss_family) {
|
if (addressFamily == (int)i->ss_family) {
|
||||||
case AF_INET:
|
pickList.push_back(&(*i));
|
||||||
v4a.push_back(&(*i));
|
|
||||||
break;
|
|
||||||
case AF_INET6:
|
|
||||||
v6a.push_back(&(*i));
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (v4a.size() == 1) {
|
if (pickList.size() == 1)
|
||||||
v4 = *v4a[0];
|
return *pickList[0];
|
||||||
} else if (v4a.size() > 1) {
|
else if (pickList.size() > 1)
|
||||||
v4 = *v4a[(unsigned long)Utils::random() % (unsigned long)v4a.size()];
|
return *pickList[(unsigned long)Utils::random() % (unsigned long)pickList.size()];
|
||||||
}
|
return InetAddress::NIL;
|
||||||
if (v6a.size() == 1) {
|
|
||||||
v6 = *v6a[0];
|
|
||||||
} else if (v6a.size() > 1) {
|
|
||||||
v6 = *v6a[(unsigned long)Utils::random() % (unsigned long)v6a.size()];
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -234,7 +197,6 @@ private:
|
||||||
Locator _lastFetchedLocator;
|
Locator _lastFetchedLocator;
|
||||||
unsigned int _dnsPublicKeySize;
|
unsigned int _dnsPublicKeySize;
|
||||||
uint8_t _dnsPublicKey[ZT_ECC384_PUBLIC_KEY_SIZE];
|
uint8_t _dnsPublicKey[ZT_ECC384_PUBLIC_KEY_SIZE];
|
||||||
Mutex _lock;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace ZeroTier
|
} // namespace ZeroTier
|
||||||
|
|
|
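The new pickPhysical() above collapses the separate v4/v6 output parameters into a single family-keyed lookup: filter the root's advertised physical addresses down to the requested family, then choose one uniformly at random, returning InetAddress::NIL if that family is not represented. A minimal standalone sketch of that selection pattern follows; it uses plain sockaddr_storage and rand() so it compiles outside the ZeroTier tree, and the helper name is an illustrative choice, not code from this commit.

// Illustrative sketch only: "filter by address family, then pick uniformly at
// random," as in the new pickPhysical(). Returning nullptr plays the role of
// returning InetAddress::NIL.
#include <cstdlib>
#include <vector>
#include <sys/socket.h>

static const sockaddr_storage *pickByFamily(const std::vector<sockaddr_storage> &addrs,int family)
{
	std::vector<const sockaddr_storage *> pickList;
	for(std::vector<sockaddr_storage>::const_iterator i(addrs.begin());i!=addrs.end();++i) {
		if ((int)i->ss_family == family) // keep only addresses of the requested family
			pickList.push_back(&(*i));
	}
	if (pickList.empty())
		return nullptr; // no address of this family advertised
	return pickList[(unsigned long)rand() % (unsigned long)pickList.size()]; // uniform random pick
}
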
@@ -470,7 +470,7 @@ void Switch::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const
 			while (numBridges < ZT_MAX_BRIDGE_SPAM) {
 				if (ab == activeBridges.end())
 					ab = activeBridges.begin();
-				if (((unsigned long)RR->node->prng() % (unsigned long)activeBridges.size()) == 0) {
+				if (((unsigned long)Utils::random() % (unsigned long)activeBridges.size()) == 0) {
 					bridges[numBridges++] = *ab;
 					++ab;
 				} else ++ab;
@@ -519,7 +519,7 @@ void Switch::aqm_enqueue(void *tPtr, const SharedPtr<Network> &network, Packet &
 		// DEBUG_INFO("skipping, no QoS for this packet, verb=%x", packet.verb());
 		// just send packet normally, no QoS for ZT protocol traffic
 		send(tPtr, packet, encrypt);
 	}
 
 	_aqm_m.lock();
 
@@ -527,7 +527,7 @@ void Switch::aqm_enqueue(void *tPtr, const SharedPtr<Network> &network, Packet &
 
 	const Address dest(packet.destination());
 	TXQueueEntry *txEntry = new TXQueueEntry(dest,RR->node->now(),packet,encrypt);
 
 	ManagedQueue *selectedQueue = nullptr;
 	for (size_t i=0; i<ZT_QOS_NUM_BUCKETS; i++) {
 		if (i < nqcb->oldQueues.size()) { // search old queues first (I think this is best since old would imply most recent usage of the queue)
@@ -601,7 +601,7 @@ uint64_t Switch::control_law(uint64_t t, int count)
 	return (uint64_t)(t + ZT_QOS_INTERVAL / sqrt(count));
 }
 
 Switch::dqr Switch::dodequeue(ManagedQueue *q, uint64_t now)
 {
 	dqr r;
 	r.ok_to_drop = false;

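The control_law() line shown above appears to follow the CoDel-style control law: each successive drop is scheduled ZT_QOS_INTERVAL / sqrt(count) after the previous one, so drops become more frequent the longer a queue stays over target. A small standalone sketch of that schedule follows; the 100 ms interval is an assumed stand-in for ZT_QOS_INTERVAL, purely for illustration.

// Illustrative sketch only: next-drop scheduling with interval / sqrt(count).
#include <cmath>
#include <cstdint>
#include <cstdio>

static const uint64_t kIntervalMs = 100; // stand-in for ZT_QOS_INTERVAL (assumed value)

static uint64_t nextDropTime(uint64_t t,int count)
{
	// Same shape as control_law(): advance by interval / sqrt(count).
	return (uint64_t)(t + kIntervalMs / sqrt((double)count));
}

int main()
{
	uint64_t t = 0;
	for(int count=1;count<=4;++count) {
		t = nextDropTime(t,count);
		printf("drop %d at t=%llu ms\n",count,(unsigned long long)t); // 100, 170, 227, 277
	}
	return 0;
}
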
@@ -215,13 +215,16 @@ public:
 	/**
 	 * Apply a function or function object to all peers
 	 *
+	 * This locks the peer map during execution, so calls to get() etc. during
+	 * eachPeer() will deadlock.
+	 *
 	 * @param f Function to apply
 	 * @tparam F Function or function object type
 	 */
 	template<typename F>
 	inline void eachPeer(F f)
 	{
-		Mutex::Lock _l(_peers_m);
+		Mutex::Lock l(_peers_m);
 		Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
 		Address *a = (Address *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
@@ -231,12 +234,51 @@ public:
 	}
 
 	/**
-	 * @return All peers by address (unsorted)
+	 * Apply a function or function object to all roots
+	 *
+	 * Arguments to the function are this topology object, the root in
+	 * question, and a pointer to the peer corresponding to it.
+	 *
+	 * This locks the root list during execution but other operations
+	 * are fine.
+	 *
+	 * @param f Function to apply
+	 * @tparam F function or function object type
 	 */
-	inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
+	template<typename F>
+	inline void eachRoot(F f) const
 	{
-		Mutex::Lock _l(_peers_m);
-		return _peers.entries();
+		Mutex::Lock l(_roots_m);
+		SharedPtr<Peer> rp;
+		for(std::vector<Root>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
+			{
+				Mutex::Lock l2(_peers_m);
+				const SharedPtr<Peer> *const ap = _peers.get(i->address());
+				if (ap) {
+					rp = *ap;
+				} else {
+					rp.set(new Peer(RR,_myIdentity,i->id()));
+					_peers.set(rp->address(),rp);
+				}
+			}
+			f(*this,*i,rp);
+		}
+	}
+
+	/**
+	 * @param allPeers vector to fill with all current peers
+	 */
+	inline void getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
+	{
+		Mutex::Lock l(_peers_m);
+		allPeers.clear();
+		allPeers.reserve(_peers.size());
+		Hashtable< Address,SharedPtr<Peer> >::Iterator i(*(const_cast<Hashtable< Address,SharedPtr<Peer> > *>(&_peers)));
+		Address *a = (Address *)0;
+		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+		while (i.next(a,p)) {
+			allPeers.push_back(*p);
+		}
 	}
 
 	/**

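As the doc comments in the hunk above note, eachPeer() holds the peer map lock for the duration of the callback (so peer-map calls such as get() from inside it will deadlock), eachRoot() hands the callback the topology, the Root entry, and its Peer, and getAllPeers() copies the peer table into a caller-owned vector. A hedged sketch of how a caller might use the new helpers follows; it assumes the ZeroTier core headers and an existing Topology instance, and the helper names are illustrative, not code from this commit.

// Hypothetical caller, for illustration only; assumes the ZeroTier core headers
// (Topology.hpp, Root.hpp, Peer.hpp) and an existing Topology instance.
#include <vector>

// Count roots by iterating with eachRoot(); the lambda parameters mirror the
// f(*this,*i,rp) call inside eachRoot(): (topology, root, peer).
static unsigned long countRoots(const ZeroTier::Topology &topo)
{
	unsigned long n = 0;
	topo.eachRoot([&n](const ZeroTier::Topology &,const ZeroTier::Root &,const ZeroTier::SharedPtr<ZeroTier::Peer> &) { ++n; });
	return n;
}

// Snapshot the peer table with getAllPeers(); unlike eachPeer(), the returned
// vector can be walked without the peer map lock being held.
static void snapshotPeers(const ZeroTier::Topology &topo,std::vector< ZeroTier::SharedPtr<ZeroTier::Peer> > &out)
{
	topo.getAllPeers(out);
}
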
@@ -315,22 +357,22 @@ public:
 		std::map<InetAddress,ZT_PhysicalPathConfiguration> cpaths;
 		for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i)
 			cpaths[_physicalPathConfig[i].first] = _physicalPathConfig[i].second;
 
 		if (pathConfig) {
 			ZT_PhysicalPathConfiguration pc(*pathConfig);
 
 			if (pc.mtu <= 0)
 				pc.mtu = ZT_DEFAULT_PHYSMTU;
 			else if (pc.mtu < ZT_MIN_PHYSMTU)
 				pc.mtu = ZT_MIN_PHYSMTU;
 			else if (pc.mtu > ZT_MAX_PHYSMTU)
 				pc.mtu = ZT_MAX_PHYSMTU;
 
 			cpaths[*(reinterpret_cast<const InetAddress *>(pathNetwork))] = pc;
 		} else {
 			cpaths.erase(*(reinterpret_cast<const InetAddress *>(pathNetwork)));
 		}
 
 		unsigned int cnt = 0;
 		for(std::map<InetAddress,ZT_PhysicalPathConfiguration>::const_iterator i(cpaths.begin());((i!=cpaths.end())&&(cnt<ZT_MAX_CONFIGURABLE_PATHS));++i) {
 			_physicalPathConfig[cnt].first = i->first;

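The MTU handling in the hunk above normalizes whatever the caller supplies: zero or a negative value means "use the default," and out-of-range values are clamped into the [ZT_MIN_PHYSMTU, ZT_MAX_PHYSMTU] range. A standalone sketch of that normalization follows, with stand-in constants; the real values are defined in the ZeroTier headers, and the numbers below are assumptions used purely for illustration.

// Illustrative sketch only: MTU normalization as applied to ZT_PhysicalPathConfiguration.
#include <cstdio>

static const int kDefaultPhysMtu = 1432; // assumed stand-in for ZT_DEFAULT_PHYSMTU
static const int kMinPhysMtu = 1400;     // assumed stand-in for ZT_MIN_PHYSMTU
static const int kMaxPhysMtu = 10000;    // assumed stand-in for ZT_MAX_PHYSMTU

static int normalizeMtu(int mtu)
{
	if (mtu <= 0) return kDefaultPhysMtu;           // unset: fall back to the default
	else if (mtu < kMinPhysMtu) return kMinPhysMtu; // clamp up to the minimum
	else if (mtu > kMaxPhysMtu) return kMaxPhysMtu; // clamp down to the maximum
	return mtu;
}

int main()
{
	// Prints: 1432 1400 1500 10000 (with the assumed constants above)
	printf("%d %d %d %d\n",normalizeMtu(0),normalizeMtu(100),normalizeMtu(1500),normalizeMtu(65535));
	return 0;
}
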
@@ -599,7 +599,7 @@ public:
 
 			// Network controller is now enabled by default for desktop and server
 			_controller = new EmbeddedNetworkController(_node,_homePath.c_str(),_controllerDbPath.c_str(),_ports[0], _mqc);
-			_node->setNetconfMaster((void *)_controller);
+			_node->setController((void *)_controller);
 
 			// Join existing networks in networks.d
 			{