Clean up timing stuff in Peer and make it more rational, implement some more P2P stuff, rename some methods, cleanup cleanup cleanup.

Adam Ierymenko 2020-04-03 10:18:46 -07:00
parent 3d6d9c5808
commit fbf4ae823b
16 changed files with 563 additions and 511 deletions

View file

@ -423,6 +423,11 @@ enum ZT_TraceEventPathAddressType
ZT_TRACE_EVENT_PATH_TYPE_INETADDR_V6 = 6 /* 16-byte IPv6 */
};
/**
* Maximum integer value of enum ZT_TraceEventPathAddressType
*/
#define ZT_TRACE_EVENT_PATH_TYPE__MAX 6
/**
* Reasons for trying new paths
*/

View file

@ -55,8 +55,6 @@ class Capability : public Credential
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_CAPABILITY; }
ZT_INLINE Capability() noexcept { memoryZero(this); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
/**

View file

@ -106,8 +106,6 @@ class CertificateOfMembership : public Credential
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COM; }
/**
* Create an empty certificate of membership
*/

View file

@ -50,8 +50,6 @@ class CertificateOfOwnership : public Credential
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COO; }
enum Thing
{
THING_NULL = 0,

View file

@ -47,7 +47,7 @@
#define ZT_ENDPOINT_MAX_NAME_SIZE 61
/**
* Size of an identity hash (SHA384)
* Size of an identity hash (SHA384) in bytes
*/
#define ZT_IDENTITY_HASH_SIZE 48
@ -82,7 +82,10 @@
#define ZT_MAX_TIMER_TASK_INTERVAL 1000
/**
* Interval between steps or stages in NAT-t attempts
* Interval between steps or stages in multi-stage NAT traversal operations.
*
* This is, for example, the interval between the initial firewall openers and the
* real packets of a two-phase IPv4 hole punch.
*/
#define ZT_NAT_TRAVERSAL_INTERVAL 200
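For illustration, a minimal sketch of how this interval separates the two phases of an IPv4 hole punch. The sendPacket() and scheduleAfter() helpers are assumptions for the sketch, not part of this commit:

// Sketch: two-phase IPv4 hole punch paced by ZT_NAT_TRAVERSAL_INTERVAL.
// Phase 1: a tiny "firewall opener" creates outbound state in our own NAT.
uint8_t opener = 0;
sendPacket(peerAddr,&opener,1);
// Phase 2: one interval later the peer has (hopefully) done the same, so a
// real packet sent now can traverse both NATs.
scheduleAfter(ZT_NAT_TRAVERSAL_INTERVAL,[&]() { sendPacket(peerAddr,realPacket,realPacketLen); });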
@ -97,7 +100,7 @@
* Note that this affects how frequently we re-request network configurations
* from network controllers if we haven't received one yet.
*/
#define ZT_NETWORK_HOUSEKEEPING_PERIOD 15000
#define ZT_NETWORK_HOUSEKEEPING_PERIOD 30000
/**
* Delay between WHOIS retries in ms
@ -122,19 +125,14 @@
#define ZT_PATH_ALIVE_TIMEOUT ((ZT_PATH_KEEPALIVE_PERIOD * 2) + 5000)
/**
* Delay between full HELLO messages between peers
* Delay between calls to the pulse() method in Peer for each peer
*/
#define ZT_PEER_PING_PERIOD 60000
#define ZT_PEER_PULSE_INTERVAL ZT_PATH_KEEPALIVE_PERIOD
/**
* Timeout for peer alive-ness (measured from last receive)
* Minimum interval between HELLOs to peers.
*/
#define ZT_PEER_ALIVE_TIMEOUT ((ZT_PEER_PING_PERIOD * 2) + 5000)
/**
* Timeout for peer active-ness (measured from last receive)
*/
#define ZT_PEER_ACTIVITY_TIMEOUT (ZT_PEER_PING_PERIOD + 5000)
#define ZT_PEER_HELLO_INTERVAL 120000LL
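Taken together these constants mean pulse() runs at the path keepalive rate and decides internally whether a full HELLO is due. A condensed sketch of that cadence (the complete logic is in Peer::pulse() later in this diff):

// Condensed cadence sketch; see Peer::pulse() below for the real logic.
if ((now - _lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
    _lastSentHello = now;
    hello(tPtr,path->localSocket(),path->address(),now); // full HELLO, at most every ~2 minutes
} else if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
    path->send(RR,tPtr,&now,1,now); // otherwise a single byte keeps the path's NAT mapping alive
}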
/**
* Global timeout for peers in milliseconds
@ -175,14 +173,9 @@
#define ZT_MAX_BRIDGE_SPAM 32
/**
* Interval between direct path pushes in milliseconds if we don't have a path
* Interval between attempts to make a direct connection if one does not exist
*/
#define ZT_DIRECT_PATH_PUSH_INTERVAL 30000
/**
* Interval between direct path pushes in milliseconds if we already have a path
*/
#define ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH 120000
#define ZT_DIRECT_CONNECT_ATTEMPT_INTERVAL 30000
/**
* Maximum number of paths per IP scope (e.g. global, link-local) and family (e.g. v4/v6)

View file

@ -54,8 +54,8 @@ void identityV0ProofOfWorkFrankenhash(const void *const publicKey,unsigned int p
// Render final digest using genmem as a lookup table
for(unsigned long i=0;i<(ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t));) {
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (64 / sizeof(uint64_t)));
unsigned long idx2 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t)));
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (64 / sizeof(uint64_t))); // NOLINT(hicpp-use-auto,modernize-use-auto)
unsigned long idx2 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t))); // NOLINT(hicpp-use-auto,modernize-use-auto)
uint64_t tmp = ((uint64_t *)genmem)[idx2];
((uint64_t *)genmem)[idx2] = ((uint64_t *)digest)[idx1];
((uint64_t *)digest)[idx1] = tmp;
@ -78,12 +78,12 @@ struct identityV0ProofOfWorkCriteria
// It's not quite as intensive as the V0 frankenhash and is a little more orderly in
// its design, but remains relatively resistant to GPU acceleration due to memory
// requirements for efficient computation.
bool identityV1ProofOfWorkCriteria(const void *in,const unsigned int len)
#define ZT_IDENTITY_V1_POW_MEMORY_SIZE 98304
bool identityV1ProofOfWorkCriteria(const void *in,const unsigned int len,uint64_t *const b)
{
uint64_t b[98304]; // 768 KiB of working memory
SHA512(b,in,len);
// This treats hash output as little-endian, so swap on BE machines.
#if __BYTE_ORDER == __BIG_ENDIAN
b[0] = Utils::swapBytes(b[0]);
b[1] = Utils::swapBytes(b[1]);
@ -102,7 +102,8 @@ bool identityV1ProofOfWorkCriteria(const void *in,const unsigned int len)
// least that this is the most efficient implementation.
Speck128<24> s16;
s16.initXY(b[4],b[5]);
for(unsigned long i=0;i<(98304-8);) {
for(unsigned long i=0;i<(ZT_IDENTITY_V1_POW_MEMORY_SIZE-8);) {
// Load four 128-bit blocks.
uint64_t x0 = b[i];
uint64_t y0 = b[i + 1];
uint64_t x1 = b[i + 2];
@ -111,12 +112,22 @@ bool identityV1ProofOfWorkCriteria(const void *in,const unsigned int len)
uint64_t y2 = b[i + 5];
uint64_t x3 = b[i + 6];
uint64_t y3 = b[i + 7];
// Advance by 512 bits / 64 bytes (it's a uint64_t array).
i += 8;
x0 += x1; // mix parallel 128-bit blocks
// Ensure that mixing happens across blocks.
x0 += x1;
x1 += x2;
x2 += x3;
x3 += y0;
// Encrypt 4X blocks. Speck is used for this PoW function because
// its performance is similar on all architectures while AES is much
// faster on some than others.
s16.encryptXYXYXYXY(x0,y0,x1,y1,x2,y2,x3,y3);
// Store four 128-bit blocks at new position.
b[i] = x0;
b[i + 1] = y0;
b[i + 2] = x1;
@ -126,8 +137,13 @@ bool identityV1ProofOfWorkCriteria(const void *in,const unsigned int len)
b[i + 6] = x3;
b[i + 7] = y3;
}
std::sort(b,b + 98304);
// Sort array, something that can't efficiently be done unless we have
// computed the whole array and have it in memory. This also involves
// branching, which is less efficient on GPUs.
std::sort(b,b + ZT_IDENTITY_V1_POW_MEMORY_SIZE);
// Swap byte order back on BE machines.
#if __BYTE_ORDER == __BIG_ENDIAN
for(unsigned int i=0;i<98304;i+=8) {
b[i] = Utils::swapBytes(b[i]);
@ -141,12 +157,11 @@ bool identityV1ProofOfWorkCriteria(const void *in,const unsigned int len)
}
#endif
// Hash resulting sorted array to get final result for PoW criteria test.
SHA384(b,b,sizeof(b),in,len);
// Criterion: add two 64-bit components of poly1305 hash, must be zero mod 180.
// As with the rest of this, bits are used in little-endian byte order. The value
// of 180 was set empirically to result in about one second per new identity on
// one CPU core of a typical desktop or server in 2020.
// PoW passes if sum of first two 64-bit integers (treated as little-endian) mod 180 is 0.
// This value was picked to yield about 1-2s total on typical desktop and server cores in 2020.
#if __BYTE_ORDER == __BIG_ENDIAN
const uint64_t finalHash = Utils::swapBytes(b[0]) + Utils::swapBytes(b[1]);
#else
@ -179,6 +194,9 @@ bool Identity::generate(const Type t)
} break;
case P384: {
uint64_t *const b = (uint64_t *)malloc(ZT_IDENTITY_V1_POW_MEMORY_SIZE * 8); // NOLINT(hicpp-use-auto,modernize-use-auto)
if (!b)
return false;
for(;;) {
// Loop until we pass the PoW criteria. The nonce is only 8 bits, so generate
// some new key material every time it wraps. The ECC384 generator is slightly
@ -187,7 +205,7 @@ bool Identity::generate(const Type t)
C25519::generate(_pub.c25519,_priv.c25519);
ECC384GenerateKey(_pub.p384,_priv.p384);
for(;;) {
if (identityV1ProofOfWorkCriteria(&_pub,sizeof(_pub)))
if (identityV1ProofOfWorkCriteria(&_pub,sizeof(_pub),b))
break;
if (++_pub.nonce == 0)
ECC384GenerateKey(_pub.p384,_priv.p384);
@ -200,6 +218,7 @@ bool Identity::generate(const Type t)
if (!_address.isReserved())
break;
}
free(b);
} break;
default:
@ -223,8 +242,16 @@ bool Identity::locallyValidate() const noexcept
return ((_address == Address(digest + 59)) && (digest[0] < 17));
}
case P384:
return ((_address == Address(_fp.hash())) && identityV1ProofOfWorkCriteria(&_pub,sizeof(_pub)) );
case P384: {
if (_address != Address(_fp.hash()))
return false;
uint64_t *const b = (uint64_t *)malloc(ZT_IDENTITY_V1_POW_MEMORY_SIZE * 8); // NOLINT(hicpp-use-auto,modernize-use-auto)
if (!b)
return false;
const bool ok = identityV1ProofOfWorkCriteria(&_pub,sizeof(_pub),b);
free(b);
return ok;
}
}
}
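The PoW work buffer is now caller-supplied so that generate() can reuse one heap allocation across nonce attempts instead of placing 768 KiB on the stack. A minimal usage sketch mirroring the call sites above:

// Sketch: invoking the V1 PoW check with an external work buffer.
uint64_t *const b = (uint64_t *)malloc(ZT_IDENTITY_V1_POW_MEMORY_SIZE * 8); // 98304 * 8 = 768 KiB
if (b) {
    const bool ok = identityV1ProofOfWorkCriteria(&_pub,sizeof(_pub),b);
    // ok is true when the sum of the first two little-endian 64-bit words of
    // the final SHA384 is congruent to 0 mod 180.
    free(b);
}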

View file

@ -77,19 +77,19 @@ struct _sortPeerPtrsByAddress
Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64_t now) :
_RR(this),
_objects(nullptr),
RR(&_RR),
_objects(nullptr),
_cb(*callbacks),
_uPtr(uPtr),
_networks(),
_now(now),
_lastPing(0),
_lastPeerPulse(0),
_lastHousekeepingRun(0),
_lastNetworkHousekeepingRun(0),
_lastPathKeepaliveCheck(0),
_now(now),
_natMustDie(true),
_online(false)
{
// Load this node's identity.
uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
std::vector<uint8_t> data(stateObjectGet(tPtr,ZT_STATE_OBJECT_IDENTITY_SECRET,idtmp));
bool haveIdentity = false;
@ -102,6 +102,7 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
}
}
// Generate a new identity if we don't have one.
if (!haveIdentity) {
RR->identity.generate(Identity::C25519);
RR->identity.toString(false,RR->publicIdentityStr);
@ -190,7 +191,7 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
struct _processBackgroundTasks_eachPeer
{
ZT_INLINE _processBackgroundTasks_eachPeer(const int64_t now_,Node *const parent_,void *const tPtr_) :
ZT_INLINE _processBackgroundTasks_eachPeer(const int64_t now_,Node *const parent_,void *const tPtr_) noexcept :
now(now_),
parent(parent_),
tPtr(tPtr_),
@ -200,86 +201,70 @@ struct _processBackgroundTasks_eachPeer
Node *const parent;
void *const tPtr;
bool online;
std::vector<Address> rootsNotOnline;
ZT_INLINE void operator()(const SharedPtr<Peer> &peer,const bool isRoot)
std::vector< SharedPtr<Peer> > rootsNotOnline;
ZT_INLINE void operator()(const SharedPtr<Peer> &peer,const bool isRoot) noexcept
{
peer->ping(tPtr,now,isRoot);
peer->pulse(tPtr,now,isRoot);
if (isRoot) {
if (peer->active(now)) {
if (peer->directlyConnected(now)) {
online = true;
} else {
rootsNotOnline.push_back(peer->address());
rootsNotOnline.push_back(peer);
}
}
}
};
struct _processBackgroundTasks_eachPath
{
ZT_INLINE _processBackgroundTasks_eachPath(const int64_t now_,const RuntimeEnvironment *const RR_,void *const tPtr_) :
now(now_),
RR(RR_),
tPtr(tPtr_),
keepAlivePayload((uint8_t)now_) {}
const int64_t now;
const RuntimeEnvironment *const RR;
void *const tPtr;
uint8_t keepAlivePayload;
ZT_INLINE void operator()(const SharedPtr<Path> &path)
{
if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
++keepAlivePayload;
path->send(RR,tPtr,&keepAlivePayload,sizeof(keepAlivePayload),now);
}
}
};
ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int64_t *nextBackgroundTaskDeadline)
ZT_ResultCode Node::processBackgroundTasks(void *tPtr,int64_t now,volatile int64_t *nextBackgroundTaskDeadline)
{
_now = now;
Mutex::Lock bl(_backgroundTasksLock);
if ((now - _lastPing) >= ZT_PEER_PING_PERIOD) {
_lastPing = now;
try {
_processBackgroundTasks_eachPeer pf(now,this,tPtr);
RR->topology->eachPeerWithRoot<_processBackgroundTasks_eachPeer &>(pf);
try {
// Call the pulse() method of every peer once per ZT_PEER_PULSE_INTERVAL.
if ((now - _lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
_lastPeerPulse = now;
try {
_processBackgroundTasks_eachPeer pf(now,this,tPtr);
RR->topology->eachPeerWithRoot<_processBackgroundTasks_eachPeer &>(pf);
if (pf.online != _online) {
_online = pf.online;
postEvent(tPtr, _online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
if (pf.online != _online) {
_online = pf.online;
postEvent(tPtr, _online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
}
RR->topology->rankRoots(now);
if (pf.online) {
// If we have at least one online root, request whois for roots not online.
// This will give us updated locators for these roots which may contain new
// IP addresses. It will also auto-discover IPs for roots that were not added
// with an initial bootstrap address.
// TODO
//for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
// RR->sw->requestWhois(tPtr,now,*r);
}
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
RR->topology->rankRoots(now);
if (pf.online) {
// If we have at least one online root, request whois for roots not online.
// This will give us updated locators for these roots which may contain new
// IP addresses. It will also auto-discover IPs for roots that were not added
// with an initial bootstrap address.
// TODO
//for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
// RR->sw->requestWhois(tPtr,now,*r);
}
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
_lastNetworkHousekeepingRun = now;
{
RWMutex::RLock l(_networks_m);
for(Map< uint64_t,SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i)
i->second->doPeriodicTasks(tPtr,now);
}
}
if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
_lastHousekeepingRun = now;
try {
// Clean up any old local controller auth memoizations. This is an
// optimization for network controllers to know whether to accept
// or trust nodes without doing an extra cert check.
// Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
_lastNetworkHousekeepingRun = now;
{
RWMutex::RLock l(_networks_m);
for(Map< uint64_t,SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i)
i->second->doPeriodicTasks(tPtr,now);
}
}
// Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
_lastHousekeepingRun = now;
try {
// Clean up any old local controller auth memoizations. This is an
// optimization for network controllers to know whether to accept
// or trust nodes without doing an extra cert check.
_localControllerAuthorizations_m.lock();
for(Map<_LocalControllerAuth,int64_t>::iterator i(_localControllerAuthorizations.begin());i!=_localControllerAuthorizations.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if ((now - i->second) > (ZT_NETWORK_AUTOCONF_DELAY * 3))
@ -287,45 +272,38 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
else ++i;
}
_localControllerAuthorizations_m.unlock();
}
RR->topology->doPeriodicTasks(tPtr, now);
RR->sa->clean(now);
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
if ((now - _lastPathKeepaliveCheck) >= ZT_PATH_KEEPALIVE_PERIOD) {
_lastPathKeepaliveCheck = now;
_processBackgroundTasks_eachPath pf(now,RR,tPtr);
RR->topology->eachPath<_processBackgroundTasks_eachPath &>(pf);
}
int64_t earliestAlarmAt = 0x7fffffffffffffffLL;
std::vector<Address> bzzt;
{
RWMutex::RMaybeWLock l(_peerAlarms_l);
for(std::map<Address,int64_t>::iterator a(_peerAlarms.begin());a!=_peerAlarms.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if (now >= a->second) {
bzzt.push_back(a->first);
l.writing();
_peerAlarms.erase(a++);
} else {
if (a->second < earliestAlarmAt)
earliestAlarmAt = a->second;
++a;
RR->topology->doPeriodicTasks(tPtr, now);
RR->sa->clean(now);
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
}
for(std::vector<Address>::iterator a(bzzt.begin());a!=bzzt.end();++a) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
const SharedPtr<Peer> p(RR->topology->peer(tPtr,*a,false));
if (p)
p->alarm(tPtr,now);
}
try {
*nextBackgroundTaskDeadline = std::min(earliestAlarmAt,now + ZT_MAX_TIMER_TASK_INTERVAL);
// Set off any due or overdue peer alarms.
int64_t earliestAlarmAt = now + ZT_MAX_TIMER_TASK_INTERVAL;
std::vector<Fingerprint> bzzt;
{
Mutex::Lock l(_peerAlarms_l);
for(std::map<Fingerprint,int64_t>::iterator a(_peerAlarms.begin());a!=_peerAlarms.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if (now >= a->second) {
bzzt.push_back(a->first);
_peerAlarms.erase(a++);
} else {
if (a->second < earliestAlarmAt)
earliestAlarmAt = a->second;
++a;
}
}
}
for(std::vector<Fingerprint>::iterator a(bzzt.begin());a!=bzzt.end();++a) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
const SharedPtr<Peer> p(RR->topology->peer(tPtr,a->address(),false));
if ((p)&&(p->identity().fingerprint() == *a))
p->alarm(tPtr,now);
}
// Tell caller when to call this method next.
*nextBackgroundTaskDeadline = earliestAlarmAt;
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
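The deadline written back to the caller is the earliest pending peer alarm, capped at now + ZT_MAX_TIMER_TASK_INTERVAL. A hypothetical host loop consuming it (a sketch; currentTimeMs() and waitUntilOrPacket() are assumed host helpers, not part of the API shown here):

// Hypothetical host event loop around processBackgroundTasks().
volatile int64_t deadline = 0;
for(;;) {
    const int64_t now = currentTimeMs(); // assumed host clock helper
    if (now >= deadline)
        node->processBackgroundTasks(tPtr,now,&deadline);
    waitUntilOrPacket(deadline); // assumed: sleep until deadline or an incoming packet
}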
@ -365,11 +343,12 @@ ZT_ResultCode Node::leave(uint64_t nwid,void **uptr,void *tptr)
if (uptr)
*uptr = *nw->userPtr();
nw->externalConfig(&ctmp);
nw->destroy();
nw.zero();
RR->node->configureVirtualNetworkPort(tptr,nwid,uptr,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
nw->destroy();
nw.zero();
uint64_t tmp[2];
tmp[0] = nwid; tmp[1] = 0;
RR->node->stateObjectDelete(tptr,ZT_STATE_OBJECT_NETWORK_CONFIG,tmp);
@ -379,7 +358,7 @@ ZT_ResultCode Node::leave(uint64_t nwid,void **uptr,void *tptr)
ZT_ResultCode Node::multicastSubscribe(void *tPtr,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
SharedPtr<Network> nw(this->network(nwid));
const SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastSubscribe(tPtr,MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
return ZT_RESULT_OK;
@ -388,7 +367,7 @@ ZT_ResultCode Node::multicastSubscribe(void *tPtr,uint64_t nwid,uint64_t multica
ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
SharedPtr<Network> nw(this->network(nwid));
const SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
return ZT_RESULT_OK;

View file

@ -60,7 +60,7 @@ public:
*/
void shutdown(void *tPtr);
// Get rid of alignment warnings on 32-bit Windows and possibly improve performance
// Get rid of alignment warnings on 32-bit Windows
#ifdef __WINDOWS__
void * operator new(size_t i) { return _mm_malloc(i,16); }
void operator delete(void* p) { _mm_free(p); }
@ -300,17 +300,15 @@ public:
ZT_INLINE bool natMustDie() const noexcept { return _natMustDie; }
/**
* Wake any peers with the given address by calling their alarm() methods at or after the specified time
* Wake peer by calling its alarm() method at or after a given time.
*
* @param peerAddress Peer address
* @param peer Identity fingerprint of peer to wake
* @param triggerTime Time alarm should go off
*/
ZT_INLINE void setPeerAlarm(const Address &peerAddress,const int64_t triggerTime)
ZT_INLINE void setPeerAlarm(const Fingerprint &peer,const int64_t triggerTime)
{
RWMutex::Lock l(_peerAlarms_l);
int64_t &t = _peerAlarms[peerAddress];
if ((t <= 0)||(t > triggerTime))
t = triggerTime;
Mutex::Lock l(_peerAlarms_l);
_peerAlarms[peer] = triggerTime;
}
/**
@ -335,25 +333,30 @@ public:
private:
RuntimeEnvironment _RR;
void *_objects;
RuntimeEnvironment *RR;
ZT_Node_Callbacks _cb;
void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
RuntimeEnvironment *const RR;
// Addresses of peers that want to have their alarm() function called at some point in the future.
// Pointer to a struct defined in Node that holds instances of core objects.
void *_objects;
// Function pointers to C callbacks supplied via the API.
ZT_Node_Callbacks _cb;
// A user-specified opaque pointer passed back via API callbacks.
void *_uPtr;
// Fingerprints of peers that want to have their alarm() function called at some point in the future.
// These behave like weak references in that the node looks them up in Topology and calls alarm()
// in each peer if that peer object is still held in memory. Calling alarm() unnecessarily on a peer
// is harmless. This just exists as an optimization to prevent having to iterate through all peers
// on every processBackgroundTasks call. A simple map<> is used here because there are usually only
// a few of these, if any.
std::map<Address,int64_t> _peerAlarms;
RWMutex _peerAlarms_l;
std::map<Fingerprint,int64_t> _peerAlarms;
Mutex _peerAlarms_l;
// Map that remembers if we have recently sent a network config to someone
// querying us as a controller. This is an optimization to allow network
// controllers to know whether to treat things like multicast queries the
// way authorized members would be treated without requiring an extra cert
// validation.
// Cache that remembers whether or not the locally running network controller (if any) has authorized
// someone on their most recent query. This is used by the network controller as a memoization optimization
// to elide unnecessary signature verifications. It might get moved in the future since this is sort of a
// weird place to put it.
struct _LocalControllerAuth
{
uint64_t nwid,address;
@ -366,8 +369,7 @@ private:
Map<_LocalControllerAuth,int64_t> _localControllerAuthorizations;
Mutex _localControllerAuthorizations_m;
// Networks are stored in a flat hash table that is resized on any network ID collision. This makes
// network lookup by network ID a few bitwise ops and an array index.
// Locally joined networks by network ID.
Map< uint64_t,SharedPtr<Network> > _networks;
RWMutex _networks_m;
@ -376,16 +378,22 @@ private:
std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
Mutex _localInterfaceAddresses_m;
// This is locked while running processBackgroundTasks to ensure that calls to it are not concurrent.
// This is locked while running processBackgroundTasks().
Mutex _backgroundTasksLock;
volatile int64_t _now;
volatile int64_t _lastPing;
volatile int64_t _lastHousekeepingRun;
volatile int64_t _lastNetworkHousekeepingRun;
volatile int64_t _lastPathKeepaliveCheck;
volatile bool _natMustDie;
volatile bool _online;
// These are locked via _backgroundTasksLock as they're only checked and modified in processBackgroundTasks().
int64_t _lastPeerPulse;
int64_t _lastHousekeepingRun;
int64_t _lastNetworkHousekeepingRun;
// This is the most recent value for time passed in via any of the core API methods.
std::atomic<int64_t> _now;
// True if we are to use really intensive NAT-busting measures.
std::atomic<bool> _natMustDie;
// True if at least one root appears reachable.
std::atomic<bool> _online;
};
} // namespace ZeroTier
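Because alarms are now keyed by full identity fingerprint rather than the 40-bit address, an address collision can no longer wake the wrong peer. A sketch of both ends of the mechanism (the same calls appear in Node.cpp and Peer.cpp in this diff):

// A peer schedules its own alarm() for the next NAT traversal step:
RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
// ...and Node only dispatches it if the full fingerprint still matches:
const SharedPtr<Peer> p(RR->topology->peer(tPtr,fp.address(),false));
if ((p)&&(p->identity().fingerprint() == fp))
    p->alarm(tPtr,now);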

View file

@ -22,37 +22,34 @@
#include "Protocol.hpp"
#include "Endpoint.hpp"
#include <set>
namespace ZeroTier {
struct _PathPriorityComparisonOperator
{
ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const
{
return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() < b->lastIn())) );
}
};
Peer::Peer(const RuntimeEnvironment *renv) :
Peer::Peer(const RuntimeEnvironment *renv) : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
RR(renv),
_lastReceive(0),
_lastSend(0),
_lastSentHello(),
_lastWhoisRequestReceived(0),
_lastEchoRequestReceived(0),
_lastPushDirectPathsReceived(0),
_lastProbeReceived(0),
_lastAttemptedP2PInit(0),
_lastTriedStaticPath(0),
_lastPrioritizedPaths(0),
_lastAttemptedAggressiveNATTraversal(0),
_latency(0xffff),
_latency(-1),
_alivePathCount(0),
_vProto(0),
_vMajor(0),
_vMinor(0),
_vRevision(0)
{
Utils::memoryLock(_key,sizeof(_key));
Utils::memoryLock(_identityKey,sizeof(_identityKey));
}
Peer::~Peer()
{
Utils::memoryUnlock(_identityKey,sizeof(_identityKey));
Utils::burn(_identityKey,sizeof(_identityKey));
}
bool Peer::init(const Identity &peerIdentity)
@ -61,9 +58,9 @@ bool Peer::init(const Identity &peerIdentity)
if (_id == peerIdentity)
return true;
_id = peerIdentity;
if (!RR->identity.agree(peerIdentity,_key))
if (!RR->identity.agree(peerIdentity,_identityKey))
return false;
_incomingProbe = Protocol::createProbe(_id,RR->identity,_key);
_incomingProbe = Protocol::createProbe(_id,RR->identity,_identityKey);
return true;
}
@ -77,138 +74,117 @@ void Peer::received(
const Protocol::Verb inReVerb)
{
const int64_t now = RR->node->now();
_lastReceive = now;
_inMeter.log(now,payloadLength);
if (hops == 0) {
_lock.rlock();
for(int i=0;i<(int)_alivePathCount;++i) {
RWMutex::RMaybeWLock l(_lock);
// If this matches an existing path, skip path learning stuff.
for (unsigned int i=0;i<_alivePathCount;++i) {
if (_paths[i] == path) {
_lock.runlock();
goto path_check_done;
return;
}
}
_lock.runlock();
if (verb == Protocol::VERB_OK) {
RWMutex::Lock l(_lock);
// If we made it here, we don't already know this path.
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
if (verb == Protocol::VERB_OK) {
l.writing();
int64_t lastReceiveTimeMax = 0;
int lastReceiveTimeMaxAt = 0;
for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if ((_paths[i]->address().family() == path->address().family()) &&
(_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
(_paths[i]->address().ipsEqual2(path->address()))) {
// Replace older path if everything is the same except the port number.
_paths[i] = path;
goto path_check_done;
} else {
if (_paths[i]) {
if (_paths[i]->lastIn() > lastReceiveTimeMax) {
// If the path list is full, replace the least recently active path.
unsigned int newPathIdx = 0;
if (_alivePathCount >= ZT_MAX_PEER_NETWORK_PATHS) {
int64_t lastReceiveTimeMax = 0;
for (unsigned int i=0;i<_alivePathCount;++i) {
if ((_paths[i]->address().family() == path->address().family()) &&
(_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
(_paths[i]->address().ipsEqual2(path->address()))) {
// Replace older path if everything is the same except the port number, since NAT/firewall reboots
// and other wacky stuff can change port number assignments.
_paths[i] = path;
return;
} else if (_paths[i]->lastIn() > lastReceiveTimeMax) {
lastReceiveTimeMax = _paths[i]->lastIn();
lastReceiveTimeMaxAt = i;
newPathIdx = i;
}
} else {
lastReceiveTimeMax = 0x7fffffffffffffffLL;
lastReceiveTimeMaxAt = i;
}
} else {
newPathIdx = _alivePathCount++;
}
}
_lastPrioritizedPaths = now;
InetAddress old;
if (_paths[lastReceiveTimeMaxAt])
old = _paths[lastReceiveTimeMaxAt]->address();
_paths[lastReceiveTimeMaxAt] = path;
_bootstrap = Endpoint(path->address());
_prioritizePaths(now);
RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
} else {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
InetAddress old;
if (_paths[newPathIdx])
old = _paths[newPathIdx]->address();
_paths[newPathIdx] = path;
_prioritizePaths(now);
Endpoint pathEndpoint(path->address());
_bootstrap[pathEndpoint.type()] = pathEndpoint;
RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
} else {
path->sent(now,hello(tPtr,path->localSocket(),path->address(),now));
RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id,ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
path->sent(now,sendHELLO(tPtr,path->localSocket(),path->address(),now));
}
}
}
path_check_done:
if ((now - _lastAttemptedP2PInit) >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
} else if ((now - _lastAttemptedP2PInit) >= ZT_DIRECT_CONNECT_ATTEMPT_INTERVAL) {
_lastAttemptedP2PInit = now;
InetAddress addr;
if ((_bootstrap.type() == Endpoint::TYPE_INETADDR_V4)||(_bootstrap.type() == Endpoint::TYPE_INETADDR_V6)) {
RR->t->tryingNewPath(tPtr,0x0a009444,_id,_bootstrap.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
sendHELLO(tPtr,-1,_bootstrap.inetAddr(),now);
} if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr)) {
RR->t->tryingNewPath(tPtr,0x84a10000,_id,_bootstrap.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
sendHELLO(tPtr,-1,addr,now);
}
}
std::vector<ZT_InterfaceAddress> localInterfaceAddresses(RR->node->localInterfaceAddresses());
std::multimap<unsigned long,InetAddress> detectedAddresses(RR->sa->externalAddresses(now));
std::set<InetAddress> addrs;
// Addresses assigned to local system interfaces (as configured via the API).
std::vector<ZT_InterfaceAddress> localInterfaceAddresses(RR->node->localInterfaceAddresses());
for(std::vector<ZT_InterfaceAddress>::const_iterator i(localInterfaceAddresses.begin());i!=localInterfaceAddresses.end();++i)
addrs.insert(asInetAddress(i->address));
// We also advertise IPs reported to us by our peers in OK(HELLO) replies.
std::multimap<unsigned long,InetAddress> detectedAddresses(RR->sa->externalAddresses(now));
for(std::multimap<unsigned long,InetAddress>::const_reverse_iterator i(detectedAddresses.rbegin());i!=detectedAddresses.rend();++i) {
if (i->first <= 1)
break;
if (addrs.count(i->second) == 0) {
addrs.insert(i->second);
break;
}
if (i->first <= 1)
break;
}
if (!addrs.empty()) {
#if 0
ScopedPtr<Packet> outp(new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS));
outp->addSize(2); // leave room for count
unsigned int count = 0;
for(std::set<InetAddress>::iterator a(addrs.begin());a!=addrs.end();++a) {
uint8_t addressType = 4;
uint8_t addressLength = 6;
unsigned int ipLength = 4;
const void *rawIpData = nullptr;
uint16_t port = 0;
switch(a->ss_family) {
case AF_INET:
rawIpData = &(reinterpret_cast<const sockaddr_in *>(&(*a))->sin_addr.s_addr);
port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in *>(&(*a))->sin_port);
break;
case AF_INET6:
rawIpData = reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_addr.s6_addr;
port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_port);
addressType = 6;
addressLength = 18;
ipLength = 16;
break;
default:
continue;
}
outp->append((uint8_t)0); // no flags
outp->append((uint16_t)0); // no extensions
outp->append(addressType);
outp->append(addressLength);
outp->append(rawIpData,ipLength);
outp->append(port);
++count;
if (outp->size() >= (ZT_PROTO_MAX_PACKET_LENGTH - 32))
break;
}
if (count > 0) {
outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
outp->compress();
outp->armor(_key,true);
path->send(RR,tPtr,outp->data(),outp->size(),now);
}
#endif
// TODO
}
}
}
unsigned int Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
void Peer::send(void *const tPtr,const int64_t now,const void *const data,const unsigned int len,const SharedPtr<Path> &via) noexcept
{
via->send(RR,tPtr,data,len,now);
sent(now,len);
}
void Peer::send(void *const tPtr,const int64_t now,const void *const data,const unsigned int len) noexcept
{
SharedPtr<Path> via(this->path(now));
if (via) {
via->send(RR,tPtr,data,len,now);
} else {
const SharedPtr<Peer> root(RR->topology->root());
if ((root)&&(root.ptr() != this)) {
via = root->path(now);
if (via) {
via->send(RR,tPtr,data,len,now);
root->relayed(now,len);
} else {
return;
}
} else {
return;
}
}
sent(now,len);
}
unsigned int Peer::hello(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
#if 0
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);
@ -232,7 +208,7 @@ unsigned int Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddr
#endif
}
unsigned int Peer::sendNOP(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
unsigned int Peer::sendNOP(void *const tPtr,const int64_t localSocket,const InetAddress &atAddress,const int64_t now)
{
Buf outp;
Protocol::Header &ph = outp.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
@ -241,36 +217,73 @@ unsigned int Peer::sendNOP(void *tPtr,const int64_t localSocket,const InetAddres
RR->identity.address().copyTo(ph.source);
ph.flags = 0;
ph.verb = Protocol::VERB_NOP;
Protocol::armor(outp,sizeof(Protocol::Header),_key,this->cipher());
Protocol::armor(outp,sizeof(Protocol::Header),_identityKey,this->cipher());
RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
return sizeof(Protocol::Header);
}
void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
{
RWMutex::RLock l(_lock);
RWMutex::Lock l(_lock);
_lastPrioritizedPaths = now;
_prioritizePaths(now);
if (_alivePathCount > 0) {
for (unsigned int i = 0; i < _alivePathCount; ++i) {
_paths[i]->sent(now,sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
if (!pingAllAddressTypes)
return;
}
return;
bool needHello = false;
if ((now - _lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
_lastSentHello = now;
needHello = true;
}
if ((_bootstrap.type() == Endpoint::TYPE_INETADDR_V4)||(_bootstrap.type() == Endpoint::TYPE_INETADDR_V6))
sendHELLO(tPtr,-1,_bootstrap.inetAddr(),now);
_prioritizePaths(now);
SharedPtr<Peer> r(RR->topology->root());
if ((r)&&(r.ptr() != this)) {
SharedPtr<Path> rp(r->path(now));
if (rp) {
rp->sent(now,sendHELLO(tPtr,rp->localSocket(),rp->address(),now));
for(unsigned int i=0;i<_alivePathCount;++i) {
if (needHello) {
needHello = false;
const unsigned int bytes = hello(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
_paths[i]->sent(now,bytes);
sent(now,bytes);
} else if ((now - _paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
_paths[i]->send(RR,tPtr,&now,1,now);
sent(now,1);
}
// TODO: when we merge multipath we'll keep one open per interface to non-roots.
// For roots we try to keep every path open.
if (!isRoot)
return;
}
if (needHello) {
// Try any statically configured addresses.
InetAddress addr;
if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr)) {
RR->t->tryingNewPath(tPtr,0x84a10000,_id,addr,InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
hello(tPtr,-1,addr,now);
}
}
if (!_bootstrap.empty()) {
if (isRoot) {
// Try all bootstrap addresses if this is a root.
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
hello(tPtr,-1,i->second.inetAddr(),now);
}
}
} else {
// Otherwise try a random bootstrap address.
unsigned int tryAtIndex = (unsigned int)Utils::random() % (unsigned int)_bootstrap.size();
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
if (tryAtIndex > 0) {
--tryAtIndex;
} else {
if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
hello(tPtr,-1,i->second.inetAddr(),now);
}
}
}
}
}
}
}
@ -278,52 +291,21 @@ void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
{
RWMutex::RLock l(_lock);
for(unsigned int i=0; i < _alivePathCount; ++i) {
if ((_paths[i])&&((_paths[i]->address().family() == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope))) {
_paths[i]->sent(now,sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
}
for(unsigned int i=0;i<_alivePathCount;++i) {
if ((_paths[i])&&((_paths[i]->address().family() == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope)))
_paths[i]->sent(now,sendNOP(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
}
}
void Peer::updateLatency(const unsigned int l) noexcept
{
if ((l > 0)&&(l < 0xffff)) {
unsigned int lat = _latency;
if (lat < 0xffff) {
_latency = (l + l + lat) / 3;
} else {
_latency = l;
}
}
}
SharedPtr<Path> Peer::path(const int64_t now)
bool Peer::directlyConnected(int64_t now)
{
if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
_lastPrioritizedPaths = now;
RWMutex::Lock l(_lock);
_prioritizePaths(now);
if (_alivePathCount == 0)
return SharedPtr<Path>();
return _paths[0];
return _alivePathCount > 0;
} else {
RWMutex::RLock l(_lock);
if (_alivePathCount == 0)
return SharedPtr<Path>();
return _paths[0];
}
}
bool Peer::direct(const int64_t now)
{
if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
_lastPrioritizedPaths = now;
RWMutex::Lock l(_lock);
_prioritizePaths(now);
return (_alivePathCount > 0);
} else {
RWMutex::RLock l(_lock);
return (_alivePathCount > 0);
return _alivePathCount > 0;
}
}
@ -336,26 +318,21 @@ void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
void Peer::save(void *tPtr) const
{
uint8_t *const buf = (uint8_t *)malloc(8 + ZT_PEER_MARSHAL_SIZE_MAX); // NOLINT(hicpp-use-auto,modernize-use-auto)
if (!buf) return;
uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];
// Prefix each saved peer with the current timestamp.
Utils::storeBigEndian<uint64_t>(buf,(uint64_t)RR->node->now());
_lock.rlock();
const int len = marshal(buf + 8);
_lock.runlock();
if (len > 0) {
uint64_t id[2];
id[0] = _id.address().toInt();
id[1] = 0;
RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
}
free(buf);
}
void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
void Peer::tryToContactAt(void *const tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
{
static uint8_t junk = 0;
@ -376,7 +353,7 @@ void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool bf
// If the peer indicates that they may be behind a symmetric NAT and there are no
// living direct paths, try a few more aggressive things.
if ((ep.inetAddr().family() == AF_INET) && (!direct(now))) {
if ((ep.inetAddr().family() == AF_INET) && (!directlyConnected(now))) {
unsigned int port = ep.inetAddr().port();
if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
// If the other side is using a low-numbered port and has elected to
@ -422,12 +399,14 @@ void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool bf
}
// Start alarms going off to actually send these...
RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
}
void Peer::alarm(void *tPtr,const int64_t now)
{
// Right now alarms are only used for multi-phase or multi-step NAT traversal operations.
// Pop one contact queue item and also clean the queue of any that are no
// longer applicable because the alive path count has exceeded their threshold.
bool stillHaveContactQueueItems;
@ -459,7 +438,7 @@ void Peer::alarm(void *tPtr,const int64_t now)
}
if (_vProto >= 11) {
uint64_t outgoingProbe = Protocol::createProbe(RR->identity,_id,_key);
uint64_t outgoingProbe = Protocol::createProbe(RR->identity,_id,_identityKey);
if (qi.ports.empty()) {
RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
} else {
@ -480,7 +459,7 @@ void Peer::alarm(void *tPtr,const int64_t now)
}
if (stillHaveContactQueueItems)
RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
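The BFG1024 branch referenced above is, in concept, a spray of probes across the privileged port range when the remote appears to be behind a symmetric NAT on a port below 1024. A hedged sketch of the idea only (this commit elides the actual loop):

// Conceptual sketch of BFG1024-style port spraying (not this commit's exact code).
InetAddress target(qi.address);
for(unsigned int port=1;port<1024;++port) {
    target.setPort(port);
    RR->node->putPacket(tPtr,-1,target,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
}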
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
@ -493,8 +472,8 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
// code can check this address and not use this cached key if the local identity has
// changed. In that case agreement must be executed again.
RR->identity.address().copyTo(data + 1);
RR->localCacheSymmetric.encrypt(_key,data + 6);
RR->localCacheSymmetric.encrypt(_key + 16,data + 22);
RR->localCacheSymmetric.encrypt(_identityKey,data + 6);
RR->localCacheSymmetric.encrypt(_identityKey + 16,data + 22);
RWMutex::RLock l(_lock);
@ -502,14 +481,19 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
if (s <= 0)
return s;
int p = s + 38;
s = _locator.marshal(data + p);
if (s <= 0)
return s;
p += s;
s = _bootstrap.marshal(data + p);
if (s <= 0)
return s;
p += s;
data[p++] = (uint8_t)_bootstrap.size();
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
s = i->second.marshal(data + p);
if (s <= 0)
return s;
p += s;
}
Utils::storeBigEndian(data + p,(uint16_t)_vProto);
p += 2;
@ -538,8 +522,8 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
return -1;
if (Address(data + 1) == RR->identity.address()) {
RR->localCacheSymmetric.decrypt(data + 6,_key);
RR->localCacheSymmetric.decrypt(data + 22,_key + 16);
RR->localCacheSymmetric.decrypt(data + 6,_identityKey);
RR->localCacheSymmetric.decrypt(data + 22,_identityKey + 16);
mustRecomputeSecret = false;
} else {
mustRecomputeSecret = true; // can't use cached key if local identity has changed
@ -553,22 +537,29 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
if (s <= 0)
return s;
p += s;
s = _bootstrap.unmarshal(data + p,len - p);
if (s <= 0)
return s;
p += s;
if (p >= len)
return -1;
const unsigned int bootstrapCount = data[p++];
if (bootstrapCount > ZT_MAX_PEER_NETWORK_PATHS)
return -1;
_bootstrap.clear();
for(unsigned int i=0;i<bootstrapCount;++i) {
Endpoint tmp;
s = tmp.unmarshal(data + p,len - p);
if (s <= 0)
return s;
p += s;
_bootstrap[tmp.type()] = tmp;
}
if ((p + 10) > len)
return -1;
_vProto = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
_vProto = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
_vMajor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
_vMinor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
_vRevision = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
@ -576,28 +567,41 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
}
if (mustRecomputeSecret) {
if (!RR->identity.agree(_id,_key))
if (!RR->identity.agree(_id,_identityKey))
return -1;
}
_incomingProbe = Protocol::createProbe(_id,RR->identity,_key);
_incomingProbe = Protocol::createProbe(_id,RR->identity,_identityKey);
return p;
}
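Reading marshal() and unmarshal() together, the cached peer record now has roughly this layout (a derived sketch; the leading byte and exact field order are inferred from the offsets above, and save() additionally prefixes the record with an 8-byte timestamp):

// Approximate serialized peer layout (inferred sketch):
//   [0]       format byte
//   [1..5]    local identity address, so a cached key is discarded if our identity changed
//   [6..37]   the 32-byte identity key encrypted as two 16-byte blocks with localCacheSymmetric
//   [38..]    peer identity, then locator, then a 1-byte bootstrap endpoint count followed by
//             that many marshaled Endpoints, four 16-bit version fields, and a 16-bit
//             length-prefixed extension area.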
struct _PathPriorityComparisonOperator
{
ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const noexcept
{
// Sort in order of last received time for receipt of anything over path, which prioritizes
// paths by aliveness. This will go away when we merge in multipath in favor of something
// much smarter.
return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() > b->lastIn())) );
}
};
void Peer::_prioritizePaths(const int64_t now)
{
// assumes _lock is locked for writing
_lastPrioritizedPaths = now;
std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());
for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if ((!_paths[i]) || (!_paths[i]->alive(now))) {
_alivePathCount = i;
for(;i<ZT_MAX_PEER_NETWORK_PATHS;++i)
_paths[i].zero();
return;
break;
}
}
}

View file

@ -30,9 +30,11 @@
#include <vector>
#include <list>
#include <set>
#include <map>
// version, identity, locator, bootstrap, version info, length of any additional fields
#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_ADDRESS_LENGTH + ZT_PEER_SECRET_KEY_LENGTH + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + ZT_INETADDRESS_MARSHAL_SIZE_MAX + (2*4) + 2)
#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_ADDRESS_LENGTH + ZT_PEER_SECRET_KEY_LENGTH + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 1 + (ZT_MAX_PEER_NETWORK_PATHS * ZT_ENDPOINT_MARSHAL_SIZE_MAX) + (2*4) + 2)
namespace ZeroTier {
@ -57,11 +59,7 @@ public:
*/
explicit Peer(const RuntimeEnvironment *renv);
ZT_INLINE ~Peer()
{
Utils::memoryUnlock(_key,sizeof(_key));
Utils::burn(_key,sizeof(_key));
}
~Peer();
/**
* Initialize peer with an identity
@ -113,9 +111,74 @@ public:
Protocol::Verb inReVerb);
/**
* Send a HELLO to this peer at a specified physical address
* Log sent data
*
* No statistics or sent times are updated here.
* @param now Current time
* @param bytes Number of bytes written
*/
ZT_INLINE void sent(const int64_t now,const unsigned int bytes) noexcept
{
_lastSend = now;
_outMeter.log(now,bytes);
}
/**
* Called when traffic destined for a different peer is sent to this one
*
* @param now Current time
* @param bytes Number of bytes relayed
*/
ZT_INLINE void relayed(const int64_t now,const unsigned int bytes) noexcept
{
_relayedMeter.log(now,bytes);
}
/**
* Get the current best direct path or NULL if none
*
* @return Current best path or NULL if there is no direct path
*/
ZT_INLINE SharedPtr<Path> path(const int64_t now) noexcept
{
if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
RWMutex::Lock l(_lock);
_prioritizePaths(now);
if (_alivePathCount > 0)
return _paths[0];
} else {
RWMutex::RLock l(_lock);
if (_alivePathCount > 0)
return _paths[0];
}
return SharedPtr<Path>();
}
/**
* Send data to this peer over a specific path only
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param now Current time
* @param data Data to send
* @param len Length in bytes
* @param via Path over which to send data (may or may not be an already-learned path for this peer)
*/
void send(void *tPtr,int64_t now,const void *data,unsigned int len,const SharedPtr<Path> &via) noexcept;
/**
* Send data to this peer over the best available path
*
* If there is a working direct path it will be used. Otherwise the data will be
* sent via a root server.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param now Current time
* @param data Data to send
* @param len Length in bytes
*/
void send(void *tPtr,int64_t now,const void *data,unsigned int len) noexcept;
/**
* Send a HELLO to this peer at a specified physical address.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param localSocket Local source socket
@ -123,7 +186,7 @@ public:
* @param now Current time
* @return Number of bytes sent
*/
unsigned int sendHELLO(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
unsigned int hello(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
/**
* Send a NOP message to e.g. probe a new link
@ -137,13 +200,13 @@ public:
unsigned int sendNOP(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
/**
* Send ping to this peer
* Ping this peer if needed and/or perform other periodic tasks.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param now Current time
* @param pingAllAddressTypes If true, try to keep a link up for each address type/family
* @param isRoot True if this peer is a root
*/
void ping(void *tPtr,int64_t now,bool pingAllAddressTypes);
void pulse(void *tPtr,int64_t now,bool isRoot);
/**
* Reset paths within a given IP scope and address family
@ -161,21 +224,18 @@ public:
void resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now);
/**
* Update peer latency information
*
* This is called from packet parsing code.
* Update peer latency with a new measurement.
*
* @param measurement New latency measurement (in milliseconds)
*/
void updateLatency(unsigned int l) noexcept;
/**
* @return Bootstrap address or NULL if none
*/
ZT_INLINE const Endpoint &bootstrap() const noexcept
ZT_INLINE void updateLatency(const unsigned int measurement) noexcept
{
RWMutex::RLock l(_lock);
return _bootstrap;
int l = _latency;
if (l > 0) {
_latency = (l + (int)measurement) / 2;
} else {
_latency = (int)measurement;
}
}
/**
@ -186,7 +246,7 @@ public:
ZT_INLINE void setBootstrap(const Endpoint &ep) noexcept
{
RWMutex::Lock l(_lock);
_bootstrap = ep;
_bootstrap[ep.type()] = ep;
}
/**
@ -194,16 +254,6 @@ public:
*/
ZT_INLINE int64_t lastReceive() const noexcept { return _lastReceive; }
/**
* @return True if we've heard from this peer in less than ZT_PEER_ALIVE_TIMEOUT
*/
ZT_INLINE bool alive(const int64_t now) const noexcept { return ((now - _lastReceive) < ZT_PEER_ALIVE_TIMEOUT); }
/**
* @return True if we've heard from this peer in less than ZT_PEER_ACTIVITY_TIMEOUT
*/
ZT_INLINE bool active(const int64_t now) const noexcept { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
/**
* @return Latency in milliseconds of best/aggregate path or 0xffff if unknown
*/
@ -212,7 +262,7 @@ public:
/**
* @return 256-bit secret symmetric encryption key
*/
ZT_INLINE const unsigned char *key() const noexcept { return _key; }
ZT_INLINE const unsigned char *key() const noexcept { return _identityKey; }
/**
* @return Preferred cipher suite for normal encrypted P2P communication
@ -224,7 +274,7 @@ public:
/**
* @return Incoming probe packet (in big-endian byte order)
0 */
*/
ZT_INLINE uint64_t incomingProbe() const noexcept { return _incomingProbe; }
/**
@ -249,63 +299,10 @@ public:
ZT_INLINE unsigned int remoteVersionRevision() const noexcept { return _vRevision; }
ZT_INLINE bool remoteVersionKnown() const noexcept { return ((_vMajor > 0) || (_vMinor > 0) || (_vRevision > 0)); }
/**
* Rate limit gate for inbound WHOIS requests
*/
ZT_INLINE bool rateGateInboundWhoisRequest(const int64_t now) noexcept
{
if ((now - _lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
_lastWhoisRequestReceived = now;
return true;
}
return false;
}
/**
* Rate limit gate for inbound PUSH_DIRECT_PATHS requests
*/
ZT_INLINE bool rateGateInboundPushDirectPaths(const int64_t now) noexcept
{
if ((now - _lastPushDirectPathsReceived) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
_lastPushDirectPathsReceived = now;
return true;
}
return false;
}
/**
* Rate limit attempts in response to incoming short probe packets
*/
ZT_INLINE bool rateGateInboundProbe(const int64_t now) noexcept
{
if ((now - _lastProbeReceived) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
_lastProbeReceived = now;
return true;
}
return false;
}
/**
* Rate limit gate for inbound ECHO requests
*/
ZT_INLINE bool rateGateEchoRequest(const int64_t now) noexcept
{
if ((now - _lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
_lastEchoRequestReceived = now;
return true;
}
return false;
}
/**
* @return Current best path
*/
SharedPtr<Path> path(int64_t now);
/**
* @return True if there is at least one alive direct path
*/
bool direct(int64_t now);
bool directlyConnected(int64_t now);
/**
* Get all paths
@ -327,7 +324,7 @@ public:
* @param now Current time
* @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
*/
void contact(void *tPtr,const Endpoint &ep,int64_t now,bool bfg1024);
void tryToContactAt(void *tPtr,const Endpoint &ep,int64_t now,bool bfg1024);
/**
* Called by Node when an alarm set by this peer goes off
@ -343,40 +340,95 @@ public:
int marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *restrict data,int len) noexcept;
/**
* Rate limit gate for inbound WHOIS requests
*/
ZT_INLINE bool rateGateInboundWhoisRequest(const int64_t now) noexcept
{
if ((now - _lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
_lastWhoisRequestReceived = now;
return true;
}
return false;
}
/**
* Rate limit gate for inbound PUSH_DIRECT_PATHS requests
*/
ZT_INLINE bool rateGateInboundPushDirectPaths(const int64_t now) noexcept
{
if ((now - _lastPushDirectPathsReceived) >= ZT_DIRECT_CONNECT_ATTEMPT_INTERVAL) {
_lastPushDirectPathsReceived = now;
return true;
}
return false;
}
/**
* Rate limit attempts in response to incoming short probe packets
*/
ZT_INLINE bool rateGateInboundProbe(const int64_t now) noexcept
{
if ((now - _lastProbeReceived) >= ZT_DIRECT_CONNECT_ATTEMPT_INTERVAL) {
_lastProbeReceived = now;
return true;
}
return false;
}
/**
* Rate limit gate for inbound ECHO requests
*/
ZT_INLINE bool rateGateEchoRequest(const int64_t now) noexcept
{
if ((now - _lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
_lastEchoRequestReceived = now;
return true;
}
return false;
}
private:
void _prioritizePaths(int64_t now);
uint8_t _key[ZT_PEER_SECRET_KEY_LENGTH];
// The long-lived identity key resulting from agreement between our identity and this peer's identity.
uint8_t _identityKey[ZT_PEER_SECRET_KEY_LENGTH];
// Read/write mutex for non-atomic non-const fields.
RWMutex _lock;
const RuntimeEnvironment *RR;
// The last time various things happened, for rate limiting and periodic events.
std::atomic<int64_t> _lastReceive;
std::atomic<int64_t> _lastSend;
int64_t _lastSentHello; // only checked while locked
std::atomic<int64_t> _lastWhoisRequestReceived;
std::atomic<int64_t> _lastEchoRequestReceived;
std::atomic<int64_t> _lastPushDirectPathsReceived;
std::atomic<int64_t> _lastProbeReceived;
std::atomic<int64_t> _lastAttemptedP2PInit;
std::atomic<int64_t> _lastTriedStaticPath;
std::atomic<int64_t> _lastPrioritizedPaths;
std::atomic<int64_t> _lastAttemptedAggressiveNATTraversal;
// Latency in milliseconds
std::atomic<unsigned int> _latency;
// Meters measuring actual bandwidth in, out, and relayed via this peer (mostly if this is a root).
Meter<> _inMeter;
Meter<> _outMeter;
Meter<> _relayedMeter;
// For SharedPtr<>
std::atomic<int> __refCount;
// Read/write mutex for non-atomic non-const fields.
RWMutex _lock;
// Milliseconds of latency over best path or -1 if unknown.
std::atomic<int> _latency;
// Number of paths currently alive as of the last _prioritizePaths
unsigned int _alivePathCount;
// Direct paths sorted in descending order of preference (can be NULL, if first is NULL there's no direct path)
// Direct paths sorted in descending order of preference.
SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
// Queue of batches of one or more physical addresses to try at some point in the future (for NAT traversal logic)
// Number of paths currently alive (number of non-NULL entries in _paths).
unsigned int _alivePathCount;
// Queue of batches of one or more physical addresses to try at some point in the future (for NAT traversal logic).
struct _ContactQueueItem
{
ZT_INLINE _ContactQueueItem() {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
@ -394,10 +446,12 @@ private:
};
std::list<_ContactQueueItem> _contactQueue;
// Remembered addresses by endpoint type (std::map is smaller for only a few keys).
std::map< Endpoint::Type,Endpoint > _bootstrap;
Identity _id;
uint64_t _incomingProbe;
Locator _locator;
Endpoint _bootstrap; // right now only InetAddress endpoints are supported for bootstrap
uint16_t _vProto;
uint16_t _vMajor;

View file

@ -45,8 +45,6 @@ class Revocation : public Credential
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_REVOCATION; }
ZT_INLINE Revocation() noexcept { memoryZero(this); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
/**

View file

@ -18,8 +18,6 @@
#include "Peer.hpp"
#include "Trace.hpp"
#include <cstdlib>
#include <cstring>
#include <set>
// Entry timeout -- make it fairly long since this is just to prevent stale buildup
@ -69,7 +67,7 @@ void SelfAwareness::iam(void *tPtr,const Identity &reporter,const int64_t receiv
// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
// due to multiple reports of endpoint change.
// Don't use 'entry' after this since hash table gets modified.
for(Map<PhySurfaceKey,PhySurfaceEntry>::iterator i(_phy.begin());i!=_phy.end();) {
for(Map<PhySurfaceKey,PhySurfaceEntry>::iterator i(_phy.begin());i!=_phy.end();) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
if ((i->first.scope == scope)&&(i->first.reporterPhysicalAddress != reporterPhysicalAddress))
_phy.erase(i++);
else ++i;
@ -91,7 +89,7 @@ void SelfAwareness::iam(void *tPtr,const Identity &reporter,const int64_t receiv
void SelfAwareness::clean(int64_t now)
{
Mutex::Lock l(_phy_l);
for(Map<PhySurfaceKey,PhySurfaceEntry>::iterator i(_phy.begin());i!=_phy.end();) {
for(Map<PhySurfaceKey,PhySurfaceEntry>::iterator i(_phy.begin());i!=_phy.end();) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
if ((now - i->second.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
_phy.erase(i++);
else ++i;
@ -105,13 +103,13 @@ std::multimap<unsigned long,InetAddress> SelfAwareness::externalAddresses(const
{
Mutex::Lock l(_phy_l);
for(Map<PhySurfaceKey,PhySurfaceEntry>::const_iterator i(_phy.begin());i!=_phy.end();++i) {
for(Map<PhySurfaceKey,PhySurfaceEntry>::const_iterator i(_phy.begin());i!=_phy.end();++i) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
if ((now - i->second.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
++counts[i->second.mySurface];
}
}
for(Map<InetAddress,unsigned long>::iterator i(counts.begin());i!=counts.end();++i)
for(Map<InetAddress,unsigned long>::iterator i(counts.begin());i!=counts.end();++i) // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
r.insert(std::pair<unsigned long,InetAddress>(i->second,i->first));
return r;

View file

@ -25,9 +25,8 @@ namespace ZeroTier {
* Speck does not specify a mandatory endian-ness. This implementation is
* little-endian for higher performance on the majority of platforms.
*
* This is only used as part of the work function for V1 identity generation
* and for the built-in secure random source on systems that lack AES
* hardware acceleration.
* Right now this is only used as part of the PoW function for V1 identity
* generation.
*
* @tparam R Number of rounds (default: 32)
*/

View file

@ -53,8 +53,6 @@ class Tag : public Credential
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_TAG; }
ZT_INLINE Tag() noexcept { memoryZero(this); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
/**

View file

@ -60,13 +60,6 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
try {
// Handle 8-byte short probes, which are used as a low-bandwidth way to initiate a real handshake.
// These are only minimally "secure" in the sense that they are unique per graph edge (sender->recipient)
// to within 1/2^64 but can easily be replayed. We rate limit this to prevent ZeroTier being used as
// a vector in DDOS amplification attacks, then send a larger fully authenticated message to initiate
// a handshake. We do not send HELLO since we don't want this to be a vector for third parties to
// mass-probe for ZeroTier nodes and obtain all of the information in a HELLO. This isn't a huge risk
// but we might as well avoid it. When the peer receives NOP on a path that hasn't been handshaked yet
// it will send its own HELLO to which we will respond with a fully encrypted OK(HELLO).
if (len == ZT_PROTO_PROBE_LENGTH) {
const SharedPtr<Peer> peer(RR->topology->peerByProbe(data->lI64(0)));
if ((peer)&&(peer->rateGateInboundProbe(now)))
@ -819,7 +812,7 @@ bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Pee
case 16:
if ((int)(sizeof(Protocol::RENDEZVOUS) + rdv.addressLength) <= packetSize) {
const InetAddress atAddr(pkt.unsafeData + sizeof(Protocol::RENDEZVOUS),rdv.addressLength,port);
peer->contact(tPtr,Endpoint(atAddr),now,false);
peer->tryToContactAt(tPtr,Endpoint(atAddr),now,false);
RR->t->tryingNewPath(tPtr,0x55a19aaa,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->identity(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
}
break;
@ -831,7 +824,7 @@ bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Pee
switch (ep.type()) {
case Endpoint::TYPE_INETADDR_V4:
case Endpoint::TYPE_INETADDR_V6:
peer->contact(tPtr,ep,now,false);
peer->tryToContactAt(tPtr,ep,now,false);
RR->t->tryingNewPath(tPtr,0x55a19aab,with->identity(),ep.inetAddr(),path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->identity(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
break;
default:

View file

@ -11,6 +11,8 @@
*/
/****/
// TODO: roots will need to PUSH_DIRECT_PATHS to make sure peers know both their IPv4 and IPv6 addresses.
/*
* This is a high-throughput minimal root server. It implements only
* those functions of a ZT node that a root must perform and does so