Clean up some NAT traversal code and modify the algorithm to eliminate the need for toggleable options.

Adam Ierymenko 2020-05-31 15:11:47 -07:00
parent dcc686a3a7
commit 20ae12d385
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
8 changed files with 262 additions and 229 deletions

Constants.hpp

@@ -137,17 +137,17 @@
 /**
  * Maximum number of queued endpoints to try per "pulse."
  */
-#define ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE 4
+#define ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE 16
 
 /**
  * Delay between calls to the pulse() method in Peer for each peer
  */
-#define ZT_PEER_PULSE_INTERVAL (ZT_PATH_KEEPALIVE_PERIOD / 2)
+#define ZT_PEER_PULSE_INTERVAL 8000
 
 /**
  * Interval between HELLOs to peers.
  */
-#define ZT_PEER_HELLO_INTERVAL 120000LL
+#define ZT_PEER_HELLO_INTERVAL 120000
 
 /**
  * Timeout for peers being alive
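
Note: the new constants bound the worst-case sweep rate. A back-of-envelope estimate, assuming only the two values above:

    // 1023 privileged ports at up to 16 probes per visit, with at least one
    // visit per 8000 ms pulse, means a full sweep of one target spans at most
    // ceil(1023 / 16) = 64 pulses, i.e. roughly 64 * 8 s, about 8.5 minutes;
    // possibly less, since an item can be revisited within a single pulse
    // when the try queue is otherwise empty. The old code sent all 1023
    // probes in one burst per attempt.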

Endpoint.hpp

@@ -123,6 +123,38 @@ public:
         }
     }
 
+    /**
+     * Check whether this endpoint's address is the same as another.
+     *
+     * Right now this checks whether IPs are equal if both are IP based endpoints.
+     * Otherwise it checks for simple equality.
+     *
+     * @param ep Endpoint to check
+     * @return True if endpoints seem to refer to the same address/host
+     */
+    ZT_INLINE bool isSameAddress(const Endpoint &ep) const noexcept
+    {
+        switch (this->type) {
+            case ZT_ENDPOINT_TYPE_IP:
+            case ZT_ENDPOINT_TYPE_IP_UDP:
+            case ZT_ENDPOINT_TYPE_IP_TCP:
+            case ZT_ENDPOINT_TYPE_IP_HTTP2:
+                switch(ep.type) {
+                    case ZT_ENDPOINT_TYPE_IP:
+                    case ZT_ENDPOINT_TYPE_IP_UDP:
+                    case ZT_ENDPOINT_TYPE_IP_TCP:
+                    case ZT_ENDPOINT_TYPE_IP_HTTP2:
+                        return ip().ipsEqual(ep.ip());
+                    default:
+                        break;
+                }
+                break;
+            default:
+                break;
+        }
+        return (*this) == ep;
+    }
+
     /**
      * Get InetAddress if this type uses IPv4 or IPv6 addresses (undefined otherwise)
      *
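
Note: a brief illustration of the intended semantics (hypothetical values; in ZeroTier, InetAddress::ipsEqual() compares only the IP portion, not the port):

    // Hypothetical example: two IP-based endpoints for the same host.
    //   Endpoint a -> 192.0.2.10:9993 over UDP (ZT_ENDPOINT_TYPE_IP_UDP)
    //   Endpoint b -> 192.0.2.10:443  over TCP (ZT_ENDPOINT_TYPE_IP_TCP)
    // a.isSameAddress(b) -> true  (same IP; port and transport are ignored)
    // a == b             -> false (full equality also compares type and port)

This is what the try-queue de-duplication in Peer::contact() wants: one queue item per host, regardless of which transport or port was advertised.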

Node.cpp

@@ -77,7 +77,6 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
     m_lastHousekeepingRun(0),
     m_lastNetworkHousekeepingRun(0),
     m_now(now),
-    m_natMustDie(true),
     m_online(false)
 {
     // Load this node's identity.
@@ -112,9 +111,27 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
         stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int) strlen(RR->publicIdentityStr));
     }
 
+    // 2X hash our identity private key(s) to obtain a symmetric key for encrypting
+    // locally cached data at rest (as a defense in depth measure). This is not used
+    // for any network level encryption or authentication.
     uint8_t tmph[ZT_SHA384_DIGEST_SIZE];
     RR->identity.hashWithPrivate(tmph);
+    SHA384(tmph, tmph, ZT_SHA384_DIGEST_SIZE);
     RR->localCacheSymmetric.init(tmph);
+    Utils::burn(tmph, ZT_SHA384_DIGEST_SIZE);
+
+    // Generate a random sort order for privileged ports for use in NAT-t algorithms.
+    for (unsigned int i=0;i<1023;++i)
+        RR->randomPrivilegedPortOrder[i] = (uint16_t)(i + 1);
+    for (unsigned int i=0;i<512;++i) {
+        const unsigned int a = (unsigned int)Utils::random() % 1023;
+        const unsigned int b = (unsigned int)Utils::random() % 1023;
+        if (a != b) {
+            const uint16_t tmp = RR->randomPrivilegedPortOrder[a];
+            RR->randomPrivilegedPortOrder[a] = RR->randomPrivilegedPortOrder[b];
+            RR->randomPrivilegedPortOrder[b] = tmp;
+        }
+    }
+
     // This constructs all the components of the ZeroTier core within a single contiguous memory container,
     // which reduces memory fragmentation and may improve cache locality.
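
Note: the 512 random transpositions above are cheap and adequate for this purpose, but they do not yield a uniformly random permutation. A minimal unbiased alternative, sketched here under the same names (RR->randomPrivilegedPortOrder, Utils::random()), would be a single Fisher-Yates pass at similar cost:

    // Sketch: unbiased Fisher-Yates shuffle of the same 1023-entry table.
    // (Modulo bias from Utils::random() % (i + 1) is negligible for a 64-bit RNG.)
    for (unsigned int i = 1022; i > 0; --i) {
        const unsigned int j = (unsigned int)(Utils::random() % (uint64_t)(i + 1)); // 0..i
        const uint16_t tmp = RR->randomPrivilegedPortOrder[i];
        RR->randomPrivilegedPortOrder[i] = RR->randomPrivilegedPortOrder[j];
        RR->randomPrivilegedPortOrder[j] = tmp;
    }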
@@ -186,29 +203,20 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
 struct _processBackgroundTasks_eachPeer
 {
-    ZT_INLINE _processBackgroundTasks_eachPeer(const int64_t now_, Node *const parent_, void *const tPtr_) noexcept:
-        now(now_),
-        parent(parent_),
-        tPtr(tPtr_),
-        online(false),
-        rootsNotOnline()
-    {}
-
     const int64_t now;
-    Node *const parent;
     void *const tPtr;
     bool online;
-    Vector<SharedPtr<Peer> > rootsNotOnline;
+
+    ZT_INLINE _processBackgroundTasks_eachPeer(const int64_t now_, void *const tPtr_) noexcept :
+        now(now_),
+        tPtr(tPtr_),
+        online(false)
+    {}
 
     ZT_INLINE void operator()(const SharedPtr<Peer> &peer, const bool isRoot) noexcept
     {
         peer->pulse(tPtr, now, isRoot);
-        if (isRoot) {
-            if (peer->directlyConnected(now)) {
-                online = true;
-            } else {
-                rootsNotOnline.push_back(peer);
-            }
-        }
+        this->online |= (isRoot && peer->directlyConnected(now));
     }
 };
@@ -222,22 +230,13 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
     if ((now - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
         m_lastPeerPulse = now;
         try {
-            _processBackgroundTasks_eachPeer pf(now, this, tPtr);
+            _processBackgroundTasks_eachPeer pf(now, tPtr);
             RR->topology->eachPeerWithRoot<_processBackgroundTasks_eachPeer &>(pf);
-            if (pf.online != m_online) {
-                m_online = pf.online;
-                postEvent(tPtr, m_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
-            }
+            if (m_online.exchange(pf.online) != pf.online)
+                postEvent(tPtr, pf.online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
             RR->topology->rankRoots();
-            if (pf.online) {
-                // If we have at least one online root, request whois for roots not online.
-                // TODO
-                //for (Vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
-                //    RR->sw->requestWhois(tPtr,now,*r);
-            }
         } catch (...) {
             return ZT_RESULT_FATAL_ERROR_INTERNAL;
         }
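
Note: the exchange() form is the standard lock-free way to fire a transition event exactly once. A self-contained sketch of the same pattern (computeOnline() and onTransition are hypothetical):

    #include <atomic>

    std::atomic<bool> g_online(false);

    // Calls onTransition only on a false->true or true->false change,
    // race-free even under concurrent callers: exchange() stores the new
    // value and returns the previous one in a single atomic step.
    template <typename F>
    void updateOnline(bool next, F onTransition)
    {
        if (g_online.exchange(next) != next)
            onTransition(next); // e.g. post ZT_EVENT_ONLINE / ZT_EVENT_OFFLINE
    }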
@@ -246,9 +245,8 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
     // Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
     if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
         m_lastHousekeepingRun = now;
-        {
-            RWMutex::RLock l(m_networks_l);
-            for (Map<uint64_t, SharedPtr<Network> >::const_iterator i(m_networks.begin());i != m_networks.end();++i)
-                i->second->doPeriodicTasks(tPtr, now);
+        RWMutex::RLock l(m_networks_l);
+        for (Map<uint64_t, SharedPtr<Network> >::const_iterator i(m_networks.begin());i != m_networks.end();++i) {
+            i->second->doPeriodicTasks(tPtr, now);
         }
     }
@@ -256,7 +254,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
     // Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
     if ((now - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
         m_lastHousekeepingRun = now;
-        try {
+
         // Clean up any old local controller auth memoizations. This is an
         // optimization for network controllers to know whether to accept
         // or trust nodes without doing an extra cert check.
@@ -270,9 +268,6 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
         RR->topology->doPeriodicTasks(tPtr, now);
         RR->sa->clean(now);
-        } catch (...) {
-            return ZT_RESULT_FATAL_ERROR_INTERNAL;
-        }
     }
 
     *nextBackgroundTaskDeadline = now + ZT_TIMER_TASK_INTERVAL;

Node.hpp

@@ -320,12 +320,6 @@ public:
     ZT_INLINE const Identity &identity() const noexcept
     { return m_RR.identity; }
 
-    /**
-     * @return True if aggressive NAT-traversal mechanisms like scanning of <1024 ports are enabled
-     */
-    ZT_INLINE bool natMustDie() const noexcept
-    { return m_natMustDie; }
-
     /**
      * Check whether a local controller has authorized a member on a network
      *
@@ -407,9 +401,6 @@ private:
     // This is the most recent value for time passed in via any of the core API methods.
     std::atomic<int64_t> m_now;
 
-    // True if we are to use really intensive NAT-busting measures.
-    std::atomic<bool> m_natMustDie;
-
     // True if at least one root appears reachable.
     std::atomic<bool> m_online;
 };

Peer.cpp

@@ -241,6 +241,7 @@ void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
     // If there are no living paths and nothing in the try queue, try addresses
     // from any locator we have on file or that are fetched via the external API
     // callback (if one was supplied).
+
     if (m_locator) {
         for (Vector<Endpoint>::const_iterator ep(m_locator->endpoints().begin());ep != m_locator->endpoints().end();++ep) {
             if (ep->type == ZT_ENDPOINT_TYPE_IP_UDP) {
@@ -261,48 +262,48 @@ void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
     } else {
         // Attempt up to ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE queued addresses.
-        for (int k=0;k<ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE;++k) {
+        unsigned int attempts = 0;
+        do {
             p_TryQueueItem &qi = m_tryQueue.front();
 
-            if (likely((now - qi.ts) < ZT_PATH_ALIVE_TIMEOUT)) {
-                if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
-                    // Skip entry if it overlaps with any currently active IP.
-                    for (unsigned int i = 0;i < m_alivePathCount;++i) {
-                        if (m_paths[i]->address().ipsEqual(qi.target.ip()))
-                            goto skip_tryQueue_item;
-                    }
+            if (qi.target.isInetAddr()) {
+                // Skip entry if it overlaps with any currently active IP.
+                for (unsigned int i = 0;i < m_alivePathCount;++i) {
+                    if (m_paths[i]->address().ipsEqual(qi.target.ip()))
+                        goto next_tryQueue_item;
+                }
+            }
 
-                    if ((m_alivePathCount == 0) && (qi.natMustDie) && (RR->node->natMustDie())) {
-                        // Attempt aggressive NAT traversal if both requested and enabled. This sends a probe
-                        // to all ports under 1024, which assumes that the peer has bound to such a port and
-                        // has attempted to initiate a connection through it. This can traverse a decent number
-                        // of symmetric NATs at the cost of 32KiB per attempt and the potential to trigger IDS
-                        // systems by looking like a port scan (because it is).
-                        uint16_t ports[1023];
-                        for (unsigned int i=0;i<1023;++i)
-                            ports[i] = (uint64_t)(i + 1);
-                        for (unsigned int i=0;i<512;++i) {
-                            const uint64_t rn = Utils::random();
-                            const unsigned int a = (unsigned int)rn % 1023;
-                            const unsigned int b = (unsigned int)(rn >> 32U) % 1023;
-                            if (a != b) {
-                                const uint16_t tmp = ports[a];
-                                ports[a] = ports[b];
-                                ports[b] = tmp;
-                            }
-                        }
-                        sent(now,m_sendProbe(tPtr, -1, qi.target.ip(), ports, 1023, now));
-                    } else {
-                        sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), nullptr, 0, now));
+            if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
+                ++attempts;
+                if (qi.privilegedPortTrialIteration < 0) {
+                    sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), nullptr, 0, now));
+                    if ((qi.target.ip().isV4()) && (qi.target.ip().port() < 1024)) {
+                        qi.privilegedPortTrialIteration = 0;
+                        if (m_tryQueue.size() > 1)
+                            m_tryQueue.splice(m_tryQueue.end(),m_tryQueue,m_tryQueue.begin());
+                        continue;
+                    } // else goto next_tryQueue_item;
+                } else if (qi.privilegedPortTrialIteration < 1023) {
+                    uint16_t ports[ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE];
+                    unsigned int pn = 0;
+                    while ((pn < ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE) && (qi.privilegedPortTrialIteration < 1023)) {
+                        const uint16_t p = RR->randomPrivilegedPortOrder[qi.privilegedPortTrialIteration++];
+                        if ((unsigned int)p != qi.target.ip().port())
+                            ports[pn++] = p;
                     }
+                    sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), ports, pn, now));
+                    if (qi.privilegedPortTrialIteration < 1023) {
+                        if (m_tryQueue.size() > 1)
+                            m_tryQueue.splice(m_tryQueue.end(),m_tryQueue,m_tryQueue.begin());
+                        continue;
+                    } // else goto next_tryQueue_item;
                 }
             }
 
-skip_tryQueue_item:
+next_tryQueue_item:
             m_tryQueue.pop_front();
-            if (m_tryQueue.empty())
-                break;
-        }
+        } while ((attempts < ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE) && (!m_tryQueue.empty()));
     }
 
     // Do keepalive on all currently active paths, sending HELLO to the first
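
Note: the per-item privilegedPortTrialIteration field replaces both the old per-item natMustDie flag and the node-wide toggle, acting as a cursor into the node-wide shuffled port table. A summary of the state machine implemented above (names from this diff):

    // privilegedPortTrialIteration == -1:
    //     Fresh item. Send one probe to the advertised address/port. If the
    //     target is IPv4 on a privileged port (< 1024), which per the removed
    //     comment suggests the peer deliberately bound a low port for NAT
    //     traversal, set the cursor to 0 and rotate the item to the back of
    //     the queue.
    // 0 <= privilegedPortTrialIteration < 1023:
    //     Sweep in progress. Each visit sends up to
    //     ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE probes, drawing ports from
    //     RR->randomPrivilegedPortOrder[cursor++] (skipping the port already
    //     tried), then rotates the item again; it may be revisited within
    //     the same pulse if little else is queued.
    // cursor reaches 1023:
    //     Sweep exhausted; the item falls through to pop_front() and is dropped.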
@@ -337,28 +338,38 @@ skip_tryQueue_item:
     }
 }
 
-void Peer::contact(void *tPtr,const int64_t now,const Endpoint &ep,const bool natMustDie)
+void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep)
 {
     static uint8_t foo = 0;
     RWMutex::Lock l(m_lock);
 
-    if (ep.isInetAddr()&&ep.ip().isV4()) {
-        // For IPv4 addresses we send a tiny packet with a low TTL, which helps to
-        // traverse some NAT types. It has no effect otherwise. It's important to
-        // send this right away in case this is a coordinated attempt via RENDEZVOUS.
-        RR->node->putPacket(tPtr,-1,ep.ip(),&foo,1,2);
-        ++foo;
-    }
+    // See if there's already a path to this endpoint and if so ignore it.
+    if (ep.isInetAddr()) {
+        if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL)
+            m_prioritizePaths(now);
+        for (unsigned int i = 0;i < m_alivePathCount;++i) {
+            if (m_paths[i]->address().ipsEqual(ep.ip()))
+                return;
+        }
+    }
 
-    for(List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i!=m_tryQueue.end();++i) {
-        if (i->target == ep) {
-            i->ts = now;
-            i->natMustDie = natMustDie;
+    // For IPv4 addresses we send a tiny packet with a low TTL, which helps to
+    // traverse some NAT types. It has no effect otherwise.
+    if (ep.isInetAddr() && ep.ip().isV4()) {
+        ++foo;
+        RR->node->putPacket(tPtr, -1, ep.ip(), &foo, 1, 2);
+    }
+
+    // Make sure address is not already in the try queue. If so just update it.
+    for (List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i != m_tryQueue.end();++i) {
+        if (i->target.isSameAddress(ep)) {
+            i->target = ep;
+            i->privilegedPortTrialIteration = -1;
             return;
         }
     }
 
-    m_tryQueue.push_back(p_TryQueueItem(now, ep, natMustDie));
+    m_tryQueue.push_back(p_TryQueueItem(ep));
 }
 
 void Peer::resetWithinScope(void *tPtr, InetAddress::IpScope scope, int inetAddressFamily, int64_t now)
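
Note: per the comment in the diff, the last argument to putPacket() here is the IP TTL. A hedged sketch of why such a small TTL is used:

    // Same call shape as in the diff; payload content is irrelevant.
    uint8_t junk = 0;
    RR->node->putPacket(tPtr, -1, ep.ip(), &junk, 1, /* IP TTL */ 2);
    // With TTL 2 the packet typically expires a hop or two out, after the
    // local NAT has already installed an outbound UDP mapping. The peer's
    // own probes can then traverse that mapping, and nothing need arrive
    // at (or alarm) the far side.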
@@ -520,10 +531,14 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
     if ((p + 10) > len)
         return -1;
-    m_vProto = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
-    m_vMajor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
-    m_vMinor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
-    m_vRevision = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
+    m_vProto = Utils::loadBigEndian<uint16_t>(data + p);
+    p += 2;
+    m_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
+    p += 2;
+    m_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
+    p += 2;
+    m_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
+    p += 2;
     p += 2 + (int) Utils::loadBigEndian<uint16_t>(data + p);
 
     m_deriveSecondaryIdentityKeys();
@@ -567,15 +582,14 @@ unsigned int Peer::m_sendProbe(void *tPtr,int64_t localSocket,const InetAddress
     const SharedPtr<SymmetricKey> k(m_key());
     const uint64_t packetId = k->nextMessage(RR->identity.address(), m_id.address());
 
-    uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH + 1];
+    uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH];
     Utils::storeAsIsEndian<uint64_t>(p + ZT_PROTO_PACKET_ID_INDEX, packetId);
     m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
     RR->identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
     p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
     p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_ECHO;
-    p[ZT_PROTO_PACKET_VERB_INDEX + 1] = 0; // arbitrary payload
 
-    Protocol::armor(p,ZT_PROTO_MIN_PACKET_LENGTH + 1,k,cipher());
+    Protocol::armor(p, ZT_PROTO_MIN_PACKET_LENGTH, k, cipher());
 
     RR->expect->sending(packetId, now);
@@ -583,11 +597,11 @@ unsigned int Peer::m_sendProbe(void *tPtr,int64_t localSocket,const InetAddress
         InetAddress tmp(atAddress);
         for (unsigned int i = 0;i < numPorts;++i) {
             tmp.setPort(ports[i]);
-            RR->node->putPacket(tPtr,-1,tmp,p,ZT_PROTO_MIN_PACKET_LENGTH + 1);
+            RR->node->putPacket(tPtr, -1, tmp, p, ZT_PROTO_MIN_PACKET_LENGTH);
         }
         return ZT_PROTO_MIN_PACKET_LENGTH * numPorts;
     } else {
-        RR->node->putPacket(tPtr,-1,atAddress,p,ZT_PROTO_MIN_PACKET_LENGTH + 1);
+        RR->node->putPacket(tPtr, -1, atAddress, p, ZT_PROTO_MIN_PACKET_LENGTH);
         return ZT_PROTO_MIN_PACKET_LENGTH;
     }
 }

Peer.hpp

@@ -231,9 +231,8 @@ public:
      * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
      * @param now Current time
      * @param ep Endpoint to attempt to contact
-     * @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
      */
-    void contact(void *tPtr, int64_t now, const Endpoint &ep, bool breakSymmetricBFG1024);
+    void contact(void *tPtr, int64_t now, const Endpoint &ep);
 
     /**
      * Reset paths within a given IP scope and address family
@@ -524,15 +523,18 @@ private:
     // Addresses recieved via PUSH_DIRECT_PATHS etc. that we are scheduled to try.
     struct p_TryQueueItem
     {
-        ZT_INLINE p_TryQueueItem() : ts(0), target(), natMustDie(false)
+        ZT_INLINE p_TryQueueItem() :
+            target(),
+            privilegedPortTrialIteration(-1)
         {}
 
-        ZT_INLINE p_TryQueueItem(const int64_t now, const Endpoint &t, const bool nmd) : ts(now), target(t), natMustDie(nmd)
+        ZT_INLINE p_TryQueueItem(const Endpoint &t) :
+            target(t),
+            privilegedPortTrialIteration(-1)
         {}
 
-        int64_t ts;
         Endpoint target;
-        bool natMustDie;
+        int privilegedPortTrialIteration;
     };
 
     List<p_TryQueueItem> m_tryQueue;

Protocol.hpp

@@ -627,24 +627,20 @@ enum Verb
     /**
      * Push of potential endpoints for direct communication:
-     *   <[2] 16-bit number of paths>
-     *   <[...] paths>
+     *   <[2] 16-bit number of endpoints>
+     *   <[...] endpoints>
      *
-     * Path record format:
-     *   <[1] 8-bit path flags>
-     *   <[2] length of endpoint record>
-     *   <[...] endpoint>
+     * If the target node is pre-2.0, path records of the following format
+     * are sent instead of post-2.x endpoints:
+     *   <[1] 8-bit path flags (zero)>
+     *   <[2] length of extended path characteristics (0)>
+     *   [<[...] extended path characteristics>]
+     *   <[1] address type>
+     *   <[1] address length in bytes>
+     *   <[...] address>
      *
-     * The following fields are also included if the node is pre-2.x:
-     *   <[1] address type (LEGACY)>
-     *   <[1] address length in bytes (LEGACY)>
-     *   <[...] address (LEGACY)>
-     *
-     * Path record flags:
-     *   0x01 - reserved (legacy)
-     *   0x02 - reserved (legacy)
-     *   0x04 - Symmetric NAT detected at sender side
-     *   0x08 - Request aggressive symmetric NAT traversal
+     * Recipients will add these endpoints to a queue of possible endpoints
+     * to try for a given peer.
      *
      * OK and ERROR are not generated.
      */
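
Note: a minimal serializer sketch for the post-2.x form described above. This helper is hypothetical (not part of this commit) and assumes the usual ZeroTier marshal conventions: Endpoint::marshal() returning bytes written or a negative value on error, and Utils::storeBigEndian():

    // Hypothetical helper illustrating the new wire format: a 16-bit
    // big-endian endpoint count followed by consecutively marshaled
    // Endpoint records.
    static unsigned int writePushDirectPaths(uint8_t *out, const Vector<Endpoint> &eps)
    {
        unsigned int p = 0;
        Utils::storeBigEndian<uint16_t>(out + p, (uint16_t)eps.size());
        p += 2;
        for (Vector<Endpoint>::const_iterator e(eps.begin()); e != eps.end(); ++e) {
            const int l = e->marshal(out + p); // bytes written, or < 0 on error
            if (l < 0)
                return 0; // error: caller should abort the message
            p += (unsigned int)l;
        }
        return p; // total payload length
    }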

RuntimeEnvironment.hpp

@@ -87,6 +87,9 @@ public:
     // AES keyed with a hash of this node's identity secret keys for local cache encryption at rest (where needed).
     AES localCacheSymmetric;
+
+    // Privileged ports from 1 to 1023 in a random order (for IPv4 NAT traversal)
+    uint16_t randomPrivilegedPortOrder[1023];
 };
 
 } // namespace ZeroTier