/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Trace.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "Node.hpp"
#include "SelfAwareness.hpp"
#include "InetAddress.hpp"
#include "Protocol.hpp"
#include "Endpoint.hpp"

namespace ZeroTier {

Peer::Peer(const RuntimeEnvironment *renv) : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
	RR(renv),
	_lastReceive(0),
	_lastSend(0),
	_lastSentHello(0),
	_lastWhoisRequestReceived(0),
	_lastEchoRequestReceived(0),
	_lastProbeReceived(0),
	_lastAttemptedP2PInit(0),
	_lastPrioritizedPaths(0),
	_lastAttemptedAggressiveNATTraversal(0),
	_alivePathCount(0),
	_probe(0),
	_vProto(0),
	_vMajor(0),
	_vMinor(0),
	_vRevision(0)
{
}

Peer::~Peer() // NOLINT(hicpp-use-equals-default,modernize-use-equals-default)
{
}

bool Peer::init(const Identity &peerIdentity)
{
	RWMutex::Lock l(_lock);

	if (_id == peerIdentity)
		return true;
	_id = peerIdentity;

	uint8_t ktmp[ZT_SYMMETRIC_KEY_SIZE];
	if (!RR->identity.agree(peerIdentity,ktmp))
		return false;
	_identityKey.init(RR->node->now(),ktmp);
	Utils::burn(ktmp,sizeof(ktmp));

	return true;
}

void Peer::received(
	void *tPtr,
	const SharedPtr<Path> &path,
	const unsigned int hops,
	const uint64_t packetId,
	const unsigned int payloadLength,
	const Protocol::Verb verb,
	const Protocol::Verb inReVerb)
{
	const int64_t now = RR->node->now();

	_lastReceive = now;
	_inMeter.log(now,payloadLength);

	if (hops == 0) {
		RWMutex::RMaybeWLock l(_lock);

		// If this matches an existing path, skip path learning stuff.
		for (unsigned int i=0;i<_alivePathCount;++i) {
			if (_paths[i] == path) {
				_lock.runlock();
				return;
			}
		}

		// If we made it here, we don't already know this path.
		if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
			if (verb == Protocol::VERB_OK) {
				l.writing();

				// SECURITY: in the future we may not accept anything but OK(HELLO) to learn paths,
				// but right now we accept any OK for backward compatibility. Note that OK will
				// have been checked against expected packet IDs (see Expect.hpp) before we get here,
				// and this guards against replay attacks.

				// If the path list is full, replace the least recently active path. Otherwise append new path.
				unsigned int newPathIdx = 0;
				if (_alivePathCount >= ZT_MAX_PEER_NETWORK_PATHS) {
					int64_t lastReceiveTimeMax = 0;
					for (unsigned int i=0;i<_alivePathCount;++i) {
						if ((_paths[i]->address().family() == path->address().family()) &&
						    (_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
						    (_paths[i]->address().ipsEqual2(path->address()))) {
							// Replace older path if everything is the same except the port number, since NAT/firewall reboots
							// and other wacky stuff can change port number assignments.
							_paths[i] = path;
							return;
						} else if (_paths[i]->lastIn() > lastReceiveTimeMax) {
							lastReceiveTimeMax = _paths[i]->lastIn();
							newPathIdx = i;
						}
					}
				} else {
					newPathIdx = _alivePathCount++;
				}

				InetAddress old;
				if (_paths[newPathIdx])
					old = _paths[newPathIdx]->address();
				_paths[newPathIdx] = path;

				// Re-prioritize paths to include the new one.
				_prioritizePaths(now);

				// Remember most recently learned paths for future bootstrap attempts on restart.
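				// Note that the bootstrap map is keyed by endpoint type, so at most one
				// bootstrap endpoint is kept per type (e.g. one IPv4 and one IPv6);
				// a newer path of the same type simply overwrites the older entry.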
				Endpoint pathEndpoint(path->address());
				_bootstrap[pathEndpoint.type()] = pathEndpoint;

				RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
			} else {
				path->sent(now,hello(tPtr,path->localSocket(),path->address(),now));
				RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id,ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
			}
		}
	}
}

void Peer::send(void *const tPtr,const int64_t now,const void *const data,const unsigned int len,const SharedPtr<Path> &via) noexcept
{
	via->send(RR,tPtr,data,len,now);
	sent(now,len);
}

void Peer::send(void *const tPtr,const int64_t now,const void *const data,const unsigned int len) noexcept
{
	SharedPtr<Path> via(this->path(now));
	if (via) {
		via->send(RR,tPtr,data,len,now);
	} else {
		const SharedPtr<Peer> root(RR->topology->root());
		if ((root)&&(root.ptr() != this)) {
			via = root->path(now);
			if (via) {
				via->send(RR,tPtr,data,len,now);
				root->relayed(now,len);
			} else {
				return;
			}
		} else {
			return;
		}
	}
	sent(now,len);
}

unsigned int Peer::hello(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
#if 0
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);

	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_VERSION_REVISION);
	outp.append(now);
	RR->identity.serialize(outp,false);
	atAddress.serialize(outp);

	RR->node->expectReplyTo(outp.packetId());

	if (atAddress) {
		outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
	}
#endif
	return 0; // legacy HELLO send path above is disabled; return 0 bytes sent rather than falling off the end of a non-void function
}

unsigned int Peer::sendNOP(void *const tPtr,const int64_t localSocket,const InetAddress &atAddress,const int64_t now)
{
	Buf outp;
	Protocol::Header &ph = outp.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
	ph.packetId = Protocol::getPacketId();
	_id.address().copyTo(ph.destination);
	RR->identity.address().copyTo(ph.source);
	ph.flags = 0;
	ph.verb = Protocol::VERB_NOP;
	Protocol::armor(outp,sizeof(Protocol::Header),_identityKey.key(),this->cipher());
	RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
	return sizeof(Protocol::Header);
}

void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
{
	RWMutex::Lock l(_lock);

	bool needHello = false;
	if ((now - _lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
		_lastSentHello = now;
		needHello = true;
	}

	_prioritizePaths(now);

	for(unsigned int i=0;i<_alivePathCount;++i) {
		if (needHello) {
			needHello = false;
			const unsigned int bytes = hello(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
			_paths[i]->sent(now,bytes);
			sent(now,bytes);
		} else if ((now - _paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
			_paths[i]->send(RR,tPtr,&now,1,now);
			sent(now,1);
		}
		// TODO: when we merge multipath we'll keep one open per interface to non-roots.
		// For roots we try to keep every path open.
		if (!isRoot)
			return;
	}

	if (needHello) {
		// Try any statically configured addresses.
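		// externalPathLookup() consults the host application's path lookup callback
		// (if one was registered) for a suggested physical address for this identity;
		// a family / local socket of -1 means "any."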
		InetAddress addr;
		if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
			if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr)) {
				RR->t->tryingNewPath(tPtr,0x84a10000,_id,addr,InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
				hello(tPtr,-1,addr,now);
			}
		}

		if (!_bootstrap.empty()) {
			if (isRoot) {
				// Try all bootstrap addresses if this is a root.
				for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
					if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
						RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
						hello(tPtr,-1,i->second.inetAddr(),now);
					}
				}
			} else {
				// Otherwise try a random bootstrap address.
				unsigned int tryAtIndex = (unsigned int)Utils::random() % (unsigned int)_bootstrap.size();
				for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
					if (tryAtIndex > 0) {
						--tryAtIndex;
					} else {
						if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
							RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
							hello(tPtr,-1,i->second.inetAddr(),now);
						}
					}
				}
			}
		}
	}
}

void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
{
	RWMutex::RLock l(_lock);
	for(unsigned int i=0;i<_alivePathCount;++i) {
		if ((_paths[i])&&((_paths[i]->address().family() == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope)))
			_paths[i]->sent(now,sendNOP(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
	}
}

bool Peer::directlyConnected(int64_t now)
{
	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
		RWMutex::Lock l(_lock);
		_prioritizePaths(now);
		return _alivePathCount > 0;
	} else {
		RWMutex::RLock l(_lock);
		return _alivePathCount > 0;
	}
}

void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
{
	RWMutex::RLock l(_lock);
	paths.clear();
	paths.assign(_paths,_paths + _alivePathCount);
}

void Peer::save(void *tPtr) const
{
	uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];

	// Prefix each saved peer with the current timestamp.
	Utils::storeBigEndian(buf,(uint64_t)RR->node->now());

	const int len = marshal(buf + 8);
	if (len > 0) {
		uint64_t id[2];
		id[0] = _id.address().toInt();
		id[1] = 0;
		RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
	}
}

void Peer::tryToContactAt(void *const tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
{
	static uint8_t junk = 0;

	if (ep.inetAddr()) { // only this endpoint type is currently implemented
		if (!RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,ep.inetAddr()))
			return;

		// Sending a packet with a low TTL before the real message assists traversal with some
		// stateful firewalls and is harmless otherwise AFAIK.
		++junk;
		RR->node->putPacket(tPtr,-1,ep.inetAddr(),&junk,1,2);

		// In a few hundred milliseconds we'll send the real packet.
		{
			RWMutex::Lock l(_lock);
			_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ZT_MAX_PEER_NETWORK_PATHS)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
		}

		// If the peer indicates that they may be behind a symmetric NAT and there are no
		// living direct paths, try a few more aggressive things.
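		// Rationale (sketch): a symmetric NAT maps each outbound flow to an
		// unpredictable source port, defeating ordinary hole punching. The BFG1024
		// branch below brute forces the small <1024 port space when the peer is
		// known to bind a low port, while the fallback guesses a few sequentially
		// adjacent ports, which works against NATs that allocate mappings in
		// increasing order.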
		if ((ep.inetAddr().family() == AF_INET) && (!directlyConnected(now))) {
			unsigned int port = ep.inetAddr().port();
			if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
				// If the other side is using a low-numbered port and has elected to
				// have this done, we can try scanning every port below 1024. The search
				// space here is small enough that we have a very good chance of punching.

				// Generate a random order list of all <1024 ports except 0 and the original sending port.
				uint16_t ports[1022];
				uint16_t ctr = 1;
				for (int i=0;i<1022;++i) { // NOLINT(modernize-loop-convert)
					if (ctr == port)
						++ctr;
					ports[i] = ctr++;
				}
				for (int i=0;i<512;++i) {
					uint64_t rn = Utils::random();
					unsigned int a = ((unsigned int)rn) % 1022;
					unsigned int b = ((unsigned int)(rn >> 24U)) % 1022;
					if (a != b) {
						uint16_t tmp = ports[a];
						ports[a] = ports[b];
						ports[b] = tmp;
					}
				}

				// Chunk ports into chunks of 128 to try in few hundred millisecond intervals,
				// abandoning attempts once there is at least one direct path.
				{
					static_assert((896 % ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE) == 0,"port scan chunk size doesn't evenly divide port list");
					static_assert((1022 - 896) <= ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE,"port scan chunk size needs to be adjusted");
					RWMutex::Lock l(_lock);
					for (int i=0;i<896;i+=ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE)
						_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ports + i,ports + i + ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
					_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ports + 896,ports + 1022,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
				}
			} else {
				// Otherwise use the simpler sequential port attempt method in intervals.
				RWMutex::Lock l(_lock);
				for (int k=0;k<3;++k) {
					if (++port > 65535)
						break;
					InetAddress tryNext(ep.inetAddr());
					tryNext.setPort(port);
					_contactQueue.push_back(_ContactQueueItem(tryNext,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
				}
			}
		}

		// Start alarms going off to actually send these...
		RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
	}
}

void Peer::alarm(void *tPtr,const int64_t now)
{
	// Right now alarms are only used for multi-phase or multi-step NAT traversal operations.

	// Pop one contact queue item and also clean the queue of any that are no
	// longer applicable because the alive path count has exceeded their threshold.
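	// Each alarm drains exactly one queue item and, if work remains, schedules the
	// next alarm ZT_NAT_TRAVERSAL_INTERVAL milliseconds out (see the bottom of this
	// function), so e.g. a BFG1024 port scan proceeds in timed chunks rather than
	// as a single burst.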
	bool stillHaveContactQueueItems;
	_ContactQueueItem qi;
	{
		RWMutex::Lock l(_lock);

		if (_contactQueue.empty())
			return;
		while (_alivePathCount >= _contactQueue.front().alivePathThreshold) {
			_contactQueue.pop_front();
			if (_contactQueue.empty())
				return;
		}

		_ContactQueueItem &qi2 = _contactQueue.front();
		qi.address = qi2.address;
		qi.ports = qi2.ports;
		qi.alivePathThreshold = qi2.alivePathThreshold;
		_contactQueue.pop_front();

		for(std::list< _ContactQueueItem,Utils::Mallocator<_ContactQueueItem> >::iterator q(_contactQueue.begin());q!=_contactQueue.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
			if (_alivePathCount >= q->alivePathThreshold)
				_contactQueue.erase(q++);
			else ++q;
		}

		stillHaveContactQueueItems = !_contactQueue.empty();
	}

	if ((_vProto >= 11) && (_probe != 0)) {
		if (qi.ports.empty()) {
			RR->node->putPacket(tPtr,-1,qi.address,&_probe,ZT_PROTO_PROBE_LENGTH);
		} else {
			for (FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) { // NOLINT(hicpp-use-auto,modernize-use-auto)
				qi.address.setPort(*p);
				RR->node->putPacket(tPtr,-1,qi.address,&_probe,ZT_PROTO_PROBE_LENGTH);
			}
		}
	} else {
		if (qi.ports.empty()) {
			this->sendNOP(tPtr,-1,qi.address,now);
		} else {
			for (FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) { // NOLINT(hicpp-use-auto,modernize-use-auto)
				qi.address.setPort(*p);
				this->sendNOP(tPtr,-1,qi.address,now);
			}
		}
	}

	if (stillHaveContactQueueItems)
		RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}

int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
	data[0] = 0; // serialized peer version

	RWMutex::RLock l(_lock);

	int s = _identityKey.marshal(RR->localCacheSymmetric,data + 1);
	if (s < 0)
		return -1;
	int p = 1 + s;

	s = _id.marshal(data + p,false);
	if (s < 0)
		return -1;
	p += s;

	s = _locator.marshal(data + p);
	if (s <= 0)
		return s;
	p += s;

	data[p++] = (uint8_t)_bootstrap.size();
	for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
		s = i->second.marshal(data + p);
		if (s <= 0)
			return -1;
		p += s;
	}

	Utils::storeBigEndian(data + p,(uint16_t)_vProto);
	p += 2;
	Utils::storeBigEndian(data + p,(uint16_t)_vMajor);
	p += 2;
	Utils::storeBigEndian(data + p,(uint16_t)_vMinor);
	p += 2;
	Utils::storeBigEndian(data + p,(uint16_t)_vRevision);
	p += 2;

	data[p++] = 0;
	data[p++] = 0;

	return p;
}

int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
	RWMutex::Lock l(_lock);

	if ((len <= 1) || (data[0] != 0))
		return -1;

	int s = _identityKey.unmarshal(RR->localCacheSymmetric,data + 1,len);
	if (s < 0)
		return -1;
	int p = 1 + s;

	// If the identity key did not pass verification, it may mean that our local
	// identity has changed. In this case we do not have to forget everything about
	// the peer but we must generate a new identity key by key agreement with our
	// new identity.
	if (!_identityKey) {
		uint8_t tmp[ZT_SYMMETRIC_KEY_SIZE];
		if (!RR->identity.agree(_id,tmp))
			return -1;
		_identityKey.init(RR->node->now(),tmp);
		Utils::burn(tmp,sizeof(tmp));
	}

	// These are ephemeral and start out as NIL after unmarshal.
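	// Note that marshal() never writes ephemeral session keys, so a freshly
	// deserialized peer must renegotiate them; this also preserves forward
	// secrecy across restarts.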
	_ephemeralKeys[0].clear();
	_ephemeralKeys[1].clear();

	s = _id.unmarshal(data + 38,len - 38);
	if (s < 0)
		return s;
	p += s;

	s = _locator.unmarshal(data + p,len - p);
	if (s < 0)
		return s;
	p += s;

	if (p >= len)
		return -1;
	const unsigned int bootstrapCount = data[p++];
	if (bootstrapCount > ZT_MAX_PEER_NETWORK_PATHS)
		return -1;
	_bootstrap.clear();
	for(unsigned int i=0;i<bootstrapCount;++i) {
		Endpoint ep;
		s = ep.unmarshal(data + p,len - p);
		if (s < 0)
			return s;
		p += s;
		_bootstrap[ep.type()] = ep;
	}

	if ((p + 10) > len) // 4 x uint16_t version fields plus 2-byte extended field length
		return -1;
	_vProto = Utils::loadBigEndian<uint16_t>(data + p);
	p += 2;
	_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
	p += 2;
	_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
	p += 2;
	_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
	p += 2;

	p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
	return (p > len) ? -1 : p;
}

struct _PathPriorityComparisonOperator
{
	ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const noexcept
	{
		// Sort in order of last received time for receipt of anything over path, which prioritizes
		// paths by aliveness. This will go away when we merge in multipath in favor of something
		// much smarter.
		return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() < b->lastIn())) );
	}
};

void Peer::_prioritizePaths(const int64_t now)
{
	// assumes _lock is locked for writing
	_lastPrioritizedPaths = now;

	std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());

	// Live paths (lastIn > 0) sort ahead of dead or null paths; count the leading
	// live paths and zero out everything after the first dead or null slot.
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if ((!_paths[i])||(!_paths[i]->alive(now))) {
			_alivePathCount = i;
			for(;i<ZT_MAX_PEER_NETWORK_PATHS;++i)
				_paths[i].zero();
			break;
		}
	}
}

} // namespace ZeroTier
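// Illustrative round trip for the marshal()/unmarshal() pair above (hypothetical
// caller code, not part of this file; assumes "peer" is a valid SharedPtr<Peer>
// and "RR" is the caller's RuntimeEnvironment pointer):
//
//   uint8_t buf[ZT_PEER_MARSHAL_SIZE_MAX];
//   const int len = peer->marshal(buf);
//   if (len > 0) {
//     SharedPtr<Peer> restored(new Peer(RR));
//     if (restored->unmarshal(buf,len) == len) {
//       // restored now carries the identity key, locator, bootstrap endpoints,
//       // and remote version info, but no paths and no ephemeral keys.
//     }
//   }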