Code formatting and similar.

Adam Ierymenko 2020-06-08 11:47:28 -07:00
parent d18c33d6df
commit 016d85b169
22 changed files with 918 additions and 683 deletions

@ -34,10 +34,10 @@ Global Options:
Commands:
help Show this help
version Print version
service Start a node (see below)
service Start node (see below)
status Show node status and configuration
join [-options] <network> Join a virtual network
-a <token> Join authorization token
-a <token> Token to submit to controller
-c <identity|fingerprint> Controller identity or fingerprint
leave <network> Leave a virtual network
networks List VL2 virtual networks

@ -26,7 +26,7 @@ namespace ZeroTier {
class RuntimeEnvironment;
template<unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P>
template< unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P >
class Defragmenter;
/**
@ -34,10 +34,10 @@ class Defragmenter;
*/
class Path
{
friend class SharedPtr<Path>;
friend class SharedPtr< Path >;
// Allow defragmenter to access fragment-in-flight info stored in Path for performance reasons.
template<unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P>
template< unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P >
friend
class Defragmenter;
@ -142,9 +142,9 @@ public:
private:
const int64_t m_localSocket;
std::atomic<int64_t> m_lastIn;
std::atomic<int64_t> m_lastOut;
std::atomic<int> m_latency;
std::atomic< int64_t > m_lastIn;
std::atomic< int64_t > m_lastOut;
std::atomic< int > m_latency;
const InetAddress m_addr;
Meter<> m_inMeter;
Meter<> m_outMeter;
@ -152,10 +152,10 @@ private:
// These fields belong to Defragmenter but are kept in Path for performance
// as it's much faster this way than having Defragmenter maintain another
// mapping from paths to inbound message IDs.
Set<uint64_t> m_inboundFragmentedMessages;
Set< uint64_t > m_inboundFragmentedMessages;
Mutex m_inboundFragmentedMessages_l;
std::atomic<int> __refCount;
std::atomic< int > __refCount;
};
} // namespace ZeroTier

@ -69,7 +69,7 @@ bool Peer::init(const Identity &peerIdentity)
void Peer::received(
void *tPtr,
const SharedPtr<Path> &path,
const SharedPtr< Path > &path,
const unsigned int hops,
const uint64_t packetId,
const unsigned int payloadLength,
@ -86,7 +86,7 @@ void Peer::received(
// If this matches an existing path, skip path learning stuff. For the small number
// of paths a peer will have linear scan is the fastest way to do lookup.
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if (m_paths[i] == path)
return;
}
@ -103,7 +103,7 @@ void Peer::received(
unsigned int newPathIdx = 0;
if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
int64_t lastReceiveTimeMax = 0;
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if ((m_paths[i]->address().family() == path->address().family()) &&
(m_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
(m_paths[i]->address().ipsEqual2(path->address()))) {
@ -133,7 +133,7 @@ void Peer::received(
// it replaces the lowest ranked entry.
std::sort(m_endpointCache, m_endpointCache + ZT_PEER_ENDPOINT_CACHE_SIZE);
Endpoint thisEndpoint(path->address());
for (unsigned int i = 0;;++i) {
for (unsigned int i = 0;; ++i) {
if (i == (ZT_PEER_ENDPOINT_CACHE_SIZE - 1)) {
m_endpointCache[i].target = thisEndpoint;
m_endpointCache[i].lastSeen = now;
@ -155,11 +155,11 @@ void Peer::received(
void Peer::send(void *tPtr, int64_t now, const void *data, unsigned int len) noexcept
{
SharedPtr<Path> via(this->path(now));
SharedPtr< Path > via(this->path(now));
if (via) {
via->send(RR, tPtr, data, len, now);
} else {
const SharedPtr<Peer> root(RR->topology->root());
const SharedPtr< Peer > root(RR->topology->root());
if ((root) && (root.ptr() != this)) {
via = root->path(now);
if (via) {
@ -202,7 +202,7 @@ unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atA
Salsa20(m_identityKey->secret, &legacySalsaIv).crypt12(legacyMoonCountStart, legacyMoonCountStart, 2);
const int cryptSectionStart = ii;
FCV<uint8_t, 4096> md;
FCV< uint8_t, 4096 > md;
Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, RR->instanceId);
outp.wI16(ii, (uint16_t)md.size());
outp.wB(ii, md.data(), (unsigned int)md.size());
@ -227,7 +227,7 @@ unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atA
p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
uint64_t polyMac[2];
p1305.finish(polyMac);
Utils::storeAsIsEndian<uint64_t>(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);
Utils::storeAsIsEndian< uint64_t >(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);
return (likely(RR->node->putPacket(tPtr, localSocket, atAddress, outp.unsafeData, ii))) ? ii : 0;
}
@ -257,7 +257,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
// callback (if one was supplied).
if (m_locator) {
for (Vector<Endpoint>::const_iterator ep(m_locator->endpoints().begin());ep != m_locator->endpoints().end();++ep) {
for (Vector< Endpoint >::const_iterator ep(m_locator->endpoints().begin()); ep != m_locator->endpoints().end(); ++ep) {
if (ep->type == ZT_ENDPOINT_TYPE_IP_UDP) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, ep->ip())) {
int64_t &lt = m_lastTried[*ep];
@ -271,7 +271,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
}
}
for (unsigned int i = 0;i < ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
if ((m_endpointCache[i].lastSeen > 0) && (m_endpointCache[i].target.type == ZT_ENDPOINT_TYPE_IP_UDP)) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, m_endpointCache[i].target.ip())) {
int64_t &lt = m_lastTried[m_endpointCache[i].target];
@ -308,7 +308,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
if (qi.target.isInetAddr()) {
// Skip entry if it overlaps with any currently active IP.
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if (m_paths[i]->address().ipsEqual(qi.target.ip()))
goto discard_queue_item;
}
@ -385,7 +385,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
// Do keepalive on all currently active paths, sending HELLO to the first
// if needHello is true and sending small keepalives to others.
uint64_t randomJunk = Utils::random();
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if (needHello) {
needHello = false;
const unsigned int bytes = hello(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), now);
@ -400,9 +400,9 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
// Send a HELLO indirectly if we were not able to send one via any direct path.
if (needHello) {
const SharedPtr<Peer> root(RR->topology->root());
const SharedPtr< Peer > root(RR->topology->root());
if (root) {
const SharedPtr<Path> via(root->path(now));
const SharedPtr< Path > via(root->path(now));
if (via) {
const unsigned int bytes = hello(tPtr, via->localSocket(), via->address(), now);
via->sent(now, bytes);
@ -414,7 +414,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
}
// Clean m_lastTried
for (Map<Endpoint, int64_t>::iterator i(m_lastTried.begin());i != m_lastTried.end();) {
for (Map< Endpoint, int64_t >::iterator i(m_lastTried.begin()); i != m_lastTried.end();) {
if ((now - i->second) > (ZT_PATH_MIN_TRY_INTERVAL * 4))
m_lastTried.erase(i++);
else ++i;
@ -430,7 +430,7 @@ void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep, int tries)
if (ep.isInetAddr()) {
if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL)
m_prioritizePaths(now);
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if (m_paths[i]->address().ipsEqual(ep.ip()))
return;
}
@ -450,7 +450,7 @@ void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep, int tries)
}
// Make sure address is not already in the try queue. If so just update it.
for (List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i != m_tryQueue.end();++i) {
for (List< p_TryQueueItem >::iterator i(m_tryQueue.begin()); i != m_tryQueue.end(); ++i) {
if (i->target.isSameAddress(ep)) {
i->target = ep;
i->iteration = -tries;
@ -465,7 +465,7 @@ void Peer::resetWithinScope(void *tPtr, InetAddress::IpScope scope, int inetAddr
{
RWMutex::Lock l(m_lock);
unsigned int pc = 0;
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if ((m_paths[i]) && ((m_paths[i]->address().family() == inetAddressFamily) && (m_paths[i]->address().ipScope() == scope))) {
const unsigned int bytes = m_sendProbe(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), nullptr, 0, now);
m_paths[i]->sent(now, bytes);
@ -491,7 +491,7 @@ bool Peer::directlyConnected(int64_t now)
}
}
void Peer::getAllPaths(Vector<SharedPtr<Path> > &paths)
void Peer::getAllPaths(Vector< SharedPtr< Path > > &paths)
{
RWMutex::RLock l(m_lock);
paths.clear();
@ -504,7 +504,7 @@ void Peer::save(void *tPtr) const
uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];
// Prefix each saved peer with the current timestamp.
Utils::storeBigEndian<uint64_t>(buf, (uint64_t)RR->node->now());
Utils::storeBigEndian< uint64_t >(buf, (uint64_t)RR->node->now());
const int len = marshal(buf + 8);
if (len > 0) {
@ -553,13 +553,13 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
}
unsigned int cachedEndpointCount = 0;
for (unsigned int i = 0;i < ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
if (m_endpointCache[i].lastSeen > 0)
++cachedEndpointCount;
}
Utils::storeBigEndian(data + p, (uint16_t)cachedEndpointCount);
p += 2;
for (unsigned int i = 0;i < ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
Utils::storeBigEndian(data + p, (uint64_t)m_endpointCache[i].lastSeen);
s = m_endpointCache[i].target.marshal(data + p);
if (s <= 0)
@ -635,13 +635,13 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
return -1;
}
const unsigned int cachedEndpointCount = Utils::loadBigEndian<uint16_t>(data + p);
const unsigned int cachedEndpointCount = Utils::loadBigEndian< uint16_t >(data + p);
p += 2;
for (unsigned int i = 0;i < cachedEndpointCount;++i) {
for (unsigned int i = 0; i < cachedEndpointCount; ++i) {
if (i < ZT_PEER_ENDPOINT_CACHE_SIZE) {
if ((p + 8) >= len)
return -1;
m_endpointCache[i].lastSeen = (int64_t)Utils::loadBigEndian<uint64_t>(data + p);
m_endpointCache[i].lastSeen = (int64_t)Utils::loadBigEndian< uint64_t >(data + p);
p += 8;
s = m_endpointCache[i].target.unmarshal(data + p, len - p);
if (s <= 0)
@ -652,15 +652,15 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
if ((p + 10) > len)
return -1;
m_vProto = Utils::loadBigEndian<uint16_t>(data + p);
m_vProto = Utils::loadBigEndian< uint16_t >(data + p);
p += 2;
m_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
m_vMajor = Utils::loadBigEndian< uint16_t >(data + p);
p += 2;
m_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
m_vMinor = Utils::loadBigEndian< uint16_t >(data + p);
p += 2;
m_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
m_vRevision = Utils::loadBigEndian< uint16_t >(data + p);
p += 2;
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
p += 2 + (int)Utils::loadBigEndian< uint16_t >(data + p);
m_deriveSecondaryIdentityKeys();
@ -669,7 +669,7 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
struct _PathPriorityComparisonOperator
{
ZT_INLINE bool operator()(const SharedPtr<Path> &a, const SharedPtr<Path> &b) const noexcept
ZT_INLINE bool operator()(const SharedPtr< Path > &a, const SharedPtr< Path > &b) const noexcept
{
// Sort in descending order of most recent receive time.
return (a->lastIn() > b->lastIn());
@ -686,10 +686,10 @@ void Peer::m_prioritizePaths(int64_t now)
std::sort(m_paths, m_paths + m_alivePathCount, _PathPriorityComparisonOperator());
// Let go of paths that have expired.
for (unsigned int i = 0;i < ZT_MAX_PEER_NETWORK_PATHS;++i) {
for (unsigned int i = 0; i < ZT_MAX_PEER_NETWORK_PATHS; ++i) {
if ((!m_paths[i]) || (!m_paths[i]->alive(now))) {
m_alivePathCount = i;
for (;i < ZT_MAX_PEER_NETWORK_PATHS;++i)
for (; i < ZT_MAX_PEER_NETWORK_PATHS; ++i)
m_paths[i].zero();
break;
}
@ -700,11 +700,11 @@ void Peer::m_prioritizePaths(int64_t now)
unsigned int Peer::m_sendProbe(void *tPtr, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, const unsigned int numPorts, int64_t now)
{
// Assumes m_lock is locked
const SharedPtr<SymmetricKey> k(m_key());
const SharedPtr< SymmetricKey > k(m_key());
const uint64_t packetId = k->nextMessage(RR->identity.address(), m_id.address());
uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH];
Utils::storeAsIsEndian<uint64_t>(p + ZT_PROTO_PACKET_ID_INDEX, packetId);
Utils::storeAsIsEndian< uint64_t >(p + ZT_PROTO_PACKET_ID_INDEX, packetId);
m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
RR->identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
@ -716,7 +716,7 @@ unsigned int Peer::m_sendProbe(void *tPtr, int64_t localSocket, const InetAddres
if (numPorts > 0) {
InetAddress tmp(atAddress);
for (unsigned int i = 0;i < numPorts;++i) {
for (unsigned int i = 0; i < numPorts; ++i) {
tmp.setPort(ports[i]);
RR->node->putPacket(tPtr, -1, tmp, p, ZT_PROTO_MIN_PACKET_LENGTH);
}
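
Peer::m_prioritizePaths() above sorts the path array in descending order of most recent receive time and truncates it at the first dead or empty slot. The following is a simplified standalone sketch of that sort-then-truncate step, using a hypothetical FakePath type and std::shared_ptr in place of the ZeroTier Path and SharedPtr classes (illustration only, not code from this commit):

#include <algorithm>
#include <cstdint>
#include <memory>

struct FakePath
{
    int64_t lastIn; // last time anything was received on this path
};

constexpr unsigned int MAX_PATHS = 16;        // stands in for ZT_MAX_PEER_NETWORK_PATHS
constexpr int64_t PATH_ALIVE_TIMEOUT = 45000; // assumed liveness window in milliseconds

// Live paths end up first, stale or empty slots are cleared, and the new
// alive path count is returned, mirroring the logic in Peer::m_prioritizePaths().
static unsigned int prioritizePaths(std::shared_ptr<FakePath> (&paths)[MAX_PATHS], const int64_t now)
{
    std::sort(paths, paths + MAX_PATHS,
        [](const std::shared_ptr<FakePath> &a, const std::shared_ptr<FakePath> &b) {
            // Descending order of most recent receive time; empty slots sort last.
            return (a ? a->lastIn : INT64_MIN) > (b ? b->lastIn : INT64_MIN);
        });
    for (unsigned int i = 0; i < MAX_PATHS; ++i) {
        if ((!paths[i]) || ((now - paths[i]->lastIn) > PATH_ALIVE_TIMEOUT)) {
            for (unsigned int j = i; j < MAX_PATHS; ++j)
                paths[j].reset();
            return i;
        }
    }
    return MAX_PATHS;
}

int main()
{
    std::shared_ptr<FakePath> paths[MAX_PATHS];
    paths[0].reset(new FakePath{1000});
    paths[1].reset(new FakePath{90000});
    return (int)prioritizePaths(paths, 100000); // the fresher path survives, returns 1
}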

@ -33,14 +33,14 @@
#include "Containers.hpp"
#define ZT_PEER_MARSHAL_SIZE_MAX ( \
1 + \
ZT_ADDRESS_LENGTH + \
ZT_SYMMETRIC_KEY_SIZE + \
ZT_IDENTITY_MARSHAL_SIZE_MAX + \
1 + ZT_LOCATOR_MARSHAL_SIZE_MAX + \
2 + ((8 + ZT_ENDPOINT_MARSHAL_SIZE_MAX) * ZT_PEER_ENDPOINT_CACHE_SIZE) + \
(2 * 4) + \
2 )
1 + \
ZT_ADDRESS_LENGTH + \
ZT_SYMMETRIC_KEY_SIZE + \
ZT_IDENTITY_MARSHAL_SIZE_MAX + \
1 + ZT_LOCATOR_MARSHAL_SIZE_MAX + \
2 + ((8 + ZT_ENDPOINT_MARSHAL_SIZE_MAX) * ZT_PEER_ENDPOINT_CACHE_SIZE) + \
(2 * 4) + \
2 )
#define ZT_PEER_DEDUP_BUFFER_SIZE 1024
#define ZT_PEER_DEDUP_BUFFER_MASK 1023U
@ -54,7 +54,7 @@ class Topology;
*/
class Peer
{
friend class SharedPtr<Peer>;
friend class SharedPtr< Peer >;
friend class Topology;
@ -94,7 +94,7 @@ public:
/**
* @return Current locator or NULL if no locator is known
*/
ZT_INLINE const SharedPtr<const Locator> &locator() const noexcept
ZT_INLINE const SharedPtr< const Locator > &locator() const noexcept
{
RWMutex::RLock l(m_lock);
return m_locator;
@ -112,7 +112,7 @@ public:
* @param loc Locator update
* @return New locator or previous if it was not replaced.
*/
ZT_INLINE SharedPtr<const Locator> setLocator(const SharedPtr<const Locator> &loc) noexcept
ZT_INLINE SharedPtr< const Locator > setLocator(const SharedPtr< const Locator > &loc) noexcept
{
RWMutex::Lock l(m_lock);
if ((loc) && ((!m_locator) || (m_locator->timestamp() < loc->timestamp())))
@ -135,7 +135,7 @@ public:
*/
void received(
void *tPtr,
const SharedPtr<Path> &path,
const SharedPtr< Path > &path,
unsigned int hops,
uint64_t packetId,
unsigned int payloadLength,
@ -168,7 +168,7 @@ public:
*
* @return Current best path or NULL if there is no direct path
*/
ZT_INLINE SharedPtr<Path> path(const int64_t now) noexcept
ZT_INLINE SharedPtr< Path > path(const int64_t now) noexcept
{
if (likely((now - m_lastPrioritizedPaths) < ZT_PEER_PRIORITIZE_PATHS_INTERVAL)) {
RWMutex::RLock l(m_lock);
@ -180,7 +180,7 @@ public:
if (m_alivePathCount > 0)
return m_paths[0];
}
return SharedPtr<Path>();
return SharedPtr< Path >();
}
/**
@ -192,7 +192,7 @@ public:
* @param len Length in bytes
* @param via Path over which to send data (may or may not be an already-learned path for this peer)
*/
ZT_INLINE void send(void *tPtr, int64_t now, const void *data, unsigned int len, const SharedPtr<Path> &via) noexcept
ZT_INLINE void send(void *tPtr, int64_t now, const void *data, unsigned int len, const SharedPtr< Path > &via) noexcept
{
via->send(RR, tPtr, data, len, now);
sent(now, len);
@ -270,7 +270,7 @@ public:
int ltot = 0;
int lcnt = 0;
RWMutex::RLock l(m_lock);
for (unsigned int i = 0;i < m_alivePathCount;++i) {
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
int lat = m_paths[i]->latency();
if (lat > 0) {
ltot += lat;
@ -293,7 +293,7 @@ public:
/**
* @return The permanent shared key for this peer computed by simple identity agreement
*/
ZT_INLINE SharedPtr<SymmetricKey> identityKey() noexcept
ZT_INLINE SharedPtr< SymmetricKey > identityKey() noexcept
{ return m_identityKey; }
/**
@ -320,7 +320,7 @@ public:
/**
* @return Current best key: either the latest ephemeral or the identity key
*/
ZT_INLINE SharedPtr<SymmetricKey> key() noexcept
ZT_INLINE SharedPtr< SymmetricKey > key() noexcept
{
RWMutex::RLock l(m_lock);
return m_key();
@ -335,7 +335,7 @@ public:
* @param k Key to check
* @return True if this key is ephemeral, false if it's the long-lived identity key
*/
ZT_INLINE bool isEphemeral(const SharedPtr<SymmetricKey> &k) const noexcept
ZT_INLINE bool isEphemeral(const SharedPtr< SymmetricKey > &k) const noexcept
{ return m_identityKey != k; }
/**
@ -379,7 +379,7 @@ public:
*
* @param paths Vector of paths with the first path being the current preferred path
*/
void getAllPaths(Vector< SharedPtr<Path> > &paths);
void getAllPaths(Vector< SharedPtr< Path > > &paths);
/**
* Save the latest version of this peer to the data store
@ -453,7 +453,7 @@ private:
void m_deriveSecondaryIdentityKeys() noexcept;
ZT_INLINE SharedPtr<SymmetricKey> m_key() noexcept
ZT_INLINE SharedPtr< SymmetricKey > m_key() noexcept
{
// assumes m_lock is locked (for read at least)
return (m_ephemeralKeys[0]) ? m_ephemeralKeys[0] : m_identityKey;
@ -465,7 +465,7 @@ private:
RWMutex m_lock;
// Static identity key
SharedPtr<SymmetricKey> m_identityKey;
SharedPtr< SymmetricKey > m_identityKey;
// Cipher for encrypting or decrypting the encrypted section of HELLO packets.
AES m_helloCipher;
@ -478,32 +478,32 @@ private:
int64_t m_ephemeralPairTimestamp;
// Current and previous ephemeral key
SharedPtr<SymmetricKey> m_ephemeralKeys[2];
SharedPtr< SymmetricKey > m_ephemeralKeys[2];
Identity m_id;
SharedPtr<const Locator> m_locator;
SharedPtr< const Locator > m_locator;
// the last time something was sent or received from this peer (direct or indirect).
std::atomic<int64_t> m_lastReceive;
std::atomic<int64_t> m_lastSend;
std::atomic< int64_t > m_lastReceive;
std::atomic< int64_t > m_lastSend;
// The last time we sent a full HELLO to this peer.
int64_t m_lastSentHello; // only checked while locked
// The last time a WHOIS request was received from this peer (anti-DOS / anti-flood).
std::atomic<int64_t> m_lastWhoisRequestReceived;
std::atomic< int64_t > m_lastWhoisRequestReceived;
// The last time an ECHO request was received from this peer (anti-DOS / anti-flood).
std::atomic<int64_t> m_lastEchoRequestReceived;
std::atomic< int64_t > m_lastEchoRequestReceived;
// The last time we sorted paths in order of preference. (This happens pretty often.)
std::atomic<int64_t> m_lastPrioritizedPaths;
std::atomic< int64_t > m_lastPrioritizedPaths;
// The last time we got a probe from this peer.
std::atomic<int64_t> m_lastProbeReceived;
std::atomic< int64_t > m_lastProbeReceived;
// Deduplication buffer
std::atomic<uint64_t> m_dedup[ZT_PEER_DEDUP_BUFFER_SIZE];
std::atomic< uint64_t > m_dedup[ZT_PEER_DEDUP_BUFFER_SIZE];
// Meters measuring actual bandwidth in, out, and relayed via this peer (mostly if this is a root).
Meter<> m_inMeter;
@ -511,13 +511,13 @@ private:
Meter<> m_relayedMeter;
// Direct paths sorted in descending order of preference.
SharedPtr<Path> m_paths[ZT_MAX_PEER_NETWORK_PATHS];
SharedPtr< Path > m_paths[ZT_MAX_PEER_NETWORK_PATHS];
// Number of paths currently alive (number of non-NULL entries in _paths).
unsigned int m_alivePathCount;
// For SharedPtr<>
std::atomic<int> __refCount;
std::atomic< int > __refCount;
struct p_EndpointCacheItem
{
@ -527,7 +527,7 @@ private:
ZT_INLINE bool operator<(const p_EndpointCacheItem &ci) const noexcept
{ return lastSeen < ci.lastSeen; }
ZT_INLINE p_EndpointCacheItem() noexcept : target(), lastSeen(0)
ZT_INLINE p_EndpointCacheItem() noexcept: target(), lastSeen(0)
{}
};
@ -550,8 +550,8 @@ private:
int iteration;
};
List<p_TryQueueItem> m_tryQueue;
Map<Endpoint,int64_t> m_lastTried;
List< p_TryQueueItem > m_tryQueue;
Map< Endpoint, int64_t > m_lastTried;
uint16_t m_vProto;
uint16_t m_vMajor;

@ -20,7 +20,7 @@ bool Revocation::sign(const Identity &signer) noexcept
uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX + 32];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int) marshal(buf, true), m_signature, sizeof(m_signature));
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
@ -30,38 +30,38 @@ int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSi
{
int p = 0;
if (forSign) {
for (int k = 0;k < 8;++k)
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint32_t>(data + p, 0);
Utils::storeBigEndian< uint32_t >(data + p, 0);
p += 4;
Utils::storeBigEndian<uint32_t>(data + p, m_id);
Utils::storeBigEndian< uint32_t >(data + p, m_id);
p += 4;
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian< uint64_t >(data + p, m_networkId);
p += 8;
Utils::storeBigEndian<uint32_t>(data + p, 0);
Utils::storeBigEndian< uint32_t >(data + p, 0);
p += 4;
Utils::storeBigEndian<uint32_t>(data + p, m_credentialId);
Utils::storeBigEndian< uint32_t >(data + p, m_credentialId);
p += 4;
Utils::storeBigEndian<uint64_t>(data + p, (uint64_t) m_threshold);
Utils::storeBigEndian< uint64_t >(data + p, (uint64_t)m_threshold);
p += 8;
Utils::storeBigEndian<uint64_t>(data + p, m_flags);
Utils::storeBigEndian< uint64_t >(data + p, m_flags);
p += 8;
m_target.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
data[p++] = (uint8_t) m_type;
data[p++] = (uint8_t)m_type;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_signatureLength);
Utils::storeBigEndian< uint16_t >(data + p, (uint16_t)m_signatureLength);
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int) m_signatureLength;
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0;k < 8;++k)
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
@ -72,24 +72,24 @@ int Revocation::unmarshal(const uint8_t *restrict data, const int len) noexcept
if (len < 54)
return -1;
// 4 bytes reserved
m_id = Utils::loadBigEndian<uint32_t>(data + 4);
m_networkId = Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian< uint32_t >(data + 4);
m_networkId = Utils::loadBigEndian< uint64_t >(data + 8);
// 4 bytes reserved
m_credentialId = Utils::loadBigEndian<uint32_t>(data + 20);
m_threshold = (int64_t) Utils::loadBigEndian<uint64_t>(data + 24);
m_flags = Utils::loadBigEndian<uint64_t>(data + 32);
m_credentialId = Utils::loadBigEndian< uint32_t >(data + 20);
m_threshold = (int64_t)Utils::loadBigEndian< uint64_t >(data + 24);
m_flags = Utils::loadBigEndian< uint64_t >(data + 32);
m_target.setTo(data + 40);
m_signedBy.setTo(data + 45);
m_type = (ZT_CredentialType) data[50];
m_type = (ZT_CredentialType)data[50];
// 1 byte reserved
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 52);
int p = 54 + (int) m_signatureLength;
m_signatureLength = Utils::loadBigEndian< uint16_t >(data + 52);
int p = 54 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(m_signature, data + 54, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
p += 2 + Utils::loadBigEndian< uint16_t >(data + p);
if (p > len)
return -1;
return p;

@ -40,7 +40,7 @@ class Expect;
class RuntimeEnvironment
{
public:
ZT_INLINE RuntimeEnvironment(Node *const n) noexcept :
ZT_INLINE RuntimeEnvironment(Node *const n) noexcept:
instanceId(Utils::getSecureRandomU64()),
node(n),
localNetworkController(nullptr),
@ -58,7 +58,7 @@ public:
ZT_INLINE ~RuntimeEnvironment() noexcept
{
Utils::burn(secretIdentityStr,sizeof(secretIdentityStr));
Utils::burn(secretIdentityStr, sizeof(secretIdentityStr));
}
// Unique ID generated on startup

@ -34,7 +34,7 @@ public:
_scope(scope)
{}
ZT_INLINE void operator()(const SharedPtr<Peer> &p)
ZT_INLINE void operator()(const SharedPtr< Peer > &p)
{ p->resetWithinScope(_tPtr, _scope, _family, _now); }
private:
@ -68,15 +68,15 @@ void SelfAwareness::iam(void *tPtr, const Identity &reporter, const int64_t rece
// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
// due to multiple reports of endpoint change.
// Don't use 'entry' after this since hash table gets modified.
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::iterator i(m_phy.begin());i != m_phy.end();) {
for (Map< p_PhySurfaceKey, p_PhySurfaceEntry >::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((i->first.scope == scope) && (i->first.reporterPhysicalAddress != reporterPhysicalAddress))
m_phy.erase(i++);
else ++i;
}
// Reset all paths within this scope and address family
_ResetWithinScope rset(tPtr, now, myPhysicalAddress.family(), (InetAddress::IpScope) scope);
RR->topology->eachPeer<_ResetWithinScope &>(rset);
_ResetWithinScope rset(tPtr, now, myPhysicalAddress.family(), (InetAddress::IpScope)scope);
RR->topology->eachPeer< _ResetWithinScope & >(rset);
RR->t->resettingPathsInScope(tPtr, 0x9afff100, reporter, reporterPhysicalAddress, entry.mySurface, myPhysicalAddress, scope);
} else {
@ -90,30 +90,30 @@ void SelfAwareness::iam(void *tPtr, const Identity &reporter, const int64_t rece
void SelfAwareness::clean(int64_t now)
{
Mutex::Lock l(m_phy_l);
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::iterator i(m_phy.begin());i != m_phy.end();) {
for (Map< p_PhySurfaceKey, p_PhySurfaceEntry >::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((now - i->second.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
m_phy.erase(i++);
else ++i;
}
}
MultiMap<unsigned int, InetAddress> SelfAwareness::externalAddresses(const int64_t now) const
MultiMap< unsigned int, InetAddress > SelfAwareness::externalAddresses(const int64_t now) const
{
MultiMap<unsigned int, InetAddress> r;
MultiMap< unsigned int, InetAddress > r;
// Count endpoints reporting each IP/port combo
Map<InetAddress, unsigned long> counts;
Map< InetAddress, unsigned long > counts;
{
Mutex::Lock l(m_phy_l);
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::const_iterator i(m_phy.begin());i != m_phy.end();++i) {
for (Map< p_PhySurfaceKey, p_PhySurfaceEntry >::const_iterator i(m_phy.begin()); i != m_phy.end(); ++i) {
if ((now - i->second.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
++counts[i->second.mySurface];
}
}
// Invert to create a map from count to address
for (Map<InetAddress, unsigned long>::iterator i(counts.begin());i != counts.end();++i)
r.insert(std::pair<unsigned long, InetAddress>(i->second, i->first));
for (Map< InetAddress, unsigned long >::iterator i(counts.begin()); i != counts.end(); ++i)
r.insert(std::pair< unsigned long, InetAddress >(i->second, i->first));
return r;
}

@ -61,7 +61,7 @@ public:
* @param now Current time
* @return Map of count to IP/port representing how many endpoints reported each address
*/
MultiMap<unsigned int, InetAddress> externalAddresses(int64_t now) const;
MultiMap< unsigned int, InetAddress > externalAddresses(int64_t now) const;
private:
struct p_PhySurfaceKey
@ -78,7 +78,7 @@ private:
{}
ZT_INLINE unsigned long hashCode() const noexcept
{ return ((unsigned long) reporter.toInt() + (unsigned long) receivedOnLocalSocket + (unsigned long) scope); }
{ return ((unsigned long)reporter.toInt() + (unsigned long)receivedOnLocalSocket + (unsigned long)scope); }
ZT_INLINE bool operator==(const p_PhySurfaceKey &k) const noexcept
{ return ((reporter == k.reporter) && (receivedOnLocalSocket == k.receivedOnLocalSocket) && (reporterPhysicalAddress == k.reporterPhysicalAddress) && (scope == k.scope)); }
@ -119,7 +119,7 @@ private:
};
const RuntimeEnvironment *RR;
Map<p_PhySurfaceKey, p_PhySurfaceEntry> m_phy;
Map< p_PhySurfaceKey, p_PhySurfaceEntry > m_phy;
Mutex m_phy_l;
};

@ -26,18 +26,23 @@ namespace ZeroTier {
* counted must list this as a 'friend' and must have a private instance of
* atomic<int> called __refCount.
*/
template<typename T>
template< typename T >
class SharedPtr : public TriviallyCopyable
{
public:
ZT_INLINE SharedPtr() noexcept : m_ptr((T *)0) {}
explicit ZT_INLINE SharedPtr(T *obj) noexcept : m_ptr(obj) { if (likely(obj != nullptr)) ++*const_cast<std::atomic<int> *>(&(obj->__refCount)); }
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept : m_ptr(sp._getAndInc()) {}
ZT_INLINE SharedPtr() noexcept: m_ptr((T *)0)
{}
explicit ZT_INLINE SharedPtr(T *obj) noexcept: m_ptr(obj)
{ if (likely(obj != nullptr)) ++*const_cast<std::atomic< int > *>(&(obj->__refCount)); }
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept: m_ptr(sp._getAndInc())
{}
ZT_INLINE ~SharedPtr()
{
if (likely(m_ptr != nullptr)) {
if (unlikely(--*const_cast<std::atomic<int> *>(&(m_ptr->__refCount)) <= 0))
if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
delete m_ptr;
}
}
@ -47,7 +52,7 @@ public:
if (likely(m_ptr != sp.m_ptr)) {
T *p = sp._getAndInc();
if (likely(m_ptr != nullptr)) {
if (unlikely(--*const_cast<std::atomic<int> *>(&(m_ptr->__refCount)) <= 0))
if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
delete m_ptr;
}
m_ptr = p;
@ -66,7 +71,7 @@ public:
ZT_INLINE void set(T *ptr) noexcept
{
zero();
++*const_cast<std::atomic<int> *>(&(ptr->__refCount));
++*const_cast<std::atomic< int > *>(&(ptr->__refCount));
m_ptr = ptr;
}
@ -77,7 +82,8 @@ public:
*
* @param ptr Pointer to set
*/
ZT_INLINE void unsafeSet(T *ptr) noexcept { m_ptr = ptr; }
ZT_INLINE void unsafeSet(T *ptr) noexcept
{ m_ptr = ptr; }
/**
* Swap with another pointer 'for free' without ref count overhead
@ -102,22 +108,27 @@ public:
ZT_INLINE void move(SharedPtr &from)
{
if (likely(m_ptr != nullptr)) {
if (--*const_cast<std::atomic<int> *>(&(m_ptr->__refCount)) <= 0)
if (--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0)
delete m_ptr;
}
m_ptr = from.m_ptr;
from.m_ptr = nullptr;
}
ZT_INLINE operator bool() const noexcept { return (m_ptr != nullptr); }
ZT_INLINE operator bool() const noexcept
{ return (m_ptr != nullptr); }
ZT_INLINE T &operator*() const noexcept { return *m_ptr; }
ZT_INLINE T *operator->() const noexcept { return m_ptr; }
ZT_INLINE T &operator*() const noexcept
{ return *m_ptr; }
ZT_INLINE T *operator->() const noexcept
{ return m_ptr; }
/**
* @return Raw pointer to held object
*/
ZT_INLINE T *ptr() const noexcept { return m_ptr; }
ZT_INLINE T *ptr() const noexcept
{ return m_ptr; }
/**
* Set this pointer to NULL
@ -125,7 +136,7 @@ public:
ZT_INLINE void zero()
{
if (likely(m_ptr != nullptr)) {
if (unlikely(--*const_cast<std::atomic<int> *>(&(m_ptr->__refCount)) <= 0))
if (unlikely(--*const_cast<std::atomic< int > *>(&(m_ptr->__refCount)) <= 0))
delete m_ptr;
m_ptr = nullptr;
}
@ -145,7 +156,7 @@ public:
{
if (likely(m_ptr != nullptr)) {
int one = 1;
if (const_cast<std::atomic<int> *>(&(m_ptr->__refCount))->compare_exchange_strong(one,(int)0)) {
if (const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->compare_exchange_strong(one, (int)0)) {
delete m_ptr;
m_ptr = nullptr;
return true;
@ -166,28 +177,41 @@ public:
return 0;
}
ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept { return (m_ptr == sp.m_ptr); }
ZT_INLINE bool operator!=(const SharedPtr &sp) const noexcept { return (m_ptr != sp.m_ptr); }
ZT_INLINE bool operator>(const SharedPtr &sp) const noexcept { return (m_ptr > sp.m_ptr); }
ZT_INLINE bool operator<(const SharedPtr &sp) const noexcept { return (m_ptr < sp.m_ptr); }
ZT_INLINE bool operator>=(const SharedPtr &sp) const noexcept { return (m_ptr >= sp.m_ptr); }
ZT_INLINE bool operator<=(const SharedPtr &sp) const noexcept { return (m_ptr <= sp.m_ptr); }
ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept
{ return (m_ptr == sp.m_ptr); }
ZT_INLINE bool operator!=(const SharedPtr &sp) const noexcept
{ return (m_ptr != sp.m_ptr); }
ZT_INLINE bool operator>(const SharedPtr &sp) const noexcept
{ return (m_ptr > sp.m_ptr); }
ZT_INLINE bool operator<(const SharedPtr &sp) const noexcept
{ return (m_ptr < sp.m_ptr); }
ZT_INLINE bool operator>=(const SharedPtr &sp) const noexcept
{ return (m_ptr >= sp.m_ptr); }
ZT_INLINE bool operator<=(const SharedPtr &sp) const noexcept
{ return (m_ptr <= sp.m_ptr); }
private:
ZT_INLINE T *_getAndInc() const noexcept
{
if (m_ptr)
++*const_cast<std::atomic<int> *>(&(m_ptr->__refCount));
++*const_cast<std::atomic< int > *>(&(m_ptr->__refCount));
return m_ptr;
}
T *m_ptr;
};
} // namespace ZeroTier
namespace std {
template<typename T>
ZT_INLINE void swap(ZeroTier::SharedPtr<T> &a,ZeroTier::SharedPtr<T> &b) noexcept { a.swap(b); }
template< typename T >
ZT_INLINE void swap(ZeroTier::SharedPtr< T > &a, ZeroTier::SharedPtr< T > &b) noexcept
{ a.swap(b); }
}
#endif
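
The comment at the top of SharedPtr.hpp spells out the contract for reference counted classes: the class must declare SharedPtr as a friend and hold a private std::atomic< int > named __refCount. A minimal usage sketch with a hypothetical Widget type, assuming the ZeroTier core headers (SharedPtr.hpp and its dependencies) are on the include path:

#include <atomic>
#include "SharedPtr.hpp"

using ZeroTier::SharedPtr;

class Widget
{
    friend class SharedPtr< Widget >;
public:
    Widget() noexcept: value(0), __refCount(0)
    {}
    int value;
private:
    std::atomic< int > __refCount; // managed by SharedPtr, starts at zero
};

int main()
{
    SharedPtr< Widget > a(new Widget()); // takes ownership, reference count becomes 1
    SharedPtr< Widget > b(a);            // copy increments the count to 2
    a.zero();                            // drops one reference, object still alive via b
    return (b) ? 0 : 1;                  // Widget is deleted when b goes out of scope
}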

@ -28,7 +28,7 @@ namespace ZeroTier {
*/
class SymmetricKey
{
friend class SharedPtr<SymmetricKey>;
friend class SharedPtr< SymmetricKey >;
public:
/**
@ -61,21 +61,21 @@ public:
* @param ts Current time
* @param key 48-byte / 384-bit key
*/
explicit ZT_INLINE SymmetricKey(const int64_t ts,const void *const key) noexcept :
explicit ZT_INLINE SymmetricKey(const int64_t ts, const void *const key) noexcept:
secret(),
cipher(key), // AES-256 uses first 256 bits of 384-bit key
m_initialNonce(((((uint64_t)ts / 1000ULL) << 32U) & 0x7fffffff00000000ULL) | (Utils::random() & 0x00000000ffffffffULL)),
m_nonce(m_initialNonce),
__refCount(0)
{
Utils::memoryLock(this,sizeof(SymmetricKey));
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(const_cast<uint8_t *>(secret), key);
Utils::memoryLock(this, sizeof(SymmetricKey));
Utils::copy< ZT_SYMMETRIC_KEY_SIZE >(const_cast<uint8_t *>(secret), key);
}
ZT_INLINE ~SymmetricKey() noexcept
{
Utils::burn(const_cast<uint8_t *>(secret),ZT_SYMMETRIC_KEY_SIZE);
Utils::memoryUnlock(this,sizeof(SymmetricKey));
Utils::burn(const_cast<uint8_t *>(secret), ZT_SYMMETRIC_KEY_SIZE);
Utils::memoryUnlock(this, sizeof(SymmetricKey));
}
/**
@ -85,7 +85,7 @@ public:
* @param receiver Receiving ZeroTier address
* @return Next unique IV for next message
*/
ZT_INLINE uint64_t nextMessage(const Address sender,const Address receiver) noexcept
ZT_INLINE uint64_t nextMessage(const Address sender, const Address receiver) noexcept
{
return m_nonce.fetch_add(1) ^ (((uint64_t)(sender > receiver)) << 63U);
}
@ -100,8 +100,8 @@ public:
private:
const uint64_t m_initialNonce;
std::atomic<uint64_t> m_nonce;
std::atomic<int> __refCount;
std::atomic< uint64_t > m_nonce;
std::atomic< int > __refCount;
};
} // namespace ZeroTier

@ -20,7 +20,7 @@ bool Tag::sign(const Identity &signer) noexcept
uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int) marshal(buf, true), m_signature, sizeof(m_signature));
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
@ -30,16 +30,16 @@ int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign) const noex
{
int p = 0;
if (forSign) {
for (int k = 0;k < 8;++k)
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian< uint64_t >(data + p, m_networkId);
p += 8;
Utils::storeBigEndian<uint64_t>(data + p, (uint64_t) m_ts);
Utils::storeBigEndian< uint64_t >(data + p, (uint64_t)m_ts);
p += 8;
Utils::storeBigEndian<uint32_t>(data + p, m_id);
Utils::storeBigEndian< uint32_t >(data + p, m_id);
p += 4;
Utils::storeBigEndian<uint32_t>(data + p, m_value);
Utils::storeBigEndian< uint32_t >(data + p, m_value);
p += 4;
m_issuedTo.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
@ -47,15 +47,15 @@ int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign) const noex
p += ZT_ADDRESS_LENGTH;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_signatureLength);
Utils::storeBigEndian< uint16_t >(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int) m_signatureLength;
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0;k < 8;++k)
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
@ -65,21 +65,21 @@ int Tag::unmarshal(const uint8_t *data, int len) noexcept
{
if (len < 37)
return -1;
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t) Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 16);
m_value = Utils::loadBigEndian<uint32_t>(data + 20);
m_networkId = Utils::loadBigEndian< uint64_t >(data);
m_ts = (int64_t)Utils::loadBigEndian< uint64_t >(data + 8);
m_id = Utils::loadBigEndian< uint32_t >(data + 16);
m_value = Utils::loadBigEndian< uint32_t >(data + 20);
m_issuedTo.setTo(data + 24);
m_signedBy.setTo(data + 29);
// 1 byte reserved
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 35);
int p = 37 + (int) m_signatureLength;
m_signatureLength = Utils::loadBigEndian< uint16_t >(data + 35);
int p = 37 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
p += 2 + Utils::loadBigEndian< uint16_t >(data + p);
if (p > len)
return -1;
return p;

@ -48,9 +48,11 @@ class Tag : public Credential
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_TAG; }
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_TAG; }
ZT_INLINE Tag() noexcept { memoryZero(this); }
ZT_INLINE Tag() noexcept
{ memoryZero(this); }
/**
* @param nwid Network ID
@ -59,7 +61,7 @@ public:
* @param id Tag ID
* @param value Tag value
*/
ZT_INLINE Tag(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id,const uint32_t value) noexcept :
ZT_INLINE Tag(const uint64_t nwid, const int64_t ts, const Address &issuedTo, const uint32_t id, const uint32_t value) noexcept:
m_id(id),
m_value(value),
m_networkId(nwid),
@ -70,14 +72,29 @@ public:
{
}
ZT_INLINE uint32_t id() const noexcept { return m_id; }
ZT_INLINE const uint32_t &value() const noexcept { return m_value; }
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }
ZT_INLINE uint32_t id() const noexcept
{ return m_id; }
ZT_INLINE const uint32_t &value() const noexcept
{ return m_value; }
ZT_INLINE uint64_t networkId() const noexcept
{ return m_networkId; }
ZT_INLINE int64_t timestamp() const noexcept
{ return m_ts; }
ZT_INLINE const Address &issuedTo() const noexcept
{ return m_issuedTo; }
ZT_INLINE const Address &signer() const noexcept
{ return m_signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept
{ return m_signature; }
ZT_INLINE unsigned int signatureLength() const noexcept
{ return m_signatureLength; }
/**
* Sign this tag
@ -93,30 +110,55 @@ public:
* @param RR Runtime environment to allow identity lookup for signedBy
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const noexcept { return _verify(RR,tPtr,*this); }
ZT_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR, void *tPtr) const noexcept
{ return _verify(RR, tPtr, *this); }
static constexpr int marshalSizeMax() noexcept { return ZT_TAG_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data,int len) noexcept;
static constexpr int marshalSizeMax() noexcept
{ return ZT_TAG_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const Tag &t) const noexcept { return (m_id < t.m_id); }
ZT_INLINE bool operator<(const Tag &t) const noexcept
{ return (m_id < t.m_id); }
ZT_INLINE bool operator==(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) == 0); }
ZT_INLINE bool operator!=(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) != 0); }
ZT_INLINE bool operator==(const Tag &t) const noexcept
{ return (memcmp(this, &t, sizeof(Tag)) == 0); }
ZT_INLINE bool operator!=(const Tag &t) const noexcept
{ return (memcmp(this, &t, sizeof(Tag)) != 0); }
// For searching sorted arrays or lists of Tags by ID
struct IdComparePredicate
{
ZT_INLINE bool operator()(const Tag &a,const Tag &b) const noexcept { return (a.id() < b.id()); }
ZT_INLINE bool operator()(const uint32_t a,const Tag &b) const noexcept { return (a < b.id()); }
ZT_INLINE bool operator()(const Tag &a,const uint32_t b) const noexcept { return (a.id() < b); }
ZT_INLINE bool operator()(const Tag *a,const Tag *b) const noexcept { return (a->id() < b->id()); }
ZT_INLINE bool operator()(const Tag *a,const Tag &b) const noexcept { return (a->id() < b.id()); }
ZT_INLINE bool operator()(const Tag &a,const Tag *b) const noexcept { return (a.id() < b->id()); }
ZT_INLINE bool operator()(const uint32_t a,const Tag *b) const noexcept { return (a < b->id()); }
ZT_INLINE bool operator()(const Tag *a,const uint32_t b) const noexcept { return (a->id() < b); }
ZT_INLINE bool operator()(const uint32_t a,const uint32_t b) const noexcept { return (a < b); }
ZT_INLINE bool operator()(const Tag &a, const Tag &b) const noexcept
{ return (a.id() < b.id()); }
ZT_INLINE bool operator()(const uint32_t a, const Tag &b) const noexcept
{ return (a < b.id()); }
ZT_INLINE bool operator()(const Tag &a, const uint32_t b) const noexcept
{ return (a.id() < b); }
ZT_INLINE bool operator()(const Tag *a, const Tag *b) const noexcept
{ return (a->id() < b->id()); }
ZT_INLINE bool operator()(const Tag *a, const Tag &b) const noexcept
{ return (a->id() < b.id()); }
ZT_INLINE bool operator()(const Tag &a, const Tag *b) const noexcept
{ return (a.id() < b->id()); }
ZT_INLINE bool operator()(const uint32_t a, const Tag *b) const noexcept
{ return (a < b->id()); }
ZT_INLINE bool operator()(const Tag *a, const uint32_t b) const noexcept
{ return (a->id() < b); }
ZT_INLINE bool operator()(const uint32_t a, const uint32_t b) const noexcept
{ return (a < b); }
};
private:

@ -21,7 +21,7 @@ Topology::Topology(const RuntimeEnvironment *renv, void *tPtr) :
uint64_t idtmp[2];
idtmp[0] = 0;
idtmp[1] = 0;
Vector<uint8_t> data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_ROOTS, idtmp));
Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_ROOTS, idtmp));
if (!data.empty()) {
uint8_t *dptr = data.data();
int drem = (int)data.size();
@ -39,10 +39,10 @@ Topology::Topology(const RuntimeEnvironment *renv, void *tPtr) :
m_updateRootPeers(tPtr);
}
SharedPtr<Peer> Topology::add(void *tPtr, const SharedPtr<Peer> &peer)
SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
{
RWMutex::Lock _l(m_peers_l);
SharedPtr<Peer> &hp = m_peers[peer->address()];
SharedPtr< Peer > &hp = m_peers[peer->address()];
if (hp)
return hp;
m_loadCached(tPtr, peer->address(), hp);
@ -54,7 +54,7 @@ SharedPtr<Peer> Topology::add(void *tPtr, const SharedPtr<Peer> &peer)
struct p_RootSortComparisonOperator
{
ZT_INLINE bool operator()(const SharedPtr<Peer> &a, const SharedPtr<Peer> &b) const noexcept
ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
{
// Sort in inverse order of latency with lowest latency first (and -1 last).
const int bb = b->latency();
@ -64,7 +64,7 @@ struct p_RootSortComparisonOperator
}
};
SharedPtr<Peer> Topology::addRoot(void *const tPtr, const Identity &id)
SharedPtr< Peer > Topology::addRoot(void *const tPtr, const Identity &id)
{
if ((id != RR->identity) && id.locallyValidate()) {
RWMutex::Lock l1(m_peers_l);
@ -73,20 +73,20 @@ SharedPtr<Peer> Topology::addRoot(void *const tPtr, const Identity &id)
m_updateRootPeers(tPtr);
m_writeRootList(tPtr);
for(Vector< SharedPtr<Peer> >::const_iterator p(m_rootPeers.begin());p!=m_rootPeers.end();++p) {
for (Vector< SharedPtr< Peer > >::const_iterator p(m_rootPeers.begin()); p != m_rootPeers.end(); ++p) {
if ((*p)->identity() == id)
return *p;
}
}
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
bool Topology::removeRoot(void *const tPtr, Address address)
{
RWMutex::Lock l1(m_peers_l);
for (Vector<SharedPtr<Peer> >::const_iterator r(m_rootPeers.begin());r != m_rootPeers.end();++r) {
for (Vector< SharedPtr< Peer > >::const_iterator r(m_rootPeers.begin()); r != m_rootPeers.end(); ++r) {
if ((*r)->address() == address) {
Set<Identity>::iterator rr(m_roots.find((*r)->identity()));
Set< Identity >::iterator rr(m_roots.find((*r)->identity()));
if (rr != m_roots.end()) {
m_roots.erase(rr);
m_updateRootPeers(tPtr);
@ -109,7 +109,7 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
// Delete peers that haven't said anything in ZT_PEER_ALIVE_TIMEOUT.
{
RWMutex::Lock l1(m_peers_l);
for (Map<Address, SharedPtr<Peer> >::iterator i(m_peers.begin());i != m_peers.end();) {
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();) {
// TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
// a network frame or non-trivial control packet.
if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (m_roots.count(i->second->identity()) == 0)) {
@ -122,7 +122,7 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
// Delete paths that are no longer held by anyone else ("weak reference" type behavior).
{
RWMutex::Lock l1(m_paths_l);
for (Map<uint64_t, SharedPtr<Path> >::iterator i(m_paths.begin());i != m_paths.end();) {
for (Map< uint64_t, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end();) {
if (i->second.weakGC())
m_paths.erase(i++);
else ++i;
@ -133,22 +133,22 @@ void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
void Topology::saveAll(void *tPtr)
{
RWMutex::RLock l(m_peers_l);
for (Map<Address, SharedPtr<Peer> >::iterator i(m_peers.begin());i != m_peers.end();++i)
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
i->second->save(tPtr);
}
void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr<Peer> &peer)
void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer)
{
try {
uint64_t id[2];
id[0] = zta.toInt();
id[1] = 0;
Vector<uint8_t> data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id));
Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id));
if (data.size() > 8) {
const uint8_t *d = data.data();
int dl = (int)data.size();
const int64_t ts = (int64_t)Utils::loadBigEndian<uint64_t>(d);
const int64_t ts = (int64_t)Utils::loadBigEndian< uint64_t >(d);
Peer *const p = new Peer(RR);
int n = p->unmarshal(d + 8, dl - 8);
if (n < 0) {
@ -172,7 +172,7 @@ void Topology::m_writeRootList(void *tPtr)
uint8_t *const roots = (uint8_t *)malloc((ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 2) * m_roots.size());
if (roots) { // sanity check
int p = 0;
for (Set<Identity>::const_iterator r(m_roots.begin());r != m_roots.end();++r) {
for (Set< Identity >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
const int pp = r->marshal(roots + p, false);
if (pp > 0)
p += pp;
@ -188,10 +188,10 @@ void Topology::m_writeRootList(void *tPtr)
void Topology::m_updateRootPeers(void *tPtr)
{
// assumes m_peers_l is locked for write
Vector<SharedPtr<Peer> > rp;
for (Set<Identity>::iterator r(m_roots.begin());r != m_roots.end();++r) {
Map<Address, SharedPtr<Peer> >::iterator pp(m_peers.find(r->address()));
SharedPtr<Peer> p;
Vector< SharedPtr< Peer > > rp;
for (Set< Identity >::iterator r(m_roots.begin()); r != m_roots.end(); ++r) {
Map< Address, SharedPtr< Peer > >::iterator pp(m_peers.find(r->address()));
SharedPtr< Peer > p;
if (pp != m_peers.end())
p = pp->second;

@ -36,7 +36,7 @@ class RuntimeEnvironment;
class Topology
{
public:
Topology(const RuntimeEnvironment *renv,void *tPtr);
Topology(const RuntimeEnvironment *renv, void *tPtr);
/**
* Add peer to database
@ -47,7 +47,7 @@ public:
* @param peer Peer to add
* @return New or existing peer (should replace 'peer')
*/
SharedPtr<Peer> add(void *tPtr,const SharedPtr<Peer> &peer);
SharedPtr< Peer > add(void *tPtr, const SharedPtr< Peer > &peer);
/**
* Get a peer from its address
@ -57,21 +57,21 @@ public:
* @param loadFromCached If false do not load from cache if not in memory (default: true)
* @return Peer or NULL if not found
*/
ZT_INLINE SharedPtr<Peer> peer(void *tPtr,const Address &zta,const bool loadFromCached = true)
ZT_INLINE SharedPtr< Peer > peer(void *tPtr, const Address &zta, const bool loadFromCached = true)
{
{
RWMutex::RLock l(m_peers_l);
const SharedPtr<Peer> *const ap = m_peers.get(zta);
const SharedPtr< Peer > *const ap = m_peers.get(zta);
if (likely(ap != nullptr))
return *ap;
}
{
SharedPtr<Peer> p;
SharedPtr< Peer > p;
if (loadFromCached) {
m_loadCached(tPtr, zta, p);
if (p) {
RWMutex::Lock l(m_peers_l);
SharedPtr<Peer> &hp = m_peers[zta];
SharedPtr< Peer > &hp = m_peers[zta];
if (hp)
return hp;
hp = p;
@ -88,19 +88,19 @@ public:
* @param r Remote address
* @return Pointer to canonicalized Path object or NULL on error
*/
ZT_INLINE SharedPtr<Path> path(const int64_t l,const InetAddress &r)
ZT_INLINE SharedPtr< Path > path(const int64_t l, const InetAddress &r)
{
const uint64_t k = s_getPathKey(l, r);
{
RWMutex::RLock lck(m_paths_l);
SharedPtr<Path> *const p = m_paths.get(k);
SharedPtr< Path > *const p = m_paths.get(k);
if (likely(p != nullptr))
return *p;
}
{
SharedPtr<Path> p(new Path(l,r));
SharedPtr< Path > p(new Path(l, r));
RWMutex::Lock lck(m_paths_l);
SharedPtr<Path> &p2 = m_paths[k];
SharedPtr< Path > &p2 = m_paths[k];
if (p2)
return p2;
p2 = p;
@ -111,11 +111,11 @@ public:
/**
* @return Current best root server
*/
ZT_INLINE SharedPtr<Peer> root() const
ZT_INLINE SharedPtr< Peer > root() const
{
RWMutex::RLock l(m_peers_l);
if (unlikely(m_rootPeers.empty()))
return SharedPtr<Peer>();
return SharedPtr< Peer >();
return m_rootPeers.front();
}
@ -138,35 +138,35 @@ public:
* @param f Function to apply
* @tparam F Function or function object type
*/
template<typename F>
template< typename F >
ZT_INLINE void eachPeer(F f) const
{
RWMutex::RLock l(m_peers_l);
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i)
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
f(i->second);
}
/**
* @param allPeers vector to fill with all current peers
*/
ZT_INLINE void getAllPeers(Vector< SharedPtr<Peer> > &allPeers) const
ZT_INLINE void getAllPeers(Vector< SharedPtr< Peer > > &allPeers) const
{
allPeers.clear();
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i)
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
}
/**
* @param allPeers vector to fill with all current peers
*/
ZT_INLINE void getAllPeers(Vector< SharedPtr<Peer> > &allPeers,Vector< SharedPtr<Peer> > &rootPeers) const
ZT_INLINE void getAllPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
{
allPeers.clear();
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i)
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
rootPeers = m_rootPeers;
}
@ -178,7 +178,7 @@ public:
* @param id Root identity (will be locally validated)
* @return Root peer or NULL if some problem occurred
*/
SharedPtr<Peer> addRoot(void *tPtr, const Identity &id);
SharedPtr< Peer > addRoot(void *tPtr, const Identity &id);
/**
* Remove a root server's identity from the root server set
@ -199,7 +199,7 @@ public:
/**
* Do periodic tasks such as database cleanup
*/
void doPeriodicTasks(void *tPtr,int64_t now);
void doPeriodicTasks(void *tPtr, int64_t now);
/**
* Save all currently known peers to data store
@ -207,12 +207,14 @@ public:
void saveAll(void *tPtr);
private:
void m_loadCached(void *tPtr, const Address &zta, SharedPtr<Peer> &peer);
void m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer);
void m_writeRootList(void *tPtr);
void m_updateRootPeers(void *tPtr);
// This gets an integer key from an InetAddress for looking up paths.
static ZT_INLINE uint64_t s_getPathKey(const int64_t l,const InetAddress &r) noexcept
static ZT_INLINE uint64_t s_getPathKey(const int64_t l, const InetAddress &r) noexcept
{
// SECURITY: these will be used as keys in a Map<> which uses its own hasher that
// mixes in a per-invocation secret to work against hash collision attacks. See the
@ -223,20 +225,20 @@ private:
if (r.family() == AF_INET) {
return ((uint64_t)(r.as.sa_in.sin_addr.s_addr) << 32U) ^ ((uint64_t)r.as.sa_in.sin_port << 16U) ^ (uint64_t)l;
} else if (r.family() == AF_INET6) {
return Utils::loadAsIsEndian<uint64_t>(r.as.sa_in6.sin6_addr.s6_addr) + Utils::loadAsIsEndian<uint64_t>(r.as.sa_in6.sin6_addr.s6_addr + 8) + (uint64_t)r.as.sa_in6.sin6_port + (uint64_t)l;
return Utils::loadAsIsEndian< uint64_t >(r.as.sa_in6.sin6_addr.s6_addr) + Utils::loadAsIsEndian< uint64_t >(r.as.sa_in6.sin6_addr.s6_addr + 8) + (uint64_t)r.as.sa_in6.sin6_port + (uint64_t)l;
} else {
// This should never really be used but it's here just in case.
return (uint64_t)Utils::fnv1a32(reinterpret_cast<const void *>(&r),sizeof(InetAddress)) + (uint64_t)l;
return (uint64_t)Utils::fnv1a32(reinterpret_cast<const void *>(&r), sizeof(InetAddress)) + (uint64_t)l;
}
}
const RuntimeEnvironment *const RR;
RWMutex m_paths_l; // locks m_paths
RWMutex m_peers_l; // locks m_peers, m_roots, and m_rootPeers
Map< uint64_t,SharedPtr<Path> > m_paths;
Map< Address,SharedPtr<Peer> > m_peers;
Map< uint64_t, SharedPtr< Path > > m_paths;
Map< Address, SharedPtr< Peer > > m_peers;
Set< Identity > m_roots;
Vector< SharedPtr<Peer> > m_rootPeers;
Vector< SharedPtr< Peer > > m_rootPeers;
};
} // namespace ZeroTier
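
s_getPathKey() above folds the local socket, remote address, and remote port into a single 64-bit lookup key; as the SECURITY note says, collision resistance comes from the Map<> hasher's per-invocation secret rather than from this function itself. A simplified standalone sketch of the IPv4 case, using a plain sockaddr_in instead of InetAddress (hypothetical names, POSIX sockets assumed, not code from this commit):

#include <cstdint>
#include <cstdio>
#include <arpa/inet.h>
#include <netinet/in.h>

// Same shape as the AF_INET branch of Topology::s_getPathKey(): shift the raw
// 32-bit address up, then XOR in the port and the local socket handle.
static uint64_t pathKeyV4(const int64_t localSocket, const sockaddr_in &r) noexcept
{
    return ((uint64_t)r.sin_addr.s_addr << 32U) ^ ((uint64_t)r.sin_port << 16U) ^ (uint64_t)localSocket;
}

int main()
{
    sockaddr_in a;
    a.sin_family = AF_INET;
    a.sin_port = htons(9993);
    inet_pton(AF_INET, "198.51.100.7", &a.sin_addr);
    std::printf("%016llx\n", (unsigned long long)pathKeyV4(42, a));
    return 0;
}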

@ -17,9 +17,11 @@
#include "SHA512.hpp"
#ifdef __UNIX_LIKE__
#include <unistd.h>
#include <fcntl.h>
#include <sys/uio.h>
#endif
#include <time.h>
@ -33,9 +35,10 @@ namespace ZeroTier {
namespace Utils {
#ifdef ZT_ARCH_X64
CPUIDRegisters::CPUIDRegisters() noexcept
{
uint32_t eax,ebx,ecx,edx;
uint32_t eax, ebx, ecx, edx;
#ifdef __WINDOWS__
int regs[4];
@ -46,14 +49,14 @@ CPUIDRegisters::CPUIDRegisters() noexcept
edx = (uint32_t)regs[3];
#else
__asm__ __volatile__ (
"cpuid"
: "=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx)
: "a"(1),"c"(0)
"cpuid"
: "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
: "a"(1), "c"(0)
);
#endif
rdrand = ((ecx & (1U << 30U)) != 0);
aes = ( ((ecx & (1U << 25U)) != 0) && ((ecx & (1U << 19U)) != 0) && ((ecx & (1U << 1U)) != 0) );
aes = (((ecx & (1U << 25U)) != 0) && ((ecx & (1U << 19U)) != 0) && ((ecx & (1U << 1U)) != 0));
avx = ((ecx & (1U << 25U)) != 0);
#ifdef __WINDOWS__
@ -64,9 +67,9 @@ CPUIDRegisters::CPUIDRegisters() noexcept
edx = (uint32_t)regs[3];
#else
__asm__ __volatile__ (
"cpuid"
: "=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx)
: "a"(7),"c"(0)
"cpuid"
: "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
: "a"(7), "c"(0)
);
#endif
@ -77,52 +80,57 @@ CPUIDRegisters::CPUIDRegisters() noexcept
sha = ((ebx & (1U << 29U)) != 0);
fsrm = sha = ((edx & (1U << 4U)) != 0);
}
const CPUIDRegisters CPUID;
#endif
const uint64_t ZERO256[4] = { 0,0,0,0 };
const char HEXCHARS[16] = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f' };
const uint64_t ZERO256[4] = {0, 0, 0, 0};
const char HEXCHARS[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
const uint64_t s_mapNonce = getSecureRandomU64();
bool secureEq(const void *a,const void *b,unsigned int len) noexcept
bool secureEq(const void *a, const void *b, unsigned int len) noexcept
{
uint8_t diff = 0;
for(unsigned int i=0;i<len;++i)
diff |= ( (reinterpret_cast<const uint8_t *>(a))[i] ^ (reinterpret_cast<const uint8_t *>(b))[i] );
for (unsigned int i = 0; i < len; ++i)
diff |= ((reinterpret_cast<const uint8_t *>(a))[i] ^ (reinterpret_cast<const uint8_t *>(b))[i]);
return (diff == 0);
}
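
secureEq() ORs the XOR of every byte pair and tests the result once at the end, so the comparison takes the same time whether the buffers differ in the first byte or the last, which is what you want when comparing MAC tags. A usage sketch, assuming Utils.hpp from this tree is on the include path:

    #include "Utils.hpp"
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint8_t expected[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
        uint8_t received[16]       = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
        std::printf("match: %d\n", (int)ZeroTier::Utils::secureEq(expected, received, sizeof(expected)));
        received[15] ^= 0x01; // flip one bit
        std::printf("match: %d\n", (int)ZeroTier::Utils::secureEq(expected, received, sizeof(expected)));
        return 0;
    }
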
// Crazy hack to force memory to be securely zeroed in spite of the best efforts of optimizing compilers.
static void _Utils_doBurn(volatile uint8_t *ptr,unsigned int len)
static void _Utils_doBurn(volatile uint8_t *ptr, unsigned int len)
{
for(unsigned int i=0;i<len;++i)
for (unsigned int i = 0; i < len; ++i)
ptr[i] = 0;
}
static void (*volatile _Utils_doBurn_ptr)(volatile uint8_t *,unsigned int) = _Utils_doBurn;
void burn(void *ptr,unsigned int len) { (_Utils_doBurn_ptr)((volatile uint8_t *)ptr,len); }
static unsigned long _Utils_itoa(unsigned long n,char *s)
static void (*volatile _Utils_doBurn_ptr)(volatile uint8_t *, unsigned int) = _Utils_doBurn;
void burn(void *ptr, unsigned int len)
{ (_Utils_doBurn_ptr)((volatile uint8_t *)ptr, len); }
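
The point of the volatile function pointer is that the compiler cannot prove the zeroing is a dead store and delete it, which a plain memset() right before a free or return is otherwise allowed to become. A self-contained miniature of the same idiom (names are illustrative):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    static void zeroBytes(volatile uint8_t *p, unsigned int len)
    {
        for (unsigned int i = 0; i < len; ++i)
            p[i] = 0;
    }

    // The call goes through a volatile pointer, so the optimizer must keep it.
    static void (*volatile zeroBytesPtr)(volatile uint8_t *, unsigned int) = zeroBytes;

    int main()
    {
        uint8_t secret[32];
        std::memset(secret, 0xa5, sizeof(secret));
        // ... use secret ...
        (zeroBytesPtr)((volatile uint8_t *)secret, sizeof(secret)); // wipe, not optimized away
        std::printf("first byte after wipe: %u\n", (unsigned)secret[0]);
        return 0;
    }
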
static unsigned long _Utils_itoa(unsigned long n, char *s)
{
if (n == 0)
return 0;
unsigned long pos = _Utils_itoa(n / 10,s);
unsigned long pos = _Utils_itoa(n / 10, s);
	if (pos >= 22) // sanity check, should be impossible
pos = 22;
s[pos] = (char)('0' + (n % 10));
return pos + 1;
}
char *decimal(unsigned long n,char s[24]) noexcept
char *decimal(unsigned long n, char s[24]) noexcept
{
if (n == 0) {
s[0] = '0';
s[1] = (char)0;
return s;
}
s[_Utils_itoa(n,s)] = (char)0;
s[_Utils_itoa(n, s)] = (char)0;
return s;
}
char *hex(uint64_t i,char buf[17]) noexcept
char *hex(uint64_t i, char buf[17]) noexcept
{
if (i) {
char *p = buf + 16;
@ -149,11 +157,11 @@ uint64_t unhex(const char *s) noexcept
if (!hc) break;
uint8_t c = 0;
if ((hc >= 48)&&(hc <= 57))
if ((hc >= 48) && (hc <= 57))
c = (uint8_t)hc - 48;
else if ((hc >= 97)&&(hc <= 102))
else if ((hc >= 97) && (hc <= 102))
c = (uint8_t)hc - 87;
else if ((hc >= 65)&&(hc <= 70))
else if ((hc >= 65) && (hc <= 70))
c = (uint8_t)hc - 55;
n <<= 4U;
@ -164,10 +172,10 @@ uint64_t unhex(const char *s) noexcept
return n;
}
char *hex(const void *d,unsigned int l,char *s) noexcept
char *hex(const void *d, unsigned int l, char *s) noexcept
{
char *const save = s;
for(unsigned int i=0;i<l;++i) {
for (unsigned int i = 0; i < l; ++i) {
const unsigned int b = reinterpret_cast<const uint8_t *>(d)[i];
*(s++) = HEXCHARS[b >> 4U];
*(s++) = HEXCHARS[b & 0xfU];
@ -176,7 +184,7 @@ char *hex(const void *d,unsigned int l,char *s) noexcept
return save;
}
unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen) noexcept
unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buflen) noexcept
{
unsigned int l = 0;
const char *hend = h + hlen;
@ -186,11 +194,11 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
if (!hc) break;
uint8_t c = 0;
if ((hc >= 48)&&(hc <= 57))
if ((hc >= 48) && (hc <= 57))
c = hc - 48;
else if ((hc >= 97)&&(hc <= 102))
else if ((hc >= 97) && (hc <= 102))
c = hc - 87;
else if ((hc >= 65)&&(hc <= 70))
else if ((hc >= 65) && (hc <= 70))
c = hc - 55;
if (h == hend) break;
@ -198,11 +206,11 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
if (!hc) break;
c <<= 4U;
if ((hc >= 48)&&(hc <= 57))
if ((hc >= 48) && (hc <= 57))
c |= hc - 48;
else if ((hc >= 97)&&(hc <= 102))
else if ((hc >= 97) && (hc <= 102))
c |= hc - 87;
else if ((hc >= 65)&&(hc <= 70))
else if ((hc >= 65) && (hc <= 70))
c |= hc - 55;
reinterpret_cast<uint8_t *>(buf)[l++] = c;
@ -213,7 +221,7 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
#define ZT_GETSECURERANDOM_STATE_SIZE 64
#define ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR 1048576
void getSecureRandom(void *const buf,unsigned int bytes) noexcept
void getSecureRandom(void *const buf, unsigned int bytes) noexcept
{
static Mutex globalLock;
static bool initialized = false;
@ -230,10 +238,10 @@ void getSecureRandom(void *const buf,unsigned int bytes) noexcept
initialized = true;
// Don't let randomState be swapped to disk (if supported by OS).
Utils::memoryLock(randomState,sizeof(randomState));
Utils::memoryLock(randomState, sizeof(randomState));
// Fill randomState with entropy from the system. Failure equals hard exit.
Utils::zero<sizeof(randomState)>(randomState);
Utils::zero< sizeof(randomState) >(randomState);
#ifdef __WINDOWS__
HCRYPTPROV cryptProvider = NULL;
if (!CryptAcquireContextA(&cryptProvider,NULL,NULL,PROV_RSA_FULL,CRYPT_VERIFYCONTEXT|CRYPT_SILENT)) {
@ -246,14 +254,14 @@ void getSecureRandom(void *const buf,unsigned int bytes) noexcept
}
CryptReleaseContext(cryptProvider,0);
#else
int devURandomFd = ::open("/dev/urandom",O_RDONLY);
int devURandomFd = ::open("/dev/urandom", O_RDONLY);
if (devURandomFd < 0) {
fprintf(stderr,"FATAL: Utils::getSecureRandom() unable to open /dev/urandom\n");
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to open /dev/urandom\n");
exit(1);
}
if ((long)::read(devURandomFd,randomState,sizeof(randomState)) != (long)sizeof(randomState)) {
if ((long)::read(devURandomFd, randomState, sizeof(randomState)) != (long)sizeof(randomState)) {
::close(devURandomFd);
fprintf(stderr,"FATAL: Utils::getSecureRandom() unable to read from /dev/urandom\n");
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to read from /dev/urandom\n");
exit(1);
}
close(devURandomFd);
@ -269,7 +277,7 @@ void getSecureRandom(void *const buf,unsigned int bytes) noexcept
#ifdef ZT_ARCH_X64
if (CPUID.rdrand) {
uint64_t tmp = 0;
for(int k=0;k<ZT_GETSECURERANDOM_STATE_SIZE;++k) {
for (int k = 0; k < ZT_GETSECURERANDOM_STATE_SIZE; ++k) {
_rdrand64_step((unsigned long long *)&tmp);
randomState[k] ^= tmp;
}
@ -281,7 +289,7 @@ void getSecureRandom(void *const buf,unsigned int bytes) noexcept
// replacing the first 64 bytes with this hash, and then re-initializing
// AES with the first 32 bytes.
randomByteCounter = 0;
SHA512(randomState,randomState,sizeof(randomState));
SHA512(randomState, randomState, sizeof(randomState));
randomGen.init(randomState);
}
@ -292,56 +300,56 @@ void getSecureRandom(void *const buf,unsigned int bytes) noexcept
uint8_t *out = reinterpret_cast<uint8_t *>(buf);
while (bytes >= 16) {
++*ctr;
randomGen.encrypt(ctr,out);
randomGen.encrypt(ctr, out);
out += 16;
bytes -= 16;
}
if (bytes > 0) {
uint8_t tmp[16];
++*ctr;
randomGen.encrypt(ctr,tmp);
for(unsigned int i=0;i<bytes;++i)
randomGen.encrypt(ctr, tmp);
for (unsigned int i = 0; i < bytes; ++i)
out[i] = tmp[i];
Utils::burn(tmp,sizeof(tmp)); // don't leave used cryptographic randomness lying around!
Utils::burn(tmp, sizeof(tmp)); // don't leave used cryptographic randomness lying around!
}
}
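
getSecureRandom() seeds a locked-in-RAM state buffer from the OS (CryptGenRandom on Windows, /dev/urandom elsewhere), optionally XORs in RDRAND words, then expands it with AES-CTR and periodically re-keys through SHA-512. A POSIX-only sketch of just the seeding step, keeping the same treat-failure-as-fatal policy (it does not reproduce the AES-CTR expansion or RDRAND mixing):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <fcntl.h>
    #include <unistd.h>

    static void fillFromUrandom(void *buf, size_t len)
    {
        const int fd = ::open("/dev/urandom", O_RDONLY);
        if (fd < 0) {
            std::fprintf(stderr, "FATAL: cannot open /dev/urandom\n");
            std::exit(1);
        }
        size_t done = 0;
        while (done < len) {
            const ssize_t n = ::read(fd, (uint8_t *)buf + done, len - done);
            if (n <= 0) {
                ::close(fd);
                std::fprintf(stderr, "FATAL: short read from /dev/urandom\n");
                std::exit(1);
            }
            done += (size_t)n;
        }
        ::close(fd);
    }

    int main()
    {
        uint64_t state[8];
        fillFromUrandom(state, sizeof(state));
        std::printf("seed word 0: %016llx\n", (unsigned long long)state[0]);
        return 0;
    }
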
uint64_t getSecureRandomU64() noexcept
{
uint64_t tmp = 0;
getSecureRandom(&tmp,sizeof(tmp));
getSecureRandom(&tmp, sizeof(tmp));
return tmp;
}
int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept
int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept
{
if (length < 0 || length > (1 << 28U)) {
if (length < 0 || length > (1 << 28U)) {
result[0] = (char)0;
return -1;
return -1;
}
int count = 0;
if (length > 0) {
int buffer = data[0];
int next = 1;
int bitsLeft = 8;
while (count < bufSize && (bitsLeft > 0 || next < length)) {
if (bitsLeft < 5) {
if (next < length) {
buffer <<= 8U;
buffer |= data[next++] & 0xffU;
bitsLeft += 8;
} else {
int pad = 5 - bitsLeft;
buffer <<= pad;
bitsLeft += pad;
}
}
int index = 0x1f & (buffer >> (unsigned int)(bitsLeft - 5));
bitsLeft -= 5;
result[count++] = "abcdefghijklmnopqrstuvwxyz234567"[index];
}
}
if (count < bufSize) {
if (length > 0) {
int buffer = data[0];
int next = 1;
int bitsLeft = 8;
while (count < bufSize && (bitsLeft > 0 || next < length)) {
if (bitsLeft < 5) {
if (next < length) {
buffer <<= 8U;
buffer |= data[next++] & 0xffU;
bitsLeft += 8;
} else {
int pad = 5 - bitsLeft;
buffer <<= pad;
bitsLeft += pad;
}
}
int index = 0x1f & (buffer >> (unsigned int)(bitsLeft - 5));
bitsLeft -= 5;
result[count++] = "abcdefghijklmnopqrstuvwxyz234567"[index];
}
}
if (count < bufSize) {
result[count] = (char)0;
return count;
}
@ -349,44 +357,44 @@ int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept
return -1;
}
int b32d(const char *encoded,uint8_t *result,int bufSize) noexcept
int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept
{
int buffer = 0;
int bitsLeft = 0;
int count = 0;
for (const uint8_t *ptr = (const uint8_t *)encoded;count<bufSize && *ptr; ++ptr) {
uint8_t ch = *ptr;
if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' || ch == '-' || ch == '.') {
continue;
}
buffer <<= 5;
int buffer = 0;
int bitsLeft = 0;
int count = 0;
for (const uint8_t *ptr = (const uint8_t *)encoded; count < bufSize && *ptr; ++ptr) {
uint8_t ch = *ptr;
if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' || ch == '-' || ch == '.') {
continue;
}
buffer <<= 5;
if (ch == '0') {
ch = 'O';
} else if (ch == '1') {
ch = 'L';
} else if (ch == '8') {
ch = 'B';
}
if (ch == '0') {
ch = 'O';
} else if (ch == '1') {
ch = 'L';
} else if (ch == '8') {
ch = 'B';
}
if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')) {
ch = (ch & 0x1f) - 1;
} else if (ch >= '2' && ch <= '7') {
ch -= '2' - 26;
} else {
return -1;
}
if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')) {
ch = (ch & 0x1f) - 1;
} else if (ch >= '2' && ch <= '7') {
ch -= '2' - 26;
} else {
return -1;
}
buffer |= ch;
bitsLeft += 5;
if (bitsLeft >= 8) {
result[count++] = buffer >> (bitsLeft - 8);
bitsLeft -= 8;
}
}
if (count < bufSize)
result[count] = (uint8_t)0;
return count;
buffer |= ch;
bitsLeft += 5;
if (bitsLeft >= 8) {
result[count++] = buffer >> (bitsLeft - 8);
bitsLeft -= 8;
}
}
if (count < bufSize)
result[count] = (uint8_t)0;
return count;
}
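
b32e() emits the lowercase alphabet "abcdefghijklmnopqrstuvwxyz234567"; b32d() additionally maps the look-alikes 0, 1, and 8 to O, L, and B and skips whitespace, dashes, and dots, so hand-typed strings still decode. A round-trip usage sketch, assuming Utils.hpp from this tree is available:

    #include "Utils.hpp"
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        const uint8_t raw[5] = {0xde, 0xad, 0xbe, 0xef, 0x42};
        char encoded[32];
        const int elen = ZeroTier::Utils::b32e(raw, (int)sizeof(raw), encoded, (int)sizeof(encoded));
        if (elen < 0)
            return 1;
        uint8_t decoded[8];
        const int dlen = ZeroTier::Utils::b32d(encoded, decoded, (int)sizeof(decoded));
        std::printf("encoded: %s, round trip ok: %d\n", encoded,
                    (int)((dlen == (int)sizeof(raw)) && (std::memcmp(raw, decoded, sizeof(raw)) == 0)));
        return 0;
    }
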
uint64_t random() noexcept
@ -403,14 +411,14 @@ uint64_t random() noexcept
uint64_t s2 = s_s2;
uint64_t s3 = s_s3;
const uint64_t s1x5 = s1 * 5;
const uint64_t result = ((s1x5 << 7U)|(s1x5 >> 57U)) * 9;
const uint64_t result = ((s1x5 << 7U) | (s1x5 >> 57U)) * 9;
const uint64_t t = s1 << 17U;
s2 ^= s0;
s3 ^= s1;
s1 ^= s2;
s0 ^= s3;
s2 ^= t;
s3 = ((s3 << 45U)|(s3 >> 19U));
s3 = ((s3 << 45U) | (s3 >> 19U));
s_s0 = s0;
s_s1 = s1;
s_s2 = s2;
@ -419,7 +427,7 @@ uint64_t random() noexcept
return result;
}
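
random() above is the xoshiro256** update (output rotl(s1 * 5, 7) * 9 plus the usual xor-shift state transition); it is non-cryptographic, so getSecureRandom() remains the source for anything key-related. A self-contained sketch of the same update rule:

    #include <cstdint>
    #include <cstdio>

    struct Xoshiro256ss
    {
        uint64_t s[4];

        static uint64_t rotl(const uint64_t x, const unsigned int r) noexcept
        { return (x << r) | (x >> (64U - r)); }

        uint64_t next() noexcept
        {
            const uint64_t result = rotl(s[1] * 5U, 7U) * 9U;
            const uint64_t t = s[1] << 17U;
            s[2] ^= s[0];
            s[3] ^= s[1];
            s[1] ^= s[2];
            s[0] ^= s[3];
            s[2] ^= t;
            s[3] = rotl(s[3], 45U);
            return result;
        }
    };

    int main()
    {
        // Any nonzero seed works; real code should seed from a unique or secure source.
        Xoshiro256ss rng{{0x9e3779b97f4a7c15ULL, 0xbf58476d1ce4e5b9ULL, 0x94d049bb133111ebULL, 0x2545f4914f6cdd1dULL}};
        for (int i = 0; i < 4; ++i)
            std::printf("%016llx\n", (unsigned long long)rng.next());
        return 0;
    }
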
bool scopy(char *const dest,const unsigned int len,const char *const src) noexcept
bool scopy(char *const dest, const unsigned int len, const char *const src) noexcept
{
if (!len)
return false; // sanity check
@ -428,7 +436,7 @@ bool scopy(char *const dest,const unsigned int len,const char *const src) noexce
return true;
}
unsigned int i = 0;
for(;;) {
for (;;) {
if (i >= len) {
dest[len - 1] = 0;
return false;


@ -25,21 +25,23 @@ namespace ZeroTier {
namespace Utils {
#ifndef __WINDOWS__
#include <sys/mman.h>
#endif
// Macros to convert endian-ness at compile time for constants.
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ZT_CONST_TO_BE_UINT16(x) ((uint16_t)((uint16_t)((uint16_t)(x) << 8U) | (uint16_t)((uint16_t)(x) >> 8U)))
#define ZT_CONST_TO_BE_UINT64(x) ( \
(((uint64_t)(x) & 0x00000000000000ffULL) << 56U) | \
(((uint64_t)(x) & 0x000000000000ff00ULL) << 40U) | \
(((uint64_t)(x) & 0x0000000000ff0000ULL) << 24U) | \
(((uint64_t)(x) & 0x00000000ff000000ULL) << 8U) | \
(((uint64_t)(x) & 0x000000ff00000000ULL) >> 8U) | \
(((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24U) | \
(((uint64_t)(x) & 0x00ff000000000000ULL) >> 40U) | \
(((uint64_t)(x) & 0xff00000000000000ULL) >> 56U))
(((uint64_t)(x) & 0x00000000000000ffULL) << 56U) | \
(((uint64_t)(x) & 0x000000000000ff00ULL) << 40U) | \
(((uint64_t)(x) & 0x0000000000ff0000ULL) << 24U) | \
(((uint64_t)(x) & 0x00000000ff000000ULL) << 8U) | \
(((uint64_t)(x) & 0x000000ff00000000ULL) >> 8U) | \
(((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24U) | \
(((uint64_t)(x) & 0x00ff000000000000ULL) >> 40U) | \
(((uint64_t)(x) & 0xff00000000000000ULL) >> 56U))
#else
#define ZT_CONST_TO_BE_UINT16(x) ((uint16_t)(x))
#define ZT_CONST_TO_BE_UINT64(x) ((uint64_t)(x))
@ -51,9 +53,11 @@ namespace Utils {
#define ZT_ROL32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
#ifdef ZT_ARCH_X64
struct CPUIDRegisters
{
CPUIDRegisters() noexcept;
bool rdrand;
bool aes;
bool avx;
@ -64,6 +68,7 @@ struct CPUIDRegisters
bool sha;
bool fsrm;
};
extern const CPUIDRegisters CPUID;
#endif
@ -92,10 +97,10 @@ extern const uint64_t s_mapNonce;
* @param p Memory to lock
* @param l Size of memory
*/
static ZT_INLINE void memoryLock(const void *const p,const unsigned int l) noexcept
static ZT_INLINE void memoryLock(const void *const p, const unsigned int l) noexcept
{
#ifndef __WINDOWS__
mlock(p,l);
mlock(p, l);
#endif
}
@ -105,10 +110,10 @@ static ZT_INLINE void memoryLock(const void *const p,const unsigned int l) noexc
* @param p Memory to unlock
* @param l Size of memory
*/
static ZT_INLINE void memoryUnlock(const void *const p,const unsigned int l) noexcept
static ZT_INLINE void memoryUnlock(const void *const p, const unsigned int l) noexcept
{
#ifndef __WINDOWS__
munlock(p,l);
munlock(p, l);
#endif
}
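
memoryLock()/memoryUnlock() are thin mlock()/munlock() wrappers used to keep secrets such as the CSPRNG state out of swap; the return value is ignored because locking can fail under RLIMIT_MEMLOCK and is treated as best effort. A POSIX usage sketch:

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main()
    {
        uint8_t key[64];
        const bool pinned = (mlock(key, sizeof(key)) == 0); // best effort
        std::memset(key, 0x5a, sizeof(key)); // stand-in for real key material
        // ... use key ...
        std::memset(key, 0, sizeof(key));    // a real wipe would go through burn()
        if (pinned)
            munlock(key, sizeof(key));
        std::printf("pinned: %d\n", (int)pinned);
        return 0;
    }
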
@ -120,7 +125,7 @@ static ZT_INLINE void memoryUnlock(const void *const p,const unsigned int l) noe
* @param len Length of strings
* @return True if strings are equal
*/
bool secureEq(const void *a,const void *b,unsigned int len) noexcept;
bool secureEq(const void *a, const void *b, unsigned int len) noexcept;
/**
* Be absolutely sure to zero memory
@ -130,14 +135,14 @@ bool secureEq(const void *a,const void *b,unsigned int len) noexcept;
* @param ptr Memory to zero
* @param len Length of memory in bytes
*/
void burn(void *ptr,unsigned int len);
void burn(void *ptr, unsigned int len);
/**
* @param n Number to convert
* @param s Buffer, at least 24 bytes in size
* @return String containing 'n' in base 10 form
*/
char *decimal(unsigned long n,char s[24]) noexcept;
char *decimal(unsigned long n, char s[24]) noexcept;
/**
* Convert an unsigned integer into hex
@ -149,7 +154,7 @@ char *decimal(unsigned long n,char s[24]) noexcept;
* @param s Buffer to receive hex, must be at least (2*sizeof(i))+1 in size or overflow will occur.
* @return Pointer to s containing hex string with trailing zero byte
*/
char *hex(uint64_t i,char buf[17]) noexcept;
char *hex(uint64_t i, char buf[17]) noexcept;
/**
* Decode an unsigned integer in hex format
@ -167,7 +172,7 @@ uint64_t unhex(const char *s) noexcept;
* @param s String buffer, must be at least (l*2)+1 in size or overflow will occur
* @return Pointer to filled string buffer
*/
char *hex(const void *d,unsigned int l,char *s) noexcept;
char *hex(const void *d, unsigned int l, char *s) noexcept;
/**
* Decode a hex string
@ -178,7 +183,7 @@ char *hex(const void *d,unsigned int l,char *s) noexcept;
* @param buflen Length of output buffer
* @return Number of written bytes
*/
unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen) noexcept;
unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buflen) noexcept;
/**
* Generate secure random bytes
@ -189,7 +194,7 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
* @param buf Buffer to fill
* @param bytes Number of random bytes to generate
*/
void getSecureRandom(void *buf,unsigned int bytes) noexcept;
void getSecureRandom(void *buf, unsigned int bytes) noexcept;
/**
* @return Secure random 64-bit integer
@ -205,7 +210,7 @@ uint64_t getSecureRandomU64() noexcept;
* @param bufSize Size of result buffer
* @return Number of bytes written
*/
int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept;
int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept;
/**
* Decode base32 string
@ -237,7 +242,7 @@ uint64_t random() noexcept;
* @param src Source string (if NULL, dest will receive a zero-length string and true is returned)
* @return True on success, false on overflow (buffer will still be 0-terminated)
*/
bool scopy(char *dest,unsigned int len,const char *src) noexcept;
bool scopy(char *dest, unsigned int len, const char *src) noexcept;
/**
* Mix bits in a 64-bit integer (non-cryptographic, for hash tables)
@ -278,9 +283,9 @@ static ZT_INLINE uint32_t hash32(uint32_t x) noexcept
/**
* Check if a buffer's contents are all zero
*/
static ZT_INLINE bool allZero(const void *const b,unsigned int l) noexcept
static ZT_INLINE bool allZero(const void *const b, unsigned int l) noexcept
{
for(unsigned int i=0;i<l;++i) {
for (unsigned int i = 0; i < l; ++i) {
if (reinterpret_cast<const uint8_t *>(b)[i] != 0)
return false;
}
@ -295,18 +300,18 @@ static ZT_INLINE bool allZero(const void *const b,unsigned int l) noexcept
* @param saveptr Pointer to pointer where function can save state
* @return Next token or NULL if none
*/
static ZT_INLINE char *stok(char *str,const char *delim,char **saveptr) noexcept
static ZT_INLINE char *stok(char *str, const char *delim, char **saveptr) noexcept
{
#ifdef __WINDOWS__
return strtok_s(str,delim,saveptr);
#else
return strtok_r(str,delim,saveptr);
return strtok_r(str, delim, saveptr);
#endif
}
static ZT_INLINE unsigned int strToUInt(const char *s) noexcept
{
return (unsigned int)strtoul(s,nullptr,10);
return (unsigned int)strtoul(s, nullptr, 10);
}
static ZT_INLINE unsigned long long hexStrToU64(const char *s) noexcept
@ -314,7 +319,7 @@ static ZT_INLINE unsigned long long hexStrToU64(const char *s) noexcept
#ifdef __WINDOWS__
return (unsigned long long)_strtoui64(s,nullptr,16);
#else
return strtoull(s,nullptr,16);
return strtoull(s, nullptr, 16);
#endif
}
@ -327,20 +332,29 @@ static ZT_INLINE unsigned long long hexStrToU64(const char *s) noexcept
* @param len Length of data
* @return FNV1a checksum
*/
static ZT_INLINE uint32_t fnv1a32(const void *const data,const unsigned int len) noexcept
static ZT_INLINE uint32_t fnv1a32(const void *const data, const unsigned int len) noexcept
{
uint32_t h = 0x811c9dc5;
const uint32_t p = 0x01000193;
for(unsigned int i=0;i<len;++i)
for (unsigned int i = 0; i < len; ++i)
h = (h ^ (uint32_t)reinterpret_cast<const uint8_t *>(data)[i]) * p;
return h;
}
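
fnv1a32() is the standard 32-bit FNV-1a: start from the offset basis 0x811c9dc5 and, for each byte, XOR then multiply by the prime 0x01000193; it is a checksum, not a cryptographic hash. A standalone sketch (an empty input returns the offset basis by construction):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint32_t fnv1a32(const void *data, unsigned int len) noexcept
    {
        uint32_t h = 0x811c9dc5;         // offset basis
        const uint32_t p = 0x01000193;   // FNV prime
        for (unsigned int i = 0; i < len; ++i)
            h = (h ^ (uint32_t)((const uint8_t *)data)[i]) * p;
        return h;
    }

    int main()
    {
        const char *s = "zerotier";
        std::printf("fnv1a32(\"\") = %08x\n", fnv1a32("", 0));
        std::printf("fnv1a32(\"%s\") = %08x\n", s, fnv1a32(s, (unsigned int)std::strlen(s)));
        return 0;
    }
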
#ifdef __GNUC__
static ZT_INLINE unsigned int countBits(const uint8_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_INLINE unsigned int countBits(const uint16_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_INLINE unsigned int countBits(const uint32_t v) noexcept { return (unsigned int)__builtin_popcountl((unsigned long)v); }
static ZT_INLINE unsigned int countBits(const uint64_t v) noexcept{ return (unsigned int)__builtin_popcountll((unsigned long long)v); }
static ZT_INLINE unsigned int countBits(const uint8_t v) noexcept
{ return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_INLINE unsigned int countBits(const uint16_t v) noexcept
{ return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_INLINE unsigned int countBits(const uint32_t v) noexcept
{ return (unsigned int)__builtin_popcountl((unsigned long)v); }
static ZT_INLINE unsigned int countBits(const uint64_t v) noexcept
{ return (unsigned int)__builtin_popcountll((unsigned long long)v); }
#else
template<typename T>
static ZT_INLINE unsigned int countBits(T v) noexcept
@ -420,36 +434,110 @@ static ZT_INLINE uint16_t swapBytes(const uint16_t n) noexcept
// These are helper adapters to load and swap integer types special cased by size
// to work with all typedef'd variants, signed/unsigned, etc.
template<typename I,unsigned int S>
template< typename I, unsigned int S >
class _swap_bytes_bysize;
template<typename I>
class _swap_bytes_bysize<I,1> { public: static ZT_INLINE I s(const I n) noexcept { return n; } };
template<typename I>
class _swap_bytes_bysize<I,2> { public: static ZT_INLINE I s(const I n) noexcept { return (I)swapBytes((uint16_t)n); } };
template<typename I>
class _swap_bytes_bysize<I,4> { public: static ZT_INLINE I s(const I n) noexcept { return (I)swapBytes((uint32_t)n); } };
template<typename I>
class _swap_bytes_bysize<I,8> { public: static ZT_INLINE I s(const I n) noexcept { return (I)swapBytes((uint64_t)n); } };
template<typename I,unsigned int S>
template< typename I >
class _swap_bytes_bysize< I, 1 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return n; }
};
template< typename I >
class _swap_bytes_bysize< I, 2 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return (I)swapBytes((uint16_t)n); }
};
template< typename I >
class _swap_bytes_bysize< I, 4 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return (I)swapBytes((uint32_t)n); }
};
template< typename I >
class _swap_bytes_bysize< I, 8 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return (I)swapBytes((uint64_t)n); }
};
template< typename I, unsigned int S >
class _load_be_bysize;
template<typename I>
class _load_be_bysize<I,1> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return p[0]; }};
template<typename I>
class _load_be_bysize<I,2> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)(((unsigned int)p[0] << 8U) | (unsigned int)p[1]); }};
template<typename I>
class _load_be_bysize<I,4> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)(((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]); }};
template<typename I>
class _load_be_bysize<I,8> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)(((uint64_t)p[0] << 56U) | ((uint64_t)p[1] << 48U) | ((uint64_t)p[2] << 40U) | ((uint64_t)p[3] << 32U) | ((uint64_t)p[4] << 24U) | ((uint64_t)p[5] << 16U) | ((uint64_t)p[6] << 8U) | (uint64_t)p[7]); }};
template<typename I,unsigned int S>
template< typename I >
class _load_be_bysize< I, 1 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return p[0]; }
};
template< typename I >
class _load_be_bysize< I, 2 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)(((unsigned int)p[0] << 8U) | (unsigned int)p[1]); }
};
template< typename I >
class _load_be_bysize< I, 4 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)(((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]); }
};
template< typename I >
class _load_be_bysize< I, 8 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)(((uint64_t)p[0] << 56U) | ((uint64_t)p[1] << 48U) | ((uint64_t)p[2] << 40U) | ((uint64_t)p[3] << 32U) | ((uint64_t)p[4] << 24U) | ((uint64_t)p[5] << 16U) | ((uint64_t)p[6] << 8U) | (uint64_t)p[7]); }
};
template< typename I, unsigned int S >
class _load_le_bysize;
template<typename I>
class _load_le_bysize<I,1> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return p[0]; }};
template<typename I>
class _load_le_bysize<I,2> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)((unsigned int)p[0] | ((unsigned int)p[1] << 8U)); }};
template<typename I>
class _load_le_bysize<I,4> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)((uint32_t)p[0] | ((uint32_t)p[1] << 8U) | ((uint32_t)p[2] << 16U) | ((uint32_t)p[3] << 24U)); }};
template<typename I>
class _load_le_bysize<I,8> { public: static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)((uint64_t)p[0] | ((uint64_t)p[1] << 8U) | ((uint64_t)p[2] << 16U) | ((uint64_t)p[3] << 24U) | ((uint64_t)p[4] << 32U) | ((uint64_t)p[5] << 40U) | ((uint64_t)p[6] << 48U) | ((uint64_t)p[7]) << 56U); }};
template< typename I >
class _load_le_bysize< I, 1 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return p[0]; }
};
template< typename I >
class _load_le_bysize< I, 2 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)((unsigned int)p[0] | ((unsigned int)p[1] << 8U)); }
};
template< typename I >
class _load_le_bysize< I, 4 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)((uint32_t)p[0] | ((uint32_t)p[1] << 8U) | ((uint32_t)p[2] << 16U) | ((uint32_t)p[3] << 24U)); }
};
template< typename I >
class _load_le_bysize< I, 8 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)((uint64_t)p[0] | ((uint64_t)p[1] << 8U) | ((uint64_t)p[2] << 16U) | ((uint64_t)p[3] << 24U) | ((uint64_t)p[4] << 32U) | ((uint64_t)p[5] << 40U) | ((uint64_t)p[6] << 48U) | ((uint64_t)p[7]) << 56U); }
};
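
The _swap_bytes_bysize and _load_*_bysize specializations exist so hton/ntoh and the load/store helpers below can dispatch purely on sizeof(I) and work with every integer typedef, signed or unsigned. The underlying trick is byte-by-byte assembly, which is also alignment-safe; a standalone 32-bit big-endian load/store sketch of the same idea:

    #include <cstdint>
    #include <cstdio>

    static uint32_t loadBE32(const void *p) noexcept
    {
        const uint8_t *b = (const uint8_t *)p;
        return ((uint32_t)b[0] << 24U) | ((uint32_t)b[1] << 16U) | ((uint32_t)b[2] << 8U) | (uint32_t)b[3];
    }

    static void storeBE32(void *p, const uint32_t v) noexcept
    {
        uint8_t *b = (uint8_t *)p;
        b[0] = (uint8_t)(v >> 24U);
        b[1] = (uint8_t)(v >> 16U);
        b[2] = (uint8_t)(v >> 8U);
        b[3] = (uint8_t)v;
    }

    int main()
    {
        uint8_t wire[4];
        storeBE32(wire, 0x12345678U);
        std::printf("%02x %02x %02x %02x -> %08x\n", wire[0], wire[1], wire[2], wire[3], loadBE32(wire));
        return 0;
    }
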
/**
* Convert any signed or unsigned integer type to big-endian ("network") byte order
@ -458,11 +546,11 @@ class _load_le_bysize<I,8> { public: static ZT_INLINE I l(const uint8_t *const p
* @param n Value to convert
* @return Value in big-endian order
*/
template<typename I>
template< typename I >
static ZT_INLINE I hton(const I n) noexcept
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
return _swap_bytes_bysize<I,sizeof(I)>::s(n);
return _swap_bytes_bysize< I, sizeof(I) >::s(n);
#else
return n;
#endif
@ -475,11 +563,11 @@ static ZT_INLINE I hton(const I n) noexcept
* @param n Value to convert
* @return Value in host byte order
*/
template<typename I>
template< typename I >
static ZT_INLINE I ntoh(const I n) noexcept
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
return _swap_bytes_bysize<I,sizeof(I)>::s(n);
return _swap_bytes_bysize< I, sizeof(I) >::s(n);
#else
return n;
#endif
@ -492,7 +580,7 @@ static ZT_INLINE I ntoh(const I n) noexcept
* @param p Byte stream, must be at least sizeof(I) in size
* @return Loaded raw integer
*/
template<typename I>
template< typename I >
static ZT_INLINE I loadAsIsEndian(const void *const p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
@ -512,8 +600,8 @@ static ZT_INLINE I loadAsIsEndian(const void *const p) noexcept
* @param p Byte array (must be at least sizeof(I))
* @param i Integer to store
*/
template<typename I>
static ZT_INLINE void storeAsIsEndian(void *const p,const I i) noexcept
template< typename I >
static ZT_INLINE void storeAsIsEndian(void *const p, const I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
for(unsigned int k=0;k<sizeof(I);++k)
@ -530,7 +618,7 @@ static ZT_INLINE void storeAsIsEndian(void *const p,const I i) noexcept
* @param p Byte stream, must be at least sizeof(I) in size
* @return Decoded integer
*/
template<typename I>
template< typename I >
static ZT_INLINE I loadBigEndian(const void *const p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
@ -547,8 +635,8 @@ static ZT_INLINE I loadBigEndian(const void *const p) noexcept
* @param p Byte stream to write (must be at least sizeof(I))
 * @param i Integer to write
*/
template<typename I>
static ZT_INLINE void storeBigEndian(void *const p,I i) noexcept
template< typename I >
static ZT_INLINE void storeBigEndian(void *const p, I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
storeAsIsEndian(p,hton(i));
@ -564,7 +652,7 @@ static ZT_INLINE void storeBigEndian(void *const p,I i) noexcept
* @param p Byte stream, must be at least sizeof(I) in size
* @return Decoded integer
*/
template<typename I>
template< typename I >
static ZT_INLINE I loadLittleEndian(const void *const p) noexcept
{
#if __BYTE_ORDER == __BIG_ENDIAN || defined(ZT_NO_UNALIGNED_ACCESS)
@ -581,8 +669,8 @@ static ZT_INLINE I loadLittleEndian(const void *const p) noexcept
* @param p Byte stream to write (must be at least sizeof(I))
 * @param i Integer to write
*/
template<typename I>
static ZT_INLINE void storeLittleEndian(void *const p,const I i) noexcept
template< typename I >
static ZT_INLINE void storeLittleEndian(void *const p, const I i) noexcept
{
#if __BYTE_ORDER == __BIG_ENDIAN
storeAsIsEndian(p,_swap_bytes_bysize<I,sizeof(I)>::s(i));
@ -602,36 +690,36 @@ static ZT_INLINE void storeLittleEndian(void *const p,const I i) noexcept
* @param dest Destination memory
* @param src Source memory
*/
template<unsigned int L>
static ZT_INLINE void copy(void *const dest,const void *const src) noexcept
template< unsigned int L >
static ZT_INLINE void copy(void *const dest, const void *const src) noexcept
{
#ifdef ZT_ARCH_X64
uint8_t *volatile d = reinterpret_cast<uint8_t *>(dest);
const uint8_t *s = reinterpret_cast<const uint8_t *>(src);
for(unsigned int i=0;i<(L >> 6U);++i) {
for (unsigned int i = 0; i < (L >> 6U); ++i) {
__m128i x0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s));
__m128i x1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s + 16));
__m128i x2 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s + 32));
__m128i x3 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s + 48));
s += 64;
_mm_storeu_si128(reinterpret_cast<__m128i *>(d),x0);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16),x1);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 32),x2);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 48),x3);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d), x0);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16), x1);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 32), x2);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 48), x3);
d += 64;
}
if ((L & 32U) != 0) {
__m128i x0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s));
__m128i x1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s + 16));
s += 32;
_mm_storeu_si128(reinterpret_cast<__m128i *>(d),x0);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16),x1);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d), x0);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16), x1);
d += 32;
}
if ((L & 16U) != 0) {
__m128i x0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(s));
s += 16;
_mm_storeu_si128(reinterpret_cast<__m128i *>(d),x0);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d), x0);
d += 16;
}
if ((L & 8U) != 0) {
@ -664,7 +752,8 @@ static ZT_INLINE void copy(void *const dest,const void *const src) noexcept
* @param src Source memory
* @param len Bytes to copy
*/
static ZT_INLINE void copy(void *const dest,const void *const src,unsigned int len) noexcept { memcpy(dest,src,len); }
static ZT_INLINE void copy(void *const dest, const void *const src, unsigned int len) noexcept
{ memcpy(dest, src, len); }
/**
* Zero memory block whose size is known at compile time
@ -672,26 +761,26 @@ static ZT_INLINE void copy(void *const dest,const void *const src,unsigned int l
* @tparam L Size in bytes
* @param dest Memory to zero
*/
template<unsigned int L>
template< unsigned int L >
static ZT_INLINE void zero(void *const dest) noexcept
{
#ifdef ZT_ARCH_X64
uint8_t *volatile d = reinterpret_cast<uint8_t *>(dest);
__m128i z = _mm_setzero_si128();
for(unsigned int i=0;i<(L >> 6U);++i) {
_mm_storeu_si128(reinterpret_cast<__m128i *>(d),z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16),z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 32),z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 48),z);
for (unsigned int i = 0; i < (L >> 6U); ++i) {
_mm_storeu_si128(reinterpret_cast<__m128i *>(d), z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16), z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 32), z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 48), z);
d += 64;
}
if ((L & 32U) != 0) {
_mm_storeu_si128(reinterpret_cast<__m128i *>(d),z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16),z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d), z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d + 16), z);
d += 32;
}
if ((L & 16U) != 0) {
_mm_storeu_si128(reinterpret_cast<__m128i *>(d),z);
_mm_storeu_si128(reinterpret_cast<__m128i *>(d), z);
d += 16;
}
if ((L & 8U) != 0) {
@ -720,7 +809,8 @@ static ZT_INLINE void zero(void *const dest) noexcept
* @param dest Memory to zero
* @param len Size in bytes
*/
static ZT_INLINE void zero(void *const dest,const unsigned int len) noexcept { memset(dest,0,len); }
static ZT_INLINE void zero(void *const dest, const unsigned int len) noexcept
{ memset(dest, 0, len); }
/**
* Simple malloc/free based C++ STL allocator.
@ -731,24 +821,36 @@ static ZT_INLINE void zero(void *const dest,const unsigned int len) noexcept { m
*
* @tparam T Allocated type
*/
template<typename T>
template< typename T >
struct Mallocator
{
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T * pointer;
typedef const T * const_pointer;
typedef T & reference;
typedef const T & const_reference;
typedef T *pointer;
typedef const T *const_pointer;
typedef T &reference;
typedef const T &const_reference;
typedef T value_type;
template <class U> struct rebind { typedef Mallocator<U> other; };
ZT_INLINE Mallocator() noexcept {}
ZT_INLINE Mallocator(const Mallocator&) noexcept {}
template <class U> ZT_INLINE Mallocator(const Mallocator<U>&) noexcept {}
ZT_INLINE ~Mallocator() noexcept {}
template< class U >
struct rebind
{
typedef Mallocator< U > other;
};
ZT_INLINE Mallocator() noexcept
{}
ZT_INLINE pointer allocate(size_type s,void const * = nullptr)
ZT_INLINE Mallocator(const Mallocator &) noexcept
{}
template< class U >
ZT_INLINE Mallocator(const Mallocator< U > &) noexcept
{}
ZT_INLINE ~Mallocator() noexcept
{}
ZT_INLINE pointer allocate(size_type s, void const * = nullptr)
{
if (0 == s)
return nullptr;
@ -758,15 +860,29 @@ struct Mallocator
return temp;
}
ZT_INLINE pointer address(reference x) const { return &x; }
ZT_INLINE const_pointer address(const_reference x) const { return &x; }
ZT_INLINE void deallocate(pointer p,size_type) { free(p); }
ZT_INLINE size_type max_size() const noexcept { return std::numeric_limits<size_t>::max() / sizeof(T); }
ZT_INLINE void construct(pointer p,const T& val) { new((void *)p) T(val); }
ZT_INLINE void destroy(pointer p) { p->~T(); }
ZT_INLINE pointer address(reference x) const
{ return &x; }
constexpr bool operator==(const Mallocator &) const noexcept { return true; }
constexpr bool operator!=(const Mallocator &) const noexcept { return false; }
ZT_INLINE const_pointer address(const_reference x) const
{ return &x; }
ZT_INLINE void deallocate(pointer p, size_type)
{ free(p); }
ZT_INLINE size_type max_size() const noexcept
{ return std::numeric_limits< size_t >::max() / sizeof(T); }
ZT_INLINE void construct(pointer p, const T &val)
{ new((void *)p) T(val); }
ZT_INLINE void destroy(pointer p)
{ p->~T(); }
constexpr bool operator==(const Mallocator &) const noexcept
{ return true; }
constexpr bool operator!=(const Mallocator &) const noexcept
{ return false; }
};
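
Mallocator plugs plain malloc()/free() into STL containers in place of operator new. A usage sketch, assuming Utils.hpp from this tree is on the include path:

    #include "Utils.hpp"
    #include <vector>
    #include <cstdio>

    int main()
    {
        // A vector whose storage comes from malloc()/free() via Mallocator.
        std::vector< int, ZeroTier::Utils::Mallocator< int > > v;
        for (int i = 0; i < 8; ++i)
            v.push_back(i * i);
        std::printf("size=%zu back=%d\n", v.size(), v.back());
        return 0;
    }
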
} // namespace Utils


@ -31,7 +31,7 @@ namespace ZeroTier {
namespace {
ZT_INLINE const Identity &identityFromPeerPtr(const SharedPtr<Peer> &p)
ZT_INLINE const Identity &identityFromPeerPtr(const SharedPtr< Peer > &p)
{
return (p) ? p->identity() : Identity::NIL;
}
@ -103,9 +103,9 @@ VL1::VL1(const RuntimeEnvironment *renv) :
{
}
void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const InetAddress &fromAddr, SharedPtr<Buf> &data, const unsigned int len) noexcept
void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const InetAddress &fromAddr, SharedPtr< Buf > &data, const unsigned int len) noexcept
{
const SharedPtr<Path> path(RR->topology->path(localSocket, fromAddr));
const SharedPtr< Path > path(RR->topology->path(localSocket, fromAddr));
const int64_t now = RR->node->now();
ZT_SPEW("%u bytes from %s (local socket %lld)", len, fromAddr.toString().c_str(), localSocket);
@ -120,7 +120,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
return;
static_assert((ZT_PROTO_PACKET_ID_INDEX + sizeof(uint64_t)) < ZT_PROTO_MIN_FRAGMENT_LENGTH, "overflow");
const uint64_t packetId = Utils::loadAsIsEndian<uint64_t>(data->unsafeData + ZT_PROTO_PACKET_ID_INDEX);
const uint64_t packetId = Utils::loadAsIsEndian< uint64_t >(data->unsafeData + ZT_PROTO_PACKET_ID_INDEX);
static_assert((ZT_PROTO_PACKET_DESTINATION_INDEX + ZT_ADDRESS_LENGTH) < ZT_PROTO_MIN_FRAGMENT_LENGTH, "overflow");
const Address destination(data->unsafeData + ZT_PROTO_PACKET_DESTINATION_INDEX);
@ -151,7 +151,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
totalFragments,
now,
path)) {
case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::COMPLETE:
case Defragmenter< ZT_MAX_PACKET_FRAGMENTS >::COMPLETE:
break;
default:
//case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::OK:
@ -177,7 +177,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
0, // this is specified in fragments, not in the head
now,
path)) {
case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::COMPLETE:
case Defragmenter< ZT_MAX_PACKET_FRAGMENTS >::COMPLETE:
break;
default:
//case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::OK:
@ -207,7 +207,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
const uint8_t hops = hdr[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FIELD_HOPS_MASK;
const uint8_t cipher = (hdr[ZT_PROTO_PACKET_FLAGS_INDEX] >> 3U) & 3U;
SharedPtr<Buf> pkt(new Buf());
SharedPtr< Buf > pkt(new Buf());
int pktSize = 0;
static_assert(ZT_PROTO_PACKET_VERB_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
@ -218,7 +218,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
ZT_SPEW("discarding packet %.16llx from %s(%s): assembled packet size: %d", packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
}
const SharedPtr<Peer> peer(m_HELLO(tPtr, path, *pkt, pktSize));
const SharedPtr< Peer > peer(m_HELLO(tPtr, path, *pkt, pktSize));
if (likely(peer))
peer->received(tPtr, path, hops, packetId, pktSize - ZT_PROTO_PACKET_PAYLOAD_START, Protocol::VERB_HELLO, Protocol::VERB_NOP);
return;
@ -229,7 +229,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
// secrecy status.
unsigned int auth = 0;
SharedPtr<Peer> peer(RR->topology->peer(tPtr, source));
SharedPtr< Peer > peer(RR->topology->peer(tPtr, source));
if (likely(peer)) {
switch (cipher) {
@ -238,7 +238,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, *pktv[0].b, pktv.totalSize());
p_PolyCopyFunction s20cf(perPacketKey, &packetId);
pktSize = pktv.mergeMap<p_PolyCopyFunction &>(*pkt, ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, s20cf);
pktSize = pktv.mergeMap< p_PolyCopyFunction & >(*pkt, ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, s20cf);
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW("discarding packet %.16llx from %s(%s): assembled packet size: %d", packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
@ -247,21 +247,22 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
uint64_t mac[2];
s20cf.poly1305.finish(mac);
static_assert((ZT_PROTO_PACKET_MAC_INDEX + 8) < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if (unlikely(Utils::loadAsIsEndian<uint64_t>(hdr + ZT_PROTO_PACKET_MAC_INDEX) != mac[0])) {
if (unlikely(Utils::loadAsIsEndian< uint64_t >(hdr + ZT_PROTO_PACKET_MAC_INDEX) != mac[0])) {
ZT_SPEW("discarding packet %.16llx from %s(%s): packet MAC failed (none/poly1305)", packetId, source.toString().c_str(), fromAddr.toString().c_str());
RR->t->incomingPacketDropped(tPtr, 0xcc89c812, packetId, 0, peer->identity(), path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
}
auth = ZT_VL1_AUTH_RESULT_FLAG_AUTHENTICATED;
} break;
}
break;
case ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012: {
uint8_t perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, *pktv[0].b, pktv.totalSize());
p_SalsaPolyCopyFunction s20cf(perPacketKey, &packetId);
pktSize = pktv.mergeMap<p_SalsaPolyCopyFunction &>(*pkt, ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, s20cf);
pktSize = pktv.mergeMap< p_SalsaPolyCopyFunction & >(*pkt, ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, s20cf);
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW("discarding packet %.16llx from %s(%s): assembled packet size: %d", packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
@ -270,22 +271,25 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
uint64_t mac[2];
s20cf.poly1305.finish(mac);
static_assert((ZT_PROTO_PACKET_MAC_INDEX + 8) < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if (unlikely(Utils::loadAsIsEndian<uint64_t>(hdr + ZT_PROTO_PACKET_MAC_INDEX) != mac[0])) {
if (unlikely(Utils::loadAsIsEndian< uint64_t >(hdr + ZT_PROTO_PACKET_MAC_INDEX) != mac[0])) {
ZT_SPEW("discarding packet %.16llx from %s(%s): packet MAC failed (salsa/poly1305)", packetId, source.toString().c_str(), fromAddr.toString().c_str());
RR->t->incomingPacketDropped(tPtr, 0xcc89c812, packetId, 0, peer->identity(), path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
}
auth = ZT_VL1_AUTH_RESULT_FLAG_AUTHENTICATED | ZT_VL1_AUTH_RESULT_FLAG_ENCRYPTED;
} break;
}
break;
case ZT_PROTO_CIPHER_SUITE__NONE: {
// TODO
} break;
}
break;
case ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV: {
// TODO
} break;
}
break;
default:
RR->t->incomingPacketDropped(tPtr, 0x5b001099, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
@ -309,14 +313,14 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
static_assert(ZT_PROTO_PACKET_VERB_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
const uint8_t verbFlags = pkt->unsafeData[ZT_PROTO_PACKET_VERB_INDEX];
const Protocol::Verb verb = (Protocol::Verb) (verbFlags & ZT_PROTO_VERB_MASK);
const Protocol::Verb verb = (Protocol::Verb)(verbFlags & ZT_PROTO_VERB_MASK);
// Decompress packet payload if compressed. For additional safety decompression is
// only performed on packets whose MACs have already been validated. (Only HELLO is
// sent without this, and HELLO doesn't benefit from compression.)
if (((verbFlags & ZT_PROTO_VERB_FLAG_COMPRESSED) != 0) && (pktSize > ZT_PROTO_PACKET_PAYLOAD_START)) {
SharedPtr<Buf> dec(new Buf());
Utils::copy<ZT_PROTO_PACKET_PAYLOAD_START>(dec->unsafeData, pkt->unsafeData);
SharedPtr< Buf > dec(new Buf());
Utils::copy< ZT_PROTO_PACKET_PAYLOAD_START >(dec->unsafeData, pkt->unsafeData);
const int uncompressedLen = LZ4_decompress_safe(
reinterpret_cast<const char *>(pkt->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START),
reinterpret_cast<char *>(dec->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START),
@ -345,7 +349,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
case Protocol::VERB_NOP:
break;
case Protocol::VERB_HELLO:
ok = (bool) (m_HELLO(tPtr, path, *pkt, pktSize));
ok = (bool)(m_HELLO(tPtr, path, *pkt, pktSize));
break;
case Protocol::VERB_ERROR:
ok = m_ERROR(tPtr, packetId, auth, path, peer, *pkt, pktSize, inReVerb);
@ -417,7 +421,7 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
Mutex::Lock wl(m_whoisQueue_l);
p_WhoisQueueItem &wq = m_whoisQueue[source];
const unsigned int wpidx = wq.waitingPacketCount++ % ZT_VL1_MAX_WHOIS_WAITING_PACKETS;
wq.waitingPacketSize[wpidx] = (unsigned int) pktSize;
wq.waitingPacketSize[wpidx] = (unsigned int)pktSize;
wq.waitingPacket[wpidx] = pkt;
sendPending = (now - wq.lastRetry) >= ZT_WHOIS_RETRY_DELAY;
}
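
While the WHOIS for an unknown sender is outstanding, packets from that address are parked in a small fixed-size ring: the slot index is waitingPacketCount++ modulo the ring size, so once the ring is full the newest arrivals overwrite the oldest. A self-contained sketch of that pattern (names and the capacity of 4 are illustrative):

    #include <cstdio>

    struct WaitingRing
    {
        static const unsigned int Capacity = 4;
        unsigned int count = 0;
        int slots[Capacity] = {0};

        void push(const int packetHandle) noexcept
        {
            slots[count++ % Capacity] = packetHandle; // oldest entry overwritten when full
        }
    };

    int main()
    {
        WaitingRing ring;
        for (int p = 1; p <= 6; ++p)
            ring.push(p);
        for (unsigned int i = 0; i < WaitingRing::Capacity; ++i)
            std::printf("slot %u: %d\n", i, ring.slots[i]);
        return 0;
    }
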
@ -430,23 +434,23 @@ void VL1::onRemotePacket(void *const tPtr, const int64_t localSocket, const Inet
}
}
void VL1::m_relay(void *tPtr, const SharedPtr<Path> &path, Address destination, SharedPtr<Buf> &pkt, int pktSize)
void VL1::m_relay(void *tPtr, const SharedPtr< Path > &path, Address destination, SharedPtr< Buf > &pkt, int pktSize)
{
}
void VL1::m_sendPendingWhois(void *tPtr, int64_t now)
{
const SharedPtr<Peer> root(RR->topology->root());
const SharedPtr< Peer > root(RR->topology->root());
if (unlikely(!root))
return;
const SharedPtr<Path> rootPath(root->path(now));
const SharedPtr< Path > rootPath(root->path(now));
if (unlikely(!rootPath))
return;
Vector<Address> toSend;
Vector< Address > toSend;
{
Mutex::Lock wl(m_whoisQueue_l);
for (Map<Address, p_WhoisQueueItem>::iterator wi(m_whoisQueue.begin());wi != m_whoisQueue.end();++wi) {
for (Map< Address, p_WhoisQueueItem >::iterator wi(m_whoisQueue.begin()); wi != m_whoisQueue.end(); ++wi) {
if ((now - wi->second.lastRetry) >= ZT_WHOIS_RETRY_DELAY) {
wi->second.lastRetry = now;
++wi->second.retries;
@ -456,9 +460,9 @@ void VL1::m_sendPendingWhois(void *tPtr, int64_t now)
}
if (!toSend.empty()) {
const SharedPtr<SymmetricKey> key(root->key());
const SharedPtr< SymmetricKey > key(root->key());
uint8_t outp[ZT_DEFAULT_UDP_MTU - ZT_PROTO_MIN_PACKET_LENGTH];
Vector<Address>::iterator a(toSend.begin());
Vector< Address >::iterator a(toSend.begin());
while (a != toSend.end()) {
const uint64_t packetId = key->nextMessage(RR->identity.address(), root->address());
int p = Protocol::newPacket(outp, packetId, root->address(), RR->identity.address(), Protocol::VERB_WHOIS);
@ -474,21 +478,21 @@ void VL1::m_sendPendingWhois(void *tPtr, int64_t now)
}
}
SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt, int packetSize)
SharedPtr< Peer > VL1::m_HELLO(void *tPtr, const SharedPtr< Path > &path, Buf &pkt, int packetSize)
{
const uint64_t packetId = Utils::loadAsIsEndian<uint64_t>(pkt.unsafeData + ZT_PROTO_PACKET_ID_INDEX);
const uint64_t mac = Utils::loadAsIsEndian<uint64_t>(pkt.unsafeData + ZT_PROTO_PACKET_MAC_INDEX);
const uint64_t packetId = Utils::loadAsIsEndian< uint64_t >(pkt.unsafeData + ZT_PROTO_PACKET_ID_INDEX);
const uint64_t mac = Utils::loadAsIsEndian< uint64_t >(pkt.unsafeData + ZT_PROTO_PACKET_MAC_INDEX);
const uint8_t hops = pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FIELD_HOPS_MASK;
const uint8_t protoVersion = pkt.lI8<ZT_PROTO_PACKET_PAYLOAD_START>();
const uint8_t protoVersion = pkt.lI8< ZT_PROTO_PACKET_PAYLOAD_START >();
if (unlikely(protoVersion < ZT_PROTO_VERSION_MIN)) {
RR->t->incomingPacketDropped(tPtr, 0x907a9891, packetId, 0, Identity::NIL, path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_PEER_TOO_OLD);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
const unsigned int versionMajor = pkt.lI8<ZT_PROTO_PACKET_PAYLOAD_START + 1>();
const unsigned int versionMinor = pkt.lI8<ZT_PROTO_PACKET_PAYLOAD_START + 2>();
const unsigned int versionRev = pkt.lI16<ZT_PROTO_PACKET_PAYLOAD_START + 3>();
const uint64_t timestamp = pkt.lI64<ZT_PROTO_PACKET_PAYLOAD_START + 5>();
const unsigned int versionMajor = pkt.lI8< ZT_PROTO_PACKET_PAYLOAD_START + 1 >();
const unsigned int versionMinor = pkt.lI8< ZT_PROTO_PACKET_PAYLOAD_START + 2 >();
const unsigned int versionRev = pkt.lI16< ZT_PROTO_PACKET_PAYLOAD_START + 3 >();
const uint64_t timestamp = pkt.lI64< ZT_PROTO_PACKET_PAYLOAD_START + 5 >();
int ii = ZT_PROTO_PACKET_PAYLOAD_START + 13;
@ -496,33 +500,33 @@ SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt,
Identity id;
if (unlikely(pkt.rO(ii, id) < 0)) {
RR->t->incomingPacketDropped(tPtr, 0x707a9810, packetId, 0, Identity::NIL, path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
if (unlikely(id.address() != Address(pkt.unsafeData + ZT_PROTO_PACKET_SOURCE_INDEX))) {
RR->t->incomingPacketDropped(tPtr, 0x707a9010, packetId, 0, Identity::NIL, path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
// Get the peer that matches this identity, or learn a new one if we don't know it.
SharedPtr<Peer> peer(RR->topology->peer(tPtr, id.address(), true));
SharedPtr< Peer > peer(RR->topology->peer(tPtr, id.address(), true));
if (peer) {
if (unlikely(peer->identity() != id)) {
RR->t->incomingPacketDropped(tPtr, 0x707a9891, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
if (unlikely(peer->deduplicateIncomingPacket(packetId))) {
ZT_SPEW("discarding packet %.16llx from %s(%s): duplicate!", packetId, id.address().toString().c_str(), path->address().toString().c_str());
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
} else {
if (unlikely(!id.locallyValidate())) {
RR->t->incomingPacketDropped(tPtr, 0x707a9892, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
peer.set(new Peer(RR));
if (unlikely(!peer->init(id))) {
RR->t->incomingPacketDropped(tPtr, 0x707a9893, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_UNSPECIFIED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
peer = RR->topology->add(tPtr, peer);
}
@ -538,15 +542,15 @@ SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt,
uint8_t hmac[ZT_HMACSHA384_LEN];
if (unlikely(packetSize < ZT_HMACSHA384_LEN)) {
RR->t->incomingPacketDropped(tPtr, 0xab9c9891, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
packetSize -= ZT_HMACSHA384_LEN;
pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] &= ~ZT_PROTO_FLAG_FIELD_HOPS_MASK; // mask hops to 0
Utils::storeAsIsEndian<uint64_t>(pkt.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, 0); // set MAC field to 0
Utils::storeAsIsEndian< uint64_t >(pkt.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, 0); // set MAC field to 0
HMACSHA384(peer->identityHelloHmacKey(), pkt.unsafeData, packetSize, hmac);
if (unlikely(!Utils::secureEq(hmac, pkt.unsafeData + packetSize, ZT_HMACSHA384_LEN))) {
RR->t->incomingPacketDropped(tPtr, 0x707a9891, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
} else {
// Older versions use Poly1305 MAC (but no whole packet encryption) for HELLO.
@ -561,11 +565,11 @@ SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt,
poly1305.finish(polyMac);
if (unlikely(mac != polyMac[0])) {
RR->t->incomingPacketDropped(tPtr, 0x11bfff82, packetId, 0, id, path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
} else {
RR->t->incomingPacketDropped(tPtr, 0x11bfff81, packetId, 0, id, path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
}
@ -576,10 +580,10 @@ SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt,
InetAddress sentTo;
if (unlikely(pkt.rO(ii, sentTo) < 0)) {
RR->t->incomingPacketDropped(tPtr, 0x707a9811, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return SharedPtr<Peer>();
return SharedPtr< Peer >();
}
const SharedPtr<SymmetricKey> key(peer->identityKey());
const SharedPtr< SymmetricKey > key(peer->identityKey());
if (protoVersion >= 11) {
// V2.x and newer supports an encrypted section and has a new OK format.
@ -623,12 +627,12 @@ SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt,
pkt.wI16(ii, 0); // reserved, specifies no "moons" for older versions
if (protoVersion >= 11) {
FCV<uint8_t, 1024> okmd;
pkt.wI16(ii, (uint16_t) okmd.size());
FCV< uint8_t, 1024 > okmd;
pkt.wI16(ii, (uint16_t)okmd.size());
pkt.wB(ii, okmd.data(), okmd.size());
if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_MEM_SIZE)) // sanity check, should be impossible
return SharedPtr<Peer>();
return SharedPtr< Peer >();
HMACSHA384(peer->identityHelloHmacKey(), pkt.unsafeData, ii, pkt.unsafeData + ii);
ii += ZT_HMACSHA384_LEN;
@ -639,7 +643,7 @@ SharedPtr<Peer> VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt,
return peer;
}
bool VL1::m_ERROR(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
bool VL1::m_ERROR(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
{
#if 0
if (packetSize < (int)sizeof(Protocol::ERROR::Header)) {
@ -686,11 +690,11 @@ bool VL1::m_ERROR(void *tPtr, const uint64_t packetId, const unsigned int auth,
#endif
}
bool VL1::m_OK(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
bool VL1::m_OK(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
{
int ii = ZT_PROTO_PACKET_PAYLOAD_START + 13;
inReVerb = (Protocol::Verb) pkt.rI8(ii);
inReVerb = (Protocol::Verb)pkt.rI8(ii);
const uint64_t inRePacketId = pkt.rI64(ii);
if (unlikely(Buf::readOverflow(ii, packetSize))) {
RR->t->incomingPacketDropped(tPtr, 0x4c1f1ff7, packetId, 0, identityFromPeerPtr(peer), path->address(), 0, Protocol::VERB_OK, ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@ -724,7 +728,7 @@ bool VL1::m_OK(void *tPtr, const uint64_t packetId, const unsigned int auth, con
return true;
}
bool VL1::m_WHOIS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL1::m_WHOIS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
#if 0
if (packetSize < (int)sizeof(Protocol::OK::Header)) {
@ -778,7 +782,7 @@ bool VL1::m_WHOIS(void *tPtr, const uint64_t packetId, const unsigned int auth,
#endif
}
bool VL1::m_RENDEZVOUS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL1::m_RENDEZVOUS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
#if 0
if (RR->topology->isRoot(peer->identity())) {
@ -826,7 +830,7 @@ bool VL1::m_RENDEZVOUS(void *tPtr, const uint64_t packetId, const unsigned int a
#endif
}
bool VL1::m_ECHO(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL1::m_ECHO(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
#if 0
const uint64_t packetId = Protocol::packetId(pkt,packetSize);
@ -864,7 +868,7 @@ bool VL1::m_ECHO(void *tPtr, const uint64_t packetId, const unsigned int auth, c
#endif
}
bool VL1::m_PUSH_DIRECT_PATHS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL1::m_PUSH_DIRECT_PATHS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
#if 0
if (packetSize < (int)sizeof(Protocol::PUSH_DIRECT_PATHS)) {
@ -955,13 +959,13 @@ bool VL1::m_PUSH_DIRECT_PATHS(void *tPtr, const uint64_t packetId, const unsigne
#endif
}
bool VL1::m_USER_MESSAGE(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL1::m_USER_MESSAGE(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
// TODO
return true;
}
bool VL1::m_ENCAP(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL1::m_ENCAP(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
// TODO: not implemented yet
return true;


@ -32,7 +32,9 @@
namespace ZeroTier {
class RuntimeEnvironment;
class Peer;
class VL2;
/**
@ -60,39 +62,50 @@ public:
* @param data Packet data
* @param len Packet length
*/
void onRemotePacket(void *tPtr,int64_t localSocket,const InetAddress &fromAddr,SharedPtr<Buf> &data,unsigned int len) noexcept;
void onRemotePacket(void *tPtr, int64_t localSocket, const InetAddress &fromAddr, SharedPtr< Buf > &data, unsigned int len) noexcept;
private:
const RuntimeEnvironment *RR;
void m_relay(void *tPtr, const SharedPtr<Path> &path, Address destination, SharedPtr<Buf> &pkt, int pktSize);
void m_relay(void *tPtr, const SharedPtr< Path > &path, Address destination, SharedPtr< Buf > &pkt, int pktSize);
void m_sendPendingWhois(void *tPtr, int64_t now);
SharedPtr<Peer> m_HELLO(void *tPtr, const SharedPtr<Path> &path, Buf &pkt, int packetSize);
SharedPtr< Peer > m_HELLO(void *tPtr, const SharedPtr< Path > &path, Buf &pkt, int packetSize);
bool m_ERROR(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_OK(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_WHOIS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_RENDEZVOUS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ECHO(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_PUSH_DIRECT_PATHS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_USER_MESSAGE(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ENCAP(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ERROR(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_OK(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_WHOIS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_RENDEZVOUS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_ECHO(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_PUSH_DIRECT_PATHS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_USER_MESSAGE(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_ENCAP(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
// Defragmentation engine for handling inbound packets with more than one fragment.
Defragmenter<ZT_MAX_PACKET_FRAGMENTS> m_inputPacketAssembler;
Defragmenter< ZT_MAX_PACKET_FRAGMENTS > m_inputPacketAssembler;
// Queue of outbound WHOIS requests and packets waiting on them.
struct p_WhoisQueueItem
{
ZT_INLINE p_WhoisQueueItem() : lastRetry(0),retries(0),waitingPacketCount(0) {}
ZT_INLINE p_WhoisQueueItem() : lastRetry(0), retries(0), waitingPacketCount(0)
{}
int64_t lastRetry;
unsigned int retries;
unsigned int waitingPacketCount;
unsigned int waitingPacketSize[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
SharedPtr<Buf> waitingPacket[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
SharedPtr< Buf > waitingPacket[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
};
Map<Address,p_WhoisQueueItem> m_whoisQueue;
Map< Address, p_WhoisQueueItem > m_whoisQueue;
Mutex m_whoisQueue_l;
};
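The p_WhoisQueueItem structure and m_whoisQueue map above buffer packets received from peers whose identities have not yet been resolved. A minimal sketch of how such a packet might be parked, assuming the project's Mutex::Lock RAII guard; parkPacketForWhois is an illustrative name, not an actual method of this class:

// Hedged sketch, not part of this commit: park a packet on the WHOIS queue
// declared above until the sender's identity arrives or the entry times out.
void VL1::parkPacketForWhois(const Address &source, const SharedPtr< Buf > &pkt, const unsigned int pktSize)
{
	Mutex::Lock l(m_whoisQueue_l);                        // the queue is shared with the receive path
	p_WhoisQueueItem &wq = m_whoisQueue[source];          // default-constructed on first use, counters start at zero
	if (wq.waitingPacketCount < ZT_VL1_MAX_WHOIS_WAITING_PACKETS) {
		wq.waitingPacketSize[wq.waitingPacketCount] = pktSize;
		wq.waitingPacket[wq.waitingPacketCount++] = pkt;  // kept alive until m_sendPendingWhois() can deliver it
	}
}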

View file

@ -27,43 +27,43 @@ VL2::VL2(const RuntimeEnvironment *renv)
{
}
void VL2::onLocalEthernet(void *const tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,const unsigned int etherType,unsigned int vlanId,SharedPtr<Buf> &data,unsigned int len)
void VL2::onLocalEthernet(void *const tPtr, const SharedPtr< Network > &network, const MAC &from, const MAC &to, const unsigned int etherType, unsigned int vlanId, SharedPtr< Buf > &data, unsigned int len)
{
}
bool VL2::m_FRAME(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_FRAME(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_EXT_FRAME(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_EXT_FRAME(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_MULTICAST_LIKE(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST_LIKE(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_NETWORK_CREDENTIALS(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_NETWORK_CREDENTIALS(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_NETWORK_CONFIG_REQUEST(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_NETWORK_CONFIG_REQUEST(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_NETWORK_CONFIG(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_NETWORK_CONFIG(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_MULTICAST_GATHER(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST_GATHER(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_MULTICAST_FRAME_deprecated(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST_FRAME_deprecated(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}
bool VL2::m_MULTICAST(void *tPtr,const uint64_t packetId,const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST(void *tPtr, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
{
}

View file

@ -25,10 +25,15 @@
namespace ZeroTier {
class Path;
class Peer;
class RuntimeEnvironment;
class VL1;
class Network;
class MAC;
class VL2
@ -50,18 +55,26 @@ public:
* @param data Ethernet payload
* @param len Frame length
*/
void onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,SharedPtr<Buf> &data,unsigned int len);
void onLocalEthernet(void *tPtr, const SharedPtr< Network > &network, const MAC &from, const MAC &to, unsigned int etherType, unsigned int vlanId, SharedPtr< Buf > &data, unsigned int len);
protected:
bool m_FRAME(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_EXT_FRAME(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_LIKE(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CREDENTIALS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG_REQUEST(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_GATHER(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_FRAME_deprecated(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_FRAME(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_EXT_FRAME(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_LIKE(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CREDENTIALS(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG_REQUEST(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_GATHER(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_FRAME_deprecated(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_MULTICAST(void *tPtr, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
private:
};

View file

@ -591,7 +591,7 @@ func (n *Node) runMaintenance() {
addrs, _ := i.Addrs()
for _, a := range addrs {
ipn, _ := a.(*net.IPNet)
if ipn != nil && len(ipn.IP) > 0 && ipn.IP.IsGlobalUnicast() {
if ipn != nil && len(ipn.IP) > 0 && !ipn.IP.IsLoopback() && !ipn.IP.IsMulticast() && !ipn.IP.IsInterfaceLocalMulticast() && !ipn.IP.IsLinkLocalMulticast() && !ipn.IP.IsLinkLocalUnicast() {
isTemporary := false
if len(ipn.IP) == 16 {
var ss C.struct_sockaddr_storage

View file

@ -24,6 +24,7 @@
#include "../osdep/EthernetTap.hpp"
#ifndef __WINDOWS__
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
@ -32,11 +33,16 @@
#include <ifaddrs.h>
#include <net/if.h>
#include <netinet/in.h>
#if __has_include(<netinet/in6_var.h>)
#ifdef __BSD__
#include <netinet6/in6_var.h>
#endif
#include <arpa/inet.h>
#include <errno.h>
#ifdef __LINUX__
#ifndef IPV6_DONTFRAG
#define IPV6_DONTFRAG 62
@ -71,7 +77,7 @@ struct ZT_GoNodeThread
int port;
int af;
bool primary;
std::atomic<bool> run;
std::atomic< bool > run;
std::thread thr;
};
@ -82,10 +88,10 @@ struct ZT_GoNode_Impl
volatile int64_t nextBackgroundTaskDeadline;
String path;
std::atomic<bool> run;
std::atomic< bool > run;
Map< ZT_SOCKET,ZT_GoNodeThread > threads;
Map< uint64_t,std::shared_ptr<EthernetTap> > taps;
Map< ZT_SOCKET, ZT_GoNodeThread > threads;
Map< uint64_t, std::shared_ptr< EthernetTap > > taps;
std::mutex threads_l;
std::mutex taps_l;
@ -97,14 +103,14 @@ static const String defaultHomePath(OSUtils::platformDefaultHomePath());
const char *const ZT_PLATFORM_DEFAULT_HOMEPATH = defaultHomePath.c_str();
// These are implemented in Go code.
extern "C" int goPathCheckFunc(void *,const ZT_Identity *,int,const void *,int);
extern "C" int goPathLookupFunc(void *,uint64_t,int,const ZT_Identity *,int *,uint8_t [16],int *);
extern "C" void goStateObjectPutFunc(void *,int,const uint64_t [2],const void *,int);
extern "C" int goStateObjectGetFunc(void *,int,const uint64_t [2],void **);
extern "C" void goVirtualNetworkConfigFunc(void *,ZT_GoTap *,uint64_t,int,const ZT_VirtualNetworkConfig *);
extern "C" void goZtEvent(void *,int,const void *);
extern "C" void goHandleTapAddedMulticastGroup(void *,ZT_GoTap *,uint64_t,uint64_t,uint32_t);
extern "C" void goHandleTapRemovedMulticastGroup(void *,ZT_GoTap *,uint64_t,uint64_t,uint32_t);
extern "C" int goPathCheckFunc(void *, const ZT_Identity *, int, const void *, int);
extern "C" int goPathLookupFunc(void *, uint64_t, int, const ZT_Identity *, int *, uint8_t [16], int *);
extern "C" void goStateObjectPutFunc(void *, int, const uint64_t [2], const void *, int);
extern "C" int goStateObjectGetFunc(void *, int, const uint64_t [2], void **);
extern "C" void goVirtualNetworkConfigFunc(void *, ZT_GoTap *, uint64_t, int, const ZT_VirtualNetworkConfig *);
extern "C" void goZtEvent(void *, int, const void *);
extern "C" void goHandleTapAddedMulticastGroup(void *, ZT_GoTap *, uint64_t, uint64_t, uint32_t);
extern "C" void goHandleTapRemovedMulticastGroup(void *, ZT_GoTap *, uint64_t, uint64_t, uint32_t);
static void ZT_GoNode_VirtualNetworkConfigFunction(
ZT_Node *node,
@ -115,7 +121,7 @@ static void ZT_GoNode_VirtualNetworkConfigFunction(
enum ZT_VirtualNetworkConfigOperation op,
const ZT_VirtualNetworkConfig *cfg)
{
goVirtualNetworkConfigFunc(reinterpret_cast<ZT_GoNode *>(uptr)->goUserPtr,reinterpret_cast<ZT_GoTap *>(*nptr),nwid,op,cfg);
goVirtualNetworkConfigFunc(reinterpret_cast<ZT_GoNode *>(uptr)->goUserPtr, reinterpret_cast<ZT_GoTap *>(*nptr), nwid, op, cfg);
}
static void ZT_GoNode_VirtualNetworkFrameFunction(
@ -132,7 +138,7 @@ static void ZT_GoNode_VirtualNetworkFrameFunction(
unsigned int len)
{
if (*nptr)
reinterpret_cast<EthernetTap *>(*nptr)->put(MAC(srcMac),MAC(destMac),etherType,data,len);
reinterpret_cast<EthernetTap *>(*nptr)->put(MAC(srcMac), MAC(destMac), etherType, data, len);
}
static void ZT_GoNode_EventCallback(
@ -142,7 +148,7 @@ static void ZT_GoNode_EventCallback(
enum ZT_Event et,
const void *data)
{
goZtEvent(reinterpret_cast<ZT_GoNode *>(uptr)->goUserPtr,et,data);
goZtEvent(reinterpret_cast<ZT_GoNode *>(uptr)->goUserPtr, et, data);
}
static void ZT_GoNode_StatePutFunction(
@ -162,7 +168,9 @@ static void ZT_GoNode_StatePutFunction(
len);
}
static void _freeFunc(void *p) { if (p) free(p); }
static void _freeFunc(void *p)
{ if (p) free(p); }
static int ZT_GoNode_StateGetFunction(
ZT_Node *node,
void *uptr,
@ -180,29 +188,29 @@ static int ZT_GoNode_StateGetFunction(
data);
}
static ZT_INLINE void doUdpSend(ZT_SOCKET sock,const struct sockaddr_storage *addr,const void *data,const unsigned int len,const unsigned int ipTTL)
static ZT_INLINE void doUdpSend(ZT_SOCKET sock, const struct sockaddr_storage *addr, const void *data, const unsigned int len, const unsigned int ipTTL)
{
switch(addr->ss_family) {
switch (addr->ss_family) {
case AF_INET:
if (unlikely((ipTTL > 0)&&(ipTTL < 255))) {
if (unlikely((ipTTL > 0) && (ipTTL < 255))) {
#ifdef __WINDOWS__
DWORD tmp = (DWORD)ipTTL;
#else
int tmp = (int)ipTTL;
#endif
setsockopt(sock,IPPROTO_IP,IP_TTL,&tmp,sizeof(tmp));
sendto(sock,data,len,MSG_DONTWAIT,(const sockaddr *)addr,sizeof(struct sockaddr_in));
setsockopt(sock, IPPROTO_IP, IP_TTL, &tmp, sizeof(tmp));
sendto(sock, data, len, MSG_DONTWAIT, (const sockaddr *)addr, sizeof(struct sockaddr_in));
tmp = 255;
setsockopt(sock,IPPROTO_IP,IP_TTL,&tmp,sizeof(tmp));
setsockopt(sock, IPPROTO_IP, IP_TTL, &tmp, sizeof(tmp));
} else {
sendto(sock,data,len,MSG_DONTWAIT,(const sockaddr *)addr,sizeof(struct sockaddr_in));
sendto(sock, data, len, MSG_DONTWAIT, (const sockaddr *)addr, sizeof(struct sockaddr_in));
}
break;
case AF_INET6:
// The ipTTL option isn't currently used with IPv6. It's only used
// with IPv4 "firewall opener" / "NAT buster" preamble packets as part
// of IPv4 NAT traversal.
sendto(sock,data,len,MSG_DONTWAIT,(const sockaddr *)addr,sizeof(struct sockaddr_in6));
sendto(sock, data, len, MSG_DONTWAIT, (const sockaddr *)addr, sizeof(struct sockaddr_in6));
break;
}
}
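The ipTTL branch above implements the IPv4 "firewall opener" behavior described in the comment: a deliberately short TTL opens the local NAT mapping while the packet itself normally expires before reaching the far side. A hedged usage sketch follows; the helper name and the TTL value of 2 are illustrative, and the socket and address are assumed to come from the listener code below.

// Hedged sketch, not part of this commit: typical use of doUdpSend()'s ipTTL
// parameter for IPv4 NAT traversal.
static void sendNatBusterThenData(ZT_SOCKET sock, const struct sockaddr_storage *peer, const void *data, const unsigned int len)
{
	doUdpSend(sock, peer, data, len, 2);   // 0 < TTL < 255: IP_TTL is lowered for this send, opening the local NAT mapping
	doUdpSend(sock, peer, data, len, 0);   // 0 (or 255): plain send with the socket's normal TTL
}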
@ -218,13 +226,13 @@ static int ZT_GoNode_WirePacketSendFunction(
unsigned int ipTTL)
{
if (likely(localSocket > 0)) {
doUdpSend((ZT_SOCKET)localSocket,addr,data,len,ipTTL);
doUdpSend((ZT_SOCKET)localSocket, addr, data, len, ipTTL);
} else {
ZT_GoNode *const gn = reinterpret_cast<ZT_GoNode *>(uptr);
std::lock_guard<std::mutex> l(gn->threads_l);
for(auto t=gn->threads.begin();t!=gn->threads.end();++t) {
if ((t->second.af == addr->ss_family)&&(t->second.primary)) {
doUdpSend(t->first,addr,data,len,ipTTL);
std::lock_guard< std::mutex > l(gn->threads_l);
for (auto t = gn->threads.begin(); t != gn->threads.end(); ++t) {
if ((t->second.af == addr->ss_family) && (t->second.primary)) {
doUdpSend(t->first, addr, data, len, ipTTL);
break;
}
}
@ -241,7 +249,7 @@ static int ZT_GoNode_PathCheckFunction(
int64_t localSocket,
const struct sockaddr_storage *sa)
{
switch(sa->ss_family) {
switch (sa->ss_family) {
case AF_INET:
return goPathCheckFunc(
reinterpret_cast<ZT_GoNode *>(uptr)->goUserPtr,
@ -282,15 +290,15 @@ static int ZT_GoNode_PathLookupFunction(
&port
);
if (result != 0) {
switch(family) {
switch (family) {
case AF_INET:
reinterpret_cast<struct sockaddr_in *>(sa)->sin_family = AF_INET;
memcpy(&(reinterpret_cast<struct sockaddr_in *>(sa)->sin_addr.s_addr),ip,4);
memcpy(&(reinterpret_cast<struct sockaddr_in *>(sa)->sin_addr.s_addr), ip, 4);
reinterpret_cast<struct sockaddr_in *>(sa)->sin_port = Utils::hton((uint16_t)port);
return 1;
case AF_INET6:
reinterpret_cast<struct sockaddr_in6 *>(sa)->sin6_family = AF_INET6;
memcpy(reinterpret_cast<struct sockaddr_in6 *>(sa)->sin6_addr.s6_addr,ip,16);
memcpy(reinterpret_cast<struct sockaddr_in6 *>(sa)->sin6_addr.s6_addr, ip, 16);
reinterpret_cast<struct sockaddr_in6 *>(sa)->sin6_port = Utils::hton((uint16_t)port);
return 1;
}
@ -298,7 +306,7 @@ static int ZT_GoNode_PathLookupFunction(
return 0;
}
extern "C" ZT_GoNode *ZT_GoNode_new(const char *workingPath,uintptr_t userPtr)
extern "C" ZT_GoNode *ZT_GoNode_new(const char *workingPath, uintptr_t userPtr)
{
try {
struct ZT_Node_Callbacks cb;
@ -315,7 +323,7 @@ extern "C" ZT_GoNode *ZT_GoNode_new(const char *workingPath,uintptr_t userPtr)
ZT_GoNode_Impl *gn = new ZT_GoNode_Impl;
const int64_t now = OSUtils::now();
gn->goUserPtr = reinterpret_cast<void *>(userPtr);
gn->node = new Node(reinterpret_cast<void *>(gn),nullptr,&cb,now);
gn->node = new Node(reinterpret_cast<void *>(gn), nullptr, &cb, now);
gn->nextBackgroundTaskDeadline = now;
gn->path = workingPath;
gn->run = true;
@ -327,28 +335,28 @@ extern "C" ZT_GoNode *ZT_GoNode_new(const char *workingPath,uintptr_t userPtr)
const int64_t now = OSUtils::now();
if (now >= gn->nextBackgroundTaskDeadline)
gn->node->processBackgroundTasks(nullptr,now,&(gn->nextBackgroundTaskDeadline));
gn->node->processBackgroundTasks(nullptr, now, &(gn->nextBackgroundTaskDeadline));
if ((now - lastCheckedTaps) > 10000) {
lastCheckedTaps = now;
std::vector<MulticastGroup> added,removed;
std::lock_guard<std::mutex> tl(gn->taps_l);
for(auto t=gn->taps.begin();t!=gn->taps.end();++t) {
std::vector< MulticastGroup > added, removed;
std::lock_guard< std::mutex > tl(gn->taps_l);
for (auto t = gn->taps.begin(); t != gn->taps.end(); ++t) {
added.clear();
removed.clear();
t->second->scanMulticastGroups(added,removed);
for(auto g=added.begin();g!=added.end();++g)
goHandleTapAddedMulticastGroup(gn,(ZT_GoTap *)t->second.get(),t->first,g->mac().toInt(),g->adi());
for(auto g=removed.begin();g!=removed.end();++g)
goHandleTapRemovedMulticastGroup(gn,(ZT_GoTap *)t->second.get(),t->first,g->mac().toInt(),g->adi());
t->second->scanMulticastGroups(added, removed);
for (auto g = added.begin(); g != added.end(); ++g)
goHandleTapAddedMulticastGroup(gn, (ZT_GoTap *)t->second.get(), t->first, g->mac().toInt(), g->adi());
for (auto g = removed.begin(); g != removed.end(); ++g)
goHandleTapRemovedMulticastGroup(gn, (ZT_GoTap *)t->second.get(), t->first, g->mac().toInt(), g->adi());
}
}
}
});
return gn;
} catch ( ... ) {
fprintf(stderr,"FATAL: unable to create new instance of Node (out of memory?)" ZT_EOL_S);
} catch (...) {
fprintf(stderr, "FATAL: unable to create new instance of Node (out of memory?)" ZT_EOL_S);
exit(1);
}
}
@ -358,17 +366,17 @@ extern "C" void ZT_GoNode_delete(ZT_GoNode *gn)
gn->run = false;
gn->threads_l.lock();
for(auto t=gn->threads.begin();t!=gn->threads.end();++t) {
for (auto t = gn->threads.begin(); t != gn->threads.end(); ++t) {
t->second.run = false;
shutdown(t->first,SHUT_RDWR);
shutdown(t->first, SHUT_RDWR);
close(t->first);
t->second.thr.join();
}
gn->threads_l.unlock();
gn->taps_l.lock();
for(auto t=gn->taps.begin();t!=gn->taps.end();++t)
gn->node->leave(t->first,nullptr,nullptr);
for (auto t = gn->taps.begin(); t != gn->taps.end(); ++t)
gn->node->leave(t->first, nullptr, nullptr);
gn->taps.clear();
gn->taps_l.unlock();
@ -385,17 +393,17 @@ extern "C" ZT_Node *ZT_GoNode_getNode(ZT_GoNode *gn)
return gn->node;
}
static void setCommonUdpSocketSettings(ZT_SOCKET udpSock,const char *dev)
static void setCommonUdpSocketSettings(ZT_SOCKET udpSock, const char *dev)
{
int bufSize = 1048576;
while (bufSize > 131072) {
if (setsockopt(udpSock,SOL_SOCKET,SO_RCVBUF,(const char *)&bufSize,sizeof(bufSize)) == 0)
if (setsockopt(udpSock, SOL_SOCKET, SO_RCVBUF, (const char *)&bufSize, sizeof(bufSize)) == 0)
break;
bufSize -= 131072;
}
bufSize = 1048576;
while (bufSize > 131072) {
if (setsockopt(udpSock,SOL_SOCKET,SO_SNDBUF,(const char *)&bufSize,sizeof(bufSize)) == 0)
if (setsockopt(udpSock, SOL_SOCKET, SO_SNDBUF, (const char *)&bufSize, sizeof(bufSize)) == 0)
break;
bufSize -= 131072;
}
@ -404,15 +412,15 @@ static void setCommonUdpSocketSettings(ZT_SOCKET udpSock,const char *dev)
#ifdef SO_REUSEPORT
fl = SETSOCKOPT_FLAG_TRUE;
setsockopt(udpSock,SOL_SOCKET,SO_REUSEPORT,(void *)&fl,sizeof(fl));
setsockopt(udpSock, SOL_SOCKET, SO_REUSEPORT, (void *)&fl, sizeof(fl));
#endif
#ifndef __LINUX__ // linux wants just SO_REUSEPORT
fl = SETSOCKOPT_FLAG_TRUE;
setsockopt(udpSock,SOL_SOCKET,SO_REUSEADDR,(void *)&fl,sizeof(fl));
setsockopt(udpSock, SOL_SOCKET, SO_REUSEADDR, (void *)&fl, sizeof(fl));
#endif
fl = SETSOCKOPT_FLAG_TRUE;
setsockopt(udpSock,SOL_SOCKET,SO_BROADCAST,(void *)&fl,sizeof(fl));
setsockopt(udpSock, SOL_SOCKET, SO_BROADCAST, (void *)&fl, sizeof(fl));
#ifdef IP_DONTFRAG
fl = SETSOCKOPT_FLAG_FALSE;
@ -428,54 +436,54 @@ static void setCommonUdpSocketSettings(ZT_SOCKET udpSock,const char *dev)
setsockopt(udpSock,SOL_SOCKET,SO_BINDTODEVICE,dev,strlen(dev));
#endif
#if defined(__BSD__) && defined(IP_BOUND_IF)
if ((dev)&&(strlen(dev))) {
if ((dev) && (strlen(dev))) {
int idx = if_nametoindex(dev);
if (idx != 0)
setsockopt(udpSock,IPPROTO_IP,IP_BOUND_IF,(void *)&idx,sizeof(idx));
setsockopt(udpSock, IPPROTO_IP, IP_BOUND_IF, (void *)&idx, sizeof(idx));
}
#endif
}
extern "C" int ZT_GoNode_phyStartListen(ZT_GoNode *gn,const char *dev,const char *ip,const int port,const int primary)
extern "C" int ZT_GoNode_phyStartListen(ZT_GoNode *gn, const char *dev, const char *ip, const int port, const int primary)
{
if (strchr(ip,':')) {
if (strchr(ip, ':')) {
struct sockaddr_in6 in6;
memset(&in6,0,sizeof(in6));
memset(&in6, 0, sizeof(in6));
in6.sin6_family = AF_INET6;
if (inet_pton(AF_INET6,ip,&(in6.sin6_addr)) <= 0)
if (inet_pton(AF_INET6, ip, &(in6.sin6_addr)) <= 0)
return errno;
in6.sin6_port = htons((uint16_t)port);
ZT_SOCKET udpSock = socket(AF_INET6,SOCK_DGRAM,0);
ZT_SOCKET udpSock = socket(AF_INET6, SOCK_DGRAM, 0);
if (udpSock == ZT_INVALID_SOCKET)
return errno;
setCommonUdpSocketSettings(udpSock,dev);
setCommonUdpSocketSettings(udpSock, dev);
SETSOCKOPT_FLAG_TYPE fl = SETSOCKOPT_FLAG_TRUE;
setsockopt(udpSock,IPPROTO_IPV6,IPV6_V6ONLY,(const char *)&fl,sizeof(fl));
setsockopt(udpSock, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&fl, sizeof(fl));
#ifdef IPV6_DONTFRAG
fl = SETSOCKOPT_FLAG_FALSE;
setsockopt(udpSock,IPPROTO_IPV6,IPV6_DONTFRAG,&fl,sizeof(fl));
#endif
if (bind(udpSock,reinterpret_cast<const struct sockaddr *>(&in6),sizeof(in6)) != 0)
if (bind(udpSock, reinterpret_cast<const struct sockaddr *>(&in6), sizeof(in6)) != 0)
return errno;
{
std::lock_guard<std::mutex> l(gn->threads_l);
std::lock_guard< std::mutex > l(gn->threads_l);
ZT_GoNodeThread &gnt = gn->threads[udpSock];
gnt.ip = ip;
gnt.port = port;
gnt.af = AF_INET6;
gnt.primary = (primary != 0);
gnt.run = true;
gnt.thr = std::thread([udpSock,gn,&gnt] {
gnt.thr = std::thread([udpSock, gn, &gnt] {
struct sockaddr_in6 in6;
socklen_t salen;
while (gnt.run) {
salen = sizeof(in6);
void *buf = ZT_getBuffer();
if (buf) {
int s = (int)recvfrom(udpSock,buf,16384,0,reinterpret_cast<struct sockaddr *>(&in6),&salen);
int s = (int)recvfrom(udpSock, buf, 16384, 0, reinterpret_cast<struct sockaddr *>(&in6), &salen);
if (s > 0) {
ZT_Node_processWirePacket(
reinterpret_cast<ZT_Node *>(gn->node),
@ -497,40 +505,40 @@ extern "C" int ZT_GoNode_phyStartListen(ZT_GoNode *gn,const char *dev,const char
}
} else {
struct sockaddr_in in;
memset(&in,0,sizeof(in));
memset(&in, 0, sizeof(in));
in.sin_family = AF_INET;
if (inet_pton(AF_INET,ip,&(in.sin_addr)) <= 0)
if (inet_pton(AF_INET, ip, &(in.sin_addr)) <= 0)
return errno;
in.sin_port = htons((uint16_t)port);
ZT_SOCKET udpSock = socket(AF_INET,SOCK_DGRAM,0);
ZT_SOCKET udpSock = socket(AF_INET, SOCK_DGRAM, 0);
if (udpSock == ZT_INVALID_SOCKET)
return errno;
setCommonUdpSocketSettings(udpSock,dev);
setCommonUdpSocketSettings(udpSock, dev);
#ifdef SO_NO_CHECK
SETSOCKOPT_FLAG_TYPE fl = SETSOCKOPT_FLAG_TRUE;
setsockopt(udpSock,SOL_SOCKET,SO_NO_CHECK,&fl,sizeof(fl));
#endif
if (bind(udpSock,reinterpret_cast<const struct sockaddr *>(&in),sizeof(in)) != 0)
if (bind(udpSock, reinterpret_cast<const struct sockaddr *>(&in), sizeof(in)) != 0)
return errno;
{
std::lock_guard<std::mutex> l(gn->threads_l);
std::lock_guard< std::mutex > l(gn->threads_l);
ZT_GoNodeThread &gnt = gn->threads[udpSock];
gnt.ip = ip;
gnt.port = port;
gnt.af = AF_INET;
gnt.primary = (primary != 0);
gnt.run = true;
gnt.thr = std::thread([udpSock,gn,&gnt] {
gnt.thr = std::thread([udpSock, gn, &gnt] {
struct sockaddr_in in4;
socklen_t salen;
while (gnt.run) {
salen = sizeof(in4);
void *buf = ZT_getBuffer();
if (buf) {
int s = (int)recvfrom(udpSock,buf,16384,0,reinterpret_cast<struct sockaddr *>(&in4),&salen);
int s = (int)recvfrom(udpSock, buf, 16384, 0, reinterpret_cast<struct sockaddr *>(&in4), &salen);
if (s > 0) {
ZT_Node_processWirePacket(
reinterpret_cast<ZT_Node *>(gn->node),
@ -555,14 +563,14 @@ extern "C" int ZT_GoNode_phyStartListen(ZT_GoNode *gn,const char *dev,const char
return 0;
}
extern "C" int ZT_GoNode_phyStopListen(ZT_GoNode *gn,const char *dev,const char *ip,const int port)
extern "C" int ZT_GoNode_phyStopListen(ZT_GoNode *gn, const char *dev, const char *ip, const int port)
{
{
std::lock_guard<std::mutex> l(gn->threads_l);
for(auto t=gn->threads.begin();t!=gn->threads.end();) {
if ((t->second.ip == ip)&&(t->second.port == port)) {
std::lock_guard< std::mutex > l(gn->threads_l);
for (auto t = gn->threads.begin(); t != gn->threads.end();) {
if ((t->second.ip == ip) && (t->second.port == port)) {
t->second.run = false;
shutdown(t->first,SHUT_RDWR);
shutdown(t->first, SHUT_RDWR);
close(t->first);
t->second.thr.join();
gn->threads.erase(t++);
@ -572,7 +580,7 @@ extern "C" int ZT_GoNode_phyStopListen(ZT_GoNode *gn,const char *dev,const char
return 0;
}
static void tapFrameHandler(void *uptr,void *tptr,uint64_t nwid,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
static void tapFrameHandler(void *uptr, void *tptr, uint64_t nwid, const MAC &from, const MAC &to, unsigned int etherType, unsigned int vlanId, const void *data, unsigned int len)
{
ZT_Node_processVirtualNetworkFrame(
reinterpret_cast<ZT_Node *>(reinterpret_cast<ZT_GoNode *>(uptr)->node),
@ -589,69 +597,69 @@ static void tapFrameHandler(void *uptr,void *tptr,uint64_t nwid,const MAC &from,
&(reinterpret_cast<ZT_GoNode *>(uptr)->nextBackgroundTaskDeadline));
}
extern "C" ZT_GoTap *ZT_GoNode_join(ZT_GoNode *gn,uint64_t nwid,const ZT_Fingerprint *const controllerFingerprint)
extern "C" ZT_GoTap *ZT_GoNode_join(ZT_GoNode *gn, uint64_t nwid, const ZT_Fingerprint *const controllerFingerprint)
{
try {
std::lock_guard<std::mutex> l(gn->taps_l);
std::lock_guard< std::mutex > l(gn->taps_l);
auto existingTap = gn->taps.find(nwid);
if (existingTap != gn->taps.end())
return (ZT_GoTap *)existingTap->second.get();
char tmp[256];
OSUtils::ztsnprintf(tmp,sizeof(tmp),"ZeroTier Network %.16llx",(unsigned long long)nwid);
std::shared_ptr<EthernetTap> tap(EthernetTap::newInstance(nullptr,gn->path.c_str(),MAC(Address(gn->node->address()),nwid),ZT_DEFAULT_MTU,0,nwid,tmp,&tapFrameHandler,gn));
OSUtils::ztsnprintf(tmp, sizeof(tmp), "ZeroTier Network %.16llx", (unsigned long long)nwid);
std::shared_ptr< EthernetTap > tap(EthernetTap::newInstance(nullptr, gn->path.c_str(), MAC(Address(gn->node->address()), nwid), ZT_DEFAULT_MTU, 0, nwid, tmp, &tapFrameHandler, gn));
if (!tap)
return nullptr;
gn->taps[nwid] = tap;
gn->node->join(nwid,controllerFingerprint,tap.get(),nullptr);
gn->node->join(nwid, controllerFingerprint, tap.get(), nullptr);
return (ZT_GoTap *)tap.get();
} catch ( ... ) {
} catch (...) {
return nullptr;
}
}
extern "C" void ZT_GoNode_leave(ZT_GoNode *gn,uint64_t nwid)
extern "C" void ZT_GoNode_leave(ZT_GoNode *gn, uint64_t nwid)
{
std::lock_guard<std::mutex> l(gn->taps_l);
std::lock_guard< std::mutex > l(gn->taps_l);
auto existingTap = gn->taps.find(nwid);
if (existingTap != gn->taps.end()) {
gn->node->leave(nwid,nullptr,nullptr);
gn->node->leave(nwid, nullptr, nullptr);
gn->taps.erase(existingTap);
}
}
extern "C" void ZT_GoTap_setEnabled(ZT_GoTap *tap,int enabled)
extern "C" void ZT_GoTap_setEnabled(ZT_GoTap *tap, int enabled)
{
reinterpret_cast<EthernetTap *>(tap)->setEnabled(enabled != 0);
}
extern "C" int ZT_GoTap_addIp(ZT_GoTap *tap,int af,const void *ip,int netmaskBits)
extern "C" int ZT_GoTap_addIp(ZT_GoTap *tap, int af, const void *ip, int netmaskBits)
{
switch(af) {
switch (af) {
case AF_INET:
return (reinterpret_cast<EthernetTap *>(tap)->addIp(InetAddress(ip,4,(unsigned int)netmaskBits)) ? 1 : 0);
return (reinterpret_cast<EthernetTap *>(tap)->addIp(InetAddress(ip, 4, (unsigned int)netmaskBits)) ? 1 : 0);
case AF_INET6:
return (reinterpret_cast<EthernetTap *>(tap)->addIp(InetAddress(ip,16,(unsigned int)netmaskBits)) ? 1 : 0);
return (reinterpret_cast<EthernetTap *>(tap)->addIp(InetAddress(ip, 16, (unsigned int)netmaskBits)) ? 1 : 0);
}
return 0;
}
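For reference, a hedged usage sketch of the call above (not part of this commit); the 10.0.0.1/24 address is illustrative and the tap pointer is assumed to come from ZT_GoNode_join():

// Hedged sketch: assign an illustrative IPv4 address to a tap. The ip bytes are
// the raw address with the most significant octet first; returns 1 on success.
static void exampleAssignV4(ZT_GoTap *tap)
{
	const uint8_t ip4[4] = { 10, 0, 0, 1 };
	ZT_GoTap_addIp(tap, AF_INET, ip4, 24);
}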
extern "C" int ZT_GoTap_removeIp(ZT_GoTap *tap,int af,const void *ip,int netmaskBits)
extern "C" int ZT_GoTap_removeIp(ZT_GoTap *tap, int af, const void *ip, int netmaskBits)
{
switch(af) {
switch (af) {
case AF_INET:
return (reinterpret_cast<EthernetTap *>(tap)->removeIp(InetAddress(ip,4,(unsigned int)netmaskBits)) ? 1 : 0);
return (reinterpret_cast<EthernetTap *>(tap)->removeIp(InetAddress(ip, 4, (unsigned int)netmaskBits)) ? 1 : 0);
case AF_INET6:
return (reinterpret_cast<EthernetTap *>(tap)->removeIp(InetAddress(ip,16,(unsigned int)netmaskBits)) ? 1 : 0);
return (reinterpret_cast<EthernetTap *>(tap)->removeIp(InetAddress(ip, 16, (unsigned int)netmaskBits)) ? 1 : 0);
}
return 0;
}
extern "C" int ZT_GoTap_ips(ZT_GoTap *tap,void *buf,unsigned int bufSize)
extern "C" int ZT_GoTap_ips(ZT_GoTap *tap, void *buf, unsigned int bufSize)
{
auto ips = reinterpret_cast<EthernetTap *>(tap)->ips();
unsigned int p = 0;
uint8_t *const b = reinterpret_cast<uint8_t *>(buf);
for(auto ip=ips.begin();ip!=ips.end();++ip) {
for (auto ip = ips.begin(); ip != ips.end(); ++ip) {
if ((p + 6) > bufSize)
break;
const uint8_t *const ipd = reinterpret_cast<const uint8_t *>(ip->rawIpData());
@ -665,7 +673,7 @@ extern "C" int ZT_GoTap_ips(ZT_GoTap *tap,void *buf,unsigned int bufSize)
} else if (ip->isV6()) {
if ((p + 18) <= bufSize) {
b[p++] = AF_INET6;
for(int j=0;j<16;++j)
for (int j = 0; j < 16; ++j)
b[p++] = ipd[j];
b[p++] = (uint8_t)ip->netmaskBits();
}
@ -674,36 +682,41 @@ extern "C" int ZT_GoTap_ips(ZT_GoTap *tap,void *buf,unsigned int bufSize)
return (int)p;
}
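The buffer filled above is a flat sequence of entries, each an address-family byte followed by the raw IP (4 or 16 bytes) and a netmask bit count. A hedged decoding sketch (not part of this commit), relying only on <arpa/inet.h>, stdio, and the ZT_EOL_S macro already used in this file; printTapIps is an illustrative name:

// Hedged sketch: walk the packed buffer produced by ZT_GoTap_ips() above.
// Entry layout: [1 byte family][4 or 16 byte IP][1 byte netmask bits].
static void printTapIps(const uint8_t *const b, const int len)
{
	int p = 0;
	while (p < len) {
		const int af = b[p++];
		const int ipLen = (af == AF_INET6) ? 16 : 4;
		if ((p + ipLen + 1) > len)
			break; // truncated entry
		char tmp[64];
		inet_ntop(af, b + p, tmp, sizeof(tmp));
		p += ipLen;
		printf("%s/%u" ZT_EOL_S, tmp, (unsigned int)b[p++]);
	}
}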
extern "C" void ZT_GoTap_deviceName(ZT_GoTap *tap,char nbuf[256])
extern "C" void ZT_GoTap_deviceName(ZT_GoTap *tap, char nbuf[256])
{
Utils::scopy(nbuf,256,reinterpret_cast<EthernetTap *>(tap)->deviceName().c_str());
Utils::scopy(nbuf, 256, reinterpret_cast<EthernetTap *>(tap)->deviceName().c_str());
}
extern "C" void ZT_GoTap_setFriendlyName(ZT_GoTap *tap,const char *friendlyName)
extern "C" void ZT_GoTap_setFriendlyName(ZT_GoTap *tap, const char *friendlyName)
{
reinterpret_cast<EthernetTap *>(tap)->setFriendlyName(friendlyName);
}
extern "C" void ZT_GoTap_setMtu(ZT_GoTap *tap,unsigned int mtu)
extern "C" void ZT_GoTap_setMtu(ZT_GoTap *tap, unsigned int mtu)
{
reinterpret_cast<EthernetTap *>(tap)->setMtu(mtu);
}
extern "C" int ZT_isTemporaryV6Address(const char *ifname,const struct sockaddr_storage *a)
#if defined(IFA_F_SECONDARY) && !defined(IFA_F_TEMPORARY)
#define IFA_F_TEMPORARY IFA_F_SECONDARY
#endif
extern "C" int ZT_isTemporaryV6Address(const char *ifname, const struct sockaddr_storage *a)
{
#ifdef IN6_IFF_TEMPORARY
#if defined(IN6_IFF_TEMPORARY) && defined(SIOCGIFAFLAG_IN6)
static ZT_SOCKET s_tmpV6Socket = ZT_INVALID_SOCKET;
static std::mutex s_lock;
std::lock_guard<std::mutex> l(s_lock);
std::lock_guard< std::mutex > l(s_lock);
if (s_tmpV6Socket == ZT_INVALID_SOCKET) {
s_tmpV6Socket = socket(AF_INET6,SOCK_DGRAM,0);
s_tmpV6Socket = socket(AF_INET6, SOCK_DGRAM, 0);
if (s_tmpV6Socket <= 0)
return 0;
}
struct in6_ifreq ifr;
strncpy(ifr.ifr_name,ifname,sizeof(ifr.ifr_name));
memcpy(&(ifr.ifr_addr),a,sizeof(sockaddr_in6));
if (ioctl(s_tmpV6Socket,SIOCGIFAFLAG_IN6,&ifr) < 0) {
memset(&ifr, 0, sizeof(ifr));
strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
memcpy(&(ifr.ifr_addr), a, sizeof(sockaddr_in6));
if (ioctl(s_tmpV6Socket, SIOCGIFAFLAG_IN6, &ifr) < 0) {
return 0;
}
return ((ifr.ifr_ifru.ifru_flags6 & IN6_IFF_TEMPORARY) != 0) ? 1 : 0;