A bunch of cleanup and refactoring, implementation of prep for forward secrecy still in progress.

Adam Ierymenko 2020-04-13 12:22:08 -07:00
parent c65391a344
commit 16a3c14c53
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
48 changed files with 1377 additions and 1600 deletions

View file

@ -23,10 +23,10 @@
#ifndef ZT_AES_NO_ACCEL
#ifdef ZT_ARCH_X64
#include <wmmintrin.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
#include <wmmintrin.h>
#define ZT_AES_AESNI 1
#endif
#endif

View file

@ -17,6 +17,7 @@
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#include "Containers.hpp"
#define ZT_ADDRESS_STRING_SIZE_MAX 11
@ -91,6 +92,7 @@ public:
s[10] = 0;
return s;
}
ZT_INLINE String toString() const { char s[ZT_ADDRESS_STRING_SIZE_MAX]; toString(s); return String(s); }
/**
* Check if this address is reserved

View file

@ -2371,14 +2371,14 @@ namespace ZeroTier {
void C25519::generateCombined(uint8_t *pub,uint8_t *priv)
{
Utils::getSecureRandom(priv,ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
_calcPubDH(pub,priv);
_calcPubED(pub,priv);
s_calcPubDH(pub, priv);
s_calcPubED(pub, priv);
}
void C25519::generateC25519(uint8_t pub[ZT_C25519_ECDH_PUBLIC_KEY_SIZE],uint8_t priv[ZT_C25519_ECDH_PRIVATE_KEY_SIZE])
{
Utils::getSecureRandom(priv,ZT_C25519_ECDH_PRIVATE_KEY_SIZE);
_calcPubDH(pub,priv);
s_calcPubDH(pub, priv);
}
void C25519::agree(const uint8_t mine[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE],const uint8_t their[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],uint8_t rawkey[ZT_C25519_ECDH_SHARED_SECRET_SIZE])
@ -2472,14 +2472,14 @@ bool C25519::verify(const uint8_t their[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],cons
return Utils::secureEq(sig,t2,32);
}
void C25519::_calcPubDH(uint8_t *const pub,const uint8_t *const priv)
void C25519::s_calcPubDH(uint8_t *pub, const uint8_t *priv)
{
// First 32 bytes of pub and priv are the keys for ECDH key
// agreement. This generates the public portion from the private.
crypto_scalarmult_base(pub,priv);
}
void C25519::_calcPubED(uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],const uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE])
void C25519::s_calcPubED(uint8_t *pub, const uint8_t *priv)
{
struct {
uint8_t extsk[64];

View file

@ -62,11 +62,11 @@ public:
static ZT_INLINE void generateSatisfying(F cond,uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE])
{
Utils::getSecureRandom(priv,ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
_calcPubED(pub,priv); // do Ed25519 key -- bytes 32-63 of pub and priv
s_calcPubED(pub, priv); // do Ed25519 key -- bytes 32-63 of pub and priv
do {
++(((uint64_t *)priv)[1]);
--(((uint64_t *)priv)[2]);
_calcPubDH(pub,priv); // keep regenerating bytes 0-31 until satisfied
s_calcPubDH(pub, priv); // keep regenerating bytes 0-31 until satisfied
} while (!cond(pub));
}
@ -116,11 +116,11 @@ public:
private:
// derive first 32 bytes of kp.pub from first 32 bytes of kp.priv
// this is the ECDH key
static void _calcPubDH(uint8_t *pub,const uint8_t *priv);
static void s_calcPubDH(uint8_t *pub, const uint8_t *priv);
// derive 2nd 32 bytes of kp.pub from 2nd 32 bytes of kp.priv
// this is the Ed25519 sign/verify key
static void _calcPubED(uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],const uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE]);
static void s_calcPubED(uint8_t *pub, const uint8_t *priv);
};
} // namespace ZeroTier
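
As a usage note for the renamed s_calcPubDH/s_calcPubED helpers: generateSatisfying() keeps regenerating the ECDH half of the key pair until a caller-supplied predicate accepts the public key. A minimal sketch, assuming this tree's C25519.hpp and an arbitrary example condition (not code from this commit):

#include "C25519.hpp" // assumes this tree's headers

static void exampleGenerateSatisfying()
{
    uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE];
    uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE];
    // The predicate only needs to be callable as bool(const uint8_t *pub).
    // Here: regenerate until the first public key byte is zero (illustrative only).
    ZeroTier::C25519::generateSatisfying(
        [](const uint8_t *p) noexcept { return p[0] == 0; },
        pub,priv);
}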

View file

@ -21,10 +21,10 @@ namespace ZeroTier {
bool Capability::sign(const Identity &from,const Address &to) noexcept
{
uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX + 16];
_issuedTo = to;
_signedBy = from.address();
_signatureLength = from.sign(buf,(unsigned int)marshal(buf,true),_signature,sizeof(_signature));
return _signatureLength > 0;
m_issuedTo = to;
m_signedBy = from.address();
m_signatureLength = from.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return m_signatureLength > 0;
}
int Capability::marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX],const bool forSign) const noexcept
@ -36,23 +36,23 @@ int Capability::marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX],const bool
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p,_nwid); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)_ts); p += 8;
Utils::storeBigEndian<uint32_t>(data + p,_id); p += 4;
Utils::storeBigEndian<uint64_t>(data + p, m_nwid); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)m_ts); p += 8;
Utils::storeBigEndian<uint32_t>(data + p, m_id); p += 4;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)_ruleCount); p += 2;
p += Capability::marshalVirtualNetworkRules(data + p,_rules,_ruleCount);
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)m_ruleCount); p += 2;
p += Capability::marshalVirtualNetworkRules(data + p, m_rules, m_ruleCount);
// LEGACY: older versions supported multiple records with this being a maximum custody
// chain length. This is deprecated so set the max chain length to one.
data[p++] = (uint8_t)1;
if (!forSign) {
_issuedTo.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
m_issuedTo.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
data[p++] = 1; // LEGACY: old versions require a reserved byte here
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)_signatureLength); p += 2;
Utils::copy(data + p,_signature,_signatureLength); p += (int)_signatureLength;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)m_signatureLength); p += 2;
Utils::copy(data + p, m_signature, m_signatureLength); p += (int)m_signatureLength;
// LEGACY: older versions supported more than one record terminated by a zero address.
for(int k=0;k<ZT_ADDRESS_LENGTH;++k)
@ -75,14 +75,14 @@ int Capability::unmarshal(const uint8_t *data,int len) noexcept
if (len < 22)
return -1;
_nwid = Utils::loadBigEndian<uint64_t>(data);
_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
_id = Utils::loadBigEndian<uint32_t>(data + 16);
m_nwid = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 16);
const unsigned int rc = Utils::loadBigEndian<uint16_t>(data + 20);
if (rc > ZT_MAX_CAPABILITY_RULES)
return -1;
const int rulesLen = unmarshalVirtualNetworkRules(data + 22,len - 22,_rules,_ruleCount,rc);
const int rulesLen = unmarshalVirtualNetworkRules(data + 22,len - 22, m_rules, m_ruleCount, rc);
if (rulesLen < 0)
return rulesLen;
int p = 22 + rulesLen;
@ -103,17 +103,17 @@ int Capability::unmarshal(const uint8_t *data,int len) noexcept
if (!to)
break;
_issuedTo = to;
m_issuedTo = to;
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
_signedBy.setTo(data + p); p += ZT_ADDRESS_LENGTH + 1; // LEGACY: +1 to skip reserved field
m_signedBy.setTo(data + p); p += ZT_ADDRESS_LENGTH + 1; // LEGACY: +1 to skip reserved field
if ((p + 2) > len)
return -1;
_signatureLength = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
if ((_signatureLength > sizeof(_signature))||((p + (int)_signatureLength) > len))
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
if ((m_signatureLength > sizeof(m_signature)) || ((p + (int)m_signatureLength) > len))
return -1;
Utils::copy(_signature,data + p,_signatureLength); p += (int)_signatureLength;
Utils::copy(m_signature, data + p, m_signatureLength); p += (int)m_signatureLength;
}
if ((p + 2) > len)

View file

@ -68,33 +68,33 @@ public:
* @param ruleCount Number of flow rules
*/
ZT_INLINE Capability(const uint32_t id,const uint64_t nwid,const int64_t ts,const ZT_VirtualNetworkRule *const rules,const unsigned int ruleCount) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
_nwid(nwid),
_ts(ts),
_id(id),
_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES),
_signatureLength(0)
m_nwid(nwid),
m_ts(ts),
m_id(id),
m_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES),
m_signatureLength(0)
{
if (_ruleCount > 0)
Utils::copy(_rules,rules,sizeof(ZT_VirtualNetworkRule) * _ruleCount);
if (m_ruleCount > 0)
Utils::copy(m_rules, rules, sizeof(ZT_VirtualNetworkRule) * m_ruleCount);
}
/**
* @return Rules -- see ruleCount() for size of array
*/
ZT_INLINE const ZT_VirtualNetworkRule *rules() const noexcept { return _rules; }
ZT_INLINE const ZT_VirtualNetworkRule *rules() const noexcept { return m_rules; }
/**
* @return Number of rules in rules()
*/
ZT_INLINE unsigned int ruleCount() const noexcept { return _ruleCount; }
ZT_INLINE unsigned int ruleCount() const noexcept { return m_ruleCount; }
ZT_INLINE uint32_t id() const noexcept { return _id; }
ZT_INLINE uint64_t networkId() const noexcept { return _nwid; }
ZT_INLINE int64_t timestamp() const noexcept { return _ts; }
ZT_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
ZT_INLINE uint32_t id() const noexcept { return m_id; }
ZT_INLINE uint64_t networkId() const noexcept { return m_nwid; }
ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }
/**
* Sign this capability and add signature to its chain of custody
@ -145,21 +145,21 @@ public:
static int unmarshalVirtualNetworkRules(const uint8_t *data,int len,ZT_VirtualNetworkRule *rules,unsigned int &ruleCount,unsigned int maxRuleCount) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const Capability &c) const noexcept { return (_id < c._id); }
ZT_INLINE bool operator<(const Capability &c) const noexcept { return (m_id < c.m_id); }
ZT_INLINE bool operator==(const Capability &c) const noexcept { return (memcmp(this,&c,sizeof(Capability)) == 0); }
ZT_INLINE bool operator!=(const Capability &c) const noexcept { return (memcmp(this,&c,sizeof(Capability)) != 0); }
private:
uint64_t _nwid;
int64_t _ts;
uint32_t _id;
unsigned int _ruleCount;
ZT_VirtualNetworkRule _rules[ZT_MAX_CAPABILITY_RULES];
Address _issuedTo;
Address _signedBy;
unsigned int _signatureLength;
uint8_t _signature[ZT_SIGNATURE_BUFFER_SIZE];
uint64_t m_nwid;
int64_t m_ts;
uint32_t m_id;
unsigned int m_ruleCount;
ZT_VirtualNetworkRule m_rules[ZT_MAX_CAPABILITY_RULES];
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier

View file

@ -16,30 +16,30 @@
namespace ZeroTier {
CertificateOfMembership::CertificateOfMembership(const int64_t timestamp,const int64_t timestampMaxDelta,const uint64_t nwid,const Identity &issuedTo) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
_timestamp(timestamp),
_timestampMaxDelta(timestampMaxDelta),
_networkId(nwid),
_issuedTo(issuedTo.fingerprint()),
_signatureLength(0) {}
m_timestamp(timestamp),
m_timestampMaxDelta(timestampMaxDelta),
m_networkId(nwid),
m_issuedTo(issuedTo.fingerprint()),
m_signatureLength(0) {}
bool CertificateOfMembership::agreesWith(const CertificateOfMembership &other) const noexcept
{
// NOTE: we always do explicit absolute value with an if() since llabs() can have overflow
// conditions that could introduce a vulnerability.
if (other._timestamp > _timestamp) {
if ((other._timestamp - _timestamp) > std::min(_timestampMaxDelta,other._timestampMaxDelta))
if (other.m_timestamp > m_timestamp) {
if ((other.m_timestamp - m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
return false;
} else {
if ((_timestamp - other._timestamp) > std::min(_timestampMaxDelta,other._timestampMaxDelta))
if ((m_timestamp - other.m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
return false;
}
// us <> them
for(FCV<_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(_additionalQualifiers.begin());i != _additionalQualifiers.end();++i) {
for(FCV<p_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(m_additionalQualifiers.begin());i != m_additionalQualifiers.end();++i) {
if (i->delta != 0xffffffffffffffffULL) {
const uint64_t *v2 = nullptr;
for(FCV<_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(other._additionalQualifiers.begin());j != other._additionalQualifiers.end();++j) {
for(FCV<p_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(other.m_additionalQualifiers.begin());j != other.m_additionalQualifiers.end();++j) {
if (j->id == i->id) {
v2 = &(j->value);
break;
@ -58,10 +58,10 @@ bool CertificateOfMembership::agreesWith(const CertificateOfMembership &other) c
}
// them <> us (we need a second pass in case they have qualifiers we don't or vice versa)
for(FCV<_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(other._additionalQualifiers.begin());i != other._additionalQualifiers.end();++i) {
for(FCV<p_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(other.m_additionalQualifiers.begin());i != other.m_additionalQualifiers.end();++i) {
if (i->delta != 0xffffffffffffffffULL) {
const uint64_t *v2 = nullptr;
for(FCV<_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(_additionalQualifiers.begin());j != _additionalQualifiers.end();++j) {
for(FCV<p_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(m_additionalQualifiers.begin());j != m_additionalQualifiers.end();++j) {
if (j->id == i->id) {
v2 = &(j->value);
break;
@ -81,16 +81,16 @@ bool CertificateOfMembership::agreesWith(const CertificateOfMembership &other) c
// SECURITY: check for issued-to inequality is a sanity check. This should be impossible since elsewhere
// in the code COMs are checked to ensure that they do in fact belong to their issued-to identities.
return (other._networkId == _networkId) && (_networkId != 0) && (other._issuedTo.address() != _issuedTo.address());
return (other.m_networkId == m_networkId) && (m_networkId != 0) && (other.m_issuedTo.address() != m_issuedTo.address());
}
bool CertificateOfMembership::sign(const Identity &with) noexcept
{
_signedBy = with.address();
m_signedBy = with.address();
uint64_t buf[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = _fillSigningBuf(buf);
_signatureLength = with.sign(buf,bufSize,_signature,sizeof(_signature));
return _signatureLength > 0;
const unsigned int bufSize = m_fillSigningBuf(buf);
m_signatureLength = with.sign(buf, bufSize, m_signature, sizeof(m_signature));
return m_signatureLength > 0;
}
int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX],const bool v2) const noexcept
@ -101,40 +101,40 @@ int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MAR
// equality compare, and the address of the issued-to node as an informational tuple.
int p = 3;
Utils::storeBigEndian<uint64_t>(data + p,0); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)_timestamp); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)_timestampMaxDelta); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)m_timestamp); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)m_timestampMaxDelta); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,1); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,_networkId); p += 8;
Utils::storeBigEndian<uint64_t>(data + p, m_networkId); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,0); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,2); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,_issuedTo.address().toInt()); p += 8;
Utils::storeBigEndian<uint64_t>(data + p, m_issuedTo.address().toInt()); p += 8;
Utils::storeAsIsEndian<uint64_t>(data + p,0xffffffffffffffffULL); p += 8;
if (v2) {
// V2 marshal format will have three tuples followed by the fingerprint hash.
Utils::storeBigEndian<uint16_t>(data + 1,3);
Utils::copy<48>(data + p,_issuedTo.hash());
Utils::copy<48>(data + p, m_issuedTo.hash());
p += 48;
} else {
// V1 marshal format must shove everything into tuples, resulting in nine.
Utils::storeBigEndian<uint16_t>(data + 1,9);
for(int k=0;k<6;++k) {
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)k + 3); p += 8;
Utils::storeAsIsEndian<uint64_t>(data + p,Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash() + (k * 8))); p += 8;
Utils::storeAsIsEndian<uint64_t>(data + p,Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash() + (k * 8))); p += 8;
Utils::storeAsIsEndian<uint64_t>(data + p,0xffffffffffffffffULL); p += 8;
}
}
_signedBy.copyTo(data + p); p += 5;
m_signedBy.copyTo(data + p); p += 5;
if (v2) {
// V2 marshal format prefixes signatures with a 16-bit length to support future signature types.
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)_signatureLength); p += 2;
Utils::copy(data + p,_signature,_signatureLength);
p += (int)_signatureLength;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)m_signatureLength); p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
} else {
// V1 only supports 96-byte signature fields.
Utils::copy<96>(data + p,_signature);
Utils::copy<96>(data + p, m_signature);
p += 96;
}
@ -160,70 +160,70 @@ int CertificateOfMembership::unmarshal(const uint8_t *data,int len) noexcept
const uint64_t delta = Utils::loadBigEndian<uint64_t>(data + p); p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
switch(id) {
case 0:
_timestamp = (int64_t)value;
_timestampMaxDelta = (int64_t)delta;
m_timestamp = (int64_t)value;
m_timestampMaxDelta = (int64_t)delta;
break;
case 1:
_networkId = value;
m_networkId = value;
break;
case 2:
_issuedTo.apiFingerprint()->address = value;
m_issuedTo.apiFingerprint()->address = value;
break;
// V1 nodes will pack the hash into qualifier tuples.
case 3:
Utils::storeBigEndian<uint64_t>(_issuedTo.apiFingerprint()->hash,value);
Utils::storeBigEndian<uint64_t>(m_issuedTo.apiFingerprint()->hash, value);
break;
case 4:
Utils::storeBigEndian<uint64_t>(_issuedTo.apiFingerprint()->hash + 8,value);
Utils::storeBigEndian<uint64_t>(m_issuedTo.apiFingerprint()->hash + 8, value);
break;
case 5:
Utils::storeBigEndian<uint64_t>(_issuedTo.apiFingerprint()->hash + 16,value);
Utils::storeBigEndian<uint64_t>(m_issuedTo.apiFingerprint()->hash + 16, value);
break;
case 6:
Utils::storeBigEndian<uint64_t>(_issuedTo.apiFingerprint()->hash + 24,value);
Utils::storeBigEndian<uint64_t>(m_issuedTo.apiFingerprint()->hash + 24, value);
break;
case 7:
Utils::storeBigEndian<uint64_t>(_issuedTo.apiFingerprint()->hash + 32,value);
Utils::storeBigEndian<uint64_t>(m_issuedTo.apiFingerprint()->hash + 32, value);
break;
case 8:
Utils::storeBigEndian<uint64_t>(_issuedTo.apiFingerprint()->hash + 40,value);
Utils::storeBigEndian<uint64_t>(m_issuedTo.apiFingerprint()->hash + 40, value);
break;
default:
if (_additionalQualifiers.size() >= ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS)
if (m_additionalQualifiers.size() >= ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS)
return -1;
_additionalQualifiers.push_back(_Qualifier(id,value,delta));
m_additionalQualifiers.push_back(p_Qualifier(id, value, delta));
break;
}
}
std::sort(_additionalQualifiers.begin(),_additionalQualifiers.end());
std::sort(m_additionalQualifiers.begin(), m_additionalQualifiers.end());
if (data[0] == 1) {
if ((p + 96) > len)
return -1;
_signatureLength = 96;
Utils::copy<96>(_signature,data + p);
m_signatureLength = 96;
Utils::copy<96>(m_signature, data + p);
return p + 96;
} else if (data[0] == 2) {
if ((p + 48) > len)
return -1;
Utils::copy<48>(_issuedTo.apiFingerprint()->hash,data + p);
Utils::copy<48>(m_issuedTo.apiFingerprint()->hash, data + p);
p += 48;
if ((p + 2) > len)
return -1;
_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
if ((_signatureLength > (unsigned int)sizeof(_signature))||((p + (int)_signatureLength) > len))
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
if ((m_signatureLength > (unsigned int)sizeof(m_signature)) || ((p + (int)m_signatureLength) > len))
return -1;
Utils::copy(_signature,data + p,_signatureLength);
return p + (int)_signatureLength;
Utils::copy(m_signature, data + p, m_signatureLength);
return p + (int)m_signatureLength;
}
return -1;
}
unsigned int CertificateOfMembership::_fillSigningBuf(uint64_t buf[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX / 8]) const noexcept
unsigned int CertificateOfMembership::m_fillSigningBuf(uint64_t *buf) const noexcept
{
const uint64_t informational = 0xffffffffffffffffULL;
@ -235,41 +235,41 @@ unsigned int CertificateOfMembership::_fillSigningBuf(uint64_t buf[ZT_CERTIFICAT
// The standard three tuples that must begin every COM.
buf[0] = 0;
buf[1] = Utils::hton((uint64_t)_timestamp);
buf[2] = Utils::hton((uint64_t)_timestampMaxDelta);
buf[1] = Utils::hton((uint64_t)m_timestamp);
buf[2] = Utils::hton((uint64_t)m_timestampMaxDelta);
buf[3] = ZT_CONST_TO_BE_UINT64(1);
buf[4] = Utils::hton(_networkId);
buf[4] = Utils::hton(m_networkId);
buf[5] = 0;
buf[6] = ZT_CONST_TO_BE_UINT64(2);
buf[7] = Utils::hton(_issuedTo.address().toInt());
buf[7] = Utils::hton(m_issuedTo.address().toInt());
buf[8] = informational;
unsigned int p = 9;
// The full identity fingerprint of the peer to whom the COM was issued,
// embedded as a series of informational tuples.
if (_issuedTo.haveHash()) {
if (m_issuedTo.haveHash()) {
buf[p++] = ZT_CONST_TO_BE_UINT64(3);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash());
buf[p++] = Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash());
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(4);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash() + 8);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash() + 8);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(5);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash() + 16);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash() + 16);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(6);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash() + 24);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash() + 24);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(7);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash() + 32);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash() + 32);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(8);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(_issuedTo.hash() + 40);
buf[p++] = Utils::loadAsIsEndian<uint64_t>(m_issuedTo.hash() + 40);
buf[p++] = informational;
}
for(FCV<_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(_additionalQualifiers.begin());i != _additionalQualifiers.end();++i) { // NOLINT(modernize-loop-convert)
for(FCV<p_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(m_additionalQualifiers.begin());i != m_additionalQualifiers.end();++i) { // NOLINT(modernize-loop-convert)
buf[p++] = Utils::hton(i->id);
buf[p++] = Utils::hton(i->value);
buf[p++] = Utils::hton(i->delta);
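
The loop above writes each additional qualifier exactly like the mandatory ones: a triplet of big-endian 64-bit words. A hedged sketch of that tuple layout for reference (illustrative struct, not a type from this commit):

#include <cstdint>

// Illustrative only: one COM qualifier tuple as it appears in the signing buffer / wire format.
struct ComQualifierTupleSketch
{
    uint64_t id;    // 0 = timestamp, 1 = network ID, 2 = issued-to address, 3..8 = fingerprint hash words
    uint64_t value; // the qualifier's value
    uint64_t delta; // permitted difference when comparing, or 0xffffffffffffffff for informational tuples
};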

View file

@ -126,7 +126,7 @@ public:
/**
* @return True if there's something here
*/
ZT_INLINE operator bool() const noexcept { return (_networkId != 0); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE operator bool() const noexcept { return (m_networkId != 0); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
/**
* @return Credential ID, always 0 for COMs
@ -136,22 +136,22 @@ public:
/**
* @return Timestamp for this cert and maximum delta for timestamp
*/
ZT_INLINE int64_t timestamp() const noexcept { return _timestamp; }
ZT_INLINE int64_t timestamp() const noexcept { return m_timestamp; }
/**
* @return Maximum allowed difference between timestamps
*/
ZT_INLINE int64_t timestampMaxDelta() const noexcept { return _timestampMaxDelta; }
ZT_INLINE int64_t timestampMaxDelta() const noexcept { return m_timestampMaxDelta; }
/**
* @return Fingerprint of identity to which this cert was issued
*/
ZT_INLINE const Fingerprint &issuedTo() const noexcept { return _issuedTo; }
ZT_INLINE const Fingerprint &issuedTo() const noexcept { return m_issuedTo; }
/**
* @return Network ID for which this cert was issued
*/
ZT_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
/**
* Compare two certificates for parameter agreement
@ -191,26 +191,26 @@ public:
int unmarshal(const uint8_t *data,int len) noexcept;
private:
unsigned int _fillSigningBuf(uint64_t buf[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX / 8]) const noexcept;
unsigned int m_fillSigningBuf(uint64_t *buf) const noexcept;
struct _Qualifier
struct p_Qualifier
{
ZT_INLINE _Qualifier() noexcept : id(0),value(0),delta(0) {}
ZT_INLINE _Qualifier(const uint64_t id_,const uint64_t value_,const uint64_t delta_) noexcept : id(id_),value(value_),delta(delta_) {}
ZT_INLINE p_Qualifier() noexcept : id(0), value(0), delta(0) {}
ZT_INLINE p_Qualifier(const uint64_t id_, const uint64_t value_, const uint64_t delta_) noexcept : id(id_), value(value_), delta(delta_) {}
uint64_t id;
uint64_t value;
uint64_t delta;
ZT_INLINE bool operator<(const _Qualifier &q) const noexcept { return (id < q.id); } // sort order
ZT_INLINE bool operator<(const p_Qualifier &q) const noexcept { return (id < q.id); } // sort order
};
FCV<_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS> _additionalQualifiers;
int64_t _timestamp;
int64_t _timestampMaxDelta;
uint64_t _networkId;
Fingerprint _issuedTo;
Address _signedBy;
unsigned int _signatureLength;
uint8_t _signature[ZT_SIGNATURE_BUFFER_SIZE];
FCV<p_Qualifier,ZT_CERTIFICATEOFMEMBERSHIP_MAX_ADDITIONAL_QUALIFIERS> m_additionalQualifiers;
int64_t m_timestamp;
int64_t m_timestampMaxDelta;
uint64_t m_networkId;
Fingerprint m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier

View file

@ -17,34 +17,34 @@ namespace ZeroTier {
void CertificateOfOwnership::addThing(const InetAddress &ip)
{
if (_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return;
if (ip.family() == AF_INET) {
_thingTypes[_thingCount] = THING_IPV4_ADDRESS;
Utils::copy<4>(_thingValues[_thingCount],&(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr));
++_thingCount;
m_thingTypes[m_thingCount] = THING_IPV4_ADDRESS;
Utils::copy<4>(m_thingValues[m_thingCount], &(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr));
++m_thingCount;
} else if (ip.family() == AF_INET6) {
_thingTypes[_thingCount] = THING_IPV6_ADDRESS;
Utils::copy<16>(_thingValues[_thingCount],reinterpret_cast<const struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr);
++_thingCount;
m_thingTypes[m_thingCount] = THING_IPV6_ADDRESS;
Utils::copy<16>(m_thingValues[m_thingCount], reinterpret_cast<const struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr);
++m_thingCount;
}
}
void CertificateOfOwnership::addThing(const MAC &mac)
{
if (_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return;
_thingTypes[_thingCount] = THING_MAC_ADDRESS;
mac.copyTo(_thingValues[_thingCount]);
++_thingCount;
m_thingTypes[m_thingCount] = THING_MAC_ADDRESS;
mac.copyTo(m_thingValues[m_thingCount]);
++m_thingCount;
}
bool CertificateOfOwnership::sign(const Identity &signer)
{
uint8_t buf[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX + 16];
if (signer.hasPrivate()) {
_signedBy = signer.address();
_signatureLength = signer.sign(buf,(unsigned int)marshal(buf,true),_signature,sizeof(_signature));
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
@ -57,23 +57,23 @@ int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSH
for(int k=0;k<16;++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p,_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 8,(uint64_t)_ts);
Utils::storeBigEndian<uint64_t>(data + p + 16,_flags);
Utils::storeBigEndian<uint32_t>(data + p + 24,_id);
Utils::storeBigEndian<uint16_t>(data + p + 28,(uint16_t)_thingCount);
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 8,(uint64_t)m_ts);
Utils::storeBigEndian<uint64_t>(data + p + 16, m_flags);
Utils::storeBigEndian<uint32_t>(data + p + 24, m_id);
Utils::storeBigEndian<uint16_t>(data + p + 28,(uint16_t)m_thingCount);
p += 30;
for(unsigned int i=0,j=_thingCount;i<j;++i) {
data[p++] = _thingTypes[i];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(data + p,_thingValues[i]);
for(unsigned int i=0,j=m_thingCount;i < j;++i) {
data[p++] = m_thingTypes[i];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(data + p, m_thingValues[i]);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
_issuedTo.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
m_issuedTo.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)_signatureLength); p += 2;
Utils::copy(data + p,_signature,_signatureLength); p += (int)_signatureLength;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)m_signatureLength); p += 2;
Utils::copy(data + p, m_signature, m_signatureLength); p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
@ -89,27 +89,27 @@ int CertificateOfOwnership::unmarshal(const uint8_t *data,int len) noexcept
if (len < 30)
return -1;
_networkId = Utils::loadBigEndian<uint64_t>(data);
_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
_flags = Utils::loadBigEndian<uint64_t>(data + 16);
_id = Utils::loadBigEndian<uint32_t>(data + 24);
_thingCount = Utils::loadBigEndian<uint16_t>(data + 28);
if (_thingCount > ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_flags = Utils::loadBigEndian<uint64_t>(data + 16);
m_id = Utils::loadBigEndian<uint32_t>(data + 24);
m_thingCount = Utils::loadBigEndian<uint16_t>(data + 28);
if (m_thingCount > ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return -1;
int p = 30;
for(unsigned int i=0,j=_thingCount;i<j;++i) {
for(unsigned int i=0,j=m_thingCount;i < j;++i) {
if ((p + 1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) > len)
return -1;
_thingTypes[i] = data[p++];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(_thingValues[i],data + p);
m_thingTypes[i] = data[p++];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(m_thingValues[i], data + p);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
if ((p + ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH + 1 + 2) > len)
return -1;
_issuedTo.setTo(data + p); p += ZT_ADDRESS_LENGTH;
_signedBy.setTo(data + p); p += ZT_ADDRESS_LENGTH + 1;
m_issuedTo.setTo(data + p); p += ZT_ADDRESS_LENGTH;
m_signedBy.setTo(data + p); p += ZT_ADDRESS_LENGTH + 1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)

View file

@ -65,23 +65,23 @@ public:
ZT_INLINE CertificateOfOwnership(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id) noexcept // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
{
memoryZero(this);
_networkId = nwid;
_ts = ts;
_id = id;
_issuedTo = issuedTo;
m_networkId = nwid;
m_ts = ts;
m_id = id;
m_issuedTo = issuedTo;
}
ZT_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_INLINE int64_t timestamp() const noexcept { return _ts; }
ZT_INLINE uint32_t id() const noexcept { return _id; }
ZT_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
ZT_INLINE uint32_t id() const noexcept { return m_id; }
ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }
ZT_INLINE unsigned int thingCount() const noexcept { return (unsigned int)_thingCount; }
ZT_INLINE Thing thingType(const unsigned int i) const noexcept { return (Thing)_thingTypes[i]; }
ZT_INLINE const uint8_t *thingValue(const unsigned int i) const noexcept { return _thingValues[i]; }
ZT_INLINE unsigned int thingCount() const noexcept { return (unsigned int)m_thingCount; }
ZT_INLINE Thing thingType(const unsigned int i) const noexcept { return (Thing)m_thingTypes[i]; }
ZT_INLINE const uint8_t *thingValue(const unsigned int i) const noexcept { return m_thingValues[i]; }
ZT_INLINE bool owns(const InetAddress &ip) const noexcept
{
@ -139,7 +139,7 @@ public:
int unmarshal(const uint8_t *data,int len) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const CertificateOfOwnership &coo) const noexcept { return (_id < coo._id); }
ZT_INLINE bool operator<(const CertificateOfOwnership &coo) const noexcept { return (m_id < coo.m_id); }
ZT_INLINE bool operator==(const CertificateOfOwnership &coo) const noexcept { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) == 0); }
ZT_INLINE bool operator!=(const CertificateOfOwnership &coo) const noexcept { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) != 0); }
@ -147,11 +147,11 @@ public:
private:
ZT_INLINE bool _owns(const Thing &t,const void *v,unsigned int l) const noexcept
{
for(unsigned int i=0,j=_thingCount;i<j;++i) {
if (_thingTypes[i] == (uint8_t)t) {
for(unsigned int i=0,j=m_thingCount;i < j;++i) {
if (m_thingTypes[i] == (uint8_t)t) {
unsigned int k = 0;
while (k < l) {
if (reinterpret_cast<const uint8_t *>(v)[k] != _thingValues[i][k])
if (reinterpret_cast<const uint8_t *>(v)[k] != m_thingValues[i][k])
break;
++k;
}
@ -162,17 +162,17 @@ private:
return false;
}
uint64_t _networkId;
int64_t _ts;
uint64_t _flags;
uint32_t _id;
uint16_t _thingCount;
uint8_t _thingTypes[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS];
uint8_t _thingValues[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS][ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE];
Address _issuedTo;
Address _signedBy;
unsigned int _signatureLength;
uint8_t _signature[ZT_SIGNATURE_BUFFER_SIZE];
uint64_t m_networkId;
int64_t m_ts;
uint64_t m_flags;
uint32_t m_id;
uint16_t m_thingCount;
uint8_t m_thingTypes[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS];
uint8_t m_thingValues[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS][ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE];
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier

View file

@ -87,22 +87,14 @@
#define ZT_SYMMETRIC_KEY_TTL_MESSAGES 2147483648
/**
* Maximum delay between timer task checks
* Normal delay between processBackgroundTasks calls.
*/
#define ZT_MAX_TIMER_TASK_INTERVAL 1000
/**
* Interval between steps or stages in multi-stage NAT traversal operations.
*
* This is for example the interval between initial firewall openers and real packets
* for two-phase IPv4 hole punch.
*/
#define ZT_NAT_TRAVERSAL_INTERVAL 200
#define ZT_TIMER_TASK_INTERVAL 2000
/**
* How often most internal cleanup and housekeeping tasks are performed
*/
#define ZT_HOUSEKEEPING_PERIOD 120000
#define ZT_HOUSEKEEPING_PERIOD 300000
/**
* How often network housekeeping is performed
@ -125,9 +117,12 @@
#define ZT_RELAY_MAX_HOPS 4
/**
* Period between keepalives sent to paths if no other traffic has been sent
* Period between keepalives sent to paths if no other traffic has been sent.
*
* The average NAT timeout is 60-120s, but there exist NATs in the wild with timeouts
* as short as 30s. Come in just under 30s and we should be fine.
*/
#define ZT_PATH_KEEPALIVE_PERIOD 20000
#define ZT_PATH_KEEPALIVE_PERIOD 28000
/**
* Timeout for path alive-ness (measured from last receive)
@ -135,12 +130,22 @@
#define ZT_PATH_ALIVE_TIMEOUT ((ZT_PATH_KEEPALIVE_PERIOD * 2) + 5000)
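
For reference, a quick standalone check of the timing arithmetic implied by the new keepalive value (not code from this commit):

#include <cstdio>

#define ZT_PATH_KEEPALIVE_PERIOD 28000
#define ZT_PATH_ALIVE_TIMEOUT ((ZT_PATH_KEEPALIVE_PERIOD * 2) + 5000)

int main()
{
    // 28000 ms keepalives stay just under the ~30 s NAT timeouts noted above, and a
    // path with no receives is considered dead after 61000 ms: two missed keepalives
    // plus a 5 s grace period.
    std::printf("keepalive=%d ms, alive timeout=%d ms\n",ZT_PATH_KEEPALIVE_PERIOD,ZT_PATH_ALIVE_TIMEOUT);
    return 0;
}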
/**
* Delay between calls to the pulse() method in Peer for each peer
* Number of ports to try for each BFG1024 scan attempt (if enabled).
*/
#define ZT_PEER_PULSE_INTERVAL ZT_PATH_KEEPALIVE_PERIOD
#define ZT_NAT_T_BFG1024_PORTS_PER_ATTEMPT 256
/**
* Minimum interval between HELLOs to peers.
* Maximum number of queued endpoints to try per "pulse."
*/
#define ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE 4
/**
* Delay between calls to the pulse() method in Peer for each peer
*/
#define ZT_PEER_PULSE_INTERVAL (ZT_PATH_KEEPALIVE_PERIOD / 2)
/**
* Interval between HELLOs to peers.
*/
#define ZT_PEER_HELLO_INTERVAL 120000LL
@ -154,7 +159,7 @@
#define ZT_PEER_GLOBAL_TIMEOUT 2592000000LL
/**
* Maximum interval between sort/prioritize of paths for a peer
* Interval between sort/prioritize of paths for a peer
*/
#define ZT_PEER_PRIORITIZE_PATHS_INTERVAL 5000
@ -182,16 +187,6 @@
*/
#define ZT_MAX_BRIDGE_SPAM 32
/**
* Interval between attempts to make a direct connection if one does not exist
*/
#define ZT_DIRECT_CONNECT_ATTEMPT_INTERVAL 30000
/**
* Maximum number of paths per IP scope (e.g. global, link-local) and family (e.g. v4/v6)
*/
#define ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY 4
/**
* WHOIS rate limit (we allow these to be pretty fast)
*/

View file

@ -26,6 +26,7 @@
#include <vector>
#include <list>
#include <set>
#include <string>
namespace ZeroTier {
@ -39,6 +40,7 @@ struct _MapHasher
std::size_t operator()(const uint32_t i) const noexcept { return (std::size_t)Utils::hash32(i ^ (uint32_t)Utils::s_mapNonce); }
std::size_t operator()(const int32_t i) const noexcept { return (std::size_t)Utils::hash32((uint32_t)i ^ (uint32_t)Utils::s_mapNonce); }
};
template<typename K,typename V>
class Map : public std::unordered_map< K,V,_MapHasher,std::equal_to<K>,Utils::Mallocator< std::pair<const K,V> > >
{
@ -133,6 +135,12 @@ class Set : public std::set< V,std::less<V>,Utils::Mallocator<V> >
{
};
class String : public std::basic_string< char,std::char_traits<char>,Utils::Mallocator<char> >
{
public:
explicit ZT_INLINE String(const char *const s) { assign(s); }
};
} // ZeroTier
#endif
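
A minimal usage sketch for these allocator-aware containers, assuming the rest of this header (Vector, Utils::Mallocator, the salted hasher) as it exists in this tree; not code from this commit:

#include "Containers.hpp" // assumes this tree's headers

static void exampleContainers()
{
    // Map is std::unordered_map with Utils::Mallocator and a hasher salted by Utils::s_mapNonce.
    ZeroTier::Map< uint64_t,ZeroTier::Vector<uint8_t> > table;
    ZeroTier::Vector<uint8_t> value;
    value.push_back(0x7f);
    table[42] = value;

    // String is std::basic_string with Mallocator; note the explicit const char * constructor added above.
    ZeroTier::String s("example");
    (void)s;
}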

View file

@ -74,18 +74,18 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *const RR,
Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *const RR,void *tPtr,const CertificateOfMembership &credential) const
{
// Sanity check network ID.
if ((!credential._signedBy)||(credential._signedBy != Network::controllerFor(credential._networkId)))
if ((!credential.m_signedBy) || (credential.m_signedBy != Network::controllerFor(credential.m_networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
// If we don't know the peer, get its identity. This shouldn't happen here but should be handled.
const SharedPtr<Peer> peer(RR->topology->peer(tPtr,credential._signedBy));
const SharedPtr<Peer> peer(RR->topology->peer(tPtr,credential.m_signedBy));
if (!peer)
return Credential::VERIFY_NEED_IDENTITY;
// Now verify the controller's signature.
uint64_t buf[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = credential._fillSigningBuf(buf);
return peer->identity().verify(buf,bufSize,credential._signature,credential._signatureLength) ? Credential::VERIFY_OK : Credential::VERIFY_BAD_SIGNATURE;
const unsigned int bufSize = credential.m_fillSigningBuf(buf);
return peer->identity().verify(buf, bufSize, credential.m_signature, credential.m_signatureLength) ? Credential::VERIFY_OK : Credential::VERIFY_BAD_SIGNATURE;
}
} // namespace ZeroTier

View file

@ -39,16 +39,22 @@ namespace ZeroTier {
*
* This class is thread-safe and handles locking internally.
*
* This class is templated so that it can be placed in a test harness and tested
* without dependencies on external code. The default template parameters are
* the ones used throughout the ZeroTier core.
*
* @tparam MF Maximum number of fragments that each message can possess (default: ZT_MAX_PACKET_FRAGMENTS)
* @tparam MFP Maximum number of incoming fragments per path (if paths are specified) (default: ZT_MAX_INCOMING_FRAGMENTS_PER_PATH)
* @tparam GCS Garbage collection target size for the incoming message queue (default: ZT_MAX_PACKET_FRAGMENTS * 2)
* @tparam GCT Garbage collection trigger threshold, usually 2X GCS (default: ZT_MAX_PACKET_FRAGMENTS * 4)
* @tparam P Type for pointer to a path object (default: SharedPtr<Path>)
*/
template<
unsigned int MF = ZT_MAX_PACKET_FRAGMENTS,
unsigned int MFP = ZT_MAX_INCOMING_FRAGMENTS_PER_PATH,
unsigned int GCS = (ZT_MAX_PACKET_FRAGMENTS * 2),
unsigned int GCT = (ZT_MAX_PACKET_FRAGMENTS * 4)>
unsigned int GCT = (ZT_MAX_PACKET_FRAGMENTS * 4),
typename P = SharedPtr<Path> >
class Defragmenter
{
public:
@ -146,17 +152,17 @@ public:
const unsigned int fragmentNo,
const unsigned int totalFragmentsExpected,
const int64_t now,
const SharedPtr<Path> &via)
const P &via)
{
// Sanity checks for malformed fragments or invalid input parameters.
if ((fragmentNo >= totalFragmentsExpected)||(totalFragmentsExpected > MF)||(totalFragmentsExpected == 0))
return ERR_INVALID_FRAGMENT;
// We hold the read lock on _messages unless we need to add a new entry or do GC.
RWMutex::RMaybeWLock ml(_messages_l);
RWMutex::RMaybeWLock ml(m_messages_l);
// Check message hash table size and perform GC if necessary.
if (_messages.size() >= GCT) {
if (m_messages.size() >= GCT) {
try {
// Scan messages with read lock still locked first and make a sorted list of
// message entries by last modified time. Then lock for writing and delete
@ -165,27 +171,27 @@ public:
// lock is held since many threads can hold the read lock but all threads must
// wait if someone holds the write lock.
std::vector<std::pair<int64_t,uint64_t> > messagesByLastUsedTime;
messagesByLastUsedTime.reserve(_messages.size());
messagesByLastUsedTime.reserve(m_messages.size());
for(typename Map< uint64_t,_E >::const_iterator i(_messages.begin());i!=_messages.end();++i)
for(typename Map< uint64_t,p_E >::const_iterator i(m_messages.begin());i != m_messages.end();++i)
messagesByLastUsedTime.push_back(std::pair<int64_t,uint64_t>(i->second.lastUsed,i->first));
std::sort(messagesByLastUsedTime.begin(),messagesByLastUsedTime.end());
ml.writing(); // acquire write lock on _messages
for (unsigned long x = 0,y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
_messages.erase(messagesByLastUsedTime[x].second);
m_messages.erase(messagesByLastUsedTime[x].second);
} catch (...) {
return ERR_OUT_OF_MEMORY;
}
}
// Get or create message fragment.
_E *e = _messages.get(messageId);
p_E *e = m_messages.get(messageId);
if (!e) {
ml.writing(); // acquire write lock on _messages if not already
try {
e = &(_messages[messageId]);
} catch (...) {
e = &(m_messages[messageId]);
} catch (...) {
return ERR_OUT_OF_MEMORY;
}
e->id = messageId;
@ -275,8 +281,8 @@ public:
*/
ZT_INLINE void clear()
{
RWMutex::Lock ml(_messages_l);
_messages.clear();
RWMutex::Lock ml(m_messages_l);
m_messages.clear();
}
/**
@ -284,21 +290,21 @@ public:
*/
ZT_INLINE unsigned int cacheSize() noexcept
{
RWMutex::RLock ml(_messages_l);
return _messages.size();
RWMutex::RLock ml(m_messages_l);
return m_messages.size();
}
private:
// _E is an entry in the message queue.
struct _E
// p_E is an entry in the message queue.
struct p_E
{
ZT_INLINE _E() noexcept :
ZT_INLINE p_E() noexcept :
id(0),
lastUsed(0),
totalFragmentsExpected(0),
fragmentsReceived(0) {}
ZT_INLINE _E(const _E &e) noexcept :
ZT_INLINE p_E(const p_E &e) noexcept :
id(e.id),
lastUsed(e.lastUsed),
totalFragmentsExpected(e.totalFragmentsExpected),
@ -307,7 +313,7 @@ private:
message(e.message),
lock() {}
ZT_INLINE ~_E()
ZT_INLINE ~p_E()
{
if (via) {
via->_inboundFragmentedMessages_l.lock();
@ -316,7 +322,7 @@ private:
}
}
ZT_INLINE _E &operator=(const _E &e)
ZT_INLINE p_E &operator=(const p_E &e)
{
if (this != &e) {
id = e.id;
@ -333,13 +339,13 @@ private:
int64_t lastUsed;
unsigned int totalFragmentsExpected;
unsigned int fragmentsReceived;
SharedPtr<Path> via;
P via;
FCV< Buf::Slice,MF > message;
Mutex lock;
};
Map< uint64_t,Defragmenter<MF,MFP,GCS,GCT>::_E > _messages;
RWMutex _messages_l;
Map< uint64_t,Defragmenter<MF,MFP,GCS,GCT,P>::p_E > m_messages;
RWMutex m_messages_l;
};
} // namespace ZeroTier
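
The new fifth template parameter is what enables the test-harness note in the class comment: P defaults to SharedPtr<Path> in production but can be swapped for a mock path pointer type in tests. A hedged instantiation sketch (not code from this commit; MockPath is hypothetical):

#include "Defragmenter.hpp" // assumes this tree's headers

static void exampleDefragmenterInstantiation()
{
    // All defaults: equivalent to Defragmenter< ZT_MAX_PACKET_FRAGMENTS,
    // ZT_MAX_INCOMING_FRAGMENTS_PER_PATH, ZT_MAX_PACKET_FRAGMENTS * 2,
    // ZT_MAX_PACKET_FRAGMENTS * 4, SharedPtr<Path> >, i.e. the production configuration.
    ZeroTier::Defragmenter<> defrag;
    (void)defrag;

    // A test harness could instead write something like (hypothetical MockPath type):
    //   ZeroTier::Defragmenter<16,32,32,64,SharedPtr<MockPath> > testDefrag;
    // provided MockPath exposes the fragment-tracking members Defragmenter touches through 'via'.
}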

View file

@ -25,14 +25,14 @@ Dictionary::~Dictionary()
std::vector<uint8_t> &Dictionary::operator[](const char *k)
{
return _t[_toKey(k)];
return m_entries[s_toKey(k)];
}
const std::vector<uint8_t> &Dictionary::operator[](const char *k) const
{
static const std::vector<uint8_t> emptyEntry;
Map< uint64_t,std::vector<uint8_t> >::const_iterator e(_t.find(_toKey(k)));
return (e == _t.end()) ? emptyEntry : e->second;
Map< uint64_t,std::vector<uint8_t> >::const_iterator e(m_entries.find(s_toKey(k)));
return (e == m_entries.end()) ? emptyEntry : e->second;
}
void Dictionary::add(const char *k,bool v)
@ -147,7 +147,7 @@ void Dictionary::getS(const char *k,char *v,unsigned int cap) const
void Dictionary::clear()
{
_t.clear();
m_entries.clear();
}
void Dictionary::encode(std::vector<uint8_t> &out) const
@ -156,7 +156,7 @@ void Dictionary::encode(std::vector<uint8_t> &out) const
out.clear();
for(Map< uint64_t,std::vector<uint8_t> >::const_iterator ti(_t.begin());ti!=_t.end();++ti) {
for(Map< uint64_t,std::vector<uint8_t> >::const_iterator ti(m_entries.begin());ti != m_entries.end();++ti) {
str[0] = ti->first;
const char *k = (const char *)str;
for(;;) {
@ -247,7 +247,7 @@ bool Dictionary::decode(const void *data,unsigned int len)
if ((c < 33)||(c > 126)||(c == 92)) {
return false;
} else if (c == 61) {
v = &_t[k];
v = &m_entries[k];
} else {
reinterpret_cast<uint8_t *>(&k)[ki & 7U] ^= c;
}

View file

@ -137,12 +137,12 @@ public:
/**
* @return Number of entries
*/
ZT_INLINE unsigned int size() const noexcept { return _t.size(); }
ZT_INLINE unsigned int size() const noexcept { return m_entries.size(); }
/**
* @return True if dictionary is not empty
*/
ZT_INLINE bool empty() const noexcept { return _t.empty(); }
ZT_INLINE bool empty() const noexcept { return m_entries.empty(); }
/**
* Encode to a string in the supplied vector
@ -169,7 +169,7 @@ public:
private:
// This just packs up to 8 character bytes into a 64-bit word. There is no need
// for this to be portable in terms of endian-ness. It's just for fast key lookup.
static ZT_INLINE uint64_t _toKey(const char *k)
static ZT_INLINE uint64_t s_toKey(const char *k)
{
uint64_t key = 0;
for(int i=0;i<8;++i) {
@ -179,7 +179,7 @@ private:
return key;
}
Map< uint64_t,Vector<uint8_t> > _t;
Map< uint64_t,Vector<uint8_t> > m_entries;
};
} // namespace ZeroTier
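
To make the key-packing comment above concrete, here is a self-contained illustration of the idea (a sketch of the described behavior, not the exact implementation in this commit): only roughly the first eight characters of a key are significant.

#include <cstdint>
#include <cstdio>

// Pack up to 8 key characters into a 64-bit word, as described above.
static uint64_t toKeySketch(const char *k)
{
    uint64_t key = 0;
    for(int i=0;i<8;++i) {
        const char c = k[i];
        if (!c)
            break;
        reinterpret_cast<uint8_t *>(&key)[i] = (uint8_t)c; // endian-ness intentionally ignored
    }
    return key;
}

int main()
{
    std::printf("%016llx\n",(unsigned long long)toKeySketch("nwid"));      // short key: fits entirely in the word
    std::printf("%016llx\n",(unsigned long long)toKeySketch("networkId")); // under this sketch only "networkI" contributes
    return 0;
}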

View file

@ -57,21 +57,29 @@ public:
* Protocol identifier bits.
*
* An endpoint type can support more than one of these; which ones depends on the type.
*
* Most of these are reserved for possible future use.
*/
enum Protocol
{
PROTO_DGRAM = 0x0001,
PROTO_TCP = 0x0002,
PROTO_HTTP = 0x0004,
PROTO_HTTPS = 0x0008,
PROTO_WS = 0x0010,
PROTO_WEBRTC = 0x0020
PROTO_DGRAM = 0x0001, // UDP for IP or naked Ethernet frames
PROTO_STREAM = 0x0002, // TCP
PROTO_HTTP2 = 0x0004, // HTTP2 bidirectional protocol
PROTO_HTTPS2 = 0x0008, // HTTP2 over SSL/TLS
PROTO_WS = 0x0010, // Web sockets
PROTO_WEBRTC = 0x0020, // WebRTC data channels
PROTO_WIREGUARD = 0x0040 // Wireguard as low-level transport
};
ZT_INLINE Endpoint() noexcept { memoryZero(this); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
explicit Endpoint(const InetAddress &sa,Protocol proto = PROTO_DGRAM) noexcept;
/**
* @return True if this is an IPv4 or IPv6 IP address
*/
ZT_INLINE bool isInetAddr() const noexcept { return ((_t == TYPE_INETADDR_V4)||(_t == TYPE_INETADDR_V6)); }
/**
* @return InetAddress or NIL if not of this type
*/
@ -113,7 +121,7 @@ public:
private:
Type _t;
Protocol _proto;
int _l[3]; // X,Y,Z location in kilometers from the nearest gravitational center of mass
int _l[3]; // X,Y,Z location in kilometers from the nearest gravitational center of mass (e.g. Earth)
union {
sockaddr_storage sa;
ZT_Fingerprint zt;
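
Since the Protocol values above are bit flags, an endpoint type that supports several transports can advertise them OR'ed together. A small hedged sketch (not code from this commit):

#include "Endpoint.hpp" // assumes this tree's headers

static bool exampleSupportsStream(const unsigned int protocols)
{
    // e.g. protocols == (Endpoint::PROTO_DGRAM | Endpoint::PROTO_STREAM)
    return (protocols & (unsigned int)ZeroTier::Endpoint::PROTO_STREAM) != 0;
}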

View file

@ -20,22 +20,14 @@
/**
* Number of buckets to use to maintain a list of expected replies.
*
* More buckets means less chance of two packets tagging the same
* bucket. This doesn't actually hurt anything since this class
* behaves like a bloom filter: you can have false positives but
* not false negatives.
*
* OKs are also cryptographically authenticated, so this is not a
* huge problem, but this helps harden the system against replay
* attacks for e.g. denial of service.
* Making this a power of two improves efficiency a little by allowing bit shift division.
*/
#define ZT_EXPECT_BUCKETS 131072
#define ZT_EXPECT_BUCKETS 32768
/**
* 1/2 the TTL for expected replies in milliseconds
*
* Making this a power of two improves efficiency a little by allowing bit
* shift division.
* Making this a power of two improves efficiency a little by allowing bit shift division.
*/
#define ZT_EXPECT_TTL 4096LL
@ -47,40 +39,37 @@ namespace ZeroTier {
class Expect
{
public:
ZT_INLINE Expect() : _salt(Utils::getSecureRandomU64()) {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
ZT_INLINE Expect() {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
/**
* Called by other code when something is sending a packet that may receive an OK response
* Called by other code when something is sending a packet that could potentially receive an OK response
*
* @param packetId Packet ID of packet being sent (be sure it's post-armor())
* @param now Current time
*/
ZT_INLINE void sending(const uint64_t packetId,const int64_t now) noexcept
{
_packetIdSent[Utils::hash64(packetId ^ _salt) % ZT_EXPECT_BUCKETS].store((int32_t)(now / ZT_EXPECT_TTL));
_packetIdSent[Utils::hash64(packetId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS].store((uint32_t)(now / ZT_EXPECT_TTL));
}
/**
* Check whether an OK is expected for this packet
* Check if an OK is expected and if so reset the corresponding bucket.
*
* @param inRePacketId
* @param now
* @return
* Note that this call mutates state: if it returns true for a given packet ID,
* subsequent calls for that ID will return false. This provides replay protection for OKs.
*
* @param inRePacketId In-re packet ID we're expecting
* @param now Current time
* @return True if we're expecting a reply (and a reset occurred)
*/
ZT_INLINE bool expecting(const uint64_t inRePacketId,const int64_t now) const noexcept
ZT_INLINE bool expecting(const uint64_t inRePacketId,const int64_t now) noexcept
{
return (((now / ZT_EXPECT_TTL) - (int64_t)_packetIdSent[Utils::hash64(inRePacketId ^ _salt) % ZT_EXPECT_BUCKETS].load()) <= 1);
return (((now / ZT_EXPECT_TTL) - (int64_t)_packetIdSent[(unsigned long)Utils::hash64(inRePacketId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS].exchange(0)) <= 1);
}
private:
// This is a static per-runtime salt that's XORed and mixed with the packet ID
// to make it difficult for a third party to predict expected-reply buckets.
// Such prediction would not be catastrophic but it's easy and good to harden
// against it.
const uint64_t _salt;
// Each bucket contains a timestamp in units of the expect duration.
std::atomic<int32_t> _packetIdSent[ZT_EXPECT_BUCKETS];
// Each bucket contains a timestamp in units of the max expect duration.
std::atomic<uint32_t> _packetIdSent[ZT_EXPECT_BUCKETS];
};
} // namespace ZeroTier
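
A short usage sketch of the send/expect round trip described above, assuming this tree's Expect.hpp; values are illustrative only, not code from this commit:

#include "Expect.hpp" // assumes this tree's headers

static void exampleExpect()
{
    ZeroTier::Expect expect;
    const int64_t now = 1586812800000LL;             // a realistic epoch timestamp in milliseconds
    const uint64_t packetId = 0x1234567890abcdefULL; // ID of the packet as sent (post-armor())

    expect.sending(packetId,now);
    const bool first = expect.expecting(packetId,now + 100);  // true: an OK is expected, and the bucket is reset
    const bool replay = expect.expecting(packetId,now + 200); // false now that the bucket has been consumed (replay protection)
    (void)first; (void)replay;
}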

View file

@ -29,7 +29,7 @@ InetAddress::IpScope InetAddress::ipScope() const noexcept
switch(_data.ss_family) {
case AF_INET: {
const uint32_t ip = Utils::ntoh((uint32_t)reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr);
const uint32_t ip = Utils::ntoh((uint32_t)reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr);
switch(ip >> 24U) {
case 0x00: return IP_SCOPE_NONE; // 0.0.0.0/8 (reserved, never used)
case 0x06: return IP_SCOPE_PSEUDOPRIVATE; // 6.0.0.0/8 (US Army)
@ -68,7 +68,7 @@ InetAddress::IpScope InetAddress::ipScope() const noexcept
}
case AF_INET6: {
const unsigned char *ip = reinterpret_cast<const unsigned char *>(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const unsigned char *ip = reinterpret_cast<const unsigned char *>(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
if ((ip[0] & 0xf0U) == 0xf0) {
if (ip[0] == 0xff) return IP_SCOPE_MULTICAST; // ff00::/8
if ((ip[0] == 0xfe)&&((ip[1] & 0xc0U) == 0x80)) {
@ -100,12 +100,12 @@ void InetAddress::set(const void *ipBytes,unsigned int ipLen,unsigned int port)
uint32_t ipb[1];
Utils::copy<4>(ipb,ipBytes);
_data.ss_family = AF_INET;
reinterpret_cast<struct sockaddr_in *>(this)->sin_addr.s_addr = ipb[0];
reinterpret_cast<struct sockaddr_in *>(this)->sin_port = Utils::hton((uint16_t)port);
reinterpret_cast<sockaddr_in *>(this)->sin_addr.s_addr = ipb[0];
reinterpret_cast<sockaddr_in *>(this)->sin_port = Utils::hton((uint16_t)port);
} else if (ipLen == 16) {
_data.ss_family = AF_INET6;
Utils::copy<16>(reinterpret_cast<struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,ipBytes);
reinterpret_cast<struct sockaddr_in6 *>(this)->sin6_port = Utils::hton((uint16_t)port);
Utils::copy<16>(reinterpret_cast<sockaddr_in6 *>(this)->sin6_addr.s6_addr,ipBytes);
reinterpret_cast<sockaddr_in6 *>(this)->sin6_port = Utils::hton((uint16_t)port);
}
}
@ -113,14 +113,14 @@ bool InetAddress::isDefaultRoute() const noexcept
{
switch(_data.ss_family) {
case AF_INET:
return ( (reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr == 0) && (reinterpret_cast<const struct sockaddr_in *>(this)->sin_port == 0) );
return ( (reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr == 0) && (reinterpret_cast<const sockaddr_in *>(this)->sin_port == 0) );
case AF_INET6:
const uint8_t *ipb = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *ipb = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
for(int i=0;i<16;++i) {
if (ipb[i])
return false;
}
return (reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_port == 0);
return (reinterpret_cast<const sockaddr_in6 *>(this)->sin6_port == 0);
}
return false;
}
@ -142,17 +142,17 @@ char *InetAddress::toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const no
switch(_data.ss_family) {
case AF_INET: {
#ifdef _WIN32
inet_ntop(AF_INET, (void*)&reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr, buf, INET_ADDRSTRLEN);
inet_ntop(AF_INET, (void*)&reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr, buf, INET_ADDRSTRLEN);
#else
inet_ntop(AF_INET, &reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr, buf, INET_ADDRSTRLEN);
inet_ntop(AF_INET, &reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr, buf, INET_ADDRSTRLEN);
#endif
} break;
case AF_INET6: {
#ifdef _WIN32
inet_ntop(AF_INET6, (void*)reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
inet_ntop(AF_INET6, (void*)reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
#else
inet_ntop(AF_INET6, reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
inet_ntop(AF_INET6, reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
#endif
} break;
}
@ -180,13 +180,13 @@ bool InetAddress::fromString(const char *ipSlashPort) noexcept
}
if (strchr(buf,':')) {
struct sockaddr_in6 *const in6 = reinterpret_cast<struct sockaddr_in6 *>(this); // NOLINT(hicpp-use-auto,modernize-use-auto)
sockaddr_in6 *const in6 = reinterpret_cast<sockaddr_in6 *>(this); // NOLINT(hicpp-use-auto,modernize-use-auto)
inet_pton(AF_INET6, buf, &in6->sin6_addr.s6_addr);
in6->sin6_family = AF_INET6;
in6->sin6_port = Utils::hton((uint16_t)port);
return true;
} else if (strchr(buf,'.')) {
struct sockaddr_in *const in = reinterpret_cast<struct sockaddr_in *>(this); // NOLINT(hicpp-use-auto,modernize-use-auto)
sockaddr_in *const in = reinterpret_cast<sockaddr_in *>(this); // NOLINT(hicpp-use-auto,modernize-use-auto)
inet_pton(AF_INET, buf, &in->sin_addr.s_addr);
in->sin_family = AF_INET;
in->sin_port = Utils::hton((uint16_t)port);
@ -201,7 +201,7 @@ InetAddress InetAddress::netmask() const noexcept
InetAddress r(*this);
switch(r._data.ss_family) {
case AF_INET:
reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr = Utils::hton((uint32_t)(0xffffffffU << (32 - netmaskBits())));
reinterpret_cast<sockaddr_in *>(&r)->sin_addr.s_addr = Utils::hton((uint32_t)(0xffffffffU << (32 - netmaskBits())));
break;
case AF_INET6: {
uint64_t nm[2];
@ -213,7 +213,7 @@ InetAddress InetAddress::netmask() const noexcept
nm[0] = 0;
nm[1] = 0;
}
Utils::copy<16>(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,nm);
Utils::copy<16>(reinterpret_cast<sockaddr_in6 *>(&r)->sin6_addr.s6_addr,nm);
} break;
}
return r;
@ -223,7 +223,7 @@ InetAddress InetAddress::broadcast() const noexcept
{
if (_data.ss_family == AF_INET) {
InetAddress r(*this);
reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr |= Utils::hton((uint32_t)(0xffffffffU >> netmaskBits()));
reinterpret_cast<sockaddr_in *>(&r)->sin_addr.s_addr |= Utils::hton((uint32_t)(0xffffffffU >> netmaskBits()));
return r;
}
return InetAddress();
@ -234,15 +234,15 @@ InetAddress InetAddress::network() const noexcept
InetAddress r(*this);
switch(r._data.ss_family) {
case AF_INET:
reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr &= Utils::hton((uint32_t)(0xffffffffU << (32 - netmaskBits())));
reinterpret_cast<sockaddr_in *>(&r)->sin_addr.s_addr &= Utils::hton((uint32_t)(0xffffffffU << (32 - netmaskBits())));
break;
case AF_INET6: {
uint64_t nm[2];
const unsigned int bits = netmaskBits();
Utils::copy<16>(nm,reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr);
Utils::copy<16>(nm,reinterpret_cast<sockaddr_in6 *>(&r)->sin6_addr.s6_addr);
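// Apply the prefix mask in two 64-bit words: nm[0] covers prefix bits 0-63 and nm[1] covers bits 64-127, with hton() keeping the masks in network byte order to match s6_addr.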
nm[0] &= Utils::hton((uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
nm[1] &= Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
Utils::copy<16>(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,nm);
Utils::copy<16>(reinterpret_cast<sockaddr_in6 *>(&r)->sin6_addr.s6_addr,nm);
} break;
}
return r;
@ -255,10 +255,10 @@ bool InetAddress::isEqualPrefix(const InetAddress &addr) const noexcept
case AF_INET6: {
const InetAddress mask(netmask());
InetAddress addr_mask(addr.netmask());
const uint8_t *n = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&addr_mask)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *m = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&mask)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *a = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&addr)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *b = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *n = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(&addr_mask)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *m = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(&mask)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *a = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(&addr)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *b = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
for(unsigned int i=0;i<16;++i) {
if ((a[i] & m[i]) != (b[i] & n[i]))
return false;
@ -279,15 +279,15 @@ bool InetAddress::containsAddress(const InetAddress &addr) const noexcept
if (bits == 0)
return true;
return (
(Utils::ntoh((uint32_t)reinterpret_cast<const struct sockaddr_in *>(&addr)->sin_addr.s_addr) >> (32 - bits)) ==
(Utils::ntoh((uint32_t)reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr) >> (32 - bits))
(Utils::ntoh((uint32_t)reinterpret_cast<const sockaddr_in *>(&addr)->sin_addr.s_addr) >> (32 - bits)) ==
(Utils::ntoh((uint32_t)reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr) >> (32 - bits))
);
}
case AF_INET6: {
const InetAddress mask(netmask());
const uint8_t *m = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&mask)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *a = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&addr)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *b = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *m = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(&mask)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *a = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(&addr)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint8_t *b = reinterpret_cast<const uint8_t *>(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
for(unsigned int i=0;i<16;++i) {
if ((a[i] & m[i]) != b[i])
return false;
@ -308,19 +308,19 @@ void InetAddress::forTrace(ZT_TraceEventPathAddress &ta) const noexcept
break;
case AF_INET:
ta.type = ZT_TRACE_EVENT_PATH_TYPE_INETADDR_V4;
tmp = (uint32_t)(reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr);
tmp = (uint32_t)(reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr);
ta.address[0] = reinterpret_cast<const uint8_t *>(&tmp)[0];
ta.address[1] = reinterpret_cast<const uint8_t *>(&tmp)[1];
ta.address[2] = reinterpret_cast<const uint8_t *>(&tmp)[2];
ta.address[3] = reinterpret_cast<const uint8_t *>(&tmp)[3];
Utils::zero<sizeof(ta.address) - 4>(ta.address + 4);
ta.port = reinterpret_cast<const struct sockaddr_in *>(this)->sin_port;
ta.port = reinterpret_cast<const sockaddr_in *>(this)->sin_port;
break;
case AF_INET6:
ta.type = ZT_TRACE_EVENT_PATH_TYPE_INETADDR_V6;
Utils::copy<16>(ta.address,reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr);
Utils::copy<16>(ta.address,reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr);
Utils::zero<sizeof(ta.address) - 16>(ta.address + 16);
ta.port = reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_port;
ta.port = reinterpret_cast<const sockaddr_in6 *>(this)->sin6_port;
break;
}
}
@ -334,7 +334,7 @@ bool InetAddress::isNetwork() const noexcept
return false;
if (bits >= 32)
return false;
uint32_t ip = Utils::ntoh((uint32_t)reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr);
uint32_t ip = Utils::ntoh((uint32_t)reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr);
return ((ip & (0xffffffffU >> bits)) == 0);
}
case AF_INET6: {
@ -343,7 +343,7 @@ bool InetAddress::isNetwork() const noexcept
return false;
if (bits >= 128)
return false;
const unsigned char *ip = reinterpret_cast<const unsigned char *>(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
const unsigned char *ip = reinterpret_cast<const unsigned char *>(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr); // NOLINT(hicpp-use-auto,modernize-use-auto)
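// A network address must have all host bits (everything past the prefix) set to zero.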
unsigned int p = bits / 8;
if ((ip[p++] & (0xffU >> (bits % 8))) != 0)
return false;
@ -425,14 +425,14 @@ bool InetAddress::operator==(const InetAddress &a) const noexcept
switch(_data.ss_family) {
case AF_INET:
return (
(reinterpret_cast<const struct sockaddr_in *>(this)->sin_port == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_port)&&
(reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_addr.s_addr));
(reinterpret_cast<const sockaddr_in *>(this)->sin_port == reinterpret_cast<const sockaddr_in *>(&a)->sin_port)&&
(reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const sockaddr_in *>(&a)->sin_addr.s_addr));
case AF_INET6:
return (
(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_port == reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_port)&&
(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_flowinfo == reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_flowinfo)&&
(memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0)&&
(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_scope_id == reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_scope_id));
(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_port == reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_port)&&
(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_flowinfo == reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_flowinfo)&&
(memcmp(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0)&&
(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_scope_id == reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_scope_id));
default:
return (memcmp(this,&a,sizeof(InetAddress)) == 0);
}
@ -447,24 +447,24 @@ bool InetAddress::operator<(const InetAddress &a) const noexcept
else if (_data.ss_family == a._data.ss_family) {
switch(_data.ss_family) {
case AF_INET:
if (reinterpret_cast<const struct sockaddr_in *>(this)->sin_port < reinterpret_cast<const struct sockaddr_in *>(&a)->sin_port)
if (reinterpret_cast<const sockaddr_in *>(this)->sin_port < reinterpret_cast<const sockaddr_in *>(&a)->sin_port)
return true;
else if (reinterpret_cast<const struct sockaddr_in *>(this)->sin_port == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_port) {
if (reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr < reinterpret_cast<const struct sockaddr_in *>(&a)->sin_addr.s_addr)
else if (reinterpret_cast<const sockaddr_in *>(this)->sin_port == reinterpret_cast<const sockaddr_in *>(&a)->sin_port) {
if (reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr < reinterpret_cast<const sockaddr_in *>(&a)->sin_addr.s_addr)
return true;
}
break;
case AF_INET6:
if (reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_port < reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_port)
if (reinterpret_cast<const sockaddr_in6 *>(this)->sin6_port < reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_port)
return true;
else if (reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_port == reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_port) {
if (reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_flowinfo < reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_flowinfo)
else if (reinterpret_cast<const sockaddr_in6 *>(this)->sin6_port == reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_port) {
if (reinterpret_cast<const sockaddr_in6 *>(this)->sin6_flowinfo < reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_flowinfo)
return true;
else if (reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_flowinfo == reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_flowinfo) {
if (memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) < 0)
else if (reinterpret_cast<const sockaddr_in6 *>(this)->sin6_flowinfo == reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_flowinfo) {
if (memcmp(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) < 0)
return true;
else if (memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0) {
if (reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_scope_id < reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_scope_id)
else if (memcmp(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0) {
if (reinterpret_cast<const sockaddr_in6 *>(this)->sin6_scope_id < reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_scope_id)
return true;
}
}

View file

@ -14,13 +14,10 @@
#ifndef ZT_INETADDRESS_HPP
#define ZT_INETADDRESS_HPP
#include <cstdlib>
#include <cstring>
#include <cstdint>
#include "Constants.hpp"
#include "Utils.hpp"
#include "MAC.hpp"
#include "Containers.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
@ -194,10 +191,10 @@ public:
{
switch(_data.ss_family) {
case AF_INET:
reinterpret_cast<struct sockaddr_in *>(this)->sin_port = Utils::hton((uint16_t)port);
reinterpret_cast<sockaddr_in *>(this)->sin_port = Utils::hton((uint16_t)port);
break;
case AF_INET6:
reinterpret_cast<struct sockaddr_in6 *>(this)->sin6_port = Utils::hton((uint16_t)port);
reinterpret_cast<sockaddr_in6 *>(this)->sin6_port = Utils::hton((uint16_t)port);
break;
}
}
@ -211,11 +208,13 @@ public:
* @return ASCII IP/port format representation
*/
char *toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toString() const { char buf[ZT_INETADDRESS_STRING_SIZE_MAX]; toString(buf); return String(buf); }
/**
* @return IP portion only, in ASCII string format
*/
char *toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toIpString() const { char buf[ZT_INETADDRESS_STRING_SIZE_MAX]; toIpString(buf); return String(buf); }
/**
* @param ipSlashPort IP/port (port is optional, will be 0 if not included)
@ -229,8 +228,8 @@ public:
ZT_INLINE unsigned int port() const noexcept
{
switch(_data.ss_family) {
case AF_INET: return Utils::ntoh((uint16_t)(reinterpret_cast<const struct sockaddr_in *>(this)->sin_port));
case AF_INET6: return Utils::ntoh((uint16_t)(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_port));
case AF_INET: return Utils::ntoh((uint16_t)(reinterpret_cast<const sockaddr_in *>(this)->sin_port));
case AF_INET6: return Utils::ntoh((uint16_t)(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_port));
default: return 0;
}
}
@ -325,8 +324,8 @@ public:
ZT_INLINE const void *rawIpData() const noexcept
{
switch(_data.ss_family) {
case AF_INET: return (const void *)&(reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr);
case AF_INET6: return (const void *)(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr);
case AF_INET: return (const void *)&(reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr);
case AF_INET6: return (const void *)(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr);
default: return nullptr;
}
}
@ -339,12 +338,12 @@ public:
InetAddress r;
switch(_data.ss_family) {
case AF_INET:
reinterpret_cast<struct sockaddr_in *>(&r)->sin_family = AF_INET;
reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr = reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr;
reinterpret_cast<sockaddr_in *>(&r)->sin_family = AF_INET;
reinterpret_cast<sockaddr_in *>(&r)->sin_addr.s_addr = reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr;
break;
case AF_INET6:
reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_family = AF_INET;
Utils::copy<16>(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr);
reinterpret_cast<sockaddr_in6 *>(&r)->sin6_family = AF_INET6;
Utils::copy<16>(reinterpret_cast<sockaddr_in6 *>(&r)->sin6_addr.s6_addr,reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr);
break;
}
return r;
@ -361,9 +360,9 @@ public:
const uint8_t f = _data.ss_family;
if (f == a._data.ss_family) {
if (f == AF_INET)
return (reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_addr.s_addr);
return (reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const sockaddr_in *>(&a)->sin_addr.s_addr);
if (f == AF_INET6)
return (memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0);
return (memcmp(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0);
return (memcmp(this,&a,sizeof(InetAddress)) == 0);
}
return false;
@ -382,9 +381,9 @@ public:
const uint8_t f = _data.ss_family;
if (f == a._data.ss_family) {
if (f == AF_INET)
return (reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_addr.s_addr);
return (reinterpret_cast<const sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const sockaddr_in *>(&a)->sin_addr.s_addr);
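// For IPv6 only the first 64 bits (the network half of the address) are compared.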
if (f == AF_INET6)
return (memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,8) == 0);
return (memcmp(reinterpret_cast<const sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const sockaddr_in6 *>(&a)->sin6_addr.s6_addr,8) == 0);
return (memcmp(this,&a,sizeof(InetAddress)) == 0);
}
return false;
@ -393,12 +392,12 @@ public:
ZT_INLINE unsigned long hashCode() const noexcept
{
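// Utils::s_mapNonce, a node-wide random value, is mixed in so hash codes are not predictable from the address alone.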
if (_data.ss_family == AF_INET) {
return (unsigned long)Utils::hash32(((uint32_t)reinterpret_cast<const struct sockaddr_in *>(&_data)->sin_addr.s_addr + (uint32_t)reinterpret_cast<const struct sockaddr_in *>(&_data)->sin_port) ^ (uint32_t)Utils::s_mapNonce);
return (unsigned long)Utils::hash32(((uint32_t)reinterpret_cast<const sockaddr_in *>(&_data)->sin_addr.s_addr + (uint32_t)reinterpret_cast<const sockaddr_in *>(&_data)->sin_port) ^ (uint32_t)Utils::s_mapNonce);
} else if (_data.ss_family == AF_INET6) {
return (unsigned long)Utils::hash64(
(Utils::loadAsIsEndian<uint64_t>(reinterpret_cast<const struct sockaddr_in6 *>(&_data)->sin6_addr.s6_addr) +
Utils::loadAsIsEndian<uint64_t>(reinterpret_cast<const struct sockaddr_in6 *>(&_data)->sin6_addr.s6_addr + 8) +
(uint64_t)reinterpret_cast<const struct sockaddr_in6 *>(&_data)->sin6_port) ^ Utils::s_mapNonce
(Utils::loadAsIsEndian<uint64_t>(reinterpret_cast<const sockaddr_in6 *>(&_data)->sin6_addr.s6_addr) +
Utils::loadAsIsEndian<uint64_t>(reinterpret_cast<const sockaddr_in6 *>(&_data)->sin6_addr.s6_addr + 8) +
(uint64_t)reinterpret_cast<const sockaddr_in6 *>(&_data)->sin6_port) ^ Utils::s_mapNonce
);
}
return Utils::fnv1a32(&_data,sizeof(_data));
@ -502,14 +501,14 @@ static_assert(sizeof(sockaddr_in) <= sizeof(InetAddress),"InetAddress sizing inc
static_assert(sizeof(sockaddr_in6) <= sizeof(InetAddress),"InetAddress sizing incorrect");
static_assert(sizeof(sockaddr) <= sizeof(InetAddress),"InetAddress sizing incorrect");
static ZT_INLINE InetAddress *asInetAddress(sockaddr_in *p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_in6 *p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr *p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_storage *p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_in *p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_in6 *p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr *p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_storage *p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_in *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_in6 *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_storage *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_in *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_in6 *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_storage *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr_in &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr_in6 &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }

View file

@ -142,7 +142,7 @@ public:
*/
ZT_INLINE unsigned int size() const noexcept { return 6; }
ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_m; }
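// Running the raw 48-bit value through hash64() spreads MACs that share an OUI prefix more evenly across hash buckets.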
ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)Utils::hash64(_m); }
ZT_INLINE char *toString(char buf[18]) const noexcept
{

View file

@ -75,18 +75,18 @@ struct _sortPeerPtrsByAddress
} // anonymous namespace
Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64_t now) :
_RR(this),
RR(&_RR),
_objects(nullptr),
_cb(*callbacks),
_uPtr(uPtr),
_networks(),
_lastPeerPulse(0),
_lastHousekeepingRun(0),
_lastNetworkHousekeepingRun(0),
_now(now),
_natMustDie(true),
_online(false)
m_RR(this),
RR(&m_RR),
m_objects(nullptr),
m_cb(*callbacks),
m_uPtr(uPtr),
m_networks(),
m_lastPeerPulse(0),
m_lastHousekeepingRun(0),
m_lastNetworkHousekeepingRun(0),
m_now(now),
m_natMustDie(true),
m_online(false)
{
// Load this node's identity.
uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
@ -116,28 +116,27 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr,(unsigned int)strlen(RR->publicIdentityStr));
}
uint8_t tmph[ZT_FINGERPRINT_HASH_SIZE];
uint8_t tmph[ZT_SHA384_DIGEST_SIZE];
RR->identity.hashWithPrivate(tmph);
RR->localCacheSymmetric.init(tmph);
Utils::burn(tmph,sizeof(tmph));
// This constructs all the components of the ZeroTier core within a single contiguous memory container,
// which reduces memory fragmentation and may improve cache locality.
_objects = new _NodeObjects(RR,tPtr);
m_objects = new _NodeObjects(RR, tPtr);
postEvent(tPtr, ZT_EVENT_UP);
}
Node::~Node()
{
_networks_m.lock();
_networks_m.unlock();
_networks.clear();
_networks_m.lock();
_networks_m.unlock();
m_networks_l.lock();
m_networks_l.unlock();
m_networks.clear();
m_networks_l.lock();
m_networks_l.unlock();
if (_objects)
delete (_NodeObjects *)_objects;
if (m_objects)
delete (_NodeObjects *)m_objects;
// Let go of cached Buf objects. If other nodes happen to be running in this
// same process space new Bufs will be allocated as needed, but this is almost
@ -161,7 +160,7 @@ ZT_ResultCode Node::processWirePacket(
unsigned int packetLength,
volatile int64_t *nextBackgroundTaskDeadline)
{
_now = now;
m_now = now;
RR->vl1->onRemotePacket(tPtr,localSocket,(remoteAddress) ? InetAddress::NIL : *asInetAddress(remoteAddress),packetData,packetLength);
return ZT_RESULT_OK;
}
@ -178,7 +177,7 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
unsigned int frameLength,
volatile int64_t *nextBackgroundTaskDeadline)
{
_now = now;
m_now = now;
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
RR->vl2->onLocalEthernet(tPtr,nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
@ -215,20 +214,20 @@ struct _processBackgroundTasks_eachPeer
};
ZT_ResultCode Node::processBackgroundTasks(void *tPtr,int64_t now,volatile int64_t *nextBackgroundTaskDeadline)
{
_now = now;
Mutex::Lock bl(_backgroundTasksLock);
m_now = now;
Mutex::Lock bl(m_backgroundTasksLock);
try {
// Call peer pulse() method of all peers every ZT_PEER_PULSE_INTERVAL.
if ((now - _lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
_lastPeerPulse = now;
if ((now - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
m_lastPeerPulse = now;
try {
_processBackgroundTasks_eachPeer pf(now,this,tPtr);
RR->topology->eachPeerWithRoot<_processBackgroundTasks_eachPeer &>(pf);
if (pf.online != _online) {
_online = pf.online;
postEvent(tPtr, _online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
if (pf.online != m_online) {
m_online = pf.online;
postEvent(tPtr, m_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
}
RR->topology->rankRoots(now);
@ -248,29 +247,29 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr,int64_t now,volatile int64
}
// Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
_lastHousekeepingRun = now;
if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
m_lastHousekeepingRun = now;
{
RWMutex::RLock l(_networks_m);
for(Map< uint64_t,SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i)
RWMutex::RLock l(m_networks_l);
for(Map< uint64_t,SharedPtr<Network> >::const_iterator i(m_networks.begin());i != m_networks.end();++i)
i->second->doPeriodicTasks(tPtr,now);
}
}
// Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
_lastHousekeepingRun = now;
if ((now - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
m_lastHousekeepingRun = now;
try {
// Clean up any old local controller auth memoizations. This is an
// optimization for network controllers to know whether to accept
// or trust nodes without doing an extra cert check.
_localControllerAuthorizations_m.lock();
for(Map<_LocalControllerAuth,int64_t>::iterator i(_localControllerAuthorizations.begin());i!=_localControllerAuthorizations.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
m_localControllerAuthorizations_l.lock();
for(Map<p_LocalControllerAuth,int64_t>::iterator i(m_localControllerAuthorizations.begin());i != m_localControllerAuthorizations.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if ((now - i->second) > (ZT_NETWORK_AUTOCONF_DELAY * 3))
_localControllerAuthorizations.erase(i++);
m_localControllerAuthorizations.erase(i++);
else ++i;
}
_localControllerAuthorizations_m.unlock();
m_localControllerAuthorizations_l.unlock();
RR->topology->doPeriodicTasks(tPtr, now);
RR->sa->clean(now);
@ -279,30 +278,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr,int64_t now,volatile int64
}
}
// Set off any due or overdue peer alarms.
int64_t earliestAlarmAt = now + ZT_MAX_TIMER_TASK_INTERVAL;
std::vector<Fingerprint> bzzt;
{
Mutex::Lock l(_peerAlarms_l);
for(std::map< Fingerprint,int64_t,std::less<Fingerprint>,Utils::Mallocator< std::pair<const Fingerprint,int64_t> > >::iterator a(_peerAlarms.begin());a!=_peerAlarms.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if (now >= a->second) {
bzzt.push_back(a->first);
_peerAlarms.erase(a++);
} else {
if (a->second < earliestAlarmAt)
earliestAlarmAt = a->second;
++a;
}
}
}
for(std::vector<Fingerprint>::iterator a(bzzt.begin());a!=bzzt.end();++a) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
const SharedPtr<Peer> p(RR->topology->peer(tPtr,a->address(),false));
if ((p)&&(p->identity().fingerprint() == *a))
p->alarm(tPtr,now);
}
// Tell caller when to call this method next.
*nextBackgroundTaskDeadline = earliestAlarmAt;
*nextBackgroundTaskDeadline = now + ZT_TIMER_TASK_INTERVAL;
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
@ -316,8 +292,8 @@ ZT_ResultCode Node::join(uint64_t nwid,const ZT_Fingerprint *controllerFingerpri
if (controllerFingerprint)
Utils::copy<sizeof(ZT_Fingerprint)>(fp.apiFingerprint(),controllerFingerprint);
RWMutex::Lock l(_networks_m);
SharedPtr<Network> &nw = _networks[nwid];
RWMutex::Lock l(m_networks_l);
SharedPtr<Network> &nw = m_networks[nwid];
if (nw)
return ZT_RESULT_OK;
nw.set(new Network(RR,tptr,nwid,fp,uptr,nullptr));
@ -329,15 +305,15 @@ ZT_ResultCode Node::leave(uint64_t nwid,void **uptr,void *tptr)
{
ZT_VirtualNetworkConfig ctmp;
_networks_m.lock();
Map< uint64_t,SharedPtr<Network> >::iterator nwi(_networks.find(nwid)); // NOLINT(hicpp-use-auto,modernize-use-auto)
if (nwi == _networks.end()) {
_networks_m.unlock();
m_networks_l.lock();
Map< uint64_t,SharedPtr<Network> >::iterator nwi(m_networks.find(nwid)); // NOLINT(hicpp-use-auto,modernize-use-auto)
if (nwi == m_networks.end()) {
m_networks_l.unlock();
return ZT_RESULT_OK;
}
SharedPtr<Network> nw(nwi->second);
_networks.erase(nwi);
_networks_m.unlock();
m_networks.erase(nwi);
m_networks_l.unlock();
if (uptr)
*uptr = *nw->userPtr();
@ -403,7 +379,7 @@ void Node::status(ZT_NodeStatus *status) const
status->identity = reinterpret_cast<const ZT_Identity *>(&RR->identity);
status->publicIdentity = RR->publicIdentityStr;
status->secretIdentity = RR->secretIdentityStr;
status->online = _online ? 1 : 0;
status->online = m_online ? 1 : 0;
}
ZT_PeerList *Node::peers() const
@ -419,7 +395,7 @@ ZT_PeerList *Node::peers() const
pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));
Identity *identities = (Identity *)(buf + sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size())); // NOLINT(modernize-use-auto,hicpp-use-auto)
const int64_t now = _now;
const int64_t now = m_now;
pl->peerCount = 0;
for(std::vector< SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) { // NOLINT(modernize-use-auto,modernize-loop-convert,hicpp-use-auto)
ZT_Peer *const p = &(pl->peers[pl->peerCount]);
@ -480,16 +456,16 @@ ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
ZT_VirtualNetworkList *Node::networks() const
{
RWMutex::RLock l(_networks_m);
RWMutex::RLock l(m_networks_l);
char *const buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * _networks.size()));
char *const buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * m_networks.size()));
if (!buf)
return nullptr;
ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf; // NOLINT(modernize-use-auto,hicpp-use-auto)
nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));
nl->networkCount = 0;
for(Map< uint64_t,SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) // NOLINT(modernize-use-auto,modernize-loop-convert,hicpp-use-auto)
for(Map< uint64_t,SharedPtr<Network> >::const_iterator i(m_networks.begin());i != m_networks.end();++i) // NOLINT(modernize-use-auto,modernize-loop-convert,hicpp-use-auto)
i->second->externalConfig(&(nl->networks[nl->networkCount++]));
return nl;
@ -510,8 +486,8 @@ void Node::freeQueryResult(void *qr)
void Node::setInterfaceAddresses(const ZT_InterfaceAddress *addrs,unsigned int addrCount)
{
Mutex::Lock _l(_localInterfaceAddresses_m);
_localInterfaceAddresses.clear();
Mutex::Lock _l(m_localInterfaceAddresses_m);
m_localInterfaceAddresses.clear();
for(unsigned int i=0;i<addrCount;++i) {
bool dupe = false;
for(unsigned int j=0;j<i;++j) {
@ -521,7 +497,7 @@ void Node::setInterfaceAddresses(const ZT_InterfaceAddress *addrs,unsigned int a
}
}
if (!dupe)
_localInterfaceAddresses.push_back(addrs[i]);
m_localInterfaceAddresses.push_back(addrs[i]);
}
}
@ -555,12 +531,12 @@ void Node::setController(void *networkControllerInstance)
std::vector<uint8_t> Node::stateObjectGet(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2])
{
std::vector<uint8_t> r;
if (_cb.stateGetFunction) {
if (m_cb.stateGetFunction) {
void *data = 0;
void (*freeFunc)(void *) = 0;
int l = _cb.stateGetFunction(
int l = m_cb.stateGetFunction(
reinterpret_cast<ZT_Node *>(this),
_uPtr,
m_uPtr,
tPtr,
type,
id,
@ -577,8 +553,8 @@ std::vector<uint8_t> Node::stateObjectGet(void *const tPtr,ZT_StateObjectType ty
bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Identity &id,const int64_t localSocket,const InetAddress &remoteAddress)
{
{
RWMutex::RLock l(_networks_m);
for(Map< uint64_t,SharedPtr<Network> >::iterator i(_networks.begin());i!=_networks.end();++i) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
RWMutex::RLock l(m_networks_l);
for(Map< uint64_t,SharedPtr<Network> >::iterator i(m_networks.begin());i != m_networks.end();++i) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
for (unsigned int k = 0,j = i->second->config().staticIpCount; k < j; ++k) {
if (i->second->config().staticIps[k].containsAddress(remoteAddress))
return false;
@ -586,10 +562,10 @@ bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Identity &id,const i
}
}
if (_cb.pathCheckFunction) {
return (_cb.pathCheckFunction(
if (m_cb.pathCheckFunction) {
return (m_cb.pathCheckFunction(
reinterpret_cast<ZT_Node *>(this),
_uPtr,
m_uPtr,
tPtr,
id.address().toInt(),
(const ZT_Identity *)&id,
@ -602,10 +578,10 @@ bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Identity &id,const i
bool Node::externalPathLookup(void *tPtr,const Identity &id,int family,InetAddress &addr)
{
if (_cb.pathLookupFunction) {
return (_cb.pathLookupFunction(
if (m_cb.pathLookupFunction) {
return (m_cb.pathLookupFunction(
reinterpret_cast<ZT_Node *>(this),
_uPtr,
m_uPtr,
tPtr,
id.address().toInt(),
reinterpret_cast<const ZT_Identity *>(&id),
@ -623,9 +599,9 @@ ZT_ResultCode Node::setPhysicalPathConfiguration(const struct sockaddr_storage *
bool Node::localControllerHasAuthorized(const int64_t now,const uint64_t nwid,const Address &addr) const
{
_localControllerAuthorizations_m.lock();
const int64_t *const at = _localControllerAuthorizations.get(_LocalControllerAuth(nwid,addr));
_localControllerAuthorizations_m.unlock();
m_localControllerAuthorizations_l.lock();
const int64_t *const at = m_localControllerAuthorizations.get(p_LocalControllerAuth(nwid, addr));
m_localControllerAuthorizations_l.unlock();
if (at)
return ((now - *at) < (ZT_NETWORK_AUTOCONF_DELAY * 3));
return false;
@ -635,9 +611,9 @@ bool Node::localControllerHasAuthorized(const int64_t now,const uint64_t nwid,co
void Node::ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &destination,const NetworkConfig &nc,bool sendLegacyFormatConfig)
{
_localControllerAuthorizations_m.lock();
_localControllerAuthorizations[_LocalControllerAuth(nwid,destination)] = now();
_localControllerAuthorizations_m.unlock();
m_localControllerAuthorizations_l.lock();
m_localControllerAuthorizations[p_LocalControllerAuth(nwid, destination)] = now();
m_localControllerAuthorizations_l.unlock();
if (destination == RR->identity.address()) {
SharedPtr<Network> n(network(nwid));

View file

@ -110,7 +110,7 @@ public:
/**
* @return Most recent time value supplied to core via API
*/
ZT_INLINE int64_t now() const noexcept { return _now; }
ZT_INLINE int64_t now() const noexcept { return m_now; }
/**
* Send packet to the physical wire via callback
@ -125,9 +125,9 @@ public:
*/
ZT_INLINE bool putPacket(void *tPtr,const int64_t localSocket,const InetAddress &addr,const void *data,unsigned int len,unsigned int ttl = 0) noexcept
{
return (_cb.wirePacketSendFunction(
return (m_cb.wirePacketSendFunction(
reinterpret_cast<ZT_Node *>(this),
_uPtr,
m_uPtr,
tPtr,
localSocket,
reinterpret_cast<const struct sockaddr_storage *>(&addr),
@ -151,9 +151,9 @@ public:
*/
ZT_INLINE void putFrame(void *tPtr,uint64_t nwid,void **nuptr,const MAC &source,const MAC &dest,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len) noexcept
{
_cb.virtualNetworkFrameFunction(
m_cb.virtualNetworkFrameFunction(
reinterpret_cast<ZT_Node *>(this),
_uPtr,
m_uPtr,
tPtr,
nwid,
nuptr,
@ -171,8 +171,8 @@ public:
*/
ZT_INLINE SharedPtr<Network> network(uint64_t nwid) const noexcept
{
RWMutex::RLock l(_networks_m);
const SharedPtr<Network> *const n = _networks.get(nwid);
RWMutex::RLock l(m_networks_l);
const SharedPtr<Network> *const n = m_networks.get(nwid);
if (n)
return *n;
return SharedPtr<Network>();
@ -183,8 +183,8 @@ public:
*/
ZT_INLINE Vector<ZT_InterfaceAddress> localInterfaceAddresses() const
{
Mutex::Lock _l(_localInterfaceAddresses_m);
return _localInterfaceAddresses;
Mutex::Lock _l(m_localInterfaceAddresses_m);
return m_localInterfaceAddresses;
}
/**
@ -196,7 +196,7 @@ public:
*/
ZT_INLINE void postEvent(void *tPtr,ZT_Event ev,const void *md = nullptr) noexcept
{
_cb.eventCallback(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ev,md);
m_cb.eventCallback(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, ev, md);
}
/**
@ -210,13 +210,13 @@ public:
*/
ZT_INLINE void configureVirtualNetworkPort(void *tPtr,uint64_t nwid,void **nuptr,ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nc) noexcept
{
_cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,nwid,nuptr,op,nc);
m_cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, nwid, nuptr, op, nc);
}
/**
* @return True if node appears online
*/
ZT_INLINE bool online() const noexcept { return _online; }
ZT_INLINE bool online() const noexcept { return m_online; }
/**
* Get a state object
@ -239,8 +239,8 @@ public:
*/
ZT_INLINE void stateObjectPut(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2],const void *const data,const unsigned int len) noexcept
{
if (_cb.statePutFunction)
_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,data,(int)len);
if (m_cb.statePutFunction)
m_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, type, id, data, (int)len);
}
/**
@ -252,8 +252,8 @@ public:
*/
ZT_INLINE void stateObjectDelete(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2]) noexcept
{
if (_cb.statePutFunction)
_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,nullptr,-1);
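// A null data pointer with a length of -1 signals deletion to the state storage callback.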
if (m_cb.statePutFunction)
m_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, type, id, nullptr, -1);
}
/**
@ -292,12 +292,12 @@ public:
/**
* @return This node's identity
*/
ZT_INLINE const Identity &identity() const noexcept { return _RR.identity; }
ZT_INLINE const Identity &identity() const noexcept { return m_RR.identity; }
/**
* @return True if aggressive NAT-traversal mechanisms like scanning of <1024 ports are enabled
*/
ZT_INLINE bool natMustDie() const noexcept { return _natMustDie; }
ZT_INLINE bool natMustDie() const noexcept { return m_natMustDie; }
/**
* Wake peer by calling its alarm() method at or after a given time.
@ -332,68 +332,59 @@ public:
virtual void ncSendError(uint64_t nwid,uint64_t requestPacketId,const Address &destination,NetworkController::ErrorCode errorCode); // NOLINT(cppcoreguidelines-explicit-virtual-functions,hicpp-use-override,modernize-use-override)
private:
RuntimeEnvironment _RR;
RuntimeEnvironment m_RR;
RuntimeEnvironment *const RR;
// Pointer to a struct defined in Node that holds instances of core objects.
void *_objects;
void *m_objects;
// Function pointers to C callbacks supplied via the API.
ZT_Node_Callbacks _cb;
ZT_Node_Callbacks m_cb;
// A user-specified opaque pointer passed back via API callbacks.
void *_uPtr;
// Fingerprints of peers that want to have their alarm() function called at some point in the future.
// These behave like weak references in that the node looks them up in Topology and calls alarm()
// in each peer if that peer object is still held in memory. Calling alarm() unnecessarily on a peer
// is harmless. This just exists as an optimization to prevent having to iterate through all peers
// on every processBackgroundTasks call. A simple map<> is used here because there are usually only
// a few of these, if any.
std::map< Fingerprint,int64_t,std::less<Fingerprint>,Utils::Mallocator< std::pair<const Fingerprint,int64_t> > > _peerAlarms;
Mutex _peerAlarms_l;
void *m_uPtr;
// Cache that remembers whether or not the locally running network controller (if any) has authorized
// someone on their most recent query. This is used by the network controller as a memoization optimization
// to elide unnecessary signature verifications. It might get moved in the future since this is sort of a
// weird place to put it.
struct _LocalControllerAuth
struct p_LocalControllerAuth
{
uint64_t nwid,address;
ZT_INLINE _LocalControllerAuth(const uint64_t nwid_,const Address &address_) noexcept: nwid(nwid_),address(address_.toInt()) {}
ZT_INLINE p_LocalControllerAuth(const uint64_t nwid_, const Address &address_) noexcept: nwid(nwid_), address(address_.toInt()) {}
ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)(nwid + address); }
ZT_INLINE bool operator==(const _LocalControllerAuth &a) const noexcept { return ((a.nwid == nwid) && (a.address == address)); }
ZT_INLINE bool operator!=(const _LocalControllerAuth &a) const noexcept { return ((a.nwid != nwid) || (a.address != address)); }
ZT_INLINE bool operator<(const _LocalControllerAuth &a) const noexcept { return ((a.nwid < nwid) || ((a.nwid == nwid)&&(a.address < address))); }
ZT_INLINE bool operator==(const p_LocalControllerAuth &a) const noexcept { return ((a.nwid == nwid) && (a.address == address)); }
ZT_INLINE bool operator!=(const p_LocalControllerAuth &a) const noexcept { return ((a.nwid != nwid) || (a.address != address)); }
ZT_INLINE bool operator<(const p_LocalControllerAuth &a) const noexcept { return ((a.nwid < nwid) || ((a.nwid == nwid) && (a.address < address))); }
};
Map<_LocalControllerAuth,int64_t> _localControllerAuthorizations;
Mutex _localControllerAuthorizations_m;
Map<p_LocalControllerAuth,int64_t> m_localControllerAuthorizations;
Mutex m_localControllerAuthorizations_l;
// Locally joined networks by network ID.
Map< uint64_t,SharedPtr<Network> > _networks;
RWMutex _networks_m;
Map< uint64_t,SharedPtr<Network> > m_networks;
RWMutex m_networks_l;
// These are local interface addresses that have been configured via the API
// and can be pushed to other nodes.
Vector< ZT_InterfaceAddress > _localInterfaceAddresses;
Mutex _localInterfaceAddresses_m;
Vector< ZT_InterfaceAddress > m_localInterfaceAddresses;
Mutex m_localInterfaceAddresses_m;
// This is locked while running processBackgroundTasks().
Mutex _backgroundTasksLock;
Mutex m_backgroundTasksLock;
// These are locked via m_backgroundTasksLock as they're only checked and modified in processBackgroundTasks().
int64_t _lastPeerPulse;
int64_t _lastHousekeepingRun;
int64_t _lastNetworkHousekeepingRun;
int64_t m_lastPeerPulse;
int64_t m_lastHousekeepingRun;
int64_t m_lastNetworkHousekeepingRun;
// This is the most recent value for time passed in via any of the core API methods.
std::atomic<int64_t> _now;
std::atomic<int64_t> m_now;
// True if we are to use really intensive NAT-busting measures.
std::atomic<bool> _natMustDie;
std::atomic<bool> m_natMustDie;
// True if at least one root appears reachable.
std::atomic<bool> _online;
std::atomic<bool> m_online;
};
} // namespace ZeroTier

View file

@ -19,9 +19,9 @@ namespace ZeroTier {
bool Path::send(const RuntimeEnvironment *const RR,void *const tPtr,const void *const data,const unsigned int len,const int64_t now) noexcept
{
if (RR->node->putPacket(tPtr,_localSocket,_addr,data,len)) {
_lastOut = now;
_outMeter.log(now,len);
if (RR->node->putPacket(tPtr, m_localSocket, m_addr, data, len)) {
m_lastOut = now;
m_outMeter.log(now, len);
return true;
}
return false;

View file

@ -32,7 +32,7 @@ namespace ZeroTier {
class RuntimeEnvironment;
template<unsigned int MF,unsigned int MFP,unsigned int GCT,unsigned int GCS>
template<unsigned int MF,unsigned int MFP,unsigned int GCT,unsigned int GCS,typename P>
class Defragmenter;
/**
@ -43,16 +43,16 @@ class Path
friend class SharedPtr<Path>;
// Allow defragmenter to access fragment-in-flight info stored in Path for performance reasons.
template<unsigned int MF,unsigned int MFP,unsigned int GCT,unsigned int GCS>
template<unsigned int MF,unsigned int MFP,unsigned int GCT,unsigned int GCS,typename P>
friend class Defragmenter;
public:
ZT_INLINE Path(const int64_t l,const InetAddress &r) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
_localSocket(l),
_lastIn(0),
_lastOut(0),
_latency(-1),
_addr(r)
m_localSocket(l),
m_lastIn(0),
m_lastOut(0),
m_latency(-1),
m_addr(r)
{
}
@ -76,8 +76,8 @@ public:
*/
ZT_INLINE void sent(const int64_t now,const unsigned int bytes) noexcept
{
_lastOut.store(now);
_outMeter.log(now,bytes);
m_lastOut.store(now);
m_outMeter.log(now, bytes);
}
/**
@ -88,8 +88,8 @@ public:
*/
ZT_INLINE void received(const int64_t now,const unsigned int bytes) noexcept
{
_lastIn.store(now);
_inMeter.log(now,bytes);
m_lastIn.store(now);
m_inMeter.log(now, bytes);
}
/**
@ -99,54 +99,54 @@ public:
*/
ZT_INLINE void updateLatency(const unsigned int newMeasurement) noexcept
{
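// Simple smoothing: average the previous estimate with the new sample, or take the sample as-is if there is no prior estimate.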
int lat = _latency;
int lat = m_latency;
if (lat > 0) {
_latency = (lat + newMeasurement) / 2;
m_latency = (lat + newMeasurement) / 2;
} else {
_latency = newMeasurement;
m_latency = newMeasurement;
}
}
/**
* @return Latency in milliseconds or -1 if unknown
*/
ZT_INLINE int latency() const noexcept { return _latency; }
ZT_INLINE int latency() const noexcept { return m_latency; }
/**
* Check path aliveness
*
* @param now Current time
*/
ZT_INLINE bool alive(const int64_t now) const noexcept { return ((now - _lastIn.load()) < ZT_PATH_ALIVE_TIMEOUT); }
ZT_INLINE bool alive(const int64_t now) const noexcept { return ((now - m_lastIn.load()) < ZT_PATH_ALIVE_TIMEOUT); }
/**
* @return Physical address
*/
ZT_INLINE const InetAddress &address() const noexcept { return _addr; }
ZT_INLINE const InetAddress &address() const noexcept { return m_addr; }
/**
* @return Local socket as specified by external code
*/
ZT_INLINE int64_t localSocket() const noexcept { return _localSocket; }
ZT_INLINE int64_t localSocket() const noexcept { return m_localSocket; }
/**
* @return Last time we received anything
*/
ZT_INLINE int64_t lastIn() const noexcept { return _lastIn.load(); }
ZT_INLINE int64_t lastIn() const noexcept { return m_lastIn.load(); }
/**
* @return Last time we sent something
*/
ZT_INLINE int64_t lastOut() const noexcept { return _lastOut.load(); }
ZT_INLINE int64_t lastOut() const noexcept { return m_lastOut.load(); }
private:
const int64_t _localSocket;
std::atomic<int64_t> _lastIn;
std::atomic<int64_t> _lastOut;
std::atomic<int> _latency;
const InetAddress _addr;
Meter<> _inMeter;
Meter<> _outMeter;
const int64_t m_localSocket;
std::atomic<int64_t> m_lastIn;
std::atomic<int64_t> m_lastOut;
std::atomic<int> m_latency;
const InetAddress m_addr;
Meter<> m_inMeter;
Meter<> m_outMeter;
// These fields belong to Defragmenter but are kept in Path for performance
// as it's much faster this way than having Defragmenter maintain another

View file

@ -26,21 +26,20 @@ namespace ZeroTier {
Peer::Peer(const RuntimeEnvironment *renv) : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
RR(renv),
_lastReceive(0),
_lastSend(0),
_lastSentHello(),
_lastWhoisRequestReceived(0),
_lastEchoRequestReceived(0),
_lastProbeReceived(0),
_lastAttemptedP2PInit(0),
_lastPrioritizedPaths(0),
_lastAttemptedAggressiveNATTraversal(0),
_alivePathCount(0),
_probe(0),
_vProto(0),
_vMajor(0),
_vMinor(0),
_vRevision(0)
m_lastReceive(0),
m_lastSend(0),
m_lastSentHello(),
m_lastWhoisRequestReceived(0),
m_lastEchoRequestReceived(0),
m_lastPrioritizedPaths(0),
m_alivePathCount(0),
m_tryQueue(),
m_tryQueuePtr(m_tryQueue.end()),
m_probe(0),
m_vProto(0),
m_vMajor(0),
m_vMinor(0),
m_vRevision(0)
{
}
@ -50,16 +49,16 @@ Peer::~Peer() // NOLINT(hicpp-use-equals-default,modernize-use-equals-default)
bool Peer::init(const Identity &peerIdentity)
{
RWMutex::Lock l(_lock);
RWMutex::Lock l(m_lock);
if (_id == peerIdentity)
return true;
_id = peerIdentity;
if (m_id) // already initialized sanity check
return false;
m_id = peerIdentity;
uint8_t ktmp[ZT_SYMMETRIC_KEY_SIZE];
if (!RR->identity.agree(peerIdentity,ktmp))
return false;
_identityKey.init(RR->node->now(),ktmp);
m_identityKey.init(RR->node->now(), ktmp);
Utils::burn(ktmp,sizeof(ktmp));
return true;
@ -76,67 +75,64 @@ void Peer::received(
{
const int64_t now = RR->node->now();
_lastReceive = now;
_inMeter.log(now,payloadLength);
m_lastReceive = now;
m_inMeter.log(now, payloadLength);
if (hops == 0) {
RWMutex::RMaybeWLock l(_lock);
RWMutex::RMaybeWLock l(m_lock);
// If this matches an existing path, skip path learning stuff.
for (unsigned int i=0;i<_alivePathCount;++i) {
if (_paths[i] == path) {
_lock.runlock();
// If this matches an existing path, skip path learning stuff. For the small number
// of paths a peer will have, a linear scan is the fastest way to do the lookup.
for (unsigned int i=0;i < m_alivePathCount;++i) {
if (m_paths[i] == path)
return;
}
}
// If we made it here, we don't already know this path.
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, path->localSocket(), path->address())) {
// SECURITY: note that if we've made it here we expected this OK, see Expect.hpp.
// There is replay protection in effect for OK responses.
if (verb == Protocol::VERB_OK) {
// If we're learning a new path convert the lock to an exclusive write lock.
l.writing();
// SECURITY: in the future we may not accept anything but OK(HELLO) to learn paths,
// but right now we accept any OK for backward compatibility. Note that OK will
// have been checked against expected packet IDs (see Expect.hpp) before we get here,
// and this guards against replay attacks.
// If the path list is full, replace the least recently active path. Otherwise append new path.
unsigned int newPathIdx = 0;
if (_alivePathCount >= ZT_MAX_PEER_NETWORK_PATHS) {
if (m_alivePathCount >= ZT_MAX_PEER_NETWORK_PATHS) {
int64_t lastReceiveTimeMax = 0;
for (unsigned int i=0;i<_alivePathCount;++i) {
if ((_paths[i]->address().family() == path->address().family()) &&
(_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
(_paths[i]->address().ipsEqual2(path->address()))) {
for (unsigned int i=0;i < m_alivePathCount;++i) {
if ((m_paths[i]->address().family() == path->address().family()) &&
(m_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
(m_paths[i]->address().ipsEqual2(path->address()))) {
// Replace older path if everything is the same except the port number, since NAT/firewall reboots
// and other wacky stuff can change port number assignments.
_paths[i] = path;
m_paths[i] = path;
return;
} else if (_paths[i]->lastIn() > lastReceiveTimeMax) {
lastReceiveTimeMax = _paths[i]->lastIn();
} else if (m_paths[i]->lastIn() > lastReceiveTimeMax) {
lastReceiveTimeMax = m_paths[i]->lastIn();
newPathIdx = i;
}
}
} else {
newPathIdx = _alivePathCount++;
newPathIdx = m_alivePathCount++;
}
InetAddress old;
if (_paths[newPathIdx])
old = _paths[newPathIdx]->address();
_paths[newPathIdx] = path;
if (m_paths[newPathIdx])
old = m_paths[newPathIdx]->address();
m_paths[newPathIdx] = path;
// Re-prioritize paths to include the new one.
_prioritizePaths(now);
m_prioritizePaths(now);
// Remember most recently learned paths for future bootstrap attempts on restart.
Endpoint pathEndpoint(path->address());
_bootstrap[pathEndpoint.type()] = pathEndpoint;
m_bootstrap[pathEndpoint.type()] = pathEndpoint;
RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
RR->t->learnedNewPath(tPtr, 0x582fabdd, packetId, m_id, path->address(), old);
} else {
path->sent(now,hello(tPtr,path->localSocket(),path->address(),now));
RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id,ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
RR->t->tryingNewPath(tPtr, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id, ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
}
}
}
@ -194,112 +190,181 @@ unsigned int Peer::hello(void *tPtr,int64_t localSocket,const InetAddress &atAdd
#endif
}
unsigned int Peer::sendNOP(void *const tPtr,const int64_t localSocket,const InetAddress &atAddress,const int64_t now)
unsigned int Peer::probe(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
Buf outp;
Protocol::Header &ph = outp.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
ph.packetId = Protocol::getPacketId();
_id.address().copyTo(ph.destination);
RR->identity.address().copyTo(ph.source);
ph.flags = 0;
ph.verb = Protocol::VERB_NOP;
Protocol::armor(outp,sizeof(Protocol::Header),_identityKey.key(),this->cipher());
RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
return sizeof(Protocol::Header);
if (m_vProto < 11) {
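// Peers running protocol versions before 11 don't recognize the short probe, so fall back to a full (empty) NOP packet.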
Buf outp;
Protocol::Header &ph = outp.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
//ph.packetId = Protocol::getPacketId();
m_id.address().copyTo(ph.destination);
RR->identity.address().copyTo(ph.source);
ph.flags = 0;
ph.verb = Protocol::VERB_NOP;
Protocol::armor(outp, sizeof(Protocol::Header), m_identityKey.key(), this->cipher());
RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
return sizeof(Protocol::Header);
} else {
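// Newer peers accept a bare 4-byte probe value, which is much cheaper than a full packet.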
RR->node->putPacket(tPtr, -1, atAddress, &m_probe, 4);
return 4;
}
}
void Peer::pulse(void *const tPtr,const int64_t now,const bool isRoot)
{
RWMutex::Lock l(_lock);
RWMutex::Lock l(m_lock);
bool needHello = false;
if ((now - _lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
_lastSentHello = now;
if ((now - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
m_lastSentHello = now;
needHello = true;
}
_prioritizePaths(now);
m_prioritizePaths(now);
for(unsigned int i=0;i<_alivePathCount;++i) {
if (needHello) {
needHello = false;
const unsigned int bytes = hello(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
_paths[i]->sent(now,bytes);
sent(now,bytes);
} else if ((now - _paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
_paths[i]->send(RR,tPtr,&now,1,now);
sent(now,1);
}
// TODO: when we merge multipath we'll keep one open per interface to non-roots.
// For roots we try to keep every path open.
if (!isRoot)
return;
}
if (needHello) {
// Try any statically configured addresses.
InetAddress addr;
if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr)) {
RR->t->tryingNewPath(tPtr,0x84a10000,_id,addr,InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
hello(tPtr,-1,addr,now);
}
}
if (!_bootstrap.empty()) {
if (isRoot) {
// Try all bootstrap addresses if this is a root.
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
hello(tPtr,-1,i->second.inetAddr(),now);
}
if (m_alivePathCount == 0) {
// If there are no direct paths, attempt to make one. If there are queued addresses
// to try, attempt one of those. Otherwise try a path we can fetch via API callbacks
// and/or a remembered bootstrap path.
if (m_tryQueue.empty()) {
InetAddress addr;
if (RR->node->externalPathLookup(tPtr, m_id, -1, addr)) {
if ((addr)&&(RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, addr))) {
RR->t->tryingNewPath(tPtr, 0x84a10000, m_id, addr, InetAddress::NIL, 0, 0, Identity::NIL, ZT_TRACE_TRYING_NEW_PATH_REASON_EXPLICITLY_SUGGESTED_ADDRESS);
sent(now,probe(tPtr,-1,addr,now));
}
} else {
// Otherwise try a random bootstrap address.
unsigned int tryAtIndex = (unsigned int)Utils::random() % (unsigned int)_bootstrap.size();
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) {
}
if (!m_bootstrap.empty()) {
unsigned int tryAtIndex = (unsigned int)Utils::random() % (unsigned int)m_bootstrap.size();
for(SortedMap< Endpoint::Type,Endpoint >::const_iterator i(m_bootstrap.begin());i != m_bootstrap.end();++i) {
if (tryAtIndex > 0) {
--tryAtIndex;
} else {
if ( ((i->first == Endpoint::TYPE_INETADDR_V4)||(i->first == Endpoint::TYPE_INETADDR_V6)) && (!i->second.inetAddr().ipsEqual(addr)) ) {
RR->t->tryingNewPath(tPtr,0x0a009444,_id,i->second.inetAddr(),InetAddress::NIL,0,0,Identity::NIL,ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
hello(tPtr,-1,i->second.inetAddr(),now);
if ((i->second.isInetAddr())&&(!i->second.inetAddr().ipsEqual(addr))) {
RR->t->tryingNewPath(tPtr, 0x0a009444, m_id, i->second.inetAddr(), InetAddress::NIL, 0, 0, Identity::NIL, ZT_TRACE_TRYING_NEW_PATH_REASON_BOOTSTRAP_ADDRESS);
sent(now,probe(tPtr,-1,i->second.inetAddr(),now));
break;
}
}
}
}
} else {
for(int k=0;(k<ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE)&&(!m_tryQueue.empty());++k) {
if (m_tryQueuePtr == m_tryQueue.end())
m_tryQueuePtr = m_tryQueue.begin();
if ((now - m_tryQueuePtr->ts) > ZT_PATH_ALIVE_TIMEOUT) {
m_tryQueue.erase(m_tryQueuePtr++);
continue;
}
if (m_tryQueuePtr->target.isInetAddr()) {
if ((m_tryQueuePtr->breakSymmetricBFG1024) && (RR->node->natMustDie())) {
// Attempt aggressive NAT traversal if both requested and enabled.
uint16_t ports[1023];
for (unsigned int i=0;i<1023;++i)
ports[i] = (uint16_t)(i + 1);
for (unsigned int i=0;i<512;++i) {
const uint64_t rn = Utils::random();
const unsigned int a = (unsigned int)rn % 1023;
const unsigned int b = (unsigned int)(rn >> 32U) % 1023;
if (a != b) {
uint16_t tmp = ports[a];
ports[a] = ports[b];
ports[b] = tmp;
}
}
InetAddress addr(m_tryQueuePtr->target.inetAddr());
for (unsigned int i = 0;i < ZT_NAT_T_BFG1024_PORTS_PER_ATTEMPT;++i) {
addr.setPort(ports[i]);
sent(now,probe(tPtr,-1,addr,now));
}
} else {
// Otherwise send a normal probe.
sent(now,probe(tPtr, -1, m_tryQueuePtr->target.inetAddr(), now));
}
}
++m_tryQueuePtr;
}
}
} else {
// Keep direct paths alive, sending a HELLO if we need one or else just a simple byte.
for(unsigned int i=0;i < m_alivePathCount;++i) {
if (needHello) {
needHello = false;
const unsigned int bytes = hello(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), now);
m_paths[i]->sent(now, bytes);
sent(now,bytes);
} else if ((now - m_paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
m_paths[i]->send(RR, tPtr, &now, 1, now);
sent(now,1);
}
}
}
// If we could not reliably send a HELLO via a direct path, send it by way of a root.
if (needHello) {
const SharedPtr<Peer> root(RR->topology->root());
if (root) {
const SharedPtr<Path> via(root->path(now));
if (via) {
const unsigned int bytes = hello(tPtr,via->localSocket(),via->address(),now);
via->sent(now,bytes);
root->relayed(now,bytes);
sent(now,bytes);
}
}
}
}
void Peer::tryDirectPath(const int64_t now,const Endpoint &ep,const bool breakSymmetricBFG1024)
{
RWMutex::Lock l(m_lock);
for(List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i != m_tryQueue.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
if (i->target == ep) {
i->ts = now;
i->breakSymmetricBFG1024 = breakSymmetricBFG1024;
return;
}
}
#ifdef __CPP11__
m_tryQueue.emplace_back(now, ep, breakSymmetricBFG1024);
#else
m_tryQueue.push_back(p_TryQueueItem(now, ep, breakSymmetricBFG1024));
#endif
}
void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
{
RWMutex::RLock l(_lock);
for(unsigned int i=0;i<_alivePathCount;++i) {
if ((_paths[i])&&((_paths[i]->address().family() == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope)))
_paths[i]->sent(now,sendNOP(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now));
RWMutex::RLock l(m_lock);
for(unsigned int i=0;i < m_alivePathCount;++i) {
if ((m_paths[i]) && ((m_paths[i]->address().family() == inetAddressFamily) && (m_paths[i]->address().ipScope() == scope))) {
const unsigned int bytes = probe(tPtr, m_paths[i]->localSocket(), m_paths[i]->address(), now);
m_paths[i]->sent(now, bytes);
sent(now,bytes);
}
}
}
bool Peer::directlyConnected(int64_t now)
{
if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
RWMutex::Lock l(_lock);
_prioritizePaths(now);
return _alivePathCount > 0;
if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
RWMutex::Lock l(m_lock);
m_prioritizePaths(now);
return m_alivePathCount > 0;
} else {
RWMutex::RLock l(_lock);
return _alivePathCount > 0;
RWMutex::RLock l(m_lock);
return m_alivePathCount > 0;
}
}
void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
{
RWMutex::RLock l(_lock);
RWMutex::RLock l(m_lock);
paths.clear();
paths.assign(_paths,_paths + _alivePathCount);
paths.assign(m_paths, m_paths + m_alivePathCount);
}
void Peer::save(void *tPtr) const
@ -312,179 +377,48 @@ void Peer::save(void *tPtr) const
const int len = marshal(buf + 8);
if (len > 0) {
uint64_t id[2];
id[0] = _id.address().toInt();
id[0] = m_id.address().toInt();
id[1] = 0;
RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
}
}
void Peer::tryToContactAt(void *const tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
{
static uint8_t junk = 0;
if (ep.inetAddr()) { // only this endpoint type is currently implemented
if (!RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,ep.inetAddr()))
return;
// Sending a packet with a low TTL before the real message assists traversal with some
// stateful firewalls and is harmless otherwise AFAIK.
++junk;
RR->node->putPacket(tPtr,-1,ep.inetAddr(),&junk,1,2);
// In a few hundred milliseconds we'll send the real packet.
{
RWMutex::Lock l(_lock);
_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ZT_MAX_PEER_NETWORK_PATHS)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
}
// If the peer indicates that they may be behind a symmetric NAT and there are no
// living direct paths, try a few more aggressive things.
if ((ep.inetAddr().family() == AF_INET) && (!directlyConnected(now))) {
unsigned int port = ep.inetAddr().port();
if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
// If the other side is using a low-numbered port and has elected to
// have this done, we can try scanning every port below 1024. The search
// space here is small enough that we have a very good chance of punching.
// Generate a random order list of all <1024 ports except 0 and the original sending port.
uint16_t ports[1022];
uint16_t ctr = 1;
for (int i=0;i<1022;++i) { // NOLINT(modernize-loop-convert)
if (ctr == port) ++ctr;
ports[i] = ctr++;
}
for (int i=0;i<512;++i) {
uint64_t rn = Utils::random();
unsigned int a = ((unsigned int)rn) % 1022;
unsigned int b = ((unsigned int)(rn >> 24U)) % 1022;
if (a != b) {
uint16_t tmp = ports[a];
ports[a] = ports[b];
ports[b] = tmp;
}
}
// Chunk ports into chunks of 128 to try in few hundred millisecond intervals,
// abandoning attempts once there is at least one direct path.
{
static_assert((896 % ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE) == 0,"port scan chunk size doesn't evenly divide port list");
static_assert((1022 - 896) <= ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE,"port scan chunk size needs to be adjusted");
RWMutex::Lock l(_lock);
for (int i=0;i<896;i+=ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE)
_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ports + i,ports + i + ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
_contactQueue.push_back(_ContactQueueItem(ep.inetAddr(),ports + 896,ports + 1022,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
}
} else {
// Otherwise use the simpler sequential port attempt method in intervals.
RWMutex::Lock l(_lock);
for (int k=0;k<3;++k) {
if (++port > 65535) break;
InetAddress tryNext(ep.inetAddr());
tryNext.setPort(port);
_contactQueue.push_back(_ContactQueueItem(tryNext,1)); // NOLINT(hicpp-use-emplace,modernize-use-emplace)
}
}
}
// Start alarms going off to actually send these...
RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
}
void Peer::alarm(void *tPtr,const int64_t now)
{
// Right now alarms are only used for multi-phase or multi-step NAT traversal operations.
// Pop one contact queue item and also clean the queue of any that are no
// longer applicable because the alive path count has exceeded their threshold.
bool stillHaveContactQueueItems;
_ContactQueueItem qi;
{
RWMutex::Lock l(_lock);
if (_contactQueue.empty())
return;
while (_alivePathCount >= _contactQueue.front().alivePathThreshold) {
_contactQueue.pop_front();
if (_contactQueue.empty())
return;
}
_ContactQueueItem &qi2 = _contactQueue.front();
qi.address = qi2.address;
qi.ports = qi2.ports;
qi.alivePathThreshold = qi2.alivePathThreshold;
_contactQueue.pop_front();
for(std::list< _ContactQueueItem,Utils::Mallocator<_ContactQueueItem> >::iterator q(_contactQueue.begin());q!=_contactQueue.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if (_alivePathCount >= q->alivePathThreshold)
_contactQueue.erase(q++);
else ++q;
}
stillHaveContactQueueItems = !_contactQueue.empty();
}
if ((_vProto >= 11) && (_probe != 0)) {
if (qi.ports.empty()) {
RR->node->putPacket(tPtr,-1,qi.address,&_probe,ZT_PROTO_PROBE_LENGTH);
} else {
for (FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) { // NOLINT(hicpp-use-auto,modernize-use-auto)
qi.address.setPort(*p);
RR->node->putPacket(tPtr,-1,qi.address,&_probe,ZT_PROTO_PROBE_LENGTH);
}
}
} else {
if (qi.ports.empty()) {
this->sendNOP(tPtr,-1,qi.address,now);
} else {
for (FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) { // NOLINT(hicpp-use-auto,modernize-use-auto)
qi.address.setPort(*p);
this->sendNOP(tPtr,-1,qi.address,now);
}
}
}
if (stillHaveContactQueueItems)
RR->node->setPeerAlarm(_id.fingerprint(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
data[0] = 0; // serialized peer version
RWMutex::RLock l(_lock);
RWMutex::RLock l(m_lock);
int s = _identityKey.marshal(RR->localCacheSymmetric,data + 1);
int s = m_identityKey.marshal(RR->localCacheSymmetric, data + 1);
if (s < 0)
return -1;
int p = 1 + s;
s = _id.marshal(data + p,false);
s = m_id.marshal(data + p, false);
if (s < 0)
return -1;
p += s;
s = _locator.marshal(data + p);
s = m_locator.marshal(data + p);
if (s <= 0)
return s;
p += s;
data[p++] = (uint8_t)_bootstrap.size();
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
data[p++] = (uint8_t)m_bootstrap.size();
for(std::map< Endpoint::Type,Endpoint >::const_iterator i(m_bootstrap.begin());i != m_bootstrap.end();++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
s = i->second.marshal(data + p);
if (s <= 0)
return -1;
p += s;
}
Utils::storeBigEndian(data + p,(uint16_t)_vProto);
Utils::storeBigEndian(data + p,(uint16_t)m_vProto);
p += 2;
Utils::storeBigEndian(data + p,(uint16_t)_vMajor);
Utils::storeBigEndian(data + p,(uint16_t)m_vMajor);
p += 2;
Utils::storeBigEndian(data + p,(uint16_t)_vMinor);
Utils::storeBigEndian(data + p,(uint16_t)m_vMinor);
p += 2;
Utils::storeBigEndian(data + p,(uint16_t)_vRevision);
Utils::storeBigEndian(data + p,(uint16_t)m_vRevision);
p += 2;
data[p++] = 0;
@ -495,12 +429,12 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
RWMutex::Lock l(_lock);
RWMutex::Lock l(m_lock);
if ((len <= 1) || (data[0] != 0))
return -1;
int s = _identityKey.unmarshal(RR->localCacheSymmetric,data + 1,len);
int s = m_identityKey.unmarshal(RR->localCacheSymmetric, data + 1, len);
if (s < 0)
return -1;
int p = 1 + s;
@ -509,24 +443,24 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
// identity has changed. In this case we do not have to forget everything about
// the peer but we must generate a new identity key by key agreement with our
// new identity.
if (!_identityKey) {
if (!m_identityKey) {
uint8_t tmp[ZT_SYMMETRIC_KEY_SIZE];
if (!RR->identity.agree(_id,tmp))
if (!RR->identity.agree(m_id, tmp))
return -1;
_identityKey.init(RR->node->now(),tmp);
m_identityKey.init(RR->node->now(), tmp);
Utils::burn(tmp,sizeof(tmp));
}
// These are ephemeral and start out as NIL after unmarshal.
_ephemeralKeys[0].clear();
_ephemeralKeys[1].clear();
m_ephemeralKeys[0].clear();
m_ephemeralKeys[1].clear();
s = _id.unmarshal(data + 38,len - 38);
s = m_id.unmarshal(data + 38, len - 38);
if (s < 0)
return s;
p += s;
s = _locator.unmarshal(data + p,len - p);
s = m_locator.unmarshal(data + p, len - p);
if (s < 0)
return s;
p += s;
@ -536,24 +470,24 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
const unsigned int bootstrapCount = data[p++];
if (bootstrapCount > ZT_MAX_PEER_NETWORK_PATHS)
return -1;
_bootstrap.clear();
m_bootstrap.clear();
for(unsigned int i=0;i<bootstrapCount;++i) {
Endpoint tmp;
s = tmp.unmarshal(data + p,len - p);
if (s < 0)
return s;
p += s;
_bootstrap[tmp.type()] = tmp;
m_bootstrap[tmp.type()] = tmp;
}
_probe = 0; // ephemeral token, reset on unmarshal
m_probe = 0; // ephemeral token, reset on unmarshal
if ((p + 10) > len)
return -1;
_vProto = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
_vMajor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
_vMinor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
_vRevision = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
m_vProto = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
m_vMajor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
m_vMinor = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
m_vRevision = Utils::loadBigEndian<uint16_t>(data + p); p += 2;
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
return (p > len) ? -1 : p;
@ -563,28 +497,28 @@ struct _PathPriorityComparisonOperator
{
ZT_INLINE bool operator()(const SharedPtr<Path> &a,const SharedPtr<Path> &b) const noexcept
{
// Sort in order of last received time for receipt of anything over path, which prioritizes
// paths by aliveness. This will go away when we merge in multipath in favor of something
// much smarter.
return ( ((a)&&(a->lastIn() > 0)) && ((!b)||(b->lastIn() <= 0)||(a->lastIn() < b->lastIn())) );
// Sort in descending order of most recent receive time.
return (a->lastIn() > b->lastIn());
}
};
void Peer::_prioritizePaths(const int64_t now)
void Peer::m_prioritizePaths(int64_t now)
{
// assumes _lock is locked for writing
_lastPrioritizedPaths = now;
m_lastPrioritizedPaths = now;
std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());
if (m_alivePathCount > 0) {
// Sort paths in descending order of priority.
std::sort(m_paths, m_paths + m_alivePathCount, _PathPriorityComparisonOperator());
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if ((!_paths[i]) || (!_paths[i]->alive(now))) {
_alivePathCount = i;
for(;i<ZT_MAX_PEER_NETWORK_PATHS;++i)
_paths[i].zero();
break;
// Let go of paths that have expired.
for (unsigned int i = 0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if ((!m_paths[i]) || (!m_paths[i]->alive(now))) {
m_alivePathCount = i;
for (;i < ZT_MAX_PEER_NETWORK_PATHS;++i)
m_paths[i].zero();
break;
}
}
}
}

View file

@ -34,8 +34,6 @@
// version, identity, locator, bootstrap, version info, length of any additional fields
#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_SYMMETRICKEY_MARSHAL_SIZE_MAX + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 1 + (ZT_MAX_PEER_NETWORK_PATHS * ZT_ENDPOINT_MARSHAL_SIZE_MAX) + (2*4) + 2)
#define ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE 128
namespace ZeroTier {
class Topology;
@ -72,20 +70,20 @@ public:
/**
* @return This peer's ZT address (short for identity().address())
*/
ZT_INLINE Address address() const noexcept { return _id.address(); }
ZT_INLINE Address address() const noexcept { return m_id.address(); }
/**
* @return This peer's identity
*/
ZT_INLINE const Identity &identity() const noexcept { return _id; }
ZT_INLINE const Identity &identity() const noexcept { return m_id; }
/**
* @return Copy of current locator
*/
ZT_INLINE Locator locator() const noexcept
{
RWMutex::RLock l(_lock);
return _locator;
RWMutex::RLock l(m_lock);
return m_locator;
}
/**
@ -118,8 +116,8 @@ public:
*/
ZT_INLINE void sent(const int64_t now,const unsigned int bytes) noexcept
{
_lastSend = now;
_outMeter.log(now,bytes);
m_lastSend = now;
m_outMeter.log(now, bytes);
}
/**
@ -130,7 +128,7 @@ public:
*/
ZT_INLINE void relayed(const int64_t now,const unsigned int bytes) noexcept
{
_relayedMeter.log(now,bytes);
m_relayedMeter.log(now, bytes);
}
/**
@ -140,15 +138,15 @@ public:
*/
ZT_INLINE SharedPtr<Path> path(const int64_t now) noexcept
{
if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
RWMutex::Lock l(_lock);
_prioritizePaths(now);
if (_alivePathCount > 0)
return _paths[0];
if ((now - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
RWMutex::Lock l(m_lock);
m_prioritizePaths(now);
if (m_alivePathCount > 0)
return m_paths[0];
} else {
RWMutex::RLock l(_lock);
if (_alivePathCount > 0)
return _paths[0];
RWMutex::RLock l(m_lock);
if (m_alivePathCount > 0)
return m_paths[0];
}
return SharedPtr<Path>();
}
@ -197,7 +195,7 @@ public:
* @param now Current time
* @return Number of bytes sent
*/
unsigned int sendNOP(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
unsigned int probe(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
/**
* Ping this peer if needed and/or perform other periodic tasks.
@ -208,6 +206,15 @@ public:
*/
void pulse(void *tPtr,int64_t now,bool isRoot);
/**
* Add a potential candidate direct path to the P2P "try" queue.
*
* @param now Current time
* @param ep Endpoint to attempt to contact
* @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
*/
void tryDirectPath(int64_t now,const Endpoint &ep,bool breakSymmetricBFG1024);
/**
* Reset paths within a given IP scope and address family
*
@ -228,8 +235,9 @@ public:
*/
ZT_INLINE FCV<Endpoint,ZT_MAX_PEER_NETWORK_PATHS> bootstrap() const noexcept
{
RWMutex::RLock l(m_lock);
FCV<Endpoint,ZT_MAX_PEER_NETWORK_PATHS> r;
for(std::map< Endpoint::Type,Endpoint,std::less<Endpoint::Type>,Utils::Mallocator< std::pair<const Endpoint::Type,Endpoint> > >::const_iterator i(_bootstrap.begin());i!=_bootstrap.end();++i) // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
for(SortedMap<Endpoint::Type,Endpoint>::const_iterator i(m_bootstrap.begin());i != m_bootstrap.end();++i) // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
r.push_back(i->second);
return r;
}
@ -241,14 +249,14 @@ public:
*/
ZT_INLINE void setBootstrap(const Endpoint &ep) noexcept
{
RWMutex::Lock l(_lock);
_bootstrap[ep.type()] = ep;
RWMutex::Lock l(m_lock);
m_bootstrap[ep.type()] = ep;
}
/**
* @return Time of last receive of anything, whether direct or relayed
*/
ZT_INLINE int64_t lastReceive() const noexcept { return _lastReceive; }
ZT_INLINE int64_t lastReceive() const noexcept { return m_lastReceive; }
/**
* @return Average latency of all direct paths or -1 if no direct paths or unknown
@ -257,9 +265,9 @@ public:
{
int ltot = 0;
int lcnt = 0;
RWMutex::RLock l(_lock);
for(unsigned int i=0;i<_alivePathCount;++i) {
int lat = _paths[i]->latency();
RWMutex::RLock l(m_lock);
for(unsigned int i=0;i < m_alivePathCount;++i) {
int lat = m_paths[i]->latency();
if (lat > 0) {
ltot += lat;
++lcnt;
@ -286,17 +294,17 @@ public:
*/
ZT_INLINE void setRemoteVersion(unsigned int vproto,unsigned int vmaj,unsigned int vmin,unsigned int vrev) noexcept
{
_vProto = (uint16_t)vproto;
_vMajor = (uint16_t)vmaj;
_vMinor = (uint16_t)vmin;
_vRevision = (uint16_t)vrev;
m_vProto = (uint16_t)vproto;
m_vMajor = (uint16_t)vmaj;
m_vMinor = (uint16_t)vmin;
m_vRevision = (uint16_t)vrev;
}
ZT_INLINE unsigned int remoteVersionProtocol() const noexcept { return _vProto; }
ZT_INLINE unsigned int remoteVersionMajor() const noexcept { return _vMajor; }
ZT_INLINE unsigned int remoteVersionMinor() const noexcept { return _vMinor; }
ZT_INLINE unsigned int remoteVersionRevision() const noexcept { return _vRevision; }
ZT_INLINE bool remoteVersionKnown() const noexcept { return ((_vMajor > 0) || (_vMinor > 0) || (_vRevision > 0)); }
ZT_INLINE unsigned int remoteVersionProtocol() const noexcept { return m_vProto; }
ZT_INLINE unsigned int remoteVersionMajor() const noexcept { return m_vMajor; }
ZT_INLINE unsigned int remoteVersionMinor() const noexcept { return m_vMinor; }
ZT_INLINE unsigned int remoteVersionRevision() const noexcept { return m_vRevision; }
ZT_INLINE bool remoteVersionKnown() const noexcept { return ((m_vMajor > 0) || (m_vMinor > 0) || (m_vRevision > 0)); }
/**
* @return True if there is at least one alive direct path
@ -315,24 +323,6 @@ public:
*/
void save(void *tPtr) const;
/**
* Attempt to contact this peer at a physical address, subject to internal checks
*
* @param tPtr External user pointer we pass around
* @param ep Endpoint to attempt to contact
* @param now Current time
* @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
*/
void tryToContactAt(void *tPtr,const Endpoint &ep,int64_t now,bool bfg1024);
/**
* Called by Node when an alarm set by this peer goes off
*
* @param tPtr External user pointer we pass around
* @param now Current time
*/
void alarm(void *tPtr,int64_t now);
// NOTE: peer marshal/unmarshal only saves/restores the identity, locator, most
// recent bootstrap address, and version information.
static constexpr int marshalSizeMax() noexcept { return ZT_PEER_MARSHAL_SIZE_MAX; }
@ -344,20 +334,8 @@ public:
*/
ZT_INLINE bool rateGateInboundWhoisRequest(const int64_t now) noexcept
{
if ((now - _lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
_lastWhoisRequestReceived = now;
return true;
}
return false;
}
/**
* Rate limit attempts in response to incoming short probe packets
*/
ZT_INLINE bool rateGateInboundProbe(const int64_t now) noexcept
{
if ((now - _lastProbeReceived) >= ZT_DIRECT_CONNECT_ATTEMPT_INTERVAL) {
_lastProbeReceived = now;
if ((now - m_lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
m_lastWhoisRequestReceived = now;
return true;
}
return false;
@ -368,97 +346,82 @@ public:
*/
ZT_INLINE bool rateGateEchoRequest(const int64_t now) noexcept
{
if ((now - _lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
_lastEchoRequestReceived = now;
if ((now - m_lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
m_lastEchoRequestReceived = now;
return true;
}
return false;
}
private:
void _prioritizePaths(int64_t now);
void m_prioritizePaths(int64_t now);
const RuntimeEnvironment *RR;
// Read/write mutex for non-atomic non-const fields.
RWMutex _lock;
RWMutex m_lock;
// The permanent identity key resulting from agreement between our identity and this peer's identity.
SymmetricKey< AES,0,0 > _identityKey;
SymmetricKey< AES,0,0 > m_identityKey;
// Most recently successful (for decrypt) ephemeral key and one previous key.
SymmetricKey< AES,ZT_SYMMETRIC_KEY_TTL,ZT_SYMMETRIC_KEY_TTL_MESSAGES > _ephemeralKeys[2];
SymmetricKey< AES,ZT_SYMMETRIC_KEY_TTL,ZT_SYMMETRIC_KEY_TTL_MESSAGES > m_ephemeralKeys[2];
Identity _id;
Locator _locator;
Identity m_id;
Locator m_locator;
// the last time something was sent or received from this peer (direct or indirect).
std::atomic<int64_t> _lastReceive;
std::atomic<int64_t> _lastSend;
std::atomic<int64_t> m_lastReceive;
std::atomic<int64_t> m_lastSend;
// The last time we sent a full HELLO to this peer.
int64_t _lastSentHello; // only checked while locked
int64_t m_lastSentHello; // only checked while locked
// The last time a WHOIS request was received from this peer (anti-DOS / anti-flood).
std::atomic<int64_t> _lastWhoisRequestReceived;
std::atomic<int64_t> m_lastWhoisRequestReceived;
// The last time an ECHO request was received from this peer (anti-DOS / anti-flood).
std::atomic<int64_t> _lastEchoRequestReceived;
// The last time a probe was received from this peer (for anti-DOS / anti-flood use).
std::atomic<int64_t> _lastProbeReceived;
// The last time we tried to init P2P connectivity with this peer.
std::atomic<int64_t> _lastAttemptedP2PInit;
std::atomic<int64_t> m_lastEchoRequestReceived;
// The last time we sorted paths in order of preference. (This happens pretty often.)
std::atomic<int64_t> _lastPrioritizedPaths;
// The last time we opened a can of whupass against this peer's NAT (if enabled).
std::atomic<int64_t> _lastAttemptedAggressiveNATTraversal;
std::atomic<int64_t> m_lastPrioritizedPaths;
// Meters measuring actual bandwidth in, out, and relayed via this peer (mostly if this is a root).
Meter<> _inMeter;
Meter<> _outMeter;
Meter<> _relayedMeter;
Meter<> m_inMeter;
Meter<> m_outMeter;
Meter<> m_relayedMeter;
// Direct paths sorted in descending order of preference.
SharedPtr<Path> m_paths[ZT_MAX_PEER_NETWORK_PATHS];
// For SharedPtr<>
std::atomic<int> __refCount;
// Direct paths sorted in descending order of preference.
SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
// Number of paths current alive (number of non-NULL entries in _paths).
unsigned int _alivePathCount;
// Queue of batches of one or more physical addresses to try at some point in the future (for NAT traversal logic).
struct _ContactQueueItem
{
ZT_INLINE _ContactQueueItem() {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
ZT_INLINE _ContactQueueItem(const InetAddress &a,const uint16_t *pstart,const uint16_t *pend,const unsigned int apt) :
address(a),
ports(pstart,pend),
alivePathThreshold(apt) {}
ZT_INLINE _ContactQueueItem(const InetAddress &a,const unsigned int apt) :
address(a),
ports(),
alivePathThreshold(apt) {}
InetAddress address;
FCV<uint16_t,ZT_PEER_BFG1024_PORT_SCAN_CHUNK_SIZE> ports; // if non-empty try these ports, otherwise use the one in address
unsigned int alivePathThreshold; // skip and forget if alive path count is >= this
};
List<_ContactQueueItem> _contactQueue;
unsigned int m_alivePathCount;
// Remembered addresses by endpoint type (std::map is smaller for only a few keys).
std::map< Endpoint::Type,Endpoint,std::less<Endpoint::Type>,Utils::Mallocator< std::pair<const Endpoint::Type,Endpoint> > > _bootstrap;
SortedMap<Endpoint::Type,Endpoint> m_bootstrap;
// Addresses received via PUSH_DIRECT_PATHS etc. that we are scheduled to try.
struct p_TryQueueItem
{
ZT_INLINE p_TryQueueItem() : target(), ts(0), breakSymmetricBFG1024(false) {}
ZT_INLINE p_TryQueueItem(const int64_t now, const Endpoint &t, const bool bfg) : target(t), ts(now), breakSymmetricBFG1024(bfg) {}
Endpoint target;
int64_t ts;
bool breakSymmetricBFG1024;
};
List<p_TryQueueItem> m_tryQueue;
List<p_TryQueueItem>::iterator m_tryQueuePtr; // loops over m_tryQueue like a circular buffer
// 32-bit probe or 0 if unknown.
uint32_t _probe;
uint32_t m_probe;
uint16_t _vProto;
uint16_t _vMajor;
uint16_t _vMinor;
uint16_t _vRevision;
uint16_t m_vProto;
uint16_t m_vMajor;
uint16_t m_vMinor;
uint16_t m_vRevision;
};
} // namespace ZeroTier

View file

@ -24,8 +24,6 @@
#include "Identity.hpp"
/*
* Core ZeroTier protocol packet formats ------------------------------------------------------------------------------
*
* Packet format:
* <[8] 64-bit packet ID / crypto IV>
* <[5] destination ZT address>
@ -77,8 +75,6 @@
* Fragments do not carry their own packet MAC. The entire packet is
* authenticated once it is assembled by the receiver. Incomplete packets
* are discarded after a receiver configured period of time.
*
* --------------------------------------------------------------------------------------------------------------------
*/
/*
@ -112,14 +108,13 @@
* 11 - 2.0.0 ... CURRENT
* + New more WAN-efficient P2P-assisted multicast algorithm
* + HELLO and OK(HELLO) include an extra HMAC to harden authentication
* + HELLO and OK(HELLO) can carry structured meta-data
* + Ephemeral keys for forward secrecy and limited key lifetime
* + HELLO and OK(HELLO) carry meta-data in a dictionary that's encrypted
* + Forward secrecy, key lifetime management
* + Old planet/moon stuff is DEAD! Independent roots are easier.
* + AES encryption is now the default
* + AES encryption with the SIV construction AES-GMAC-SIV
* + New combined Curve25519/NIST P-384 identity type (type 1)
* + Short probe packets to reduce probe bandwidth
* + Aggressive NAT traversal techniques for IPv4 symmetric NATs
* + Remote diagnostics including rewrite of remote tracing
* + More aggressive NAT traversal techniques for IPv4 symmetric NATs
*/
#define ZT_PROTO_VERSION 11
@ -158,12 +153,12 @@
#define ZT_PROTO_MAX_HOPS 7
/**
* NONE/Poly1305 (using Salsa20/12 to generate poly1305 key)
* NONE/Poly1305 (legacy)
*/
#define ZT_PROTO_CIPHER_SUITE__POLY1305_NONE 0
/**
* Salsa2012/Poly1305
* Salsa2012/Poly1305 (legacy)
*/
#define ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012 1
@ -179,7 +174,7 @@
#define ZT_PROTO_CIPHER_SUITE__NONE 2
/**
* AES-GMAC-SIV (AES-256)
* AES-GMAC-SIV
*/
#define ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV 3
@ -229,62 +224,41 @@
#define ZT_PROTO_VERB_FLAG_COMPRESSED 0x80U
/**
* Mask to extract just the verb from the verb field, which also includes flags
* Mask to extract just the verb from the verb / verb flags field
*/
#define ZT_PROTO_VERB_MASK 0x1fU
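// Illustrative sketch: how a received verb/flags byte splits using the masks above.
static inline unsigned int exampleExtractVerb(const uint8_t verbField, bool &compressed) noexcept
{
	compressed = (verbField & ZT_PROTO_VERB_FLAG_COMPRESSED) != 0; // high bit: payload is compressed
	return (unsigned int)(verbField & ZT_PROTO_VERB_MASK);         // low 5 bits: verb ID
}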
/**
* Key derivation function label for the keys used with HMAC-384 in HELLO
*
* With the KDF the 'iter' parameter is 0 for the key used for
* HMAC in HELLO and 1 for the one used in OK(HELLO).
* AES-GMAC-SIV first of two keys
*/
#define ZT_PROTO_KDF_KEY_LABEL_HELLO_HMAC 'H'
#define ZT_KBKDF_LABEL_AES_GMAC_SIV_K0 '0'
/**
* HELLO exchange meta-data: random 128-bit identifier for each running instance
* AES-GMAC-SIV second of two keys
*/
#define ZT_PROTO_HELLO_NODE_META_INSTANCE_ID "i"
#define ZT_KBKDF_LABEL_AES_GMAC_SIV_K1 '1'
/**
* HELLO exchange meta-data: signed locator for this node
* Key used to encrypt dictionary in HELLO with AES-CTR.
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATOR "l"
#define ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT 'H'
/**
* HELLO exchange meta-data: ephemeral C25519 public key
* Key used for extra HMAC-SHA384 authentication on some packets.
*/
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_C25519 "e0"
#define ZT_KBKDF_LABEL_PACKET_HMAC 'M'
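// Illustrative sketch of consuming a one-byte KBKDF label: derive a per-purpose
// sub-key from a base key. The hmacSha384 helper and the exact message layout are
// assumptions for illustration only, not the derivation actually used by the code.
extern void hmacSha384(const uint8_t key[48], const void *msg, unsigned int len, uint8_t out[48]);
static inline void deriveSubKeySketch(const uint8_t baseKey[48], const char label, uint8_t subKey[48])
{
	const uint8_t msg[4] = { 0, (uint8_t)label, 0, 0 }; // minimal counter / label / separator / context
	hmacSha384(baseKey, msg, sizeof(msg), subKey);
}
// e.g. deriveSubKeySketch(identityKey, ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, helloDictKey);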
/**
* HELLO exchange meta-data: ephemeral NIST P-384 public key
*/
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_P384 "e1"
/**
* HELLO exchange meta-data: address(es) of nodes to whom this node will relay
*/
#define ZT_PROTO_HELLO_NODE_META_NEIGHBORS "wr"
/**
* HELLO exchange meta-data: X coordinate of your node (sent in OK(HELLO))
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATION_X "gX"
/**
* HELLO exchange meta-data: Y coordinate of your node (sent in OK(HELLO))
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATION_Y "gY"
/**
* HELLO exchange meta-data: Z coordinate of your node (sent in OK(HELLO))
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATION_Z "gZ"
/**
* HELLO exchange meta-data: preferred cipher suite (may be ignored)
*/
#define ZT_PROTO_HELLO_NODE_META_PREFERRED_CIPHER_SUITE "c"
#define ZT_PROTO_HELLO_NODE_META_INSTANCE_ID "i"
#define ZT_PROTO_HELLO_NODE_META_LOCATOR "l"
#define ZT_PROTO_HELLO_NODE_META_PROBE_TOKEN "p"
#define ZT_PROTO_HELLO_NODE_META_NEIGHBORS "n"
#define ZT_PROTO_HELLO_NODE_META_SOFTWARE_VENDOR "s"
#define ZT_PROTO_HELLO_NODE_META_SOFTWARE_VERSION "v"
#define ZT_PROTO_HELLO_NODE_META_PHYSICAL_DEST "d"
#define ZT_PROTO_HELLO_NODE_META_COMPLIANCE "c"
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_C25519 "0"
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_P384 "1"
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_REMOTE "R"
namespace ZeroTier {
namespace Protocol {
@ -309,42 +283,48 @@ enum Verb
* <[1] software major version (LEGACY)>
* <[1] software minor version (LEGACY)>
* <[2] software revision (LEGACY)>
* <[8] timestamp for determining latency (LEGACY)>
* <[...] binary serialized identity>
* <[8] timestamp>
* <[...] binary serialized full sender identity>
* <[...] physical destination address of packet (LEGACY)>
* <[2] 16-bit reserved "encrypted zero" field (LEGACY)>
* <[4] 32 additional random nonce bits>
* [... start of encrypted section ...]
* <[2] 16-bit length of encrypted dictionary>
* <[...] encrypted dictionary>
* <[2] 16-bit length of preceding encrypted dictionary>
* <[48] HMAC-SHA384 of plaintext packet (with hops masked to 0)>
* [... end of encrypted section ...]
* <[48] HMAC-SHA384 of plaintext packet>
*
* HELLO is sent to initiate a new pairing between two nodes.
* HELLO is sent to initiate a new pairing between two nodes and
* periodically to refresh information.
*
* HELLO is the only packet ever sent without normal payload encryption,
* HELLO is the only packet ever sent without whole payload encryption,
* though an inner encrypted envelope exists to obscure all fields that
* do not need to be sent in the clear. HELLO's MAC field contains a
* Poly1305 MAC for backward compatibility, and v2.x adds an additional
* HMAC-SHA384 at the end for stronger authentication of sessions. HELLO
* authentication is performed using the long-lived identity key only,
* and the encryption of the inner dictionary field is done using a key
* derived from this identity key explicitly for this purpose.
* do not need to be sent in the clear. There is nothing in this
* encrypted section that would be catastrophic if it leaked, but it's
* good to proactively limit exposed information.
*
* The main payload of HELLO is the protocol version and the full identity
* of the sender, which includes the sender's public key(s). An encrypted
* dictionary (key/value store) is also included for additional information.
* This is encrypted using AES-CTR with a derived key and using the final
* 96 bits of the packet's HMAC-SHA384 as the CTR IV. (The HMAC authenticates
* the packet prior to this field being encrypted, making this a SIV
* construction much like AES-GMAC-SIV.)
* Inner encryption is AES-CTR with a key derived using KBKDF and a
* label indicating this specific usage. The 96-bit CTR nonce is the
* packet ID followed by the additional 32 random bits provided before
* the encrypted section.
*
* The length of the dictionary field is included immediately after it so
* that it can be decrypted and the HMAC validated without performing any
* parsing of anything else, since it's a good idea to authenticate any
* message as early as possible in any secure protocol.
* Authentication and encryption in HELLO and OK(HELLO) are always done
* with the long-lived identity key, not ephemeral shared keys. This
* is so ephemeral key negotiation can always occur on the first try
* even if things get out of sync e.g. by one side restarting. Nothing
* in HELLO is likely to be dangerous if decrypted later.
*
* V1.x will ignore the HMAC and dictionary fields as it doesn't understand
* them, but the packet is constructed so that 1.x nodes will parse what
* they need to communicate with 2.x nodes (without forward secrecy) as long
* as we wish to support this.
* HELLO and OK(HELLO) include an extra HMAC at the end of the packet.
* This authenticates them to a level of certainty beyond that afforded
* by regular AEAD. HMAC is computed over the whole packet prior to
* encryption/MAC and with the 3-bit hop count field masked as it is
* with regular packet AEAD, and it is then included in the regular
* packet MAC.
*
* LEGACY: for legacy reasons the MAC field of HELLO is a poly1305
* MAC initialized in the same manner as 1.x. Since HMAC provides
* additional full 256-bit strength authentication this should not be
* a problem for FIPS.
*
* Several legacy fields are present as well for the benefit of 1.x nodes.
* These will go away and become simple reserved space once 1.x is no longer
@ -354,49 +334,52 @@ enum Verb
* old encrypted field is no longer there and that it should stop parsing
* the packet at that point.
*
* The following fields are nearly always present and must exist to support
* forward secrecy (in the case of the instance ID, keys, and key revision)
* or federated root membership (in the case of the locator).
* 1.x does not understand the dictionary and HMAC fields, but it will
* ignore them due to the "encrypted zero" field indicating that the
* packet contains no more information.
*
* Dictionary fields:
*
* The following fields are always present in HELLO:
*
* TIMESTAMP - node's timestamp in milliseconds (supersedes legacy field)
* INSTANCE_ID - a 64-bit unique value generated on each node start
* EPHEMERAL_C25519 - an ephemeral Curve25519 public key
* EPHEMERAL_P384 - an ephemeral NIST P-384 public key
* LOCATOR - signed record enumerating this node's trusted contact points
* PROBE_TOKEN - 32-bit token that can be used to try to contact this peer
* PROBE_TOKEN - 32-bit probe token
*
* The following fields are used to establish forward secrecy:
*
* EPHEMERAL_C25519 - C25519 ephemeral public key (32 bytes)
* EPHEMERAL_P384 - NIST P-384 ephemeral public key (49 bytes)
* EPHEMERAL_REMOTE - SHA-384 of keys we have for peer (absent if none)
*
* The following optional fields may also be present:
*
* NAME - arbitrary short user-defined name for this node
* CONTACT - arbitrary short contact information string for this node
* NEIGHBORS - addresses of node(s) to whom we'll relay (mesh-like routing)
* LOC_X, LOC_Y, LOC_Z - location relative to the nearest large center of mass
* PEER_LOC_X, PEER_LOC_Y, PEER_LOC_Z - where sender thinks peer is located
* SOFTWARE_VENDOR - short name or description of vendor, such as a URL
* SOFTWARE_VERSION - major, minor, revision, and build (packed 64-bit int)
* PHYSICAL_DEST - serialized Endpoint to which this message was sent
* COMPLIANCE - bit mask containing bits for e.g. a FIPS-compliant node
*
* A valid and successfully authenticated HELLO will generate the following
* OK response. It contains an echo of the timestamp supplied by the
* initiating peer, the protocol version, and a dictionary containing
* the same information about the responding peer as the originating peer
* sent.
* The actual keys for these fields are in corresponding #defines by these
* names.
*
* Note that OK(HELLO) as well as HELLO itself is always sent using the long
* lived identity key, not ephemeral keys. This allows ephemeral re-keying to
* always succeed if one side's ephemeral keys are out of date.
* The timestamp field in OK is echoed but the others represent the sender
* of the OK and are not echoes from HELLO. The dictionary in OK typically
* only contains the EPHEMERAL fields, allowing the receiver of the OK to
* confirm that both sides know the correct keys and thus begin using the
* ephemeral shared secret to send packets.
*
* OK payload:
* <[8] timestamp echoed from original HELLO>
* <[1] protocol version>
* <[1] software major version (LEGACY)>
* <[1] software minor version (LEGACY)>
* <[2] software revision (LEGACY)>
* <[...] physical destination address of packet (LEGACY)>
* <[2] 16-bit reserved zero field (LEGACY)>
* <[1] protocol version of responding node>
* <[2] 16-bit length of dictionary>
* <[...] dictionary>
* <[48] HMAC-SHA384 of plaintext packet (with hops masked to 0)>
* <[48] HMAC-SHA384 of plaintext packet>
*
* LEGACY: a legacy format OK will be sent to nodes with older protocol
* versions.
*/
VERB_HELLO = 0x01,
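// Illustrative sketch of the dictionary encryption described above: key derived
// from the identity key with label ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, 96-bit
// CTR nonce = 64-bit packet ID followed by the 32 extra random bits. The helpers
// (deriveSubKeySketch, aesCtrCrypt) are assumptions, not the actual ZeroTier API.
extern void deriveSubKeySketch(const uint8_t baseKey[48], char label, uint8_t subKey[48]);
extern void aesCtrCrypt(const uint8_t *key, const uint8_t nonce[12], uint8_t *data, unsigned int len);
static inline void cryptHelloDictionarySketch(const uint8_t identityKey[48], const uint8_t packetId[8], const uint8_t extraNonce[4], uint8_t *dict, unsigned int dictLen)
{
	uint8_t key[48];
	deriveSubKeySketch(identityKey, ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, key);
	uint8_t nonce[12];
	for (int i = 0;i < 8;++i) nonce[i] = packetId[i];       // packet ID / crypto IV as it appears on the wire
	for (int i = 0;i < 4;++i) nonce[8 + i] = extraNonce[i]; // additional 32 random nonce bits
	aesCtrCrypt(key, nonce, dict, dictLen);                 // CTR is symmetric, so the same call decrypts
}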
@ -416,7 +399,7 @@ enum Verb
* Success response:
* <[1] in-re verb>
* <[8] in-re packet ID>
* <[...] request-specific payload>
* <[...] response-specific payload>
*/
VERB_OK = 0x03,
@ -693,26 +676,14 @@ enum Verb
*
* Path record format:
* <[1] 8-bit path flags>
* <[2] length of extended path characteristics or 0 for none>
* <[...] extended path characteristics>
* <[2] length of extended path data or 0 for none>
* <[...] extended path data>
* <[1] address type>
* <[1] address record length in bytes>
* <[...] address>
*
* Path flags:
* 0x01 - Sender is likely behind a symmetric NAT
* 0x02 - Use BFG1024 algorithm for symmetric NAT-t if conditions met
*
* The receiver may, upon receiving a push, attempt to establish a
* direct link to one or more of the indicated addresses. It is the
* responsibility of the sender to limit which peers it pushes direct
* paths to to those with whom it has a trust relationship. The receiver
* must obey any restrictions provided such as exclusivity or blacklists.
* OK responses to this message are optional.
*
* Note that a direct path push does not imply that learned paths can't
* be used unless they are blacklisted explicitly or unless flag 0x01
* is set.
* 0x01 - BFG1024 symmetric NAT-t requested
*
* OK and ERROR are not generated.
*/
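// Illustrative sketch of stepping over one path record as laid out above; returns
// bytes consumed or -1 on truncation. Bounds checks only, no semantic validation.
static inline int parsePathRecordSketch(const uint8_t *data, int len, uint8_t &flags, uint8_t &addrType, const uint8_t *&addr, unsigned int &addrLen) noexcept
{
	int p = 0;
	if (len < 3) return -1;
	flags = data[p++];                                                                     // 8-bit path flags (0x01: BFG1024 NAT-t requested)
	const unsigned int extLen = ((unsigned int)data[p] << 8U) | (unsigned int)data[p + 1]; // length of extended path data
	p += 2;
	if ((p + (int)extLen + 2) > len) return -1;
	p += (int)extLen;                                                                      // skip extended path data
	addrType = data[p++];                                                                  // address type
	addrLen = data[p++];                                                                   // address record length in bytes
	if ((p + (int)addrLen) > len) return -1;
	addr = data + p;                                                                       // raw address bytes
	return p + (int)addrLen;
}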
@ -754,11 +725,9 @@ enum Verb
*
* Encapsulation exists to enable secure relaying as opposed to the usual
* "dumb" relaying. The latter is faster but secure relaying has roles
* where endpoint privacy is desired. Multiply nested ENCAP packets
* could allow ZeroTier to act as an onion router.
* where endpoint privacy is desired.
*
* When encapsulated packets are forwarded they do have their hop count
* field incremented.
* Packet hop count is incremented as normal.
*/
VERB_ENCAP = 0x17

View file

@ -19,8 +19,8 @@ bool Revocation::sign(const Identity &signer) noexcept
{
uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX+32];
if (signer.hasPrivate()) {
_signedBy = signer.address();
_signatureLength = signer.sign(buf,(unsigned int)marshal(buf,true),_signature,sizeof(_signature));
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
@ -34,20 +34,20 @@ int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSig
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint32_t>(data + p,0); p += 4;
Utils::storeBigEndian<uint32_t>(data + p,_id); p += 4;
Utils::storeBigEndian<uint64_t>(data + p,_networkId); p += 8;
Utils::storeBigEndian<uint32_t>(data + p, m_id); p += 4;
Utils::storeBigEndian<uint64_t>(data + p, m_networkId); p += 8;
Utils::storeBigEndian<uint32_t>(data + p,0); p += 4;
Utils::storeBigEndian<uint32_t>(data + p,_credentialId); p += 4;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)_threshold); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,_flags); p += 8;
_target.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
data[p++] = (uint8_t)_type;
Utils::storeBigEndian<uint32_t>(data + p, m_credentialId); p += 4;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)m_threshold); p += 8;
Utils::storeBigEndian<uint64_t>(data + p, m_flags); p += 8;
m_target.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
data[p++] = (uint8_t)m_type;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)_signatureLength);
Utils::copy(data + p,_signature,_signatureLength);
p += (int)_signatureLength;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)m_signatureLength);
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
@ -63,21 +63,21 @@ int Revocation::unmarshal(const uint8_t *restrict data,const int len) noexcept
if (len < 54)
return -1;
// 4 bytes reserved
_id = Utils::loadBigEndian<uint32_t>(data + 4);
_networkId = Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 4);
m_networkId = Utils::loadBigEndian<uint64_t>(data + 8);
// 4 bytes reserved
_credentialId = Utils::loadBigEndian<uint32_t>(data + 20);
_threshold = (int64_t)Utils::loadBigEndian<uint64_t>(data + 24);
_flags = Utils::loadBigEndian<uint64_t>(data + 32);
_target.setTo(data + 40);
_signedBy.setTo(data + 45);
_type = (ZT_CredentialType)data[50];
m_credentialId = Utils::loadBigEndian<uint32_t>(data + 20);
m_threshold = (int64_t)Utils::loadBigEndian<uint64_t>(data + 24);
m_flags = Utils::loadBigEndian<uint64_t>(data + 32);
m_target.setTo(data + 40);
m_signedBy.setTo(data + 45);
m_type = (ZT_CredentialType)data[50];
// 1 byte reserved
_signatureLength = Utils::loadBigEndian<uint16_t>(data + 52);
int p = 54 + (int)_signatureLength;
if ((_signatureLength > ZT_SIGNATURE_BUFFER_SIZE)||(p > len))
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 52);
int p = 54 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(_signature,data + 54,_signatureLength);
Utils::copy(m_signature, data + 54, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);

View file

@ -59,28 +59,28 @@ public:
* @param ct Credential type being revoked
*/
ZT_INLINE Revocation(const uint32_t i,const uint64_t nwid,const uint32_t cid,const uint64_t thr,const uint64_t fl,const Address &tgt,const ZT_CredentialType ct) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
_id(i),
_credentialId(cid),
_networkId(nwid),
_threshold(thr),
_flags(fl),
_target(tgt),
_signedBy(),
_type(ct),
_signatureLength(0)
m_id(i),
m_credentialId(cid),
m_networkId(nwid),
m_threshold(thr),
m_flags(fl),
m_target(tgt),
m_signedBy(),
m_type(ct),
m_signatureLength(0)
{
}
ZT_INLINE uint32_t id() const noexcept { return _id; }
ZT_INLINE uint32_t credentialId() const noexcept { return _credentialId; }
ZT_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_INLINE int64_t threshold() const noexcept { return _threshold; }
ZT_INLINE const Address &target() const noexcept { return _target; }
ZT_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_INLINE ZT_CredentialType typeBeingRevoked() const noexcept { return _type; }
ZT_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
ZT_INLINE bool fastPropagate() const noexcept { return ((_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
ZT_INLINE uint32_t id() const noexcept { return m_id; }
ZT_INLINE uint32_t credentialId() const noexcept { return m_credentialId; }
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
ZT_INLINE int64_t threshold() const noexcept { return m_threshold; }
ZT_INLINE const Address &target() const noexcept { return m_target; }
ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
ZT_INLINE ZT_CredentialType typeBeingRevoked() const noexcept { return m_type; }
ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }
ZT_INLINE bool fastPropagate() const noexcept { return ((m_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
/**
* @param signer Signing identity, must have private key
@ -101,16 +101,16 @@ public:
int unmarshal(const uint8_t *restrict data,int len) noexcept;
private:
uint32_t _id;
uint32_t _credentialId;
uint64_t _networkId;
int64_t _threshold;
uint64_t _flags;
Address _target;
Address _signedBy;
ZT_CredentialType _type;
unsigned int _signatureLength;
uint8_t _signature[ZT_SIGNATURE_BUFFER_SIZE];
uint32_t m_id;
uint32_t m_credentialId;
uint64_t m_networkId;
int64_t m_threshold;
uint64_t m_flags;
Address m_target;
Address m_signedBy;
ZT_CredentialType m_type;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier

View file

@ -28,32 +28,32 @@ template<typename T>
class ScopedPtr : public TriviallyCopyable
{
public:
explicit ZT_INLINE ScopedPtr(T *const p) noexcept : _p(p) {}
ZT_INLINE ~ScopedPtr() { delete _p; }
explicit ZT_INLINE ScopedPtr(T *const p) noexcept : m_ptr(p) {}
ZT_INLINE ~ScopedPtr() { delete m_ptr; }
ZT_INLINE T *operator->() const noexcept { return _p; }
ZT_INLINE T &operator*() const noexcept { return *_p; }
explicit ZT_INLINE operator bool() const noexcept { return (_p != (T *)0); }
ZT_INLINE T *ptr() const noexcept { return _p; }
ZT_INLINE T *operator->() const noexcept { return m_ptr; }
ZT_INLINE T &operator*() const noexcept { return *m_ptr; }
explicit ZT_INLINE operator bool() const noexcept { return (m_ptr != (T *)0); }
ZT_INLINE T *ptr() const noexcept { return m_ptr; }
ZT_INLINE void swap(const ScopedPtr &p) noexcept
{
T *const tmp = _p;
_p = p._p;
p._p = tmp;
T *const tmp = m_ptr;
m_ptr = p.m_ptr;
p.m_ptr = tmp;
}
ZT_INLINE bool operator==(const ScopedPtr &p) const noexcept { return (_p == p._p); }
ZT_INLINE bool operator!=(const ScopedPtr &p) const noexcept { return (_p != p._p); }
ZT_INLINE bool operator==(T *const p) const noexcept { return (_p == p); }
ZT_INLINE bool operator!=(T *const p) const noexcept { return (_p != p); }
ZT_INLINE bool operator==(const ScopedPtr &p) const noexcept { return (m_ptr == p.m_ptr); }
ZT_INLINE bool operator!=(const ScopedPtr &p) const noexcept { return (m_ptr != p.m_ptr); }
ZT_INLINE bool operator==(T *const p) const noexcept { return (m_ptr == p); }
ZT_INLINE bool operator!=(T *const p) const noexcept { return (m_ptr != p); }
private:
ZT_INLINE ScopedPtr() noexcept {} // NOLINT(hicpp-use-equals-default,hicpp-use-equals-delete,modernize-use-equals-default)
ZT_INLINE ScopedPtr(const ScopedPtr &p) noexcept : _p(nullptr) {}
ZT_INLINE ScopedPtr(const ScopedPtr &p) noexcept : m_ptr(nullptr) {}
ZT_INLINE ScopedPtr &operator=(const ScopedPtr &p) noexcept { return *this; }
T *const _p;
T *const m_ptr;
};
} // namespace ZeroTier

View file

@ -55,8 +55,8 @@ void SelfAwareness::iam(void *tPtr,const Identity &reporter,const int64_t receiv
if ((scope != reporterPhysicalAddress.ipScope())||(scope == InetAddress::IP_SCOPE_NONE)||(scope == InetAddress::IP_SCOPE_LOOPBACK)||(scope == InetAddress::IP_SCOPE_MULTICAST))
return;
Mutex::Lock l(_phy_l);
PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter.address(),receivedOnLocalSocket,reporterPhysicalAddress,scope)];
Mutex::Lock l(m_phy_l);
p_PhySurfaceEntry &entry = m_phy[p_PhySurfaceKey(reporter.address(), receivedOnLocalSocket, reporterPhysicalAddress, scope)];
if ( (trusted) && ((now - entry.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT) && (!entry.mySurface.ipsEqual(myPhysicalAddress)) ) {
// Changes to external surface reported by trusted peers causes path reset in this scope
@ -67,9 +67,9 @@ void SelfAwareness::iam(void *tPtr,const Identity &reporter,const int64_t receiv
// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
// due to multiple reports of endpoint change.
// Don't use 'entry' after this since hash table gets modified.
for(Map<PhySurfaceKey,PhySurfaceEntry>::iterator i(_phy.begin());i!=_phy.end();) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
for(Map<p_PhySurfaceKey,p_PhySurfaceEntry>::iterator i(m_phy.begin());i != m_phy.end();) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
if ((i->first.scope == scope)&&(i->first.reporterPhysicalAddress != reporterPhysicalAddress))
_phy.erase(i++);
m_phy.erase(i++);
else ++i;
}
@ -88,10 +88,10 @@ void SelfAwareness::iam(void *tPtr,const Identity &reporter,const int64_t receiv
void SelfAwareness::clean(int64_t now)
{
Mutex::Lock l(_phy_l);
for(Map<PhySurfaceKey,PhySurfaceEntry>::iterator i(_phy.begin());i!=_phy.end();) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
Mutex::Lock l(m_phy_l);
for(Map<p_PhySurfaceKey,p_PhySurfaceEntry>::iterator i(m_phy.begin());i != m_phy.end();) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
if ((now - i->second.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
_phy.erase(i++);
m_phy.erase(i++);
else ++i;
}
}
@ -102,8 +102,8 @@ SelfAwareness::ExternalAddressList SelfAwareness::externalAddresses(const int64_
Map<InetAddress,unsigned long> counts;
{
Mutex::Lock l(_phy_l);
for(Map<PhySurfaceKey,PhySurfaceEntry>::const_iterator i(_phy.begin());i!=_phy.end();++i) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
Mutex::Lock l(m_phy_l);
for(Map<p_PhySurfaceKey,p_PhySurfaceEntry>::const_iterator i(m_phy.begin());i != m_phy.end();++i) { // NOLINT(modernize-loop-convert,modernize-use-auto,hicpp-use-auto)
if ((now - i->second.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
++counts[i->second.mySurface];
}

View file

@ -20,8 +20,6 @@
#include "Address.hpp"
#include "Mutex.hpp"
#include <map>
namespace ZeroTier {
class Identity;
@ -30,7 +28,7 @@ class RuntimeEnvironment;
/**
* SelfAwareness manages awareness of this peer's external address(es) and NAT situation.
*
* This code should not be capable of achieving sentience and triggering the Terminator wars.
* Name aside, it shouldn't be capable of achieving sentience.
*/
class SelfAwareness
{
@ -67,21 +65,21 @@ public:
ExternalAddressList externalAddresses(int64_t now) const;
private:
struct PhySurfaceKey
struct p_PhySurfaceKey
{
Address reporter;
int64_t receivedOnLocalSocket;
InetAddress reporterPhysicalAddress;
InetAddress::IpScope scope;
ZT_INLINE PhySurfaceKey() noexcept {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
ZT_INLINE PhySurfaceKey(const Address &r,const int64_t rol,const InetAddress &ra,InetAddress::IpScope s) noexcept : reporter(r),receivedOnLocalSocket(rol),reporterPhysicalAddress(ra),scope(s) {}
ZT_INLINE p_PhySurfaceKey() noexcept {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
ZT_INLINE p_PhySurfaceKey(const Address &r, const int64_t rol, const InetAddress &ra, InetAddress::IpScope s) noexcept : reporter(r), receivedOnLocalSocket(rol), reporterPhysicalAddress(ra), scope(s) {}
ZT_INLINE unsigned long hashCode() const noexcept { return ((unsigned long)reporter.toInt() + (unsigned long)receivedOnLocalSocket + (unsigned long)scope); }
ZT_INLINE bool operator==(const PhySurfaceKey &k) const noexcept { return ((reporter == k.reporter) && (receivedOnLocalSocket == k.receivedOnLocalSocket) && (reporterPhysicalAddress == k.reporterPhysicalAddress) && (scope == k.scope)); }
ZT_INLINE bool operator!=(const PhySurfaceKey &k) const noexcept { return (!(*this == k)); }
ZT_INLINE bool operator<(const PhySurfaceKey &k) const noexcept
ZT_INLINE bool operator==(const p_PhySurfaceKey &k) const noexcept { return ((reporter == k.reporter) && (receivedOnLocalSocket == k.receivedOnLocalSocket) && (reporterPhysicalAddress == k.reporterPhysicalAddress) && (scope == k.scope)); }
ZT_INLINE bool operator!=(const p_PhySurfaceKey &k) const noexcept { return (!(*this == k)); }
ZT_INLINE bool operator<(const p_PhySurfaceKey &k) const noexcept
{
if (reporter < k.reporter) {
return true;
@ -100,19 +98,19 @@ private:
}
};
struct PhySurfaceEntry
struct p_PhySurfaceEntry
{
InetAddress mySurface;
uint64_t ts;
bool trusted;
ZT_INLINE PhySurfaceEntry() noexcept : mySurface(),ts(0),trusted(false) {}
ZT_INLINE PhySurfaceEntry(const InetAddress &a,const uint64_t t) noexcept : mySurface(a),ts(t),trusted(false) {}
ZT_INLINE p_PhySurfaceEntry() noexcept : mySurface(), ts(0), trusted(false) {}
ZT_INLINE p_PhySurfaceEntry(const InetAddress &a, const uint64_t t) noexcept : mySurface(a), ts(t), trusted(false) {}
};
const RuntimeEnvironment *RR;
Map< PhySurfaceKey,PhySurfaceEntry > _phy;
Mutex _phy_l;
Map< p_PhySurfaceKey,p_PhySurfaceEntry > m_phy;
Mutex m_phy_l;
};
} // namespace ZeroTier

View file

@ -30,27 +30,27 @@ template<typename T>
class SharedPtr : public TriviallyCopyable
{
public:
ZT_INLINE SharedPtr() noexcept : _ptr((T *)0) {}
explicit ZT_INLINE SharedPtr(T *obj) noexcept : _ptr(obj) { ++obj->__refCount; }
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept : _ptr(sp._getAndInc()) {}
ZT_INLINE SharedPtr() noexcept : m_ptr((T *)0) {}
explicit ZT_INLINE SharedPtr(T *obj) noexcept : m_ptr(obj) { ++obj->__refCount; }
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept : m_ptr(sp._getAndInc()) {}
ZT_INLINE ~SharedPtr()
{
if (_ptr) {
if (--_ptr->__refCount <= 0)
delete _ptr;
if (m_ptr) {
if (--m_ptr->__refCount <= 0)
delete m_ptr;
}
}
ZT_INLINE SharedPtr &operator=(const SharedPtr &sp)
{
if (_ptr != sp._ptr) {
if (m_ptr != sp.m_ptr) {
T *p = sp._getAndInc();
if (_ptr) {
if (--_ptr->__refCount <= 0)
delete _ptr;
if (m_ptr) {
if (--m_ptr->__refCount <= 0)
delete m_ptr;
}
_ptr = p;
m_ptr = p;
}
return *this;
}
@ -67,7 +67,7 @@ public:
{
zero();
++ptr->__refCount;
_ptr = ptr;
m_ptr = ptr;
}
/**
@ -77,7 +77,7 @@ public:
*
* @param ptr Pointer to set
*/
ZT_INLINE void unsafeSet(T *ptr) noexcept { _ptr = ptr; }
ZT_INLINE void unsafeSet(T *ptr) noexcept { m_ptr = ptr; }
/**
* Swap with another pointer 'for free' without ref count overhead
@ -86,9 +86,9 @@ public:
*/
ZT_INLINE void swap(SharedPtr &with) noexcept
{
T *tmp = _ptr;
_ptr = with._ptr;
with._ptr = tmp;
T *tmp = m_ptr;
m_ptr = with.m_ptr;
with.m_ptr = tmp;
}
/**
@ -101,33 +101,33 @@ public:
*/
ZT_INLINE void move(SharedPtr &from)
{
if (_ptr) {
if (--_ptr->__refCount <= 0)
delete _ptr;
if (m_ptr) {
if (--m_ptr->__refCount <= 0)
delete m_ptr;
}
_ptr = from._ptr;
from._ptr = nullptr;
m_ptr = from.m_ptr;
from.m_ptr = nullptr;
}
ZT_INLINE operator bool() const noexcept { return (_ptr != nullptr); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE operator bool() const noexcept { return (m_ptr != nullptr); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE T &operator*() const noexcept { return *_ptr; }
ZT_INLINE T *operator->() const noexcept { return _ptr; }
ZT_INLINE T &operator*() const noexcept { return *m_ptr; }
ZT_INLINE T *operator->() const noexcept { return m_ptr; }
/**
* @return Raw pointer to held object
*/
ZT_INLINE T *ptr() const noexcept { return _ptr; }
ZT_INLINE T *ptr() const noexcept { return m_ptr; }
/**
* Set this pointer to NULL
*/
ZT_INLINE void zero()
{
if (_ptr) {
if (--_ptr->__refCount <= 0)
delete _ptr;
_ptr = (T *)0;
if (m_ptr) {
if (--m_ptr->__refCount <= 0)
delete m_ptr;
m_ptr = (T *)0;
}
}
@ -136,26 +136,26 @@ public:
*/
ZT_INLINE int references() noexcept
{
if (_ptr)
return _ptr->__refCount;
if (m_ptr)
return m_ptr->__refCount;
return 0;
}
ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept { return (_ptr == sp._ptr); }
ZT_INLINE bool operator!=(const SharedPtr &sp) const noexcept { return (_ptr != sp._ptr); }
ZT_INLINE bool operator>(const SharedPtr &sp) const noexcept { return (_ptr > sp._ptr); }
ZT_INLINE bool operator<(const SharedPtr &sp) const noexcept { return (_ptr < sp._ptr); }
ZT_INLINE bool operator>=(const SharedPtr &sp) const noexcept { return (_ptr >= sp._ptr); }
ZT_INLINE bool operator<=(const SharedPtr &sp) const noexcept { return (_ptr <= sp._ptr); }
ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept { return (m_ptr == sp.m_ptr); }
ZT_INLINE bool operator!=(const SharedPtr &sp) const noexcept { return (m_ptr != sp.m_ptr); }
ZT_INLINE bool operator>(const SharedPtr &sp) const noexcept { return (m_ptr > sp.m_ptr); }
ZT_INLINE bool operator<(const SharedPtr &sp) const noexcept { return (m_ptr < sp.m_ptr); }
ZT_INLINE bool operator>=(const SharedPtr &sp) const noexcept { return (m_ptr >= sp.m_ptr); }
ZT_INLINE bool operator<=(const SharedPtr &sp) const noexcept { return (m_ptr <= sp.m_ptr); }
private:
ZT_INLINE T *_getAndInc() const noexcept
{
if (_ptr)
++_ptr->__refCount;
return _ptr;
if (m_ptr)
++m_ptr->__refCount;
return m_ptr;
}
T *_ptr;
T *m_ptr;
};
} // namespace ZeroTier

View file

@ -26,7 +26,9 @@ namespace ZeroTier {
* little-endian for higher performance on the majority of platforms.
*
* Right now this is only used as part of the PoW function for V1 identity
* generation.
* generation. It's used because it's faster than SHA for filling a buffer
* with randomness and, unlike AES, its relative performance is pretty much
* the same across CPU architectures.
*
* @tparam R Number of rounds (default: 32)
*/
@ -46,7 +48,7 @@ public:
*/
ZT_INLINE Speck128(const void *k) noexcept { this->init(k); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE ~Speck128() { Utils::burn(_k,sizeof(_k)); }
ZT_INLINE ~Speck128() { Utils::burn(m_expandedKey, sizeof(m_expandedKey)); }
/**
* Initialize Speck from a 128-bit key
@ -66,14 +68,14 @@ public:
*/
ZT_INLINE void initXY(uint64_t x,uint64_t y) noexcept
{
_k[0] = x;
m_expandedKey[0] = x;
for(uint64_t i=0;i<(R-1);++i) {
x = x >> 8U | x << 56U;
x += y;
x ^= i;
y = y << 3U | y >> 61U;
y ^= x;
_k[i + 1] = y;
m_expandedKey[i + 1] = y;
}
}
@ -89,7 +91,7 @@ public:
ZT_INLINE void encryptXY(uint64_t &x,uint64_t &y) const noexcept
{
for (int i=0;i<R;++i) {
const uint64_t kk = _k[i];
const uint64_t kk = m_expandedKey[i];
x = x >> 8U | x << 56U;
x += y;
x ^= kk;
@ -107,7 +109,7 @@ public:
ZT_INLINE void encryptXYXYXYXY(uint64_t &x0,uint64_t &y0,uint64_t &x1,uint64_t &y1,uint64_t &x2,uint64_t &y2,uint64_t &x3,uint64_t &y3) const noexcept
{
for (int i=0;i<R;++i) {
const uint64_t kk = _k[i];
const uint64_t kk = m_expandedKey[i];
x0 = x0 >> 8U | x0 << 56U;
x1 = x1 >> 8U | x1 << 56U;
x2 = x2 >> 8U | x2 << 56U;
@ -143,7 +145,7 @@ public:
ZT_INLINE void decryptXY(uint64_t &x,uint64_t &y) const noexcept
{
for (int i=(R-1);i>=0;--i) {
const uint64_t kk = _k[i];
const uint64_t kk = m_expandedKey[i];
y ^= x;
y = y >> 3U | y << 61U;
x ^= kk;
@ -183,7 +185,7 @@ public:
}
private:
uint64_t _k[R];
uint64_t m_expandedKey[R];
};
} // namespace ZeroTier

View file

@ -49,11 +49,11 @@ public:
*/
ZT_INLINE SymmetricKey() noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
cipher(),
_ts(0),
_nonceBase(0),
_odometer(0)
m_ts(0),
m_nonceBase(0),
m_odometer(0)
{
Utils::memoryLock(_k,sizeof(_k));
Utils::memoryLock(m_secret, sizeof(m_secret));
}
/**
@ -64,38 +64,38 @@ public:
*/
explicit ZT_INLINE SymmetricKey(const int64_t ts,const void *const key) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
cipher(key),
_ts(ts),
_nonceBase((uint64_t)ts << 22U), // << 22 to shift approximately the seconds since epoch into the most significant 32 bits
_odometer(0)
m_ts(ts),
m_nonceBase((uint64_t)ts << 22U), // << 22 to shift approximately the seconds since epoch into the most significant 32 bits
m_odometer(0)
{
Utils::memoryLock(_k,sizeof(_k));
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(_k,key);
Utils::memoryLock(m_secret, sizeof(m_secret));
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(m_secret, key);
}
ZT_INLINE SymmetricKey(const SymmetricKey &k) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
cipher(k._k),
_ts(k.ts),
_nonceBase(k._nonceBase),
_odometer(k._odometer)
cipher(k.m_secret),
m_ts(k.m_ts),
m_nonceBase(k.m_nonceBase),
m_odometer(k.m_odometer)
{
Utils::memoryLock(_k,sizeof(_k));
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(_k,k._k);
Utils::memoryLock(m_secret, sizeof(m_secret));
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(m_secret, k.m_secret);
}
ZT_INLINE ~SymmetricKey() noexcept
{
Utils::burn(_k,sizeof(_k));
Utils::memoryUnlock(_k,sizeof(_k));
Utils::burn(m_secret, sizeof(m_secret));
Utils::memoryUnlock(m_secret, sizeof(m_secret));
}
ZT_INLINE SymmetricKey &operator=(const SymmetricKey &k) noexcept
{
if (&k != this) {
cipher.init(k._k);
_ts = k._ts;
_nonceBase = k._nonceBase;
_odometer = k._odometer;
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(_k,k._k);
cipher.init(k.m_secret);
m_ts = k.m_ts;
m_nonceBase = k.m_nonceBase;
m_odometer = k.m_odometer;
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(m_secret, k.m_secret);
}
return *this;
}
@ -111,13 +111,13 @@ public:
*/
ZT_INLINE bool init(const int64_t ts,const void *const key) noexcept
{
if ((_ts > 0)&&(memcmp(_k,key,ZT_SYMMETRIC_KEY_SIZE) == 0))
if ((m_ts > 0) && (memcmp(m_secret, key, ZT_SYMMETRIC_KEY_SIZE) == 0))
return false;
cipher.init(key);
_ts = ts;
_nonceBase = (uint64_t)ts << 22U; // << 22 to shift approximately the seconds since epoch into the most significant 32 bits;
_odometer = 0;
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(_k,key);
m_ts = ts;
m_nonceBase = (uint64_t)ts << 22U; // << 22 to shift approximately the seconds since epoch into the most significant 32 bits
m_odometer = 0;
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(m_secret, key);
return true;
}
@ -126,10 +126,10 @@ public:
*/
ZT_INLINE void clear() noexcept
{
_ts = 0;
_nonceBase = 0;
_odometer = 0;
Utils::zero<ZT_SYMMETRIC_KEY_SIZE>(_k);
m_ts = 0;
m_nonceBase = 0;
m_odometer = 0;
Utils::zero<ZT_SYMMETRIC_KEY_SIZE>(m_secret);
}
/**
@ -140,7 +140,7 @@ public:
*/
ZT_INLINE bool expiringSoon(const int64_t now) const noexcept
{
return (TTL > 0) && ( ((now - _ts) >= (TTL / 2)) || (_odometer >= (TTLM / 2)) );
return (TTL > 0) && (((now - m_ts) >= (TTL / 2)) || (m_odometer >= (TTLM / 2)) );
}
/**
@ -151,7 +151,7 @@ public:
*/
ZT_INLINE bool expired(const int64_t now) const noexcept
{
return (TTL > 0) && ( ((now - _ts) >= TTL) || (_odometer >= TTLM) );
return (TTL > 0) && (((now - m_ts) >= TTL) || (m_odometer >= TTLM) );
}
/**
@ -169,7 +169,7 @@ public:
*/
ZT_INLINE const uint8_t *key() const noexcept
{
return _k;
return m_secret;
}
/**
@ -179,13 +179,13 @@ public:
*/
ZT_INLINE uint64_t nextMessageIv() noexcept
{
return _nonceBase + _odometer++;
return m_nonceBase + m_odometer++;
}
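Illustrative sketch (not part of this commit): a tiny self-contained check of the IV scheme above, assuming millisecond timestamps; the sample timestamp value is arbitrary and the program only prints the derived quantities.
#include <cstdint>
#include <cstdio>
int main()
{
	const int64_t tsMs = 1586800000000LL;             // example: ms since epoch
	const uint64_t nonceBase = (uint64_t)tsMs << 22U; // as in the constructor above
	// The top 32 bits of nonceBase equal tsMs >> 10, i.e. roughly the seconds
	// since epoch, so keys created at different times get distinct high bits.
	std::printf("high 32 bits of nonce base: %llu (~seconds since epoch: %lld)\n",
		(unsigned long long)(nonceBase >> 32U), (long long)(tsMs >> 10));
	// nextMessageIv() then returns nonceBase + odometer++, giving each message
	// sent with this key a distinct IV until the odometer wraps.
	const uint64_t odometer = 3;
	std::printf("IV for the 4th message: %llu\n", (unsigned long long)(nonceBase + odometer));
	return 0;
}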
/**
* @return True if this object is not NIL
*/
ZT_INLINE operator bool() const noexcept { return (_ts > 0); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE operator bool() const noexcept { return (m_ts > 0); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
static constexpr int marshalSizeMax() noexcept { return ZT_SYMMETRICKEY_MARSHAL_SIZE_MAX; }
@ -200,9 +200,9 @@ public:
template<typename MC>
ZT_INLINE int marshal(const MC &keyEncCipher,uint8_t data[ZT_SYMMETRICKEY_MARSHAL_SIZE_MAX]) const noexcept
{
Utils::storeBigEndian<uint64_t>(data,(uint64_t)_ts);
Utils::storeBigEndian<uint64_t>(data + 8,_odometer.load());
Utils::storeBigEndian<uint32_t>(data + 16,Utils::fnv1a32(_k,sizeof(_k)));
Utils::storeBigEndian<uint64_t>(data,(uint64_t)m_ts);
Utils::storeBigEndian<uint64_t>(data + 8, m_odometer.load());
Utils::storeBigEndian<uint32_t>(data + 16,Utils::fnv1a32(m_secret, sizeof(m_secret)));
// Key encryption at rest is CBC using the last 32 bits of the timestamp, the odometer,
// and the FNV1a checksum as a 128-bit IV. A duplicate IV wouldn't matter much anyway since
@ -210,10 +210,10 @@ public:
// looks better.
uint8_t tmp[16];
for(int i=0;i<16;++i)
tmp[i] = data[i + 4] ^ _k[i];
tmp[i] = data[i + 4] ^ m_secret[i];
keyEncCipher.encrypt(tmp,data + 20);
for(int i=0;i<16;++i)
tmp[i] = data[i + 20] ^ _k[i + 16];
tmp[i] = data[i + 20] ^ m_secret[i + 16];
keyEncCipher.encrypt(tmp,data + 36);
return ZT_SYMMETRICKEY_MARSHAL_SIZE_MAX;
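Illustrative sketch (not part of this commit): the two-block CBC pattern used above, isolated for clarity. ToyBlockCipher is a placeholder standing in for the real 16-byte-block key-encryption cipher supplied via the template parameter; it is not real cryptography.
#include <cstdint>
struct ToyBlockCipher { // stand-in only: XOR with a constant, NOT real crypto
	void encrypt(const uint8_t in[16], uint8_t out[16]) const noexcept
	{
		for (int i = 0; i < 16; ++i) out[i] = in[i] ^ 0x5a;
	}
};
// Two-block CBC over the 32-byte secret: C1 = E(secret[0..15] XOR IV) and
// C2 = E(secret[16..31] XOR C1), where the IV is bytes 4..19 of the header
// (low 32 bits of the timestamp, the odometer, and the FNV1a checksum).
static void encryptSecretAtRest(const ToyBlockCipher &c, uint8_t data[52], const uint8_t secret[32])
{
	uint8_t tmp[16];
	for (int i = 0; i < 16; ++i) tmp[i] = data[i + 4] ^ secret[i];
	c.encrypt(tmp, data + 20);
	for (int i = 0; i < 16; ++i) tmp[i] = data[i + 20] ^ secret[i + 16];
	c.encrypt(tmp, data + 36);
}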
@ -238,29 +238,29 @@ public:
if (len < ZT_SYMMETRICKEY_MARSHAL_SIZE_MAX)
return -1;
_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data);
_odometer = (uint64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data);
m_odometer = (uint64_t)Utils::loadBigEndian<uint64_t>(data + 8);
const uint32_t fnv = Utils::loadBigEndian<uint32_t>(data + 16); // NOLINT(hicpp-use-auto,modernize-use-auto)
uint8_t tmp[16];
keyDecCipher.decrypt(data + 20,tmp);
for(int i=0;i<16;++i)
_k[i] = data[i + 4] ^ tmp[i];
m_secret[i] = data[i + 4] ^ tmp[i];
keyDecCipher.decrypt(data + 36,tmp);
for(int i=0;i<16;++i)
_k[i + 16] = data[i + 20] ^ tmp[i];
m_secret[i + 16] = data[i + 20] ^ tmp[i];
if (Utils::fnv1a32(_k,sizeof(_k)) != fnv)
if (Utils::fnv1a32(m_secret, sizeof(m_secret)) != fnv)
clear();
return ZT_SYMMETRICKEY_MARSHAL_SIZE_MAX;
}
private:
int64_t _ts;
uint64_t _nonceBase;
std::atomic<uint64_t> _odometer;
uint8_t _k[ZT_SYMMETRIC_KEY_SIZE];
int64_t m_ts;
uint64_t m_nonceBase;
std::atomic<uint64_t> m_odometer;
uint8_t m_secret[ZT_SYMMETRIC_KEY_SIZE];
};
} // namespace ZeroTier

View file

@ -19,8 +19,8 @@ bool Tag::sign(const Identity &signer) noexcept
{
uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
if (signer.hasPrivate()) {
_signedBy = signer.address();
_signatureLength = signer.sign(buf,(unsigned int)marshal(buf,true),_signature,sizeof(_signature));
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
@ -33,17 +33,17 @@ int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const noexc
for(int k=0;k<8;++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p,_networkId); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)_ts); p += 8;
Utils::storeBigEndian<uint32_t>(data + p,_id); p += 4;
Utils::storeBigEndian<uint32_t>(data + p,_value); p += 4;
_issuedTo.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
Utils::storeBigEndian<uint64_t>(data + p, m_networkId); p += 8;
Utils::storeBigEndian<uint64_t>(data + p,(uint64_t)m_ts); p += 8;
Utils::storeBigEndian<uint32_t>(data + p, m_id); p += 4;
Utils::storeBigEndian<uint32_t>(data + p, m_value); p += 4;
m_issuedTo.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p); p += ZT_ADDRESS_LENGTH;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)_signatureLength); p += 2;
Utils::copy(data + p,_signature,_signatureLength);
p += (int)_signatureLength;
Utils::storeBigEndian<uint16_t>(data + p,(uint16_t)m_signatureLength); p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
@ -58,18 +58,18 @@ int Tag::unmarshal(const uint8_t *data,int len) noexcept
{
if (len < 37)
return -1;
_networkId = Utils::loadBigEndian<uint64_t>(data);
_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
_id = Utils::loadBigEndian<uint32_t>(data + 16);
_value = Utils::loadBigEndian<uint32_t>(data + 20);
_issuedTo.setTo(data + 24);
_signedBy.setTo(data + 29);
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 16);
m_value = Utils::loadBigEndian<uint32_t>(data + 20);
m_issuedTo.setTo(data + 24);
m_signedBy.setTo(data + 29);
// 1 byte reserved
_signatureLength = Utils::loadBigEndian<uint16_t>(data + 35);
int p = 37 + (int)_signatureLength;
if ((_signatureLength > ZT_SIGNATURE_BUFFER_SIZE)||(p > len))
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 35);
int p = 37 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(_signature,data + p,_signatureLength);
Utils::copy(m_signature, data + p, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);

View file

@ -65,24 +65,24 @@ public:
* @param value Tag value
*/
ZT_INLINE Tag(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id,const uint32_t value) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
_id(id),
_value(value),
_networkId(nwid),
_ts(ts),
_issuedTo(issuedTo),
_signedBy(),
_signatureLength(0)
m_id(id),
m_value(value),
m_networkId(nwid),
m_ts(ts),
m_issuedTo(issuedTo),
m_signedBy(),
m_signatureLength(0)
{
}
ZT_INLINE uint32_t id() const noexcept { return _id; }
ZT_INLINE const uint32_t &value() const noexcept { return _value; }
ZT_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_INLINE int64_t timestamp() const noexcept { return _ts; }
ZT_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
ZT_INLINE uint32_t id() const noexcept { return m_id; }
ZT_INLINE const uint32_t &value() const noexcept { return m_value; }
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }
ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }
/**
* Sign this tag
@ -105,7 +105,7 @@ public:
int unmarshal(const uint8_t *data,int len) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const Tag &t) const noexcept { return (_id < t._id); }
ZT_INLINE bool operator<(const Tag &t) const noexcept { return (m_id < t.m_id); }
ZT_INLINE bool operator==(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) == 0); }
ZT_INLINE bool operator!=(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) != 0); }
@ -125,14 +125,14 @@ public:
};
private:
uint32_t _id;
uint32_t _value;
uint64_t _networkId;
int64_t _ts;
Address _issuedTo;
Address _signedBy;
unsigned int _signatureLength;
uint8_t _signature[ZT_SIGNATURE_BUFFER_SIZE];
uint32_t m_id;
uint32_t m_value;
uint64_t m_networkId;
int64_t m_ts;
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier

View file

@ -407,18 +407,6 @@ extern "C" const char *ZTT_general()
ZT_T_PRINTF("[general] Utils::getSecureRandom() sample: %s" ZT_EOL_S,secrands);
}
{
ZT_T_PRINTF("[general] Sanity checking Protocol::getPacketId()... ");
std::set<uint64_t> pids;
for(unsigned long i=0;i<1048576;++i)
pids.insert(Protocol::getPacketId());
if (pids.size() != 1048576) {
ZT_T_PRINTF("FAILED (collision after only 1048576 generations!)");
return "getPacketId() produced collisions";
}
ZT_T_PRINTF("OK" ZT_EOL_S);
}
{
ZT_T_PRINTF("[general] Testing FCV (fixed capacity vector)... ");
long cnt = 0;

View file

@ -38,8 +38,8 @@
* in "valgrind" or a similar tool to detect marginal bad behavior.
*/
#ifndef ZT_TESTS_HPP
#define ZT_TESTS_HPP
#ifndef ZT_TESTS_H
#define ZT_TESTS_H
#ifdef ZT_ENABLE_TESTS
@ -50,6 +50,10 @@
#define ZT_T_PRINTF(fmt,...) printf((fmt),##__VA_ARGS__),fflush(stdout)
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* Test platform, compiler behavior, utility functions, and core classes
*/

View file

@ -36,7 +36,7 @@ struct _RootSortComparisonOperator
Topology::Topology(const RuntimeEnvironment *renv,void *tPtr) :
RR(renv),
_numConfiguredPhysicalPaths(0)
m_numConfiguredPhysicalPaths(0)
{
uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_ROOTS,idtmp));
@ -47,24 +47,24 @@ Topology::Topology(const RuntimeEnvironment *renv,void *tPtr) :
Identity id;
int l = id.unmarshal(dptr,drem);
if (l > 0) {
_roots.insert(id);
m_roots.insert(id);
dptr += l;
drem -= l;
}
}
}
for(std::set<Identity>::const_iterator r(_roots.begin());r!=_roots.end();++r) {
for(std::set<Identity>::const_iterator r(m_roots.begin());r != m_roots.end();++r) {
SharedPtr<Peer> p;
_loadCached(tPtr,r->address(),p);
m_loadCached(tPtr, r->address(), p);
if ((!p)||(p->identity() != *r)) {
p.set(new Peer(RR));
p->init(*r);
}
_rootPeers.push_back(p);
_peers[p->address()] = p;
_peersByIncomingProbe[p->incomingProbe()] = p;
_peersByIdentityHash[p->identity().fingerprint()] = p;
m_rootPeers.push_back(p);
m_peers[p->address()] = p;
m_peersByIncomingProbe[p->incomingProbe()] = p;
m_peersByIdentityHash[p->identity().fingerprint()] = p;
}
}
@ -74,43 +74,43 @@ Topology::~Topology()
SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
{
RWMutex::Lock _l(_peers_l);
RWMutex::Lock _l(m_peers_l);
SharedPtr<Peer> &hp = _peers[peer->address()];
SharedPtr<Peer> &hp = m_peers[peer->address()];
if (hp)
return hp;
_loadCached(tPtr,peer->address(),hp);
m_loadCached(tPtr, peer->address(), hp);
if (hp) {
_peersByIncomingProbe[peer->incomingProbe()] = hp;
_peersByIdentityHash[peer->identity().fingerprint()] = hp;
m_peersByIncomingProbe[peer->incomingProbe()] = hp;
m_peersByIdentityHash[peer->identity().fingerprint()] = hp;
return hp;
}
hp = peer;
_peersByIncomingProbe[peer->incomingProbe()] = peer;
_peersByIdentityHash[peer->identity().fingerprint()] = peer;
m_peersByIncomingProbe[peer->incomingProbe()] = peer;
m_peersByIdentityHash[peer->identity().fingerprint()] = peer;
return peer;
}
void Topology::getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
{
RWMutex::RLock l(_peers_l);
RWMutex::RLock l(m_peers_l);
allPeers.clear();
allPeers.reserve(_peers.size());
for(Map< Address,SharedPtr<Peer> >::const_iterator i(_peers.begin());i!=_peers.end();++i)
allPeers.reserve(m_peers.size());
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i)
allPeers.push_back(i->second);
}
void Topology::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig)
{
if (!pathNetwork) {
_numConfiguredPhysicalPaths = 0;
m_numConfiguredPhysicalPaths = 0;
} else {
std::map<InetAddress,ZT_PhysicalPathConfiguration> cpaths;
for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i)
cpaths[_physicalPathConfig[i].first] = _physicalPathConfig[i].second;
for(unsigned int i=0,j=m_numConfiguredPhysicalPaths;i < j;++i)
cpaths[m_physicalPathConfig[i].first] = m_physicalPathConfig[i].second;
if (pathConfig) {
ZT_PhysicalPathConfiguration pc(*pathConfig);
@ -129,35 +129,35 @@ void Topology::setPhysicalPathConfiguration(const struct sockaddr_storage *pathN
unsigned int cnt = 0;
for(std::map<InetAddress,ZT_PhysicalPathConfiguration>::const_iterator i(cpaths.begin());((i!=cpaths.end())&&(cnt<ZT_MAX_CONFIGURABLE_PATHS));++i) {
_physicalPathConfig[cnt].first = i->first;
_physicalPathConfig[cnt].second = i->second;
m_physicalPathConfig[cnt].first = i->first;
m_physicalPathConfig[cnt].second = i->second;
++cnt;
}
_numConfiguredPhysicalPaths = cnt;
m_numConfiguredPhysicalPaths = cnt;
}
}
void Topology::addRoot(void *tPtr,const Identity &id,const InetAddress &bootstrap)
{
if (id == RR->identity) return; // sanity check
RWMutex::Lock l1(_peers_l);
std::pair< std::set<Identity>::iterator,bool > ir(_roots.insert(id));
RWMutex::Lock l1(m_peers_l);
std::pair< std::set<Identity>::iterator,bool > ir(m_roots.insert(id));
if (ir.second) {
SharedPtr<Peer> &p = _peers[id.address()];
SharedPtr<Peer> &p = m_peers[id.address()];
if (!p) {
p.set(new Peer(RR));
p->init(id);
if (bootstrap)
p->setBootstrap(Endpoint(bootstrap));
_peersByIncomingProbe[p->incomingProbe()] = p;
_peersByIdentityHash[p->identity().fingerprint()] = p;
m_peersByIncomingProbe[p->incomingProbe()] = p;
m_peersByIdentityHash[p->identity().fingerprint()] = p;
}
_rootPeers.push_back(p);
m_rootPeers.push_back(p);
uint8_t *const roots = (uint8_t *)malloc(ZT_IDENTITY_MARSHAL_SIZE_MAX * _roots.size());
uint8_t *const roots = (uint8_t *)malloc(ZT_IDENTITY_MARSHAL_SIZE_MAX * m_roots.size());
if (roots) {
int p = 0;
for(std::set<Identity>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
for(std::set<Identity>::const_iterator i(m_roots.begin());i != m_roots.end();++i) {
int pp = i->marshal(roots + p,false);
if (pp > 0)
p += pp;
@ -173,16 +173,16 @@ void Topology::addRoot(void *tPtr,const Identity &id,const InetAddress &bootstra
bool Topology::removeRoot(const Identity &id)
{
RWMutex::Lock l1(_peers_l);
std::set<Identity>::iterator r(_roots.find(id));
if (r != _roots.end()) {
for(std::vector< SharedPtr<Peer> >::iterator p(_rootPeers.begin());p!=_rootPeers.end();++p) {
RWMutex::Lock l1(m_peers_l);
std::set<Identity>::iterator r(m_roots.find(id));
if (r != m_roots.end()) {
for(std::vector< SharedPtr<Peer> >::iterator p(m_rootPeers.begin());p != m_rootPeers.end();++p) {
if ((*p)->identity() == id) {
_rootPeers.erase(p);
m_rootPeers.erase(p);
break;
}
}
_roots.erase(r);
m_roots.erase(r);
return true;
}
return false;
@ -190,28 +190,28 @@ bool Topology::removeRoot(const Identity &id)
void Topology::rankRoots(const int64_t now)
{
RWMutex::Lock l1(_peers_l);
std::sort(_rootPeers.begin(),_rootPeers.end(),_RootSortComparisonOperator(now));
RWMutex::Lock l1(m_peers_l);
std::sort(m_rootPeers.begin(), m_rootPeers.end(), _RootSortComparisonOperator(now));
}
void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
{
{
RWMutex::Lock l1(_peers_l);
for(Map< Address,SharedPtr<Peer> >::iterator i(_peers.begin());i!=_peers.end();) {
if ( (!i->second->alive(now)) && (_roots.count(i->second->identity()) == 0) ) {
RWMutex::Lock l1(m_peers_l);
for(Map< Address,SharedPtr<Peer> >::iterator i(m_peers.begin());i != m_peers.end();) {
if ( (!i->second->alive(now)) && (m_roots.count(i->second->identity()) == 0) ) {
i->second->save(tPtr);
_peersByIncomingProbe.erase(i->second->incomingProbe());
_peersByIdentityHash.erase(i->second->identity().fingerprint());
_peers.erase(i++);
m_peersByIncomingProbe.erase(i->second->incomingProbe());
m_peersByIdentityHash.erase(i->second->identity().fingerprint());
m_peers.erase(i++);
} else ++i;
}
}
{
RWMutex::Lock l1(_paths_l);
for(Map< uint64_t,SharedPtr<Path> >::iterator i(_paths.begin());i!=_paths.end();) {
RWMutex::Lock l1(m_paths_l);
for(Map< uint64_t,SharedPtr<Path> >::iterator i(m_paths.begin());i != m_paths.end();) {
if ((i->second.references() <= 1)&&(!i->second->alive(now)))
_paths.erase(i++);
m_paths.erase(i++);
else ++i;
}
}
@ -219,12 +219,12 @@ void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
void Topology::saveAll(void *tPtr)
{
RWMutex::RLock l(_peers_l);
for(Map< Address,SharedPtr<Peer> >::iterator i(_peers.begin());i!=_peers.end();++i)
RWMutex::RLock l(m_peers_l);
for(Map< Address,SharedPtr<Peer> >::iterator i(m_peers.begin());i != m_peers.end();++i)
i->second->save(tPtr);
}
void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr<Peer> &peer)
{
try {
uint64_t id[2];

View file

@ -67,18 +67,18 @@ public:
ZT_INLINE SharedPtr<Peer> peer(void *tPtr,const Address &zta,const bool loadFromCached = true)
{
{
RWMutex::RLock l(_peers_l);
const SharedPtr<Peer> *const ap = _peers.get(zta);
RWMutex::RLock l(m_peers_l);
const SharedPtr<Peer> *const ap = m_peers.get(zta);
if (ap)
return *ap;
}
{
SharedPtr<Peer> p;
if (loadFromCached) {
_loadCached(tPtr,zta,p);
m_loadCached(tPtr, zta, p);
if (p) {
RWMutex::Lock l(_peers_l);
SharedPtr<Peer> &hp = _peers[zta];
RWMutex::Lock l(m_peers_l);
SharedPtr<Peer> &hp = m_peers[zta];
if (hp)
return hp;
hp = p;
@ -96,8 +96,8 @@ public:
*/
ZT_INLINE SharedPtr<Peer> peerByHash(const Fingerprint &hash)
{
RWMutex::RLock _l(_peers_l);
const SharedPtr<Peer> *const ap = _peersByIdentityHash.get(hash);
RWMutex::RLock _l(m_peers_l);
const SharedPtr<Peer> *const ap = m_peersByIdentityHash.get(hash);
if (ap)
return *ap;
return SharedPtr<Peer>();
@ -111,8 +111,8 @@ public:
*/
ZT_INLINE SharedPtr<Peer> peerByProbe(const uint64_t probe)
{
RWMutex::RLock _l(_peers_l);
const SharedPtr<Peer> *const ap = _peersByIncomingProbe.get(probe);
RWMutex::RLock _l(m_peers_l);
const SharedPtr<Peer> *const ap = m_peersByIncomingProbe.get(probe);
if (ap)
return *ap;
return SharedPtr<Peer>();
@ -127,17 +127,17 @@ public:
*/
ZT_INLINE SharedPtr<Path> path(const int64_t l,const InetAddress &r)
{
const uint64_t k = _getPathKey(l,r);
const uint64_t k = s_getPathKey(l, r);
{
RWMutex::RLock lck(_paths_l);
SharedPtr<Path> *const p = _paths.get(k);
RWMutex::RLock lck(m_paths_l);
SharedPtr<Path> *const p = m_paths.get(k);
if (p)
return *p;
}
{
SharedPtr<Path> p(new Path(l,r));
RWMutex::Lock lck(_paths_l);
SharedPtr<Path> &p2 = _paths[k];
RWMutex::Lock lck(m_paths_l);
SharedPtr<Path> &p2 = m_paths[k];
if (p2)
return p2;
p2 = p;
@ -150,10 +150,10 @@ public:
*/
ZT_INLINE SharedPtr<Peer> root() const
{
RWMutex::RLock l(_peers_l);
if (_rootPeers.empty())
RWMutex::RLock l(m_peers_l);
if (m_rootPeers.empty())
return SharedPtr<Peer>();
return _rootPeers.front();
return m_rootPeers.front();
}
/**
@ -162,8 +162,8 @@ public:
*/
ZT_INLINE bool isRoot(const Identity &id) const
{
RWMutex::RLock l(_peers_l);
return (_roots.count(id) > 0);
RWMutex::RLock l(m_peers_l);
return (m_roots.count(id) > 0);
}
/**
@ -178,8 +178,8 @@ public:
template<typename F>
ZT_INLINE void eachPeer(F f) const
{
RWMutex::RLock l(_peers_l);
for(Map< Address,SharedPtr<Peer> >::const_iterator i(_peers.begin());i!=_peers.end();++i) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
RWMutex::RLock l(m_peers_l);
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
f(i->second);
}
@ -195,16 +195,16 @@ public:
template<typename F>
ZT_INLINE void eachPeerWithRoot(F f) const
{
RWMutex::RLock l(_peers_l);
RWMutex::RLock l(m_peers_l);
std::vector<uintptr_t> rootPeerPtrs;
rootPeerPtrs.reserve(_rootPeers.size());
for(std::vector< SharedPtr<Peer> >::const_iterator rp(_rootPeers.begin());rp!=_rootPeers.end();++rp) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
rootPeerPtrs.reserve(m_rootPeers.size());
for(std::vector< SharedPtr<Peer> >::const_iterator rp(m_rootPeers.begin());rp != m_rootPeers.end();++rp) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
rootPeerPtrs.push_back((uintptr_t)rp->ptr());
std::sort(rootPeerPtrs.begin(),rootPeerPtrs.end());
try {
for(Map< Address,SharedPtr<Peer> >::const_iterator i(_peers.begin());i!=_peers.end();++i) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
f(i->second,std::binary_search(rootPeerPtrs.begin(),rootPeerPtrs.end(),(uintptr_t)i->second.ptr()));
} catch ( ... ) {} // should not throw
}
@ -218,8 +218,8 @@ public:
template<typename F>
ZT_INLINE void eachPath(F f) const
{
RWMutex::RLock l(_paths_l);
for(Map< uint64_t,SharedPtr<Path> >::const_iterator i(_paths.begin());i!=_paths.end();++i) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
RWMutex::RLock l(m_paths_l);
for(Map< uint64_t,SharedPtr<Path> >::const_iterator i(m_paths.begin());i != m_paths.end();++i) // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
f(i->second);
}
@ -239,10 +239,10 @@ public:
*/
ZT_INLINE void getOutboundPathInfo(const InetAddress &physicalAddress,unsigned int &mtu,uint64_t &trustedPathId)
{
for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i) {
if (_physicalPathConfig[i].first.containsAddress(physicalAddress)) {
trustedPathId = _physicalPathConfig[i].second.trustedPathId;
mtu = _physicalPathConfig[i].second.mtu;
for(unsigned int i=0,j=m_numConfiguredPhysicalPaths;i < j;++i) {
if (m_physicalPathConfig[i].first.containsAddress(physicalAddress)) {
trustedPathId = m_physicalPathConfig[i].second.trustedPathId;
mtu = m_physicalPathConfig[i].second.mtu;
return;
}
}
@ -256,9 +256,9 @@ public:
*/
ZT_INLINE uint64_t getOutboundPathTrust(const InetAddress &physicalAddress)
{
for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i) {
if (_physicalPathConfig[i].first.containsAddress(physicalAddress))
return _physicalPathConfig[i].second.trustedPathId;
for(unsigned int i=0,j=m_numConfiguredPhysicalPaths;i < j;++i) {
if (m_physicalPathConfig[i].first.containsAddress(physicalAddress))
return m_physicalPathConfig[i].second.trustedPathId;
}
return 0;
}
@ -271,8 +271,8 @@ public:
*/
ZT_INLINE bool shouldInboundPathBeTrusted(const InetAddress &physicalAddress,const uint64_t trustedPathId)
{
for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i) {
if ((_physicalPathConfig[i].second.trustedPathId == trustedPathId)&&(_physicalPathConfig[i].first.containsAddress(physicalAddress)))
for(unsigned int i=0,j=m_numConfiguredPhysicalPaths;i < j;++i) {
if ((m_physicalPathConfig[i].second.trustedPathId == trustedPathId) && (m_physicalPathConfig[i].first.containsAddress(physicalAddress)))
return true;
}
return false;
@ -319,13 +319,13 @@ public:
private:
// Load cached peer and set 'peer' to it, if one is found.
void _loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer);
void m_loadCached(void *tPtr, const Address &zta, SharedPtr<Peer> &peer);
// This is a secure random integer created at startup to salt the calculation of path hash map keys
static const uint64_t s_pathHashSalt;
// This gets an integer key from an InetAddress for looking up paths.
ZT_INLINE uint64_t _getPathKey(int64_t l,const InetAddress &r) const
static ZT_INLINE uint64_t s_getPathKey(int64_t l, const InetAddress &r)
{
if (r.family() == AF_INET) {
return s_pathHashSalt + (uint64_t)(reinterpret_cast<const struct sockaddr_in *>(&r)->sin_addr.s_addr) + (uint64_t)Utils::ntoh(reinterpret_cast<const struct sockaddr_in *>(&r)->sin_port) + (uint64_t)l;
@ -348,18 +348,19 @@ private:
const RuntimeEnvironment *const RR;
RWMutex _peers_l;
RWMutex _paths_l;
RWMutex m_peers_l;
RWMutex m_paths_l;
std::pair< InetAddress,ZT_PhysicalPathConfiguration > _physicalPathConfig[ZT_MAX_CONFIGURABLE_PATHS];
unsigned int _numConfiguredPhysicalPaths;
std::pair< InetAddress,ZT_PhysicalPathConfiguration > m_physicalPathConfig[ZT_MAX_CONFIGURABLE_PATHS];
unsigned int m_numConfiguredPhysicalPaths;
Map< Address,SharedPtr<Peer> > _peers;
Map< uint64_t,SharedPtr<Peer> > _peersByIncomingProbe;
Map< Fingerprint,SharedPtr<Peer> > _peersByIdentityHash;
Map< uint64_t,SharedPtr<Path> > _paths;
std::set< Identity > _roots; // locked by _peers_l
std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l
Map< uint64_t,SharedPtr<Path> > m_paths;
Map< Address,SharedPtr<Peer> > m_peers;
Map< uint64_t,SharedPtr<Peer> > m_peersByIncomingProbe;
Map< Fingerprint,SharedPtr<Peer> > m_peersByIdentityHash;
Set< Identity > m_roots;
Vector< SharedPtr<Peer> > m_rootPeers;
};
} // namespace ZeroTier

View file

@ -28,41 +28,10 @@ namespace ZeroTier {
Trace::Trace(const RuntimeEnvironment *renv) :
RR(renv),
_vl1(false),
_vl2(false),
_vl2Filter(false),
_vl2Multicast(false)
_f(0)
{
}
Trace::Str<ZT_INETADDRESS_STRING_SIZE_MAX> Trace::str(const InetAddress &a,const bool ipOnly)
{
Str<ZT_INETADDRESS_STRING_SIZE_MAX> s;
if (ipOnly)
a.toIpString(s.s);
else a.toString(s.s);
return s;
}
Trace::Str<ZT_ADDRESS_STRING_SIZE_MAX> Trace::str(const Address &a)
{
Str<ZT_ADDRESS_STRING_SIZE_MAX> s;
a.toString(s.s);
return s;
}
Trace::Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> Trace::str(const Address &peerAddress,const SharedPtr<Path> &path)
{
Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> s;
peerAddress.toString(s.s);
s.s[11] = '(';
path->address().toString(s.s + 12);
int x = strlen(s.s);
s.s[x] = ')';
s.s[x+1] = 0;
return s;
}
void Trace::unexpectedError(
void *tPtr,
uint32_t codeLocation,

View file

@ -26,6 +26,11 @@
#include <cstdlib>
#include <vector>
#define ZT_TRACE_F_VL1 0x01U
#define ZT_TRACE_F_VL2 0x02U
#define ZT_TRACE_F_VL2_FILTER 0x04U
#define ZT_TRACE_F_VL2_MULTICAST 0x08U
namespace ZeroTier {
class RuntimeEnvironment;
@ -74,25 +79,8 @@ public:
}
};
/**
* Simple container for a C string
*
* @tparam C Capacity of string
*/
template<unsigned int C>
struct Str
{
ZT_INLINE Str() { Utils::zero<sizeof(s)>(s); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
constexpr static unsigned int capacity() { return C; }
char s[C];
};
explicit Trace(const RuntimeEnvironment *renv);
static Str<ZT_INETADDRESS_STRING_SIZE_MAX> str(const InetAddress &a,bool ipOnly = false);
static Str<ZT_ADDRESS_STRING_SIZE_MAX> str(const Address &a);
static Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> str(const Address &peerAddress,const SharedPtr<Path> &path);
void unexpectedError(
void *tPtr,
uint32_t codeLocation,
@ -108,7 +96,8 @@ public:
const InetAddress &newExternal,
const InetAddress::IpScope scope)
{
if (_vl1) _resettingPathsInScope(tPtr,codeLocation,reporter,from,oldExternal,newExternal,scope);
if ((_f & ZT_TRACE_F_VL1) != 0)
_resettingPathsInScope(tPtr,codeLocation,reporter,from,oldExternal,newExternal,scope);
}
ZT_INLINE void tryingNewPath(
@ -122,7 +111,8 @@ public:
const Identity &triggeringPeer,
ZT_TraceTryingNewPathReason reason)
{
if (_vl1) _tryingNewPath(tPtr,codeLocation,trying,physicalAddress,triggerAddress,triggeringPacketId,triggeringPacketVerb,triggeringPeer,reason);
if ((_f & ZT_TRACE_F_VL1) != 0)
_tryingNewPath(tPtr,codeLocation,trying,physicalAddress,triggerAddress,triggeringPacketId,triggeringPacketVerb,triggeringPeer,reason);
}
ZT_INLINE void learnedNewPath(
@ -133,7 +123,8 @@ public:
const InetAddress &physicalAddress,
const InetAddress &replaced)
{
if (_vl1) _learnedNewPath(tPtr,codeLocation,packetId,peerIdentity,physicalAddress,replaced);
if ((_f & ZT_TRACE_F_VL1) != 0)
_learnedNewPath(tPtr,codeLocation,packetId,peerIdentity,physicalAddress,replaced);
}
ZT_INLINE void incomingPacketDropped(
@ -147,7 +138,8 @@ public:
uint8_t verb,
const ZT_TracePacketDropReason reason)
{
if (_vl1) _incomingPacketDropped(tPtr,codeLocation,packetId,networkId,peerIdentity,physicalAddress,hops,verb,reason);
if ((_f & ZT_TRACE_F_VL1) != 0)
_incomingPacketDropped(tPtr,codeLocation,packetId,networkId,peerIdentity,physicalAddress,hops,verb,reason);
}
ZT_INLINE void outgoingNetworkFrameDropped(
@ -161,7 +153,8 @@ public:
const uint8_t *frameData,
ZT_TraceFrameDropReason reason)
{
if (_vl2) _outgoingNetworkFrameDropped(tPtr,codeLocation,networkId,sourceMac,destMac,etherType,frameLength,frameData,reason);
if ((_f & ZT_TRACE_F_VL2) != 0)
_outgoingNetworkFrameDropped(tPtr,codeLocation,networkId,sourceMac,destMac,etherType,frameLength,frameData,reason);
}
ZT_INLINE void incomingNetworkFrameDropped(
@ -179,7 +172,8 @@ public:
bool credentialRequestSent,
ZT_TraceFrameDropReason reason)
{
if (_vl2) _incomingNetworkFrameDropped(tPtr,codeLocation,networkId,sourceMac,destMac,peerIdentity,physicalAddress,hops,frameLength,frameData,verb,credentialRequestSent,reason);
if ((_f & ZT_TRACE_F_VL2) != 0)
_incomingNetworkFrameDropped(tPtr,codeLocation,networkId,sourceMac,destMac,peerIdentity,physicalAddress,hops,frameLength,frameData,verb,credentialRequestSent,reason);
}
ZT_INLINE void networkConfigRequestSent(
@ -187,7 +181,8 @@ public:
const uint32_t codeLocation,
uint64_t networkId)
{
if (_vl2) _networkConfigRequestSent(tPtr,codeLocation,networkId);
if ((_f & ZT_TRACE_F_VL2) != 0)
_networkConfigRequestSent(tPtr,codeLocation,networkId);
}
ZT_INLINE void networkFilter(
@ -210,7 +205,7 @@ public:
bool inbound,
int accept)
{
if (_vl2Filter) {
if ((_f & ZT_TRACE_F_VL2_FILTER) != 0) {
_networkFilter(
tPtr,
codeLocation,
@ -244,7 +239,8 @@ public:
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason)
{
if (_vl2) _credentialRejected(tPtr,codeLocation,networkId,address,identity,credentialId,credentialTimestamp,credentialType,reason);
if ((_f & ZT_TRACE_F_VL2) != 0)
_credentialRejected(tPtr,codeLocation,networkId,address,identity,credentialId,credentialTimestamp,credentialType,reason);
}
private:
@ -342,18 +338,7 @@ private:
ZT_TraceCredentialRejectionReason reason);
const RuntimeEnvironment *const RR;
volatile bool _vl1,_vl2,_vl2Filter,_vl2Multicast;
struct _MonitoringPeer
{
int64_t _timeSet;
unsigned int _traceTypes;
SharedPtr<Peer> peer;
Mutex lock;
};
std::vector<_MonitoringPeer> _monitoringPeers;
RWMutex _monitoringPeers_l;
volatile unsigned int _f; // faster than an atomic; changes may not be visible "instantly," but will be after the next memory fence
};
} // namespace ZeroTier

View file

@ -44,10 +44,6 @@ VL1::VL1(const RuntimeEnvironment *renv) :
{
}
VL1::~VL1()
{
}
void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAddress &fromAddr,SharedPtr<Buf> &data,const unsigned int len)
{
// Get canonical Path object for this originating address and local socket pair.
@ -63,7 +59,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
if (len == ZT_PROTO_PROBE_LENGTH) {
const SharedPtr<Peer> peer(RR->topology->peerByProbe(data->lI64(0)));
if ((peer)&&(peer->rateGateInboundProbe(now)))
path->sent(now,peer->sendNOP(tPtr,path->localSocket(),path->address(),now));
path->sent(now,peer->nop(tPtr,path->localSocket(),path->address(),now));
return;
}
@ -88,11 +84,11 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
destination.setTo(fragmentHeader.destination);
if (destination != RR->identity.address()) {
_relay(tPtr,path,destination,data,len);
m_relay(tPtr, path, destination, data, len);
return;
}
switch (_inputPacketAssembler.assemble(
switch (m_inputPacketAssembler.assemble(
fragmentHeader.packetId,
pktv,
data,
@ -122,12 +118,12 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
destination.setTo(packetHeader.destination);
if (destination != RR->identity.address()) {
_relay(tPtr,path,destination,data,len);
m_relay(tPtr, path, destination, data, len);
return;
}
if ((packetHeader.flags & ZT_PROTO_FLAG_FRAGMENTED) != 0) {
switch (_inputPacketAssembler.assemble(
switch (m_inputPacketAssembler.assemble(
packetHeader.packetId,
pktv,
data,
@ -199,11 +195,11 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
return;
}
{
Mutex::Lock wl(_whoisQueue_l);
_WhoisQueueItem &wq = _whoisQueue[source];
Mutex::Lock wl(m_whoisQueue_l);
p_WhoisQueueItem &wq = m_whoisQueue[source];
wq.inboundPackets.push_back(pkt);
}
_sendPendingWhois(tPtr,now);
m_sendPendingWhois(tPtr, now);
return;
}
@ -374,24 +370,24 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
Protocol::Verb inReVerb = Protocol::VERB_NOP; // set via result parameter to _ERROR and _OK
switch(verb) {
case Protocol::VERB_NOP: break;
case Protocol::VERB_HELLO: ok = _HELLO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_ERROR: ok = _ERROR(tPtr,path,peer,*pkt.b,(int)packetSize,inReVerb); break;
case Protocol::VERB_OK: ok = _OK(tPtr,path,peer,*pkt.b,(int)packetSize,inReVerb); break;
case Protocol::VERB_WHOIS: ok = _WHOIS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_RENDEZVOUS: ok = _RENDEZVOUS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_FRAME: ok = RR->vl2->_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_EXT_FRAME: ok = RR->vl2->_EXT_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_ECHO: ok = _ECHO(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST_LIKE: ok = RR->vl2->_MULTICAST_LIKE(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_NETWORK_CREDENTIALS: ok = RR->vl2->_NETWORK_CREDENTIALS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_NETWORK_CONFIG_REQUEST: ok = RR->vl2->_NETWORK_CONFIG_REQUEST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_NETWORK_CONFIG: ok = RR->vl2->_NETWORK_CONFIG(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST_GATHER: ok = RR->vl2->_MULTICAST_GATHER(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST_FRAME_deprecated: ok = RR->vl2->_MULTICAST_FRAME_deprecated(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_PUSH_DIRECT_PATHS: ok = _PUSH_DIRECT_PATHS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_USER_MESSAGE: ok = _USER_MESSAGE(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST: ok = RR->vl2->_MULTICAST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_ENCAP: ok = _ENCAP(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_HELLO: ok = m_HELLO(tPtr, path, peer, *pkt.b, (int) packetSize, authenticated); break;
case Protocol::VERB_ERROR: ok = m_ERROR(tPtr, path, peer, *pkt.b, (int) packetSize, inReVerb); break;
case Protocol::VERB_OK: ok = m_OK(tPtr, path, peer, *pkt.b, (int) packetSize, inReVerb); break;
case Protocol::VERB_WHOIS: ok = m_WHOIS(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_RENDEZVOUS: ok = m_RENDEZVOUS(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_FRAME: ok = RR->vl2->m_FRAME(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_EXT_FRAME: ok = RR->vl2->m_EXT_FRAME(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_ECHO: ok = m_ECHO(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_MULTICAST_LIKE: ok = RR->vl2->m_MULTICAST_LIKE(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_NETWORK_CREDENTIALS: ok = RR->vl2->m_NETWORK_CREDENTIALS(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_NETWORK_CONFIG_REQUEST: ok = RR->vl2->m_NETWORK_CONFIG_REQUEST(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_NETWORK_CONFIG: ok = RR->vl2->m_NETWORK_CONFIG(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_MULTICAST_GATHER: ok = RR->vl2->m_MULTICAST_GATHER(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_MULTICAST_FRAME_deprecated: ok = RR->vl2->m_MULTICAST_FRAME_deprecated(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_PUSH_DIRECT_PATHS: ok = m_PUSH_DIRECT_PATHS(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_USER_MESSAGE: ok = m_USER_MESSAGE(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_MULTICAST: ok = RR->vl2->m_MULTICAST(tPtr, path, peer, *pkt.b, (int) packetSize); break;
case Protocol::VERB_ENCAP: ok = m_ENCAP(tPtr, path, peer, *pkt.b, (int) packetSize); break;
default:
RR->t->incomingPacketDropped(tPtr,0xeeeeeff0,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
break;
@ -403,7 +399,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
}
}
void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destination,SharedPtr<Buf> &data,unsigned int len)
void VL1::m_relay(void *tPtr, const SharedPtr<Path> &path, const Address &destination, SharedPtr<Buf> &data, unsigned int len)
{
const uint8_t newHopCount = (data->lI8(ZT_PROTO_PACKET_FLAGS_INDEX) & 7U) + 1;
if (newHopCount >= ZT_RELAY_MAX_HOPS)
@ -421,7 +417,7 @@ void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destinati
toPath->send(RR,tPtr,data->unsafeData,len,now);
}
void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
void VL1::m_sendPendingWhois(void *tPtr, int64_t now)
{
SharedPtr<Peer> root(RR->topology->root());
if (!root)
@ -432,8 +428,8 @@ void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
std::vector<Address> toSend;
{
Mutex::Lock wl(_whoisQueue_l);
for(Map<Address,_WhoisQueueItem>::iterator wi(_whoisQueue.begin());wi!=_whoisQueue.end();++wi) {
Mutex::Lock wl(m_whoisQueue_l);
for(Map<Address,p_WhoisQueueItem>::iterator wi(m_whoisQueue.begin());wi != m_whoisQueue.end();++wi) {
if ((now - wi->second.lastRetry) >= ZT_WHOIS_RETRY_DELAY) {
wi->second.lastRetry = now;
++wi->second.retries;
@ -468,7 +464,7 @@ void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
}
}
bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,const bool authenticated)
bool VL1::m_HELLO(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize, bool authenticated)
{
if (packetSize < (int)sizeof(Protocol::HELLO)) {
RR->t->incomingPacketDropped(tPtr,0x2bdb0001,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@ -663,7 +659,7 @@ bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
return true;
}
bool VL1::_ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,Protocol::Verb &inReVerb)
bool VL1::m_ERROR(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
{
if (packetSize < (int)sizeof(Protocol::ERROR::Header)) {
RR->t->incomingPacketDropped(tPtr,0x3beb1947,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_ERROR,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@ -708,7 +704,7 @@ bool VL1::_ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &p
return true;
}
bool VL1::_OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,Protocol::Verb &inReVerb)
bool VL1::m_OK(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
{
if (packetSize < (int)sizeof(Protocol::OK::Header)) {
RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_OK,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@ -741,7 +737,7 @@ bool VL1::_OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer
return true;
}
bool VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL1::m_WHOIS(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
if (packetSize < (int)sizeof(Protocol::OK::Header)) {
RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_OK,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@ -793,7 +789,7 @@ bool VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &p
return true;
}
bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL1::m_RENDEZVOUS(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
if (RR->topology->isRoot(peer->identity())) {
if (packetSize < (int)sizeof(Protocol::RENDEZVOUS)) {
@ -839,7 +835,7 @@ bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Pee
return true;
}
bool VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL1::m_ECHO(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
const uint64_t packetId = Protocol::packetId(pkt,packetSize);
const uint64_t now = RR->node->now();
@ -875,7 +871,7 @@ bool VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &pe
return true;
}
bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL1::m_PUSH_DIRECT_PATHS(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
if (packetSize < (int)sizeof(Protocol::PUSH_DIRECT_PATHS)) {
RR->t->incomingPacketDropped(tPtr,0x1bb1bbb1,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@ -964,13 +960,13 @@ bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const Shared
return true;
}
bool VL1::_USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL1::m_USER_MESSAGE(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
// TODO
return true;
}
bool VL1::_ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL1::m_ENCAP(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
// TODO: not implemented yet
return true;

View file

@ -40,7 +40,6 @@ class VL1
{
public:
explicit VL1(const RuntimeEnvironment *renv);
~VL1();
/**
* Called when a packet is received from the real network
@ -63,34 +62,34 @@ private:
const RuntimeEnvironment *RR;
// Code to handle relaying of packets to other nodes.
void _relay(void *tPtr,const SharedPtr<Path> &path,const Address &destination,SharedPtr<Buf> &data,unsigned int len);
void m_relay(void *tPtr, const SharedPtr<Path> &path, const Address &destination, SharedPtr<Buf> &data, unsigned int len);
// Send any pending WHOIS requests.
void _sendPendingWhois(void *tPtr,int64_t now);
void m_sendPendingWhois(void *tPtr, int64_t now);
// Handlers for VL1 verbs -- for clarity's sake VL2 verbs are in the VL2 class.
bool _HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
bool _ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,Protocol::Verb &inReVerb);
bool _OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,Protocol::Verb &inReVerb);
bool _WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool m_HELLO(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize, bool authenticated);
bool m_ERROR(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_OK(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_WHOIS(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_RENDEZVOUS(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ECHO(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_PUSH_DIRECT_PATHS(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_USER_MESSAGE(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ENCAP(void *tPtr, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
struct _WhoisQueueItem
struct p_WhoisQueueItem
{
ZT_INLINE _WhoisQueueItem() : lastRetry(0),inboundPackets(),retries(0) {}
ZT_INLINE p_WhoisQueueItem() : lastRetry(0), inboundPackets(), retries(0) {}
int64_t lastRetry;
FCV<Buf::Slice,32> inboundPackets; // capacity can be changed but this should be plenty
unsigned int retries;
};
Defragmenter<ZT_MAX_PACKET_FRAGMENTS> _inputPacketAssembler;
Defragmenter<ZT_MAX_PACKET_FRAGMENTS> m_inputPacketAssembler;
Map<Address,_WhoisQueueItem> _whoisQueue;
Mutex _whoisQueue_l;
Map<Address,p_WhoisQueueItem> m_whoisQueue;
Mutex m_whoisQueue_l;
};
} // namespace ZeroTier

View file

@ -27,47 +27,43 @@ VL2::VL2(const RuntimeEnvironment *renv)
{
}
VL2::~VL2()
{
}
void VL2::onLocalEthernet(void *const tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,const unsigned int etherType,unsigned int vlanId,SharedPtr<Buf> &data,unsigned int len)
{
}
bool VL2::_FRAME(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_FRAME(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_EXT_FRAME(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_EXT_FRAME(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_MULTICAST_LIKE(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_MULTICAST_LIKE(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_NETWORK_CREDENTIALS(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_NETWORK_CREDENTIALS(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_NETWORK_CONFIG_REQUEST(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_NETWORK_CONFIG_REQUEST(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_NETWORK_CONFIG(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_NETWORK_CONFIG(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_MULTICAST_GATHER(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_MULTICAST_GATHER(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_MULTICAST_FRAME_deprecated(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_MULTICAST_FRAME_deprecated(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}
bool VL2::_MULTICAST(void *const tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
bool VL2::m_MULTICAST(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
}

View file

@ -36,7 +36,6 @@ class VL2
public:
explicit VL2(const RuntimeEnvironment *renv);
~VL2();
/**
* Called when a packet comes from a local Ethernet tap
@ -53,15 +52,15 @@ public:
void onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,SharedPtr<Buf> &data,unsigned int len);
protected:
bool _FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool m_FRAME(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_EXT_FRAME(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_LIKE(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CREDENTIALS(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG_REQUEST(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_GATHER(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_FRAME_deprecated(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST(void *tPtr, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
private:
};