Yet more major and very nit-picky refactoring for performance, etc. Also use std::atomic<> now, with a TODO to implement a shim if we ever need to build this on a pre-C++11 compiler.

This commit is contained in:
Adam Ierymenko 2020-02-12 14:04:25 -08:00
parent 60de5ed3dd
commit f21ecb3762
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
64 changed files with 1711 additions and 1135 deletions

View file

@ -1352,7 +1352,7 @@ typedef struct
const ZT_Identity *identity;
/**
* Hash of identity public key(s)
* SHA384 hash of identity public key(s)
*/
uint8_t identityHash[48];
@ -1390,15 +1390,25 @@ typedef struct
*/
struct sockaddr_storage bootstrap;
/**
* Number of networks in which this peer is authenticated
*/
unsigned int networkCount;
/**
* Network IDs for networks (array size: networkCount)
*/
uint64_t *networks;
/**
* Number of paths (size of paths[])
*/
unsigned int pathCount;
/**
* Known network paths to peer
* Known network paths to peer (array size: pathCount)
*/
ZT_PeerPhysicalPath paths[ZT_MAX_PEER_NETWORK_PATHS];
ZT_PeerPhysicalPath *paths;
} ZT_Peer;
/**

View file

@ -14,10 +14,6 @@
#include "Constants.hpp"
#include "AES.hpp"
#ifdef __WINDOWS__
#include <intrin.h>
#endif
namespace ZeroTier {
#ifdef ZT_NO_UNALIGNED_ACCESS
@ -50,7 +46,7 @@ const uint32_t AES::Te2[256] = { 0x63a5c663,0x7c84f87c,0x7799ee77,0x7b8df67b,0xf
const uint32_t AES::Te3[256] = { 0x6363a5c6,0x7c7c84f8,0x777799ee,0x7b7b8df6,0xf2f20dff,0x6b6bbdd6,0x6f6fb1de,0xc5c55491,0x30305060,0x01010302,0x6767a9ce,0x2b2b7d56,0xfefe19e7,0xd7d762b5,0xababe64d,0x76769aec,0xcaca458f,0x82829d1f,0xc9c94089,0x7d7d87fa,0xfafa15ef,0x5959ebb2,0x4747c98e,0xf0f00bfb,0xadadec41,0xd4d467b3,0xa2a2fd5f,0xafafea45,0x9c9cbf23,0xa4a4f753,0x727296e4,0xc0c05b9b,0xb7b7c275,0xfdfd1ce1,0x9393ae3d,0x26266a4c,0x36365a6c,0x3f3f417e,0xf7f702f5,0xcccc4f83,0x34345c68,0xa5a5f451,0xe5e534d1,0xf1f108f9,0x717193e2,0xd8d873ab,0x31315362,0x15153f2a,0x04040c08,0xc7c75295,0x23236546,0xc3c35e9d,0x18182830,0x9696a137,0x05050f0a,0x9a9ab52f,0x0707090e,0x12123624,0x80809b1b,0xe2e23ddf,0xebeb26cd,0x2727694e,0xb2b2cd7f,0x75759fea,0x09091b12,0x83839e1d,0x2c2c7458,0x1a1a2e34,0x1b1b2d36,0x6e6eb2dc,0x5a5aeeb4,0xa0a0fb5b,0x5252f6a4,0x3b3b4d76,0xd6d661b7,0xb3b3ce7d,0x29297b52,0xe3e33edd,0x2f2f715e,0x84849713,0x5353f5a6,0xd1d168b9,0x00000000,0xeded2cc1,0x20206040,0xfcfc1fe3,0xb1b1c879,0x5b5bedb6,0x6a6abed4,0xcbcb468d,0xbebed967,0x39394b72,0x4a4ade94,0x4c4cd498,0x5858e8b0,0xcfcf4a85,0xd0d06bbb,0xefef2ac5,0xaaaae54f,0xfbfb16ed,0x4343c586,0x4d4dd79a,0x33335566,0x85859411,0x4545cf8a,0xf9f910e9,0x02020604,0x7f7f81fe,0x5050f0a0,0x3c3c4478,0x9f9fba25,0xa8a8e34b,0x5151f3a2,0xa3a3fe5d,0x4040c080,0x8f8f8a05,0x9292ad3f,0x9d9dbc21,0x38384870,0xf5f504f1,0xbcbcdf63,0xb6b6c177,0xdada75af,0x21216342,0x10103020,0xffff1ae5,0xf3f30efd,0xd2d26dbf,0xcdcd4c81,0x0c0c1418,0x13133526,0xecec2fc3,0x5f5fe1be,0x9797a235,0x4444cc88,0x1717392e,0xc4c45793,0xa7a7f255,0x7e7e82fc,0x3d3d477a,0x6464acc8,0x5d5de7ba,0x19192b32,0x737395e6,0x6060a0c0,0x81819819,0x4f4fd19e,0xdcdc7fa3,0x22226644,0x2a2a7e54,0x9090ab3b,0x8888830b,0x4646ca8c,0xeeee29c7,0xb8b8d36b,0x14143c28,0xdede79a7,0x5e5ee2bc,0x0b0b1d16,0xdbdb76ad,0xe0e03bdb,0x32325664,0x3a3a4e74,0x0a0a1e14,0x4949db92,0x06060a0c,0x24246c48,0x5c5ce4b8,0xc2c25d9f,0xd3d36ebd,0xacacef43,0x6262a6c4,0x9191a839,0x9595a431,0xe4e437d3,0x79798bf2,0xe7e732d5,0xc8c8438b,0x3737596
e,0x6d6db7da,0x8d8d8c01,0xd5d564b1,0x4e4ed29c,0xa9a9e049,0x6c6cb4d8,0x5656faac,0xf4f407f3,0xeaea25cf,0x6565afca,0x7a7a8ef4,0xaeaee947,0x08081810,0xbabad56f,0x787888f0,0x25256f4a,0x2e2e725c,0x1c1c2438,0xa6a6f157,0xb4b4c773,0xc6c65197,0xe8e823cb,0xdddd7ca1,0x74749ce8,0x1f1f213e,0x4b4bdd96,0xbdbddc61,0x8b8b860d,0x8a8a850f,0x707090e0,0x3e3e427c,0xb5b5c471,0x6666aacc,0x4848d890,0x03030506,0xf6f601f7,0x0e0e121c,0x6161a3c2,0x35355f6a,0x5757f9ae,0xb9b9d069,0x86869117,0xc1c15899,0x1d1d273a,0x9e9eb927,0xe1e138d9,0xf8f813eb,0x9898b32b,0x11113322,0x6969bbd2,0xd9d970a9,0x8e8e8907,0x9494a733,0x9b9bb62d,0x1e1e223c,0x87879215,0xe9e920c9,0xcece4987,0x5555ffaa,0x28287850,0xdfdf7aa5,0x8c8c8f03,0xa1a1f859,0x89898009,0x0d0d171a,0xbfbfda65,0xe6e631d7,0x4242c684,0x6868b8d0,0x4141c382,0x9999b029,0x2d2d775a,0x0f0f111e,0xb0b0cb7b,0x5454fca8,0xbbbbd66d,0x16163a2c };
const uint32_t AES::rcon[10] = { 0x01000000,0x02000000,0x04000000,0x08000000,0x10000000,0x20000000,0x40000000,0x80000000,0x1B000000,0x36000000 };
void AES::_initSW(const uint8_t key[32])
void AES::_initSW(const uint8_t key[32]) noexcept
{
uint32_t *rk = _k.sw.ek;
@ -84,7 +80,7 @@ void AES::_initSW(const uint8_t key[32])
_k.sw.h[1] = Utils::ntoh(_k.sw.h[1]);
}
void AES::_encryptSW(const uint8_t in[16],uint8_t out[16]) const
void AES::_encryptSW(const uint8_t in[16],uint8_t out[16]) const noexcept
{
const uint32_t *const rk = _k.sw.ek;
uint32_t s0 = readuint32_t(in) ^ rk[0];
@ -157,7 +153,7 @@ typedef unsigned __int128 uint128_t;
typedef unsigned uint128_t __attribute__((mode(TI)));
#endif
static inline void s_bmul64(const uint64_t x,const uint64_t y,uint64_t &r_high,uint64_t &r_low)
static ZT_ALWAYS_INLINE void s_bmul64(const uint64_t x,const uint64_t y,uint64_t &r_high,uint64_t &r_low) noexcept
{
static uint128_t m1 = (uint128_t)0x2108421084210842ULL << 64U | 0x1084210842108421ULL;
static uint128_t m2 = (uint128_t)0x4210842108421084ULL << 64U | 0x2108421084210842ULL;
@ -188,7 +184,7 @@ static inline void s_bmul64(const uint64_t x,const uint64_t y,uint64_t &r_high,u
r_low = (uint64_t)r;
}
static inline void s_gfmul(const uint64_t h_high,const uint64_t h_low,uint64_t &y0, uint64_t &y1)
static ZT_ALWAYS_INLINE void s_gfmul(const uint64_t h_high,const uint64_t h_low,uint64_t &y0, uint64_t &y1) noexcept
{
uint64_t z2_low,z2_high,z0_low,z0_high,z1a_low,z1a_high;
uint64_t y_high = Utils::ntoh(y0);
@ -210,7 +206,7 @@ static inline void s_gfmul(const uint64_t h_high,const uint64_t h_low,uint64_t &
#else
static inline void s_bmul32(uint32_t x,uint32_t y,uint32_t &r_high,uint32_t &r_low)
static ZT_ALWAYS_INLINE void s_bmul32(uint32_t x,uint32_t y,uint32_t &r_high,uint32_t &r_low) noexcept
{
const uint32_t m1 = (uint32_t)0x11111111;
const uint32_t m2 = (uint32_t)0x22222222;
@ -237,7 +233,7 @@ static inline void s_bmul32(uint32_t x,uint32_t y,uint32_t &r_high,uint32_t &r_l
r_low = (uint32_t)z;
}
static inline void s_gfmul(const uint64_t h_high,const uint64_t h_low,uint64_t &y0,uint64_t &y1)
static ZT_ALWAYS_INLINE void s_gfmul(const uint64_t h_high,const uint64_t h_low,uint64_t &y0,uint64_t &y1) noexcept
{
uint32_t h_high_h = (uint32_t)(h_high >> 32);
uint32_t h_high_l = (uint32_t)h_high;
@ -298,7 +294,7 @@ static inline void s_gfmul(const uint64_t h_high,const uint64_t h_low,uint64_t &
#endif
void AES::_gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const
void AES::_gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const noexcept
{
const uint64_t h0 = _k.sw.h[0];
const uint64_t h1 = _k.sw.h[1];
@ -347,7 +343,7 @@ void AES::_gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_
#ifdef ZT_AES_AESNI
static ZT_ALWAYS_INLINE __m128i _mult_block_aesni(__m128i shuf,__m128i h,__m128i y)
static ZT_ALWAYS_INLINE __m128i _mult_block_aesni(__m128i shuf,__m128i h,__m128i y) noexcept
{
y = _mm_shuffle_epi8(y,shuf);
__m128i t1 = _mm_clmulepi64_si128(h,y,0x00);
@ -387,12 +383,13 @@ static ZT_ALWAYS_INLINE __m128i _mult_block_aesni(__m128i shuf,__m128i h,__m128i
t4 = _mm_xor_si128(t4,t5);
return _mm_shuffle_epi8(t4,shuf);
}
static ZT_ALWAYS_INLINE __m128i _ghash_aesni(__m128i shuf,__m128i h,__m128i y,__m128i x)
static ZT_ALWAYS_INLINE __m128i _ghash_aesni(__m128i shuf,__m128i h,__m128i y,__m128i x) noexcept
{
return _mult_block_aesni(shuf,h,_mm_xor_si128(y,x));
}
static ZT_ALWAYS_INLINE __m128i _init256_1_aesni(__m128i a,__m128i b)
static ZT_ALWAYS_INLINE __m128i _init256_1_aesni(__m128i a,__m128i b) noexcept
{
__m128i x,y;
b = _mm_shuffle_epi32(b,0xff);
@ -405,7 +402,8 @@ static ZT_ALWAYS_INLINE __m128i _init256_1_aesni(__m128i a,__m128i b)
x = _mm_xor_si128(x,b);
return x;
}
static ZT_ALWAYS_INLINE __m128i _init256_2_aesni(__m128i a,__m128i b)
static ZT_ALWAYS_INLINE __m128i _init256_2_aesni(__m128i a,__m128i b) noexcept
{
__m128i x,y,z;
y = _mm_aeskeygenassist_si128(a,0x00);
@ -420,9 +418,11 @@ static ZT_ALWAYS_INLINE __m128i _init256_2_aesni(__m128i a,__m128i b)
return x;
}
void AES::_init_aesni(const uint8_t key[32])
void AES::_init_aesni(const uint8_t key[32]) noexcept
{
__m128i t1,t2;
// AES-256 encryption key expansion
_k.ni.k[0] = t1 = _mm_loadu_si128((const __m128i *)key);
_k.ni.k[1] = t2 = _mm_loadu_si128((const __m128i *)(key+16));
_k.ni.k[2] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x01));
@ -439,6 +439,7 @@ void AES::_init_aesni(const uint8_t key[32])
_k.ni.k[13] = t2 = _init256_2_aesni(t1,t2);
_k.ni.k[14] = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x40));
// Initialize GMAC/GHASH constants
__m128i h = _mm_xor_si128(_mm_setzero_si128(),_k.ni.k[0]);
h = _mm_aesenc_si128(h,_k.ni.k[1]);
h = _mm_aesenc_si128(h,_k.ni.k[2]);
@ -454,7 +455,6 @@ void AES::_init_aesni(const uint8_t key[32])
h = _mm_aesenc_si128(h,_k.ni.k[12]);
h = _mm_aesenc_si128(h,_k.ni.k[13]);
h = _mm_aesenclast_si128(h,_k.ni.k[14]);
const __m128i shuf = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);
__m128i hswap = _mm_shuffle_epi8(h,shuf);
__m128i hh = _mult_block_aesni(shuf,hswap,h);
@ -466,9 +466,9 @@ void AES::_init_aesni(const uint8_t key[32])
_k.ni.hhhh = _mm_shuffle_epi8(hhhh,shuf);
}
void AES::_gmac_aesni(const uint8_t iv[12],const uint8_t *in,const unsigned int len,uint8_t out[16]) const
void AES::_gmac_aesni(const uint8_t iv[12],const uint8_t *in,const unsigned int len,uint8_t out[16]) const noexcept
{
const __m128i *const ab = (const __m128i *)in;
const __m128i *const ab = reinterpret_cast<const __m128i *>(in);
const unsigned int blocks = len / 16;
const unsigned int pblocks = blocks - (blocks % 4);
const unsigned int rem = len % 16;
@ -477,11 +477,10 @@ void AES::_gmac_aesni(const uint8_t iv[12],const uint8_t *in,const unsigned int
__m128i y = _mm_setzero_si128();
unsigned int i = 0;
for (;i<pblocks;i+=4) {
__m128i d1 = _mm_shuffle_epi8(_mm_xor_si128(y,_mm_loadu_si128(ab + i + 0)),shuf);
__m128i d1 = _mm_shuffle_epi8(_mm_xor_si128(y,_mm_loadu_si128(ab + i)),shuf);
__m128i d2 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 1),shuf);
__m128i d3 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 2),shuf);
__m128i d4 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 3),shuf);
_mm_prefetch(ab + i + 4,_MM_HINT_T0);
__m128i t0 = _mm_clmulepi64_si128(_k.ni.hhhh,d1,0x00);
__m128i t1 = _mm_clmulepi64_si128(_k.ni.hhh,d2,0x00);
__m128i t2 = _mm_clmulepi64_si128(_k.ni.hh,d3,0x00);

View file

@ -36,14 +36,14 @@ namespace ZeroTier {
class AES
{
public:
ZT_ALWAYS_INLINE AES() {}
explicit ZT_ALWAYS_INLINE AES(const uint8_t key[32]) { this->init(key); }
ZT_ALWAYS_INLINE AES() noexcept {}
explicit ZT_ALWAYS_INLINE AES(const uint8_t key[32]) noexcept { this->init(key); }
ZT_ALWAYS_INLINE ~AES() { Utils::burn(&_k,sizeof(_k)); }
/**
* Set (or re-set) this AES256 cipher's key
*/
ZT_ALWAYS_INLINE void init(const uint8_t key[32])
ZT_ALWAYS_INLINE void init(const uint8_t key[32]) noexcept
{
#ifdef ZT_AES_AESNI
if (likely(Utils::CPUID.aes)) {
@ -60,7 +60,7 @@ public:
* @param in Input block
* @param out Output block (can be same as input)
*/
ZT_ALWAYS_INLINE void encrypt(const uint8_t in[16],uint8_t out[16]) const
ZT_ALWAYS_INLINE void encrypt(const uint8_t in[16],uint8_t out[16]) const noexcept
{
#ifdef ZT_AES_AESNI
if (likely(Utils::CPUID.aes)) {
@ -71,11 +71,6 @@ public:
_encryptSW(in,out);
}
ZT_ALWAYS_INLINE void gcm(const uint8_t iv[12],const void *in,const unsigned int len,uint8_t out[16],uint8_t tag[16]) const
{
// TODO
}
private:
static const uint32_t Te0[256];
static const uint32_t Te1[256];
@ -83,11 +78,10 @@ private:
static const uint32_t Te3[256];
static const uint32_t rcon[10];
void _initSW(const uint8_t key[32]);
void _encryptSW(const uint8_t in[16],uint8_t out[16]) const;
void _gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const;
void _initSW(const uint8_t key[32]) noexcept;
void _encryptSW(const uint8_t in[16],uint8_t out[16]) const noexcept;
void _gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const noexcept;
/**************************************************************************/
union {
#ifdef ZT_AES_ARMNEON
// ARM NEON key and GMAC parameters
@ -110,10 +104,9 @@ private:
uint32_t ek[60];
} sw;
} _k;
/**************************************************************************/
#ifdef ZT_AES_ARMNEON /******************************************************/
static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2)
#ifdef ZT_AES_ARMNEON
static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2) noexcept
{
uint32_t round1[4], round2[4], prv1[4], prv2[4];
vst1q_u32(prv1, prev1);
@ -131,7 +124,8 @@ private:
//uint32x4_t expansion[2] = {vld1q_u3(round1), vld1q_u3(round2)};
//return expansion;
}
inline void _init_armneon(uint8x16_t encKey)
inline void _init_armneon(uint8x16_t encKey) noexcept
{
uint32x4_t *schedule = _k.neon.k;
uint32x4_t e1,e2;
@ -175,7 +169,7 @@ private:
*/
}
inline void _encrypt_armneon(uint8x16_t *data) const
inline void _encrypt_armneon(uint8x16_t *data) const noexcept
{
*data = veorq_u8(*data, _k.neon.k[0]);
*data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[1]));
@ -193,12 +187,12 @@ private:
*data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[13]));
*data = vaeseq_u8(*data, _k.neon.k[14]);
}
#endif /*********************************************************************/
#endif
#ifdef ZT_AES_AESNI /********************************************************/
void _init_aesni(const uint8_t key[32]);
#ifdef ZT_AES_AESNI
void _init_aesni(const uint8_t key[32]) noexcept;
ZT_ALWAYS_INLINE void _encrypt_aesni(const void *const in,void *const out) const
ZT_ALWAYS_INLINE void _encrypt_aesni(const void *const in,void *const out) const noexcept
{
__m128i tmp;
tmp = _mm_loadu_si128((const __m128i *)in);
@ -219,8 +213,8 @@ private:
_mm_storeu_si128((__m128i *)out,_mm_aesenclast_si128(tmp,_k.ni.k[14]));
}
void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const;
#endif /* ZT_AES_AESNI ******************************************************/
void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const noexcept;
#endif
};
} // namespace ZeroTier

View file

@ -14,20 +14,12 @@
#ifndef ZT_ADDRESS_HPP
#define ZT_ADDRESS_HPP
#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <cstring>
#include <cmath>
#include <string>
#include <vector>
#include <algorithm>
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#define ZT_ADDRESS_STRING_SIZE_MAX 11
namespace ZeroTier {
/**
@ -36,17 +28,17 @@ namespace ZeroTier {
class Address : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE Address() : _a(0) {}
explicit ZT_ALWAYS_INLINE Address(const uint8_t b[5]) : _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]) {}
explicit ZT_ALWAYS_INLINE Address(const uint64_t a) : _a(a & 0xffffffffffULL) {}
ZT_ALWAYS_INLINE Address() noexcept : _a(0) {}
explicit ZT_ALWAYS_INLINE Address(const uint8_t b[5]) noexcept : _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]) {}
explicit ZT_ALWAYS_INLINE Address(const uint64_t a) noexcept : _a(a & 0xffffffffffULL) {}
ZT_ALWAYS_INLINE Address &operator=(const uint64_t a) { _a = (a & 0xffffffffffULL); return *this; }
ZT_ALWAYS_INLINE Address &operator=(const uint64_t a) noexcept { _a = (a & 0xffffffffffULL); return *this; }
/**
* @param bits Raw address -- 5 bytes, big-endian byte order
* @param len Length of array
*/
ZT_ALWAYS_INLINE void setTo(const uint8_t b[5])
ZT_ALWAYS_INLINE void setTo(const uint8_t b[5]) noexcept
{
_a = ((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4];
}
@ -55,7 +47,7 @@ public:
* @param bits Buffer to hold 5-byte address in big-endian byte order
* @param len Length of array
*/
ZT_ALWAYS_INLINE void copyTo(uint8_t b[5]) const
ZT_ALWAYS_INLINE void copyTo(uint8_t b[5]) const noexcept
{
b[0] = (uint8_t)(_a >> 32U);
b[1] = (uint8_t)(_a >> 24U);
@ -67,17 +59,22 @@ public:
/**
* @return Integer containing address (0 to 2^40)
*/
ZT_ALWAYS_INLINE uint64_t toInt() const { return _a; }
ZT_ALWAYS_INLINE uint64_t toInt() const noexcept { return _a; }
/**
* Set address to zero/NIL
*/
ZT_ALWAYS_INLINE void zero() noexcept { _a = 0; }
/**
* @return Hash code for use with Hashtable
*/
ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)_a; }
ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_a; }
/**
* @return Hexadecimal string
*/
ZT_ALWAYS_INLINE char *toString(char buf[11]) const { return Utils::hex10(_a,buf); }
ZT_ALWAYS_INLINE char *toString(char buf[ZT_ADDRESS_STRING_SIZE_MAX]) const noexcept { return Utils::hex10(_a,buf); }
/**
* Check if this address is reserved
@ -88,31 +85,29 @@ public:
*
* @return True if address is reserved and may not be used
*/
ZT_ALWAYS_INLINE bool isReserved() const { return ((!_a)||((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
ZT_ALWAYS_INLINE bool isReserved() const noexcept { return ((!_a)||((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
/**
* @param i Value from 0 to 4 (inclusive)
* @return Byte at said position (address interpreted in big-endian order)
*/
ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const { return (uint8_t)(_a >> (32 - (i * 8))); }
ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const noexcept { return (uint8_t)(_a >> (32 - (i * 8))); }
ZT_ALWAYS_INLINE operator bool() const { return (_a != 0); }
ZT_ALWAYS_INLINE operator bool() const noexcept { return (_a != 0); }
ZT_ALWAYS_INLINE void zero() { _a = 0; }
ZT_ALWAYS_INLINE bool operator==(const uint64_t &a) const noexcept { return (_a == (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator!=(const uint64_t &a) const noexcept { return (_a != (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator>(const uint64_t &a) const noexcept { return (_a > (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator<(const uint64_t &a) const noexcept { return (_a < (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator>=(const uint64_t &a) const noexcept { return (_a >= (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator<=(const uint64_t &a) const noexcept { return (_a <= (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator==(const uint64_t &a) const { return (_a == (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator!=(const uint64_t &a) const { return (_a != (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator>(const uint64_t &a) const { return (_a > (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator<(const uint64_t &a) const { return (_a < (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator>=(const uint64_t &a) const { return (_a >= (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator<=(const uint64_t &a) const { return (_a <= (a & 0xffffffffffULL)); }
ZT_ALWAYS_INLINE bool operator==(const Address &a) const { return (_a == a._a); }
ZT_ALWAYS_INLINE bool operator!=(const Address &a) const { return (_a != a._a); }
ZT_ALWAYS_INLINE bool operator>(const Address &a) const { return (_a > a._a); }
ZT_ALWAYS_INLINE bool operator<(const Address &a) const { return (_a < a._a); }
ZT_ALWAYS_INLINE bool operator>=(const Address &a) const { return (_a >= a._a); }
ZT_ALWAYS_INLINE bool operator<=(const Address &a) const { return (_a <= a._a); }
ZT_ALWAYS_INLINE bool operator==(const Address &a) const noexcept { return (_a == a._a); }
ZT_ALWAYS_INLINE bool operator!=(const Address &a) const noexcept { return (_a != a._a); }
ZT_ALWAYS_INLINE bool operator>(const Address &a) const noexcept { return (_a > a._a); }
ZT_ALWAYS_INLINE bool operator<(const Address &a) const noexcept { return (_a < a._a); }
ZT_ALWAYS_INLINE bool operator>=(const Address &a) const noexcept { return (_a >= a._a); }
ZT_ALWAYS_INLINE bool operator<=(const Address &a) const noexcept { return (_a <= a._a); }
#if 0
/**

View file

@ -1,80 +0,0 @@
/*
* Copyright (c)2013-2020 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2024-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_ATOMICCOUNTER_HPP
#define ZT_ATOMICCOUNTER_HPP
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
#ifndef __GNUC__
#include <intrin.h>
#endif
namespace ZeroTier {
/**
* Simple atomic integer used for reference and other counters
*
* @tparam T Type of underlying integer (default: int)
*/
// Simple atomic integer counter (pre-std::atomic design, removed by this
// commit). On GCC-compatible compilers it manipulates a plain integer via
// the legacy __sync_* builtins; on other compilers it wraps std::atomic<T>.
template<typename T = int>
class AtomicCounter : public TriviallyCopyable
{
public:
// Initialize the counter to zero.
ZT_ALWAYS_INLINE AtomicCounter() : _v(0) {}
// Initialize the counter to an explicit starting value.
explicit ZT_ALWAYS_INLINE AtomicCounter(T iv) : _v(iv) {}
// Atomically read the current value.
ZT_ALWAYS_INLINE T load() const
{
#ifdef __GNUC__
// OR-with-zero is a no-op write used to obtain an atomic read with a
// full barrier. NOTE(review): _v is const-qualified in this const
// method; confirm the builtin accepted &_v here without a const_cast
// in the original build.
return __sync_or_and_fetch(&_v,0);
#else
return _v.load();
#endif
}
// Reset the counter to zero. NOTE(review): in the __GNUC__ branch this is
// a plain (non-atomic) store — confirm callers only invoke it when no
// concurrent access is possible.
ZT_ALWAYS_INLINE void zero() { _v = 0; }
// Atomic pre-increment; returns the incremented value.
ZT_ALWAYS_INLINE T operator++()
{
#ifdef __GNUC__
return __sync_add_and_fetch(&_v,1);
#else
return ++_v;
#endif
}
// Atomic pre-decrement; returns the decremented value.
ZT_ALWAYS_INLINE T operator--()
{
#ifdef __GNUC__
return __sync_sub_and_fetch(&_v,1);
#else
return --_v;
#endif
}
private:
// Copying an atomic counter is meaningless: copy construction/assignment
// are private no-ops, the pre-C++11 idiom for "non-copyable" (today this
// would be "= delete"). Note the copy constructor leaves _v untouched.
ZT_ALWAYS_INLINE AtomicCounter(const AtomicCounter &) {}
ZT_ALWAYS_INLINE const AtomicCounter &operator=(const AtomicCounter &) { return *this; }
#ifdef __GNUC__
T _v; // plain integer, only ever touched via the __sync_* builtins above
#else
typename std::atomic<T> _v;
#endif
};
} // namespace ZeroTier
#endif

View file

@ -15,92 +15,56 @@
namespace ZeroTier {
#ifdef __GNUC__
uintptr_t _Buf_pool = 0;
#else
std::atomic<uintptr_t> _Buf_pool(0);
#endif
static std::atomic<uintptr_t> s_pool(0);
void _Buf_release(void *ptr,std::size_t sz)
{
if (ptr) {
uintptr_t bb;
const uintptr_t locked = ~((uintptr_t)0);
for (;;) {
#ifdef __GNUC__
bb = __sync_fetch_and_or(&_Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
#else
bb = s_pool.fetch_or(locked);
#endif
if (bb != locked)
break;
}
((Buf *)ptr)->__nextInPool = bb;
#ifdef __GNUC__
__sync_fetch_and_and(&_Buf_pool,(uintptr_t)ptr);
#else
s_pool.store((uintptr_t)ptr);
#endif
}
}
void *_Buf_get()
void *Buf::operator new(std::size_t sz) noexcept
{
uintptr_t bb;
const uintptr_t locked = ~((uintptr_t)0);
for (;;) {
#ifdef __GNUC__
bb = __sync_fetch_and_or(&_Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
#else
bb = s_pool.fetch_or(locked);
#endif
if (bb != locked)
bb = s_pool.exchange(~((uintptr_t)0));
if (bb != ~((uintptr_t)0))
break;
}
Buf *b;
if (bb == 0) {
#ifdef __GNUC__
__sync_fetch_and_and(&_Buf_pool,bb);
#else
s_pool.store(bb);
#endif
b = (Buf *)malloc(sizeof(Buf));
if (!b)
throw std::bad_alloc();
} else {
if (bb) {
b = (Buf *)bb;
#ifdef __GNUC__
__sync_fetch_and_and(&_Buf_pool,b->__nextInPool);
#else
s_pool.store(b->__nextInPool);
#endif
} else {
s_pool.store(0);
b = (Buf *)malloc(sz);
if (!b)
return nullptr;
}
b->__refCount.zero();
b->__refCount.store(0);
return (void *)b;
}
void freeBufPool()
void Buf::operator delete(void *ptr) noexcept
{
if (ptr) {
uintptr_t bb;
const uintptr_t locked = ~((uintptr_t)0);
for (;;) {
#ifdef __GNUC__
bb = __sync_fetch_and_or(&_Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
#else
bb = s_pool.fetch_or(locked);
#endif
if (bb != locked)
bb = s_pool.exchange(~((uintptr_t)0));
if (bb != ~((uintptr_t)0))
break;
}
#ifdef __GNUC__
__sync_fetch_and_and(&_Buf_pool,(uintptr_t)0);
#else
((Buf *)ptr)->__nextInPool = bb;
s_pool.store((uintptr_t)ptr);
}
}
void Buf::freePool() noexcept
{
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(~((uintptr_t)0));
if (bb != ~((uintptr_t)0))
break;
}
s_pool.store((uintptr_t)0);
#endif
while (bb != 0) {
uintptr_t next = ((Buf *)bb)->__nextInPool;

View file

@ -15,7 +15,6 @@
#define ZT_BUF_HPP
#include "Constants.hpp"
#include "AtomicCounter.hpp"
#include "Utils.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
@ -28,10 +27,7 @@
#include <stdexcept>
#include <utility>
#include <algorithm>
#ifndef __GNUC__
#include <atomic>
#endif
#include <new>
// Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
// and is a power of two. It needs to be a power of two because masking is significantly faster
@ -41,23 +37,6 @@
namespace ZeroTier {
#ifdef __GNUC__
extern uintptr_t _Buf_pool;
#else
extern std::atomic<uintptr_t> _Buf_pool;
#endif
void _Buf_release(void *ptr,std::size_t sz);
void *_Buf_get();
/**
* Free all instances of Buf in shared pool.
*
* New buffers will be created and the pool repopulated if get() is called
* and outstanding buffers will still be returned to the pool. This just
* frees buffers currently held in reserve.
*/
void freeBufPool();
/**
* Buffer and methods for branch-free bounds-checked data assembly and parsing
*
@ -101,24 +80,32 @@ void freeBufPool();
class Buf
{
friend class SharedPtr< Buf >;
friend void _Buf_release(void *,std::size_t);
friend void *_Buf_get();
friend void freeBufPool();
public:
static void operator delete(void *ptr,std::size_t sz) { _Buf_release(ptr,sz); }
// New and delete operators that allocate Buf instances from a shared lock-free memory pool.
static void *operator new(std::size_t sz) noexcept;
static void operator delete(void *ptr) noexcept;
/**
* Free all instances of Buf in shared pool.
*
* New buffers will be created and the pool repopulated if get() is called
* and outstanding buffers will still be returned to the pool. This just
* frees buffers currently held in reserve.
*/
static void freePool() noexcept;
/**
* Slice is almost exactly like the built-in slice data structure in Go
*/
struct Slice : TriviallyCopyable
{
ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) : b(b_),s(s_),e(e_) {}
ZT_ALWAYS_INLINE Slice() : b(),s(0),e(0) {}
ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
ZT_ALWAYS_INLINE Slice() noexcept : b(),s(0),e(0) {}
ZT_ALWAYS_INLINE operator bool() const { return (b); }
ZT_ALWAYS_INLINE unsigned int size() const { return (e - s); }
ZT_ALWAYS_INLINE void zero() { b.zero(); s = 0; e = 0; }
ZT_ALWAYS_INLINE operator bool() const noexcept { return (b); }
ZT_ALWAYS_INLINE unsigned int size() const noexcept { return (e - s); }
ZT_ALWAYS_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }
/**
* Buffer holding slice data
@ -151,7 +138,7 @@ public:
* @return Single slice containing fully assembled buffer (empty on error)
*/
template<unsigned int FCVC>
static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv)
static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv) noexcept
{
Buf::Slice r;
@ -179,23 +166,20 @@ public:
return r;
}
ZT_ALWAYS_INLINE Buf() {}
ZT_ALWAYS_INLINE Buf(const Buf &b2) { memcpy(b,b2.b,ZT_BUF_MEM_SIZE); }
/**
* Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
*/
ZT_ALWAYS_INLINE Buf() noexcept {}
ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2)
ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept { memcpy(b,b2.b,ZT_BUF_MEM_SIZE); }
ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2) noexcept
{
if (this != &b2)
memcpy(b,b2.b,ZT_BUF_MEM_SIZE);
return *this;
}
/**
* Get obtains a buffer from the pool or allocates a new buffer if the pool is empty
*
* @return Buffer instance
*/
static ZT_ALWAYS_INLINE SharedPtr< Buf > get() { return SharedPtr<Buf>((Buf *)_Buf_get()); }
/**
* Check for overflow beyond the size of the buffer
*
@ -205,7 +189,7 @@ public:
* @param ii Iterator to check
* @return True if iterator has read past the size of the buffer
*/
static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
/**
* Check for overflow beyond the size of the data that should be in the buffer
@ -217,17 +201,17 @@ public:
* @param size Size of data that should be in buffer
* @return True if iterator has read past the size of the data
*/
static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) { return ((ii - (int)size) > 0); }
static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }
/**
* Set all memory to zero
*/
ZT_ALWAYS_INLINE void clear() { memset(b,0,ZT_BUF_MEM_SIZE); }
ZT_ALWAYS_INLINE void clear() noexcept { memset(b,0,ZT_BUF_MEM_SIZE); }
/**
* Zero security critical data using Utils::burn() to ensure it's never optimized out.
*/
ZT_ALWAYS_INLINE void burn() { Utils::burn(b,ZT_BUF_MEM_SIZE); }
ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(b,ZT_BUF_MEM_SIZE); }
/**
* Read a byte
@ -235,7 +219,7 @@ public:
* @param ii Iterator
* @return Byte (undefined on overflow)
*/
ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const
ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const noexcept
{
const int s = ii++;
return b[(unsigned int)s & ZT_BUF_MEM_MASK];
@ -247,7 +231,7 @@ public:
* @param ii Integer
* @return Integer (undefined on overflow)
*/
ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const
ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const noexcept
{
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
ii += 2;
@ -266,7 +250,7 @@ public:
* @param ii Integer
* @return Integer (undefined on overflow)
*/
ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const
ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const noexcept
{
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
ii += 4;
@ -287,7 +271,7 @@ public:
* @param ii Integer
* @return Integer (undefined on overflow)
*/
ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const
ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const noexcept
{
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
ii += 8;
@ -322,7 +306,7 @@ public:
* @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
*/
template<typename T>
ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const
ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const noexcept
{
if (ii < ZT_BUF_MEM_SIZE) {
int ms = obj.unmarshal(b + ii,ZT_BUF_MEM_SIZE - ii);
@ -344,7 +328,7 @@ public:
* @param bufSize Capacity of buffer in bytes
* @return Pointer to buf or NULL on overflow or error
*/
ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const
ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
{
const char *const s = (const char *)(b + ii);
const int sii = ii;
@ -370,7 +354,7 @@ public:
* @param ii Iterator
* @return Pointer to null-terminated C-style string or NULL on overflow or error
*/
ZT_ALWAYS_INLINE const char *rSnc(int &ii) const
ZT_ALWAYS_INLINE const char *rSnc(int &ii) const noexcept
{
const char *const s = (const char *)(b + ii);
while (ii < ZT_BUF_MEM_SIZE) {
@ -391,7 +375,7 @@ public:
* @param len Length of buffer
* @return Pointer to data or NULL on overflow or error
*/
ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *bytes,unsigned int len) const
ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *bytes,unsigned int len) const noexcept
{
if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
memcpy(bytes,b + ii,len);
@ -413,7 +397,7 @@ public:
* @param len Length of data field to obtain a pointer to
* @return Pointer to field or NULL on overflow
*/
ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const
ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
{
const uint8_t *const b = b + ii;
return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
@ -425,7 +409,7 @@ public:
* @param ii Iterator
* @param n Byte
*/
ZT_ALWAYS_INLINE void wI(int &ii,uint8_t n)
ZT_ALWAYS_INLINE void wI(int &ii,uint8_t n) noexcept
{
const int s = ii++;
b[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
@ -437,7 +421,7 @@ public:
* @param ii Iterator
* @param n Integer
*/
ZT_ALWAYS_INLINE void wI(int &ii,uint16_t n)
ZT_ALWAYS_INLINE void wI(int &ii,uint16_t n) noexcept
{
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
ii += 2;
@ -455,7 +439,7 @@ public:
* @param ii Iterator
* @param n Integer
*/
ZT_ALWAYS_INLINE void wI(int &ii,uint32_t n)
ZT_ALWAYS_INLINE void wI(int &ii,uint32_t n) noexcept
{
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
ii += 4;
@ -475,7 +459,7 @@ public:
* @param ii Iterator
* @param n Integer
*/
ZT_ALWAYS_INLINE void wI(int &ii,uint64_t n)
ZT_ALWAYS_INLINE void wI(int &ii,uint64_t n) noexcept
{
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
ii += 8;
@ -501,7 +485,7 @@ public:
* @param t Object to write
*/
template<typename T>
ZT_ALWAYS_INLINE void wO(int &ii,T &t)
ZT_ALWAYS_INLINE void wO(int &ii,T &t) noexcept
{
const int s = ii;
if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
@ -519,7 +503,7 @@ public:
* @param ii Iterator
* @param s String to write (writes an empty string if this is NULL)
*/
ZT_ALWAYS_INLINE void wS(int &ii,const char *s)
ZT_ALWAYS_INLINE void wS(int &ii,const char *s) noexcept
{
if (s) {
char c;
@ -539,7 +523,7 @@ public:
* @param bytes Bytes to write
* @param len Size of data in bytes
*/
ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len)
ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
{
const int s = ii;
if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
@ -549,7 +533,7 @@ public:
/**
* @return Capacity of this buffer (usable size of data.bytes)
*/
static constexpr unsigned int capacity() { return ZT_BUF_MEM_SIZE; }
static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }
/**
* Cast data in 'b' to a (usually packed) structure type
@ -563,7 +547,7 @@ public:
* @return Reference to 'b' cast to type T
*/
template<typename T>
ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) { return *reinterpret_cast<T *>(b + i); }
ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(b + i); }
/**
* Cast data in 'b' to a (usually packed) structure type (const)
@ -577,14 +561,14 @@ public:
* @return Reference to 'b' cast to type T
*/
template<typename T>
ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const { return *reinterpret_cast<const T *>(b + i); }
ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(b + i); }
ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) != 0); }
ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) < 0); }
ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) <= 0); }
ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) > 0); }
ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) >= 0); }
ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) != 0); }
ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) < 0); }
ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) <= 0); }
ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) > 0); }
ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) >= 0); }
/**
* Raw data held in buffer
@ -597,10 +581,10 @@ public:
private:
// Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
volatile uintptr_t __nextInPool;
std::atomic<uintptr_t> __nextInPool;
// Reference counter for SharedPtr<>
AtomicCounter<int> __refCount;
std::atomic<int> __refCount;
};
} // namespace ZeroTier

View file

@ -4,7 +4,6 @@ project(zt_core)
set(core_headers
../include/ZeroTierCore.h
Address.hpp
AtomicCounter.hpp
Buf.hpp
C25519.hpp
Capability.hpp

View file

@ -49,6 +49,42 @@ void CertificateOfMembership::setQualifier(uint64_t id,uint64_t value,uint64_t m
}
}
// Compare this certificate's qualifier tuples against another peer's COM.
// Returns true only if every qualifier we hold has a matching-ID tuple in
// 'other' whose value differs from ours by no more than our maxDelta.
// Tuples are sorted by ID in both certs, which makes a single linear
// merge-style scan sufficient.
bool CertificateOfMembership::agreesWith(const CertificateOfMembership &other) const
{
	unsigned int myidx = 0;
	unsigned int otheridx = 0;

	// A COM with no qualifiers can never agree with anything.
	if ((_qualifierCount == 0)||(other._qualifierCount == 0))
		return false;

	while (myidx < _qualifierCount) {
		// Fail if we're at the end of other, since this means the field is
		// missing.
		if (otheridx >= other._qualifierCount)
			return false;

		// Seek to corresponding tuple in other, ignoring tuples that
		// we may not have. If we run off the end of other, the tuple is
		// missing. This works because tuples are sorted by ID.
		while (other._qualifiers[otheridx].id != _qualifiers[myidx].id) {
			++otheridx;
			if (otheridx >= other._qualifierCount)
				return false;
		}

		// Compare to determine if the absolute value of the difference
		// between these two parameters is within our maxDelta. Note that
		// other's matching tuple is at otheridx (not myidx): the seek loop
		// above may have skipped extra tuples in other, so the two indexes
		// can differ.
		const uint64_t a = _qualifiers[myidx].value;
		const uint64_t b = other._qualifiers[otheridx].value;
		if (((a >= b) ? (a - b) : (b - a)) > _qualifiers[myidx].maxDelta)
			return false;

		++myidx;
	}

	return true;
}
bool CertificateOfMembership::sign(const Identity &with)
{
uint64_t buf[ZT_NETWORK_COM_MAX_QUALIFIERS * 3];
@ -69,7 +105,7 @@ bool CertificateOfMembership::sign(const Identity &with)
}
}
int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const
int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const noexcept
{
data[0] = 1;
Utils::storeBigEndian<uint16_t>(data + 1,(uint16_t)_qualifierCount);
@ -90,7 +126,7 @@ int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MAR
return p;
}
int CertificateOfMembership::unmarshal(const uint8_t *data,int len)
int CertificateOfMembership::unmarshal(const uint8_t *data,int len) noexcept
{
if ((len < 3)||(data[0] != 1))
return -1;

View file

@ -70,7 +70,7 @@ class CertificateOfMembership : public Credential
friend class Credential;
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_COM; }
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COM; }
/**
* Reserved qualifier IDs
@ -102,7 +102,7 @@ public:
/**
* Create an empty certificate of membership
*/
ZT_ALWAYS_INLINE CertificateOfMembership() { memoryZero(this); }
ZT_ALWAYS_INLINE CertificateOfMembership() noexcept { memoryZero(this); }
/**
* Create from required fields common to all networks
@ -117,17 +117,17 @@ public:
/**
* @return True if there's something here
*/
ZT_ALWAYS_INLINE operator bool() const { return (_qualifierCount != 0); }
ZT_ALWAYS_INLINE operator bool() const noexcept { return (_qualifierCount != 0); }
/**
* @return Credential ID, always 0 for COMs
*/
ZT_ALWAYS_INLINE uint32_t id() const { return 0; }
ZT_ALWAYS_INLINE uint32_t id() const noexcept { return 0; }
/**
* @return Timestamp for this cert and maximum delta for timestamp
*/
ZT_ALWAYS_INLINE int64_t timestamp() const
ZT_ALWAYS_INLINE int64_t timestamp() const noexcept
{
for(unsigned int i=0;i<_qualifierCount;++i) {
if (_qualifiers[i].id == COM_RESERVED_ID_TIMESTAMP)
@ -139,7 +139,7 @@ public:
/**
* @return Address to which this cert was issued
*/
ZT_ALWAYS_INLINE Address issuedTo() const
ZT_ALWAYS_INLINE Address issuedTo() const noexcept
{
for(unsigned int i=0;i<_qualifierCount;++i) {
if (_qualifiers[i].id == COM_RESERVED_ID_ISSUED_TO)
@ -151,7 +151,7 @@ public:
/**
* @return Network ID for which this cert was issued
*/
ZT_ALWAYS_INLINE uint64_t networkId() const
ZT_ALWAYS_INLINE uint64_t networkId() const noexcept
{
for(unsigned int i=0;i<_qualifierCount;++i) {
if (_qualifiers[i].id == COM_RESERVED_ID_NETWORK_ID)
@ -186,41 +186,7 @@ public:
* @param other Cert to compare with
* @return True if certs agree and 'other' may be communicated with
*/
// Compare this certificate's qualifier tuples against another peer's COM.
// Returns true only if every qualifier we hold has a matching-ID tuple in
// 'other' whose value differs from ours by no more than our maxDelta.
ZT_ALWAYS_INLINE bool agreesWith(const CertificateOfMembership &other) const
{
	unsigned int myidx = 0;
	unsigned int otheridx = 0;

	// A COM with no qualifiers can never agree with anything.
	if ((_qualifierCount == 0)||(other._qualifierCount == 0))
		return false;

	while (myidx < _qualifierCount) {
		// Fail if we're at the end of other, since this means the field is
		// missing.
		if (otheridx >= other._qualifierCount)
			return false;

		// Seek to corresponding tuple in other, ignoring tuples that
		// we may not have. If we run off the end of other, the tuple is
		// missing. This works because tuples are sorted by ID.
		while (other._qualifiers[otheridx].id != _qualifiers[myidx].id) {
			++otheridx;
			if (otheridx >= other._qualifierCount)
				return false;
		}

		// Compare to determine if the absolute value of the difference
		// between these two parameters is within our maxDelta. Note that
		// other's matching tuple is at otheridx (not myidx): the seek loop
		// above may have skipped extra tuples in other, so the two indexes
		// can differ.
		const uint64_t a = _qualifiers[myidx].value;
		const uint64_t b = other._qualifiers[otheridx].value;
		if (((a >= b) ? (a - b) : (b - a)) > _qualifiers[myidx].maxDelta)
			return false;

		++myidx;
	}

	return true;
}
bool agreesWith(const CertificateOfMembership &other) const;
/**
* Sign this certificate
@ -241,11 +207,11 @@ public:
/**
* @return Address that signed this certificate or null address if none
*/
ZT_ALWAYS_INLINE const Address &signedBy() const { return _signedBy; }
ZT_ALWAYS_INLINE const Address &signedBy() const noexcept { return _signedBy; }
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const;
int unmarshal(const uint8_t *data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *data,int len) noexcept;
bool operator==(const CertificateOfMembership &c) const;
ZT_ALWAYS_INLINE bool operator!=(const CertificateOfMembership &c) const { return (!(*this == c)); }
@ -253,11 +219,11 @@ public:
private:
// One qualifier tuple of this certificate: an ID, a value, and the maximum
// permitted difference (maxDelta) between our value and a peer's value for
// the same ID when checking agreement. Tuples are kept sorted by ID, which
// is what operator< below provides.
// NOTE(review): the consecutive duplicated lines here are the before/after
// pair of a diff (the change adds noexcept); both are preserved verbatim.
struct _Qualifier
{
ZT_ALWAYS_INLINE _Qualifier() : id(0),value(0),maxDelta(0) {}
ZT_ALWAYS_INLINE _Qualifier() noexcept : id(0),value(0),maxDelta(0) {}
uint64_t id;
uint64_t value;
uint64_t maxDelta;
ZT_ALWAYS_INLINE bool operator<(const _Qualifier &q) const { return (id < q.id); } // sort order
ZT_ALWAYS_INLINE bool operator<(const _Qualifier &q) const noexcept { return (id < q.id); } // sort order
};
Address _signedBy;

View file

@ -48,7 +48,7 @@ bool CertificateOfOwnership::sign(const Identity &signer)
return false;
}
int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign) const
int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign) const noexcept
{
int p = 0;
if (forSign) {
@ -82,7 +82,7 @@ int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSH
return p;
}
int CertificateOfOwnership::unmarshal(const uint8_t *data,int len)
int CertificateOfOwnership::unmarshal(const uint8_t *data,int len) noexcept
{
if (len < 30)
return -1;

View file

@ -47,7 +47,7 @@ class CertificateOfOwnership : public Credential
friend class Credential;
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_COO; }
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COO; }
enum Thing
{
@ -57,9 +57,9 @@ public:
THING_IPV6_ADDRESS = 3
};
ZT_ALWAYS_INLINE CertificateOfOwnership() { memoryZero(this); }
ZT_ALWAYS_INLINE CertificateOfOwnership() noexcept { memoryZero(this); }
ZT_ALWAYS_INLINE CertificateOfOwnership(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id)
ZT_ALWAYS_INLINE CertificateOfOwnership(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id) noexcept
{
memset(reinterpret_cast<void *>(this),0,sizeof(CertificateOfOwnership));
_networkId = nwid;
@ -68,19 +68,19 @@ public:
_issuedTo = issuedTo;
}
ZT_ALWAYS_INLINE uint64_t networkId() const { return _networkId; }
ZT_ALWAYS_INLINE int64_t timestamp() const { return _ts; }
ZT_ALWAYS_INLINE uint32_t id() const { return _id; }
ZT_ALWAYS_INLINE const Address &issuedTo() const { return _issuedTo; }
ZT_ALWAYS_INLINE const Address &signer() const { return _signedBy; }
ZT_ALWAYS_INLINE const uint8_t *signature() const { return _signature; }
ZT_ALWAYS_INLINE unsigned int signatureLength() const { return _signatureLength; }
ZT_ALWAYS_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_ALWAYS_INLINE int64_t timestamp() const noexcept { return _ts; }
ZT_ALWAYS_INLINE uint32_t id() const noexcept { return _id; }
ZT_ALWAYS_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
ZT_ALWAYS_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_ALWAYS_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_ALWAYS_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
ZT_ALWAYS_INLINE unsigned int thingCount() const { return (unsigned int)_thingCount; }
ZT_ALWAYS_INLINE Thing thingType(const unsigned int i) const { return (Thing)_thingTypes[i]; }
ZT_ALWAYS_INLINE const uint8_t *thingValue(const unsigned int i) const { return _thingValues[i]; }
ZT_ALWAYS_INLINE unsigned int thingCount() const noexcept { return (unsigned int)_thingCount; }
ZT_ALWAYS_INLINE Thing thingType(const unsigned int i) const noexcept { return (Thing)_thingTypes[i]; }
ZT_ALWAYS_INLINE const uint8_t *thingValue(const unsigned int i) const noexcept { return _thingValues[i]; }
ZT_ALWAYS_INLINE bool owns(const InetAddress &ip) const
ZT_ALWAYS_INLINE bool owns(const InetAddress &ip) const noexcept
{
if (ip.ss_family == AF_INET)
return this->_owns(THING_IPV4_ADDRESS,&(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr),4);
@ -89,7 +89,7 @@ public:
return false;
}
ZT_ALWAYS_INLINE bool owns(const MAC &mac) const
ZT_ALWAYS_INLINE bool owns(const MAC &mac) const noexcept
{
uint8_t tmp[6];
mac.copyTo(tmp);
@ -107,18 +107,18 @@ public:
ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign = false) const;
int unmarshal(const uint8_t *data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data,int len) noexcept;
// Provides natural sort order by ID
ZT_ALWAYS_INLINE bool operator<(const CertificateOfOwnership &coo) const { return (_id < coo._id); }
ZT_ALWAYS_INLINE bool operator<(const CertificateOfOwnership &coo) const noexcept { return (_id < coo._id); }
ZT_ALWAYS_INLINE bool operator==(const CertificateOfOwnership &coo) const { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const CertificateOfOwnership &coo) const { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) != 0); }
ZT_ALWAYS_INLINE bool operator==(const CertificateOfOwnership &coo) const noexcept { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const CertificateOfOwnership &coo) const noexcept { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) != 0); }
private:
ZT_ALWAYS_INLINE bool _owns(const Thing &t,const void *v,unsigned int l) const
ZT_ALWAYS_INLINE bool _owns(const Thing &t,const void *v,unsigned int l) const noexcept
{
for(unsigned int i=0,j=_thingCount;i<j;++i) {
if (_thingTypes[i] == (uint8_t)t) {

View file

@ -82,14 +82,14 @@
#define ZT_PEER_SECRET_KEY_LENGTH 32
/**
* Minimum delay between timer task checks to prevent thrashing
* Maximum delay between timer task checks
*/
#define ZT_MIN_TIMER_TASK_INTERVAL 500
#define ZT_MAX_TIMER_TASK_INTERVAL 1000
/**
* Maximum delay between timer task checks (should be a fraction of smallest housekeeping interval)
* Interval between steps or stages in NAT-t attempts
*/
#define ZT_MAX_TIMER_TASK_INTERVAL 5000
#define ZT_NAT_TRAVERSAL_INTERVAL 200
/**
* How often most internal cleanup and housekeeping tasks are performed

View file

@ -52,7 +52,7 @@ static ZT_ALWAYS_INLINE Credential::VerifyResult _credVerify(const RuntimeEnviro
if ((!signedBy)||(signedBy != Network::controllerFor(networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
const SharedPtr<Peer> peer(RR->topology->get(tPtr,signedBy));
const SharedPtr<Peer> peer(RR->topology->peer(tPtr,signedBy));
if (!peer) {
RR->sw->requestWhois(tPtr,RR->node->now(),signedBy);
return Credential::VERIFY_NEED_IDENTITY;
@ -77,7 +77,7 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *const RR,
if ((!credential._signedBy)||(credential._signedBy != Network::controllerFor(credential.networkId()))||(credential._qualifierCount > ZT_NETWORK_COM_MAX_QUALIFIERS))
return Credential::VERIFY_BAD_SIGNATURE;
const SharedPtr<Peer> peer(RR->topology->get(tPtr,credential._signedBy));
const SharedPtr<Peer> peer(RR->topology->peer(tPtr,credential._signedBy));
if (!peer) {
RR->sw->requestWhois(tPtr,RR->node->now(),credential._signedBy);
return Credential::VERIFY_NEED_IDENTITY;
@ -118,7 +118,7 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *RR,void *
return Credential::VERIFY_BAD_SIGNATURE; // otherwise if we have another entry it must be from the previous holder in the chain
}
const SharedPtr<Peer> peer(RR->topology->get(tPtr,credential._custody[c].from));
const SharedPtr<Peer> peer(RR->topology->peer(tPtr,credential._custody[c].from));
if (peer) {
if (!peer->identity().verify(tmp,(unsigned int)l,credential._custody[c].signature,credential._custody[c].signatureLength))
return Credential::VERIFY_BAD_SIGNATURE;

View file

@ -16,7 +16,6 @@
#include "Constants.hpp"
#include "Buf.hpp"
#include "AtomicCounter.hpp"
#include "SharedPtr.hpp"
#include "Hashtable.hpp"
#include "Mutex.hpp"
@ -38,7 +37,7 @@ namespace ZeroTier {
* hairiness makes it very desirable to be able to test and fuzz this code
* independently.
*
* Here be dragons!
* This class is thread-safe and handles locking internally.
*
* @tparam MF Maximum number of fragments that each message can possess
* @tparam GCS Garbage collection target size for the incoming message queue
@ -148,10 +147,10 @@ public:
if ((fragmentNo >= totalFragmentsExpected)||(totalFragmentsExpected > MF)||(totalFragmentsExpected == 0))
return ERR_INVALID_FRAGMENT;
// Lock messages for read and look up current entry. Also check the
// GC trigger and if we've exceeded that threshold then older message
// entries are garbage collected.
_messages_l.rlock();
// We hold the read lock on _messages unless we need to add a new entry or do GC.
RWMutex::RMaybeWLock ml(_messages_l);
// Check message hash table size and perform GC if necessary.
if (_messages.size() >= GCT) {
try {
// Scan messages with read lock still locked first and make a sorted list of
@ -171,28 +170,19 @@ public:
std::sort(messagesByLastUsedTime.begin(),messagesByLastUsedTime.end());
_messages_l.runlock(); _messages_l.lock();
ml.writing(); // acquire write lock on _messages
for (unsigned long x = 0,y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
_messages.erase(messagesByLastUsedTime[x].second);
_messages_l.unlock(); _messages_l.rlock();
} catch (...) {
// The only way something in that code can throw is if a bad_alloc occurs when
// reserve() is called in the vector. In this case we flush the entire queue
// and error out. This is very rare and on some platforms impossible.
_messages_l.runlock();
_messages_l.lock();
_messages.clear();
_messages_l.unlock();
return ERR_OUT_OF_MEMORY;
}
}
_E *e = _messages.get(messageId);
_messages_l.runlock();
// If no entry exists we must briefly lock messages for write and create a new one.
// Get or create message fragment.
_E *e = _messages.get(messageId);
if (!e) {
ml.writing(); // acquire write lock on _messages if not already
try {
RWMutex::Lock ml(_messages_l);
e = &(_messages[messageId]);
} catch (...) {
return ERR_OUT_OF_MEMORY;
@ -200,18 +190,17 @@ public:
e->id = messageId;
}
// Now handle this fragment within this individual message entry.
Mutex::Lock el(e->lock);
// Switch back to holding only the read lock on _messages if we have locked for write
ml.reading();
// Note: it's important that _messages_l is not locked while the entry
// is locked or a deadlock could occur due to GC or clear() being called
// in another thread.
// Acquire lock on entry itself
Mutex::Lock el(e->lock);
// This magic value means this message has already been assembled and is done.
if (e->lastUsed < 0)
return ERR_DUPLICATE_FRAGMENT;
// Update last-activity timestamp for this entry.
// Update last-activity timestamp for this entry, delaying GC.
e->lastUsed = now;
// Learn total fragments expected if a value is given. Otherwise the cached
@ -294,15 +283,10 @@ private:
ZT_ALWAYS_INLINE _E() : id(0),lastUsed(0),totalFragmentsExpected(0),via(),message(),lock() {}
// Destructor: if this entry was received via a peer ('via' set), remove
// this message ID from that peer's inbound fragmented-message table so no
// stale reference to this (now destroyed) entry remains.
ZT_ALWAYS_INLINE ~_E()
{
// Ensure that this entry is not in use while it is being deleted!
// (Acquire our own lock first, then the peer's table lock for the erase.
// NOTE(review): lock ordering relative to other users of these two locks
// is not visible here — confirm against Defragmenter/Peer callers.)
lock.lock();
if (via) {
via->_inboundFragmentedMessages_l.lock();
via->_inboundFragmentedMessages.erase(id);
via->_inboundFragmentedMessages_l.unlock();
}
lock.unlock();
}
uint64_t id;
volatile int64_t lastUsed;
unsigned int totalFragmentsExpected;

View file

@ -138,12 +138,12 @@ public:
/**
* @return Number of entries
*/
ZT_ALWAYS_INLINE unsigned int size() const { return _t.size(); }
ZT_ALWAYS_INLINE unsigned int size() const noexcept { return _t.size(); }
/**
* @return True if dictionary is not empty
*/
ZT_ALWAYS_INLINE bool empty() const { return _t.empty(); }
ZT_ALWAYS_INLINE bool empty() const noexcept { return _t.empty(); }
/**
* Encode to a string in the supplied vector

View file

@ -53,7 +53,7 @@ bool Endpoint::operator<(const Endpoint &ep) const
return false;
}
int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
{
int p;
data[0] = (uint8_t)_t;
@ -108,7 +108,7 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
}
}
int Endpoint::unmarshal(const uint8_t *restrict data,const int len)
int Endpoint::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
if (len < 7)
return -1;

View file

@ -51,7 +51,7 @@ public:
UNRECOGNIZED = 255 // Unrecognized endpoint type encountered in stream
};
ZT_ALWAYS_INLINE Endpoint() { memoryZero(this); }
ZT_ALWAYS_INLINE Endpoint() noexcept { memoryZero(this); }
explicit ZT_ALWAYS_INLINE Endpoint(const InetAddress &sa) { *this = sa; }
@ -93,44 +93,44 @@ public:
/**
* @return InetAddress or NIL if not of this type
*/
ZT_ALWAYS_INLINE const InetAddress &inetAddr() const { return ((_t == INETADDR_V4)||(_t == INETADDR_V6)) ? *reinterpret_cast<const InetAddress *>(&_v.sa) : InetAddress::NIL; }
ZT_ALWAYS_INLINE const InetAddress &inetAddr() const noexcept { return ((_t == INETADDR_V4)||(_t == INETADDR_V6)) ? *reinterpret_cast<const InetAddress *>(&_v.sa) : InetAddress::NIL; }
/**
* @return DNS name or empty string if not of this type
*/
ZT_ALWAYS_INLINE const char *dnsName() const { return (_t == DNSNAME) ? _v.dns.name : ""; }
ZT_ALWAYS_INLINE const char *dnsName() const noexcept { return (_t == DNSNAME) ? _v.dns.name : ""; }
/**
* @return Port associated with DNS name or -1 if not of this type
*/
ZT_ALWAYS_INLINE int dnsPort() const { return (_t == DNSNAME) ? _v.dns.port : -1; }
ZT_ALWAYS_INLINE int dnsPort() const noexcept { return (_t == DNSNAME) ? _v.dns.port : -1; }
/**
* @return ZeroTier address or NIL if not of this type
*/
ZT_ALWAYS_INLINE Address ztAddress() const { return Address((_t == ZEROTIER) ? _v.zt.a : (uint64_t)0); }
ZT_ALWAYS_INLINE Address ztAddress() const noexcept { return Address((_t == ZEROTIER) ? _v.zt.a : (uint64_t)0); }
/**
* @return 384-bit hash of identity keys or NULL if not of this type
*/
ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const noexcept { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
/**
* @return URL or empty string if not of this type
*/
ZT_ALWAYS_INLINE const char *url() const { return (_t == URL) ? _v.url : ""; }
ZT_ALWAYS_INLINE const char *url() const noexcept { return (_t == URL) ? _v.url : ""; }
/**
* @return Ethernet address or NIL if not of this type
*/
ZT_ALWAYS_INLINE MAC ethernet() const { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
ZT_ALWAYS_INLINE MAC ethernet() const noexcept { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
/**
* @return Endpoint type or NIL if unset/empty
*/
ZT_ALWAYS_INLINE Type type() const { return _t; }
ZT_ALWAYS_INLINE Type type() const noexcept { return _t; }
explicit ZT_ALWAYS_INLINE operator bool() const { return _t != NIL; }
explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return _t != NIL; }
bool operator==(const Endpoint &ep) const;
ZT_ALWAYS_INLINE bool operator!=(const Endpoint &ep) const { return (!(*this == ep)); }
@ -139,9 +139,9 @@ public:
ZT_ALWAYS_INLINE bool operator<=(const Endpoint &ep) const { return !(ep < *this); }
ZT_ALWAYS_INLINE bool operator>=(const Endpoint &ep) const { return !(*this < ep); }
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const;
int unmarshal(const uint8_t *restrict data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *restrict data,int len) noexcept;
private:
Type _t;

View file

@ -43,7 +43,7 @@ public:
typedef T * iterator;
typedef const T * const_iterator;
ZT_ALWAYS_INLINE FCV() : _s(0) {}
ZT_ALWAYS_INLINE FCV() noexcept : _s(0) {}
template<unsigned int C2>
ZT_ALWAYS_INLINE FCV(const FCV<T,C2> &v) : _s(0) { *this = v; }
@ -77,7 +77,7 @@ public:
/**
* Clear without calling destructors (same as unsafeResize(0))
*/
ZT_ALWAYS_INLINE void unsafeClear() { _s = 0; }
ZT_ALWAYS_INLINE void unsafeClear() noexcept { _s = 0; }
/**
* This does a straight copy of one vector's data to another
@ -91,7 +91,7 @@ public:
* @param v Other vector to copy to this one
*/
template<unsigned int C2>
ZT_ALWAYS_INLINE void unsafeAssign(const FCV<T,C2> &v)
ZT_ALWAYS_INLINE void unsafeAssign(const FCV<T,C2> &v) noexcept
{
_s = ((C2 > C)&&(v._s > C)) ? C : v._s;
memcpy(_m,v._m,_s * sizeof(T));
@ -105,23 +105,23 @@ public:
*
* @param v Target vector
*/
ZT_ALWAYS_INLINE void unsafeMoveTo(FCV &v)
ZT_ALWAYS_INLINE void unsafeMoveTo(FCV &v) noexcept
{
memcpy(v._m,_m,(v._s = _s) * sizeof(T));
_s = 0;
}
ZT_ALWAYS_INLINE iterator begin() { return reinterpret_cast<T *>(_m); }
ZT_ALWAYS_INLINE const_iterator begin() const { return reinterpret_cast<const T *>(_m); }
ZT_ALWAYS_INLINE iterator end() { return reinterpret_cast<T *>(_m) + _s; }
ZT_ALWAYS_INLINE const_iterator end() const { return reinterpret_cast<const T *>(_m) + _s; }
ZT_ALWAYS_INLINE iterator begin() noexcept { return reinterpret_cast<T *>(_m); }
ZT_ALWAYS_INLINE const_iterator begin() const noexcept { return reinterpret_cast<const T *>(_m); }
ZT_ALWAYS_INLINE iterator end() noexcept { return reinterpret_cast<T *>(_m) + _s; }
ZT_ALWAYS_INLINE const_iterator end() const noexcept { return reinterpret_cast<const T *>(_m) + _s; }
ZT_ALWAYS_INLINE T &operator[](const unsigned int i) { return reinterpret_cast<T *>(_m)[i]; }
ZT_ALWAYS_INLINE const T &operator[](const unsigned int i) const { return reinterpret_cast<T *>(_m)[i]; }
ZT_ALWAYS_INLINE T &operator[](const unsigned int i) noexcept { return reinterpret_cast<T *>(_m)[i]; }
ZT_ALWAYS_INLINE const T &operator[](const unsigned int i) const noexcept { return reinterpret_cast<T *>(_m)[i]; }
ZT_ALWAYS_INLINE unsigned int size() const { return _s; }
ZT_ALWAYS_INLINE bool empty() const { return (_s == 0); }
static constexpr unsigned int capacity() { return C; }
ZT_ALWAYS_INLINE unsigned int size() const noexcept { return _s; }
ZT_ALWAYS_INLINE bool empty() const noexcept { return (_s == 0); }
static constexpr unsigned int capacity() noexcept { return C; }
/**
* Push a value onto the back of this vector
@ -200,7 +200,7 @@ public:
*
* @param ns New size (clipped to C if larger than capacity)
*/
ZT_ALWAYS_INLINE void unsafeResize(const unsigned int ns) { _s = (ns > C) ? C : ns; }
ZT_ALWAYS_INLINE void unsafeResize(const unsigned int ns) noexcept { _s = (ns > C) ? C : ns; }
/**
* This is a bounds checked auto-resizing variant of the [] operator

View file

@ -57,7 +57,7 @@ public:
/**
* @param ht Hash table to iterate over
*/
explicit ZT_ALWAYS_INLINE Iterator(Hashtable &ht) :
explicit ZT_ALWAYS_INLINE Iterator(Hashtable &ht) noexcept :
_idx(0),
_ht(&ht),
_b(ht._t[0])
@ -344,26 +344,26 @@ public:
/**
* @return Number of entries
*/
ZT_ALWAYS_INLINE unsigned long size() const { return _s; }
ZT_ALWAYS_INLINE unsigned long size() const noexcept { return _s; }
/**
* @return True if table is empty
*/
ZT_ALWAYS_INLINE bool empty() const { return (_s == 0); }
ZT_ALWAYS_INLINE bool empty() const noexcept { return (_s == 0); }
private:
template<typename O>
static ZT_ALWAYS_INLINE unsigned long _hc(const O &obj) { return (unsigned long)obj.hashCode(); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint64_t i) { return (unsigned long)(i ^ (i >> 32U)); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint8_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int64_t i) { return (unsigned long)((unsigned long long)i ^ ((unsigned long long)i >> 32U)); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int8_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint64_t i) noexcept { return (unsigned long)(i ^ (i >> 32U)); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint32_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint16_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const uint8_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int64_t i) noexcept { return (unsigned long)((unsigned long long)i ^ ((unsigned long long)i >> 32U)); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int32_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int16_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const int8_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(void *p) noexcept { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
static ZT_ALWAYS_INLINE unsigned long _hc(const void *p) noexcept { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
ZT_ALWAYS_INLINE void _grow()
{

View file

@ -379,7 +379,7 @@ bool Identity::fromString(const char *str)
return true;
}
int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool includePrivate) const
int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool includePrivate) const noexcept
{
_address.copyTo(data);
switch(_type) {
@ -412,7 +412,7 @@ int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool incl
return -1;
}
int Identity::unmarshal(const uint8_t *data,const int len)
int Identity::unmarshal(const uint8_t *data,const int len) noexcept
{
if (len < (ZT_ADDRESS_LENGTH + 1))
return -1;

View file

@ -61,7 +61,7 @@ public:
*/
static const Identity NIL;
ZT_ALWAYS_INLINE Identity() { memoryZero(this); }
ZT_ALWAYS_INLINE Identity() noexcept { memoryZero(this); }
ZT_ALWAYS_INLINE ~Identity() { Utils::burn(reinterpret_cast<void *>(&this->_priv),sizeof(this->_priv)); }
/**
@ -77,12 +77,12 @@ public:
/**
* Set identity to NIL value (all zero)
*/
ZT_ALWAYS_INLINE void zero() { memoryZero(this); }
ZT_ALWAYS_INLINE void zero() noexcept { memoryZero(this); }
/**
* @return Identity type (undefined if identity is null or invalid)
*/
ZT_ALWAYS_INLINE Type type() const { return _type; }
ZT_ALWAYS_INLINE Type type() const noexcept { return _type; }
/**
* Generate a new identity (address, key pair)
@ -103,7 +103,7 @@ public:
/**
* @return True if this identity contains a private key
*/
ZT_ALWAYS_INLINE bool hasPrivate() const { return _hasPrivate; }
ZT_ALWAYS_INLINE bool hasPrivate() const noexcept { return _hasPrivate; }
/**
* @return 384-bit/48-byte hash of this identity's public key(s)
@ -161,7 +161,7 @@ public:
/**
* @return This identity's address
*/
ZT_ALWAYS_INLINE const Address &address() const { return _address; }
ZT_ALWAYS_INLINE const Address &address() const noexcept { return _address; }
/**
* Serialize to a more human-friendly string
@ -186,9 +186,9 @@ public:
/**
* @return True if this identity contains something
*/
explicit ZT_ALWAYS_INLINE operator bool() const { return (_address); }
explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return (_address); }
ZT_ALWAYS_INLINE bool operator==(const Identity &id) const
ZT_ALWAYS_INLINE bool operator==(const Identity &id) const noexcept
{
if ((_address == id._address)&&(_type == id._type)) {
switch(_type) {
@ -199,7 +199,7 @@ public:
}
return false;
}
ZT_ALWAYS_INLINE bool operator<(const Identity &id) const
ZT_ALWAYS_INLINE bool operator<(const Identity &id) const noexcept
{
if (_address < id._address)
return true;
@ -216,16 +216,16 @@ public:
}
return false;
}
ZT_ALWAYS_INLINE bool operator!=(const Identity &id) const { return !(*this == id); }
ZT_ALWAYS_INLINE bool operator>(const Identity &id) const { return (id < *this); }
ZT_ALWAYS_INLINE bool operator<=(const Identity &id) const { return !(id < *this); }
ZT_ALWAYS_INLINE bool operator>=(const Identity &id) const { return !(*this < id); }
ZT_ALWAYS_INLINE bool operator!=(const Identity &id) const noexcept { return !(*this == id); }
ZT_ALWAYS_INLINE bool operator>(const Identity &id) const noexcept { return (id < *this); }
ZT_ALWAYS_INLINE bool operator<=(const Identity &id) const noexcept { return !(id < *this); }
ZT_ALWAYS_INLINE bool operator>=(const Identity &id) const noexcept { return !(*this < id); }
ZT_ALWAYS_INLINE unsigned long hashCode() const { return ((unsigned long)_address.toInt() + (unsigned long)_pub.c25519[0] + (unsigned long)_pub.c25519[1] + (unsigned long)_pub.c25519[2]); }
ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return ((unsigned long)_address.toInt() + (unsigned long)_pub.c25519[0] + (unsigned long)_pub.c25519[1] + (unsigned long)_pub.c25519[2]); }
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_IDENTITY_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],bool includePrivate = false) const;
int unmarshal(const uint8_t *data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_IDENTITY_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],bool includePrivate = false) const noexcept;
int unmarshal(const uint8_t *data,int len) noexcept;
private:
void _computeHash(); // recompute _hash

View file

@ -110,7 +110,7 @@ void InetAddress::set(const void *ipBytes,unsigned int ipLen,unsigned int port)
}
}
char *InetAddress::toString(char buf[64]) const
char *InetAddress::toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const
{
char *p = toIpString(buf);
if (*p) {
@ -121,7 +121,7 @@ char *InetAddress::toString(char buf[64]) const
return buf;
}
char *InetAddress::toIpString(char buf[64]) const
char *InetAddress::toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const
{
buf[0] = (char)0;
switch(ss_family) {

View file

@ -26,6 +26,7 @@
namespace ZeroTier {
#define ZT_INETADDRESS_MARSHAL_SIZE_MAX 19
#define ZT_INETADDRESS_STRING_SIZE_MAX 64
/**
* Extends sockaddr_storage with friendly C++ methods
@ -213,12 +214,12 @@ public:
/**
* @return ASCII IP/port format representation
*/
char *toString(char buf[64]) const;
char *toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const;
/**
* @return IP portion only, in ASCII string format
*/
char *toIpString(char buf[64]) const;
char *toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const;
/**
* @param ipSlashPort IP/port (port is optional, will be 0 if not included)

View file

@ -31,33 +31,32 @@ namespace ZeroTier {
class MAC : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE MAC() : _m(0ULL) {}
ZT_ALWAYS_INLINE MAC(const uint8_t a,const uint8_t b,const uint8_t c,const uint8_t d,const uint8_t e,const uint8_t f) :
_m( (((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f) ) {}
explicit ZT_ALWAYS_INLINE MAC(const uint64_t m) : _m(m & 0xffffffffffffULL) {}
explicit ZT_ALWAYS_INLINE MAC(const uint8_t b[6]) { setTo(b); }
ZT_ALWAYS_INLINE MAC(const Address &ztaddr,uint64_t nwid) { fromAddress(ztaddr,nwid); }
ZT_ALWAYS_INLINE MAC() noexcept : _m(0ULL) {}
ZT_ALWAYS_INLINE MAC(const uint8_t a,const uint8_t b,const uint8_t c,const uint8_t d,const uint8_t e,const uint8_t f) noexcept : _m( (((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f) ) {}
explicit ZT_ALWAYS_INLINE MAC(const uint64_t m) noexcept : _m(m & 0xffffffffffffULL) {}
explicit ZT_ALWAYS_INLINE MAC(const uint8_t b[6]) noexcept { setTo(b); }
ZT_ALWAYS_INLINE MAC(const Address &ztaddr,uint64_t nwid) noexcept { fromAddress(ztaddr,nwid); }
/**
* @return MAC in 64-bit integer
*/
ZT_ALWAYS_INLINE uint64_t toInt() const { return _m; }
ZT_ALWAYS_INLINE uint64_t toInt() const noexcept { return _m; }
/**
* Set MAC to zero
*/
ZT_ALWAYS_INLINE void zero() { _m = 0ULL; }
ZT_ALWAYS_INLINE void zero() noexcept { _m = 0ULL; }
/**
* @return True if MAC is non-zero
*/
ZT_ALWAYS_INLINE operator bool() const { return (_m != 0ULL); }
ZT_ALWAYS_INLINE operator bool() const noexcept { return (_m != 0ULL); }
/**
 * @param b Raw MAC in big-endian byte order (must point to at least 6 bytes)
*/
ZT_ALWAYS_INLINE void setTo(const uint8_t b[6])
ZT_ALWAYS_INLINE void setTo(const uint8_t b[6]) noexcept
{
_m = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U) | ((uint64_t)b[4] << 8U) | (uint64_t)b[5];
}
@ -66,7 +65,7 @@ public:
 * @param b Destination buffer for MAC in big-endian byte order (must hold at least 6 bytes)
*/
ZT_ALWAYS_INLINE void copyTo(uint8_t b[6]) const
ZT_ALWAYS_INLINE void copyTo(uint8_t b[6]) const noexcept
{
b[0] = (uint8_t)(_m >> 40U);
b[1] = (uint8_t)(_m >> 32U);
@ -79,12 +78,12 @@ public:
/**
* @return True if this is broadcast (all 0xff)
*/
ZT_ALWAYS_INLINE bool isBroadcast() const { return (_m == 0xffffffffffffULL); }
ZT_ALWAYS_INLINE bool isBroadcast() const noexcept { return (_m == 0xffffffffffffULL); }
/**
* @return True if this is a multicast MAC
*/
ZT_ALWAYS_INLINE bool isMulticast() const { return ((_m & 0x010000000000ULL) != 0ULL); }
ZT_ALWAYS_INLINE bool isMulticast() const noexcept { return ((_m & 0x010000000000ULL) != 0ULL); }
/**
* Set this MAC to a MAC derived from an address and a network ID
@ -92,7 +91,7 @@ public:
* @param ztaddr ZeroTier address
* @param nwid 64-bit network ID
*/
ZT_ALWAYS_INLINE void fromAddress(const Address &ztaddr,uint64_t nwid)
ZT_ALWAYS_INLINE void fromAddress(const Address &ztaddr,uint64_t nwid) noexcept
{
uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
m |= ztaddr.toInt(); // a is 40 bits
@ -111,7 +110,7 @@ public:
*
* @param nwid Network ID
*/
ZT_ALWAYS_INLINE Address toAddress(uint64_t nwid) const
ZT_ALWAYS_INLINE Address toAddress(uint64_t nwid) const noexcept
{
uint64_t a = _m & 0xffffffffffULL; // least significant 40 bits of MAC are formed from address
a ^= ((nwid >> 8U) & 0xffU) << 32U; // ... XORed with bits 8-48 of the nwid in little-endian byte order, so unmask it
@ -126,7 +125,7 @@ public:
* @param nwid Network ID
* @return First octet of MAC for this network
*/
static ZT_ALWAYS_INLINE unsigned char firstOctetForNetwork(uint64_t nwid)
static ZT_ALWAYS_INLINE unsigned char firstOctetForNetwork(uint64_t nwid) noexcept
{
const uint8_t a = ((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
return ((a == 0x52) ? 0x32 : a); // blacklist 0x52 since it's used by KVM, libvirt, and other popular virtualization engines... seems de-facto standard on Linux
@ -136,16 +135,16 @@ public:
* @param i Value from 0 to 5 (inclusive)
* @return Byte at said position (address interpreted in big-endian order)
*/
ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const { return (uint8_t)(_m >> (40 - (i * 8))); }
ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const noexcept { return (uint8_t)(_m >> (40 - (i * 8))); }
/**
* @return 6, which is the number of bytes in a MAC, for container compliance
*/
ZT_ALWAYS_INLINE unsigned int size() const { return 6; }
ZT_ALWAYS_INLINE unsigned int size() const noexcept { return 6; }
ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)_m; }
ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_m; }
ZT_ALWAYS_INLINE char *toString(char buf[18]) const
ZT_ALWAYS_INLINE char *toString(char buf[18]) const noexcept
{
buf[0] = Utils::HEXCHARS[(_m >> 44U) & 0xfU];
buf[1] = Utils::HEXCHARS[(_m >> 40U) & 0xfU];
@ -168,18 +167,18 @@ public:
return buf;
}
ZT_ALWAYS_INLINE MAC &operator=(const uint64_t m)
ZT_ALWAYS_INLINE MAC &operator=(const uint64_t m) noexcept
{
_m = m & 0xffffffffffffULL;
return *this;
}
ZT_ALWAYS_INLINE bool operator==(const MAC &m) const { return (_m == m._m); }
ZT_ALWAYS_INLINE bool operator!=(const MAC &m) const { return (_m != m._m); }
ZT_ALWAYS_INLINE bool operator<(const MAC &m) const { return (_m < m._m); }
ZT_ALWAYS_INLINE bool operator<=(const MAC &m) const { return (_m <= m._m); }
ZT_ALWAYS_INLINE bool operator>(const MAC &m) const { return (_m > m._m); }
ZT_ALWAYS_INLINE bool operator>=(const MAC &m) const { return (_m >= m._m); }
ZT_ALWAYS_INLINE bool operator==(const MAC &m) const noexcept { return (_m == m._m); }
ZT_ALWAYS_INLINE bool operator!=(const MAC &m) const noexcept { return (_m != m._m); }
ZT_ALWAYS_INLINE bool operator<(const MAC &m) const noexcept { return (_m < m._m); }
ZT_ALWAYS_INLINE bool operator<=(const MAC &m) const noexcept { return (_m <= m._m); }
ZT_ALWAYS_INLINE bool operator>(const MAC &m) const noexcept { return (_m > m._m); }
ZT_ALWAYS_INLINE bool operator>=(const MAC &m) const noexcept { return (_m >= m._m); }
private:
uint64_t _m;

View file

@ -126,7 +126,7 @@ public:
/**
* Generates a key for internal use in indexing credentials by type and credential ID
*/
static ZT_ALWAYS_INLINE uint64_t credentialKey(const ZT_CredentialType &t,const uint32_t i) { return (((uint64_t)t << 32U) | (uint64_t)i); }
static ZT_ALWAYS_INLINE uint64_t credentialKey(const ZT_CredentialType &t,const uint32_t i) noexcept { return (((uint64_t)t << 32U) | (uint64_t)i); }
AddCredentialResult addCredential(const RuntimeEnvironment *RR,void *tPtr,const Identity &sourcePeerIdentity,const NetworkConfig &nconf,const CertificateOfMembership &com);
AddCredentialResult addCredential(const RuntimeEnvironment *RR,void *tPtr,const Identity &sourcePeerIdentity,const NetworkConfig &nconf,const Tag &tag);

View file

@ -16,7 +16,6 @@
#include "Constants.hpp"
#include "Mutex.hpp"
#include "AtomicCounter.hpp"
#define ZT_METER_HISTORY_LENGTH 4
#define ZT_METER_HISTORY_TICK_DURATION 1000
@ -29,7 +28,7 @@ namespace ZeroTier {
class Meter
{
public:
ZT_ALWAYS_INLINE Meter()
ZT_ALWAYS_INLINE Meter() noexcept
{
for(int i=0;i<ZT_METER_HISTORY_LENGTH;++i)
_history[i] = 0.0;
@ -38,7 +37,7 @@ public:
}
template<typename I>
ZT_ALWAYS_INLINE void log(const int64_t now,I count)
ZT_ALWAYS_INLINE void log(const int64_t now,I count) noexcept
{
const int64_t since = now - _ts;
if (since >= ZT_METER_HISTORY_TICK_DURATION) {
@ -50,7 +49,7 @@ public:
}
}
ZT_ALWAYS_INLINE double perSecond(const int64_t now) const
ZT_ALWAYS_INLINE double perSecond(const int64_t now) const noexcept
{
double r = 0.0,n = 0.0;
const int64_t since = (now - _ts);
@ -69,7 +68,7 @@ private:
volatile double _history[ZT_METER_HISTORY_LENGTH];
volatile int64_t _ts;
volatile uint64_t _count;
AtomicCounter<unsigned int> _hptr;
std::atomic<unsigned int> _hptr;
};
} // namespace ZeroTier

View file

@ -42,8 +42,8 @@ namespace ZeroTier {
class MulticastGroup : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE MulticastGroup() : _mac(),_adi(0) {}
ZT_ALWAYS_INLINE MulticastGroup(const MAC &m,uint32_t a) : _mac(m),_adi(a) {}
ZT_ALWAYS_INLINE MulticastGroup() noexcept : _mac(),_adi(0) {}
ZT_ALWAYS_INLINE MulticastGroup(const MAC &m,uint32_t a) noexcept : _mac(m),_adi(a) {}
/**
* Derive the multicast group used for address resolution (ARP/NDP) for an IP
@ -51,7 +51,7 @@ public:
* @param ip IP address (port field is ignored)
* @return Multicast group for ARP/NDP
*/
static ZT_ALWAYS_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress &ip)
static ZT_ALWAYS_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress &ip) noexcept
{
if (ip.isV4()) {
// IPv4 wants broadcast MACs, so we shove the V4 address itself into
@ -73,16 +73,16 @@ public:
/**
* @return Ethernet MAC portion of multicast group
*/
ZT_ALWAYS_INLINE const MAC &mac() const { return _mac; }
ZT_ALWAYS_INLINE const MAC &mac() const noexcept { return _mac; }
/**
* @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4 address
*/
ZT_ALWAYS_INLINE uint32_t adi() const { return _adi; }
ZT_ALWAYS_INLINE bool operator==(const MulticastGroup &g) const { return ((_mac == g._mac)&&(_adi == g._adi)); }
ZT_ALWAYS_INLINE bool operator!=(const MulticastGroup &g) const { return ((_mac != g._mac)||(_adi != g._adi)); }
ZT_ALWAYS_INLINE bool operator<(const MulticastGroup &g) const
ZT_ALWAYS_INLINE bool operator==(const MulticastGroup &g) const noexcept { return ((_mac == g._mac)&&(_adi == g._adi)); }
ZT_ALWAYS_INLINE bool operator!=(const MulticastGroup &g) const noexcept { return ((_mac != g._mac)||(_adi != g._adi)); }
ZT_ALWAYS_INLINE bool operator<(const MulticastGroup &g) const noexcept
{
if (_mac < g._mac)
return true;
@ -90,11 +90,11 @@ public:
return (_adi < g._adi);
return false;
}
ZT_ALWAYS_INLINE bool operator>(const MulticastGroup &g) const { return (g < *this); }
ZT_ALWAYS_INLINE bool operator<=(const MulticastGroup &g) const { return !(g < *this); }
ZT_ALWAYS_INLINE bool operator>=(const MulticastGroup &g) const { return !(*this < g); }
ZT_ALWAYS_INLINE bool operator>(const MulticastGroup &g) const noexcept { return (g < *this); }
ZT_ALWAYS_INLINE bool operator<=(const MulticastGroup &g) const noexcept { return !(g < *this); }
ZT_ALWAYS_INLINE bool operator>=(const MulticastGroup &g) const noexcept { return !(*this < g); }
ZT_ALWAYS_INLINE unsigned long hashCode() const { return (_mac.hashCode() ^ (unsigned long)_adi); }
ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return (_mac.hashCode() ^ (unsigned long)_adi); }
private:
MAC _mac;

View file

@ -19,34 +19,34 @@
#include <cstdint>
#include <cstdlib>
#ifdef __UNIX_LIKE__
#ifndef __WINDOWS__
#include <pthread.h>
#endif
namespace ZeroTier {
class Mutex
{
public:
ZT_ALWAYS_INLINE Mutex() { pthread_mutex_init(&_mh,0); }
ZT_ALWAYS_INLINE ~Mutex() { pthread_mutex_destroy(&_mh); }
ZT_ALWAYS_INLINE Mutex() noexcept { pthread_mutex_init(&_mh,0); }
ZT_ALWAYS_INLINE ~Mutex() noexcept { pthread_mutex_destroy(&_mh); }
ZT_ALWAYS_INLINE void lock() const { pthread_mutex_lock(&((const_cast <Mutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void unlock() const { pthread_mutex_unlock(&((const_cast <Mutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void lock() const noexcept { pthread_mutex_lock(&((const_cast <Mutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void unlock() const noexcept { pthread_mutex_unlock(&((const_cast <Mutex *> (this))->_mh)); }
class Lock
{
public:
ZT_ALWAYS_INLINE Lock(Mutex &m) : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const Mutex &m) : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE Lock(Mutex &m) noexcept : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const Mutex &m) noexcept : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
private:
Mutex *const _m;
};
private:
ZT_ALWAYS_INLINE Mutex(const Mutex &) {}
ZT_ALWAYS_INLINE const Mutex &operator=(const Mutex &) { return *this; }
ZT_ALWAYS_INLINE Mutex(const Mutex &) noexcept {}
ZT_ALWAYS_INLINE const Mutex &operator=(const Mutex &) noexcept { return *this; }
pthread_mutex_t _mh;
};
@ -54,47 +54,69 @@ private:
class RWMutex
{
public:
ZT_ALWAYS_INLINE RWMutex() { pthread_rwlock_init(&_mh,0); }
ZT_ALWAYS_INLINE ~RWMutex() { pthread_rwlock_destroy(&_mh); }
ZT_ALWAYS_INLINE RWMutex() noexcept { pthread_rwlock_init(&_mh,0); }
ZT_ALWAYS_INLINE ~RWMutex() noexcept { pthread_rwlock_destroy(&_mh); }
ZT_ALWAYS_INLINE void lock() const { pthread_rwlock_wrlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void rlock() const { pthread_rwlock_rdlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void unlock() const { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void runlock() const { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void lock() const noexcept { pthread_rwlock_wrlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void rlock() const noexcept { pthread_rwlock_rdlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void unlock() const noexcept { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void runlock() const noexcept { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
/**
 * RAII locker that acquires only the read lock (shared read)
*/
class RLock
{
public:
ZT_ALWAYS_INLINE RLock(RWMutex &m) : _m(&m) { m.rlock(); }
ZT_ALWAYS_INLINE RLock(const RWMutex &m) : _m(const_cast<RWMutex *>(&m)) { _m->rlock(); }
ZT_ALWAYS_INLINE RLock(RWMutex &m) noexcept : _m(&m) { m.rlock(); }
ZT_ALWAYS_INLINE RLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->rlock(); }
ZT_ALWAYS_INLINE ~RLock() { _m->runlock(); }
private:
RWMutex *const _m;
};
/**
 * RAII locker that acquires the write lock (exclusive write, no readers)
*/
class Lock
{
public:
ZT_ALWAYS_INLINE Lock(RWMutex &m) : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const RWMutex &m) : _m(const_cast<RWMutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE Lock(RWMutex &m) noexcept : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
private:
RWMutex *const _m;
};
/**
 * RAII locker that acquires the read lock first and can switch modes
*
* Use writing() to acquire the write lock if not already acquired. Use reading() to
* let go of the write lock and go back to only holding the read lock.
*/
class RMaybeWLock
{
public:
ZT_ALWAYS_INLINE RMaybeWLock(RWMutex &m) noexcept : _m(&m),_w(false) { m.rlock(); }
ZT_ALWAYS_INLINE RMaybeWLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)),_w(false) { _m->rlock(); }
ZT_ALWAYS_INLINE void writing() noexcept { if (!_w) { _w = true; _m->runlock(); _m->lock(); } }
ZT_ALWAYS_INLINE void reading() noexcept { if (_w) { _w = false; _m->unlock(); _m->rlock(); } }
ZT_ALWAYS_INLINE ~RMaybeWLock() { if (_w) _m->unlock(); else _m->runlock(); }
private:
ZT_ALWAYS_INLINE RWMutex(const RWMutex &) {}
ZT_ALWAYS_INLINE const RWMutex &operator=(const RWMutex &) { return *this; }
RWMutex *const _m;
bool _w;
};
private:
ZT_ALWAYS_INLINE RWMutex(const RWMutex &) noexcept {}
ZT_ALWAYS_INLINE const RWMutex &operator=(const RWMutex &) noexcept { return *this; }
pthread_rwlock_t _mh;
};
} // namespace ZeroTier
#endif
#ifdef __WINDOWS__
#if 0
#include <Windows.h>
namespace ZeroTier {
@ -127,7 +149,6 @@ private:
};
} // namespace ZeroTier
#endif // _WIN32
#endif
#endif

View file

@ -34,7 +34,7 @@ namespace ZeroTier {
namespace {
// Returns true if packet appears valid; pos and proto will be set
bool _ipv6GetPayload(const uint8_t *frameData,unsigned int frameLen,unsigned int &pos,unsigned int &proto)
bool _ipv6GetPayload(const uint8_t *frameData,unsigned int frameLen,unsigned int &pos,unsigned int &proto) noexcept
{
if (frameLen < 40)
return false;
@ -90,7 +90,7 @@ _doZtFilterResult _doZtFilter(
Address &cc, // MUTABLE -- set to TEE destination if TEE action is taken or left alone otherwise
unsigned int &ccLength, // MUTABLE -- set to length of packet payload to TEE
bool &ccWatch, // MUTABLE -- set to true for WATCH target as opposed to normal TEE
uint8_t &qosBucket) // MUTABLE -- set to the value of the argument provided to PRIORITY
uint8_t &qosBucket) noexcept // MUTABLE -- set to the value of the argument provided to PRIORITY
{
// Set to true if we are a TEE/REDIRECT/WATCH target
bool superAccept = false;

View file

@ -26,7 +26,6 @@
#include "Address.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "MulticastGroup.hpp"
#include "MAC.hpp"
#include "Buf.hpp"
@ -58,7 +57,7 @@ public:
/**
* Compute primary controller device ID from network ID
*/
static ZT_ALWAYS_INLINE Address controllerFor(uint64_t nwid) { return Address(nwid >> 24U); }
static ZT_ALWAYS_INLINE Address controllerFor(uint64_t nwid) noexcept { return Address(nwid >> 24U); }
/**
* Construct a new network
@ -76,14 +75,14 @@ public:
~Network();
ZT_ALWAYS_INLINE uint64_t id() const { return _id; }
ZT_ALWAYS_INLINE Address controller() const { return Address(_id >> 24U); }
ZT_ALWAYS_INLINE bool multicastEnabled() const { return (_config.multicastLimit > 0); }
ZT_ALWAYS_INLINE bool hasConfig() const { return (_config); }
ZT_ALWAYS_INLINE uint64_t lastConfigUpdate() const { return _lastConfigUpdate; }
ZT_ALWAYS_INLINE ZT_VirtualNetworkStatus status() const { return _status(); }
ZT_ALWAYS_INLINE const NetworkConfig &config() const { return _config; }
ZT_ALWAYS_INLINE const MAC &mac() const { return _mac; }
ZT_ALWAYS_INLINE uint64_t id() const noexcept { return _id; }
ZT_ALWAYS_INLINE Address controller() const noexcept { return Address(_id >> 24U); }
ZT_ALWAYS_INLINE bool multicastEnabled() const noexcept { return (_config.multicastLimit > 0); }
ZT_ALWAYS_INLINE bool hasConfig() const noexcept { return (_config); }
ZT_ALWAYS_INLINE uint64_t lastConfigUpdate() const noexcept { return _lastConfigUpdate; }
ZT_ALWAYS_INLINE ZT_VirtualNetworkStatus status() const noexcept { return _status(); }
ZT_ALWAYS_INLINE const NetworkConfig &config() const noexcept { return _config; }
ZT_ALWAYS_INLINE const MAC &mac() const noexcept { return _mac; }
/**
* Apply filters to an outgoing packet
@ -215,12 +214,12 @@ public:
/**
* Set netconf failure to 'access denied' -- called in IncomingPacket when controller reports this
*/
ZT_ALWAYS_INLINE void setAccessDenied() { _netconfFailure = NETCONF_FAILURE_ACCESS_DENIED; }
ZT_ALWAYS_INLINE void setAccessDenied() noexcept { _netconfFailure = NETCONF_FAILURE_ACCESS_DENIED; }
/**
* Set netconf failure to 'not found' -- called by IncomingPacket when controller reports this
*/
ZT_ALWAYS_INLINE void setNotFound() { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
ZT_ALWAYS_INLINE void setNotFound() noexcept { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
/**
* Determine whether this peer is permitted to communicate on this network
@ -355,7 +354,7 @@ public:
/**
* @return Externally usable pointer-to-pointer exported via the core API
*/
ZT_ALWAYS_INLINE void **userPtr() { return &_uPtr; }
ZT_ALWAYS_INLINE void **userPtr() noexcept { return &_uPtr; }
private:
void _requestConfiguration(void *tPtr);
@ -403,7 +402,7 @@ private:
Mutex _config_l;
Mutex _memberships_l;
AtomicCounter<int> __refCount;
std::atomic<int> __refCount;
};
} // namespace ZeroTier

View file

@ -32,6 +32,7 @@ bool NetworkConfig::toDictionary(Dictionary &d,bool includeLegacy) const
d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,this->credentialTimeMaxDelta);
d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION,this->revision);
d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,this->issuedTo.toString((char *)tmp));
d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH,this->issuedToIdentityHash,ZT_IDENTITY_HASH_SIZE);
d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,this->flags);
d.add(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,(uint64_t)this->multicastLimit);
d.add(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint16_t)this->type);
@ -119,6 +120,12 @@ bool NetworkConfig::fromDictionary(const Dictionary &d)
this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,0);
this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION,0);
this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,0);
const std::vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH]);
if (blob->size() == ZT_IDENTITY_HASH_SIZE) {
memcpy(this->issuedToIdentityHash,blob->data(),ZT_IDENTITY_HASH_SIZE);
} else {
memset(this->issuedToIdentityHash,0,ZT_IDENTITY_HASH_SIZE);
}
if (!this->issuedTo)
return false;
this->multicastLimit = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,0);
@ -135,7 +142,7 @@ bool NetworkConfig::fromDictionary(const Dictionary &d)
this->flags = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,0);
this->type = (ZT_VirtualNetworkType)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint64_t)ZT_NETWORK_TYPE_PRIVATE);
const std::vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
if (!blob->empty()) {
if (this->com.unmarshal(blob->data(),(int)(blob->size()) < 0))
return false;
@ -248,7 +255,7 @@ bool NetworkConfig::fromDictionary(const Dictionary &d)
return false;
}
bool NetworkConfig::addSpecialist(const Address &a,const uint64_t f)
bool NetworkConfig::addSpecialist(const Address &a,const uint64_t f) noexcept
{
const uint64_t aint = a.toInt();
for(unsigned int i=0;i<specialistCount;++i) {

View file

@ -124,6 +124,8 @@ namespace ZeroTier {
#define ZT_NETWORKCONFIG_DICT_KEY_REVISION "r"
// address of member
#define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO "id"
// full identity hash of member
#define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH "IDH"
// flags(hex)
#define ZT_NETWORKCONFIG_DICT_KEY_FLAGS "f"
// integer(hex)
@ -161,7 +163,7 @@ namespace ZeroTier {
*/
struct NetworkConfig : TriviallyCopyable
{
ZT_ALWAYS_INLINE NetworkConfig() { memoryZero(this); }
ZT_ALWAYS_INLINE NetworkConfig() noexcept { memoryZero(this); }
/**
* Write this network config to a dictionary for transport
@ -183,28 +185,28 @@ struct NetworkConfig : TriviallyCopyable
/**
* @return True if broadcast (ff:ff:ff:ff:ff:ff) address should work on this network
*/
ZT_ALWAYS_INLINE bool enableBroadcast() const { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0); }
ZT_ALWAYS_INLINE bool enableBroadcast() const noexcept { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0); }
/**
* @return True if IPv6 NDP emulation should be allowed for certain "magic" IPv6 address patterns
*/
ZT_ALWAYS_INLINE bool ndpEmulation() const { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0); }
ZT_ALWAYS_INLINE bool ndpEmulation() const noexcept { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0); }
/**
* @return Network type is public (no access control)
*/
ZT_ALWAYS_INLINE bool isPublic() const { return (this->type == ZT_NETWORK_TYPE_PUBLIC); }
ZT_ALWAYS_INLINE bool isPublic() const noexcept { return (this->type == ZT_NETWORK_TYPE_PUBLIC); }
/**
* @return Network type is private (certificate access control)
*/
ZT_ALWAYS_INLINE bool isPrivate() const { return (this->type == ZT_NETWORK_TYPE_PRIVATE); }
ZT_ALWAYS_INLINE bool isPrivate() const noexcept { return (this->type == ZT_NETWORK_TYPE_PRIVATE); }
/**
* @param fromPeer Peer attempting to bridge other Ethernet peers onto network
* @return True if this network allows bridging
*/
ZT_ALWAYS_INLINE bool permitsBridging(const Address &fromPeer) const
ZT_ALWAYS_INLINE bool permitsBridging(const Address &fromPeer) const noexcept
{
for(unsigned int i=0;i<specialistCount;++i) {
if ((fromPeer == specialists[i])&&((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0))
@ -213,9 +215,9 @@ struct NetworkConfig : TriviallyCopyable
return false;
}
ZT_ALWAYS_INLINE operator bool() const { return (networkId != 0); }
ZT_ALWAYS_INLINE bool operator==(const NetworkConfig &nc) const { return (memcmp(this,&nc,sizeof(NetworkConfig)) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const NetworkConfig &nc) const { return (!(*this == nc)); }
ZT_ALWAYS_INLINE operator bool() const noexcept { return (networkId != 0); }
ZT_ALWAYS_INLINE bool operator==(const NetworkConfig &nc) const noexcept { return (memcmp(this,&nc,sizeof(NetworkConfig)) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const NetworkConfig &nc) const noexcept { return (!(*this == nc)); }
/**
* Add a specialist or mask flags if already present
@ -227,7 +229,7 @@ struct NetworkConfig : TriviallyCopyable
* @param f Flags (OR of specialist role/type flags)
* @return True if successfully masked or added
*/
bool addSpecialist(const Address &a,uint64_t f);
bool addSpecialist(const Address &a,uint64_t f) noexcept;
ZT_ALWAYS_INLINE const Capability *capability(const uint32_t id) const
{

View file

@ -19,7 +19,6 @@
#include "SharedPtr.hpp"
#include "Node.hpp"
#include "NetworkController.hpp"
#include "Switch.hpp"
#include "Topology.hpp"
#include "Address.hpp"
#include "Identity.hpp"
@ -44,6 +43,7 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
_lastHousekeepingRun(0),
_lastNetworkHousekeepingRun(0),
_lastPathKeepaliveCheck(0),
_natMustDie(true),
_online(false)
{
_networks.resize(64); // _networksMask + 1, must be power of two
@ -78,6 +78,7 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr,(unsigned int)strlen(RR->publicIdentityStr));
}
#if 0
char *m = nullptr;
try {
m = reinterpret_cast<char *>(malloc(16 + sizeof(Trace) + sizeof(Switch) + sizeof(Topology) + sizeof(SelfAwareness)));
@ -100,27 +101,34 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
if (m) ::free(m);
throw;
}
#endif
postEvent(tPtr, ZT_EVENT_UP);
}
Node::~Node()
{
// Let go of all networks to leave them. Do it this way in case Network wants to
// do anything in its destructor that locks the _networks lock to avoid a deadlock.
std::vector< SharedPtr<Network> > networks;
{
RWMutex::Lock _l(_networks_m);
for(std::vector< SharedPtr<Network> >::iterator i(_networks.begin());i!=_networks.end();++i)
i->zero();
networks.swap(_networks);
}
networks.clear();
// NOTE(review): acquiring and immediately releasing _networks_m here appears to be
// a barrier ensuring no other thread still holds the lock before teardown proceeds;
// it also looks like leftover from a refactor alongside the swap above -- confirm.
_networks_m.lock();
_networks_m.unlock();
// Subsystems are destroyed via explicit destructor calls because they appear to be
// placement-new'd into the single rtmem allocation freed below (see constructor).
if (RR->sa) RR->sa->~SelfAwareness();
if (RR->topology) RR->topology->~Topology();
if (RR->sw) RR->sw->~Switch();
if (RR->t) RR->t->~Trace();
free(RR->rtmem);
// Let go of cached Buf objects. This does no harm if other nodes are running
// but usually that won't be the case.
// NOTE(review): both freeBufPool() and Buf::freePool() are invoked below -- this
// looks like merged old/new code from a refactor; confirm only one is intended.
freeBufPool();
// Let go of cached Buf objects. If other nodes happen to be running in this
// same process space new Bufs will be allocated as needed, but this is almost
// never the case. Calling this here saves RAM if we are running inside something
// that wants to keep running after tearing down its ZeroTier core instance.
Buf::freePool();
}
void Node::shutdown(void *tPtr)
@ -138,7 +146,7 @@ ZT_ResultCode Node::processWirePacket(
volatile int64_t *nextBackgroundTaskDeadline)
{
_now = now;
RR->sw->onRemotePacket(tptr,localSocket,*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
//RR->sw->onRemotePacket(tptr,localSocket,*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
return ZT_RESULT_OK;
}
@ -157,7 +165,7 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
_now = now;
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
RR->sw->onLocalEthernet(tptr,nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
//RR->sw->onLocalEthernet(tptr,nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
return ZT_RESULT_OK;
} else {
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
@ -184,7 +192,7 @@ struct _processBackgroundTasks_ping_eachPeer
}
};
static uint8_t junk = 0; // junk payload for keepalive packets
static uint8_t keepAlivePayload = 0; // junk payload for keepalive packets
struct _processBackgroundTasks_path_keepalive
{
int64_t now;
@ -193,9 +201,8 @@ struct _processBackgroundTasks_path_keepalive
ZT_ALWAYS_INLINE void operator()(const SharedPtr<Path> &path)
{
if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
++junk;
path->send(RR,tPtr,&junk,sizeof(junk),now);
path->sent(now);
++keepAlivePayload;
path->send(RR,tPtr,&keepAlivePayload,1,now);
}
}
};
@ -227,8 +234,9 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
// This will give us updated locators for these roots which may contain new
// IP addresses. It will also auto-discover IPs for roots that were not added
// with an initial bootstrap address.
for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
RR->sw->requestWhois(tPtr,now,*r);
// TODO
//for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
// RR->sw->requestWhois(tPtr,now,*r);
}
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
@ -281,8 +289,30 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
RR->topology->eachPath<_processBackgroundTasks_path_keepalive &>(pf);
}
int64_t earliestAlarmAt = 0x7fffffffffffffffLL;
std::vector<Address> bzzt;
{
RWMutex::RMaybeWLock l(_peerAlarms_l);
for(std::map<Address,int64_t>::iterator a(_peerAlarms.begin());a!=_peerAlarms.end();) {
if (now >= a->second) {
bzzt.push_back(a->first);
l.write(); // acquire write lock if not already in write mode
_peerAlarms.erase(a++);
} else {
if (a->second < earliestAlarmAt)
earliestAlarmAt = a->second;
++a;
}
}
}
for(std::vector<Address>::iterator a(bzzt.begin());a!=bzzt.end();++a) {
const SharedPtr<Peer> p(RR->topology->peer(tPtr,*a,false));
if (p)
p->alarm(tPtr,now);
}
try {
*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,RR->sw->doTimerTasks(tPtr, now)), (unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
*nextBackgroundTaskDeadline = std::min(earliestAlarmAt,now + ZT_MAX_TIMER_TASK_INTERVAL);
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}

View file

@ -14,12 +14,6 @@
#ifndef ZT_NODE_HPP
#define ZT_NODE_HPP
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "InetAddress.hpp"
@ -31,6 +25,12 @@
#include "NetworkController.hpp"
#include "Hashtable.hpp"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <map>
// Bit mask for "expecting reply" hash
#define ZT_EXPECTING_REPLIES_BUCKET_MASK1 255
#define ZT_EXPECTING_REPLIES_BUCKET_MASK2 31
@ -327,6 +327,11 @@ public:
return false;
}
/**
* @return True if aggressive NAT-traversal mechanisms like scanning of <1024 ports are enabled
*/
ZT_ALWAYS_INLINE bool natMustDie() const { return _natMustDie; }
/**
* Check whether we should do potentially expensive identity verification (rate limit)
*
@ -344,6 +349,20 @@ public:
return false;
}
/**
* Wake any peers with the given address by calling their alarm() methods at or after the specified time
*
* @param peerAddress Peer address
* @param triggerTime Time alarm should go off
*/
ZT_ALWAYS_INLINE void setPeerAlarm(const Address &peerAddress,const int64_t triggerTime)
{
	RWMutex::Lock l(_peerAlarms_l);
	// operator[] value-initializes a fresh entry to 0, which we treat as "no alarm set".
	int64_t &current = _peerAlarms[peerAddress];
	const bool noAlarmYet = (current <= 0);
	// Keep whichever trigger time is earlier so an alarm can only be moved forward.
	if (noAlarmYet || (triggerTime < current))
		current = triggerTime;
}
/**
* Check whether a local controller has authorized a member on a network
*
@ -370,18 +389,27 @@ private:
ZT_Node_Callbacks _cb;
void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
// For tracking packet IDs to filter out OK/ERROR replies to packets we did not send
// For tracking packet IDs to filter out OK/ERROR replies to packets we did not send.
volatile uint8_t _expectingRepliesToBucketPtr[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1];
volatile uint32_t _expectingRepliesTo[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1][ZT_EXPECTING_REPLIES_BUCKET_MASK2 + 1];
// Time of last identity verification indexed by InetAddress.rateGateHash() -- used in IncomingPacket::_doHELLO() via rateGateIdentityVerification()
volatile int64_t _lastIdentityVerification[16384];
/* Map that remembers if we have recently sent a network config to someone
* querying us as a controller. This is an optimization to allow network
* controllers to know whether to treat things like multicast queries the
* way authorized members would be treated without requiring an extra cert
* validation. */
// Addresses of peers that want to have their alarm() function called at some point in the future.
// These behave like weak references in that the node looks them up in Topology and calls alarm()
// in each peer if that peer object is still held in memory. Calling alarm() unnecessarily on a peer
// is harmless. This just exists as an optimization to prevent having to iterate through all peers
// on every processBackgroundTasks call. A simple map<> is used here because there are usually only
// a few of these, if any, and it's slightly faster and lower memory in that case than a Hashtable.
std::map<Address,int64_t> _peerAlarms;
RWMutex _peerAlarms_l;
// Map that remembers if we have recently sent a network config to someone
// querying us as a controller. This is an optimization to allow network
// controllers to know whether to treat things like multicast queries the
// way authorized members would be treated without requiring an extra cert
// validation.
struct _LocalControllerAuth
{
uint64_t nwid,address;
@ -391,17 +419,20 @@ private:
ZT_ALWAYS_INLINE bool operator!=(const _LocalControllerAuth &a) const { return ((a.nwid != nwid)||(a.address != address)); }
};
Hashtable< _LocalControllerAuth,int64_t > _localControllerAuthorizations;
Mutex _localControllerAuthorizations_m;
// Networks are stored in a flat hash table that is resized on any network ID collision. This makes
// network lookup by network ID a few bitwise ops and an array index.
std::vector< SharedPtr<Network> > _networks;
uint64_t _networksMask;
std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
Mutex _localControllerAuthorizations_m;
RWMutex _networks_m;
// These are local interface addresses that have been configured via the API
// and can be pushed to other nodes.
std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
Mutex _localInterfaceAddresses_m;
// This is locked while running processBackgroundTasks to ensure that calls to it are not concurrent.
Mutex _backgroundTasksLock;
volatile int64_t _now;
@ -409,6 +440,7 @@ private:
volatile int64_t _lastHousekeepingRun;
volatile int64_t _lastNetworkHousekeepingRun;
volatile int64_t _lastPathKeepaliveCheck;
volatile bool _natMustDie;
volatile bool _online;
};

View file

@ -138,13 +138,16 @@
#endif
#if __cplusplus > 199711L
#include <atomic>
#ifndef __CPP11__
#define __CPP11__
#endif
#endif
#ifndef __CPP11__
/* TODO: will need some kind of very basic atomic<> implementation if we want to compile on pre-c++11 compilers */
#define nullptr (0)
#define constexpr ZT_ALWAYS_INLINE
#define noexcept throw()
#endif
#ifdef SOCKET

View file

@ -17,7 +17,7 @@
namespace ZeroTier {
bool Path::send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now)
bool Path::send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now) noexcept
{
if (RR->node->putPacket(tPtr,_localSocket,_addr,data,len)) {
_lastOut = now;
@ -26,7 +26,7 @@ bool Path::send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigne
return false;
}
bool Path::isAddressValidForPath(const InetAddress &a)
bool Path::isAddressValidForPath(const InetAddress &a) noexcept
{
if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
switch(a.ipScope()) {

View file

@ -24,7 +24,6 @@
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Utils.hpp"
#include "Mutex.hpp"
@ -47,7 +46,7 @@ class Path
friend class Defragmenter;
public:
ZT_ALWAYS_INLINE Path(const int64_t l,const InetAddress &r) :
ZT_ALWAYS_INLINE Path(const int64_t l,const InetAddress &r) noexcept :
_localSocket(l),
_lastIn(0),
_lastOut(0),
@ -65,55 +64,55 @@ public:
* @param now Current time
* @return True if transport reported success
*/
bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now);
bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now) noexcept;
/**
* Explicitly update last sent time
*
* @param t Time of send
*/
ZT_ALWAYS_INLINE void sent(const int64_t t) { _lastOut = t; }
ZT_ALWAYS_INLINE void sent(const int64_t t) noexcept { _lastOut = t; }
/**
* Called when a packet is received from this remote path, regardless of content
*
* @param t Time of receive
*/
ZT_ALWAYS_INLINE void received(const int64_t t) { _lastIn = t; }
ZT_ALWAYS_INLINE void received(const int64_t t) noexcept { _lastIn = t; }
/**
* Check path aliveness
*
* @param now Current time
*/
ZT_ALWAYS_INLINE bool alive(const int64_t now) const { return ((now - _lastIn) < ZT_PATH_ALIVE_TIMEOUT); }
ZT_ALWAYS_INLINE bool alive(const int64_t now) const noexcept { return ((now - _lastIn) < ZT_PATH_ALIVE_TIMEOUT); }
/**
* Check if path is considered active
*
* @param now Current time
*/
ZT_ALWAYS_INLINE bool active(const int64_t now) const { return ((now - _lastIn) < ZT_PATH_ACTIVITY_TIMEOUT); }
ZT_ALWAYS_INLINE bool active(const int64_t now) const noexcept { return ((now - _lastIn) < ZT_PATH_ACTIVITY_TIMEOUT); }
/**
* @return Physical address
*/
ZT_ALWAYS_INLINE const InetAddress &address() const { return _addr; }
ZT_ALWAYS_INLINE const InetAddress &address() const noexcept { return _addr; }
/**
* @return Local socket as specified by external code
*/
ZT_ALWAYS_INLINE int64_t localSocket() const { return _localSocket; }
ZT_ALWAYS_INLINE int64_t localSocket() const noexcept { return _localSocket; }
/**
* @return Last time we received anything
*/
ZT_ALWAYS_INLINE int64_t lastIn() const { return _lastIn; }
ZT_ALWAYS_INLINE int64_t lastIn() const noexcept { return _lastIn; }
/**
* @return Last time we sent something
*/
ZT_ALWAYS_INLINE int64_t lastOut() const { return _lastOut; }
ZT_ALWAYS_INLINE int64_t lastOut() const noexcept { return _lastOut; }
/**
* Check whether this address is valid for a ZeroTier path
@ -124,7 +123,7 @@ public:
* @param a Address to check
* @return True if address is good for ZeroTier path use
*/
static bool isAddressValidForPath(const InetAddress &a);
static bool isAddressValidForPath(const InetAddress &a) noexcept;
private:
int64_t _localSocket;
@ -138,7 +137,7 @@ private:
std::set<uint64_t> _inboundFragmentedMessages;
Mutex _inboundFragmentedMessages_l;
AtomicCounter<int> __refCount;
std::atomic<int> __refCount;
};
} // namespace ZeroTier

View file

@ -40,9 +40,11 @@ Peer::Peer(const RuntimeEnvironment *renv) :
_lastWhoisRequestReceived(0),
_lastEchoRequestReceived(0),
_lastPushDirectPathsReceived(0),
_lastProbeReceived(0),
_lastAttemptedP2PInit(0),
_lastTriedStaticPath(0),
_lastPrioritizedPaths(0),
_lastAttemptedAggressiveNATTraversal(0),
_latency(0xffff),
_alivePathCount(0),
_vProto(0),
@ -52,12 +54,16 @@ Peer::Peer(const RuntimeEnvironment *renv) :
{
}
bool Peer::init(const Identity &myIdentity,const Identity &peerIdentity)
bool Peer::init(const Identity &peerIdentity)
{
RWMutex::Lock l(_lock);
if (_id == peerIdentity)
return true;
_id = peerIdentity;
return myIdentity.agree(peerIdentity,_key);
if (!RR->identity.agree(peerIdentity,_key))
return false;
_incomingProbe = Protocol::createProbe(_id,RR->identity,_key);
return true;
}
void Peer::received(
@ -66,10 +72,7 @@ void Peer::received(
const unsigned int hops,
const uint64_t packetId,
const unsigned int payloadLength,
const Protocol::Verb verb,
const uint64_t inRePacketId,
const Protocol::Verb inReVerb,
const uint64_t networkId)
const Protocol::Verb verb)
{
const int64_t now = RR->node->now();
_lastReceive = now;
@ -204,25 +207,6 @@ path_check_done:
}
}
bool Peer::shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const
{
int maxHaveScope = -1;
{
RWMutex::RLock l(_lock);
for (unsigned int i = 0; i < _alivePathCount; ++i) {
if (_paths[i]) {
if (_paths[i]->address().ipsEqual2(addr))
return false;
int s = (int)_paths[i]->address().ipScope();
if (s > maxHaveScope)
maxHaveScope = s;
}
}
}
return ( ((int)addr.ipScope() > maxHaveScope) && RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr) );
}
void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
#if 0
@ -247,6 +231,19 @@ void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atA
#endif
}
void Peer::sendNOP(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
Buf outp;
Protocol::Header &ph = outp.as<Protocol::Header>();
ph.packetId = Protocol::getPacketId();
_id.address().copyTo(ph.destination);
RR->identity.address().copyTo(ph.source);
ph.flags = 0;
ph.verb = Protocol::VERB_NOP;
Protocol::armor(outp,sizeof(Protocol::Header),_key,ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
RR->node->putPacket(tPtr,localSocket,atAddress,outp.b,sizeof(Protocol::Header));
}
void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
{
RWMutex::RLock l(_lock);
@ -289,7 +286,7 @@ void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddres
}
}
void Peer::updateLatency(const unsigned int l)
void Peer::updateLatency(const unsigned int l) noexcept
{
if ((l > 0)&&(l < 0xffff)) {
unsigned int lat = _latency;
@ -301,31 +298,6 @@ void Peer::updateLatency(const unsigned int l)
}
}
bool Peer::sendDirect(void *tPtr,const void *data,const unsigned int len,const int64_t now)
{
	// If it's been a while since paths were last ranked, re-prioritize under the
	// write lock; otherwise a cheaper read lock suffices to use the current best path.
	bool sent = false;
	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
		_lastPrioritizedPaths = now;
		_lock.lock();
		_prioritizePaths(now);
		if (_alivePathCount != 0)
			sent = _paths[0]->send(RR,tPtr,data,len,now);
		_lock.unlock();
	} else {
		_lock.rlock();
		if (_alivePathCount != 0)
			sent = _paths[0]->send(RR,tPtr,data,len,now);
		_lock.runlock();
	}
	return sent;
}
SharedPtr<Path> Peer::path(const int64_t now)
{
if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
@ -343,6 +315,19 @@ SharedPtr<Path> Peer::path(const int64_t now)
}
}
bool Peer::direct(const int64_t now)
{
	// Fast path: prioritization is still fresh, just read the alive path count.
	if ((now - _lastPrioritizedPaths) <= ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
		RWMutex::RLock l(_lock);
		return (_alivePathCount > 0);
	}
	// Otherwise re-rank paths under the write lock before answering.
	_lastPrioritizedPaths = now;
	RWMutex::Lock l(_lock);
	_prioritizePaths(now);
	return (_alivePathCount > 0);
}
void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
{
RWMutex::RLock l(_lock);
@ -369,7 +354,136 @@ void Peer::save(void *tPtr) const
free(buf);
}
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const
// Attempt to contact this peer at a physical endpoint, queueing NAT traversal
// work to be executed by alarm() callbacks scheduled via Node::setPeerAlarm().
void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,bool behindSymmetric,bool bfg1024)
{
// NOTE(review): function-local static shared across all Peer instances; its value is
// irrelevant (junk payload) so the unsynchronized increment appears benign -- confirm.
static uint8_t junk = 0;
InetAddress phyAddr(ep.inetAddr());
if (phyAddr) { // only this endpoint type is currently implemented
if (!RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,phyAddr))
return;
// Sending a packet with a low TTL before the real message assists traversal with some
// stateful firewalls and is harmless otherwise AFAIK.
// NOTE(review): the trailing argument 2 is presumably the low TTL -- confirm putPacket signature.
++junk;
RR->node->putPacket(tPtr,-1,phyAddr,&junk,1,2);
// In a few hundred milliseconds we'll send the real packet.
{
RWMutex::Lock l(_lock);
_contactQueue.push_back(_ContactQueueItem(phyAddr,ZT_MAX_PEER_NETWORK_PATHS));
}
// If the peer indicates that they may be behind a symmetric NAT and there are no
// living direct paths, try a few more aggressive things.
if ((behindSymmetric) && (phyAddr.ss_family == AF_INET) && (!direct(now))) {
unsigned int port = phyAddr.port();
if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
// If the other side is using a low-numbered port and has elected to
// have this done, we can try scanning every port below 1024. The search
// space here is small enough that we have a very good chance of punching.
// Generate a random order list of all <1024 ports except 0 and the original sending port.
uint16_t ports[1022];
uint16_t ctr = 1;
for (int i=0;i<1022;++i) {
if (ctr == port) ++ctr;
ports[i] = ctr++;
}
// Partial shuffle: 512 random pair swaps (not a full Fisher-Yates), which is
// enough to randomize probe order over this small search space.
for (int i=0;i<512;++i) {
uint64_t rn = Utils::random();
unsigned int a = ((unsigned int)rn) % 1022;
unsigned int b = ((unsigned int)(rn >> 24U)) % 1022;
if (a != b) {
uint16_t tmp = ports[a];
ports[a] = ports[b];
ports[b] = tmp;
}
}
// Chunk ports into chunks of 128 to try in few hundred millisecond intervals,
// abandoning attempts once there is at least one direct path.
// (7 chunks of 128 = 896 ports, plus a final chunk of the remaining 126;
// alivePathThreshold of 1 makes alarm() drop them once any path is alive.)
{
RWMutex::Lock l(_lock);
for (int i=0;i<896;i+=128)
_contactQueue.push_back(_ContactQueueItem(phyAddr,ports + i,ports + i + 128,1));
_contactQueue.push_back(_ContactQueueItem(phyAddr,ports + 896,ports + 1022,1));
}
} else {
// Otherwise use the simpler sequential port attempt method in intervals.
// Tries the next three ports above the observed one (many NATs allocate sequentially).
RWMutex::Lock l(_lock);
for (int k=0;k<3;++k) {
if (++port > 65535) break;
InetAddress tryNext(phyAddr);
tryNext.setPort(port);
_contactQueue.push_back(_ContactQueueItem(tryNext,1));
}
}
}
// Start alarms going off to actually send these...
RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
}
// Alarm callback invoked by Node: dequeues one batch of contact attempts and
// sends a probe (or legacy NOP) to each target address/port in it.
void Peer::alarm(void *tPtr,const int64_t now)
{
// Pop one contact queue item and also clean the queue of any that are no
// longer applicable because the alive path count has exceeded their threshold.
bool stillHaveContactQueueItems;
_ContactQueueItem qi;
{
RWMutex::Lock l(_lock);
if (_contactQueue.empty())
return;
// Discard stale entries at the head whose alive-path threshold is already met.
while (_alivePathCount >= _contactQueue.front().alivePathThreshold) {
_contactQueue.pop_front();
if (_contactQueue.empty())
return;
}
// Take the head item by field, swapping the ports vector to avoid copying it.
_ContactQueueItem &qi2 = _contactQueue.front();
qi.address = qi2.address;
qi.ports.swap(qi2.ports);
qi.alivePathThreshold = qi2.alivePathThreshold;
_contactQueue.pop_front();
// Purge any remaining entries whose threshold is met; erase(q++) is the
// standard idiom for erasing from a std::list while iterating.
for(std::list<_ContactQueueItem>::iterator q(_contactQueue.begin());q!=_contactQueue.end();) {
if (_alivePathCount >= q->alivePathThreshold)
_contactQueue.erase(q++);
else ++q;
}
stillHaveContactQueueItems = !_contactQueue.empty();
}
// Send outside the lock. Protocol >= 11 peers get the compact probe; older
// peers get a full NOP message instead.
// NOTE(review): _vProto is read here without holding _lock (it is volatile) -- confirm intended.
if (_vProto >= 11) {
uint64_t outgoingProbe = Protocol::createProbe(RR->identity,_id,_key);
if (qi.ports.empty()) {
RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
} else {
// A non-empty port list means probe the same IP at each listed port.
for (std::vector<uint16_t>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) {
qi.address.setPort(*p);
RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
}
}
} else {
if (qi.ports.empty()) {
this->sendNOP(tPtr,-1,qi.address,now);
} else {
for (std::vector<uint16_t>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) {
qi.address.setPort(*p);
this->sendNOP(tPtr,-1,qi.address,now);
}
}
}
// Re-arm the alarm if more queued batches remain.
if (stillHaveContactQueueItems)
RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
}
int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
RWMutex::RLock l(_lock);
@ -403,7 +517,10 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const
return p;
}
int Peer::unmarshal(const uint8_t *restrict data,const int len)
int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
int p;
{
RWMutex::Lock l(_lock);
@ -413,7 +530,7 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len)
int s = _id.unmarshal(data + 1,len - 1);
if (s <= 0)
return s;
int p = 1 + s;
p = 1 + s;
s = _locator.unmarshal(data + p,len - p);
if (s <= 0)
return s;
@ -436,6 +553,11 @@ int Peer::unmarshal(const uint8_t *restrict data,const int len)
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
}
if (!RR->identity.agree(_id,_key))
return -1;
_incomingProbe = Protocol::createProbe(_id,RR->identity,_key);
return p;
}

View file

@ -23,7 +23,6 @@
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Hashtable.hpp"
#include "Mutex.hpp"
#include "Endpoint.hpp"
@ -31,6 +30,7 @@
#include "Protocol.hpp"
#include <vector>
#include <list>
// version, identity, locator, bootstrap, version info, length of any additional fields
#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + ZT_INETADDRESS_MARSHAL_SIZE_MAX + (2*4) + 2)
@ -66,26 +66,25 @@ public:
/**
* Initialize peer with an identity
*
* @param myIdentity This node's identity including secret key
* @param peerIdentity The peer's identity
* @return True if initialization was successful
*/
bool init(const Identity &myIdentity,const Identity &peerIdentity);
bool init(const Identity &peerIdentity);
/**
* @return This peer's ZT address (short for identity().address())
*/
ZT_ALWAYS_INLINE const Address &address() const { return _id.address(); }
ZT_ALWAYS_INLINE const Address &address() const noexcept { return _id.address(); }
/**
* @return This peer's identity
*/
ZT_ALWAYS_INLINE const Identity &identity() const { return _id; }
ZT_ALWAYS_INLINE const Identity &identity() const noexcept { return _id; }
/**
* @return Copy of current locator
*/
ZT_ALWAYS_INLINE Locator locator() const
ZT_ALWAYS_INLINE Locator locator() const noexcept
{
RWMutex::RLock l(_lock);
return _locator;
@ -102,9 +101,6 @@ public:
* @param hops ZeroTier (not IP) hops
* @param packetId Packet ID
* @param verb Packet verb
* @param inRePacketId Packet ID in reply to (default: none)
* @param inReVerb Verb in reply to (for OK/ERROR, default: VERB_NOP)
* @param networkId Network ID if this packet is related to a network, 0 otherwise
*/
void received(
void *tPtr,
@ -112,20 +108,7 @@ public:
unsigned int hops,
uint64_t packetId,
unsigned int payloadLength,
Protocol::Verb verb,
uint64_t inRePacketId,
Protocol::Verb inReVerb,
uint64_t networkId);
/**
* Check whether a path to this peer should be tried if received via e.g. RENDEZVOUS OR PUSH_DIRECT_PATHS
*
* @param now Current time
* @param suggestingPeer Peer suggesting path (may be this peer)
* @param addr Remote address
* @return True if we have an active path to this destination
*/
bool shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const;
Protocol::Verb verb);
/**
* Send a HELLO to this peer at a specified physical address
@ -139,6 +122,16 @@ public:
*/
void sendHELLO(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
/**
* Send a NOP message to e.g. probe a new link
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param localSocket Local source socket
* @param atAddress Destination address
* @param now Current time
*/
void sendNOP(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
/**
* Send ping to this peer
*
@ -170,19 +163,19 @@ public:
*
* @param l New latency measurement (in milliseconds)
*/
void updateLatency(unsigned int l);
void updateLatency(unsigned int l) noexcept;
/**
* @return Bootstrap address or NULL if none
*/
ZT_ALWAYS_INLINE const Endpoint &bootstrap() const { return _bootstrap; }
ZT_ALWAYS_INLINE const Endpoint &bootstrap() const noexcept { return _bootstrap; }
/**
* Set bootstrap endpoint
*
* @param ep Bootstrap endpoint
*/
ZT_ALWAYS_INLINE void setBootstrap(const Endpoint &ep)
ZT_ALWAYS_INLINE void setBootstrap(const Endpoint &ep) noexcept
{
_lock.lock();
_bootstrap = ep;
@ -192,27 +185,32 @@ public:
/**
* @return Time of last receive of anything, whether direct or relayed
*/
ZT_ALWAYS_INLINE int64_t lastReceive() const { return _lastReceive; }
ZT_ALWAYS_INLINE int64_t lastReceive() const noexcept { return _lastReceive; }
/**
* @return True if we've heard from this peer in less than ZT_PEER_ALIVE_TIMEOUT
*/
ZT_ALWAYS_INLINE bool alive(const int64_t now) const { return ((now - _lastReceive) < ZT_PEER_ALIVE_TIMEOUT); }
ZT_ALWAYS_INLINE bool alive(const int64_t now) const noexcept { return ((now - _lastReceive) < ZT_PEER_ALIVE_TIMEOUT); }
/**
* @return True if we've heard from this peer in less than ZT_PEER_ACTIVITY_TIMEOUT
*/
ZT_ALWAYS_INLINE bool active(const int64_t now) const { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
ZT_ALWAYS_INLINE bool active(const int64_t now) const noexcept { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
/**
* @return Latency in milliseconds of best/aggregate path or 0xffff if unknown
*/
ZT_ALWAYS_INLINE unsigned int latency() const { return _latency; }
ZT_ALWAYS_INLINE unsigned int latency() const noexcept { return _latency; }
/**
* @return 256-bit secret symmetric encryption key
*/
ZT_ALWAYS_INLINE const unsigned char *key() const { return _key; }
ZT_ALWAYS_INLINE const unsigned char *key() const noexcept { return _key; }
/**
* @return Incoming probe packet (in big-endian byte order)
*/
ZT_ALWAYS_INLINE uint64_t incomingProbe() const noexcept { return _incomingProbe; }
/**
* Set the currently known remote version of this peer's client
@ -222,7 +220,7 @@ public:
* @param vmin Minor version
* @param vrev Revision
*/
ZT_ALWAYS_INLINE void setRemoteVersion(unsigned int vproto,unsigned int vmaj,unsigned int vmin,unsigned int vrev)
ZT_ALWAYS_INLINE void setRemoteVersion(unsigned int vproto,unsigned int vmaj,unsigned int vmin,unsigned int vrev) noexcept
{
_vProto = (uint16_t)vproto;
_vMajor = (uint16_t)vmaj;
@ -230,16 +228,16 @@ public:
_vRevision = (uint16_t)vrev;
}
ZT_ALWAYS_INLINE unsigned int remoteVersionProtocol() const { return _vProto; }
ZT_ALWAYS_INLINE unsigned int remoteVersionMajor() const { return _vMajor; }
ZT_ALWAYS_INLINE unsigned int remoteVersionMinor() const { return _vMinor; }
ZT_ALWAYS_INLINE unsigned int remoteVersionRevision() const { return _vRevision; }
ZT_ALWAYS_INLINE bool remoteVersionKnown() const { return ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)); }
ZT_ALWAYS_INLINE unsigned int remoteVersionProtocol() const noexcept { return _vProto; }
ZT_ALWAYS_INLINE unsigned int remoteVersionMajor() const noexcept { return _vMajor; }
ZT_ALWAYS_INLINE unsigned int remoteVersionMinor() const noexcept { return _vMinor; }
ZT_ALWAYS_INLINE unsigned int remoteVersionRevision() const noexcept { return _vRevision; }
ZT_ALWAYS_INLINE bool remoteVersionKnown() const noexcept { return ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)); }
/**
* Rate limit gate for inbound WHOIS requests
*/
ZT_ALWAYS_INLINE bool rateGateInboundWhoisRequest(const int64_t now)
ZT_ALWAYS_INLINE bool rateGateInboundWhoisRequest(const int64_t now) noexcept
{
if ((now - _lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
_lastWhoisRequestReceived = now;
@ -251,7 +249,7 @@ public:
/**
* Rate limit gate for inbound PUSH_DIRECT_PATHS requests
*/
ZT_ALWAYS_INLINE bool rateGateInboundPushDirectPaths(const int64_t now)
ZT_ALWAYS_INLINE bool rateGateInboundPushDirectPaths(const int64_t now) noexcept
{
if ((now - _lastPushDirectPathsReceived) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
_lastPushDirectPathsReceived = now;
@ -260,10 +258,22 @@ public:
return false;
}
/**
* Rate limit attempts in response to incoming short probe packets
*/
ZT_ALWAYS_INLINE bool rateGateInboundProbe(const int64_t now) noexcept
{
	// Deny if a probe was already accepted within the interval.
	// NOTE(review): reuses ZT_DIRECT_PATH_PUSH_INTERVAL rather than a probe-specific
	// constant -- presumably intentional; confirm.
	if ((now - _lastProbeReceived) < ZT_DIRECT_PATH_PUSH_INTERVAL)
		return false;
	_lastProbeReceived = now;
	return true;
}
/**
* Rate limit gate for inbound ECHO requests
*/
ZT_ALWAYS_INLINE bool rateGateEchoRequest(const int64_t now)
ZT_ALWAYS_INLINE bool rateGateEchoRequest(const int64_t now) noexcept
{
if ((now - _lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
_lastEchoRequestReceived = now;
@ -272,22 +282,16 @@ public:
return false;
}
/**
* Send directly if a direct path exists
*
* @param tPtr Thread pointer supplied by user
* @param data Data to send
* @param len Length of data
* @param now Current time
* @return True if packet appears to have been sent, false if no path or send failed
*/
bool sendDirect(void *tPtr,const void *data,unsigned int len,int64_t now);
/**
* @return Current best path
*/
SharedPtr<Path> path(int64_t now);
/**
* @return True if there is at least one alive direct path
*/
bool direct(int64_t now);
/**
* Get all paths
*
@ -300,11 +304,32 @@ public:
*/
void save(void *tPtr) const;
/**
* Attempt to contact this peer at a physical address
*
* This checks rate limits, path usability, sometimes deploys advanced NAT-t techniques, etc.
*
* @param tPtr External user pointer we pass around
* @param ep Endpoint to attempt to contact
* @param now Current time
* @param behindSymmetric This peer may be behind a symmetric NAT (only meaningful for IPv4)
* @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
*/
void contact(void *tPtr,const Endpoint &ep,int64_t now,bool behindSymmetric,bool bfg1024);
/**
* Called by Node when an alarm set by this peer goes off
*
* @param tPtr External user pointer we pass around
* @param now Current time
*/
void alarm(void *tPtr,int64_t now);
// NOTE: peer marshal/unmarshal only saves/restores the identity, locator, most
// recent bootstrap address, and version information.
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_PEER_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const;
int unmarshal(const uint8_t *restrict data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_PEER_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *restrict data,int len) noexcept;
private:
void _prioritizePaths(int64_t now);
@ -317,19 +342,44 @@ private:
volatile int64_t _lastWhoisRequestReceived;
volatile int64_t _lastEchoRequestReceived;
volatile int64_t _lastPushDirectPathsReceived;
volatile int64_t _lastProbeReceived;
volatile int64_t _lastAttemptedP2PInit;
volatile int64_t _lastTriedStaticPath;
volatile int64_t _lastPrioritizedPaths;
volatile int64_t _lastAttemptedAggressiveNATTraversal;
volatile unsigned int _latency;
AtomicCounter<int> __refCount;
std::atomic<int> __refCount;
RWMutex _lock; // locks _alivePathCount, _paths, _locator, and _bootstrap.
// Lock for non-volatile read/write fields
RWMutex _lock;
// Number of paths current alive as of last _prioritizePaths
unsigned int _alivePathCount;
// Direct paths sorted in descending order of preference (can be NULL, if first is NULL there's no direct path)
SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
// Queue of batches of one or more physical addresses to try at some point in the future (for NAT traversal logic)
struct _ContactQueueItem
{
	ZT_ALWAYS_INLINE _ContactQueueItem() {}

	// Queue an address together with an explicit list of candidate ports to try.
	ZT_ALWAYS_INLINE _ContactQueueItem(const InetAddress &addr,const uint16_t *portBegin,const uint16_t *portEnd,const unsigned int threshold) :
		address(addr),
		ports(portBegin,portEnd),
		alivePathThreshold(threshold) {}

	// Queue an address by itself; the port already embedded in 'addr' will be used.
	ZT_ALWAYS_INLINE _ContactQueueItem(const InetAddress &addr,const unsigned int threshold) :
		address(addr),
		ports(),
		alivePathThreshold(threshold) {}

	InetAddress address;
	std::vector<uint16_t> ports; // if non-empty try these ports, otherwise use the one in address
	unsigned int alivePathThreshold; // skip and forget if alive path count is >= this
};
std::list<_ContactQueueItem> _contactQueue;
Identity _id;
uint64_t _incomingProbe;
Locator _locator;
Endpoint _bootstrap; // right now only InetAddress endpoints are supported for bootstrap

View file

@ -467,7 +467,8 @@ poly1305_finish(poly1305_context *ctx, unsigned char mac[16]) {
#endif // MSC/GCC or not
static inline void poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) {
static ZT_ALWAYS_INLINE void poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) noexcept
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
size_t i;
@ -505,7 +506,7 @@ static inline void poly1305_update(poly1305_context *ctx, const unsigned char *m
} // anonymous namespace
void poly1305(void *auth,const void *data,unsigned int len,const void *key)
void poly1305(void *auth,const void *data,unsigned int len,const void *key) noexcept
{
poly1305_context ctx;
poly1305_init(&ctx,reinterpret_cast<const unsigned char *>(key));

View file

@ -27,7 +27,7 @@ namespace ZeroTier {
* @param len Length of data to authenticate in bytes
* @param key 32-byte one-time use key to authenticate data (must not be reused)
*/
void poly1305(void *auth,const void *data,unsigned int len,const void *key);
void poly1305(void *auth,const void *data,unsigned int len,const void *key) noexcept;
} // namespace ZeroTier

View file

@ -62,7 +62,17 @@ uintptr_t _checkSizes()
// Make compiler compile and "run" _checkSizes()
volatile uintptr_t _checkSizesIMeanIt = _checkSizes();
uint64_t getPacketId()
uint64_t createProbe(const Identity &sender,const Identity &recipient,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]) noexcept
{
uint8_t tmp[ZT_IDENTITY_HASH_SIZE + ZT_IDENTITY_HASH_SIZE];
memcpy(tmp,sender.hash(),ZT_IDENTITY_HASH_SIZE);
memcpy(tmp + ZT_IDENTITY_HASH_SIZE,recipient.hash(),ZT_IDENTITY_HASH_SIZE);
uint64_t hash[6];
SHA384(hash,tmp,sizeof(tmp),key,ZT_PEER_SECRET_KEY_LENGTH);
return hash[0];
}
uint64_t getPacketId() noexcept
{
#ifdef ZT_PACKET_USE_ATOMIC_INTRINSICS
return __sync_add_and_fetch(&_packetIdCtr,1ULL);
@ -71,7 +81,7 @@ uint64_t getPacketId()
#endif
}
void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite)
void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite) noexcept
{
Protocol::Header &ph = pkt.as<Protocol::Header>();
ph.flags = (ph.flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U); // flags: FFCCCHHH where CCC is cipher
@ -113,7 +123,7 @@ void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY
}
}
unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize)
unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize) noexcept
{
if (packetSize <= 128)
return packetSize;

View file

@ -20,6 +20,8 @@
#include "Poly1305.hpp"
#include "LZ4.hpp"
#include "Buf.hpp"
#include "Address.hpp"
#include "Identity.hpp"
/**
* Protocol version -- incremented only for major changes
@ -55,6 +57,7 @@
* + AES encryption support
* + NIST P-384 (type 1) identities
* + Ephemeral keys
* + Short probe packets to reduce probe bandwidth
*/
#define ZT_PROTO_VERSION 11
@ -127,11 +130,21 @@
*/
#define ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX 13
/**
* Index of flags field in regular packet headers
*/
#define ZT_PROTO_PACKET_FLAGS_INDEX 18
/**
* Minimum viable length for a fragment
*/
#define ZT_PROTO_MIN_FRAGMENT_LENGTH 16
/**
* Length of a probe
*/
#define ZT_PROTO_PROBE_LENGTH 8
/**
* Index at which packet fragment payload starts
*/
@ -809,6 +822,12 @@ ZT_PACKED_STRUCT(struct EXT_FRAME
uint8_t flags;
});
ZT_PACKED_STRUCT(struct PUSH_DIRECT_PATHS
{
Header h;
uint16_t numPaths;
});
ZT_PACKED_STRUCT(struct MULTICAST_LIKE
{
ZT_PACKED_STRUCT(struct Entry
@ -908,14 +927,37 @@ ZT_PACKED_STRUCT(struct UNSUPPORTED_OPERATION__NETWORK_CONFIG_REQUEST
/****************************************************************************/
/**
* Convenience function to pull packet ID from a raw buffer
*
* @param pkt Packet to read first 8 bytes from
* @param packetSize Packet's actual size in bytes
* @return Packet ID or 0 if packet size is less than 8
*/
ZT_ALWAYS_INLINE uint64_t packetId(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= 8) ? Utils::loadBigEndian<uint64_t>(pkt.b) : 0ULL; }
/**
 * @param pkt Packet to extract hops from
* @param packetSize Packet's actual size in bytes
* @return 3-bit hops field embedded in packet flags field
*/
ZT_ALWAYS_INLINE uint8_t packetHops(const Header &h) { return (h.flags & 0x07U); }
ZT_ALWAYS_INLINE uint8_t packetHops(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= ZT_PROTO_PACKET_FLAGS_INDEX) ? (pkt.b[ZT_PROTO_PACKET_FLAGS_INDEX] & 0x07U) : 0; }
/**
 * @param pkt Packet to extract cipher ID from
* @param packetSize Packet's actual size in bytes
* @return 3-bit cipher field embedded in packet flags field
*/
ZT_ALWAYS_INLINE uint8_t packetCipher(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= ZT_PROTO_PACKET_FLAGS_INDEX) ? ((pkt.b[ZT_PROTO_PACKET_FLAGS_INDEX] >> 3U) & 0x07U) : 0; }
/**
* @return 3-bit hops field embedded in packet flags field
*/
ZT_ALWAYS_INLINE uint8_t packetHops(const Header &ph) noexcept { return (ph.flags & 0x07U); }
/**
* @return 3-bit cipher field embedded in packet flags field
*/
ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &h) { return ((h.flags >> 3U) & 0x07U); }
ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &ph) noexcept { return ((ph.flags >> 3U) & 0x07U); }
/**
* Deterministically mangle a 256-bit crypto key based on packet characteristics
@ -927,7 +969,7 @@ ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &h) { return ((h.flags >> 3U)
* @param in Input key (32 bytes)
* @param out Output buffer (32 bytes)
*/
ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const out,const Buf &packet,const unsigned int packetSize)
ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const out,const Buf &packet,const unsigned int packetSize) noexcept
{
// IV and source/destination addresses. Using the addresses divides the
// key space into two halves-- A->B and B->A (since order will change).
@ -960,12 +1002,22 @@ ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const
#endif
}
/**
* Create a short probe packet for probing a recipient for e.g. NAT traversal and path setup
*
* @param sender Sender identity
* @param recipient Recipient identity
* @param key Long-term shared secret key resulting from sender and recipient agreement
* @return Probe packed into 64-bit integer (in big-endian byte order)
*/
uint64_t createProbe(const Identity &sender,const Identity &recipient,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]) noexcept;
/**
* Get a sequential non-repeating packet ID for the next packet (thread-safe)
*
* @return Next packet ID / cryptographic nonce
*/
uint64_t getPacketId();
uint64_t getPacketId() noexcept;
/**
* Encrypt and compute packet MAC
@ -975,7 +1027,7 @@ uint64_t getPacketId();
* @param key Key to use for encryption (not per-packet key)
* @param cipherSuite Cipher suite to use for AEAD encryption or just MAC
*/
void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite);
void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite) noexcept;
/**
* Attempt to compress packet payload
@ -989,7 +1041,7 @@ void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY
* @param packetSize Total size of packet in bytes (including headers)
 * @return New size of packet after compression or original size if compression wasn't helpful
*/
unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize);
unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize) noexcept;
} // namespace Protocol
} // namespace ZeroTier

View file

@ -15,7 +15,7 @@
namespace ZeroTier {
bool Revocation::sign(const Identity &signer)
bool Revocation::sign(const Identity &signer) noexcept
{
uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX+32];
if (signer.hasPrivate()) {
@ -26,7 +26,7 @@ bool Revocation::sign(const Identity &signer)
return false;
}
int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign) const
int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign) const noexcept
{
int p = 0;
if (forSign) {
@ -58,7 +58,7 @@ int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSig
return p;
}
int Revocation::unmarshal(const uint8_t *restrict data,const int len)
int Revocation::unmarshal(const uint8_t *restrict data,const int len) noexcept
{
if (len < 54)
return -1;

View file

@ -45,9 +45,9 @@ class Revocation : public Credential
friend class Credential;
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_REVOCATION; }
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_REVOCATION; }
ZT_ALWAYS_INLINE Revocation() { memoryZero(this); }
ZT_ALWAYS_INLINE Revocation() noexcept { memoryZero(this); }
/**
* @param i ID (arbitrary for revocations, currently random)
@ -58,7 +58,7 @@ public:
* @param tgt Target node whose credential(s) are being revoked
* @param ct Credential type being revoked
*/
ZT_ALWAYS_INLINE Revocation(const uint32_t i,const uint64_t nwid,const uint32_t cid,const uint64_t thr,const uint64_t fl,const Address &tgt,const ZT_CredentialType ct) :
ZT_ALWAYS_INLINE Revocation(const uint32_t i,const uint64_t nwid,const uint32_t cid,const uint64_t thr,const uint64_t fl,const Address &tgt,const ZT_CredentialType ct) noexcept :
_id(i),
_credentialId(cid),
_networkId(nwid),
@ -71,22 +71,22 @@ public:
{
}
ZT_ALWAYS_INLINE uint32_t id() const { return _id; }
ZT_ALWAYS_INLINE uint32_t credentialId() const { return _credentialId; }
ZT_ALWAYS_INLINE uint64_t networkId() const { return _networkId; }
ZT_ALWAYS_INLINE int64_t threshold() const { return _threshold; }
ZT_ALWAYS_INLINE const Address &target() const { return _target; }
ZT_ALWAYS_INLINE const Address &signer() const { return _signedBy; }
ZT_ALWAYS_INLINE ZT_CredentialType typeBeingRevoked() const { return _type; }
ZT_ALWAYS_INLINE const uint8_t *signature() const { return _signature; }
ZT_ALWAYS_INLINE unsigned int signatureLength() const { return _signatureLength; }
ZT_ALWAYS_INLINE bool fastPropagate() const { return ((_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
ZT_ALWAYS_INLINE uint32_t id() const noexcept { return _id; }
ZT_ALWAYS_INLINE uint32_t credentialId() const noexcept { return _credentialId; }
ZT_ALWAYS_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_ALWAYS_INLINE int64_t threshold() const noexcept { return _threshold; }
ZT_ALWAYS_INLINE const Address &target() const noexcept { return _target; }
ZT_ALWAYS_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_ALWAYS_INLINE ZT_CredentialType typeBeingRevoked() const noexcept { return _type; }
ZT_ALWAYS_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_ALWAYS_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
ZT_ALWAYS_INLINE bool fastPropagate() const noexcept { return ((_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
/**
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity &signer);
bool sign(const Identity &signer) noexcept;
/**
* Verify this revocation's signature
@ -94,11 +94,11 @@ public:
* @param RR Runtime environment to provide for peer lookup, etc.
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const noexcept { return _verify(RR,tPtr,*this); }
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign = false) const;
int unmarshal(const uint8_t *restrict data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
int unmarshal(const uint8_t *restrict data,int len) noexcept;
private:
uint32_t _id;

View file

@ -14,8 +14,6 @@
#ifndef ZT_RUNTIMEENVIRONMENT_HPP
#define ZT_RUNTIMEENVIRONMENT_HPP
#include <string.h>
#include "Constants.hpp"
#include "Utils.hpp"
#include "Identity.hpp"

View file

@ -28,30 +28,30 @@ template<typename T>
class ScopedPtr : public TriviallyCopyable
{
public:
explicit ZT_ALWAYS_INLINE ScopedPtr(T *const p) : _p(p) {}
explicit ZT_ALWAYS_INLINE ScopedPtr(T *const p) noexcept : _p(p) {}
ZT_ALWAYS_INLINE ~ScopedPtr() { delete _p; }
ZT_ALWAYS_INLINE T *operator->() const { return _p; }
ZT_ALWAYS_INLINE T &operator*() const { return *_p; }
explicit ZT_ALWAYS_INLINE operator bool() const { return (_p != (T *)0); }
ZT_ALWAYS_INLINE T *ptr() const { return _p; }
ZT_ALWAYS_INLINE T *operator->() const noexcept { return _p; }
ZT_ALWAYS_INLINE T &operator*() const noexcept { return *_p; }
explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return (_p != (T *)0); }
ZT_ALWAYS_INLINE T *ptr() const noexcept { return _p; }
// Swap the held pointers of two ScopedPtr instances.
// Fix: the parameter was 'const ScopedPtr &', but swap mutates both sides
// ('p._p = tmp'), which is ill-formed through a const reference; it must be
// taken by non-const reference.
// NOTE(review): _p is declared 'T *const' elsewhere in this class; that
// member-level const also prevents any swap from compiling — confirm and
// drop it there as well.
ZT_ALWAYS_INLINE void swap(ScopedPtr &p) noexcept
{
	T *const tmp = _p;
	_p = p._p;
	p._p = tmp;
}
ZT_ALWAYS_INLINE bool operator==(const ScopedPtr &p) const { return (_p == p._p); }
ZT_ALWAYS_INLINE bool operator!=(const ScopedPtr &p) const { return (_p != p._p); }
ZT_ALWAYS_INLINE bool operator==(T *const p) const { return (_p == p); }
ZT_ALWAYS_INLINE bool operator!=(T *const p) const { return (_p != p); }
ZT_ALWAYS_INLINE bool operator==(const ScopedPtr &p) const noexcept { return (_p == p._p); }
ZT_ALWAYS_INLINE bool operator!=(const ScopedPtr &p) const noexcept { return (_p != p._p); }
ZT_ALWAYS_INLINE bool operator==(T *const p) const noexcept { return (_p == p); }
ZT_ALWAYS_INLINE bool operator!=(T *const p) const noexcept { return (_p != p); }
private:
ZT_ALWAYS_INLINE ScopedPtr() {}
ZT_ALWAYS_INLINE ScopedPtr(const ScopedPtr &p) : _p(nullptr) {}
ZT_ALWAYS_INLINE ScopedPtr &operator=(const ScopedPtr &p) { return *this; }
ZT_ALWAYS_INLINE ScopedPtr() noexcept {}
ZT_ALWAYS_INLINE ScopedPtr(const ScopedPtr &p) noexcept : _p(nullptr) {}
ZT_ALWAYS_INLINE ScopedPtr &operator=(const ScopedPtr &p) noexcept { return *this; }
T *const _p;
};
@ -60,7 +60,7 @@ private:
namespace std {
template<typename T>
ZT_ALWAYS_INLINE void swap(ZeroTier::ScopedPtr<T> &a,ZeroTier::ScopedPtr<T> &b) { a.swap(b); }
ZT_ALWAYS_INLINE void swap(ZeroTier::ScopedPtr<T> &a,ZeroTier::ScopedPtr<T> &b) noexcept { a.swap(b); }
}
#endif

View file

@ -15,7 +15,6 @@
#define ZT_SHAREDPTR_HPP
#include "Constants.hpp"
#include "AtomicCounter.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
@ -25,15 +24,19 @@ namespace ZeroTier {
*
* This is an introspective shared pointer. Classes that need to be reference
* counted must list this as a 'friend' and must have a private instance of
* AtomicCounter called __refCount.
* atomic<int> called __refCount.
*
* This is technically TriviallyCopyable but extreme care must be taken if
* one wishes to handle it in this manner. A memcpy must be followed by a
* memset of the source to 0 so as to achieve 'move' semantics.
*/
template<typename T>
class SharedPtr : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE SharedPtr() : _ptr((T *)0) {}
explicit ZT_ALWAYS_INLINE SharedPtr(T *obj) : _ptr(obj) { ++obj->__refCount; }
ZT_ALWAYS_INLINE SharedPtr(const SharedPtr &sp) : _ptr(sp._getAndInc()) {}
ZT_ALWAYS_INLINE SharedPtr() noexcept : _ptr((T *)0) {}
explicit ZT_ALWAYS_INLINE SharedPtr(T *obj) noexcept : _ptr(obj) { ++obj->__refCount; }
ZT_ALWAYS_INLINE SharedPtr(const SharedPtr &sp) noexcept : _ptr(sp._getAndInc()) {}
ZT_ALWAYS_INLINE ~SharedPtr()
{
@ -64,19 +67,28 @@ public:
*
* @param ptr Naked pointer to assign
*/
ZT_ALWAYS_INLINE void set(T *ptr)
ZT_ALWAYS_INLINE void set(T *ptr) noexcept
{
zero();
++ptr->__refCount;
_ptr = ptr;
}
/**
* Stupidly set this SharedPtr to 'ptr', ignoring current value and not incrementing reference counter
*
* This must only be used in code that knows what it's doing. :)
*
* @param ptr Pointer to set
*/
ZT_ALWAYS_INLINE void unsafeSet(T *ptr) noexcept { _ptr = ptr; }
/**
* Swap with another pointer 'for free' without ref count overhead
*
* @param with Pointer to swap with
*/
ZT_ALWAYS_INLINE void swap(SharedPtr &with)
ZT_ALWAYS_INLINE void swap(SharedPtr &with) noexcept
{
T *tmp = _ptr;
_ptr = with._ptr;
@ -84,7 +96,10 @@ public:
}
/**
* Set this value to one from another pointer and set that pointer to zero (avoids ref count changes)
* Set this value to one from another pointer and set that pointer to zero (take ownership from)
*
* This is faster than setting and zeroing the source pointer since it
* avoids a synchronized reference count change.
*
* @param from Origin pointer; will be zeroed
*/
@ -98,14 +113,15 @@ public:
from._ptr = nullptr;
}
ZT_ALWAYS_INLINE operator bool() const { return (_ptr != nullptr); }
ZT_ALWAYS_INLINE T &operator*() const { return *_ptr; }
ZT_ALWAYS_INLINE T *operator->() const { return _ptr; }
ZT_ALWAYS_INLINE operator bool() const noexcept { return (_ptr != nullptr); }
ZT_ALWAYS_INLINE T &operator*() const noexcept { return *_ptr; }
ZT_ALWAYS_INLINE T *operator->() const noexcept { return _ptr; }
/**
* @return Raw pointer to held object
*/
ZT_ALWAYS_INLINE T *ptr() const { return _ptr; }
ZT_ALWAYS_INLINE T *ptr() const noexcept { return _ptr; }
/**
* Set this pointer to NULL
@ -122,22 +138,22 @@ public:
/**
* @return Number of references according to this object's ref count or 0 if NULL
*/
ZT_ALWAYS_INLINE int references()
ZT_ALWAYS_INLINE int references() noexcept
{
if (_ptr)
return _ptr->__refCount.load();
return _ptr->__refCount;
return 0;
}
ZT_ALWAYS_INLINE bool operator==(const SharedPtr &sp) const { return (_ptr == sp._ptr); }
ZT_ALWAYS_INLINE bool operator!=(const SharedPtr &sp) const { return (_ptr != sp._ptr); }
ZT_ALWAYS_INLINE bool operator>(const SharedPtr &sp) const { return (_ptr > sp._ptr); }
ZT_ALWAYS_INLINE bool operator<(const SharedPtr &sp) const { return (_ptr < sp._ptr); }
ZT_ALWAYS_INLINE bool operator>=(const SharedPtr &sp) const { return (_ptr >= sp._ptr); }
ZT_ALWAYS_INLINE bool operator<=(const SharedPtr &sp) const { return (_ptr <= sp._ptr); }
ZT_ALWAYS_INLINE bool operator==(const SharedPtr &sp) const noexcept { return (_ptr == sp._ptr); }
ZT_ALWAYS_INLINE bool operator!=(const SharedPtr &sp) const noexcept { return (_ptr != sp._ptr); }
ZT_ALWAYS_INLINE bool operator>(const SharedPtr &sp) const noexcept { return (_ptr > sp._ptr); }
ZT_ALWAYS_INLINE bool operator<(const SharedPtr &sp) const noexcept { return (_ptr < sp._ptr); }
ZT_ALWAYS_INLINE bool operator>=(const SharedPtr &sp) const noexcept { return (_ptr >= sp._ptr); }
ZT_ALWAYS_INLINE bool operator<=(const SharedPtr &sp) const noexcept { return (_ptr <= sp._ptr); }
private:
ZT_ALWAYS_INLINE T *_getAndInc() const
ZT_ALWAYS_INLINE T *_getAndInc() const noexcept
{
if (_ptr)
++_ptr->__refCount;
@ -150,7 +166,7 @@ private:
namespace std {
template<typename T>
ZT_ALWAYS_INLINE void swap(ZeroTier::SharedPtr<T> &a,ZeroTier::SharedPtr<T> &b) { a.swap(b); }
ZT_ALWAYS_INLINE void swap(ZeroTier::SharedPtr<T> &a,ZeroTier::SharedPtr<T> &b) noexcept { a.swap(b); }
}
#endif

View file

@ -15,7 +15,7 @@
namespace ZeroTier {
bool Tag::sign(const Identity &signer)
bool Tag::sign(const Identity &signer) noexcept
{
uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
if (signer.hasPrivate()) {
@ -26,7 +26,7 @@ bool Tag::sign(const Identity &signer)
return false;
}
int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const
int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const noexcept
{
int p = 0;
if (forSign) {
@ -54,7 +54,7 @@ int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const
return p;
}
int Tag::unmarshal(const uint8_t *data,int len)
int Tag::unmarshal(const uint8_t *data,int len) noexcept
{
if (len < 37)
return -1;

View file

@ -53,9 +53,9 @@ class Tag : public Credential
friend class Credential;
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_TAG; }
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_TAG; }
ZT_ALWAYS_INLINE Tag() { memoryZero(this); }
ZT_ALWAYS_INLINE Tag() noexcept { memoryZero(this); }
/**
* @param nwid Network ID
@ -64,7 +64,7 @@ public:
* @param id Tag ID
* @param value Tag value
*/
ZT_ALWAYS_INLINE Tag(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id,const uint32_t value) :
ZT_ALWAYS_INLINE Tag(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id,const uint32_t value) noexcept :
_id(id),
_value(value),
_networkId(nwid),
@ -75,14 +75,14 @@ public:
{
}
ZT_ALWAYS_INLINE uint32_t id() const { return _id; }
ZT_ALWAYS_INLINE const uint32_t &value() const { return _value; }
ZT_ALWAYS_INLINE uint64_t networkId() const { return _networkId; }
ZT_ALWAYS_INLINE int64_t timestamp() const { return _ts; }
ZT_ALWAYS_INLINE const Address &issuedTo() const { return _issuedTo; }
ZT_ALWAYS_INLINE const Address &signer() const { return _signedBy; }
ZT_ALWAYS_INLINE const uint8_t *signature() const { return _signature; }
ZT_ALWAYS_INLINE unsigned int signatureLength() const { return _signatureLength; }
ZT_ALWAYS_INLINE uint32_t id() const noexcept { return _id; }
ZT_ALWAYS_INLINE const uint32_t &value() const noexcept { return _value; }
ZT_ALWAYS_INLINE uint64_t networkId() const noexcept { return _networkId; }
ZT_ALWAYS_INLINE int64_t timestamp() const noexcept { return _ts; }
ZT_ALWAYS_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
ZT_ALWAYS_INLINE const Address &signer() const noexcept { return _signedBy; }
ZT_ALWAYS_INLINE const uint8_t *signature() const noexcept { return _signature; }
ZT_ALWAYS_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
/**
* Sign this tag
@ -90,7 +90,7 @@ public:
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity &signer);
bool sign(const Identity &signer) noexcept;
/**
* Check this tag's signature
@ -98,30 +98,30 @@ public:
* @param RR Runtime environment to allow identity lookup for signedBy
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const noexcept { return _verify(RR,tPtr,*this); }
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_TAG_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign = false) const;
int unmarshal(const uint8_t *data,int len);
static constexpr int marshalSizeMax() noexcept { return ZT_TAG_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data,int len) noexcept;
// Provides natural sort order by ID
ZT_ALWAYS_INLINE bool operator<(const Tag &t) const { return (_id < t._id); }
ZT_ALWAYS_INLINE bool operator<(const Tag &t) const noexcept { return (_id < t._id); }
ZT_ALWAYS_INLINE bool operator==(const Tag &t) const { return (memcmp(this,&t,sizeof(Tag)) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const Tag &t) const { return (memcmp(this,&t,sizeof(Tag)) != 0); }
ZT_ALWAYS_INLINE bool operator==(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) == 0); }
ZT_ALWAYS_INLINE bool operator!=(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) != 0); }
// For searching sorted arrays or lists of Tags by ID
struct IdComparePredicate
{
ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag &b) const { return (a.id() < b.id()); }
ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag &b) const { return (a < b.id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag &a,const uint32_t b) const { return (a.id() < b); }
ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag *b) const { return (a->id() < b->id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag &b) const { return (a->id() < b.id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag *b) const { return (a.id() < b->id()); }
ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag *b) const { return (a < b->id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag *a,const uint32_t b) const { return (a->id() < b); }
ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const uint32_t b) const { return (a < b); }
ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag &b) const noexcept { return (a.id() < b.id()); }
ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag &b) const noexcept { return (a < b.id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag &a,const uint32_t b) const noexcept { return (a.id() < b); }
ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag *b) const noexcept { return (a->id() < b->id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag &b) const noexcept { return (a->id() < b.id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag *b) const noexcept { return (a.id() < b->id()); }
ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag *b) const noexcept { return (a < b->id()); }
ZT_ALWAYS_INLINE bool operator()(const Tag *a,const uint32_t b) const noexcept { return (a->id() < b); }
ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const uint32_t b) const noexcept { return (a < b); }
};
private:

View file

@ -75,13 +75,20 @@ Topology::~Topology()
SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
{
RWMutex::Lock _l(_peers_l);
SharedPtr<Peer> &hp = _peers[peer->address()];
if (hp)
return hp;
_loadCached(tPtr,peer->address(),hp);
if (hp)
if (hp) {
_peersByIncomingProbe[peer->incomingProbe()] = hp;
return hp;
}
hp = peer;
_peersByIncomingProbe[peer->incomingProbe()] = peer;
return peer;
}
@ -196,6 +203,7 @@ void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
while (i.next(a,p)) {
if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
(*p)->save(tPtr);
_peersByIncomingProbe.erase((*p)->incomingProbe());
_peers.erase(*a);
}
}
@ -227,6 +235,7 @@ void Topology::saveAll(void *tPtr)
void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
{
try {
uint64_t id[2];
id[0] = zta.toInt();
id[1] = 0;
@ -246,6 +255,9 @@ void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
}
}
}
} catch ( ... ) {
peer.zero();
}
}
} // namespace ZeroTier

View file

@ -60,9 +60,10 @@ public:
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param zta ZeroTier address of peer
* @param loadFromCached If false do not load from cache if not in memory (default: true)
* @return Peer or NULL if not found
*/
ZT_ALWAYS_INLINE SharedPtr<Peer> get(void *tPtr,const Address &zta)
ZT_ALWAYS_INLINE SharedPtr<Peer> peer(void *tPtr,const Address &zta,const bool loadFromCached = true)
{
{
RWMutex::RLock _l(_peers_l);
@ -72,6 +73,7 @@ public:
}
SharedPtr<Peer> p;
if (loadFromCached) {
_loadCached(tPtr,zta,p);
if (p) {
RWMutex::Lock _l(_peers_l);
@ -79,10 +81,26 @@ public:
if (!hp)
hp = p;
}
}
return p;
}
/**
* Get a peer by its incoming short probe packet payload
*
* @param probe Short probe payload (in big-endian byte order)
* @return Peer or NULL if no peer is currently in memory matching this probe (cache is not checked in this case)
*/
ZT_ALWAYS_INLINE SharedPtr<Peer> peerByProbe(const uint64_t probe)
{
	// Look up only among peers currently in memory; the on-disk cache is not consulted.
	RWMutex::RLock _l(_peers_l);
	const SharedPtr<Peer> *const entry = _peersByIncomingProbe.get(probe);
	return (entry) ? *entry : SharedPtr<Peer>();
}
/**
* Get a Path object for a given local and remote physical address, creating if needed
*
@ -90,7 +108,7 @@ public:
* @param r Remote address
* @return Pointer to canonicalized Path object or NULL on error
*/
ZT_ALWAYS_INLINE SharedPtr<Path> getPath(const int64_t l,const InetAddress &r)
ZT_ALWAYS_INLINE SharedPtr<Path> path(const int64_t l,const InetAddress &r)
{
const uint64_t k = _pathHash(l,r);
@ -174,25 +192,20 @@ public:
{
RWMutex::RLock l(_peers_l);
const unsigned long rootPeerCnt = _rootPeers.size();
uintptr_t *const rootPeerPtrs = (uintptr_t *)malloc(sizeof(uintptr_t) * rootPeerCnt);
if (!rootPeerPtrs)
throw std::bad_alloc();
for(unsigned long i=0;i<rootPeerCnt;++i)
rootPeerPtrs[i] = (uintptr_t)_rootPeers[i].ptr();
std::sort(rootPeerPtrs,rootPeerPtrs + rootPeerCnt);
uintptr_t *const rootPeerPtrsEnd = rootPeerPtrs + rootPeerCnt;
std::vector<uintptr_t> rootPeerPtrs;
rootPeerPtrs.reserve(_rootPeers.size());
for(std::vector< SharedPtr<Peer> >::const_iterator rp(_rootPeers.begin());rp!=_rootPeers.end();++rp)
rootPeerPtrs.push_back((uintptr_t)rp->ptr());
std::sort(rootPeerPtrs.begin(),rootPeerPtrs.end());
try {
Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
Address *a = nullptr;
SharedPtr<Peer> *p = nullptr;
while (i.next(a,p)) {
f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs,rootPeerPtrsEnd,(uintptr_t)p->ptr()));
f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs.begin(),rootPeerPtrs.end(),(uintptr_t)p->ptr()));
}
} catch ( ... ) {} // should not throw
free((void *)rootPeerPtrs);
}
/**
@ -345,6 +358,7 @@ private:
unsigned int _numConfiguredPhysicalPaths;
Hashtable< Address,SharedPtr<Peer> > _peers;
Hashtable< uint64_t,SharedPtr<Peer> > _peersByIncomingProbe;
Hashtable< uint64_t,SharedPtr<Path> > _paths;
std::set< Identity > _roots; // locked by _peers_l
std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l

View file

@ -15,6 +15,8 @@
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
#include "Peer.hpp"
#include "Path.hpp"
#include "InetAddress.hpp"
#include <cstdio>
#include <cstdlib>
@ -33,6 +35,34 @@ Trace::Trace(const RuntimeEnvironment *renv) :
{
}
Trace::Str<ZT_INETADDRESS_STRING_SIZE_MAX> Trace::str(const InetAddress &a,const bool ipOnly)
{
	// Render either just the IP or the full IP/port form into a fixed-size buffer.
	Str<ZT_INETADDRESS_STRING_SIZE_MAX> out;
	if (ipOnly) {
		a.toIpString(out.s);
	} else {
		a.toString(out.s);
	}
	return out;
}
Trace::Str<ZT_ADDRESS_STRING_SIZE_MAX> Trace::str(const Address &a)
{
	// Render a ZeroTier address as its hexadecimal string form.
	Str<ZT_ADDRESS_STRING_SIZE_MAX> out;
	a.toString(out.s);
	return out;
}
Trace::Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> Trace::str(const Address &peerAddress,const SharedPtr<Path> &path)
{
	// Formats "aabbccddee(ip/port)".
	// Fix: the original wrote '(' at fixed offset 11, which is past the address
	// string's null terminator (a ZeroTier address renders as 10 hex characters,
	// terminator at index 10). strlen() therefore stopped at the bare address,
	// the path portion was unreachable, and the trailing writes clobbered the
	// '('. Append at the actual string end instead.
	Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> s;
	peerAddress.toString(s.s);
	unsigned long x = (unsigned long)strlen(s.s);
	s.s[x++] = '(';
	path->address().toString(s.s + x);
	x += (unsigned long)strlen(s.s + x);
	s.s[x++] = ')';
	s.s[x] = 0;
	return s;
}
void Trace::unexpectedError(
void *tPtr,
uint32_t codeLocation,

View file

@ -14,11 +14,6 @@
#ifndef ZT_TRACE_HPP
#define ZT_TRACE_HPP
#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <vector>
#include "Constants.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
@ -26,6 +21,11 @@
#include "Address.hpp"
#include "MAC.hpp"
#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <vector>
namespace ZeroTier {
class RuntimeEnvironment;
@ -42,6 +42,16 @@ struct NetworkConfig;
/**
* Remote tracing and trace logging handler
*
 * These methods are called when things happen that may be of interest to
* someone debugging ZeroTier or its virtual networks. The codeLocation parameter
* is an arbitrary pseudo-random identifier of the form 0xNNNNNNNN that could be
* easily found by searching the code base. This makes it easy to locate the
* specific line where a trace originated without relying on brittle non-portable
* things like source file and line number. The same identifier should be used
* for the same 'place' in the code across versions. These could eventually be
* turned into constants that are semi-official and stored in a database to
* provide extra debug context.
*/
class Trace
{
@ -64,8 +74,25 @@ public:
}
};
/**
* Simple container for a C string
*
* @tparam C Capacity of string
*/
template<unsigned int C>
struct Str
{
// Zero-fill the buffer so it always starts as the empty string and is
// guaranteed NUL-terminated even after a partial write.
ZT_ALWAYS_INLINE Str() { memset(s,0,sizeof(s)); }
// Compile-time buffer capacity in bytes (including the terminating NUL).
constexpr static unsigned int capacity() { return C; }
char s[C]; // NUL-terminated C string storage
};
explicit Trace(const RuntimeEnvironment *renv);
static Str<ZT_INETADDRESS_STRING_SIZE_MAX> str(const InetAddress &a,bool ipOnly = false);
static Str<ZT_ADDRESS_STRING_SIZE_MAX> str(const Address &a);
static Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> str(const Address &peerAddress,const SharedPtr<Path> &path);
void unexpectedError(
void *tPtr,
uint32_t codeLocation,

View file

@ -39,7 +39,7 @@ public:
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryBurn(T *obj)
static ZT_ALWAYS_INLINE void memoryBurn(T *obj) noexcept
{
TriviallyCopyable *const tmp = obj;
Utils::burn(tmp,sizeof(T));
@ -52,7 +52,7 @@ public:
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryBurn(T &obj)
static ZT_ALWAYS_INLINE void memoryBurn(T &obj) noexcept
{
TriviallyCopyable *const tmp = &obj;
Utils::burn(tmp,sizeof(T));
@ -65,7 +65,7 @@ public:
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryZero(T *obj)
static ZT_ALWAYS_INLINE void memoryZero(T *obj) noexcept
{
TriviallyCopyable *const tmp = obj;
memset(tmp,0,sizeof(T));
@ -78,7 +78,7 @@ public:
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryZero(T &obj)
static ZT_ALWAYS_INLINE void memoryZero(T &obj) noexcept
{
TriviallyCopyable *const tmp = &obj;
memset(tmp,0,sizeof(T));
@ -92,7 +92,7 @@ public:
* @param src Source memory of same size or less than sizeof(dest)
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T *dest,const void *src)
static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T *dest,const void *src) noexcept
{
TriviallyCopyable *const tmp = dest;
memcpy(tmp,src,sizeof(T));
@ -106,7 +106,7 @@ public:
* @param src Source memory of same size or less than sizeof(dest)
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T &dest,const void *src)
static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T &dest,const void *src) noexcept
{
TriviallyCopyable *const tmp = &dest;
memcpy(tmp,src,sizeof(T));
@ -120,7 +120,7 @@ public:
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T *src)
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T *src) noexcept
{
TriviallyCopyable *const tmp = dest;
memcpy(tmp,src,sizeof(T));
@ -134,7 +134,7 @@ public:
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T &src)
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T &src) noexcept
{
TriviallyCopyable *const tmp = dest;
memcpy(tmp,&src,sizeof(T));
@ -148,7 +148,7 @@ public:
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T *src)
static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T *src) noexcept
{
TriviallyCopyable *const tmp = &dest;
memcpy(tmp,src,sizeof(T));
@ -162,7 +162,7 @@ public:
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T &src)
static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T &src) noexcept
{
TriviallyCopyable *const tmp = &dest;
memcpy(tmp,&src,sizeof(T));

View file

@ -60,7 +60,7 @@ CPUIDRegisters CPUID;
const uint64_t ZERO256[4] = { 0,0,0,0 };
const char HEXCHARS[16] = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f' };
bool secureEq(const void *a,const void *b,unsigned int len)
bool secureEq(const void *a,const void *b,unsigned int len) noexcept
{
uint8_t diff = 0;
for(unsigned int i=0;i<len;++i)
@ -87,7 +87,7 @@ static unsigned long _Utils_itoa(unsigned long n,char *s)
s[pos] = (char)('0' + (n % 10));
return pos + 1;
}
char *decimal(unsigned long n,char s[24])
char *decimal(unsigned long n,char s[24]) noexcept
{
if (n == 0) {
s[0] = '0';
@ -98,7 +98,7 @@ char *decimal(unsigned long n,char s[24])
return s;
}
char *hex(uint8_t i,char s[3])
char *hex(uint8_t i,char s[3]) noexcept
{
s[0] = HEXCHARS[(i >> 4U) & 0xfU];
s[1] = HEXCHARS[i & 0xfU];
@ -106,7 +106,7 @@ char *hex(uint8_t i,char s[3])
return s;
}
char *hex(uint16_t i,char s[5])
char *hex(uint16_t i,char s[5]) noexcept
{
s[0] = HEXCHARS[(i >> 12U) & 0xfU];
s[1] = HEXCHARS[(i >> 8U) & 0xfU];
@ -116,7 +116,7 @@ char *hex(uint16_t i,char s[5])
return s;
}
char *hex(uint32_t i,char s[9])
char *hex(uint32_t i,char s[9]) noexcept
{
s[0] = HEXCHARS[(i >> 28U) & 0xfU];
s[1] = HEXCHARS[(i >> 24U) & 0xfU];
@ -130,7 +130,7 @@ char *hex(uint32_t i,char s[9])
return s;
}
char *hex(uint64_t i,char s[17])
char *hex(uint64_t i,char s[17]) noexcept
{
s[0] = HEXCHARS[(i >> 60U) & 0xfU];
s[1] = HEXCHARS[(i >> 56U) & 0xfU];
@ -152,7 +152,7 @@ char *hex(uint64_t i,char s[17])
return s;
}
uint64_t unhex(const char *s)
uint64_t unhex(const char *s) noexcept
{
uint64_t n = 0;
if (s) {
@ -177,7 +177,7 @@ uint64_t unhex(const char *s)
return n;
}
char *hex10(uint64_t i,char s[11])
char *hex10(uint64_t i,char s[11]) noexcept
{
s[0] = HEXCHARS[(i >> 36U) & 0xfU];
s[1] = HEXCHARS[(i >> 32U) & 0xfU];
@ -193,7 +193,7 @@ char *hex10(uint64_t i,char s[11])
return s;
}
char *hex(const void *d,unsigned int l,char *s)
char *hex(const void *d,unsigned int l,char *s) noexcept
{
char *const save = s;
for(unsigned int i=0;i<l;++i) {
@ -205,7 +205,7 @@ char *hex(const void *d,unsigned int l,char *s)
return save;
}
unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen)
unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen) noexcept
{
unsigned int l = 0;
const char *hend = h + hlen;
@ -239,7 +239,7 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
return l;
}
void getSecureRandom(void *buf,unsigned int bytes)
void getSecureRandom(void *buf,unsigned int bytes) noexcept
{
static Mutex globalLock;
static bool initialized = false;
@ -322,14 +322,14 @@ void getSecureRandom(void *buf,unsigned int bytes)
}
}
uint64_t getSecureRandomU64()
uint64_t getSecureRandomU64() noexcept
{
uint64_t tmp = 0;
getSecureRandom(&tmp,sizeof(tmp));
return tmp;
}
int b32e(const uint8_t *data,int length,char *result,int bufSize)
int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept
{
if (length < 0 || length > (1 << 28)) {
result[0] = (char)0;
@ -365,7 +365,7 @@ int b32e(const uint8_t *data,int length,char *result,int bufSize)
return -1;
}
int b32d(const char *encoded,uint8_t *result,int bufSize)
int b32d(const char *encoded,uint8_t *result,int bufSize) noexcept
{
int buffer = 0;
int bitsLeft = 0;
@ -406,7 +406,7 @@ int b32d(const char *encoded,uint8_t *result,int bufSize)
}
#define ROL64(x,k) (((x) << (k)) | ((x) >> (64 - (k))))
uint64_t random()
uint64_t random() noexcept
{
// https://en.wikipedia.org/wiki/Xorshift#xoshiro256**
static volatile uint64_t s_s0 = getSecureRandomU64();
@ -434,7 +434,7 @@ uint64_t random()
return result;
}
bool scopy(char *dest,unsigned int len,const char *src)
bool scopy(char *dest,unsigned int len,const char *src) noexcept
{
if (!len)
return false; // sanity check

View file

@ -64,7 +64,7 @@ extern const char HEXCHARS[16];
* @param len Length of strings
* @return True if strings are equal
*/
bool secureEq(const void *a,const void *b,unsigned int len);
bool secureEq(const void *a,const void *b,unsigned int len) noexcept;
/**
* Be absolutely sure to zero memory
@ -81,7 +81,7 @@ void burn(void *ptr,unsigned int len);
* @param s Buffer, at least 24 bytes in size
* @return String containing 'n' in base 10 form
*/
char *decimal(unsigned long n,char s[24]);
char *decimal(unsigned long n,char s[24]) noexcept;
/**
* Convert an unsigned integer into hex
@ -90,10 +90,10 @@ char *decimal(unsigned long n,char s[24]);
* @param s Buffer to receive hex, must be at least (2*sizeof(i))+1 in size or overflow will occur.
* @return Pointer to s containing hex string with trailing zero byte
*/
char *hex(uint8_t i,char s[3]);
char *hex(uint16_t i,char s[5]);
char *hex(uint32_t i,char s[9]);
char *hex(uint64_t i,char s[17]);
char *hex(uint8_t i,char s[3]) noexcept;
char *hex(uint16_t i,char s[5]) noexcept;
char *hex(uint32_t i,char s[9]) noexcept;
char *hex(uint64_t i,char s[17]) noexcept;
/**
* Decode an unsigned integer in hex format
@ -101,7 +101,7 @@ char *hex(uint64_t i,char s[17]);
* @param s String to decode, non-hex chars are ignored
* @return Unsigned integer
*/
uint64_t unhex(const char *s);
uint64_t unhex(const char *s) noexcept;
/**
* Convert the least significant 40 bits of a uint64_t to hex
@ -110,7 +110,7 @@ uint64_t unhex(const char *s);
* @param s Buffer of size [11] to receive 10 hex characters
* @return Pointer to buffer
*/
char *hex10(uint64_t i,char s[11]);
char *hex10(uint64_t i,char s[11]) noexcept;
/**
* Convert a byte array into hex
@ -120,7 +120,7 @@ char *hex10(uint64_t i,char s[11]);
* @param s String buffer, must be at least (l*2)+1 in size or overflow will occur
* @return Pointer to filled string buffer
*/
char *hex(const void *d,unsigned int l,char *s);
char *hex(const void *d,unsigned int l,char *s) noexcept;
/**
* Decode a hex string
@ -131,7 +131,7 @@ char *hex(const void *d,unsigned int l,char *s);
* @param buflen Length of output buffer
* @return Number of written bytes
*/
unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen);
unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen) noexcept;
/**
* Generate secure random bytes
@ -142,12 +142,12 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
* @param buf Buffer to fill
* @param bytes Number of random bytes to generate
*/
void getSecureRandom(void *buf,unsigned int bytes);
void getSecureRandom(void *buf,unsigned int bytes) noexcept;
/**
* @return Secure random 64-bit integer
*/
uint64_t getSecureRandomU64();
uint64_t getSecureRandomU64() noexcept;
/**
* Encode string to base32
@ -158,7 +158,7 @@ uint64_t getSecureRandomU64();
* @param bufSize Size of result buffer
* @return Number of bytes written
*/
int b32e(const uint8_t *data,int length,char *result,int bufSize);
int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept;
/**
* Decode base32 string
@ -168,12 +168,12 @@ int b32e(const uint8_t *data,int length,char *result,int bufSize);
* @param bufSize Size of result buffer
* @return Number of bytes written or -1 on error
*/
int b32d(const char *encoded, uint8_t *result, int bufSize);
int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept;
/**
* Get a non-cryptographic random integer
*/
uint64_t random();
uint64_t random() noexcept;
/**
* Perform a safe C string copy, ALWAYS null-terminating the result
@ -186,7 +186,7 @@ uint64_t random();
* @param src Source string (if NULL, dest will receive a zero-length string and true is returned)
* @return True on success, false on overflow (buffer will still be 0-terminated)
*/
bool scopy(char *dest,unsigned int len,const char *src);
bool scopy(char *dest,unsigned int len,const char *src) noexcept;
/**
* Mix bits in a 64-bit integer
@ -196,7 +196,7 @@ bool scopy(char *dest,unsigned int len,const char *src);
* @param x Integer to mix
* @return Hashed value
*/
static ZT_ALWAYS_INLINE uint64_t hash64(uint64_t x)
static ZT_ALWAYS_INLINE uint64_t hash64(uint64_t x) noexcept
{
x ^= x >> 30U;
x *= 0xbf58476d1ce4e5b9ULL;
@ -211,7 +211,7 @@ static ZT_ALWAYS_INLINE uint64_t hash64(uint64_t x)
* @param l Length of buffer
* @return True if buffer is all zero
*/
static ZT_ALWAYS_INLINE bool allZero(const void *const b,const unsigned int l)
static ZT_ALWAYS_INLINE bool allZero(const void *const b,const unsigned int l) noexcept
{
const uint8_t *x = reinterpret_cast<const uint8_t *>(b);
const uint8_t *const y = x + l;
@ -231,7 +231,7 @@ static ZT_ALWAYS_INLINE bool allZero(const void *const b,const unsigned int l)
* @param saveptr Pointer to pointer where function can save state
* @return Next token or NULL if none
*/
static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr)
static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr) noexcept
{
#ifdef __WINDOWS__
return strtok_s(str,delim,saveptr);
@ -240,9 +240,9 @@ static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr)
#endif
}
static ZT_ALWAYS_INLINE unsigned int strToUInt(const char *s) { return (unsigned int)strtoul(s,nullptr,10); }
static ZT_ALWAYS_INLINE unsigned int strToUInt(const char *s) noexcept { return (unsigned int)strtoul(s,nullptr,10); }
static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s)
static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s) noexcept
{
#ifdef __WINDOWS__
return (unsigned long long)_strtoui64(s,(char **)0,10);
@ -251,7 +251,7 @@ static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s)
#endif
}
static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s)
static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s) noexcept
{
#ifdef __WINDOWS__
return (unsigned long long)_strtoui64(s,nullptr,16);
@ -267,7 +267,7 @@ static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s)
* @param len Length in bytes
* @return Non-cryptographic hash suitable for use in a hash table
*/
static ZT_ALWAYS_INLINE unsigned long hashString(const void *restrict key,const unsigned int len)
static ZT_ALWAYS_INLINE unsigned long hashString(const void *restrict key,const unsigned int len) noexcept
{
const uint8_t *p = reinterpret_cast<const uint8_t *>(key);
unsigned long h = 0;
@ -283,13 +283,13 @@ static ZT_ALWAYS_INLINE unsigned long hashString(const void *restrict key,const
}
#ifdef __GNUC__
static ZT_ALWAYS_INLINE unsigned int countBits(const uint8_t v) { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint16_t v) { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint32_t v) { return (unsigned int)__builtin_popcountl((unsigned long)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint64_t v) { return (unsigned int)__builtin_popcountll((unsigned long long)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint8_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint16_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint32_t v) noexcept { return (unsigned int)__builtin_popcountl((unsigned long)v); }
static ZT_ALWAYS_INLINE unsigned int countBits(const uint64_t v) noexcept{ return (unsigned int)__builtin_popcountll((unsigned long long)v); }
#else
template<typename T>
static ZT_ALWAYS_INLINE unsigned int countBits(T v)
static ZT_ALWAYS_INLINE unsigned int countBits(T v) noexcept
{
v = v - ((v >> 1) & (T)~(T)0/3);
v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
@ -299,9 +299,9 @@ static ZT_ALWAYS_INLINE unsigned int countBits(T v)
#endif
#if __BYTE_ORDER == __LITTLE_ENDIAN
static ZT_ALWAYS_INLINE uint8_t hton(uint8_t n) { return n; }
static ZT_ALWAYS_INLINE int8_t hton(int8_t n) { return n; }
static ZT_ALWAYS_INLINE uint16_t hton(uint16_t n)
static ZT_ALWAYS_INLINE uint8_t hton(uint8_t n) noexcept { return n; }
static ZT_ALWAYS_INLINE int8_t hton(int8_t n) noexcept { return n; }
static ZT_ALWAYS_INLINE uint16_t hton(uint16_t n) noexcept
{
#if defined(__GNUC__)
#if defined(__FreeBSD__)
@ -313,8 +313,8 @@ static ZT_ALWAYS_INLINE uint16_t hton(uint16_t n)
return htons(n);
#endif
}
static ZT_ALWAYS_INLINE int16_t hton(int16_t n) { return (int16_t)Utils::hton((uint16_t)n); }
static ZT_ALWAYS_INLINE uint32_t hton(uint32_t n)
static ZT_ALWAYS_INLINE int16_t hton(int16_t n) noexcept { return (int16_t)Utils::hton((uint16_t)n); }
static ZT_ALWAYS_INLINE uint32_t hton(uint32_t n) noexcept
{
#if defined(__GNUC__)
#if defined(__FreeBSD__)
@ -326,8 +326,8 @@ static ZT_ALWAYS_INLINE uint32_t hton(uint32_t n)
return htonl(n);
#endif
}
static ZT_ALWAYS_INLINE int32_t hton(int32_t n) { return (int32_t)Utils::hton((uint32_t)n); }
static ZT_ALWAYS_INLINE uint64_t hton(uint64_t n)
static ZT_ALWAYS_INLINE int32_t hton(int32_t n) noexcept { return (int32_t)Utils::hton((uint32_t)n); }
static ZT_ALWAYS_INLINE uint64_t hton(uint64_t n) noexcept
{
#if defined(__GNUC__)
#if defined(__FreeBSD__)
@ -348,16 +348,16 @@ static ZT_ALWAYS_INLINE uint64_t hton(uint64_t n)
);
#endif
}
static ZT_ALWAYS_INLINE int64_t hton(int64_t n) { return (int64_t)hton((uint64_t)n); }
static ZT_ALWAYS_INLINE int64_t hton(int64_t n) noexcept { return (int64_t)hton((uint64_t)n); }
#else
template<typename T>
static ZT_ALWAYS_INLINE T hton(T n) { return n; }
static ZT_ALWAYS_INLINE T hton(T n) noexcept { return n; }
#endif
#if __BYTE_ORDER == __LITTLE_ENDIAN
static ZT_ALWAYS_INLINE uint8_t ntoh(uint8_t n) { return n; }
static ZT_ALWAYS_INLINE int8_t ntoh(int8_t n) { return n; }
static ZT_ALWAYS_INLINE uint16_t ntoh(uint16_t n)
static ZT_ALWAYS_INLINE uint8_t ntoh(uint8_t n) noexcept { return n; }
static ZT_ALWAYS_INLINE int8_t ntoh(int8_t n) noexcept { return n; }
static ZT_ALWAYS_INLINE uint16_t ntoh(uint16_t n) noexcept
{
#if defined(__GNUC__)
#if defined(__FreeBSD__)
@ -369,8 +369,8 @@ static ZT_ALWAYS_INLINE uint16_t ntoh(uint16_t n)
return htons(n);
#endif
}
static ZT_ALWAYS_INLINE int16_t ntoh(int16_t n) { return (int16_t)Utils::ntoh((uint16_t)n); }
static ZT_ALWAYS_INLINE uint32_t ntoh(uint32_t n)
static ZT_ALWAYS_INLINE int16_t ntoh(int16_t n) noexcept { return (int16_t)Utils::ntoh((uint16_t)n); }
static ZT_ALWAYS_INLINE uint32_t ntoh(uint32_t n) noexcept
{
#if defined(__GNUC__)
#if defined(__FreeBSD__)
@ -382,8 +382,8 @@ static ZT_ALWAYS_INLINE uint32_t ntoh(uint32_t n)
return ntohl(n);
#endif
}
static ZT_ALWAYS_INLINE int32_t ntoh(int32_t n) { return (int32_t)Utils::ntoh((uint32_t)n); }
static ZT_ALWAYS_INLINE uint64_t ntoh(uint64_t n)
static ZT_ALWAYS_INLINE int32_t ntoh(int32_t n) noexcept { return (int32_t)Utils::ntoh((uint32_t)n); }
static ZT_ALWAYS_INLINE uint64_t ntoh(uint64_t n) noexcept
{
#if defined(__GNUC__)
#if defined(__FreeBSD__)
@ -404,14 +404,14 @@ static ZT_ALWAYS_INLINE uint64_t ntoh(uint64_t n)
);
#endif
}
static ZT_ALWAYS_INLINE int64_t ntoh(int64_t n) { return (int64_t)ntoh((uint64_t)n); }
static ZT_ALWAYS_INLINE int64_t ntoh(int64_t n) noexcept { return (int64_t)ntoh((uint64_t)n); }
#else
template<typename T>
static ZT_ALWAYS_INLINE T ntoh(T n) { return n; }
static ZT_ALWAYS_INLINE T ntoh(T n) noexcept { return n; }
#endif
template<typename I>
static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p)
static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
I x = (I)0;
@ -429,7 +429,20 @@ static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p)
}
template<typename I>
static ZT_ALWAYS_INLINE void storeBigEndian(void *const p,const I i)
// Load a value of integral type I from a possibly unaligned pointer in
// native ("as-is") byte order — no endian conversion is performed
// (contrast with loadBigEndian() above).
static ZT_ALWAYS_INLINE I loadAsIsEndian(const void *const p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
// Byte-wise copy for targets that fault on unaligned loads.
I x = (I)0;
for(unsigned int k=0;k<sizeof(I);++k)
reinterpret_cast<uint8_t *>(&x)[k] = reinterpret_cast<const uint8_t *>(p)[k];
return x;
#else
// Direct (possibly unaligned) load on targets that permit it.
return *reinterpret_cast<const I *>(p);
#endif
}
template<typename I>
static ZT_ALWAYS_INLINE void storeBigEndian(void *const p,const I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
for(unsigned int k=0;k<sizeof(I);++k) {

View file

@ -29,7 +29,7 @@ namespace ZeroTier {
namespace {
ZT_ALWAYS_INLINE const Identity &ifPeerNonNull(const SharedPtr<Peer> &p)
ZT_ALWAYS_INLINE const Identity &identityFromPeerPtr(const SharedPtr<Peer> &p)
{
if (p)
return p->identity();
@ -49,15 +49,36 @@ VL1::~VL1()
void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAddress &fromAddr,SharedPtr<Buf> &data,const unsigned int len)
{
// Get canonical Path object for this originating address and local socket pair.
const SharedPtr<Path> path(RR->topology->path(localSocket,fromAddr));
const int64_t now = RR->node->now();
const SharedPtr<Path> path(RR->topology->getPath(localSocket,fromAddr));
// Update path's last receive time (this is updated when anything is received at all, even if invalid or a keepalive)
path->received(now);
// Really short packets are keepalives and other junk.
try {
// Handle 8-byte short probes, which are used as a low-bandwidth way to initiate a real handshake.
// These are only minimally "secure" in the sense that they are unique per graph edge (sender->recipient)
// to within 1/2^64 but can easily be replayed. We rate limit this to prevent ZeroTier being used as
// a vector in DDOS amplification attacks, then send a larger fully authenticated message to initiate
// a handshake. We do not send HELLO since we don't want this to be a vector for third parties to
// mass-probe for ZeroTier nodes and obtain all of the information in a HELLO. This isn't a huge risk
// but we might as well avoid it. When the peer receives NOP on a path that hasn't been handshaked yet
// it will send its own HELLO to which we will respond with a fully encrypted OK(HELLO).
if (len == ZT_PROTO_PROBE_LENGTH) {
const SharedPtr<Peer> peer(RR->topology->peerByProbe(Utils::loadAsIsEndian<uint64_t>(data->b)));
if ((peer)&&(peer->rateGateInboundProbe(now))) {
peer->sendNOP(tPtr,path->localSocket(),path->address(),now);
path->sent(now);
}
return;
}
// Discard any other runt packets that aren't probes. These are likely to be keepalives or corrupt junk.
if (len < ZT_PROTO_MIN_FRAGMENT_LENGTH)
return;
try {
FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS> pktv;
Address destination;
@ -103,7 +124,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
destination.setTo(ph.destination);
if (destination != RR->identity.address()) {
// Packet or packet head is not address to this node ----------------------------------------------------------
// Packet or packet head is not addressed to this node --------------------------------------------------------
_relay(tPtr,path,destination,data,len);
return;
}
@ -146,7 +167,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
// there is enough room in each slice to shift their contents to sizes that are multiples
// of 64 if needed for crypto.
if ((pktv.empty()) || (((int)pktv[0].e - (int)pktv[0].s) < sizeof(Protocol::Header))) {
RR->t->unexpectedError(tPtr,0x3df19990,"empty or undersized packet vector");
RR->t->unexpectedError(tPtr,0x3df19990,"empty or undersized packet vector after parsing packet from %s of length %d",Trace::str(path->address()).s,(int)len);
return;
}
for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::const_iterator s(pktv.begin());s!=pktv.end();++s) {
@ -159,7 +180,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
if (source == RR->identity.address())
return;
SharedPtr<Peer> peer(RR->topology->get(tPtr,source));
SharedPtr<Peer> peer(RR->topology->peer(tPtr,source));
Buf::Slice pkt;
bool authenticated = false;
@ -171,7 +192,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::const_iterator s(pktv.begin()+1);s!=pktv.end();++s)
packetSize += s->e - s->s;
if (packetSize > ZT_PROTO_MAX_PACKET_LENGTH) {
RR->t->incomingPacketDropped(tPtr,0x010348da,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
RR->t->incomingPacketDropped(tPtr,0x010348da,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return;
}
@ -179,7 +200,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
if ((!peer)&&(!(((cipher == ZT_PROTO_CIPHER_SUITE__POLY1305_NONE)||(cipher == ZT_PROTO_CIPHER_SUITE__NONE))&&((ph->verb & 0x1fU) == Protocol::VERB_HELLO)))) {
pkt = Buf::assembleSliceVector(pktv);
if (pkt.e < ZT_PROTO_MIN_PACKET_LENGTH) {
RR->t->incomingPacketDropped(tPtr,0xbada9366,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
RR->t->incomingPacketDropped(tPtr,0xbada9366,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return;
}
{
@ -274,7 +295,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
}
authenticated = true;
} else {
RR->t->incomingPacketDropped(tPtr,0xb0b01999,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
RR->t->incomingPacketDropped(tPtr,0xb0b01999,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
}
break;
@ -286,13 +307,13 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
pkt = Buf::assembleSliceVector(pktv);
if (pkt.e < ZT_PROTO_MIN_PACKET_LENGTH)
RR->t->incomingPacketDropped(tPtr,0x3d3337df,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
RR->t->incomingPacketDropped(tPtr,0x3d3337df,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
ph = &(pkt.b->as<Protocol::Header>());
if (RR->topology->shouldInboundPathBeTrusted(path->address(),Utils::ntoh(ph->mac))) {
authenticated = true;
} else {
RR->t->incomingPacketDropped(tPtr,0x2dfa910b,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_NOT_TRUSTED_PATH);
RR->t->incomingPacketDropped(tPtr,0x2dfa910b,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_NOT_TRUSTED_PATH);
return;
}
} break;
@ -303,21 +324,27 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
// break;
default:
RR->t->incomingPacketDropped(tPtr,0x5b001099,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
RR->t->incomingPacketDropped(tPtr,0x5b001099,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return;
}
// Packet fully assembled and may be authenticated ----------------------------------------------------------------
// Packet fully assembled, authenticated 'true' if already authenticated via MAC ----------------------------------
// Return any still held buffers in pktv to the buffer pool.
pktv.clear();
const Protocol::Verb verb = (Protocol::Verb)(ph->verb & ZT_PROTO_VERB_MASK);
// Note that all verbs except HELLO require MAC.
if (((!authenticated)||(!peer))&&(verb != Protocol::VERB_HELLO)) {
RR->t->incomingPacketDropped(tPtr,0x5b001099,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
}
// Decompress packet payload if compressed.
if ((ph->verb & ZT_PROTO_VERB_FLAG_COMPRESSED) != 0) {
if (!authenticated) {
RR->t->incomingPacketDropped(tPtr,0x390bcd0a,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
RR->t->incomingPacketDropped(tPtr,0x390bcd0a,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return;
}
@ -337,49 +364,71 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
pkt.b.swap(nb);
pkt.e = packetSize = (unsigned int)uncompressedLen;
} else {
RR->t->incomingPacketDropped(tPtr,0xee9e4392,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_INVALID_COMPRESSED_DATA);
RR->t->incomingPacketDropped(tPtr,0xee9e4392,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_INVALID_COMPRESSED_DATA);
return;
}
}
// VL1 and VL2 are conceptually and (mostly) logically separate layers.
// Verbs that relate to VL1 (P2P transport) are handled in this class.
// VL2 (virtual Ethernet / SDN) verbs are handled in the VL2 class.
/*
* Important notes:
*
* All verbs except HELLO assume that authenticated is true and peer is non-NULL.
* This is checked above. HELLO will accept either case and always performs its
* own secondary validation. The path argument is never NULL.
*
* VL1 and VL2 are conceptually separate layers of the ZeroTier protocol. In the
* code they are almost entirely logically separate. To make the code easier to
* understand the handlers for VL2 data paths have been moved to a VL2 class.
*/
bool ok = true;
switch(verb) {
case Protocol::VERB_NOP:
peer->received(tPtr,path,hops,ph->packetId,packetSize - ZT_PROTO_PACKET_PAYLOAD_START,Protocol::VERB_NOP,0,Protocol::VERB_NOP,0);
break;
case Protocol::VERB_HELLO: _HELLO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_ERROR: _ERROR(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_OK: _OK(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_WHOIS: _WHOIS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_RENDEZVOUS: _RENDEZVOUS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_FRAME: RR->vl2->_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_EXT_FRAME: RR->vl2->_EXT_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_ECHO: _ECHO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated);
case Protocol::VERB_MULTICAST_LIKE: RR->vl2->_MULTICAST_LIKE(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_NETWORK_CREDENTIALS: RR->vl2->_NETWORK_CREDENTIALS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_NETWORK_CONFIG_REQUEST: RR->vl2->_NETWORK_CONFIG_REQUEST(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_NETWORK_CONFIG: RR->vl2->_NETWORK_CONFIG(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_MULTICAST_GATHER: RR->vl2->_MULTICAST_GATHER(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_MULTICAST_FRAME_deprecated: RR->vl2->_MULTICAST_FRAME_deprecated(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_PUSH_DIRECT_PATHS: _PUSH_DIRECT_PATHS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_USER_MESSAGE: _USER_MESSAGE(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_MULTICAST: RR->vl2->_MULTICAST(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_ENCAP: _ENCAP(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_NOP: break;
case Protocol::VERB_HELLO: ok = _HELLO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
case Protocol::VERB_ERROR: ok = _ERROR(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_OK: ok = _OK(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_WHOIS: ok = _WHOIS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_RENDEZVOUS: ok = _RENDEZVOUS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_FRAME: ok = RR->vl2->_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_EXT_FRAME: ok = RR->vl2->_EXT_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_ECHO: ok = _ECHO(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST_LIKE: ok = RR->vl2->_MULTICAST_LIKE(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_NETWORK_CREDENTIALS: ok = RR->vl2->_NETWORK_CREDENTIALS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_NETWORK_CONFIG_REQUEST: ok = RR->vl2->_NETWORK_CONFIG_REQUEST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_NETWORK_CONFIG: ok = RR->vl2->_NETWORK_CONFIG(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST_GATHER: ok = RR->vl2->_MULTICAST_GATHER(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST_FRAME_deprecated: ok = RR->vl2->_MULTICAST_FRAME_deprecated(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_PUSH_DIRECT_PATHS: ok = _PUSH_DIRECT_PATHS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_USER_MESSAGE: ok = _USER_MESSAGE(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_MULTICAST: ok = RR->vl2->_MULTICAST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
case Protocol::VERB_ENCAP: ok = _ENCAP(tPtr,path,peer,*pkt.b,(int)packetSize); break;
default:
RR->t->incomingPacketDropped(tPtr,0xdeadeff0,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
RR->t->incomingPacketDropped(tPtr,0xdeadeff0,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
break;
}
if (ok)
peer->received(tPtr,path,hops,ph->packetId,packetSize - ZT_PROTO_PACKET_PAYLOAD_START,verb);
} catch ( ... ) {
RR->t->unexpectedError(tPtr,0xea1b6dea,"unexpected exception in onRemotePacket()");
RR->t->unexpectedError(tPtr,0xea1b6dea,"unexpected exception in onRemotePacket() parsing packet from %s",Trace::str(path->address()).s);
}
}
// Relay a packet on behalf of another peer: bump the hop count stored in the
// low 3 bits of the flags byte, then forward along our current best path to
// 'destination'. Drops silently if the hop limit is reached, the destination
// is unknown, or no usable path exists.
void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destination,SharedPtr<Buf> &data,unsigned int len)
{
	const uint8_t newHopCount = (data->b[ZT_PROTO_PACKET_FLAGS_INDEX] & 7U) + 1;
	if (newHopCount >= ZT_RELAY_MAX_HOPS)
		return; // hop limit reached, drop
	// Rewrite the hop count in place; the upper 5 flag bits are preserved.
	// Note this mutates the caller's buffer before the destination lookup.
	data->b[ZT_PROTO_PACKET_FLAGS_INDEX] = (data->b[ZT_PROTO_PACKET_FLAGS_INDEX] & 0xf8U) | newHopCount;
	const SharedPtr<Peer> toPeer(RR->topology->peer(tPtr,destination,false));
	if (!toPeer)
		return; // destination peer unknown, drop
	const uint64_t now = RR->node->now();
	const SharedPtr<Path> toPath(toPeer->path(now));
	if (!toPath)
		return; // no usable path to destination, drop
	toPath->send(RR,tPtr,data->b,len,now);
}
void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
@ -417,47 +466,46 @@ void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
ph.flags = 0;
ph.verb = Protocol::VERB_OK;
int ptr = sizeof(Protocol::Header);
while ((a != toSend.end())&&(ptr < (ZT_PROTO_MAX_PACKET_LENGTH - 1))) {
a->copyTo(outp.b + ptr);
int outl = sizeof(Protocol::Header);
while ((a != toSend.end())&&(outl < ZT_PROTO_MAX_PACKET_LENGTH)) {
a->copyTo(outp.b + outl);
++a;
ptr += ZT_ADDRESS_LENGTH;
outl += ZT_ADDRESS_LENGTH;
}
if (ptr > sizeof(Protocol::Header)) {
Protocol::armor(outp,ptr,root->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
rootPath->send(RR,tPtr,outp.b,ptr,now);
if (outl > sizeof(Protocol::Header)) {
Protocol::armor(outp,outl,root->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
rootPath->send(RR,tPtr,outp.b,outl,now);
}
}
}
void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
if (packetSize < sizeof(Protocol::HELLO)) {
RR->t->incomingPacketDropped(tPtr,0x2bdb0001,0,0,ifPeerNonNull(peer),path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return;
RR->t->incomingPacketDropped(tPtr,0x2bdb0001,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return false;
}
Protocol::HELLO &p = pkt.as<Protocol::HELLO>();
const uint8_t hops = Protocol::packetHops(p.h);
int ptr = sizeof(Protocol::HELLO);
if (p.versionProtocol < ZT_PROTO_VERSION_MIN) {
RR->t->incomingPacketDropped(tPtr,0xe8d12bad,p.h.packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_PEER_TOO_OLD);
return;
RR->t->incomingPacketDropped(tPtr,0xe8d12bad,p.h.packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_PEER_TOO_OLD);
return false;
}
Identity id;
if (pkt.rO(ptr,id) < 0) {
RR->t->incomingPacketDropped(tPtr,0x707a9810,p.h.packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return;
RR->t->incomingPacketDropped(tPtr,0x707a9810,p.h.packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return false;
}
if (Address(p.h.source) != id.address()) {
RR->t->incomingPacketDropped(tPtr,0x06aa9ff1,p.h.packetId,0,Identity::NIL,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
return false;
}
// Packet is basically valid and identity unmarshaled ---------------------------------------------------------------
// Packet is basically valid and identity unmarshaled successfully --------------------------------------------------
// Get long-term static key for this node.
uint8_t key[ZT_PEER_SECRET_KEY_LENGTH];
@ -467,11 +515,11 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
peer.zero();
if (!RR->identity.agree(id,key)) {
RR->t->incomingPacketDropped(tPtr,0x46db8010,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
return false;
}
}
// Verify packet using Poly1305, which for v2.x
// Verify packet using Poly1305 MAC
{
uint8_t perPacketKey[ZT_PEER_SECRET_KEY_LENGTH];
uint8_t macKey[ZT_POLY1305_KEY_LEN];
@ -481,7 +529,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
poly1305(mac,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
if (p.h.mac != mac[0]) {
RR->t->incomingPacketDropped(tPtr,0x11bfff81,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
return false;
}
}
@ -495,8 +543,8 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
// Get external surface address if present.
if (ptr < packetSize) {
if (pkt.rO(ptr,externalSurfaceAddress) < 0) {
RR->t->incomingPacketDropped(tPtr,0xf1000023,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return;
RR->t->incomingPacketDropped(tPtr,0x10001003,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return false;
}
}
@ -517,13 +565,13 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
const void *const dictionaryBytes = pkt.b + ptr;
if ((ptr += (int)dictionarySize) > packetSize) {
RR->t->incomingPacketDropped(tPtr,0x0d0f0112,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return;
return false;
}
ptr += pkt.rI16(ptr); // skip any additional fields, currently always 0
if (ptr > packetSize) {
RR->t->incomingPacketDropped(tPtr,0x451f2341,0,p.h.packetId,id,path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return;
return false;
}
if ((ptr + ZT_SHA384_DIGEST_LEN) <= packetSize) {
@ -531,7 +579,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
HMACSHA384(hmacKey,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,hmac);
if (!Utils::secureEq(pkt.b + ptr,hmac,ZT_HMACSHA384_LEN)) {
RR->t->incomingPacketDropped(tPtr,0x1000662a,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
return false;
}
hmacAuthenticated = true;
}
@ -539,35 +587,35 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
if (dictionarySize) {
if (!nodeMetaData.decode(dictionaryBytes,dictionarySize)) {
RR->t->incomingPacketDropped(tPtr,0x67192344,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return;
return false;
}
}
}
}
// v2.x+ peers must include HMAC, older peers don't
// v2.x+ peers must include HMAC, older peers don't (we'll drop support for them when 1.x is dead)
if ((!hmacAuthenticated) && (p.versionProtocol >= 11)) {
RR->t->incomingPacketDropped(tPtr,0x571feeea,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
return false;
}
// Packet is fully decoded and has passed full HMAC (if present) ----------------------------------------------------
// Packet is fully decoded and has passed all tests -----------------------------------------------------------------
const int64_t now = RR->node->now();
if (!peer) {
if (!RR->node->rateGateIdentityVerification(now,path->address())) {
RR->t->incomingPacketDropped(tPtr,0xaffa9ff7,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
return;
return false;
}
if (!id.locallyValidate()) {
RR->t->incomingPacketDropped(tPtr,0x2ff7a909,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
return;
return false;
}
peer.set(new Peer(RR));
if (!peer)
return;
peer->init(RR->identity,id);
return false;
peer->init(id);
peer = RR->topology->add(tPtr,peer);
}
@ -581,8 +629,10 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
Dictionary myNodeMetaData;
myNodeMetaData.encode(myNodeMetaDataBin);
}
if (myNodeMetaDataBin.size() > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
return;
if (myNodeMetaDataBin.size() > ZT_PROTO_MAX_PACKET_LENGTH) {
RR->t->unexpectedError(tPtr,0xbc8861e0,"node meta-data dictionary exceeds maximum packet length while composing OK(HELLO) to %s",Trace::str(id.address(),path).s);
return false;
}
Buf outp;
Protocol::OK::HELLO &ok = outp.as<Protocol::OK::HELLO>();
@ -612,7 +662,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
outp.wI(outl,(uint16_t)0); // length of additional fields, currently 0
if ((outl + ZT_HMACSHA384_LEN) > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check, shouldn't be possible
return;
return false;
KBKDFHMACSHA384(key,ZT_PROTO_KDF_KEY_LABEL_HELLO_HMAC,0,1,hmacKey); // iter == 1 for OK
HMACSHA384(hmacKey,outp.b + sizeof(ok.h),outl - sizeof(ok.h),outp.b + outl);
@ -623,39 +673,277 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
path->send(RR,tPtr,outp.b,outl,now);
peer->setRemoteVersion(p.versionProtocol,p.versionMajor,p.versionMinor,Utils::ntoh(p.versionRev));
peer->received(tPtr,path,hops,p.h.packetId,packetSize - ZT_PROTO_PACKET_PAYLOAD_START,Protocol::VERB_HELLO,0,Protocol::VERB_NOP,0);
return true;
}
void VL1::_ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle an ERROR packet sent by a peer in response to one of our previous
// packets. Returns false only when the packet is malformed; the caller then
// skips the peer->received() accounting for this packet.
bool VL1::_ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	if (packetSize < sizeof(Protocol::ERROR::Header)) {
		RR->t->incomingPacketDropped(tPtr,0x3beb1947,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_ERROR,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::ERROR::Header &eh = pkt.as<Protocol::ERROR::Header>();
	// Dispatch on the error code. NOTE(review): every case body below is
	// currently an empty stub; per-error handling (e.g. reacting to network
	// config request failures) is still TODO.
	switch(eh.error) {
	//case Protocol::ERROR_INVALID_REQUEST:
	//case Protocol::ERROR_BAD_PROTOCOL_VERSION:
	//case Protocol::ERROR_CANNOT_DELIVER:
	default:
		break;
	case Protocol::ERROR_OBJ_NOT_FOUND:
		if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
		}
		break;
	case Protocol::ERROR_UNSUPPORTED_OPERATION:
		if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
		}
		break;
	case Protocol::ERROR_NEED_MEMBERSHIP_CERTIFICATE:
		break;
	case Protocol::ERROR_NETWORK_ACCESS_DENIED_:
		if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
		}
		break;
	}
	return true;
}
// Handle an OK packet sent by a peer in response to one of our previous
// packets. Returns false only when the packet is malformed; the caller then
// skips the peer->received() accounting for this packet.
bool VL1::_OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	if (packetSize < sizeof(Protocol::OK::Header)) {
		RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_OK,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::OK::Header &oh = pkt.as<Protocol::OK::Header>();
	// Dispatch on the verb this OK is in response to. NOTE(review): all cases
	// are currently empty stubs; per-verb response handling is still TODO.
	switch(oh.inReVerb) {
	case Protocol::VERB_HELLO:
		break;
	case Protocol::VERB_WHOIS:
		break;
	case Protocol::VERB_NETWORK_CONFIG_REQUEST:
		break;
	case Protocol::VERB_MULTICAST_GATHER:
		break;
	}
	return true;
}
// Handle a WHOIS request: reply with one or more OK(WHOIS) packets containing
// the identities (and, for v2.x+ peers, locators) of the requested addresses.
// The request payload is a series of 5-byte addresses following the packet
// header. Returns false only when the packet is malformed.
bool VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	// Minimum valid size is the plain protocol header: that is all this handler
	// reads directly. (The previous check against sizeof(Protocol::OK::Header)
	// and the VERB_OK / 0x4c1f1ff7 trace tag were copy-paste from _OK().)
	if (packetSize < sizeof(Protocol::Header)) {
		RR->t->incomingPacketDropped(tPtr,0x4c1f1ff8,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_WHOIS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::Header &ph = pkt.as<Protocol::Header>();

	// WHOIS is rate gated to prevent use in flooding or amplification attacks.
	if (!peer->rateGateInboundWhoisRequest(RR->node->now())) {
		RR->t->incomingPacketDropped(tPtr,0x19f7194a,ph.packetId,0,peer->identity(),path->address(),Protocol::packetHops(ph),Protocol::VERB_WHOIS,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
		return true;
	}

	Buf outp;
	Protocol::OK::WHOIS &outh = outp.as<Protocol::OK::WHOIS>();
	int ptr = sizeof(Protocol::Header);
	// Outer loop: one OK(WHOIS) packet per iteration until all requested
	// addresses are consumed; inner loop packs as many results as fit.
	while ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) {
		outh.h.h.packetId = Protocol::getPacketId();
		peer->address().copyTo(outh.h.h.destination);
		RR->identity.address().copyTo(outh.h.h.source);
		outh.h.h.flags = 0;
		outh.h.h.verb = Protocol::VERB_OK;
		outh.h.inReVerb = Protocol::VERB_WHOIS;
		outh.h.inRePacketId = ph.packetId;
		int outl = sizeof(Protocol::OK::WHOIS);
		while ( ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) && ((outl + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX) < ZT_PROTO_MAX_PACKET_LENGTH) ) {
			const SharedPtr<Peer> &wp(RR->topology->peer(tPtr,Address(pkt.b + ptr)));
			if (wp) {
				outp.wO(outl,wp->identity());
				if (peer->remoteVersionProtocol() >= 11) { // older versions don't know what a locator is
					const Locator loc(wp->locator());
					outp.wO(outl,loc);
				}
				if (Buf::writeOverflow(outl)) { // sanity check, shouldn't be possible
					RR->t->unexpectedError(tPtr,0xabc0f183,"Buf write overflow building OK(WHOIS) to reply to %s",Trace::str(peer->address(),path).s);
					return false;
				}
			}
			// Unknown addresses are silently skipped.
			ptr += ZT_ADDRESS_LENGTH;
		}
		// Only send if at least one result was packed.
		if (outl > sizeof(Protocol::OK::WHOIS)) {
			Protocol::armor(outp,outl,peer->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
			path->send(RR,tPtr,outp.b,outl,RR->node->now());
		}
	}
	return true;
}
// Handle a RENDEZVOUS message: a root instructs us to attempt direct contact
// with another peer at a given physical address (NAT traversal assist).
// Only honored when the sender is a root. Returns false only if malformed.
bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	// Throwaway payload for the IPv4 "firewall opener" packet below.
	// NOTE(review): mutable function-local static with no synchronization; if
	// this handler can run concurrently this should be std::atomic — TODO confirm.
	static uint16_t junk = 0;
	// Ignore RENDEZVOUS from anyone but a root; roots relay traffic and are
	// the only nodes trusted to introduce peers to each other.
	if (RR->topology->isRoot(peer->identity())) {
		if (packetSize < sizeof(Protocol::RENDEZVOUS)) {
			RR->t->incomingPacketDropped(tPtr,0x43e90ab3,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_RENDEZVOUS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}
		Protocol::RENDEZVOUS &rdv = pkt.as<Protocol::RENDEZVOUS>();
		// Only try to reach peers we already know about.
		const SharedPtr<Peer> with(RR->topology->peer(tPtr,Address(rdv.peerAddress)));
		if (with) {
			const unsigned int port = Utils::ntoh(rdv.port);
			if (port != 0) {
				// addressLength selects IPv4 (4) or IPv6 (16); the raw address
				// bytes follow the fixed RENDEZVOUS structure.
				switch(rdv.addressLength) {
					case 4:
						if ((sizeof(Protocol::RENDEZVOUS) + 4) <= packetSize) {
							InetAddress atAddr(pkt.b + sizeof(Protocol::RENDEZVOUS),4,port);
							++junk;
							RR->node->putPacket(tPtr,path->localSocket(),atAddr,(const void *)&junk,2,2); // IPv4 "firewall opener" hack
							with->sendHELLO(tPtr,path->localSocket(),atAddr,RR->node->now());
							RR->t->tryingNewPath(tPtr,0x55a19aaa,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
						}
						break;
					case 16:
						if ((sizeof(Protocol::RENDEZVOUS) + 16) <= packetSize) {
							InetAddress atAddr(pkt.b + sizeof(Protocol::RENDEZVOUS),16,port);
							with->sendHELLO(tPtr,path->localSocket(),atAddr,RR->node->now());
							RR->t->tryingNewPath(tPtr,0x54bada09,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
						}
						break;
				}
			}
		}
	}
	return true;
}
bool VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
const uint64_t packetId = Protocol::packetId(pkt,packetSize);
const uint64_t now = RR->node->now();
if (packetSize < sizeof(Protocol::Header)) {
RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return false;
}
if (peer->rateGateEchoRequest(now)) {
Buf outp;
Protocol::OK::ECHO &outh = outp.as<Protocol::OK::ECHO>();
outh.h.h.packetId = Protocol::getPacketId();
peer->address().copyTo(outh.h.h.destination);
RR->identity.address().copyTo(outh.h.h.source);
outh.h.h.flags = 0;
outh.h.h.verb = Protocol::VERB_OK;
outh.h.inReVerb = Protocol::VERB_ECHO;
outh.h.inRePacketId = packetId;
int outl = sizeof(Protocol::OK::ECHO);
outp.wB(outl,pkt.b + sizeof(Protocol::Header),packetSize - sizeof(Protocol::Header));
if (Buf::writeOverflow(outl)) {
RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
return false;
}
Protocol::armor(outp,outl,peer->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
path->send(RR,tPtr,outp.b,outl,now);
} else {
RR->t->incomingPacketDropped(tPtr,0x27878bc1,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
}
return true;
}
// Handle PUSH_DIRECT_PATHS: a peer advertises physical addresses at which it
// may be directly reachable. Each path record carries flags, optional extended
// attributes, an address type, and a variable-length address. Rate limited.
// Returns false only when the packet is malformed.
bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	if (packetSize < sizeof(Protocol::PUSH_DIRECT_PATHS)) {
		RR->t->incomingPacketDropped(tPtr,0x1bb1bbb1,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::PUSH_DIRECT_PATHS &pdp = pkt.as<Protocol::PUSH_DIRECT_PATHS>();
	const uint64_t now = RR->node->now();
	// Rate gate to keep this from being used to trigger flooding.
	if (!peer->rateGateInboundPushDirectPaths(now)) {
		RR->t->incomingPacketDropped(tPtr,0x35b1aaaa,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
		return true;
	}
	int ptr = sizeof(Protocol::PUSH_DIRECT_PATHS);
	const unsigned int numPaths = Utils::ntoh(pdp.numPaths);
	InetAddress a;
	Endpoint ep;
	for(unsigned int pi=0;pi<numPaths;++pi) {
		/*const uint8_t flags = pkt.rI8(ptr);*/ ++ptr; // flags byte currently ignored
		ptr += pkt.rI16(ptr); // extended attributes size, currently always 0
		const unsigned int addrType = pkt.rI8(ptr);
		const unsigned int addrRecordLen = pkt.rI8(ptr);
		if (addrRecordLen == 0) {
			RR->t->incomingPacketDropped(tPtr,0xaed00118,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}
		const void *addrBytes = nullptr;
		unsigned int addrLen = 0;
		unsigned int addrPort = 0;
		// Address record decoding: type 0 is a marshaled Endpoint object;
		// types 4 and 6 are raw IPv4/IPv6 address bytes followed by a port.
		switch(addrType) {
			case 0:
				addrBytes = pkt.rBnc(ptr,addrRecordLen);
				addrLen = addrRecordLen;
				break;
			case 4:
				addrBytes = pkt.rBnc(ptr,4);
				addrLen = 4;
				addrPort = pkt.rI16(ptr);
				break;
			case 6:
				addrBytes = pkt.rBnc(ptr,16);
				addrLen = 16;
				addrPort = pkt.rI16(ptr);
				break;
		}
		// All reads above are validated once here rather than individually.
		if (Buf::readOverflow(ptr,packetSize)) {
			RR->t->incomingPacketDropped(tPtr,0xbad0f10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}
		if (addrPort) {
			// Raw IPv4/IPv6 record: build InetAddress directly.
			a.set(addrBytes,addrLen,addrPort);
		} else if (addrLen) {
			// Endpoint record: unmarshal, then use it only if it wraps an IP address.
			if (ep.unmarshal(reinterpret_cast<const uint8_t *>(addrBytes),(int)addrLen) <= 0) {
				RR->t->incomingPacketDropped(tPtr,0xbad0f00d,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
				return false;
			}
			if ((ep.type() == Endpoint::INETADDR_V4)||(ep.type() == Endpoint::INETADDR_V6))
				a = ep.inetAddr();
		}
		if (a) {
			// TODO: acting on the advertised address (path learning/probing)
			// is not implemented yet.
		}
		ptr += (int)addrRecordLen;
	}
	return true;
}
// Handle a USER_MESSAGE (application-defined message).
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function, which is undefined behavior in C++; return true
// for now, matching the _ENCAP stub.
bool VL1::_USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL1::_OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
}
void VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
}
void VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
}
void VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
}
void VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
}
void VL1::_USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
{
}
void VL1::_ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle an ENCAP (encapsulated packet) message.
// TODO: not implemented yet; currently reports success so the caller still
// credits the packet via peer->received().
bool VL1::_ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
} // namespace ZeroTier

View file

@ -56,15 +56,15 @@ private:
void _sendPendingWhois(void *tPtr,int64_t now);
// Handlers for VL1 verbs
void _HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
bool _HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
bool _ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
const RuntimeEnvironment *RR;

View file

@ -31,43 +31,43 @@ VL2::~VL2()
{
}
void VL2::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
// Process an Ethernet frame originating from the local tap/virtual interface.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return false for now —
// presumably "frame not handled/sent" — TODO confirm intended semantics.
bool VL2::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
	return false;
}
void VL2::_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a FRAME (simple Ethernet frame) message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle an EXT_FRAME (extended Ethernet frame) message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a MULTICAST_LIKE (multicast group subscription) message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a NETWORK_CREDENTIALS message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a NETWORK_CONFIG_REQUEST message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a NETWORK_CONFIG message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a MULTICAST_GATHER message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a legacy (deprecated) MULTICAST_FRAME message from older peers.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}
void VL2::_MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
// Handle a MULTICAST message.
// TODO: not implemented yet. The empty body previously fell off the end of a
// value-returning function (undefined behavior); return true to match the
// VL1 stub handlers.
bool VL2::_MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
{
	return true;
}

View file

@ -54,15 +54,15 @@ public:
void onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len);
protected:
void _FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
void _MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
bool _FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
bool _MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
private:
};

View file

@ -17,7 +17,7 @@
#include "../node/InetAddress.hpp"
#include "../node/Utils.hpp"
#include "../node/SharedPtr.hpp"
#include "../node/AtomicCounter.hpp"
#include "../node/Atomic.hpp"
#include <stdexcept>
#include <vector>
@ -83,7 +83,7 @@ private:
char _device[128];
char _systemDevice[128]; // for route overrides
AtomicCounter<int> __refCount;
Atomic<int> __refCount;
};
} // namespace ZeroTier

View file

@ -122,7 +122,7 @@ using json = nlohmann::json;
* RootPeer is a normal peer known to this root
*
* This struct must remain memcpy-able. Identity, InetAddress, and
* AtomicCounter all satisfy this. Take care when adding fields that
* Atomic all satisfy this. Take care when adding fields that
* this remains true.
*/
struct RootPeer
@ -140,7 +140,7 @@ struct RootPeer
int vProto; // Protocol version or -1 if unknown
int vMajor,vMinor,vRev; // Peer version or -1,-1,-1 if unknown
AtomicCounter __refCount;
Atomic __refCount;
};
// Hashers for std::unordered_map